-rw-r--r--INFO18
-rw-r--r--INFO.yaml42
-rw-r--r--VNFs/DPPD-PROX/Makefile29
-rw-r--r--VNFs/DPPD-PROX/README127
-rw-r--r--VNFs/DPPD-PROX/acl_field_def.h2
-rw-r--r--VNFs/DPPD-PROX/arp.h4
-rw-r--r--VNFs/DPPD-PROX/bng_pkts.h10
-rw-r--r--VNFs/DPPD-PROX/cmd_parser.c184
-rw-r--r--VNFs/DPPD-PROX/commands.c8
-rw-r--r--VNFs/DPPD-PROX/defaults.c34
-rw-r--r--VNFs/DPPD-PROX/display_latency.c18
-rw-r--r--VNFs/DPPD-PROX/display_pkt_len.c2
-rw-r--r--VNFs/DPPD-PROX/display_ports.c16
-rw-r--r--VNFs/DPPD-PROX/display_rings.c2
-rw-r--r--VNFs/DPPD-PROX/eld.h7
-rw-r--r--VNFs/DPPD-PROX/git_version.c.in1
-rw-r--r--VNFs/DPPD-PROX/handle_cgnat.c166
-rw-r--r--VNFs/DPPD-PROX/handle_dump.c15
-rw-r--r--VNFs/DPPD-PROX/handle_esp.c786
-rw-r--r--VNFs/DPPD-PROX/handle_fm.c2
-rw-r--r--VNFs/DPPD-PROX/handle_gen.c352
-rw-r--r--VNFs/DPPD-PROX/handle_gen.h5
-rw-r--r--VNFs/DPPD-PROX/handle_impair.c63
-rw-r--r--VNFs/DPPD-PROX/handle_impair.h4
-rw-r--r--VNFs/DPPD-PROX/handle_ipv6_tunnel.c2
-rw-r--r--VNFs/DPPD-PROX/handle_irq.c3
-rw-r--r--VNFs/DPPD-PROX/handle_lat.c115
-rw-r--r--VNFs/DPPD-PROX/handle_lat.h9
-rw-r--r--VNFs/DPPD-PROX/handle_lb_5tuple.c5
-rw-r--r--VNFs/DPPD-PROX/handle_lb_qinq.c14
-rw-r--r--VNFs/DPPD-PROX/handle_master.c140
-rw-r--r--VNFs/DPPD-PROX/handle_master.h4
-rw-r--r--VNFs/DPPD-PROX/handle_mirror.c43
-rw-r--r--VNFs/DPPD-PROX/handle_nat.c5
-rw-r--r--VNFs/DPPD-PROX/handle_nsh.c36
-rw-r--r--VNFs/DPPD-PROX/handle_qinq_decap4.c7
-rw-r--r--VNFs/DPPD-PROX/handle_qinq_encap4.c2
-rw-r--r--VNFs/DPPD-PROX/handle_qos.c4
-rw-r--r--VNFs/DPPD-PROX/handle_swap.c108
-rw-r--r--VNFs/DPPD-PROX/handle_tsc.c4
-rw-r--r--VNFs/DPPD-PROX/hash_utils.c5
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/Dockerfile89
-rwxr-xr-xVNFs/DPPD-PROX/helper-scripts/rapid/check_prox_system_setup.sh66
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/config_file2
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/configs/cgnat.cfg81
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/configs/esp.cfg47
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/configs/gen.cfg (renamed from VNFs/DPPD-PROX/helper-scripts/rapid/gen.cfg)4
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/configs/gen_gw.cfg (renamed from VNFs/DPPD-PROX/helper-scripts/rapid/gen_gw.cfg)4
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/configs/genv6.cfg (renamed from VNFs/DPPD-PROX/helper-scripts/rapid/genv6.cfg)2
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/configs/impair.cfg (renamed from VNFs/DPPD-PROX/helper-scripts/rapid/impair.cfg)6
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/configs/irq.cfg (renamed from VNFs/DPPD-PROX/helper-scripts/rapid/irq.cfg)2
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/configs/l2gen.cfg (renamed from VNFs/DPPD-PROX/helper-scripts/rapid/l2gen.cfg)5
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/configs/l2gen_bare.cfg (renamed from VNFs/DPPD-PROX/helper-scripts/rapid/l2gen_bare.cfg)3
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/configs/l2swap.cfg (renamed from VNFs/DPPD-PROX/helper-scripts/rapid/l2swap.cfg)2
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/configs/public_server.cfg57
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/configs/secgw1.cfg (renamed from VNFs/DPPD-PROX/helper-scripts/rapid/secgw1.cfg)2
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/configs/secgw2.cfg (renamed from VNFs/DPPD-PROX/helper-scripts/rapid/secgw2.cfg)2
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/configs/setup.cfg10
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/configs/swap.cfg (renamed from VNFs/DPPD-PROX/helper-scripts/rapid/swap.cfg)2
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/configs/swap_gw.cfg50
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/configs/swapv6.cfg (renamed from VNFs/DPPD-PROX/helper-scripts/rapid/swapv6.cfg)2
-rwxr-xr-xVNFs/DPPD-PROX/helper-scripts/rapid/createrapid.py15
-rwxr-xr-xVNFs/DPPD-PROX/helper-scripts/rapid/createrapidk8s.py4
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/deploycentostools.sh53
-rwxr-xr-xVNFs/DPPD-PROX/helper-scripts/rapid/devbind.sh4
-rwxr-xr-xVNFs/DPPD-PROX/helper-scripts/rapid/dockerimage.sh2
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/format.yaml105
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/helper.lua16
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/machine.map3
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/openstack-rapid.yaml81
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/params_rapid.yaml6
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/pod-rapid.yaml11
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/port_info/meson.build101
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/port_info/port_info.c4
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/prox_ctrl.py231
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/pyproject.toml6
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid-openstack-server-2ports.yaml94
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid.pods9
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_cli.py1
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_corestatstest.py32
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_defaults.py6
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_flowsizetest.py333
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_generator_machine.py98
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/.helmignore23
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/Chart.yaml6
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/templates/deployment.yaml26
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/templates/serviceaccount.yaml36
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/values.yaml8
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_impairtest.py85
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_irqtest.py107
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_k8s_deployment.py (renamed from VNFs/DPPD-PROX/helper-scripts/rapid/k8sdeployment.py)64
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_k8s_pod.py (renamed from VNFs/DPPD-PROX/helper-scripts/rapid/pod.py)76
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_log.py123
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_machine.py199
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_parser.py144
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_portstatstest.py28
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_rsa_key49
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_rsa_key.pub1
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_sshclient.py (renamed from VNFs/DPPD-PROX/helper-scripts/rapid/sshclient.py)67
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_test.py353
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_warmuptest.py13
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapidxt.py56
-rwxr-xr-xVNFs/DPPD-PROX/helper-scripts/rapid/runrapid.py169
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/setup.cfg16
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/setup.py9
-rwxr-xr-xVNFs/DPPD-PROX/helper-scripts/rapid/stackdeployment.py60
-rwxr-xr-xVNFs/DPPD-PROX/helper-scripts/rapid/start.sh21
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/README194
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/TST009_Throughput.test (renamed from VNFs/DPPD-PROX/helper-scripts/rapid/TST009_Throughput.test)28
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/TST009_Throughput_64B_64F.test57
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/TST009_Throughput_acaeab_16384F.test57
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/TST009ipV6.test61
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/bare.test (renamed from VNFs/DPPD-PROX/helper-scripts/rapid/bare.test)17
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/basicrapid.test (renamed from VNFs/DPPD-PROX/helper-scripts/rapid/basicrapid.test)27
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/basicrapid_gw.test73
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/cgnat.test63
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/corestats.test (renamed from VNFs/DPPD-PROX/helper-scripts/rapid/corestats.test)9
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/encrypt.test70
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/impair.test (renamed from VNFs/DPPD-PROX/helper-scripts/rapid/impair.test)20
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/increment_till_fail.test64
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/ipv6.test (renamed from VNFs/DPPD-PROX/helper-scripts/rapid/ipv6.test)35
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/irq.test (renamed from VNFs/DPPD-PROX/helper-scripts/rapid/irq.test)6
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/l2framerate.test (renamed from VNFs/DPPD-PROX/helper-scripts/rapid/l2framerate.test)15
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/l2zeroloss.test (renamed from VNFs/DPPD-PROX/helper-scripts/rapid/l2zeroloss.test)26
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/l3framerate.test (renamed from VNFs/DPPD-PROX/helper-scripts/rapid/l3framerate.test)26
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/portstats.test (renamed from VNFs/DPPD-PROX/helper-scripts/rapid/portstats.test)9
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/secgw.test (renamed from VNFs/DPPD-PROX/helper-scripts/rapid/secgw.test)24
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/xtesting/Dockerfile28
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/xtesting/site.yaml13
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/xtesting/testcases.yaml54
-rw-r--r--VNFs/DPPD-PROX/input_curses.c1
-rw-r--r--VNFs/DPPD-PROX/lconf.h9
-rw-r--r--VNFs/DPPD-PROX/main.c65
-rw-r--r--VNFs/DPPD-PROX/meson.build206
-rw-r--r--VNFs/DPPD-PROX/meson_options.txt9
-rw-r--r--VNFs/DPPD-PROX/packet_utils.c170
-rw-r--r--VNFs/DPPD-PROX/packet_utils.h3
-rw-r--r--VNFs/DPPD-PROX/parse_utils.c66
-rw-r--r--VNFs/DPPD-PROX/parse_utils.h2
-rw-r--r--VNFs/DPPD-PROX/prox_args.c266
-rw-r--r--VNFs/DPPD-PROX/prox_cksum.c10
-rw-r--r--VNFs/DPPD-PROX/prox_cksum.h2
-rw-r--r--VNFs/DPPD-PROX/prox_compat.h395
-rw-r--r--VNFs/DPPD-PROX/prox_globals.h1
-rw-r--r--VNFs/DPPD-PROX/prox_ipv6.c85
-rw-r--r--VNFs/DPPD-PROX/prox_ipv6.h9
-rw-r--r--VNFs/DPPD-PROX/prox_lua_types.h2
-rw-r--r--VNFs/DPPD-PROX/prox_port_cfg.c259
-rw-r--r--VNFs/DPPD-PROX/prox_port_cfg.h19
-rw-r--r--VNFs/DPPD-PROX/prox_shared.c1
-rw-r--r--VNFs/DPPD-PROX/qinq.h2
-rw-r--r--VNFs/DPPD-PROX/rw_reg.c4
-rw-r--r--VNFs/DPPD-PROX/rx_pkt.c6
-rw-r--r--VNFs/DPPD-PROX/stats_irq.h2
-rw-r--r--VNFs/DPPD-PROX/stats_latency.c3
-rw-r--r--VNFs/DPPD-PROX/stats_latency.h3
-rw-r--r--VNFs/DPPD-PROX/stats_port.c11
-rw-r--r--VNFs/DPPD-PROX/stats_task.h5
-rw-r--r--VNFs/DPPD-PROX/task_base.h9
-rw-r--r--VNFs/DPPD-PROX/task_init.c8
-rw-r--r--VNFs/DPPD-PROX/task_init.h23
-rw-r--r--VNFs/DPPD-PROX/thread_generic.c1
-rw-r--r--VNFs/DPPD-PROX/toeplitz.c4
-rw-r--r--VNFs/DPPD-PROX/toeplitz.h2
-rw-r--r--VNFs/DPPD-PROX/tx_pkt.c24
-rw-r--r--VNFs/DPPD-PROX/version.h2
-rw-r--r--docs/conf.py3
-rw-r--r--docs/index.rst2
-rw-r--r--docs/release/release-notes/release-notes.rst410
-rw-r--r--docs/release/results/overview.rst2
-rw-r--r--docs/testing/developer/design/02-Get_started_Guide.rst2
-rw-r--r--docs/testing/developer/design/04-SampleVNF_Design.rst77
-rw-r--r--docs/testing/developer/requirements/03-Requirements.rst2
-rw-r--r--[-rwxr-xr-x]docs/testing/user/userguide/01-introduction.rst48
-rw-r--r--docs/testing/user/userguide/01-prox_documentation.rst4
-rw-r--r--docs/testing/user/userguide/02-methodology.rst101
-rw-r--r--[-rwxr-xr-x]docs/testing/user/userguide/03-architecture.rst14
-rw-r--r--docs/testing/user/userguide/03-installation.rst162
-rw-r--r--docs/testing/user/userguide/04-installation.rst230
-rw-r--r--docs/testing/user/userguide/04-running_the_test.rst226
-rw-r--r--docs/testing/user/userguide/05-How_to_run_SampleVNFs.rst26
-rw-r--r--docs/testing/user/userguide/06-How_to_use_REST_api.rst23
-rw-r--r--docs/testing/user/userguide/07-Config_files.rst2
-rw-r--r--docs/testing/user/userguide/images/rapid.pngbin0 -> 34588 bytes
-rw-r--r--docs/testing/user/userguide/index.rst15
-rw-r--r--docs/testing/user/userguide/references.rst6
-rw-r--r--rapidvm/README.rst38
-rwxr-xr-xrapidvm/dib/build-image.sh99
-rw-r--r--rapidvm/dib/elements/rapid/element-deps5
-rw-r--r--rapidvm/dib/elements/rapid/package-installs.yaml20
-rwxr-xr-xrapidvm/dib/elements/rapid/post-install.d/40-mlib30
-rwxr-xr-xrapidvm/dib/elements/rapid/post-install.d/50-compile-dpdk38
-rwxr-xr-xrapidvm/dib/elements/rapid/post-install.d/60-compile-prox28
-rwxr-xr-xrapidvm/dib/elements/rapid/post-install.d/70-os-cfg50
-rwxr-xr-xrapidvm/dib/elements/rapid/post-install.d/80-change-permissions18
-rwxr-xr-xrapidvm/dib/elements/rapid/post-install.d/81-clean-rpms20
-rw-r--r--rapidvm/dib/elements/rapid/source-repository-dpdk1
-rw-r--r--rapidvm/dib/elements/rapid/source-repository-samplevnf1
-rw-r--r--tox.ini2
199 files changed, 7324 insertions, 2841 deletions
diff --git a/INFO b/INFO
index 1fe3befb..f27c106b 100644
--- a/INFO
+++ b/INFO
@@ -1,21 +1,19 @@
Project: Sample Virtual Network Function (SAMPLEVNF)
Project Creation Date:27/03/2017
Lifecycle State: Incubation
-Primary Contact: kannan.babu.ramia@intel.com
-Project Lead: deepak.s@intel.com
+Primary Contact: luc.provoost@gmail.com
+Project Lead: luc.provoost@gmail.com
Jira Name: Sample Virtual Network Function
Jira Prefix: [SAMPLEVNF]
mailing list tag:[samplevnf]
Repo: samplevnf
Committers:
-deepak.s@intel.com
-sonika.jindal@intel.com
-anand.b.jyoti@intel.com
-fbrockne@cisco.com
-shang.xiaodong@zte.com.cn
-xavier.simonart@intel.com
-patrice.buriez@intel.com
+luc.provoost@gmail.com
+acm@research.att.com
+trevor.cooper@intel.com
+simonartxavier@gmail.com
+patrice.buriez@chenapan.org
-Link to TSC approval: http://meetbot.opnfv.org/meetings/opnfv-meeting/2017/opnfv-meeting.2017-02-07-15.00.html
+Link to TSC approval: https://wiki.opnfv.org/display/meetings/OPNFV+TSC+Meeting+2020-09-29
Link to approval of additional submitters:
diff --git a/INFO.yaml b/INFO.yaml
index 7091c004..a335375b 100644
--- a/INFO.yaml
+++ b/INFO.yaml
@@ -4,10 +4,10 @@ project_creation_date: '27/03/2017'
project_category: ''
lifecycle_state: 'Incubation'
project_lead: &opnfv_samplevnf_ptl
- name: 'Deepak S'
- email: 'deepak.s@linux.intel.com'
- company: 'linux.intel.com'
- id: 'ds2'
+ name: 'Luc Provoost'
+ email: 'luc.provoost@gmail.com'
+ company: '-'
+ id: 'LucProvoost'
timezone: 'Unknown'
primary_contact: *opnfv_samplevnf_ptl
issue_tracking:
@@ -34,23 +34,23 @@ repositories:
- 'samplevnf'
committers:
- <<: *opnfv_samplevnf_ptl
- - name: 'Frank Brockners'
- email: 'fbrockne@cisco.com'
- company: 'cisco.com'
- id: 'brockners'
- - name: 'xiaodong shang'
- email: 'shang.xiaodong@zte.com.cn'
- company: 'zte.com.cn'
- id: 'shangxdy'
- - name: 'Deepak S'
- email: 'deepak.s@linux.intel.com'
- company: 'linux.intel.com'
- id: 'ds2'
- - name: 'Sonika Jindal'
- email: 'sonijindal@gmail.com'
- company: 'gmail.com'
- id: 'sonika.jindal'
+ - name: 'Al Morton'
+ email: 'acm@research.att.com'
+ company: 'att.com'
+ id: 'acm'
+ - name: 'Trevor Cooper'
+ email: 'trevor.cooper@intel.com'
+ company: 'intel.com'
+ id: 'trev'
+ - name: 'Xavier Simonart'
+ email: 'simonartxavier@gmail.com'
+ company: '-'
+ id: 'xavier.simonart'
+ - name: 'Patrice Buriez'
+ email: 'patrice.buriez@chenapan.org'
+ company: '-'
+ id: 'pburiez'
tsc:
# yamllint disable rule:line-length
- approval: 'http//meetbot.opnfv.org/meetings/opnfv-meeting/2017/opnfv-meeting.2017-02-07-15.00.html'
+ approval: 'https://ircbot.wl.linuxfoundation.org/meetings/opnfv-meeting/2020/opnfv_tsc_09_29_20/opnfv-meeting-opnfv_tsc_09_29_20.2020-09-29-13.06.html'
# yamllint enable rule:line-length
diff --git a/VNFs/DPPD-PROX/Makefile b/VNFs/DPPD-PROX/Makefile
index ff75c178..9a675ca0 100644
--- a/VNFs/DPPD-PROX/Makefile
+++ b/VNFs/DPPD-PROX/Makefile
@@ -15,12 +15,28 @@
##
ifeq ($(RTE_SDK),)
-$(error "Please define RTE_SDK environment variable")
+define err_msg
+
+Please define RTE_SDK environment variable.
+If DPDK was built with Meson, please use meson to build Prox too.
+***
+endef
+$(error $(err_msg))
endif
# Default target, can be overriden by command line or environment
RTE_TARGET ?= x86_64-native-linuxapp-gcc
+ifeq ($(wildcard $(RTE_SDK)/$(RTE_TARGET)/.),)
+define err_msg
+
+Could not find build target: $(RTE_TARGET)
+Perhaps DPDK was built using meson?
+***
+endef
+$(error $(err_msg))
+endif
+
rte_version_h := $(RTE_SDK)/$(RTE_TARGET)/include/rte_version.h
rte_config_h := $(RTE_SDK)/$(RTE_TARGET)/include/rte_config.h
rte_ver_part = $(shell sed -n -e 's/^\#define\s*$1\s*\(.*\)$$/\1/p' $(rte_version_h))
@@ -214,12 +230,15 @@ SRCS-y += stats_latency.c stats_global.c stats_core.c stats_task.c stats_prio.c
SRCS-y += cmd_parser.c input.c prox_shared.c prox_lua_types.c
SRCS-y += genl4_bundle.c heap.c genl4_stream_tcp.c genl4_stream_udp.c cdf.c
SRCS-y += stats.c stats_cons_log.c stats_cons_cli.c stats_parser.c hash_set.c prox_lua.c prox_malloc.c prox_ipv6.c prox_compat.c
+SRCS-y += git_version.c
+
+GIT_VERSION := "$(shell git describe --abbrev=8 --dirty --always)"
ifeq ($(FIRST_PROX_MAKE),)
MAKEFLAGS += --no-print-directory
FIRST_PROX_MAKE = 1
export FIRST_PROX_MAKE
-all: libedit_autoconf.h
+all: libedit_autoconf.h git_version.c
@./helper-scripts/trailing.sh
@$(MAKE) $@
clean:
@@ -246,6 +265,12 @@ libedit_autoconf.h: $(AUTO-CONFIG-SCRIPT)
> /dev/null
# auto-conf adds empty line at the end of the file, considered as error by trailing.sh script
$(Q) sed -i '$$ d' '$@'
+
+git_version.c: force
+ @echo 'const char *git_version=$(GIT_VERSION);' | cmp -s - $@ || echo 'const char *git_version=$(GIT_VERSION);' > $@
+ @echo $@
+force:
+
else
include $(RTE_SDK)/mk/rte.extapp.mk
endif
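
The git_version.c rule above regenerates the file on every build from "git describe --abbrev=8 --dirty --always", so the binary always carries the exact commit it was built from; the meson build obtains the same information through the new git_version.c.in template added further down. A minimal sketch of how such a symbol can be consumed (the extern declaration and the printing code are illustrative, not necessarily what PROX does with it):

#include <stdio.h>

/* git_version.c is generated at build time and contains, e.g.:
 *   const char *git_version="v20.12-7-gdeadbeef-dirty";
 * The extern declaration below is an assumption made for this sketch. */
extern const char *git_version;

int main(void)
{
	printf("PROX git version: %s\n", git_version);
	return 0;
}
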
diff --git a/VNFs/DPPD-PROX/README b/VNFs/DPPD-PROX/README
index 2380ab61..1d7ad51f 100644
--- a/VNFs/DPPD-PROX/README
+++ b/VNFs/DPPD-PROX/README
@@ -24,43 +24,104 @@ finer grained network functions like QoS, Routing, load-balancing...
Compiling and running this application
--------------------------------------
-This application supports DPDK 16.04, 16.11, 16.11.1, 17.02, 17.05, 17.08,
-17.11, 18.02, 18.05, 18.08, 18.11, 19.02, 19.05, 19.08, 19.11, 20.02 and
-20.05.
-
-The following commands assume that the following variables have been set:
-
-export RTE_SDK=/path/to/dpdk
-export RTE_TARGET=x86_64-native-linuxapp-gcc
-
-IPSec is only supported in PROX starting from DPDK 17.11
-It will only be compiled if CONFIG_RTE_LIBRTE_PMD_AESNI_MB is
-set in DPDK .config. This also requires AESNI_MULTI_BUFFER_LIB_PATH to point to
-the multi-buffer library which can be downloaded from
-<https://github.com/01org/intel-ipsec-mb>.
-See doc/guides/cryptodevs/aesni_mb.rst within dpdk for more details
-
-Example: DPDK 17.05 installation
---------------------------------
+This application supports DPDK 16.04, 16.07, 16.11, 17.02, 17.05, 17.08,
+17.11, 18.02, 18.05, 18.08, 18.11, 19.02, 19.05, 19.08, 19.11, 20.02, 20.05,
+20.08, 20.11, 21.02, 21.05, 21.08, 21.11, 22.03, 22.07, 22.11
+
+DPDK meson compilation
+----------------------
+Compilation with meson and ninja is supported since DPDK 18.02, while support
+for make has been removed in DPDK 20.11.
+
+Example: DPDK 20.11 installation with meson
+-------------------------------------------
+cd /your/path/for/dpdk/meson/compilation/
+git clone http://dpdk.org/git/dpdk-stable
+cd dpdk-stable/
+git checkout 20.11
+meson setup build
+# For DPDK 21.11 and above, please run:
+# meson setup -Denable_driver_sdk=true build # instead, or
+# meson configure build/ -Denable_driver_sdk=true # afterwards.
+cd build/
+ninja
+sudo ninja install
+sudo ldconfig
+
+PROX meson compilation
+----------------------
+Depending on the distribution in use, the DPDK libraries will be installed in
+different locations. The PKG_CONFIG_PATH environment variable is used to
+point to the correct location.
+
+On RHEL/CentOS: export PKG_CONFIG_PATH=/usr/local/lib64/pkgconfig
+On Ubuntu: export PKG_CONFIG_PATH=/usr/local/lib/x86_64-linux-gnu/pkgconfig
+
+cd /the/path/where/you/cloned/this/repo/
+cd VNFs/DPPD-PROX/
+meson setup build
+# Additional options can be specified on the 'meson setup' command line, or
+# using 'meson configure' afterwards. See the meson_options.txt file for
+# possible options.
+ninja -C build/
+
+Legacy DPDK make compilation
+----------------------------
+Compilation with make was supported up to DPDK 20.08; it has been removed
+in DPDK 20.11.
+
+The following commands require the following environment variables to be
+properly defined, as shown in the examples below:
+- RTE_SDK: absolute path to the DPDK sources directory,
+- RTE_TARGET: target (arch-machine-execenv-toolchain format) for
+              which we are building DPDK,
+- RTE_DEVEL_BUILD: set it to 'n' to prevent warnings from being treated
+                   as errors when building DPDK inside a git tree.
+
+Configuration options can be defined, before building DPDK, by means of
+appending lines into the config/defconfig_$RTE_TARGET file.
+
+For example, IPSec has been supported in PROX (handle_esp.c) since DPDK 17.11
+(although it has not been verified recently), but it only gets compiled when
+CONFIG_RTE_LIBRTE_PMD_AESNI_MB=y has been defined in the DPDK configuration. It
+also requires the AESNI_MULTI_BUFFER_LIB_PATH environment variable to point to
+the "Multi-Buffer Crypto for IPsec" library, which can be downloaded from
+https://github.com/intel/intel-ipsec-mb. See doc/guides/cryptodevs/aesni_mb.rst
+within the DPDK sources directory for more details.
+
+Example: DPDK 20.05 installation with make
+------------------------------------------
+cd /your/path/for/dpdk/make/compilation/
git clone http://dpdk.org/git/dpdk
-cd dpdk
-git checkout v17.05
-make install T=$RTE_TARGET
-
-PROX compilation
-----------------
-The Makefile with this application expects RTE_SDK to point to the
-root directory of DPDK (e.g. export RTE_SDK=/root/dpdk). If RTE_TARGET
-has not been set, x86_64-native-linuxapp-gcc will be assumed.
+cd dpdk/
+git checkout v20.05
+export RTE_SDK=$PWD
+export RTE_TARGET=x86_64-native-linuxapp-gcc
+export RTE_DEVEL_BUILD=n
+# Edit config/defconfig_$RTE_TARGET file to define options as needed.
+make config T=$RTE_TARGET O=$RTE_TARGET
+make O=$RTE_TARGET
+
+Legacy PROX make compilation
+----------------------------
+As explained above, PROX Makefile expects RTE_SDK to point to the DPDK sources
+directory. If RTE_TARGET is not set, it defaults to x86_64-native-linuxapp-gcc.
+
+cd /the/path/where/you/cloned/this/repo/
+cd VNFs/DPPD-PROX/
+export RTE_SDK=/your/path/for/dpdk/make/compilation/./dpdk/
+export RTE_TARGET=x86_64-native-linuxapp-gcc
+export RTE_DEVEL_BUILD=n
+make
Running PROX
------------
-After DPDK has been set up, run make from the directory where you have
-extracted this application. A build directory will be created
-containing the PROX executable. The usage of the application is shown
-below. Note that this application assumes that all required ports have
-been bound to the DPDK provided igb_uio driver. Refer to the "Getting
-Started Guide - DPDK" document for more details.
+After DPDK has been installed and PROX has been compiled, the build subdirectory
+contains the PROX executable. The usage of the application is shown below. Note
+that this application assumes that all required ports have been bound to the
+DPDK-provided igb_uio driver. Refer to the "Getting Started Guide"
+(http://doc.dpdk.org/guides/linux_gsg/ or doc/guides/linux_gsg/*.rst in the
+DPDK sources directory) for more details.
Usage: ./build/prox [-f CONFIG_FILE] [-l LOG_FILE] [-p] [-o DISPLAY] [-v] [-a|-e] \
[-m|-s|-i] [-n] [-w DEF] [-q] [-k] [-d] [-z] [-r VAL] [-u] [-t]
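
Since several DPDK installations can coexist on a build machine, it can be worth checking which one the PKG_CONFIG_PATH above actually resolves to before building PROX with meson. A minimal sketch (not part of PROX) that prints the DPDK version, to be compiled with the flags reported by "pkg-config --cflags --libs libdpdk":

#include <stdio.h>
#include <rte_version.h>

int main(void)
{
	/* rte_version() returns a human-readable "DPDK xx.yy.z" string. */
	printf("pkg-config resolves to %s\n", rte_version());
	return 0;
}
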
diff --git a/VNFs/DPPD-PROX/acl_field_def.h b/VNFs/DPPD-PROX/acl_field_def.h
index 4f05ae80..da60e1c0 100644
--- a/VNFs/DPPD-PROX/acl_field_def.h
+++ b/VNFs/DPPD-PROX/acl_field_def.h
@@ -27,7 +27,7 @@ struct pkt_eth_ipv4_udp {
prox_rte_ether_hdr ether_hdr;
prox_rte_ipv4_hdr ipv4_hdr;
prox_rte_udp_hdr udp_hdr;
-} __attribute__((packed));
+} __attribute__((packed)) __attribute__((__aligned__(2)));
static struct rte_acl_field_def pkt_eth_ipv4_udp_defs[] = {
/* first input field - always one byte long. */
diff --git a/VNFs/DPPD-PROX/arp.h b/VNFs/DPPD-PROX/arp.h
index 3e8e0d90..ebf8a89e 100644
--- a/VNFs/DPPD-PROX/arp.h
+++ b/VNFs/DPPD-PROX/arp.h
@@ -30,7 +30,7 @@ struct _arp_ipv4 {
uint32_t spa; /* Sender protocol address */
prox_rte_ether_addr tha; /* Target hardware address */
uint32_t tpa; /* Target protocol address */
-} __attribute__((__packed__));
+} __attribute__((__packed__)) __attribute__((__aligned__(2)));
typedef struct _arp_ipv4 arp_ipv4_t;
struct my_arp_t {
@@ -40,7 +40,7 @@ struct my_arp_t {
uint8_t plen;
uint16_t oper;
arp_ipv4_t data;
-} __attribute__((__packed__));
+} __attribute__((__packed__)) __attribute__((__aligned__(2)));
struct ether_hdr_arp {
prox_rte_ether_hdr ether_hdr;
diff --git a/VNFs/DPPD-PROX/bng_pkts.h b/VNFs/DPPD-PROX/bng_pkts.h
index 50780e3b..85114a0c 100644
--- a/VNFs/DPPD-PROX/bng_pkts.h
+++ b/VNFs/DPPD-PROX/bng_pkts.h
@@ -37,12 +37,12 @@ struct cpe_pkt {
#endif
prox_rte_ipv4_hdr ipv4_hdr;
prox_rte_udp_hdr udp_hdr;
-} __attribute__((packed));
+} __attribute__((packed)) __attribute__((__aligned__(2)));
struct cpe_packet_arp {
struct qinq_hdr qinq_hdr;
struct my_arp_t arp;
-} __attribute__((packed));
+} __attribute__((packed)) __attribute__((__aligned__(2)));
/* Struct used for setting all the values a packet
going to the core netwerk. Payload may follow
@@ -59,7 +59,7 @@ struct core_net_pkt_m {
struct gre_hdr gre_hdr;
prox_rte_ipv4_hdr ip_hdr;
prox_rte_udp_hdr udp_hdr;
-} __attribute__((packed));
+} __attribute__((packed)) __attribute__((__aligned__(2)));
struct core_net_pkt {
prox_rte_ether_hdr ether_hdr;
@@ -67,7 +67,7 @@ struct core_net_pkt {
struct gre_hdr gre_hdr;
prox_rte_ipv4_hdr ip_hdr;
prox_rte_udp_hdr udp_hdr;
-} __attribute__((packed));
+} __attribute__((packed)) __attribute__((__aligned__(2)));
#define UPSTREAM_DELTA ((uint32_t)(sizeof(struct core_net_pkt) - sizeof(struct cpe_pkt)))
#define DOWNSTREAM_DELTA ((uint32_t)(sizeof(struct core_net_pkt_m) - sizeof(struct cpe_pkt)))
@@ -75,7 +75,7 @@ struct core_net_pkt {
struct cpe_pkt_delta {
uint8_t encap[DOWNSTREAM_DELTA];
struct cpe_pkt pkt;
-} __attribute__((packed));
+} __attribute__((packed)) __attribute__((__aligned__(2)));
static inline void extract_key_cpe(struct rte_mbuf *mbuf, uint64_t* key)
{
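
The acl_field_def.h, arp.h and bng_pkts.h hunks above (and the handle_cgnat.c one further down) all append __attribute__((__aligned__(2))) to packed header structs. Packing alone lowers a struct's alignment requirement to 1 byte, which recent compilers flag when pointers into 2-byte-aligned Ethernet frames are cast to such structs; declaring the real 2-byte alignment silences that while keeping member offsets unchanged. A small standalone illustration (struct names are made up for the example):

#include <stdint.h>

struct demo_hdr_packed {
	uint8_t  len;
	uint16_t proto;
} __attribute__((__packed__));

struct demo_hdr_packed_aligned {
	uint8_t  len;
	uint16_t proto;
} __attribute__((__packed__)) __attribute__((__aligned__(2)));

/* packed alone drops the alignment requirement to 1; adding aligned(2)
 * restores the 2-byte alignment that Ethernet headers in mbufs provide. */
_Static_assert(_Alignof(struct demo_hdr_packed) == 1, "packed => alignment 1");
_Static_assert(_Alignof(struct demo_hdr_packed_aligned) == 2, "aligned(2) => alignment 2");
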
diff --git a/VNFs/DPPD-PROX/cmd_parser.c b/VNFs/DPPD-PROX/cmd_parser.c
index 2d3b5704..bc796b55 100644
--- a/VNFs/DPPD-PROX/cmd_parser.c
+++ b/VNFs/DPPD-PROX/cmd_parser.c
@@ -398,16 +398,16 @@ static int parse_cmd_count(const char *str, struct input *input)
return 0;
}
-static int parse_cmd_set_probability(const char *str, struct input *input)
+static int parse_cmd_set_proba_no_drop(const char *str, struct input *input)
{
unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, nb_cores;
- float probability;
+ float proba_no_drop;
if (parse_cores_task(str, lcores, &task_id, &nb_cores))
return -1;
if (!(str = strchr_skip_twice(str, ' ')))
return -1;
- if (sscanf(str, "%f", &probability) != 1)
+ if (sscanf(str, "%f", &proba_no_drop) != 1)
return -1;
if (cores_task_are_valid(lcores, task_id, nb_cores)) {
@@ -417,7 +417,59 @@ static int parse_cmd_set_probability(const char *str, struct input *input)
plog_err("Core %u task %u is not impairing packets\n", lcore_id, task_id);
} else {
struct task_base *tbase = lcore_cfg[lcore_id].tasks_all[task_id];
- task_impair_set_proba(tbase, probability);
+ task_impair_set_proba_no_drop(tbase, proba_no_drop);
+ }
+ }
+ }
+ return 0;
+}
+
+static int parse_cmd_set_proba_delay(const char *str, struct input *input)
+{
+ unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, nb_cores;
+ float proba_delay;
+
+ if (parse_cores_task(str, lcores, &task_id, &nb_cores))
+ return -1;
+ if (!(str = strchr_skip_twice(str, ' ')))
+ return -1;
+ if (sscanf(str, "%f", &proba_delay) != 1)
+ return -1;
+
+ if (cores_task_are_valid(lcores, task_id, nb_cores)) {
+ for (unsigned int i = 0; i < nb_cores; i++) {
+ lcore_id = lcores[i];
+ if (!task_is_mode(lcore_id, task_id, "impair")) {
+ plog_err("Core %u task %u is not impairing packets\n", lcore_id, task_id);
+ } else {
+ struct task_base *tbase = lcore_cfg[lcore_id].tasks_all[task_id];
+ task_impair_set_proba_delay(tbase, proba_delay);
+ }
+ }
+ }
+ return 0;
+}
+
+static int parse_cmd_set_proba_duplicate(const char *str, struct input *input)
+{
+ unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, nb_cores;
+ float proba_duplicate;
+
+ if (parse_cores_task(str, lcores, &task_id, &nb_cores))
+ return -1;
+ if (!(str = strchr_skip_twice(str, ' ')))
+ return -1;
+ if (sscanf(str, "%f", &proba_duplicate) != 1)
+ return -1;
+
+ if (cores_task_are_valid(lcores, task_id, nb_cores)) {
+ for (unsigned int i = 0; i < nb_cores; i++) {
+ lcore_id = lcores[i];
+ if (!task_is_mode(lcore_id, task_id, "impair")) {
+ plog_err("Core %u task %u is not impairing packets\n", lcore_id, task_id);
+ } else {
+ struct task_base *tbase = lcore_cfg[lcore_id].tasks_all[task_id];
+ task_impair_set_proba_duplicate(tbase, proba_duplicate);
}
}
}
@@ -657,7 +709,7 @@ static int parse_cmd_reset_randoms_all(const char *str, struct input *input)
unsigned task_id, lcore_id = -1;
while (prox_core_next(&lcore_id, 0) == 0) {
for (task_id = 0; task_id < lcore_cfg[lcore_id].n_tasks_all; task_id++) {
- if (!task_is_mode(lcore_id, task_id, "gen")) {
+ if (task_is_mode(lcore_id, task_id, "gen")) {
struct task_base *tbase = lcore_cfg[lcore_id].tasks_all[task_id];
uint32_t n_rands = task_gen_get_n_randoms(tbase);
@@ -669,6 +721,27 @@ static int parse_cmd_reset_randoms_all(const char *str, struct input *input)
return 0;
}
+static int parse_cmd_reset_ranges_all(const char *str, struct input *input)
+{
+ if (strcmp(str, "") != 0) {
+ return -1;
+ }
+
+ unsigned task_id, lcore_id = -1;
+ while (prox_core_next(&lcore_id, 0) == 0) {
+ for (task_id = 0; task_id < lcore_cfg[lcore_id].n_tasks_all; task_id++) {
+ if (task_is_mode(lcore_id, task_id, "gen")) {
+ struct task_base *tbase = lcore_cfg[lcore_id].tasks_all[task_id];
+ uint32_t n_ranges = task_gen_get_n_ranges(tbase);
+
+ plog_info("Resetting ranges on core %d task %d from %d ranges\n", lcore_id, task_id, n_ranges);
+ task_gen_reset_ranges(tbase);
+ }
+ }
+ }
+ return 0;
+}
+
static int parse_cmd_reset_values_all(const char *str, struct input *input)
{
if (strcmp(str, "") != 0) {
@@ -678,7 +751,7 @@ static int parse_cmd_reset_values_all(const char *str, struct input *input)
unsigned task_id, lcore_id = -1;
while (prox_core_next(&lcore_id, 0) == 0) {
for (task_id = 0; task_id < lcore_cfg[lcore_id].n_tasks_all; task_id++) {
- if (!task_is_mode(lcore_id, task_id, "gen")) {
+ if (task_is_mode(lcore_id, task_id, "gen")) {
struct task_base *tbase = lcore_cfg[lcore_id].tasks_all[task_id];
plog_info("Resetting values on core %d task %d\n", lcore_id, task_id);
@@ -789,6 +862,39 @@ static int parse_cmd_set_random(const char *str, struct input *input)
return 0;
}
+static int parse_cmd_set_range(const char *str, struct input *input)
+{
+ unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, nb_cores;
+ struct range range;
+
+ if (parse_cores_task(str, lcores, &task_id, &nb_cores))
+ return -1;
+ if (!(str = strchr_skip_twice(str, ' ')))
+ return -1;
+ if (sscanf(str, "%u %u %u", &range.offset, &range.min, &range.max) != 3) {
+ return -1;
+ }
+
+ if (cores_task_are_valid(lcores, task_id, nb_cores)) {
+ for (unsigned int i = 0; i < nb_cores; i++) {
+ lcore_id = lcores[i];
+ if (!task_is_mode(lcore_id, task_id, "gen")) {
+ plog_err("Core %u task %u is not generating packets\n", lcore_id, task_id);
+ } else if (range.offset > PROX_RTE_ETHER_MAX_LEN) {
+ plog_err("Offset out of range (must be less then %u)\n", PROX_RTE_ETHER_MAX_LEN);
+ } else if (range.min > range.max) {
+ plog_err("Wrong range: end (%d) must be >= start (%d)\n", range.max, range.min);
+ } else {
+ struct task_base *tbase = lcore_cfg[lcore_id].tasks_all[task_id];
+ if (task_gen_add_range(tbase, &range)) {
+ plog_warn("Range not added on core %u task %u\n", lcore_id, task_id);
+ }
+ }
+ }
+ }
+ return 0;
+}
+
static int parse_cmd_thread_info(const char *str, struct input *input)
{
unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, nb_cores;
@@ -943,12 +1049,7 @@ static int parse_cmd_local_ip(const char *str, struct input *input)
struct task_base *tbase = lcore_cfg[lcore_id].tasks_all[task_id];
uint32_t local_ip = ((ip[3] & 0xFF) << 24) | ((ip[2] & 0xFF) << 16) | ((ip[1] & 0xFF) << 8) | ((ip[0] & 0xFF) << 0);
if (!task_is_mode_and_submode(lcore_id, task_id, "arp", "local")) {
- if (!task_is_sub_mode(lcore_id, task_id, "l3")) {
- plog_err("Core %u task %u is not in l3 mode\n", lcore_id, task_id);
- } else {
- plog_info("Setting local ip to %s\n", str);
- task_set_local_ip(tbase, local_ip);
- }
+ plog_err("Core %u task %u is not in arp mode\n", lcore_id, task_id);
} else {
plog_info("Setting local ip to %s\n", str);
task_arp_set_local_ip(tbase, local_ip);
@@ -1833,7 +1934,7 @@ static void handle_lat_stats(unsigned lcore_id, unsigned task_id, struct input *
if (input->reply) {
char buf[128];
snprintf(buf, sizeof(buf),
- "%"PRIu64",%"PRIu64",%"PRIu64",%"PRIu64",%"PRIu64",%"PRIu64",%"PRIu64",%u,%u\n",
+ "%"PRIu64",%"PRIu64",%"PRIu64",%"PRIu64",%"PRIu64",%"PRIu64",%"PRIu64",%u,%u,%"PRIu64",%"PRIu64",%"PRIu64"\n",
lat_min_usec,
lat_max_usec,
lat_avg_usec,
@@ -1842,18 +1943,24 @@ static void handle_lat_stats(unsigned lcore_id, unsigned task_id, struct input *
last_tsc,
rte_get_tsc_hz(),
lcore_id,
- task_id);
+ task_id,
+ stats->mis_ordered,
+ stats->extent,
+ stats->duplicate);
input->reply(input, buf, strlen(buf));
}
else {
- plog_info("core: %u, task: %u, min: %"PRIu64", max: %"PRIu64", avg: %"PRIu64", min since reset: %"PRIu64", max since reset: %"PRIu64"\n",
- lcore_id,
- task_id,
- lat_min_usec,
- lat_max_usec,
- lat_avg_usec,
- tot_lat_min_usec,
- tot_lat_max_usec);
+ plog_info("core: %u, task: %u, min: %"PRIu64", max: %"PRIu64", avg: %"PRIu64", min since reset: %"PRIu64", max since reset: %"PRIu64", mis_ordered: %"PRIu64", extent: %"PRIu64", duplicates: %"PRIu64"\n",
+ lcore_id,
+ task_id,
+ lat_min_usec,
+ lat_max_usec,
+ lat_avg_usec,
+ tot_lat_min_usec,
+ tot_lat_max_usec,
+ stats->mis_ordered,
+ stats->extent,
+ stats->duplicate);
}
}
@@ -2110,6 +2217,29 @@ static int parse_cmd_join_igmp(const char *str, struct input *input)
return 0;
}
+static int parse_cmd_send_unsollicited_na(const char *str, struct input *input)
+{
+ unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, nb_cores;
+
+ if (parse_cores_task(str, lcores, &task_id, &nb_cores))
+ return -1;
+
+ if (cores_task_are_valid(lcores, task_id, nb_cores)) {
+ for (unsigned int i = 0; i < nb_cores; i++) {
+ lcore_id = lcores[i];
+
+ if (!task_is_sub_mode(lcore_id, task_id, "ndp")) {
+ plog_err("Core %u task %u is not running ndp\n", lcore_id, task_id);
+ }
+ else {
+ struct task_base *tbase = lcore_cfg[lcore_id].tasks_all[task_id];
+ send_unsollicited_neighbour_advertisement(tbase);
+ }
+ }
+ }
+ return 0;
+}
+
static int parse_cmd_rx_tx_info(const char *str, struct input *input)
{
if (strcmp(str, "") != 0) {
@@ -2189,8 +2319,10 @@ static struct cmd_str cmd_strings[] = {
{"speed_byte", "<core_id> <task_id> <speed>", "Change speed to <speed>. The speed is specified in units of bytes per second.", parse_cmd_speed_byte},
{"set value", "<core_id> <task_id> <offset> <value> <value_len>", "Set <value_len> bytes to <value> at offset <offset> in packets generated on <core_id> <task_id>", parse_cmd_set_value},
{"set random", "<core_id> <task_id> <offset> <random_str> <value_len>", "Set <value_len> bytes to <rand_str> at offset <offset> in packets generated on <core_id> <task_id>", parse_cmd_set_random},
+ {"set range", "<core_id> <task_id> <offset> <range_start> <range_end>", "Set bytes from <range_start> to <range_end> at offset <offset> in packets generated on <core_id> <task_id>", parse_cmd_set_range},
{"reset values all", "", "Undo all \"set value\" commands on all cores/tasks", parse_cmd_reset_values_all},
{"reset randoms all", "", "Undo all \"set random\" commands on all cores/tasks", parse_cmd_reset_randoms_all},
+ {"reset ranges all", "", "Undo all \"set range\" commands on all cores/tasks", parse_cmd_reset_ranges_all},
{"reset values", "<core id> <task id>", "Undo all \"set value\" commands on specified core/task", parse_cmd_reset_values},
{"arp add", "<core id> <task id> <port id> <gre id> <svlan> <cvlan> <ip addr> <mac addr> <user>", "Add a single ARP entry into a CPE table on <core id>/<task id>.", parse_cmd_arp_add},
@@ -2240,10 +2372,16 @@ static struct cmd_str cmd_strings[] = {
{"cgnat dump private hash", "<core id> <task id>", "Dump cgnat private hash table", parse_cmd_cgnat_private_hash},
{"delay_us", "<core_id> <task_id> <delay_us>", "Set the delay in usec for the impair mode to <delay_us>", parse_cmd_delay_us},
{"random delay_us", "<core_id> <task_id> <random delay_us>", "Set the delay in usec for the impair mode to <random delay_us>", parse_cmd_random_delay_us},
- {"probability", "<core_id> <task_id> <probability>", "Set the percent of forwarded packets for the impair mode", parse_cmd_set_probability},
+ {"probability", "<core_id> <task_id> <probability>", "Old - Use <proba no drop> instead. Set the percent of forwarded packets for the impair mode", parse_cmd_set_proba_no_drop}, // old - backward compatibility
+ {"proba no drop", "<core_id> <task_id> <probability>", "Set the percent of forwarded packets for the impair mode", parse_cmd_set_proba_no_drop},
+ {"proba delay", "<core_id> <task_id> <probability>", "Set the percent of delayed packets for the impair mode", parse_cmd_set_proba_delay},
+#if RTE_VERSION >= RTE_VERSION_NUM(19,11,0,0)
+ {"proba duplicate", "<core_id> <task_id> <probability>", "Set the percent of duplicate packets for the impair mode", parse_cmd_set_proba_duplicate},
+#endif
{"version", "", "Show version", parse_cmd_version},
{"join igmp", "<core_id> <task_id> <ip>", "Send igmp membership report for group <ip>", parse_cmd_join_igmp},
{"leave igmp", "<core_id> <task_id>", "Send igmp leave group", parse_cmd_leave_igmp},
+ {"send unsollicited na", "<core_id> <task_id>", "Send Unsollicited Neighbor Advertisement", parse_cmd_send_unsollicited_na},
{0,0,0,0},
};
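
Taken together, these cmd_parser.c hunks add console commands for the new impair probabilities and for the generator's value ranges, keep the old "probability" command as a backward-compatible alias for "proba no drop", and extend the "lat stats" reply with the mis_ordered, extent and duplicate counters. Going only by the help strings registered above, usage looks like the following (core/task ids and values are purely illustrative):

proba no drop 1 0 99.5      (forward 99.5 percent of the packets on core 1, task 0)
proba delay 1 0 25          (delay 25 percent of the packets)
proba duplicate 1 0 1       (duplicate 1 percent of the packets; needs DPDK >= 19.11)
set range 2 0 36 256 512    (on core 2, task 0: set the bytes at offset 36 to the range 256 to 512)
reset ranges all            (undo every "set range" on all cores and tasks)
send unsollicited na 3 0    (send an unsolicited Neighbor Advertisement from an ndp task)
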
diff --git a/VNFs/DPPD-PROX/commands.c b/VNFs/DPPD-PROX/commands.c
index 32b974cb..a8953a68 100644
--- a/VNFs/DPPD-PROX/commands.c
+++ b/VNFs/DPPD-PROX/commands.c
@@ -113,7 +113,7 @@ static inline int wait_command_handled(struct lcore_cfg *lconf)
static inline void start_l3(struct task_args *targ)
{
if (!task_is_master(targ)) {
- if ((targ->nb_txrings != 0) || (targ->nb_txports != 0)) {
+ if ((targ->nb_txports != 0)) {
if (targ->flags & (TASK_ARG_L3|TASK_ARG_NDP))
task_start_l3(targ->tbase, targ);
}
@@ -895,7 +895,7 @@ void cmd_portinfo(int port_id, char *dst, size_t max_len)
dst += snprintf(dst, end - dst,
"%2d:%10s; "MAC_BYTES_FMT"; %s\n",
port_id,
- port_cfg->name,
+ port_cfg->names[0],
MAC_BYTES(port_cfg->eth_addr.addr_bytes),
port_cfg->pci_addr);
}
@@ -909,7 +909,7 @@ void cmd_portinfo(int port_id, char *dst, size_t max_len)
struct prox_port_cfg* port_cfg = &prox_port_cfg[port_id];
dst += snprintf(dst, end - dst, "Port info for port %u\n", port_id);
- dst += snprintf(dst, end - dst, "\tName: %s\n", port_cfg->name);
+ dst += snprintf(dst, end - dst, "\tName: %s\n", port_cfg->names[0]);
dst += snprintf(dst, end - dst, "\tDriver: %s\n", port_cfg->driver_name);
dst += snprintf(dst, end - dst, "\tMac address: "MAC_BYTES_FMT"\n", MAC_BYTES(port_cfg->eth_addr.addr_bytes));
dst += snprintf(dst, end - dst, "\tLink speed: %u Mbps\n", port_cfg->link_speed);
@@ -1004,7 +1004,7 @@ void cmd_set_vlan_offload(uint8_t port_id, unsigned int val)
}
plog_info("setting vlan offload to %d\n", val);
- if (val & ~(ETH_VLAN_STRIP_OFFLOAD | ETH_VLAN_FILTER_OFFLOAD | ETH_VLAN_EXTEND_OFFLOAD)) {
+ if (val & ~(RTE_ETH_VLAN_STRIP_OFFLOAD | RTE_ETH_VLAN_FILTER_OFFLOAD | RTE_ETH_VLAN_EXTEND_OFFLOAD)) {
plog_info("wrong vlan offload value\n");
}
int ret = rte_eth_dev_set_vlan_offload(port_id, val);
diff --git a/VNFs/DPPD-PROX/defaults.c b/VNFs/DPPD-PROX/defaults.c
index ac611d0c..f5624b97 100644
--- a/VNFs/DPPD-PROX/defaults.c
+++ b/VNFs/DPPD-PROX/defaults.c
@@ -50,7 +50,11 @@
static const struct rte_eth_conf default_port_conf = {
.rxmode = {
.mq_mode = 0,
- .max_rx_pkt_len = PROX_MTU + PROX_RTE_ETHER_HDR_LEN + PROX_RTE_ETHER_CRC_LEN
+#if RTE_VERSION < RTE_VERSION_NUM(21,11,0,0)
+ .max_rx_pkt_len = PROX_MTU + PROX_RTE_ETHER_HDR_LEN + PROX_RTE_ETHER_CRC_LEN,
+#else
+ .mtu = PROX_MTU,
+#endif
},
.rx_adv_conf = {
.rss_conf = {
@@ -76,6 +80,16 @@ static struct rte_eth_txconf default_tx_conf = {
.tx_rs_thresh = 32, /* Use PMD default values */
};
+#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
+static struct rte_sched_subport_profile_params subport_profile_params_default = {
+ .tb_rate = TEN_GIGABIT / NB_PIPES,
+ .tb_size = 4000000,
+
+ .tc_rate = {TEN_GIGABIT / NB_PIPES, TEN_GIGABIT / NB_PIPES, TEN_GIGABIT / NB_PIPES, TEN_GIGABIT / NB_PIPES},
+ .tc_period = 40,
+};
+#endif
+
static struct rte_sched_port_params port_params_default = {
.name = "port_0",
.socket = 0,
@@ -83,6 +97,9 @@ static struct rte_sched_port_params port_params_default = {
.rate = 0,
.frame_overhead = RTE_SCHED_FRAME_OVERHEAD_DEFAULT,
.n_subports_per_port = 1,
+#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
+ .subport_profiles = &subport_profile_params_default,
+#endif
.n_pipes_per_subport = NB_PIPES,
#if RTE_VERSION < RTE_VERSION_NUM(19,11,0,0)
.qsize = {QUEUE_SIZES, QUEUE_SIZES, QUEUE_SIZES, QUEUE_SIZES},
@@ -106,11 +123,13 @@ static struct rte_sched_pipe_params pipe_params_default = {
};
static struct rte_sched_subport_params subport_params_default = {
+#if RTE_VERSION < RTE_VERSION_NUM(20,11,0,0)
.tb_rate = TEN_GIGABIT,
.tb_size = 4000000,
.tc_rate = {TEN_GIGABIT, TEN_GIGABIT, TEN_GIGABIT, TEN_GIGABIT},
.tc_period = 40, /* default was 10 */
-#if RTE_VERSION >= RTE_VERSION_NUM(19,11,0,0)
+#endif
+#if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
.qsize = {QUEUE_SIZES, QUEUE_SIZES, QUEUE_SIZES, QUEUE_SIZES},
.pipe_profiles = NULL,
.n_pipe_profiles = 1 /* only one profile */
@@ -154,7 +173,7 @@ void set_task_defaults(struct prox_cfg* prox_cfg, struct lcore_cfg* lcore_cfg_in
targ->qos_conf.port_params = port_params_default;
targ->qos_conf.pipe_params[0] = pipe_params_default;
targ->qos_conf.subport_params[0] = subport_params_default;
-#if RTE_VERSION >= RTE_VERSION_NUM(19,11,0,0)
+#if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
targ->qos_conf.subport_params[0].pipe_profiles = targ->qos_conf.pipe_params;
#else
targ->qos_conf.port_params.pipe_profiles = targ->qos_conf.pipe_params;
@@ -187,6 +206,8 @@ void set_task_defaults(struct prox_cfg* prox_cfg, struct lcore_cfg* lcore_cfg_in
targ->runtime_flags |= TASK_TX_CRC;
targ->accuracy_limit_nsec = 5000;
+ targ->probability_delay = 1000000;
+ targ->probability_no_drop = 1000000;
}
}
}
@@ -205,12 +226,13 @@ void set_port_defaults(void)
prox_port_cfg[i].tx_ring[0] = '\0';
prox_port_cfg[i].mtu = PROX_MTU;
prox_port_cfg[i].dpdk_mapping = NO_VDEV_PORT;
+ prox_port_cfg[i].v6_mask_length = 8;
// CRC_STRIP becoming the default behavior in DPDK 18.08, and
// DEV_RX_OFFLOAD_CRC_STRIP define has been deleted
-#if defined (DEV_RX_OFFLOAD_CRC_STRIP)
- prox_port_cfg[i].requested_rx_offload = DEV_RX_OFFLOAD_CRC_STRIP;
+#if defined (RTE_ETH_RX_OFFLOAD_CRC_STRIP)
+ prox_port_cfg[i].requested_rx_offload = RTE_ETH_RX_OFFLOAD_CRC_STRIP;
#endif
- prox_port_cfg[i].requested_tx_offload = DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM;
+ prox_port_cfg[i].requested_tx_offload = RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_UDP_CKSUM;
}
}
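
The defaults.c hunk gating max_rx_pkt_len versus mtu reflects the DPDK 21.11 API change: rxmode no longer carries a maximum frame length but an MTU, which excludes the Ethernet header and CRC. A minimal sketch of the same pattern as a helper, assuming PROX_MTU and the PROX_RTE_ETHER_* constants from PROX's headers are in scope (the helper itself is illustrative):

#include <rte_version.h>
#include <rte_ethdev.h>

static void set_default_rx_frame_size(struct rte_eth_conf *conf)
{
#if RTE_VERSION < RTE_VERSION_NUM(21, 11, 0, 0)
	/* Pre-21.11: the full frame length, Ethernet header and CRC included. */
	conf->rxmode.max_rx_pkt_len = PROX_MTU + PROX_RTE_ETHER_HDR_LEN + PROX_RTE_ETHER_CRC_LEN;
#else
	/* 21.11 and later: an MTU, i.e. without the Ethernet header and CRC. */
	conf->rxmode.mtu = PROX_MTU;
#endif
}
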
diff --git a/VNFs/DPPD-PROX/display_latency.c b/VNFs/DPPD-PROX/display_latency.c
index 04382e46..f43dd69d 100644
--- a/VNFs/DPPD-PROX/display_latency.c
+++ b/VNFs/DPPD-PROX/display_latency.c
@@ -26,6 +26,9 @@ static struct display_column *stddev_col;
static struct display_column *accuracy_limit_col;
static struct display_column *used_col;
static struct display_column *lost_col;
+static struct display_column *mis_ordered_col;
+static struct display_column *extent_col;
+static struct display_column *duplicate_col;
static struct display_page display_page_latency;
static void display_latency_draw_frame(struct screen_state *screen_state)
@@ -68,12 +71,18 @@ static void display_latency_draw_frame(struct screen_state *screen_state)
used_col = display_table_add_col(acc);
display_column_init(used_col, "Used Packets (%)", 16);
accuracy_limit_col = display_table_add_col(acc);
- display_column_init(accuracy_limit_col, "limit (us)", 16);
+ display_column_init(accuracy_limit_col, "limit (us)", 12);
display_table_init(other, "Other");
lost_col = display_table_add_col(other);
- display_column_init(lost_col, "Lost Packets", 16);
+ display_column_init(lost_col, "Lost", 12);
+ mis_ordered_col = display_table_add_col(other);
+ display_column_init(mis_ordered_col, "mis-ordered", 12);
+ extent_col = display_table_add_col(other);
+ display_column_init(extent_col, "extent", 12);
+ duplicate_col = display_table_add_col(other);
+ display_column_init(duplicate_col, "duplicate", 12);
display_page_draw_frame(&display_page_latency, n_latency);
@@ -117,8 +126,11 @@ static void display_stats_latency_entry(int row, struct stats_latency *stats_lat
}
display_column_print(accuracy_limit_col, row, "%s", print_time_unit_usec(dst, &accuracy_limit));
- display_column_print(lost_col, row, "%16"PRIu64"", stats_latency->lost_packets);
+ display_column_print(lost_col, row, "%12"PRIu64"", stats_latency->lost_packets);
display_column_print(used_col, row, "%3u.%06u", used / AFTER_POINT, used % AFTER_POINT);
+ display_column_print(mis_ordered_col, row, "%12"PRIu64"", stats_latency->mis_ordered);
+ display_column_print(extent_col, row, "%12"PRIu64"", stats_latency->extent);
+ display_column_print(duplicate_col, row, "%12"PRIu64"", stats_latency->duplicate);
}
static void display_latency_draw_stats(struct screen_state *screen_state)
diff --git a/VNFs/DPPD-PROX/display_pkt_len.c b/VNFs/DPPD-PROX/display_pkt_len.c
index df34616a..83fbc655 100644
--- a/VNFs/DPPD-PROX/display_pkt_len.c
+++ b/VNFs/DPPD-PROX/display_pkt_len.c
@@ -81,7 +81,7 @@ static void display_pkt_len_draw_frame(struct screen_state *screen_state)
const uint32_t port_id = port_disp[i];
display_column_print(port_col, i, "%4u", port_id);
- display_column_print(name_col, i, "%8s", prox_port_cfg[port_id].name);
+ display_column_print(name_col, i, "%8s", prox_port_cfg[port_id].names[0]);
display_column_print(type_col, i, "%7s", prox_port_cfg[port_id].short_name);
}
}
diff --git a/VNFs/DPPD-PROX/display_ports.c b/VNFs/DPPD-PROX/display_ports.c
index 79a5a2d7..d2140f1e 100644
--- a/VNFs/DPPD-PROX/display_ports.c
+++ b/VNFs/DPPD-PROX/display_ports.c
@@ -116,7 +116,7 @@ static void display_ports_draw_frame(struct screen_state *state)
const uint32_t port_id = port_disp[i];
display_column_print(nb_col, i, "%u", port_id);
- display_column_print(name_col, i, "%s", prox_port_cfg[port_id].name);
+ display_column_print(name_col, i, "%s", prox_port_cfg[port_id].names[0]);
display_column_print(type_col, i, "%s", prox_port_cfg[port_id].short_name);
}
}
@@ -180,8 +180,8 @@ static void display_ports_draw_per_sec_stats(void)
struct percent rx_percent;
struct percent tx_percent;
if (strcmp(prox_port_cfg[port_id].short_name, "i40e_vf") == 0) {
-#if defined (DEV_RX_OFFLOAD_CRC_STRIP)
- if (prox_port_cfg[port_id].requested_rx_offload & DEV_RX_OFFLOAD_CRC_STRIP) {
+#if defined (RTE_ETH_RX_OFFLOAD_CRC_STRIP)
+ if (prox_port_cfg[port_id].requested_rx_offload & RTE_ETH_RX_OFFLOAD_CRC_STRIP) {
rx_percent = calc_percent(last->rx_bytes - prev->rx_bytes + 20 * (last->rx_tot - prev->rx_tot), delta_t);
tx_percent = calc_percent(last->tx_bytes - prev->tx_bytes + 24 * (last->tx_tot - prev->tx_tot), delta_t);
} else {
@@ -189,7 +189,7 @@ static void display_ports_draw_per_sec_stats(void)
tx_percent = calc_percent(last->tx_bytes - prev->tx_bytes + 20 * (last->tx_tot - prev->tx_tot), delta_t);
}
} else {
- if (prox_port_cfg[port_id].requested_rx_offload & DEV_RX_OFFLOAD_CRC_STRIP) {
+ if (prox_port_cfg[port_id].requested_rx_offload & RTE_ETH_RX_OFFLOAD_CRC_STRIP) {
rx_percent = calc_percent(last->rx_bytes - prev->rx_bytes + 24 * (last->rx_tot - prev->rx_tot), delta_t);
tx_percent = calc_percent(last->tx_bytes - prev->tx_bytes + 24 * (last->tx_tot - prev->tx_tot), delta_t);
} else {
@@ -198,8 +198,8 @@ static void display_ports_draw_per_sec_stats(void)
}
}
#else
-#if defined DEV_RX_OFFLOAD_KEEP_CRC
- if (prox_port_cfg[port_id].requested_rx_offload & DEV_RX_OFFLOAD_KEEP_CRC ) {
+#if defined RTE_ETH_RX_OFFLOAD_KEEP_CRC
+ if (prox_port_cfg[port_id].requested_rx_offload & RTE_ETH_RX_OFFLOAD_KEEP_CRC ) {
rx_percent = calc_percent(last->rx_bytes - prev->rx_bytes + 20 * (last->rx_tot - prev->rx_tot), delta_t);
tx_percent = calc_percent(last->tx_bytes - prev->tx_bytes + 20 * (last->tx_tot - prev->tx_tot), delta_t);
} else {
@@ -207,7 +207,7 @@ static void display_ports_draw_per_sec_stats(void)
tx_percent = calc_percent(last->tx_bytes - prev->tx_bytes + 24 * (last->tx_tot - prev->tx_tot), delta_t);
}
} else {
- if (prox_port_cfg[port_id].requested_rx_offload & DEV_RX_OFFLOAD_KEEP_CRC ) {
+ if (prox_port_cfg[port_id].requested_rx_offload & RTE_ETH_RX_OFFLOAD_KEEP_CRC ) {
rx_percent = calc_percent(last->rx_bytes - prev->rx_bytes + 20 * (last->rx_tot - prev->rx_tot), delta_t);
tx_percent = calc_percent(last->tx_bytes - prev->tx_bytes + 20 * (last->tx_tot - prev->tx_tot), delta_t);
} else {
@@ -216,7 +216,7 @@ static void display_ports_draw_per_sec_stats(void)
}
}
#else
-#error neither DEV_RX_OFFLOAD_CRC_STRIP or DEV_RX_OFFLOAD_KEEP_CRC is defined
+#error neither RTE_ETH_RX_OFFLOAD_CRC_STRIP or RTE_ETH_RX_OFFLOAD_KEEP_CRC is defined
#endif
#endif
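
The DEV_RX_OFFLOAD_*, DEV_TX_OFFLOAD_* and ETH_VLAN_* constants used throughout PROX are renamed here to the RTE_ETH_-prefixed equivalents introduced in DPDK 21.11. The heavily modified prox_compat.h (not shown in this excerpt) presumably maps the new spellings back onto the old ones when building against older DPDK; a minimal sketch of that kind of shim, under that assumption:

#include <rte_ethdev.h>

/* Sketch only: provide the RTE_ETH_-prefixed names used by the code when the
 * DPDK headers only define the pre-21.11 spellings. */
#if defined(DEV_RX_OFFLOAD_KEEP_CRC) && !defined(RTE_ETH_RX_OFFLOAD_KEEP_CRC)
#define RTE_ETH_RX_OFFLOAD_KEEP_CRC DEV_RX_OFFLOAD_KEEP_CRC
#endif
#if defined(DEV_RX_OFFLOAD_CRC_STRIP) && !defined(RTE_ETH_RX_OFFLOAD_CRC_STRIP)
#define RTE_ETH_RX_OFFLOAD_CRC_STRIP DEV_RX_OFFLOAD_CRC_STRIP
#endif
#if defined(DEV_TX_OFFLOAD_IPV4_CKSUM) && !defined(RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
#define RTE_ETH_TX_OFFLOAD_IPV4_CKSUM DEV_TX_OFFLOAD_IPV4_CKSUM
#endif
#if defined(ETH_VLAN_STRIP_OFFLOAD) && !defined(RTE_ETH_VLAN_STRIP_OFFLOAD)
#define RTE_ETH_VLAN_STRIP_OFFLOAD ETH_VLAN_STRIP_OFFLOAD
#endif
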
diff --git a/VNFs/DPPD-PROX/display_rings.c b/VNFs/DPPD-PROX/display_rings.c
index 618350e2..b3154237 100644
--- a/VNFs/DPPD-PROX/display_rings.c
+++ b/VNFs/DPPD-PROX/display_rings.c
@@ -68,7 +68,7 @@ static void display_rings_draw_frame(struct screen_state *state)
int offset = 0;
for (uint32_t j = 0; j < rs->nb_ports; j++)
- offset += sprintf(name + offset, "%s", rs->port[j]->name);
+ offset += sprintf(name + offset, "%s", rs->port[j]->names[0]);
}
sc_val = (rs->ring->flags & RING_F_SC_DEQ) ? 'y' : 'n';
diff --git a/VNFs/DPPD-PROX/eld.h b/VNFs/DPPD-PROX/eld.h
index b5de59d7..d3ec2f22 100644
--- a/VNFs/DPPD-PROX/eld.h
+++ b/VNFs/DPPD-PROX/eld.h
@@ -17,7 +17,7 @@
#ifndef _ELD_H_
#define _ELD_H_
-#define PACKET_QUEUE_BITS 14
+#define PACKET_QUEUE_BITS 20
#define PACKET_QUEUE_SIZE (1 << PACKET_QUEUE_BITS)
#define PACKET_QUEUE_MASK (PACKET_QUEUE_SIZE - 1)
@@ -76,7 +76,10 @@ static uint32_t early_loss_detect_add(struct early_loss_detect *eld, uint32_t pa
old_queue_id = eld->entries[queue_pos];
eld->entries[queue_pos] = packet_index >> PACKET_QUEUE_BITS;
- return (eld->entries[queue_pos] - old_queue_id - 1) & QUEUE_ID_MASK;
+ if (eld->entries[queue_pos] != old_queue_id)
+ return (eld->entries[queue_pos] - old_queue_id - 1) & QUEUE_ID_MASK;
+ else
+ return 0;
}
#endif /* _ELD_H_ */
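
The eld.h change widens the early-loss-detect window (PACKET_QUEUE_BITS from 14 to 20) and, more importantly, stops charging a full wrap-around worth of losses when a packet index maps to a slot that already holds the same queue id: such a packet is a duplicate or a late arrival, and the old expression (new - old - 1) & QUEUE_ID_MASK would have returned QUEUE_ID_MASK for it. A reduced-size, self-contained sketch of the bookkeeping (constants shrunk so the numbers stay readable):

#include <stdint.h>
#include <stdio.h>

/* Scaled-down model of early_loss_detect_add(): the low bits of a packet
 * index select a slot, the high bits are a generation id stored there; the
 * return value is the number of generations skipped, i.e. presumed losses. */
#define DEMO_QUEUE_BITS 4
#define DEMO_QUEUE_SIZE (1 << DEMO_QUEUE_BITS)
#define DEMO_QUEUE_MASK (DEMO_QUEUE_SIZE - 1)
#define DEMO_ID_MASK    0xff   /* illustrative generation-id width */

static uint32_t demo_entries[DEMO_QUEUE_SIZE];

static uint32_t demo_eld_add(uint32_t packet_index)
{
	uint32_t pos = packet_index & DEMO_QUEUE_MASK;
	uint32_t old_id = demo_entries[pos];

	demo_entries[pos] = packet_index >> DEMO_QUEUE_BITS;
	if (demo_entries[pos] != old_id)
		return (demo_entries[pos] - old_id - 1) & DEMO_ID_MASK;
	return 0; /* same generation: duplicate or late packet, not a loss */
}

int main(void)
{
	demo_eld_add(0x10);                 /* slot 0, generation 1 */
	printf("%u\n", demo_eld_add(0x30)); /* slot 0, generation 3: 1 generation lost */
	printf("%u\n", demo_eld_add(0x30)); /* same generation again: 0 (old code returned 255) */
	return 0;
}
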
diff --git a/VNFs/DPPD-PROX/git_version.c.in b/VNFs/DPPD-PROX/git_version.c.in
new file mode 100644
index 00000000..d151b589
--- /dev/null
+++ b/VNFs/DPPD-PROX/git_version.c.in
@@ -0,0 +1 @@
+const char *git_version="@GIT_VERSION@";
diff --git a/VNFs/DPPD-PROX/handle_cgnat.c b/VNFs/DPPD-PROX/handle_cgnat.c
index f516921c..9ce63b20 100644
--- a/VNFs/DPPD-PROX/handle_cgnat.c
+++ b/VNFs/DPPD-PROX/handle_cgnat.c
@@ -114,7 +114,7 @@ struct pkt_eth_ipv4 {
prox_rte_ether_hdr ether_hdr;
prox_rte_ipv4_hdr ipv4_hdr;
prox_rte_udp_hdr udp_hdr;
-} __attribute__((packed));
+} __attribute__((packed)) __attribute__((__aligned__(2)));
void task_cgnat_dump_public_hash(struct task_nat *task)
{
@@ -148,7 +148,7 @@ static uint8_t route_ipv4(struct task_nat *task, struct rte_mbuf *mbuf)
break;
default:
/* Routing for other protocols is not implemented */
- plogx_info("Routing nit implemented for this protocol\n");
+ plogx_info("Routing not implemented for this protocol\n");
return OUT_DISCARD;
}
@@ -286,7 +286,7 @@ static int add_new_port_entry(struct task_nat *task, uint8_t proto, int public_i
task->private_flow_entries[ret].ip_addr = ip;
task->private_flow_entries[ret].l4_port = *port;
task->private_flow_entries[ret].flow_time = tsc;
- task->private_flow_entries[ret].private_ip_idx = private_ip_idx;
+ task->private_flow_entries[ret].private_ip_idx = private_ip_idx;
public_key.ip_addr = ip;
public_key.l4_port = *port;
@@ -303,15 +303,15 @@ static int add_new_port_entry(struct task_nat *task, uint8_t proto, int public_i
task->public_entries[ret].ip_addr = private_src_ip;
task->public_entries[ret].l4_port = private_udp_port;
task->public_entries[ret].dpdk_port = mbuf->port;
- task->public_entries[ret].private_ip_idx = private_ip_idx;
+ task->public_entries[ret].private_ip_idx = private_ip_idx;
return ret;
}
static int handle_nat_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
{
- struct task_nat *task = (struct task_nat *)tbase;
- uint8_t out[MAX_PKT_BURST];
- uint16_t j;
+ struct task_nat *task = (struct task_nat *)tbase;
+ uint8_t out[MAX_PKT_BURST] = {0};
+ uint16_t j;
uint32_t *ip_addr, public_ip, private_ip;
uint16_t *udp_src_port, port, private_port, public_port;
struct pkt_eth_ipv4 *pkt[MAX_PKT_BURST];
@@ -322,6 +322,7 @@ static int handle_nat_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uin
void *keys[MAX_PKT_BURST];
int32_t positions[MAX_PKT_BURST];
int map[MAX_PKT_BURST] = {0};
+ struct public_key null_key ={0};
if (unlikely(task->dump_public_hash)) {
const struct public_key *next_key;
@@ -348,24 +349,24 @@ static int handle_nat_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uin
task->dump_private_hash = 0;
}
- for (j = 0; j < n_pkts; ++j) {
- PREFETCH0(mbufs[j]);
+ for (j = 0; j < n_pkts; ++j) {
+ PREFETCH0(mbufs[j]);
}
- for (j = 0; j < n_pkts; ++j) {
+ for (j = 0; j < n_pkts; ++j) {
pkt[j] = rte_pktmbuf_mtod(mbufs[j], struct pkt_eth_ipv4 *);
- PREFETCH0(pkt[j]);
+ PREFETCH0(pkt[j]);
}
if (task->private) {
- struct private_key key[MAX_PKT_BURST];
- for (j = 0; j < n_pkts; ++j) {
+ struct private_key key[MAX_PKT_BURST];
+ for (j = 0; j < n_pkts; ++j) {
/* Currently, only support eth/ipv4 packets */
if (pkt[j]->ether_hdr.ether_type != ETYPE_IPv4) {
plogx_info("Currently, only support eth/ipv4 packets\n");
out[j] = OUT_DISCARD;
- keys[j] = (void *)NULL;
+ keys[j] = (void *)&null_key;
continue;
}
- key[j].ip_addr = pkt[j]->ipv4_hdr.src_addr;
+ key[j].ip_addr = pkt[j]->ipv4_hdr.src_addr;
key[j].l4_port = pkt[j]->udp_hdr.src_port;
keys[j] = &key[j];
}
@@ -375,27 +376,29 @@ static int handle_nat_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uin
return -1;
}
int n_new_mapping = 0;
- for (j = 0; j < n_pkts; ++j) {
+ for (j = 0; j < n_pkts; ++j) {
port_idx = positions[j];
- if (unlikely(port_idx < 0)) {
- plogx_dbg("ip %d.%d.%d.%d / port %x not found in private ip/port hash\n", IP4(pkt[j]->ipv4_hdr.src_addr), pkt[j]->udp_hdr.src_port);
- map[n_new_mapping] = j;
- keys[n_new_mapping++] = (void *)&(pkt[j]->ipv4_hdr.src_addr);
- } else {
- ip_addr = &(pkt[j]->ipv4_hdr.src_addr);
- udp_src_port = &(pkt[j]->udp_hdr.src_port);
- plogx_dbg("ip/port %d.%d.%d.%d / %x found in private ip/port hash\n", IP4(pkt[j]->ipv4_hdr.src_addr), pkt[j]->udp_hdr.src_port);
- *ip_addr = task->private_flow_entries[port_idx].ip_addr;
- *udp_src_port = task->private_flow_entries[port_idx].l4_port;
- uint64_t flow_time = task->private_flow_entries[port_idx].flow_time;
- if (flow_time + tsc_hz < tsc) {
- task->private_flow_entries[port_idx].flow_time = tsc;
+ if (out[j] != OUT_DISCARD) {
+ if (unlikely(port_idx < 0)) {
+ plogx_dbg("ip %d.%d.%d.%d / port %x not found in private ip/port hash\n", IP4(pkt[j]->ipv4_hdr.src_addr), pkt[j]->udp_hdr.src_port);
+ map[n_new_mapping] = j;
+ keys[n_new_mapping++] = (void *)&(pkt[j]->ipv4_hdr.src_addr);
+ } else {
+ ip_addr = &(pkt[j]->ipv4_hdr.src_addr);
+ udp_src_port = &(pkt[j]->udp_hdr.src_port);
+ plogx_dbg("ip/port %d.%d.%d.%d / %x found in private ip/port hash\n", IP4(pkt[j]->ipv4_hdr.src_addr), pkt[j]->udp_hdr.src_port);
+ *ip_addr = task->private_flow_entries[port_idx].ip_addr;
+ *udp_src_port = task->private_flow_entries[port_idx].l4_port;
+ uint64_t flow_time = task->private_flow_entries[port_idx].flow_time;
+ if (flow_time + tsc_hz < tsc) {
+ task->private_flow_entries[port_idx].flow_time = tsc;
+ }
+ private_ip_idx = task->private_flow_entries[port_idx].private_ip_idx;
+ if (task->private_ip_info[private_ip_idx].mac_aging_time + tsc_hz < tsc)
+ task->private_ip_info[private_ip_idx].mac_aging_time = tsc;
+ prox_ip_udp_cksum(mbufs[j], &pkt[j]->ipv4_hdr, sizeof(prox_rte_ether_hdr), sizeof(prox_rte_ipv4_hdr), task->offload_crc);
+ out[j] = route_ipv4(task, mbufs[j]);
}
- private_ip_idx = task->private_flow_entries[port_idx].private_ip_idx;
- if (task->private_ip_info[private_ip_idx].mac_aging_time + tsc_hz < tsc)
- task->private_ip_info[private_ip_idx].mac_aging_time = tsc;
- prox_ip_udp_cksum(mbufs[j], &pkt[j]->ipv4_hdr, sizeof(prox_rte_ether_hdr), sizeof(prox_rte_ipv4_hdr), task->offload_crc);
- out[j] = route_ipv4(task, mbufs[j]);
}
}
@@ -410,7 +413,7 @@ static int handle_nat_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uin
}
n_new_mapping = 0;
}
- for (int k = 0; k < n_new_mapping; ++k) {
+ for (int k = 0; k < n_new_mapping; ++k) {
private_ip_idx = positions[k];
j = map[k];
ip_addr = &(pkt[j]->ipv4_hdr.src_addr);
@@ -476,10 +479,10 @@ static int handle_nat_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uin
private_port = *udp_src_port;
plogx_info("Added new ip/port: private ip/port = %d.%d.%d.%d/%x public ip/port = %d.%d.%d.%d/%x, index = %d\n", IP4(private_ip), private_port, IP4(public_ip), public_port, port_idx);
}
- // task->private_flow_entries[port_idx].ip_addr = task->private_ip_info[private_ip_idx].public_ip;
+ // task->private_flow_entries[port_idx].ip_addr = task->private_ip_info[private_ip_idx].public_ip;
plogx_info("Added new port: private ip/port = %d.%d.%d.%d/%x, public ip/port = %d.%d.%d.%d/%x\n", IP4(private_ip), private_port, IP4(task->private_ip_info[private_ip_idx].public_ip), public_port);
- *ip_addr = public_ip ;
- *udp_src_port = public_port;
+ *ip_addr = public_ip ;
+ *udp_src_port = public_port;
uint64_t flow_time = task->private_flow_entries[port_idx].flow_time;
if (flow_time + tsc_hz < tsc) {
task->private_flow_entries[port_idx].flow_time = tsc;
@@ -495,18 +498,18 @@ static int handle_nat_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uin
}
}
}
- return task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
+ return task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
} else {
struct public_key public_key[MAX_PKT_BURST];
- for (j = 0; j < n_pkts; ++j) {
+ for (j = 0; j < n_pkts; ++j) {
/* Currently, only support eth/ipv4 packets */
if (pkt[j]->ether_hdr.ether_type != ETYPE_IPv4) {
plogx_info("Currently, only support eth/ipv4 packets\n");
out[j] = OUT_DISCARD;
- keys[j] = (void *)NULL;
+ keys[j] = (void *)&null_key;
continue;
}
- public_key[j].ip_addr = pkt[j]->ipv4_hdr.dst_addr;
+ public_key[j].ip_addr = pkt[j]->ipv4_hdr.dst_addr;
public_key[j].l4_port = pkt[j]->udp_hdr.dst_port;
keys[j] = &public_key[j];
}
@@ -515,26 +518,28 @@ static int handle_nat_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uin
plogx_err("Failed lookup bulk public_ip_port_hash\n");
return -1;
}
- for (j = 0; j < n_pkts; ++j) {
+ for (j = 0; j < n_pkts; ++j) {
port_idx = positions[j];
- ip_addr = &(pkt[j]->ipv4_hdr.dst_addr);
+ ip_addr = &(pkt[j]->ipv4_hdr.dst_addr);
udp_src_port = &(pkt[j]->udp_hdr.dst_port);
- if (port_idx < 0) {
- plogx_err("Failed to find ip/port %d.%d.%d.%d/%x in public_ip_port_hash\n", IP4(*ip_addr), *udp_src_port);
- out[j] = OUT_DISCARD;
- } else {
- plogx_dbg("Found ip/port %d.%d.%d.%d/%x in public_ip_port_hash\n", IP4(*ip_addr), *udp_src_port);
- *ip_addr = task->public_entries[port_idx].ip_addr;
- *udp_src_port = task->public_entries[port_idx].l4_port;
- private_ip_idx = task->public_entries[port_idx].private_ip_idx;
- plogx_dbg("Found private IP info for ip %d.%d.%d.%d\n", IP4(*ip_addr));
- rte_memcpy(((uint8_t *)(pkt[j])) + 0, &task->private_ip_info[private_ip_idx].private_mac, 6);
- rte_memcpy(((uint8_t *)(pkt[j])) + 6, &task->src_mac_from_dpdk_port[task->public_entries[port_idx].dpdk_port], 6);
- out[j] = task->public_entries[port_idx].dpdk_port;
+ if (out[j] != OUT_DISCARD) {
+ if (port_idx < 0) {
+ plogx_err("Failed to find ip/port %d.%d.%d.%d/%x in public_ip_port_hash\n", IP4(*ip_addr), *udp_src_port);
+ out[j] = OUT_DISCARD;
+ } else {
+ plogx_dbg("Found ip/port %d.%d.%d.%d/%x in public_ip_port_hash\n", IP4(*ip_addr), *udp_src_port);
+ *ip_addr = task->public_entries[port_idx].ip_addr;
+ *udp_src_port = task->public_entries[port_idx].l4_port;
+ private_ip_idx = task->public_entries[port_idx].private_ip_idx;
+ plogx_dbg("Found private IP info for ip %d.%d.%d.%d\n", IP4(*ip_addr));
+ rte_memcpy(((uint8_t *)(pkt[j])) + 0, &task->private_ip_info[private_ip_idx].private_mac, 6);
+ rte_memcpy(((uint8_t *)(pkt[j])) + 6, &task->src_mac_from_dpdk_port[task->public_entries[port_idx].dpdk_port], 6);
+ out[j] = task->public_entries[port_idx].dpdk_port;
+ }
}
prox_ip_udp_cksum(mbufs[j], &pkt[j]->ipv4_hdr, sizeof(prox_rte_ether_hdr), sizeof(prox_rte_ipv4_hdr), task->offload_crc);
}
- return task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
+ return task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
}
}
@@ -562,9 +567,9 @@ static int lua_to_hash_nat(struct task_args *targ, struct lua_State *L, enum lua
}
struct tmp_public_ip {
- uint32_t ip_beg;
+ uint32_t ip_beg;
uint32_t ip_end;
- uint16_t port_beg;
+ uint16_t port_beg;
uint16_t port_end;
};
struct tmp_static_ip {
@@ -595,10 +600,10 @@ static int lua_to_hash_nat(struct task_args *targ, struct lua_State *L, enum lua
plogx_info("No dynamic table found\n");
} else {
uint64_t n_ip, n_port;
- if (!lua_istable(L, -1)) {
- plogx_err("Can't read cgnat since data is not a table\n");
- return -1;
- }
+ if (!lua_istable(L, -1)) {
+ plogx_err("Can't read cgnat since data is not a table\n");
+ return -1;
+ }
lua_len(L, -1);
n_public_groups = lua_tointeger(L, -1);
plogx_info("%d groups of public IP\n", n_public_groups);
@@ -609,9 +614,9 @@ static int lua_to_hash_nat(struct task_args *targ, struct lua_State *L, enum lua
while (lua_next(L, -2)) {
if (lua_to_ip(L, TABLE, "public_ip_range_start", &dst_ip1) ||
- lua_to_ip(L, TABLE, "public_ip_range_stop", &dst_ip2) ||
- lua_to_val_range(L, TABLE, "public_port", &dst_port))
- return -1;
+ lua_to_ip(L, TABLE, "public_ip_range_stop", &dst_ip2) ||
+ lua_to_val_range(L, TABLE, "public_port", &dst_port))
+ return -1;
PROX_PANIC(dst_ip2 < dst_ip1, "public_ip_range error: %d.%d.%d.%d < %d.%d.%d.%d\n", (dst_ip2 >> 24), (dst_ip2 >> 16) & 0xFF, (dst_ip2 >> 8) & 0xFF, dst_ip2 & 0xFF, dst_ip1 >> 24, (dst_ip1 >> 16) & 0xFF, (dst_ip1 >> 8) & 0xFF, dst_ip1 & 0xFF);
PROX_PANIC(dst_port.end < dst_port.beg, "public_port error: %d < %d\n", dst_port.end, dst_port.beg);
n_ip = dst_ip2 - dst_ip1 + 1;
@@ -632,9 +637,9 @@ static int lua_to_hash_nat(struct task_args *targ, struct lua_State *L, enum lua
if ((pop2 = lua_getfrom(L, TABLE, "static_ip")) < 0) {
plogx_info("No static ip table found\n");
} else {
- if (!lua_istable(L, -1)) {
- plogx_err("Can't read cgnat since data is not a table\n");
- return -1;
+ if (!lua_istable(L, -1)) {
+ plogx_err("Can't read cgnat since data is not a table\n");
+ return -1;
}
lua_len(L, -1);
@@ -646,7 +651,7 @@ static int lua_to_hash_nat(struct task_args *targ, struct lua_State *L, enum lua
lua_pushnil(L);
while (lua_next(L, -2)) {
if (lua_to_ip(L, TABLE, "src_ip", &ip_from) ||
- lua_to_ip(L, TABLE, "dst_ip", &ip_to))
+ lua_to_ip(L, TABLE, "dst_ip", &ip_to))
return -1;
ip_from = rte_bswap32(ip_from);
ip_to = rte_bswap32(ip_to);
@@ -667,9 +672,9 @@ static int lua_to_hash_nat(struct task_args *targ, struct lua_State *L, enum lua
if ((pop2 = lua_getfrom(L, TABLE, "static_ip_port")) < 0) {
plogx_info("No static table found\n");
} else {
- if (!lua_istable(L, -1)) {
- plogx_err("Can't read cgnat since data is not a table\n");
- return -1;
+ if (!lua_istable(L, -1)) {
+ plogx_err("Can't read cgnat since data is not a table\n");
+ return -1;
}
lua_len(L, -1);
@@ -682,10 +687,10 @@ static int lua_to_hash_nat(struct task_args *targ, struct lua_State *L, enum lua
while (lua_next(L, -2)) {
if (lua_to_ip(L, TABLE, "src_ip", &ip_from) ||
- lua_to_ip(L, TABLE, "dst_ip", &ip_to) ||
- lua_to_port(L, TABLE, "src_port", &port_from) ||
- lua_to_port(L, TABLE, "dst_port", &port_to))
- return -1;
+ lua_to_ip(L, TABLE, "dst_ip", &ip_to) ||
+ lua_to_port(L, TABLE, "src_port", &port_from) ||
+ lua_to_port(L, TABLE, "dst_port", &port_to))
+ return -1;
ip_from = rte_bswap32(ip_from);
ip_to = rte_bswap32(ip_to);
@@ -740,7 +745,7 @@ static int lua_to_hash_nat(struct task_args *targ, struct lua_State *L, enum lua
ip_info = &tmp_public_ip_config_info[ip_free_count];
ip_info->public_ip = rte_bswap32(ip);
ip_info->port_list = (uint16_t *)prox_zmalloc((dst_port.end - dst_port.beg) * sizeof(uint16_t), socket);
- PROX_PANIC(ip_info->port_list == NULL, "Failed to allocate list of ports for ip %x\n", ip);
+ PROX_PANIC(ip_info->port_list == NULL, "Failed to allocate list of ports for ip %x\n", ip);
for (uint32_t port = tmp_public_ip[i].port_beg; port <= tmp_public_ip[i].port_end; port++) {
ip_info->port_list[ip_info->port_free_count] = rte_bswap16(port);
ip_info->port_free_count++;
@@ -763,7 +768,7 @@ static int lua_to_hash_nat(struct task_args *targ, struct lua_State *L, enum lua
ip_info = &tmp_public_ip_config_info[ip_free_count];
ip_info->public_ip = tmp_static_ip_port[i].public_ip;
ip_info->port_list = (uint16_t *)prox_zmalloc(tmp_static_ip_port[i].n_ports * sizeof(uint16_t), socket);
- PROX_PANIC(ip_info->port_list == NULL, "Failed to allocate list of ports for ip %x\n", tmp_static_ip_port[i].public_ip);
+ PROX_PANIC(ip_info->port_list == NULL, "Failed to allocate list of ports for ip %x\n", tmp_static_ip_port[i].public_ip);
ip_info->port_list[ip_info->port_free_count] = tmp_static_ip_port[i].public_port;
ip_info->port_free_count++;
ip_info->max_port_count = ip_info->port_free_count;
@@ -792,6 +797,7 @@ static int lua_to_hash_nat(struct task_args *targ, struct lua_State *L, enum lua
.key_len = sizeof(struct private_key),
.hash_func = rte_hash_crc,
.hash_func_init_val = 0,
+ .socket_id = socket,
};
plogx_info("hash table name = %s\n", hash_params.name);
struct private_key private_key;
@@ -959,7 +965,7 @@ static void init_task_nat(struct task_base *tbase, struct task_args *targ)
struct prox_port_cfg *port = find_reachable_port(targ);
if (port) {
- task->offload_crc = port->requested_tx_offload & (DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM);
+ task->offload_crc = port->requested_tx_offload & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_UDP_CKSUM);
}
}
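/*
 * Note on the handle_nat_bulk() hunks above: packets that fail the eth/ipv4
 * check are marked OUT_DISCARD, but their slot in the key array now points at
 * the null_key placeholder instead of NULL, presumably so that the bulk hash
 * lookup never dereferences a NULL key.  The new "if (out[j] != OUT_DISCARD)"
 * guards then make sure the (meaningless) lookup result for those slots is
 * never applied to the discarded packets.
 */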
diff --git a/VNFs/DPPD-PROX/handle_dump.c b/VNFs/DPPD-PROX/handle_dump.c
index 29a46fef..8fbc514c 100644
--- a/VNFs/DPPD-PROX/handle_dump.c
+++ b/VNFs/DPPD-PROX/handle_dump.c
@@ -42,7 +42,12 @@ static uint16_t buffer_packets(struct task_dump *task, struct rte_mbuf **mbufs,
return 0;
for (j = 0; j < n_pkts && task->n_mbufs < task->n_pkts; ++j) {
+#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
+ uint64_t rdtsc = rte_rdtsc();
+ memcpy(&mbufs[j]->dynfield1[0], &rdtsc, sizeof(rdtsc));
+#else
mbufs[j]->udata64 = rte_rdtsc();
+#endif
task->mbufs[task->n_mbufs++] = mbufs[j];
}
@@ -93,10 +98,20 @@ static void stop(struct task_base *tbase)
pcap_dump_handle = pcap_dump_open(handle, task->pcap_file);
if (task->n_mbufs) {
+#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
+ memcpy(&beg, &task->mbufs[0]->dynfield1[0], sizeof(beg));
+#else
beg = task->mbufs[0]->udata64;
+#endif
}
for (uint32_t j = 0; j < task->n_mbufs; ++j) {
+#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
+ uint64_t mbufs_beg;
+ memcpy(&mbufs_beg, &task->mbufs[j]->dynfield1[0], sizeof(mbufs_beg));
+ tsc = mbufs_beg - beg;
+#else
tsc = task->mbufs[j]->udata64 - beg;
+#endif
header.len = rte_pktmbuf_pkt_len(task->mbufs[j]);
header.caplen = header.len;
tsc_to_tv(&header.ts, tsc);
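/*
 * The udata64 mbuf field was removed in DPDK 20.11, so the hunks above stash
 * the capture timestamp in the mbuf's dynfield1 scratch area instead.  A
 * minimal sketch of hiding that version split behind helpers (helper names
 * are illustrative, not part of this patch):
 */
#include <string.h>
#include <rte_mbuf.h>
#include <rte_version.h>

static inline void mbuf_tsc_set(struct rte_mbuf *m, uint64_t tsc)
{
#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
	memcpy(&m->dynfield1[0], &tsc, sizeof(tsc));	/* generic scratch space */
#else
	m->udata64 = tsc;				/* legacy user data field */
#endif
}

static inline uint64_t mbuf_tsc_get(const struct rte_mbuf *m)
{
	uint64_t tsc;
#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
	memcpy(&tsc, &m->dynfield1[0], sizeof(tsc));
#else
	tsc = m->udata64;
#endif
	return tsc;
}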
diff --git a/VNFs/DPPD-PROX/handle_esp.c b/VNFs/DPPD-PROX/handle_esp.c
index 447fcfa2..a78130bf 100644
--- a/VNFs/DPPD-PROX/handle_esp.c
+++ b/VNFs/DPPD-PROX/handle_esp.c
@@ -34,7 +34,6 @@
#include "defines.h"
#include <rte_ip.h>
#include <rte_cryptodev.h>
-#include <rte_cryptodev_pmd.h>
#include <rte_bus_vdev.h>
#include "prox_port_cfg.h"
#include "prox_compat.h"
@@ -55,9 +54,9 @@ typedef unsigned char u8;
#define MAX_SESSIONS 1024
#define POOL_CACHE_SIZE 128
-#define NUM_OPS 256
-
-struct task_esp_enc {
+//#define NUM_OPS 256
+#define NUM_OPS 128
+struct task_esp {
struct task_base base;
uint8_t cdev_id;
uint16_t qp_id;
@@ -69,19 +68,12 @@ struct task_esp_enc {
struct rte_mempool *session_pool;
struct rte_cryptodev_sym_session *sess;
struct rte_crypto_op *ops_burst[NUM_OPS];
-};
-
-struct task_esp_dec {
- struct task_base base;
- uint8_t cdev_id;
- uint16_t qp_id;
- uint32_t local_ipv4;
- prox_rte_ether_addr local_mac;
- prox_rte_ether_addr dst_mac;
- struct rte_mempool *crypto_op_pool;
- struct rte_mempool *session_pool;
- struct rte_cryptodev_sym_session *sess;
- struct rte_crypto_op *ops_burst[NUM_OPS];
+ unsigned len; //number of ops ready to be enqueued
+ uint32_t pkts_in_flight; // difference between enqueued and dequeued
+ uint8_t (*handle_esp_finish)(struct task_esp *task,
+ struct rte_mbuf *mbuf, uint8_t status);
+ uint8_t (*handle_esp_ah)(struct task_esp *task, struct rte_mbuf *mbuf,
+ struct rte_crypto_op *cop);
};
static uint8_t hmac_sha1_key[] = {
@@ -117,253 +109,115 @@ static void printf_cdev_info(uint8_t cdev_id)
}
}
-#if 0
static uint8_t get_cdev_id(void)
{
- //crypto devices must be configured in the config file
- //eal=-b 0000:00:03.0 --vdev crypto_aesni_mb0 --vdev crypto_aesni_mb1
-
- static uint8_t cdev_id=0;
- PROX_PANIC(cdev_id+1 > rte_cryptodev_count(), "not enough crypto devices\n");
- //eal=-b 0000:00:03.0 --vdev crypto_aesni_mb0 --vdev crypto_aesni_mb1
- return cdev_id++;
-}
-#else
-static uint8_t get_cdev_id(void)
-{
- static uint8_t cdev_id=0;
+ static uint8_t last_unused_cdev_id=0;
char name[64]={0};
-
- sprintf(name, "crypto_aesni_mb%d", cdev_id);
-
- int cdev_id1 = rte_cryptodev_get_dev_id(name);
- if (cdev_id1 >= 0){
- plog_info("crypto dev %d preconfigured\n", cdev_id1);
- ++cdev_id;
- return cdev_id1;
+ uint8_t cdev_count, cdev_id;
+
+ cdev_count = rte_cryptodev_count();
+	plog_info("crypto dev count: %d\n", cdev_count);
+ for (cdev_id = last_unused_cdev_id; cdev_id < cdev_count; cdev_id++) {
+ if (cdev_id != 1) {
+ printf_cdev_info(cdev_id);
+ last_unused_cdev_id = cdev_id + 1;
+ return cdev_id;
+ }
}
+ sprintf(name, "crypto_aesni_mb%d", cdev_count);
+
#if RTE_VERSION < RTE_VERSION_NUM(18,8,0,0)
int ret = rte_vdev_init(name, "max_nb_queue_pairs=8,max_nb_sessions=1024,socket_id=0");
#else
int ret = rte_vdev_init(name, "max_nb_queue_pairs=8,socket_id=0");
#endif
PROX_PANIC(ret != 0, "Failed rte_vdev_init\n");
+ cdev_id = rte_cryptodev_get_dev_id(name);
- return cdev_id++;
+ printf_cdev_info(cdev_id);
+ last_unused_cdev_id = cdev_id + 1;
+ return cdev_id;
}
-#endif
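/*
 * The rewritten get_cdev_id() above first hands out crypto devices that were
 * already probed via the EAL options (each call returns the next unused id,
 * with device id 1 skipped), and only falls back to creating a crypto_aesni_mb
 * vdev once no probed device is left.  The version check on the vdev argument
 * string reflects the removal of max_nb_sessions from the aesni_mb parameters
 * in DPDK 18.08.
 */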
-static void init_task_esp_enc(struct task_base *tbase, struct task_args *targ)
+static inline uint8_t handle_enc_finish(struct task_esp *task,
+ struct rte_mbuf *mbuf, uint8_t status)
{
- struct task_esp_enc *task = (struct task_esp_enc *)tbase;
-
- tbase->flags |= FLAG_NEVER_FLUSH;
-
- uint8_t lcore_id = targ->lconf->id;
- char name[64];
- sprintf(name, "core_%03u_crypto_pool", lcore_id);
- task->crypto_op_pool = rte_crypto_op_pool_create(name, RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- 8192, 128, MAXIMUM_IV_LENGTH, rte_socket_id());
- PROX_PANIC(task->crypto_op_pool == NULL, "Can't create ENC CRYPTO_OP_POOL\n");
-
- task->cdev_id = get_cdev_id();
-
- struct rte_cryptodev_config cdev_conf;
- cdev_conf.nb_queue_pairs = 2;
- //cdev_conf.socket_id = SOCKET_ID_ANY;
- cdev_conf.socket_id = rte_socket_id();
- rte_cryptodev_configure(task->cdev_id, &cdev_conf);
-
- unsigned int session_size = rte_cryptodev_sym_get_private_session_size(task->cdev_id);
- plog_info("rte_cryptodev_sym_get_private_session_size=%d\n", session_size);
- sprintf(name, "core_%03u_session_pool", lcore_id);
- task->session_pool = rte_mempool_create(name,
- MAX_SESSIONS,
- session_size,
- POOL_CACHE_SIZE,
- 0, NULL, NULL, NULL,
- NULL, rte_socket_id(),
- 0);
- PROX_PANIC(task->session_pool == NULL, "Failed rte_mempool_create\n");
-
- task->qp_id=0;
- plog_info("enc: task->qp_id=%u\n", task->qp_id);
- struct prox_rte_cryptodev_qp_conf qp_conf;
- qp_conf.nb_descriptors = 128;
- qp_conf.mp_session = task->session_pool;
- prox_rte_cryptodev_queue_pair_setup(task->cdev_id, task->qp_id, &qp_conf, rte_cryptodev_socket_id(task->cdev_id));
-
- int ret = rte_cryptodev_start(task->cdev_id);
- PROX_PANIC(ret < 0, "Failed to start device\n");
-
- struct rte_cryptodev *dev;
- dev = rte_cryptodev_pmd_get_dev(task->cdev_id);
- PROX_PANIC(dev->attached != RTE_CRYPTODEV_ATTACHED, "No ENC cryptodev attached\n");
-
- //Setup Cipher Parameters
- struct rte_crypto_sym_xform cipher_xform = {0};
- struct rte_crypto_sym_xform auth_xform = {0};
-
- cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
- cipher_xform.next = &auth_xform;
-
- cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
- cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
- cipher_xform.cipher.key.data = aes_cbc_key;
- cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
-
- cipher_xform.cipher.iv.offset = IV_OFFSET;
- cipher_xform.cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
-
- //Setup HMAC Parameters
- auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
- auth_xform.next = NULL;
- auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
- auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
- auth_xform.auth.key.length = DIGEST_BYTE_LENGTH_SHA1;
- auth_xform.auth.key.data = hmac_sha1_key;
- auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;
-
- auth_xform.auth.iv.offset = 0;
- auth_xform.auth.iv.length = 0;
-
- task->sess = rte_cryptodev_sym_session_create(task->session_pool);
- PROX_PANIC(task->sess == NULL, "Failed to create ENC session\n");
-
- ret = rte_cryptodev_sym_session_init(task->cdev_id, task->sess, &cipher_xform, task->session_pool);
- PROX_PANIC(ret < 0, "Failed sym_session_init\n");
-
- //TODO: doublecheck task->ops_burst lifecycle!
- if (rte_crypto_op_bulk_alloc(task->crypto_op_pool,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- task->ops_burst, NUM_OPS) != NUM_OPS) {
- PROX_PANIC(1, "Failed to allocate ENC crypto operations\n");
- }
-
- task->local_ipv4 = rte_cpu_to_be_32(targ->local_ipv4);
- task->remote_ipv4 = rte_cpu_to_be_32(targ->remote_ipv4);
- //memcpy(&task->src_mac, &prox_port_cfg[task->base.tx_params_hw.tx_port_queue->port].eth_addr, sizeof(prox_rte_ether_addr));
- struct prox_port_cfg *port = find_reachable_port(targ);
- memcpy(&task->local_mac, &port->eth_addr, sizeof(prox_rte_ether_addr));
-
- if (targ->flags & TASK_ARG_DST_MAC_SET){
- memcpy(&task->dst_mac, &targ->edaddr, sizeof(task->dst_mac));
- plog_info("TASK_ARG_DST_MAC_SET ("MAC_BYTES_FMT")\n", MAC_BYTES(task->dst_mac.addr_bytes));
- //prox_rte_ether_addr_copy(&ptask->dst_mac, &peth->d_addr);
- //rte_memcpy(hdr, task->src_dst_mac, sizeof(task->src_dst_mac));
- }
+ prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf,
+ prox_rte_ether_hdr *);
+ prox_rte_ipv4_hdr* pip4 = (prox_rte_ipv4_hdr *)(peth + 1);
+ pip4->dst_addr = task->remote_ipv4;
+ pip4->src_addr = task->local_ipv4;
+ prox_ip_cksum(mbuf, pip4, sizeof(prox_rte_ether_hdr),
+ sizeof(prox_rte_ipv4_hdr), 1);
+ return 0;
}
-static void init_task_esp_dec(struct task_base *tbase, struct task_args *targ)
+static inline uint8_t handle_dec_finish(struct task_esp *task,
+ struct rte_mbuf *mbuf, uint8_t status)
{
- struct task_esp_dec *task = (struct task_esp_dec *)tbase;
-
- tbase->flags |= FLAG_NEVER_FLUSH;
-
- uint8_t lcore_id = targ->lconf->id;
- char name[64];
- sprintf(name, "core_%03u_crypto_pool", lcore_id);
- task->crypto_op_pool = rte_crypto_op_pool_create(name, RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- 8192, 128, MAXIMUM_IV_LENGTH, rte_socket_id());
- PROX_PANIC(task->crypto_op_pool == NULL, "Can't create DEC CRYPTO_OP_POOL\n");
-
- task->cdev_id = get_cdev_id();
- struct rte_cryptodev_config cdev_conf;
- cdev_conf.nb_queue_pairs = 2;
- cdev_conf.socket_id = SOCKET_ID_ANY;
- cdev_conf.socket_id = rte_socket_id();
- rte_cryptodev_configure(task->cdev_id, &cdev_conf);
-
- unsigned int session_size = rte_cryptodev_sym_get_private_session_size(task->cdev_id);
- plog_info("rte_cryptodev_sym_get_private_session_size=%d\n", session_size);
- sprintf(name, "core_%03u_session_pool", lcore_id);
- task->session_pool = rte_mempool_create(name,
- MAX_SESSIONS,
- session_size,
- POOL_CACHE_SIZE,
- 0, NULL, NULL, NULL,
- NULL, rte_socket_id(),
- 0);
- PROX_PANIC(task->session_pool == NULL, "Failed rte_mempool_create\n");
-
- task->qp_id=0;
- plog_info("dec: task->qp_id=%u\n", task->qp_id);
- struct prox_rte_cryptodev_qp_conf qp_conf;
- qp_conf.nb_descriptors = 128;
- qp_conf.mp_session = task->session_pool;
- prox_rte_cryptodev_queue_pair_setup(task->cdev_id, task->qp_id, &qp_conf, rte_cryptodev_socket_id(task->cdev_id));
-
- int ret = rte_cryptodev_start(task->cdev_id);
- PROX_PANIC(ret < 0, "Failed to start device\n");
-
- struct rte_cryptodev *dev;
- dev = rte_cryptodev_pmd_get_dev(task->cdev_id);
- PROX_PANIC(dev->attached != RTE_CRYPTODEV_ATTACHED, "No ENC cryptodev attached\n");
-
- //Setup Cipher Parameters
- struct rte_crypto_sym_xform cipher_xform = {0};
- struct rte_crypto_sym_xform auth_xform = {0};
-
- cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
- cipher_xform.next = NULL;
- cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
- cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
- cipher_xform.cipher.key.data = aes_cbc_key;
- cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
-
- cipher_xform.cipher.iv.offset = IV_OFFSET;
- cipher_xform.cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
-
- //Setup HMAC Parameters
- auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
- auth_xform.next = &cipher_xform;
- auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
- auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
- auth_xform.auth.key.length = DIGEST_BYTE_LENGTH_SHA1;
- auth_xform.auth.key.data = hmac_sha1_key;
- auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;
-
- auth_xform.auth.iv.offset = 0;
- auth_xform.auth.iv.length = 0;
-
- task->sess = rte_cryptodev_sym_session_create(task->session_pool);
- PROX_PANIC(task->sess == NULL, "Failed to create ENC session\n");
-
- ret = rte_cryptodev_sym_session_init(task->cdev_id, task->sess, &cipher_xform, task->session_pool);
- PROX_PANIC(ret < 0, "Failed sym_session_init\n");
-
- //TODO: doublecheck task->ops_burst lifecycle!
- if (rte_crypto_op_bulk_alloc(task->crypto_op_pool,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- task->ops_burst, NUM_OPS) != NUM_OPS) {
- PROX_PANIC(1, "Failed to allocate DEC crypto operations\n");
- }
-
- task->local_ipv4 = rte_cpu_to_be_32(targ->local_ipv4);
- //memcpy(&task->src_mac, &prox_port_cfg[task->base.tx_params_hw.tx_port_queue->port].eth_addr, sizeof(prox_rte_ether_addr));
- struct prox_port_cfg *port = find_reachable_port(targ);
- memcpy(&task->local_mac, &port->eth_addr, sizeof(prox_rte_ether_addr));
+ if (likely(status == RTE_CRYPTO_OP_STATUS_SUCCESS)) {
+ u8* m = rte_pktmbuf_mtod(mbuf, u8*);
+ rte_memcpy(m + sizeof(prox_rte_ipv4_hdr) +
+ sizeof(struct prox_esp_hdr) +
+ CIPHER_IV_LENGTH_AES_CBC, m,
+ sizeof(prox_rte_ether_hdr));
+ m = (u8*)rte_pktmbuf_adj(mbuf, sizeof(prox_rte_ipv4_hdr) +
+ sizeof(struct prox_esp_hdr) +
+ CIPHER_IV_LENGTH_AES_CBC);
+ prox_rte_ipv4_hdr* pip4 = (prox_rte_ipv4_hdr *)(m +
+ sizeof(prox_rte_ether_hdr));
+
+ if (unlikely((pip4->version_ihl >> 4) != 4)) {
+ // plog_info("non IPv4 packet after esp dec %i\n",
+ // pip4->version_ihl);
+ // plogdx_info(mbuf, "DEC TX: ");
+ return OUT_DISCARD;
+ }
+ if (pip4->time_to_live) {
+ pip4->time_to_live--;
+ }
+ else {
+ plog_info("TTL = 0 => Dropping\n");
+ return OUT_DISCARD;
+ }
+ uint16_t ipv4_length = rte_be_to_cpu_16(pip4->total_length);
+ int len = rte_pktmbuf_pkt_len(mbuf);
+ rte_pktmbuf_trim(mbuf, len - sizeof(prox_rte_ether_hdr) -
+ ipv4_length);
- if (targ->flags & TASK_ARG_DST_MAC_SET){
- memcpy(&task->dst_mac, &targ->edaddr, sizeof(task->dst_mac));
- plog_info("TASK_ARG_DST_MAC_SET ("MAC_BYTES_FMT")\n", MAC_BYTES(task->dst_mac.addr_bytes));
- //prox_rte_ether_addr_copy(&ptask->dst_mac, &peth->d_addr);
- //rte_memcpy(hdr, task->src_dst_mac, sizeof(task->src_dst_mac));
+#if 0
+ do_ipv4_swap(task, mbuf);
+#else
+ prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf,
+ prox_rte_ether_hdr *);
+ prox_rte_ether_addr_copy(&task->local_mac, &peth->s_addr);
+ prox_rte_ether_addr_copy(&task->dst_mac, &peth->d_addr);
+ //rte_memcpy(peth, task->dst_mac, sizeof(task->dst_mac));
+#endif
+ pip4->dst_addr = task->remote_ipv4;
+ pip4->src_addr = task->local_ipv4;
+ prox_ip_cksum(mbuf, pip4, sizeof(prox_rte_ether_hdr),
+ sizeof(prox_rte_ipv4_hdr), 1);
+ return 0;
+ }
+ else {
+ return OUT_DISCARD;
}
-
}
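/*
 * handle_dec_finish() above un-tunnels a successfully decrypted packet: the
 * Ethernet header is copied forward, the outer IPv4 header, the ESP header
 * and the AES-CBC IV are stripped from the front, and the ESP padding,
 * pad-length/next-header bytes and SHA1 ICV are trimmed from the tail using
 * the inner header's total_length.  A minimal sketch of that arithmetic,
 * assuming the usual 14/20/8/16-byte header sizes (the helper is
 * hypothetical, not part of this patch):
 */
#include <stdint.h>

struct esp_decap_cuts {
	uint32_t head;	/* bytes removed from the front (rte_pktmbuf_adj) */
	uint32_t tail;	/* bytes removed from the tail (rte_pktmbuf_trim) */
};

static inline struct esp_decap_cuts esp_decap_cut_lengths(uint32_t pkt_len,
		uint16_t inner_total_length)
{
	const uint32_t eth = 14, outer_ipv4 = 20, esp = 8, iv = 16;
	struct esp_decap_cuts c;

	c.head = outer_ipv4 + esp + iv;	/* outer IPv4 + SPI/SN + IV */
	/* whatever remains beyond Ethernet + inner IPv4 packet after the head cut */
	c.tail = pkt_len - c.head - eth - inner_total_length;
	return c;
}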
-static inline uint8_t handle_esp_ah_enc(struct task_esp_enc *task, struct rte_mbuf *mbuf, struct rte_crypto_op *cop)
+static inline uint8_t handle_esp_ah_enc(struct task_esp *task,
+ struct rte_mbuf *mbuf, struct rte_crypto_op *cop)
{
u8 *data;
- prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
+ prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf,
+ prox_rte_ether_hdr *);
prox_rte_ipv4_hdr* pip4 = (prox_rte_ipv4_hdr *)(peth + 1);
uint16_t ipv4_length = rte_be_to_cpu_16(pip4->total_length);
struct rte_crypto_sym_op *sym_cop = cop->sym;
if (unlikely((pip4->version_ihl >> 4) != 4)) {
- plog_info("Received non IPv4 packet at esp enc %i\n", pip4->version_ihl);
- plogdx_info(mbuf, "ENC RX: ");
+ plog_info("Received non IPv4 packet at esp enc %i\n",
+ pip4->version_ihl);
return OUT_DISCARD;
}
if (pip4->time_to_live) {
@@ -389,7 +243,8 @@ static inline uint8_t handle_esp_ah_enc(struct task_esp_enc *task, struct rte_mb
encrypt_len += padding;
}
- const int extra_space = sizeof(prox_rte_ipv4_hdr) + sizeof(struct prox_esp_hdr) + CIPHER_IV_LENGTH_AES_CBC;
+ const int extra_space = sizeof(prox_rte_ipv4_hdr) +
+ sizeof(struct prox_esp_hdr) + CIPHER_IV_LENGTH_AES_CBC;
prox_rte_ether_addr src_mac = peth->s_addr;
prox_rte_ether_addr dst_mac = peth->d_addr;
@@ -399,7 +254,8 @@ static inline uint8_t handle_esp_ah_enc(struct task_esp_enc *task, struct rte_mb
uint8_t version_ihl = pip4->version_ihl;
peth = (prox_rte_ether_hdr *)rte_pktmbuf_prepend(mbuf, extra_space); // encap + prefix
- peth = (prox_rte_ether_hdr *)rte_pktmbuf_append(mbuf, 0 + 1 + 1 + padding + 4 + DIGEST_BYTE_LENGTH_SHA1); // padding + pad_len + next_head + seqn + ICV pad + ICV
+ peth = (prox_rte_ether_hdr *)rte_pktmbuf_append(mbuf, 0 + 1 + 1 +
+ padding + 4 + DIGEST_BYTE_LENGTH_SHA1); // padding + pad_len + next_head + seqn + ICV pad + ICV
peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
l1 = rte_pktmbuf_pkt_len(mbuf);
peth->ether_type = ETYPE_IPv4;
@@ -419,11 +275,15 @@ static inline uint8_t handle_esp_ah_enc(struct task_esp_enc *task, struct rte_mb
pip4->time_to_live = ttl;
pip4->next_proto_id = IPPROTO_ESP; // 50 for ESP, ip in ip next proto trailer
pip4->version_ihl = version_ihl; // 20 bytes, ipv4
- pip4->total_length = rte_cpu_to_be_16(ipv4_length + sizeof(prox_rte_ipv4_hdr) + sizeof(struct prox_esp_hdr) + CIPHER_IV_LENGTH_AES_CBC + padding + 1 + 1 + DIGEST_BYTE_LENGTH_SHA1); // iphdr+SPI+SN+IV+payload+padding+padlen+next header + crc + auth
+ pip4->total_length = rte_cpu_to_be_16(ipv4_length +
+ sizeof(prox_rte_ipv4_hdr) + sizeof(struct prox_esp_hdr)
+ + CIPHER_IV_LENGTH_AES_CBC + padding + 1 + 1 +
+ DIGEST_BYTE_LENGTH_SHA1); // iphdr+SPI+SN+IV+payload+padding+padlen+next header + crc + auth
pip4->packet_id = 0x0101;
pip4->type_of_service = 0;
pip4->time_to_live = 64;
- prox_ip_cksum(mbuf, pip4, sizeof(prox_rte_ether_hdr), sizeof(prox_rte_ipv4_hdr), 1);
+ prox_ip_cksum(mbuf, pip4, sizeof(prox_rte_ether_hdr),
+ sizeof(prox_rte_ipv4_hdr), 1);
data = (u8*)(pip4 + 1);
#if 0
@@ -434,17 +294,20 @@ static inline uint8_t handle_esp_ah_enc(struct task_esp_enc *task, struct rte_mb
pesp->spi = src_addr;//for simplicity assume 1 tunnel per source ip
static u32 sn = 0;
pesp->seq = ++sn;
- pesp->spi=0xAAAAAAAA;//debug
- pesp->seq =0xBBBBBBBB;//debug
+// pesp->spi=0xAAAAAAAA;//debug
+// pesp->seq =0xBBBBBBBB;//debug
#endif
u8 *padl = (u8*)data + (8 + encrypt_len - 2 + CIPHER_IV_LENGTH_AES_CBC); // No ESN yet. (-2 means NH is crypted)
//padl += CIPHER_IV_LENGTH_AES_CBC;
*padl = padding;
*(padl + 1) = 4; // ipv4 in 4
- sym_cop->auth.digest.data = data + 8 + CIPHER_IV_LENGTH_AES_CBC + encrypt_len;
+ sym_cop->auth.digest.data = data + 8 + CIPHER_IV_LENGTH_AES_CBC +
+ encrypt_len;
//sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(mbuf, (sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + 8 + CIPHER_IV_LENGTH_AES_CBC + encrypt_len));
- sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(mbuf, (sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + 8 + CIPHER_IV_LENGTH_AES_CBC + encrypt_len));
+ sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(mbuf,
+ (sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr)
+ + 8 + CIPHER_IV_LENGTH_AES_CBC + encrypt_len));
//sym_cop->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;
//sym_cop->cipher.iv.data = data + 8;
@@ -465,25 +328,31 @@ static inline uint8_t handle_esp_ah_enc(struct task_esp_enc *task, struct rte_mb
#else
//uint64_t *iv = (uint64_t *)(pesp + 1);
//memset(iv, 0, CIPHER_IV_LENGTH_AES_CBC);
- sym_cop->cipher.data.offset = sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + sizeof(struct prox_esp_hdr);
+ sym_cop->cipher.data.offset = sizeof(prox_rte_ether_hdr) +
+ sizeof(prox_rte_ipv4_hdr) + sizeof(struct prox_esp_hdr);
sym_cop->cipher.data.length = encrypt_len + CIPHER_IV_LENGTH_AES_CBC;
#endif
- sym_cop->auth.data.offset = sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr);
- sym_cop->auth.data.length = sizeof(struct prox_esp_hdr) + CIPHER_IV_LENGTH_AES_CBC + encrypt_len;// + 4;// FIXME
+ sym_cop->auth.data.offset = sizeof(prox_rte_ether_hdr) +
+ sizeof(prox_rte_ipv4_hdr);
+ sym_cop->auth.data.length = sizeof(struct prox_esp_hdr) +
+ CIPHER_IV_LENGTH_AES_CBC + encrypt_len;// + 4;// FIXME
sym_cop->m_src = mbuf;
rte_crypto_op_attach_sym_session(cop, task->sess);
+
//cop->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
//cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
return 0;
}
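/*
 * On the encryption side, handle_esp_ah_enc() above grows the mbuf by the
 * full ESP tunnel overhead: a new outer IPv4 header, the ESP header and IV in
 * front, and the AES-CBC padding, pad-length/next-header bytes and SHA1 ICV
 * behind.  A small sketch of the outer total_length it writes, mirroring the
 * rte_cpu_to_be_16() expression above (sizes assumed: 20-byte IPv4, 8-byte
 * ESP header, 16-byte IV, 20-byte digest; padding is whatever rounds the
 * encrypted payload up to the 16-byte block size):
 */
#include <stdint.h>

static inline uint16_t esp_outer_total_length(uint16_t inner_total_length,
		uint16_t padding)
{
	const uint16_t outer_ipv4 = 20, esp = 8, iv = 16, icv = 20;

	/* outer IPv4 + ESP(SPI,SN) + IV + inner packet + padding
	 * + pad-length byte + next-header byte + ICV */
	return inner_total_length + outer_ipv4 + esp + iv + padding + 1 + 1 + icv;
}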
-static inline uint8_t handle_esp_ah_dec(struct task_esp_dec *task, struct rte_mbuf *mbuf, struct rte_crypto_op *cop)
+static inline uint8_t handle_esp_ah_dec(struct task_esp *task,
+ struct rte_mbuf *mbuf, struct rte_crypto_op *cop)
{
struct rte_crypto_sym_op *sym_cop = cop->sym;
- prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
+ prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf,
+ prox_rte_ether_hdr *);
prox_rte_ipv4_hdr* pip4 = (prox_rte_ipv4_hdr *)(peth + 1);
uint16_t ipv4_length = rte_be_to_cpu_16(pip4->total_length);
u8 *data = (u8*)(pip4 + 1);
@@ -496,9 +365,12 @@ static inline uint8_t handle_esp_ah_dec(struct task_esp_dec *task, struct rte_mb
rte_crypto_op_attach_sym_session(cop, task->sess);
- sym_cop->auth.digest.data = (unsigned char *)((unsigned char*)pip4 + ipv4_length - DIGEST_BYTE_LENGTH_SHA1);
+ sym_cop->auth.digest.data = (unsigned char *)((unsigned char*)pip4 +
+ ipv4_length - DIGEST_BYTE_LENGTH_SHA1);
//sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(mbuf, sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + sizeof(struct prox_esp_hdr)); // FIXME
- sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(mbuf, sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + sizeof(struct prox_esp_hdr));
+ sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(mbuf,
+ sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr)
+ + sizeof(struct prox_esp_hdr));
//sym_cop->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;
//sym_cop->cipher.iv.data = (uint8_t *)data + 8;
@@ -516,19 +388,25 @@ static inline uint8_t handle_esp_ah_dec(struct task_esp_dec *task, struct rte_mb
CIPHER_IV_LENGTH_AES_CBC);
#endif
- sym_cop->auth.data.offset = sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr);
- sym_cop->auth.data.length = ipv4_length - sizeof(prox_rte_ipv4_hdr) - 4 - CIPHER_IV_LENGTH_AES_CBC;
+ sym_cop->auth.data.offset = sizeof(prox_rte_ether_hdr) +
+ sizeof(prox_rte_ipv4_hdr);
+ sym_cop->auth.data.length = ipv4_length - sizeof(prox_rte_ipv4_hdr) - 4 -
+ CIPHER_IV_LENGTH_AES_CBC;
- sym_cop->cipher.data.offset = sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + sizeof(struct prox_esp_hdr) + CIPHER_IV_LENGTH_AES_CBC;
- sym_cop->cipher.data.length = ipv4_length - sizeof(prox_rte_ipv4_hdr) - CIPHER_IV_LENGTH_AES_CBC - 28; // FIXME
+ sym_cop->cipher.data.offset = sizeof(prox_rte_ether_hdr) +
+ sizeof(prox_rte_ipv4_hdr) + sizeof(struct prox_esp_hdr) +
+ CIPHER_IV_LENGTH_AES_CBC;
+ sym_cop->cipher.data.length = ipv4_length - sizeof(prox_rte_ipv4_hdr) -
+ CIPHER_IV_LENGTH_AES_CBC - 28; // FIXME
sym_cop->m_src = mbuf;
return 0;
}
-static inline void do_ipv4_swap(struct task_esp_dec *task, struct rte_mbuf *mbuf)
+static inline void do_ipv4_swap(struct task_esp *task, struct rte_mbuf *mbuf)
{
- prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
+ prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf,
+ prox_rte_ether_hdr *);
prox_rte_ether_addr src_mac = peth->s_addr;
prox_rte_ether_addr dst_mac = peth->d_addr;
uint32_t src_ip, dst_ip;
@@ -544,162 +422,312 @@ static inline void do_ipv4_swap(struct task_esp_dec *task, struct rte_mbuf *mbuf
prox_rte_ether_addr_copy(&task->local_mac, &peth->s_addr);
}
-static inline uint8_t handle_esp_ah_dec_finish(struct task_esp_dec *task, struct rte_mbuf *mbuf)
+
+static void init_task_esp_enc(struct task_base *tbase, struct task_args *targ)
{
- prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
- rte_memcpy(((u8*)peth) + sizeof(prox_rte_ether_hdr), ((u8*)peth) + sizeof(prox_rte_ether_hdr) +
- + sizeof(prox_rte_ipv4_hdr) + 4 + 4 + CIPHER_IV_LENGTH_AES_CBC, sizeof(prox_rte_ipv4_hdr));// next hdr, padding
- prox_rte_ipv4_hdr* pip4 = (prox_rte_ipv4_hdr *)(peth + 1);
+ struct task_esp *task = (struct task_esp *)tbase;
+ unsigned int session_size;
- if (unlikely((pip4->version_ihl >> 4) != 4)) {
- plog_info("non IPv4 packet after esp dec %i\n", pip4->version_ihl);
- plogdx_info(mbuf, "DEC TX: ");
- return OUT_DISCARD;
- }
- if (pip4->time_to_live) {
- pip4->time_to_live--;
- }
- else {
- plog_info("TTL = 0 => Dropping\n");
- return OUT_DISCARD;
- }
- uint16_t ipv4_length = rte_be_to_cpu_16(pip4->total_length);
- rte_memcpy(((u8*)peth) + sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr),
- ((u8*)peth) + sizeof(prox_rte_ether_hdr) +
- + 2 * sizeof(prox_rte_ipv4_hdr) + 4 + 4 + CIPHER_IV_LENGTH_AES_CBC, ipv4_length - sizeof(prox_rte_ipv4_hdr));
+ tbase->flags |= TBASE_FLAG_NEVER_FLUSH;
- int len = rte_pktmbuf_pkt_len(mbuf);
- rte_pktmbuf_trim(mbuf, len - sizeof(prox_rte_ether_hdr) - ipv4_length);
- peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
+ uint8_t lcore_id = targ->lconf->id;
+ char name[64];
+ task->handle_esp_finish = handle_enc_finish;
+ task->handle_esp_ah = handle_esp_ah_enc;
+ task->len = 0;
+ task->pkts_in_flight = 0;
+ sprintf(name, "core_%03u_crypto_pool", lcore_id);
+ task->crypto_op_pool = rte_crypto_op_pool_create(name,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC, targ->nb_mbuf, 128,
+ MAXIMUM_IV_LENGTH, rte_socket_id());
+ plog_info("rte_crypto_op_pool_create nb_elements =%d\n",
+ targ->nb_mbuf);
+	PROX_PANIC(task->crypto_op_pool == NULL,
+			"Can't create ENC CRYPTO_OP_POOL\n");
-#if 0
- do_ipv4_swap(task, mbuf);
-#else
- prox_rte_ether_addr_copy(&task->local_mac, &peth->s_addr);
- prox_rte_ether_addr_copy(&task->dst_mac, &peth->d_addr);
- //rte_memcpy(peth, task->dst_mac, sizeof(task->dst_mac));
-#endif
- prox_ip_cksum(mbuf, pip4, sizeof(prox_rte_ether_hdr), sizeof(prox_rte_ipv4_hdr), 1);
+ task->cdev_id = get_cdev_id();
- return 0;
-}
+ struct rte_cryptodev_config cdev_conf;
+ cdev_conf.nb_queue_pairs = 2;
+ cdev_conf.socket_id = rte_socket_id();
+ rte_cryptodev_configure(task->cdev_id, &cdev_conf);
-static inline uint8_t handle_esp_ah_dec_finish2(struct task_esp_dec *task, struct rte_mbuf *mbuf)
-{
- u8* m = rte_pktmbuf_mtod(mbuf, u8*);
- rte_memcpy(m+sizeof(prox_rte_ipv4_hdr)+sizeof(struct prox_esp_hdr)+CIPHER_IV_LENGTH_AES_CBC,
- m, sizeof(prox_rte_ether_hdr));
- m = (u8*)rte_pktmbuf_adj(mbuf, sizeof(prox_rte_ipv4_hdr)+sizeof(struct prox_esp_hdr)+CIPHER_IV_LENGTH_AES_CBC);
- prox_rte_ipv4_hdr* pip4 = (prox_rte_ipv4_hdr *)(m+sizeof(prox_rte_ether_hdr));
+ session_size = rte_cryptodev_sym_get_private_session_size(
+ task->cdev_id);
+ plog_info("rte_cryptodev_sym_get_private_session_size=%d\n",
+ session_size);
+ sprintf(name, "core_%03u_session_pool", lcore_id);
+ task->session_pool = rte_cryptodev_sym_session_pool_create(name,
+ MAX_SESSIONS,
+ session_size,
+ POOL_CACHE_SIZE,
+ 0, rte_socket_id());
+ PROX_PANIC(task->session_pool == NULL, "Failed rte_mempool_create\n");
- if (unlikely((pip4->version_ihl >> 4) != 4)) {
- plog_info("non IPv4 packet after esp dec %i\n", pip4->version_ihl);
- plogdx_info(mbuf, "DEC TX: ");
- return OUT_DISCARD;
- }
- if (pip4->time_to_live) {
- pip4->time_to_live--;
- }
- else {
- plog_info("TTL = 0 => Dropping\n");
- return OUT_DISCARD;
- }
- uint16_t ipv4_length = rte_be_to_cpu_16(pip4->total_length);
- int len = rte_pktmbuf_pkt_len(mbuf);
- rte_pktmbuf_trim(mbuf, len - sizeof(prox_rte_ether_hdr) - ipv4_length);
+ task->qp_id=0;
+ plog_info("enc: task->qp_id=%u\n", task->qp_id);
+ struct prox_rte_cryptodev_qp_conf qp_conf;
+ qp_conf.nb_descriptors = 2048;
+ qp_conf.mp_session = task->session_pool;
+ prox_rte_cryptodev_queue_pair_setup(task->cdev_id, task->qp_id,
+ &qp_conf, rte_cryptodev_socket_id(task->cdev_id));
-#if 0
- do_ipv4_swap(task, mbuf);
-#else
- prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
- prox_rte_ether_addr_copy(&task->local_mac, &peth->s_addr);
- prox_rte_ether_addr_copy(&task->dst_mac, &peth->d_addr);
- //rte_memcpy(peth, task->dst_mac, sizeof(task->dst_mac));
-#endif
+ int ret = rte_cryptodev_start(task->cdev_id);
+ PROX_PANIC(ret < 0, "Failed to start device\n");
- prox_ip_cksum(mbuf, pip4, sizeof(prox_rte_ether_hdr), sizeof(prox_rte_ipv4_hdr), 1);
- return 0;
-}
+ //Setup Cipher Parameters
+ struct rte_crypto_sym_xform cipher_xform = {0};
+ struct rte_crypto_sym_xform auth_xform = {0};
-static int handle_esp_enc_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
-{
- struct task_esp_enc *task = (struct task_esp_enc *)tbase;
- uint8_t out[MAX_PKT_BURST];
- uint16_t i = 0, nb_rx = 0, nb_enc=0, j = 0;
+ cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+// cipher_xform.next = &auth_xform;
+ cipher_xform.next = NULL; //CRYPTO_ONLY
- for (uint16_t j = 0; j < n_pkts; ++j) {
- out[j] = handle_esp_ah_enc(task, mbufs[j], task->ops_burst[nb_enc]);
- if (out[j] != OUT_DISCARD)
- ++nb_enc;
- }
+ cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
+ cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+ cipher_xform.cipher.key.data = aes_cbc_key;
+ cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
- if (rte_cryptodev_enqueue_burst(task->cdev_id, task->qp_id, task->ops_burst, nb_enc) != nb_enc) {
- plog_info("Error enc enqueue_burst\n");
- return -1;
- }
+ cipher_xform.cipher.iv.offset = IV_OFFSET;
+ cipher_xform.cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
+
+ //Setup HMAC Parameters
+ auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+ auth_xform.next = NULL;
+ auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
+ auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
+ auth_xform.auth.key.length = DIGEST_BYTE_LENGTH_SHA1;
+ auth_xform.auth.key.data = hmac_sha1_key;
+ auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;
+
+ auth_xform.auth.iv.offset = 0;
+ auth_xform.auth.iv.length = 0;
- do {
- nb_rx = rte_cryptodev_dequeue_burst(task->cdev_id, task->qp_id, task->ops_burst+i, nb_enc-i);
- i += nb_rx;
- } while (i < nb_enc);
+ task->sess = rte_cryptodev_sym_session_create(task->cdev_id,
+ &cipher_xform, task->session_pool);
+	PROX_PANIC(task->sess == NULL, "Failed ENC sym_session_create\n");
- return task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
+ task->local_ipv4 = rte_cpu_to_be_32(targ->local_ipv4);
+ task->remote_ipv4 = rte_cpu_to_be_32(targ->remote_ipv4);
+ //memcpy(&task->src_mac, &prox_port_cfg[task->base.tx_params_hw.tx_port_queue->port].eth_addr, sizeof(prox_rte_ether_addr));
+ struct prox_port_cfg *port = find_reachable_port(targ);
+ memcpy(&task->local_mac, &port->eth_addr, sizeof(prox_rte_ether_addr));
+
+ if (targ->flags & TASK_ARG_DST_MAC_SET){
+ memcpy(&task->dst_mac, &targ->edaddr, sizeof(task->dst_mac));
+ plog_info("TASK_ARG_DST_MAC_SET ("MAC_BYTES_FMT")\n",
+ MAC_BYTES(task->dst_mac.addr_bytes));
+ //prox_rte_ether_addr_copy(&ptask->dst_mac, &peth->d_addr);
+ //rte_memcpy(hdr, task->src_dst_mac, sizeof(task->src_dst_mac));
+ }
}
-static int handle_esp_dec_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
+static void init_task_esp_dec(struct task_base *tbase, struct task_args *targ)
{
- struct task_esp_dec *task = (struct task_esp_dec *)tbase;
- uint8_t out[MAX_PKT_BURST];
- uint16_t j, nb_dec=0, nb_rx=0;
+ struct task_esp *task = (struct task_esp *)tbase;
+ unsigned int session_size;
- for (j = 0; j < n_pkts; ++j) {
- out[j] = handle_esp_ah_dec(task, mbufs[j], task->ops_burst[nb_dec]);
- if (out[j] != OUT_DISCARD)
- ++nb_dec;
- }
+ tbase->flags |= TBASE_FLAG_NEVER_FLUSH;
+
+ uint8_t lcore_id = targ->lconf->id;
+ char name[64];
+ task->handle_esp_finish = handle_dec_finish;
+ task->handle_esp_ah = handle_esp_ah_dec;
+ task->len = 0;
+ task->pkts_in_flight = 0;
+ sprintf(name, "core_%03u_crypto_pool", lcore_id);
+ task->crypto_op_pool = rte_crypto_op_pool_create(name,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC, targ->nb_mbuf, 128,
+ MAXIMUM_IV_LENGTH, rte_socket_id());
+	PROX_PANIC(task->crypto_op_pool == NULL,
+			"Can't create DEC CRYPTO_OP_POOL\n");
+
+ task->cdev_id = get_cdev_id();
+ struct rte_cryptodev_config cdev_conf;
+ cdev_conf.nb_queue_pairs = 2;
+ cdev_conf.socket_id = SOCKET_ID_ANY;
+ cdev_conf.socket_id = rte_socket_id();
+ rte_cryptodev_configure(task->cdev_id, &cdev_conf);
+
+ session_size = rte_cryptodev_sym_get_private_session_size(
+ task->cdev_id);
+ plog_info("rte_cryptodev_sym_get_private_session_size=%d\n",
+ session_size);
+ sprintf(name, "core_%03u_session_pool", lcore_id);
+ task->session_pool = rte_cryptodev_sym_session_pool_create(name,
+ MAX_SESSIONS,
+ session_size,
+ POOL_CACHE_SIZE,
+ 0, rte_socket_id());
+ PROX_PANIC(task->session_pool == NULL, "Failed rte_mempool_create\n");
+
+ task->qp_id=0;
+ plog_info("dec: task->qp_id=%u\n", task->qp_id);
+ struct prox_rte_cryptodev_qp_conf qp_conf;
+ qp_conf.nb_descriptors = 2048;
+ qp_conf.mp_session = task->session_pool;
+ prox_rte_cryptodev_queue_pair_setup(task->cdev_id, task->qp_id,
+ &qp_conf, rte_cryptodev_socket_id(task->cdev_id));
+
+ int ret = rte_cryptodev_start(task->cdev_id);
+ PROX_PANIC(ret < 0, "Failed to start device\n");
+
+ //Setup Cipher Parameters
+ struct rte_crypto_sym_xform cipher_xform = {0};
+ struct rte_crypto_sym_xform auth_xform = {0};
- if (rte_cryptodev_enqueue_burst(task->cdev_id, task->qp_id, task->ops_burst, nb_dec) != nb_dec) {
- plog_info("Error dec enqueue_burst\n");
- return -1;
+ cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ cipher_xform.next = NULL;
+ cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
+ cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
+ cipher_xform.cipher.key.data = aes_cbc_key;
+ cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
+
+ cipher_xform.cipher.iv.offset = IV_OFFSET;
+ cipher_xform.cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
+
+ //Setup HMAC Parameters
+ auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+ auth_xform.next = &cipher_xform;
+ auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
+ auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
+ auth_xform.auth.key.length = DIGEST_BYTE_LENGTH_SHA1;
+ auth_xform.auth.key.data = hmac_sha1_key;
+ auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;
+
+ auth_xform.auth.iv.offset = 0;
+ auth_xform.auth.iv.length = 0;
+
+ task->sess = rte_cryptodev_sym_session_create(task->cdev_id, &cipher_xform,
+ task->session_pool);
+	PROX_PANIC(task->sess == NULL, "Failed DEC sym_session_create\n");
+
+ task->local_ipv4 = rte_cpu_to_be_32(targ->local_ipv4);
+ task->remote_ipv4 = rte_cpu_to_be_32(targ->remote_ipv4);
+ //memcpy(&task->src_mac, &prox_port_cfg[task->base.tx_params_hw.tx_port_queue->port].eth_addr, sizeof(prox_rte_ether_addr));
+ struct prox_port_cfg *port = find_reachable_port(targ);
+ memcpy(&task->local_mac, &port->eth_addr, sizeof(prox_rte_ether_addr));
+
+ if (targ->flags & TASK_ARG_DST_MAC_SET){
+ memcpy(&task->dst_mac, &targ->edaddr, sizeof(task->dst_mac));
+ plog_info("TASK_ARG_DST_MAC_SET ("MAC_BYTES_FMT")\n",
+ MAC_BYTES(task->dst_mac.addr_bytes));
+ //prox_rte_ether_addr_copy(&ptask->dst_mac, &peth->d_addr);
+ //rte_memcpy(hdr, task->src_dst_mac, sizeof(task->src_dst_mac));
}
+}
- j=0;
- do {
- nb_rx = rte_cryptodev_dequeue_burst(task->cdev_id, task->qp_id,
- task->ops_burst+j, nb_dec-j);
- j += nb_rx;
- } while (j < nb_dec);
-
- for (j = 0; j < nb_dec; ++j) {
- if (task->ops_burst[j]->status != RTE_CRYPTO_OP_STATUS_SUCCESS){
- plog_info("err: task->ops_burst[%d].status=%d\n", j, task->ops_burst[j]->status);
- //!!!TODO!!! find mbuf and discard it!!!
- //for now just send it further
- //plogdx_info(mbufs[j], "RX: ");
- }
- if (task->ops_burst[j]->status == RTE_CRYPTO_OP_STATUS_SUCCESS) {
- struct rte_mbuf *mbuf = task->ops_burst[j]->sym->m_src;
- handle_esp_ah_dec_finish2(task, mbuf);//TODO set out[j] properly
+static int crypto_send_burst(struct task_esp *task, uint16_t n)
+{
+ uint8_t out[MAX_PKT_BURST];
+ struct rte_mbuf *mbufs[MAX_PKT_BURST];
+ unsigned ret;
+ unsigned i = 0;
+ ret = rte_cryptodev_enqueue_burst(task->cdev_id,
+ task->qp_id, task->ops_burst, n);
+ task->pkts_in_flight += ret;
+ if (unlikely(ret < n)) {
+ for (i = 0; i < (n-ret); i++) {
+ mbufs[i] = task->ops_burst[ret + i]->sym->m_src;
+ out[i] = OUT_DISCARD;
+ rte_crypto_op_free(task->ops_burst[ret + i]);
+ }
+ return task->base.tx_pkt(&task->base, mbufs, i, out);
+ }
+ return 0;
+}
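/*
 * crypto_send_burst() intentionally does not retry a partial enqueue: when
 * the cryptodev queue pair accepts fewer than n ops, the mbufs behind the
 * rejected ops are transmitted immediately with OUT_DISCARD and their crypto
 * ops freed, while pkts_in_flight only counts ops that actually entered the
 * queue and will come back through rte_cryptodev_dequeue_burst().
 */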
+
+static int handle_esp_bulk(struct task_base *tbase, struct rte_mbuf **mbufs,
+ uint16_t n_pkts)
+{
+ struct task_esp *task = (struct task_esp *)tbase;
+ uint8_t out[MAX_PKT_BURST];
+ uint8_t result = 0;
+ uint16_t nb_deq = 0, j, idx = 0;
+ struct rte_mbuf *drop_mbufs[MAX_PKT_BURST];
+ struct rte_crypto_op *ops_burst[MAX_PKT_BURST];
+ int nbr_tx_pkt = 0;
+
+ if (likely(n_pkts != 0)) {
+ if (rte_crypto_op_bulk_alloc(task->crypto_op_pool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ ops_burst, n_pkts) != n_pkts) {
+			plog_info("Failed to allocate crypto operations, "
+					"discarding %d packets\n", n_pkts);
+ for (j = 0; j < n_pkts; j++) {
+ out[j] = OUT_DISCARD;
+ }
+ nbr_tx_pkt += task->base.tx_pkt(&task->base, mbufs, n_pkts,
+ out);
+ }
+ else {
+ for (j = 0; j < n_pkts; j++) {
+ result = task->handle_esp_ah(task, mbufs[j],
+ ops_burst[j]);
+ if (result == 0) {
+ task->ops_burst[task->len] = ops_burst[j];
+ task->len++;
+ /* enough ops to be sent */
+ if (task->len == MAX_PKT_BURST) {
+ nbr_tx_pkt += crypto_send_burst(task,
+ (uint16_t) MAX_PKT_BURST);
+ task->len = 0;
+ }
+ }
+ else {
+ drop_mbufs[idx] = mbufs[j];
+ out[idx] = result;
+ idx++;
+ rte_crypto_op_free(ops_burst[j]);
+				plog_info("Failed handle_esp_ah for 1 packet\n");
+ }
+ }
+ if (idx) nbr_tx_pkt += task->base.tx_pkt(&task->base,
+ drop_mbufs, idx, out);
}
+ } else if (task->len) {
+		// No packets were received on the rx queue, but this handle
+		// function was called anyway since some packets were not yet
+		// enqueued. Enqueue them here to minimize latency, and to cover
+		// the case where no new packets will arrive.
+ nbr_tx_pkt += crypto_send_burst(task, task->len);
+ task->len = 0;
}
-
- return task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
+ if (task->pkts_in_flight) {
+ do {
+ nb_deq = rte_cryptodev_dequeue_burst(task->cdev_id,
+ task->qp_id, ops_burst, MAX_PKT_BURST);
+ task->pkts_in_flight -= nb_deq;
+ for (j = 0; j < nb_deq; j++) {
+ mbufs[j] = ops_burst[j]->sym->m_src;
+ out[j] = task->handle_esp_finish(task, mbufs[j],
+ ops_burst[j]->status);
+ rte_crypto_op_free(ops_burst[j]);
+ }
+ nbr_tx_pkt += task->base.tx_pkt(&task->base, mbufs, nb_deq,
+ out);
+ } while (nb_deq == MAX_PKT_BURST);
+ }
+ return nbr_tx_pkt;
}
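/*
 * handle_esp_bulk() replaces the old blocking enqueue/poll pairs with a
 * pipelined model shared by both directions: packets are turned into crypto
 * ops and buffered in task->ops_burst, full MAX_PKT_BURST batches (or the
 * leftovers on an empty poll, which TASK_FEATURE_ZERO_RX makes possible) are
 * pushed to the cryptodev, and every call drains whatever has completed,
 * running the per-direction handle_esp_finish callback before transmitting.
 * Encryption and decryption now differ only in the two function pointers set
 * up in init_task_esp_enc()/init_task_esp_dec().
 */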
struct task_init task_init_esp_enc = {
.mode = ESP_ENC,
.mode_str = "esp_enc",
.init = init_task_esp_enc,
- .handle = handle_esp_enc_bulk,
- .size = sizeof(struct task_esp_enc),
+ .handle = handle_esp_bulk,
+ .flag_features = TASK_FEATURE_ZERO_RX,
+ .size = sizeof(struct task_esp),
};
struct task_init task_init_esp_dec = {
- .mode = ESP_ENC,
+ .mode = ESP_DEC,
.mode_str = "esp_dec",
.init = init_task_esp_dec,
- .handle = handle_esp_dec_bulk,
- .size = sizeof(struct task_esp_dec),
+ .handle = handle_esp_bulk,
+ .flag_features = TASK_FEATURE_ZERO_RX,
+ .size = sizeof(struct task_esp),
};
__attribute__((constructor)) static void reg_task_esp_enc(void)
diff --git a/VNFs/DPPD-PROX/handle_fm.c b/VNFs/DPPD-PROX/handle_fm.c
index 470082b0..75d0cee1 100644
--- a/VNFs/DPPD-PROX/handle_fm.c
+++ b/VNFs/DPPD-PROX/handle_fm.c
@@ -57,7 +57,7 @@ struct eth_ip4_udp {
prox_rte_udp_hdr udp;
prox_rte_tcp_hdr tcp;
} l4;
-} __attribute__((packed));
+} __attribute__((packed)) __attribute__((__aligned__(2)));
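/*
 * __packed__ alone drops the struct's alignment to 1 byte; adding
 * __aligned__(2) restores the 2-byte alignment the leading Ethernet header
 * naturally has, which avoids the unaligned-pointer warnings newer
 * compilers/DPDK headers raise for pointers into fully packed structs.
 */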
union pkt_type {
struct {
diff --git a/VNFs/DPPD-PROX/handle_gen.c b/VNFs/DPPD-PROX/handle_gen.c
index adcabd79..2c8a65c7 100644
--- a/VNFs/DPPD-PROX/handle_gen.c
+++ b/VNFs/DPPD-PROX/handle_gen.c
@@ -13,6 +13,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.
*/
+
+#include <rte_common.h>
+#ifndef __rte_cache_aligned
+#include <rte_memory.h>
+#endif
+
#include <rte_mbuf.h>
#include <pcap.h>
#include <string.h>
@@ -21,6 +27,7 @@
#include <rte_version.h>
#include <rte_byteorder.h>
#include <rte_ether.h>
+#include <rte_hash.h>
#include <rte_hash_crc.h>
#include <rte_malloc.h>
@@ -49,6 +56,7 @@
#include "handle_master.h"
#include "defines.h"
#include "prox_ipv6.h"
+#include "handle_lb_5tuple.h"
struct pkt_template {
uint16_t len;
@@ -57,6 +65,13 @@ struct pkt_template {
uint8_t *buf;
};
+#define MAX_STORE_PKT_SIZE 2048
+
+struct packet {
+ unsigned int len;
+ unsigned char buf[MAX_STORE_PKT_SIZE];
+};
+
#define IP4(x) x & 0xff, (x >> 8) & 0xff, (x >> 16) & 0xff, x >> 24
#define DO_PANIC 1
@@ -65,6 +80,8 @@ struct pkt_template {
#define FROM_PCAP 1
#define NOT_FROM_PCAP 0
+#define MAX_RANGES 64
+
#define TASK_OVERWRITE_SRC_MAC_WITH_PORT_MAC 1
static void pkt_template_init_mbuf(struct pkt_template *pkt_template, struct rte_mbuf *mbuf, uint8_t *pkt)
@@ -90,6 +107,10 @@ struct task_gen_pcap {
uint32_t socket_id;
};
+struct flows {
+ uint32_t packet_id;
+};
+
struct task_gen {
struct task_base base;
uint64_t hz;
@@ -110,10 +131,13 @@ struct task_gen {
uint16_t packet_id_pos;
uint16_t accur_pos;
uint16_t sig_pos;
+ uint16_t flow_id_pos;
+ uint16_t packet_id_in_flow_pos;
uint32_t sig;
uint32_t socket_id;
uint8_t generator_id;
uint8_t n_rands; /* number of randoms */
+ uint8_t n_ranges; /* number of ranges */
uint8_t min_bulk_size;
uint8_t max_bulk_size;
uint8_t lat_enabled;
@@ -125,6 +149,7 @@ struct task_gen {
uint16_t rand_offset; /* each random has an offset*/
uint8_t rand_len; /* # bytes to take from random (no bias introduced) */
} rand[64];
+ struct range ranges[MAX_RANGES];
uint64_t accur[ACCURACY_WINDOW];
uint64_t pkt_tsc_offset[64];
struct pkt_template *pkt_template_orig; /* packet templates (from inline or from pcap) */
@@ -136,6 +161,12 @@ struct task_gen {
uint32_t imix_pkt_sizes[MAX_IMIX_PKTS];
uint32_t imix_nb_pkts;
uint32_t new_imix_nb_pkts;
+ uint32_t store_pkt_id;
+ uint32_t store_msk;
+ struct packet *store_buf;
+ FILE *fp;
+ struct rte_hash *flow_id_table;
+	struct flows *flows;
} __rte_cache_aligned;
static void task_gen_set_pkt_templates_len(struct task_gen *task, uint32_t *pkt_sizes);
@@ -379,6 +410,146 @@ static void task_gen_apply_all_random_fields(struct task_gen *task, uint8_t **pk
task_gen_apply_random_fields(task, pkt_hdr[i]);
}
+static void task_gen_apply_ranges(struct task_gen *task, uint8_t *pkt_hdr)
+{
+ uint32_t ret;
+ if (!task->n_ranges)
+ return;
+
+ for (uint16_t j = 0; j < task->n_ranges; ++j) {
+ if (unlikely(task->ranges[j].value == task->ranges[j].max))
+ task->ranges[j].value = task->ranges[j].min;
+ else
+ task->ranges[j].value++;
+ ret = rte_bswap32(task->ranges[j].value);
+ uint8_t *pret = (uint8_t*)&ret;
+ rte_memcpy(pkt_hdr + task->ranges[j].offset, pret + 4 - task->ranges[j].range_len, task->ranges[j].range_len);
+ }
+}
+
+static void task_gen_apply_all_ranges(struct task_gen *task, uint8_t **pkt_hdr, uint32_t count)
+{
+ uint32_t ret;
+ if (!task->n_ranges)
+ return;
+
+ for (uint16_t i = 0; i < count; ++i) {
+ task_gen_apply_ranges(task, pkt_hdr[i]);
+ }
+}
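/*
 * Each configured range steps its value from min to max and wraps; only the
 * low range_len bytes of the byte-swapped value are copied into the packet,
 * so the field ends up in network byte order.  As a hypothetical example, a
 * 2-byte range at offset 36 (the UDP destination port of a plain eth/IPv4/UDP
 * template) with min=1000 and max=1003 cycles through 0x03e8, 0x03e9, 0x03ea,
 * 0x03eb across consecutive generated packets.
 */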
+
+static inline uint32_t gcd(uint32_t a, uint32_t b)
+{
+ // Euclidean algorithm
+ uint32_t t;
+ while (b != 0) {
+ t = b;
+ b = a % b;
+ a = t;
+ }
+ return a;
+}
+
+static inline uint32_t lcm(uint32_t a, uint32_t b)
+{
+ return ((a / gcd(a, b)) * b);
+}
+
+static uint32_t get_n_range_flows(struct task_gen *task)
+{
+ uint32_t t = 1;
+ for (int i = 0; i < task->n_ranges; i++) {
+ t = lcm((task->ranges[i].max - task->ranges[i].min) + 1, t);
+ }
+ return t;
+}
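/*
 * get_n_range_flows() sizes the flow space as the least common multiple of
 * the individual range lengths (max - min + 1), since that is how many
 * packets it takes before every range is back at its starting value at the
 * same time.  A self-contained check of that reasoning with hypothetical
 * range lengths 4 and 6:
 */
#include <assert.h>
#include <stdint.h>

static uint32_t gcd_u32(uint32_t a, uint32_t b)
{
	while (b != 0) {
		uint32_t t = b;
		b = a % b;
		a = t;
	}
	return a;
}

static uint32_t lcm_u32(uint32_t a, uint32_t b)
{
	return (a / gcd_u32(a, b)) * b;
}

static void check_range_flow_count(void)
{
	/* ranges spanning 4 and 6 values line up again every 12 packets */
	assert(lcm_u32(4, 6) == 12);
}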
+
+static uint32_t get_n_rand_flows(struct task_gen *task)
+{
+ uint32_t t = 0;
+ for (int i = 0; i < task->n_rands; i++) {
+ t += __builtin_popcount(task->rand[i].rand_mask);
+ }
+ PROX_PANIC(t > 31, "Too many random bits - maximum 31 supported\n");
+ return 1 << t;
+}
+
+//void add_to_hash_table(struct task_gen *task, uint32_t *buffer, uint32_t *idx, uint32_t mask, uint32_t bit_pos, uint32_t val, uint32_t fixed_bits, uint32_t rand_offset) {
+// uint32_t ret_tmp = val | fixed_bits;
+// ret_tmp = rte_bswap32(ret_tmp);
+// uint8_t *pret_tmp = (uint8_t*)&ret_tmp;
+// rte_memcpy(buf + rand_offset, pret_tmp + 4 - rand_len, rand_len);
+//
+// init idx
+// alloc buffer
+// init/alloc hash_table
+//void build_buffer(struct task_gen *task, uint32_t *buffer, uint32_t *idx, uint32_t mask, uint32_t bit_pos, uint32_t val)
+//{
+// if (mask == 0) {
+// buffer[*idx] = val;
+// *idx = (*idx) + 1;
+// return;
+// }
+// build_buffer(task, but, mask >> 1, bit_pos + 1, val);
+// if (mask & 1) {
+// build_buffer(task, but, mask >> 1, bit_pos + 1, val | (1 << bit_pos));
+//}
+
+static void build_flow_table(struct task_gen *task)
+{
+ uint8_t buf[2048], *key_fields;
+ union ipv4_5tuple_host key;
+ struct pkt_template *pkt_template;
+ uint32_t n_range_flows = get_n_range_flows(task);
+ // uint32_t n_rand_flows = get_n_rand_flows(task);
+ // uint32_t n_flows= n_range_flows * n_rand_flows * task->orig_n_pkts;
+ // for (int i = 0; i < task->n_rands; i++) {
+ // build_buffer(task, task->values_buf[i], &task->values_idx[i], task->rand[i].rand_mask, 0, 0);
+ // }
+
+ uint32_t n_flows = n_range_flows * task->orig_n_pkts;
+
+ for (uint32_t k = 0; k < task->orig_n_pkts; k++) {
+ memcpy(buf, task->pkt_template[k].buf, task->pkt_template[k].len);
+ for (uint32_t j = 0; j < n_range_flows; j++) {
+ task_gen_apply_ranges(task, buf);
+ key_fields = buf + sizeof(prox_rte_ether_hdr) + offsetof(prox_rte_ipv4_hdr, time_to_live);
+ key.xmm = _mm_loadu_si128((__m128i*)(key_fields));
+ key.pad0 = key.pad1 = 0;
+ int idx = rte_hash_add_key(task->flow_id_table, (const void *)&key);
+ PROX_PANIC(idx < 0, "Unable to add key in table\n");
+ if (idx >= 0)
+ plog_dbg("Added key %d, %x, %x, %x, %x\n", key.proto, key.ip_src, key.ip_dst, key.port_src, key.port_dst);
+ }
+ }
+}
+
+static int32_t task_gen_get_flow_id(struct task_gen *task, uint8_t *pkt_hdr)
+{
+ int ret = 0;
+ union ipv4_5tuple_host key;
+ uint8_t *hdr = pkt_hdr + sizeof(prox_rte_ether_hdr) + offsetof(prox_rte_ipv4_hdr, time_to_live);
+ // __m128i data = _mm_loadu_si128((__m128i*)(hdr));
+ // key.xmm = _mm_and_si128(data, mask0);
+ key.xmm = _mm_loadu_si128((__m128i*)(hdr));
+ key.pad0 = key.pad1 = 0;
+ ret = rte_hash_lookup(task->flow_id_table, (const void *)&key);
+ if (ret < 0) {
+ plog_err("Flow not found: %d, %x, %x, %x, %x\n", key.proto, key.ip_src, key.ip_dst, key.port_src, key.port_dst);
+ }
+ return ret;
+}
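/*
 * The flow id lookup reuses the ipv4_5tuple_host layout pulled in from
 * handle_lb_5tuple.h: the 16-byte key is loaded starting at the IPv4
 * time_to_live field, so it spans TTL, protocol, header checksum, source and
 * destination addresses and the L4 ports.  The TTL and checksum bytes
 * (pad0/pad1) are zeroed before both rte_hash_add_key() in build_flow_table()
 * and rte_hash_lookup() here, so insertion and lookup hash identical bytes.
 */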
+
+static void task_gen_apply_all_flow_id(struct task_gen *task, uint8_t **pkt_hdr, uint32_t count, int32_t *flow_id)
+{
+ if (task->flow_id_pos) {
+ for (uint16_t j = 0; j < count; ++j) {
+ flow_id[j] = task_gen_get_flow_id(task, pkt_hdr[j]);
+ *(int32_t *)(pkt_hdr[j] + task->flow_id_pos) = flow_id[j];
+ }
+ }
+}
+
static void task_gen_apply_accur_pos(struct task_gen *task, uint8_t *pkt_hdr, uint32_t accuracy)
{
*(uint32_t *)(pkt_hdr + task->accur_pos) = accuracy;
@@ -390,7 +561,7 @@ static void task_gen_apply_sig(struct task_gen *task, struct pkt_template *dst)
*(uint32_t *)(dst->buf + task->sig_pos) = task->sig;
}
-static void task_gen_apply_all_accur_pos(struct task_gen *task, struct rte_mbuf **mbufs, uint8_t **pkt_hdr, uint32_t count)
+static void task_gen_apply_all_accur_pos(struct task_gen *task, uint8_t **pkt_hdr, uint32_t count)
{
if (!task->accur_pos)
return;
@@ -411,7 +582,7 @@ static void task_gen_apply_unique_id(struct task_gen *task, uint8_t *pkt_hdr, co
*dst = *id;
}
-static void task_gen_apply_all_unique_id(struct task_gen *task, struct rte_mbuf **mbufs, uint8_t **pkt_hdr, uint32_t count)
+static void task_gen_apply_all_unique_id(struct task_gen *task, uint8_t **pkt_hdr, uint32_t count)
{
if (!task->packet_id_pos)
return;
@@ -423,6 +594,26 @@ static void task_gen_apply_all_unique_id(struct task_gen *task, struct rte_mbuf
}
}
+static void task_gen_apply_id_in_flows(struct task_gen *task, uint8_t *pkt_hdr, const struct unique_id *id)
+{
+ struct unique_id *dst = (struct unique_id *)(pkt_hdr + task->packet_id_in_flow_pos);
+ *dst = *id;
+}
+
+static void task_gen_apply_all_id_in_flows(struct task_gen *task, uint8_t **pkt_hdr, uint32_t count, int32_t *idx)
+{
+ if (!task->packet_id_in_flow_pos)
+ return;
+
+ for (uint16_t i = 0; i < count; ++i) {
+ struct unique_id id;
+ if (idx[i] >= 0 ) {
+ unique_id_init(&id, task->generator_id, task->flows[idx[i]].packet_id++);
+ task_gen_apply_id_in_flows(task, pkt_hdr[i], &id);
+ }
+ }
+}
+
static void task_gen_checksum_packets(struct task_gen *task, struct rte_mbuf **mbufs, uint8_t **pkt_hdr, uint32_t count)
{
if (!(task->runtime_flags & TASK_TX_CRC))
@@ -712,7 +903,7 @@ static int check_fields_in_bounds(struct task_gen *task, uint32_t pkt_size, int
return 0;
}
-static int task_gen_set_eth_ip_udp_sizes(struct task_gen *task, uint32_t n_orig_pkts, uint32_t nb_pkt_sizes, uint32_t *pkt_sizes)
+static int task_gen_set_eth_ip_udp_sizes(struct task_gen *task, uint32_t orig_n_pkts, uint32_t nb_pkt_sizes, uint32_t *pkt_sizes)
{
size_t k;
uint32_t l4_len;
@@ -720,8 +911,8 @@ static int task_gen_set_eth_ip_udp_sizes(struct task_gen *task, uint32_t n_orig_
struct pkt_template *template;
for (size_t j = 0; j < nb_pkt_sizes; ++j) {
- for (size_t i = 0; i < n_orig_pkts; ++i) {
- k = j * n_orig_pkts + i;
+ for (size_t i = 0; i < orig_n_pkts; ++i) {
+ k = j * orig_n_pkts + i;
template = &task->pkt_template[k];
if (template->l2_len == 0)
continue;
@@ -949,17 +1140,31 @@ static int handle_gen_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uin
if (new_pkts == NULL)
return 0;
uint8_t *pkt_hdr[MAX_RING_BURST];
-
+ int32_t flow_id[MAX_RING_BURST];
task_gen_load_and_prefetch(new_pkts, pkt_hdr, send_bulk);
task_gen_build_packets(task, new_pkts, pkt_hdr, send_bulk);
task_gen_apply_all_random_fields(task, pkt_hdr, send_bulk);
- task_gen_apply_all_accur_pos(task, new_pkts, pkt_hdr, send_bulk);
- task_gen_apply_all_unique_id(task, new_pkts, pkt_hdr, send_bulk);
+ task_gen_apply_all_ranges(task, pkt_hdr, send_bulk);
+ task_gen_apply_all_accur_pos(task, pkt_hdr, send_bulk);
+ task_gen_apply_all_flow_id(task, pkt_hdr, send_bulk, flow_id);
+ task_gen_apply_all_unique_id(task, pkt_hdr, send_bulk);
+ task_gen_apply_all_id_in_flows(task, pkt_hdr, send_bulk, flow_id);
uint64_t tsc_before_tx;
tsc_before_tx = task_gen_write_latency(task, pkt_hdr, send_bulk);
task_gen_checksum_packets(task, new_pkts, pkt_hdr, send_bulk);
+ if (task->store_msk) {
+ for (uint32_t i = 0; i < send_bulk; i++) {
+ if (out[i] != OUT_DISCARD) {
+ uint8_t *hdr;
+ hdr = (uint8_t *)rte_pktmbuf_mtod(new_pkts[i], prox_rte_ether_hdr *);
+ memcpy(&task->store_buf[task->store_pkt_id & task->store_msk].buf, hdr, rte_pktmbuf_pkt_len(new_pkts[i]));
+ task->store_buf[task->store_pkt_id & task->store_msk].len = rte_pktmbuf_pkt_len(new_pkts[i]);
+ task->store_pkt_id++;
+ }
+ }
+ }
ret = task->base.tx_pkt(&task->base, new_pkts, send_bulk, out);
task_gen_store_accuracy(task, send_bulk, tsc_before_tx);
@@ -1184,7 +1389,10 @@ static void task_gen_reset_pkt_templates_content(struct task_gen *task)
static void task_gen_reset_pkt_templates(struct task_gen *task)
{
- task_gen_reset_pkt_templates_len(task);
+ if (task->imix_nb_pkts)
+ task_gen_set_pkt_templates_len(task, task->imix_pkt_sizes);
+ else
+ task_gen_reset_pkt_templates_len(task);
task_gen_reset_pkt_templates_content(task);
task_gen_pkt_template_recalc_all(task);
}
@@ -1254,7 +1462,7 @@ static struct rte_mempool *task_gen_create_mempool(struct task_args *targ, uint1
uint32_t mbuf_size = TX_MBUF_SIZE;
if (max_frame_size + (unsigned)sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM > mbuf_size)
mbuf_size = max_frame_size + (unsigned)sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM;
- plog_info("\tCreating mempool with name '%s'\n", name);
+ plog_info("\t\tCreating mempool with name '%s'\n", name);
ret = rte_mempool_create(name, targ->nb_mbuf - 1, mbuf_size,
targ->nb_cache_mbuf, sizeof(struct rte_pktmbuf_pool_private),
rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, 0,
@@ -1262,8 +1470,8 @@ static struct rte_mempool *task_gen_create_mempool(struct task_args *targ, uint1
PROX_PANIC(ret == NULL, "Failed to allocate dummy memory pool on socket %u with %u elements\n",
sock_id, targ->nb_mbuf - 1);
- plog_info("\tMempool %p size = %u * %u cache %u, socket %d\n", ret,
- targ->nb_mbuf - 1, mbuf_size, targ->nb_cache_mbuf, sock_id);
+ plog_info("\t\tMempool %p size = %u * %u cache %u, socket %d\n", ret,
+ targ->nb_mbuf - 1, mbuf_size, targ->nb_cache_mbuf, sock_id);
return ret;
}
@@ -1328,6 +1536,14 @@ void task_gen_reset_randoms(struct task_base *tbase)
task->n_rands = 0;
}
+void task_gen_reset_ranges(struct task_base *tbase)
+{
+ struct task_gen *task = (struct task_gen *)tbase;
+
+ memset(task->ranges, 0, task->n_ranges * sizeof(struct range));
+ task->n_ranges = 0;
+}
+
int task_gen_set_value(struct task_base *tbase, uint32_t value, uint32_t offset, uint32_t len)
{
struct task_gen *task = (struct task_gen *)tbase;
@@ -1351,6 +1567,11 @@ void task_gen_reset_values(struct task_base *tbase)
struct task_gen *task = (struct task_gen *)tbase;
task_gen_reset_pkt_templates_content(task);
+ task_gen_pkt_template_recalc_metadata(task);
+ check_all_pkt_size(task, DO_NOT_PANIC);
+ check_all_fields_in_bounds(task, DO_NOT_PANIC);
+ task_gen_set_eth_ip_udp_sizes(task, task->orig_n_pkts, task->imix_nb_pkts, task->imix_pkt_sizes);
+
if (task->flags & TASK_OVERWRITE_SRC_MAC_WITH_PORT_MAC) {
for (uint32_t i = 0; i < task->n_pkts; ++i) {
rte_memcpy(&task->pkt_template[i].buf[sizeof(prox_rte_ether_addr)], &task->src_mac, sizeof(prox_rte_ether_addr));
@@ -1365,6 +1586,13 @@ uint32_t task_gen_get_n_randoms(struct task_base *tbase)
return task->n_rands;
}
+uint32_t task_gen_get_n_ranges(struct task_base *tbase)
+{
+ struct task_gen *task = (struct task_gen *)tbase;
+
+ return task->n_ranges;
+}
+
static void init_task_gen_pcap(struct task_base *tbase, struct task_args *targ)
{
struct task_gen_pcap *task = (struct task_gen_pcap *)tbase;
@@ -1420,6 +1648,26 @@ static int task_gen_find_random_with_offset(struct task_gen *task, uint32_t offs
return UINT32_MAX;
}
+int task_gen_add_range(struct task_base *tbase, struct range *range)
+{
+ struct task_gen *task = (struct task_gen *)tbase;
+ if (task->n_ranges == MAX_RANGES) {
+ plog_err("Too many ranges\n");
+ return -1;
+ }
+ task->ranges[task->n_ranges].min = range->min;
+ task->ranges[task->n_ranges].value = range->min;
+ uint32_t m = range->max;
+ task->ranges[task->n_ranges].range_len = 0;
+ while (m != 0) {
+ m >>= 8;
+ task->ranges[task->n_ranges].range_len++;
+ }
+ task->ranges[task->n_ranges].offset = range->offset;
+ task->ranges[task->n_ranges++].max = range->max;
+ return 0;
+}
+
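task_gen_add_range() stores, besides min/max/offset, a range_len that is simply the number of bytes needed to represent the largest value of the range (e.g. max 0x1FFFF gives 3), presumably so task_gen_apply_ranges() knows how many bytes to write at the configured offset. A standalone equivalent of that loop, for illustration only:

#include <stdint.h>

/* Number of bytes needed to hold 'max'; mirrors the while loop above. */
static inline uint32_t range_len_bytes(uint32_t max)
{
	uint32_t len = 0;
	while (max != 0) {
		max >>= 8;
		len++;
	}
	return len;    /* range_len_bytes(0x1FFFF) == 3 */
}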
int task_gen_add_rand(struct task_base *tbase, const char *rand_str, uint32_t offset, uint32_t rand_id)
{
struct task_gen *task = (struct task_gen *)tbase;
@@ -1477,6 +1725,31 @@ static void start(struct task_base *tbase)
*/
}
+static void stop_gen(struct task_base *tbase)
+{
+ uint32_t i, j;
+ struct task_gen *task = (struct task_gen *)tbase;
+ if (task->store_msk) {
+ for (i = task->store_pkt_id & task->store_msk; i < task->store_msk + 1; i++) {
+ if (task->store_buf[i].len) {
+ fprintf(task->fp, "%06d: ", i);
+ for (j = 0; j < task->store_buf[i].len; j++) {
+ fprintf(task->fp, "%02x ", task->store_buf[i].buf[j]);
+ }
+ fprintf(task->fp, "\n");
+ }
+ }
+ for (i = 0; i < (task->store_pkt_id & task->store_msk); i++) {
+ if (task->store_buf[i].len) {
+ fprintf(task->fp, "%06d: ", i);
+ for (j = 0; j < task->store_buf[i].len; j++) {
+ fprintf(task->fp, "%02x ", task->store_buf[i].buf[j]);
+ }
+ fprintf(task->fp, "\n");
+ }
+ }
+ }
+}
static void start_pcap(struct task_base *tbase)
{
struct task_gen_pcap *task = (struct task_gen_pcap *)tbase;
@@ -1508,7 +1781,7 @@ static void init_task_gen(struct task_base *tbase, struct task_args *targ)
struct prox_port_cfg *port = find_reachable_port(targ);
// TODO: check that all reachable ports have the same mtu...
if (port) {
- task->cksum_offload = port->requested_tx_offload & (DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM);
+ task->cksum_offload = port->requested_tx_offload & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_UDP_CKSUM);
task->port = port;
task->max_frame_size = port->mtu + PROX_RTE_ETHER_HDR_LEN + 2 * PROX_VLAN_TAG_SIZE;
} else {
@@ -1522,6 +1795,8 @@ static void init_task_gen(struct task_base *tbase, struct task_args *targ)
task->lat_pos = targ->lat_pos;
task->accur_pos = targ->accur_pos;
task->sig_pos = targ->sig_pos;
+ task->flow_id_pos = targ->flow_id_pos;
+ task->packet_id_in_flow_pos = targ->packet_id_in_flow_pos;
task->sig = targ->sig;
task->new_rate_bps = targ->rate_bps;
@@ -1552,7 +1827,7 @@ static void init_task_gen(struct task_base *tbase, struct task_args *targ)
PROX_PANIC((task->lat_pos || task->accur_pos) && !task->lat_enabled, "lat not enabled by lat pos or accur pos configured\n");
task->generator_id = targ->generator_id;
- plog_info("\tGenerator id = %d\n", task->generator_id);
+ plog_info("\t\tGenerator id = %d\n", task->generator_id);
// Allocate array holding bytes to tsc for supported frame sizes
task->bytes_to_tsc = prox_zmalloc(task->max_frame_size * MAX_PKT_BURST * sizeof(task->bytes_to_tsc[0]), task->socket_id);
@@ -1564,7 +1839,7 @@ static void init_task_gen(struct task_base *tbase, struct task_args *targ)
uint64_t bytes_per_hz = UINT64_MAX;
if ((task->port) && (task->port->max_link_speed != UINT32_MAX)) {
bytes_per_hz = task->port->max_link_speed * 125000L;
- plog_info("\tPort %u: max link speed is %ld Mbps\n",
+ plog_info("\t\tPort %u: max link speed is %ld Mbps\n",
(uint8_t)(task->port - prox_port_cfg), 8 * bytes_per_hz / 1000000);
}
// There are cases where hz estimate might be slighly over-estimated
@@ -1582,10 +1857,10 @@ static void init_task_gen(struct task_base *tbase, struct task_args *targ)
task->imix_pkt_sizes[i] = targ->imix_pkt_sizes[i];
}
if (!strcmp(targ->pcap_file, "")) {
- plog_info("\tUsing inline definition of a packet\n");
+ plog_info("\t\tUsing inline definition of a packet\n");
task_init_gen_load_pkt_inline(task, targ);
} else {
- plog_info("Loading from pcap %s\n", targ->pcap_file);
+ plog_info("\t\tLoading from pcap %s\n", targ->pcap_file);
task_init_gen_load_pcap(task, targ);
}
@@ -1601,6 +1876,46 @@ static void init_task_gen(struct task_base *tbase, struct task_args *targ)
PROX_PANIC(task_gen_add_rand(tbase, targ->rand_str[i], targ->rand_offset[i], UINT32_MAX),
"Failed to add random\n");
}
+ for (uint32_t i = 0; i < targ->n_ranges; ++i) {
+ PROX_PANIC(task_gen_add_range(tbase, &targ->range[i]), "Failed to add range\n");
+ }
+ if (targ->store_max) {
+ char filename[256];
+ sprintf(filename, "gen_buf_%02d_%02d", targ->lconf->id, targ->task);
+
+ task->store_msk = targ->store_max - 1;
+ task->store_buf = (struct packet *)malloc(sizeof(struct packet) * targ->store_max);
+ task->fp = fopen(filename, "w+");
+ PROX_PANIC(task->fp == NULL, "Unable to open %s\n", filename);
+ } else {
+ task->store_msk = 0;
+ }
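The capture path assumes store_max is a power of two: store_msk = store_max - 1 turns the ever-increasing store_pkt_id into a ring index with a single AND, and stop_gen() then dumps the ring oldest entry first. A minimal sketch of that indexing idiom, under the power-of-two assumption (names are illustrative, not PROX APIs):

#include <stdint.h>

/* Ring indexing as used by the store buffer; size must be a power of two
 * so that (size - 1) is an all-ones mask. */
static inline uint32_t ring_slot(uint32_t pkt_id, uint32_t size_pow2)
{
	uint32_t msk = size_pow2 - 1;   /* e.g. 1024 -> 0x3ff */
	return pkt_id & msk;            /* wraps without a modulo */
}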
+ uint32_t n_entries = get_n_range_flows(task) * task->orig_n_pkts * 4;
+#ifndef RTE_HASH_BUCKET_ENTRIES
+#define RTE_HASH_BUCKET_ENTRIES 8
+#endif
+ // cuckoo hash requires at least RTE_HASH_BUCKET_ENTRIES (8) entries
+ if (n_entries < RTE_HASH_BUCKET_ENTRIES)
+ n_entries = RTE_HASH_BUCKET_ENTRIES;
+
+ static char hash_name[30];
+ sprintf(hash_name, "A%03d_hash_gen_table", targ->lconf->id);
+ struct rte_hash_parameters hash_params = {
+ .name = hash_name,
+ .entries = n_entries,
+ .key_len = sizeof(union ipv4_5tuple_host),
+ .hash_func = rte_hash_crc,
+ .hash_func_init_val = 0,
+ .socket_id = task->socket_id,
+ };
+ plog_info("\t\thash table name = %s\n", hash_params.name);
+ task->flow_id_table = rte_hash_create(&hash_params);
+ PROX_PANIC(task->flow_id_table == NULL, "Failed to set up flow_id hash table for gen\n");
+ plog_info("\t\tflow_id hash table allocated, with %d entries of size %d\n", hash_params.entries, hash_params.key_len);
+ build_flow_table(task);
+ task->flows = (struct flows *)prox_zmalloc(n_entries * sizeof(struct flows), task->socket_id);
+ PROX_PANIC(task->flows == NULL, "Failed to allocate flows\n");
+ plog_info("\t\t%d flows allocated\n", n_entries);
}
static struct task_init task_init_gen = {
@@ -1616,7 +1931,8 @@ static struct task_init task_init_gen = {
#else
.flag_features = TASK_FEATURE_NEVER_DISCARDS | TASK_FEATURE_NO_RX,
#endif
- .size = sizeof(struct task_gen)
+ .size = sizeof(struct task_gen),
+ .stop_last = stop_gen
};
static struct task_init task_init_gen_l3 = {
diff --git a/VNFs/DPPD-PROX/handle_gen.h b/VNFs/DPPD-PROX/handle_gen.h
index bb85b0ca..bd8fae7b 100644
--- a/VNFs/DPPD-PROX/handle_gen.h
+++ b/VNFs/DPPD-PROX/handle_gen.h
@@ -17,6 +17,8 @@
#ifndef _HANDLE_GEN_H_
#define _HANDLE_GEN_H_
+#include "task_init.h"
+
struct unique_id {
uint8_t generator_id;
uint32_t packet_id;
@@ -41,11 +43,14 @@ int task_gen_set_pkt_size(struct task_base *tbase, uint32_t pkt_size);
int task_gen_set_imix(struct task_base *tbase, uint32_t nb_pkts, uint32_t *pkt_size);
void task_gen_set_rate(struct task_base *tbase, uint64_t bps);
void task_gen_reset_randoms(struct task_base *tbase);
+void task_gen_reset_ranges(struct task_base *tbase);
void task_gen_reset_values(struct task_base *tbase);
int task_gen_set_value(struct task_base *tbase, uint32_t value, uint32_t offset, uint32_t len);
int task_gen_add_rand(struct task_base *tbase, const char *rand_str, uint32_t offset, uint32_t rand_id);
+int task_gen_add_range(struct task_base *tbase, struct range *range);
uint32_t task_gen_get_n_randoms(struct task_base *tbase);
+uint32_t task_gen_get_n_ranges(struct task_base *tbase);
uint32_t task_gen_get_n_values(struct task_base *tbase);
#endif /* _HANDLE_GEN_H_ */
diff --git a/VNFs/DPPD-PROX/handle_impair.c b/VNFs/DPPD-PROX/handle_impair.c
index 3896b70f..a147d44d 100644
--- a/VNFs/DPPD-PROX/handle_impair.c
+++ b/VNFs/DPPD-PROX/handle_impair.c
@@ -55,7 +55,9 @@ struct task_impair {
unsigned queue_head;
unsigned queue_tail;
unsigned queue_mask;
- int tresh;
+ int tresh_no_drop;
+ int tresh_duplicate;
+ int tresh_delay;
unsigned int seed;
struct random state;
uint64_t last_idx;
@@ -72,10 +74,23 @@ static int handle_bulk_impair(struct task_base *tbase, struct rte_mbuf **mbufs,
static int handle_bulk_impair_random(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts);
static int handle_bulk_random_drop(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts);
-void task_impair_set_proba(struct task_base *tbase, float proba)
+void task_impair_set_proba_no_drop(struct task_base *tbase, float proba_no_drop)
{
struct task_impair *task = (struct task_impair *)tbase;
- task->tresh = ((uint64_t) RAND_MAX) * (uint32_t)(proba * 10000) / 1000000;
+ task->tresh_no_drop = ((uint64_t) RAND_MAX) * (uint32_t)(proba_no_drop * 10000) / 1000000;
+}
+
+void task_impair_set_proba_delay(struct task_base *tbase, float proba_delay)
+{
+ struct task_impair *task = (struct task_impair *)tbase;
+ task->tresh_delay = ((uint64_t) RAND_MAX) * (uint32_t)(proba_delay * 10000) / 1000000;
+ task->flags |= IMPAIR_NEED_UPDATE;
+}
+
+void task_impair_set_proba_duplicate(struct task_base *tbase, float proba_dup)
+{
+ struct task_impair *task = (struct task_impair *)tbase;
+ task->tresh_duplicate = ((uint64_t) RAND_MAX) * (uint32_t)(proba_dup * 10000) / 1000000;
}
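All three setters map a percentage onto a rand_r() threshold: the float is first converted to parts-per-million (percent * 10000) and then scaled to RAND_MAX, matching the config path below where targ->probability_* is already expressed in millionths. A drawn rand_r() value at or below the threshold takes the corresponding branch, so 25% yields a threshold of roughly RAND_MAX/4. A reduced sketch of that conversion (illustrative only):

#include <stdint.h>
#include <stdlib.h>

/* percent -> rand_r() threshold, same fixed-point scaling as above. */
static inline int proba_percent_to_tresh(float percent)
{
	return ((uint64_t)RAND_MAX) * (uint32_t)(percent * 10000) / 1000000;
}
/* proba_percent_to_tresh(25.0f) is approximately RAND_MAX / 4. */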
void task_impair_set_delay_us(struct task_base *tbase, uint32_t delay_us, uint32_t random_delay_us)
@@ -118,7 +133,7 @@ static void task_impair_update(struct task_base *tbase)
uint16_t idx = 0;
while (idx < MAX_PKT_BURST && task->queue_tail != task->queue_head) {
if (task->queue[task->queue_tail].tsc <= now) {
- out[idx] = rand_r(&task->seed) <= task->tresh? 0 : OUT_DISCARD;
+ out[idx] = rand_r(&task->seed) <= task->tresh_no_drop? 0 : OUT_DISCARD;
new_mbufs[idx++] = task->queue[task->queue_tail].mbuf;
task->queue_tail = (task->queue_tail + 1) & task->queue_mask;
}
@@ -140,7 +155,7 @@ static void task_impair_update(struct task_base *tbase)
while ((pkt_idx < MAX_PKT_BURST) && (task->last_idx != ((now_idx - 1) & DELAY_MAX_MASK))) {
struct queue *queue = &task->buffer[task->last_idx];
while ((pkt_idx < MAX_PKT_BURST) && (queue->queue_tail != queue->queue_head)) {
- out[pkt_idx] = rand_r(&task->seed) <= task->tresh? 0 : OUT_DISCARD;
+ out[pkt_idx] = rand_r(&task->seed) <= task->tresh_no_drop? 0 : OUT_DISCARD;
new_mbufs[pkt_idx++] = queue->queue_elem[queue->queue_tail].mbuf;
queue->queue_tail = (queue->queue_tail + 1) & task->queue_mask;
}
@@ -175,10 +190,10 @@ static void task_impair_update(struct task_base *tbase)
}
} else if (task->random_delay_us) {
size_t size = (DELAY_MAX_MASK + 1) * sizeof(struct queue);
- plog_info("Allocating %zd bytes\n", size);
+ plog_info("\t\tAllocating %zd bytes\n", size);
task->buffer = prox_zmalloc(size, task->socket_id);
PROX_PANIC(task->buffer == NULL, "Not enough memory to allocate buffer\n");
- plog_info("Allocating %d x %zd bytes\n", DELAY_MAX_MASK + 1, mem_size);
+ plog_info("\t\tAllocating %d x %zd bytes\n", DELAY_MAX_MASK + 1, mem_size);
for (int i = 0; i < DELAY_MAX_MASK + 1; i++) {
task->buffer[i].queue_elem = prox_zmalloc(mem_size, task->socket_id);
@@ -204,11 +219,11 @@ static int handle_bulk_random_drop(struct task_base *tbase, struct rte_mbuf **mb
if (task->flags & IMPAIR_SET_MAC) {
for (uint16_t i = 0; i < n_pkts; ++i) {
prox_rte_ether_addr_copy((prox_rte_ether_addr *)&task->src_mac[0], &hdr[i]->s_addr);
- out[i] = rand_r(&task->seed) <= task->tresh? 0 : OUT_DISCARD;
+ out[i] = rand_r(&task->seed) <= task->tresh_no_drop? 0 : OUT_DISCARD;
}
} else {
for (uint16_t i = 0; i < n_pkts; ++i) {
- out[i] = rand_r(&task->seed) <= task->tresh? 0 : OUT_DISCARD;
+ out[i] = rand_r(&task->seed) <= task->tresh_no_drop? 0 : OUT_DISCARD;
}
}
ret = task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
@@ -268,10 +283,10 @@ static int handle_bulk_impair(struct task_base *tbase, struct rte_mbuf **mbufs,
struct rte_mbuf *new_mbufs[MAX_PKT_BURST];
uint16_t idx = 0;
- if (task->tresh != RAND_MAX) {
+ if (task->tresh_no_drop != RAND_MAX) {
while (idx < MAX_PKT_BURST && task->queue_tail != task->queue_head) {
if (task->queue[task->queue_tail].tsc <= now) {
- out[idx] = rand_r(&task->seed) <= task->tresh? 0 : OUT_DISCARD;
+ out[idx] = rand_r(&task->seed) <= task->tresh_no_drop? 0 : OUT_DISCARD;
new_mbufs[idx] = task->queue[task->queue_tail].mbuf;
PREFETCH0(new_mbufs[idx]);
PREFETCH0(&new_mbufs[idx]->cacheline1);
@@ -346,7 +361,10 @@ static int handle_bulk_impair_random(struct task_base *tbase, struct rte_mbuf **
}
for (i = 0; i < n_pkts; ++i) {
- packet_time = now + random_delay(&task->state, task->delay_time, task->delay_time_mask);
+ if (rand_r(&task->seed) <= task->tresh_delay)
+ packet_time = now + random_delay(&task->state, task->delay_time, task->delay_time_mask);
+ else
+ packet_time = now;
idx = (packet_time >> DELAY_ACCURACY) & DELAY_MAX_MASK;
while (idx != ((now_idx - 1) & DELAY_MAX_MASK)) {
struct queue *queue = &task->buffer[idx];
@@ -366,6 +384,15 @@ static int handle_bulk_impair_random(struct task_base *tbase, struct rte_mbuf **
ret+= task->base.tx_pkt(&task->base, mbufs + i, 1, out);
plog_warn("Unexpectdly dropping packets\n");
}
+#if RTE_VERSION >= RTE_VERSION_NUM(19,11,0,0)
+ if (rand_r(&task->seed) <= task->tresh_duplicate) {
+ mbufs[i] = rte_pktmbuf_copy(mbufs[i], mbufs[i]->pool, 0, UINT32_MAX);
+ if (mbufs[i] == NULL) {
+ plog_err("Failed to duplicate mbuf\n");
+ } else
+ i = i - 1;
+ }
+#endif
}
struct rte_mbuf *new_mbufs[MAX_PKT_BURST];
@@ -374,7 +401,7 @@ static int handle_bulk_impair_random(struct task_base *tbase, struct rte_mbuf **
while ((pkt_idx < MAX_PKT_BURST) && (task->last_idx != ((now_idx - 1) & DELAY_MAX_MASK))) {
struct queue *queue = &task->buffer[task->last_idx];
while ((pkt_idx < MAX_PKT_BURST) && (queue->queue_tail != queue->queue_head)) {
- out[pkt_idx] = rand_r(&task->seed) <= task->tresh? 0 : OUT_DISCARD;
+ out[pkt_idx] = rand_r(&task->seed) <= task->tresh_no_drop? 0 : OUT_DISCARD;
new_mbufs[pkt_idx] = queue->queue_elem[queue->queue_tail].mbuf;
PREFETCH0(new_mbufs[pkt_idx]);
PREFETCH0(&new_mbufs[pkt_idx]->cacheline1);
@@ -399,10 +426,10 @@ static void init_task(struct task_base *tbase, struct task_args *targ)
uint64_t delay_us = 0;
task->seed = rte_rdtsc();
- if (targ->probability == 0)
- targ->probability = 1000000;
- task->tresh = ((uint64_t) RAND_MAX) * targ->probability / 1000000;
+ task->tresh_no_drop = ((uint64_t) RAND_MAX) * targ->probability_no_drop / 1000000;
+ task->tresh_delay = ((uint64_t) RAND_MAX) * targ->probability_delay / 1000000;
+ task->tresh_duplicate = ((uint64_t) RAND_MAX) * targ->probability_duplicate / 1000000;
if ((targ->delay_us == 0) && (targ->random_delay_us == 0)) {
tbase->handle_bulk = handle_bulk_random_drop;
@@ -438,10 +465,10 @@ static void init_task(struct task_base *tbase, struct task_args *targ)
task->queue_tail = 0;
} else if (targ->random_delay_us) {
size_t size = (DELAY_MAX_MASK + 1) * sizeof(struct queue);
- plog_info("Allocating %zd bytes\n", size);
+ plog_info("\t\tAllocating %zd bytes\n", size);
task->buffer = prox_zmalloc(size, socket_id);
PROX_PANIC(task->buffer == NULL, "Not enough memory to allocate buffer\n");
- plog_info("Allocating %d x %zd bytes\n", DELAY_MAX_MASK + 1, mem_size);
+ plog_info("\t\tAllocating %d x %zd bytes\n", DELAY_MAX_MASK + 1, mem_size);
for (int i = 0; i < DELAY_MAX_MASK + 1; i++) {
task->buffer[i].queue_elem = prox_zmalloc(mem_size, socket_id);
diff --git a/VNFs/DPPD-PROX/handle_impair.h b/VNFs/DPPD-PROX/handle_impair.h
index 162213ed..c2d10ab3 100644
--- a/VNFs/DPPD-PROX/handle_impair.h
+++ b/VNFs/DPPD-PROX/handle_impair.h
@@ -18,6 +18,8 @@
#define _HANDLE_IMPAIR_H_
void task_impair_set_delay_us(struct task_base *tbase, uint32_t delay_us, uint32_t random_delay_us);
-void task_impair_set_proba(struct task_base *tbase, float proba);
+void task_impair_set_proba_no_drop(struct task_base *tbase, float proba);
+void task_impair_set_proba_delay(struct task_base *tbase, float proba);
+void task_impair_set_proba_duplicate(struct task_base *tbase, float proba);
#endif /* _HANDLE_IMPAIR_H_ */
diff --git a/VNFs/DPPD-PROX/handle_ipv6_tunnel.c b/VNFs/DPPD-PROX/handle_ipv6_tunnel.c
index a99a8f96..1c99eb84 100644
--- a/VNFs/DPPD-PROX/handle_ipv6_tunnel.c
+++ b/VNFs/DPPD-PROX/handle_ipv6_tunnel.c
@@ -167,7 +167,7 @@ static void init_task_ipv6_tun_base(struct task_ipv6_tun_base* tun_base, struct
struct prox_port_cfg *port = find_reachable_port(targ);
if (port) {
- tun_base->offload_crc = port->requested_tx_offload & (DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM);
+ tun_base->offload_crc = port->requested_tx_offload & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_UDP_CKSUM);
}
}
diff --git a/VNFs/DPPD-PROX/handle_irq.c b/VNFs/DPPD-PROX/handle_irq.c
index 00c192f6..36aa54e8 100644
--- a/VNFs/DPPD-PROX/handle_irq.c
+++ b/VNFs/DPPD-PROX/handle_irq.c
@@ -26,7 +26,10 @@
#include "input.h"
#define MAX_INTERRUPT_LENGTH 500000 /* Maximum length of an interrupt is (1 / MAX_INTERRUPT_LENGTH) seconds */
+
+uint64_t irq_bucket_maxtime_cycles[IRQ_BUCKETS_COUNT];
uint64_t irq_bucket_maxtime_micro[] = {1,5,10,50,100,500,1000,5000,10000,50000,100000,500000,UINT64_MAX};
+
/*
* This module is not handling any packets.
* It loops on rdtsc() and checks whether it has been interrupted
diff --git a/VNFs/DPPD-PROX/handle_lat.c b/VNFs/DPPD-PROX/handle_lat.c
index ef4da319..04a4848b 100644
--- a/VNFs/DPPD-PROX/handle_lat.c
+++ b/VNFs/DPPD-PROX/handle_lat.c
@@ -86,6 +86,15 @@ struct rx_pkt_meta_data {
uint32_t bytes_after_in_bulk;
};
+struct loss_buffer {
+ uint32_t packet_id;
+ uint32_t n;
+};
+
+struct flows {
+ uint32_t packet_id;
+};
+
struct task_lat {
struct task_base base;
uint64_t limit;
@@ -111,10 +120,19 @@ struct task_lat {
struct rx_pkt_meta_data *rx_pkt_meta;
// Following fields are only used when starting or stopping, not in general runtime
uint64_t *prev_tx_packet_index;
+ FILE *fp_loss;
FILE *fp_rx;
FILE *fp_tx;
struct prox_port_cfg *port;
uint64_t *bytes_to_tsc;
+ uint64_t *previous_packet;
+ uint32_t loss_buffer_size;
+ struct loss_buffer *loss_buffer;
+ uint32_t loss_id;
+ uint32_t packet_id_in_flow_pos;
+ int32_t flow_id_pos;
+ uint32_t flow_count;
+ struct flows *flows;
};
/* This function calculate the difference between rx and tx_time
* Both values are uint32_t (see handle_lat_bulk)
@@ -399,7 +417,14 @@ static void lat_stop(struct task_base *tbase)
if (task->unique_id_pos) {
task_lat_count_remaining_lost_packets(task);
task_lat_reset_eld(task);
+		memset(task->previous_packet, 0, sizeof(task->previous_packet[0]) * task->generator_count);
+ }
+ if (task->loss_id && task->fp_loss) {
+ for (uint i = 0; i < task->loss_id; i++) {
+ fprintf(task->fp_loss, "packet %d: %d\n", task->loss_buffer[i].packet_id, task->loss_buffer[i].n);
+ }
}
+ task->lat_test->lost_packets = 0;
if (task->latency_buffer)
lat_write_latency_to_file(task);
}
@@ -440,6 +465,17 @@ static uint32_t task_lat_early_loss_detect(struct task_lat *task, uint32_t packe
return early_loss_detect_add(eld, packet_id);
}
+static void lat_test_check_duplicate(struct task_lat *task, struct lat_test *lat_test, uint32_t packet_id, uint8_t generator_id)
+{
+ struct early_loss_detect *eld = &task->eld[generator_id];
+ uint32_t old_queue_id, queue_pos;
+
+ queue_pos = packet_id & PACKET_QUEUE_MASK;
+ old_queue_id = eld->entries[queue_pos];
+ if ((packet_id >> PACKET_QUEUE_BITS) == old_queue_id)
+ lat_test->duplicate++;
+}
+
static uint64_t tsc_extrapolate_backward(struct task_lat *task, uint64_t tsc_from, uint64_t bytes, uint64_t tsc_minimum)
{
#ifdef NO_LAT_EXTRAPOLATION
@@ -462,6 +498,24 @@ static void lat_test_histogram_add(struct lat_test *lat_test, uint64_t lat_tsc)
lat_test->buckets[bucket_id]++;
}
+static void lat_test_check_flow_ordering(struct task_lat *task, struct lat_test *lat_test, int32_t flow_id, uint32_t packet_id)
+{
+ if (packet_id < task->flows[flow_id].packet_id) {
+ lat_test->mis_ordered++;
+ lat_test->extent += task->flows[flow_id].packet_id - packet_id;
+ }
+ task->flows[flow_id].packet_id = packet_id;
+}
+
+static void lat_test_check_ordering(struct task_lat *task, struct lat_test *lat_test, uint32_t packet_id, uint8_t generator_id)
+{
+ if (packet_id < task->previous_packet[generator_id]) {
+ lat_test->mis_ordered++;
+ lat_test->extent += task->previous_packet[generator_id] - packet_id;
+ }
+ task->previous_packet[generator_id] = packet_id;
+}
+
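Both ordering checks apply the same rule: a packet whose id is lower than the previously seen id on that flow or generator counts as one mis-ordered event, and extent accumulates how far behind it arrived (seeing id 97 after id 100 adds 1 to mis_ordered and 3 to extent). The sketch below restates the update rule with hypothetical names, independent of the task_lat structures:

#include <stdint.h>

struct order_state { uint32_t last_id; };

/* Count a reordering event when a packet arrives behind the id seen before it. */
static void order_update(struct order_state *s, uint32_t packet_id,
			 uint64_t *mis_ordered, uint64_t *extent)
{
	if (packet_id < s->last_id) {
		(*mis_ordered)++;
		*extent += s->last_id - packet_id;   /* how far behind it was */
	}
	s->last_id = packet_id;                      /* remember the id just seen */
}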
static void lat_test_add_lost(struct lat_test *lat_test, uint64_t lost_packets)
{
lat_test->lost_packets += lost_packets;
@@ -514,6 +568,7 @@ static void task_lat_store_lat(struct task_lat *task, uint64_t rx_packet_index,
static int handle_lat_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
{
struct task_lat *task = (struct task_lat *)tbase;
+ static int max_flows_printed = 0;
int rc;
if (n_pkts == 0) {
@@ -602,6 +657,24 @@ static int handle_lat_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uin
uint8_t generator_id;
uint32_t packet_id;
+ int32_t flow_id = -1;
+ if (task->flow_id_pos) {
+ flow_id = *(int32_t *)(hdr + task->flow_id_pos);
+ if (unlikely(flow_id >= (int32_t)(task->flow_count))) {
+ flow_id = -1;
+ if (!max_flows_printed) {
+ plog_info("Too many flows - increase flow count (only printed once)\n");
+ max_flows_printed = 1;
+ }
+ }
+
+ }
+ if (task->packet_id_in_flow_pos && (flow_id != -1)) {
+ uint32_t packet_id_in_flow;
+ struct unique_id *unique_id = (struct unique_id *)(hdr + task->packet_id_in_flow_pos);
+ unique_id_get(unique_id, &generator_id, &packet_id_in_flow);
+			lat_test_check_flow_ordering(task, task->lat_test, flow_id + generator_id * task->flow_count, packet_id_in_flow);
+ }
if (task->unique_id_pos) {
struct unique_id *unique_id = (struct unique_id *)(hdr + task->unique_id_pos);
unique_id_get(unique_id, &generator_id, &packet_id);
@@ -613,8 +686,18 @@ static int handle_lat_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uin
// Skip unexpected packet
continue;
}
-
- lat_test_add_lost(task->lat_test, task_lat_early_loss_detect(task, packet_id, generator_id));
+ if (flow_id == -1) {
+ lat_test_check_ordering(task, task->lat_test, packet_id, generator_id);
+ }
+ lat_test_check_duplicate(task, task->lat_test, packet_id, generator_id);
+ uint32_t loss = task_lat_early_loss_detect(task, packet_id, generator_id);
+ if (loss) {
+ lat_test_add_lost(task->lat_test, loss);
+ if (task->loss_id < task->loss_buffer_size) {
+ task->loss_buffer[task->loss_id].packet_id = packet_id;
+ task->loss_buffer[task->loss_id++].n = loss;
+ }
+ }
} else {
generator_id = 0;
packet_id = task->rx_packet_index;
@@ -702,7 +785,7 @@ static void task_init_generator_count(struct task_lat *task)
plog_info("\tNo generators found, hard-coding to %u generators\n", task->generator_count);
} else
task->generator_count = *generator_count;
- plog_info("\tLatency using %u generators\n", task->generator_count);
+ plog_info("\t\tLatency using %u generators\n", task->generator_count);
}
static void task_lat_init_eld(struct task_lat *task, uint8_t socket_id)
@@ -734,6 +817,8 @@ static void init_task_lat(struct task_base *tbase, struct task_args *targ)
task->accur_pos = targ->accur_pos;
task->sig_pos = targ->sig_pos;
task->sig = targ->sig;
+ task->packet_id_in_flow_pos = targ->packet_id_in_flow_pos;
+ task->flow_id_pos = targ->flow_id_pos;
task->unique_id_pos = targ->packet_id_pos;
task->latency_buffer_size = targ->latency_buffer_size;
@@ -786,6 +871,8 @@ static void init_task_lat(struct task_base *tbase, struct task_args *targ)
if (task->unique_id_pos) {
task_lat_init_eld(task, socket_id);
task_lat_reset_eld(task);
+		task->previous_packet = prox_zmalloc(sizeof(task->previous_packet[0]) * task->generator_count, socket_id);
+ PROX_PANIC(task->previous_packet == NULL, "Failed to allocate array for storing previous packet\n");
}
task->lat_test = &task->lt[task->using_lt];
@@ -803,13 +890,24 @@ static void init_task_lat(struct task_base *tbase, struct task_args *targ)
// It can be UINT32_MAX (virtual devices or not supported by DPDK < 16.04)
if (port->max_link_speed != UINT32_MAX) {
bytes_per_hz = port->max_link_speed * 125000L;
- plog_info("\tPort %u: max link speed is %ld Mbps\n",
+ plog_info("\t\tPort %u: max link speed is %ld Mbps\n",
(uint8_t)(port - prox_port_cfg), 8 * bytes_per_hz / 1000000);
}
}
+ task->loss_buffer_size = targ->loss_buffer_size;
+ if (task->loss_buffer_size) {
+ char name[256];
+ sprintf(name, "loss_%u.txt", targ->lconf->id);
+ task->fp_loss = fopen(name, "w+");
+ PROX_PANIC(task->fp_loss == NULL, "Failed to open %s\n", name);
+
+ task->loss_buffer = prox_zmalloc(task->loss_buffer_size * sizeof(struct loss_buffer), rte_lcore_to_socket_id(targ->lconf->id));
+ PROX_PANIC(task->loss_buffer == NULL,
+ "Failed to allocate %lu bytes (in huge pages) for loss_buffer\n", task->loss_buffer_size * sizeof(struct loss_buffer));
+ }
task->bytes_to_tsc = prox_zmalloc(max_frame_size * sizeof(task->bytes_to_tsc[0]) * MAX_PKT_BURST, rte_lcore_to_socket_id(targ->lconf->id));
PROX_PANIC(task->bytes_to_tsc == NULL,
- "Failed to allocate %u bytes (in huge pages) for bytes_to_tsc\n", max_frame_size);
+ "Failed to allocate %lu bytes (in huge pages) for bytes_to_tsc\n", max_frame_size * sizeof(task->bytes_to_tsc[0]) * MAX_PKT_BURST);
// There are cases where hz estimate might be slighly over-estimated
// This results in too much extrapolation
@@ -820,6 +918,13 @@ static void init_task_lat(struct task_base *tbase, struct task_args *targ)
else
task->bytes_to_tsc[i] = (rte_get_tsc_hz() * i * 0.99) / bytes_per_hz;
}
+ task->flow_count = targ->flow_count;
+ PROX_PANIC(task->flow_id_pos && (task->flow_count == 0), "flow_count must be configured when flow_id_pos is set\n");
+ if (task->flow_count) {
+ task->flows = prox_zmalloc(task->flow_count * sizeof(struct flows) * task->generator_count, rte_lcore_to_socket_id(targ->lconf->id));
+ PROX_PANIC(task->flows == NULL,
+ "Failed to allocate %lu bytes (in huge pages) for flows\n", task->flow_count * sizeof(struct flows) * task->generator_count);
+ }
}
static struct task_init task_init_lat = {
diff --git a/VNFs/DPPD-PROX/handle_lat.h b/VNFs/DPPD-PROX/handle_lat.h
index a80afc90..475682ce 100644
--- a/VNFs/DPPD-PROX/handle_lat.h
+++ b/VNFs/DPPD-PROX/handle_lat.h
@@ -52,6 +52,9 @@ struct lat_test {
uint64_t buckets[LAT_BUCKET_COUNT];
uint64_t bucket_size;
uint64_t lost_packets;
+ uint64_t mis_ordered;
+ uint64_t extent;
+ uint64_t duplicate;
};
static struct time_unit lat_test_get_accuracy_limit(struct lat_test *lat_test)
@@ -157,6 +160,9 @@ static void lat_test_combine(struct lat_test *dst, struct lat_test *src)
if (src->accuracy_limit_tsc > dst->accuracy_limit_tsc)
dst->accuracy_limit_tsc = src->accuracy_limit_tsc;
dst->lost_packets += src->lost_packets;
+ dst->mis_ordered += src->mis_ordered;
+ dst->extent += src->extent;
+ dst->duplicate += src->duplicate;
#ifdef LATENCY_HISTOGRAM
_lat_test_histogram_combine(dst, src);
@@ -178,6 +184,9 @@ static void lat_test_reset(struct lat_test *lat_test)
lat_test->accuracy_limit_tsc = 0;
lat_test->lost_packets = 0;
+ lat_test->mis_ordered = 0;
+ lat_test->extent = 0;
+ lat_test->duplicate = 0;
memset(lat_test->buckets, 0, sizeof(lat_test->buckets));
}
diff --git a/VNFs/DPPD-PROX/handle_lb_5tuple.c b/VNFs/DPPD-PROX/handle_lb_5tuple.c
index d320ca9d..ec229386 100644
--- a/VNFs/DPPD-PROX/handle_lb_5tuple.c
+++ b/VNFs/DPPD-PROX/handle_lb_5tuple.c
@@ -14,6 +14,11 @@
// limitations under the License.
*/
+#include <rte_common.h>
+#ifndef __rte_cache_aligned
+#include <rte_memory.h>
+#endif
+
#include <rte_hash.h>
#include <rte_ether.h>
#include <rte_memcpy.h>
diff --git a/VNFs/DPPD-PROX/handle_lb_qinq.c b/VNFs/DPPD-PROX/handle_lb_qinq.c
index 49ed1b79..9726edda 100644
--- a/VNFs/DPPD-PROX/handle_lb_qinq.c
+++ b/VNFs/DPPD-PROX/handle_lb_qinq.c
@@ -104,9 +104,9 @@ static void init_task_lb_qinq(struct task_base *tbase, struct task_args *targ)
plog_info("\t\ttask_lb_qinq protocols_mask = 0x%x\n", task->protocols_mask);
if (targ->task_init->flag_features & TASK_FEATURE_LUT_QINQ_RSS)
- tbase->flags |= BASE_FLAG_LUT_QINQ_RSS;
+ tbase->flags |= TBASE_FLAG_LUT_QINQ_RSS;
if (targ->task_init->flag_features & TASK_FEATURE_LUT_QINQ_HASH)
- tbase->flags |= BASE_FLAG_LUT_QINQ_HASH;
+ tbase->flags |= TBASE_FLAG_LUT_QINQ_HASH;
plog_info("\t\ttask_lb_qinq flags = 0x%x\n", tbase->flags);
}
@@ -248,13 +248,13 @@ struct qinq_packet {
prox_rte_ipv4_hdr ipv4_hdr;
prox_rte_ipv6_hdr ipv6_hdr;
};
-} __attribute__((packed));
+} __attribute__((packed)) __attribute__((__aligned__(2)));
struct qinq_packet_data {
prox_rte_ether_addr d_addr;
prox_rte_ether_addr s_addr;
uint64_t qinq;
-} __attribute__((packed));
+} __attribute__((packed)) __attribute__((__aligned__(2)));
struct ether_packet {
prox_rte_ether_hdr ether_hdr;
@@ -262,7 +262,7 @@ struct ether_packet {
prox_rte_ipv4_hdr ipv4_hdr;
prox_rte_ipv6_hdr ipv6_hdr;
};
-} __attribute__((packed));
+} __attribute__((packed)) __attribute__((__aligned__(2)));
struct cpe_packet {
union {
@@ -275,7 +275,7 @@ struct cpe_packet {
static inline uint8_t get_worker(struct task_lb_qinq *task, struct cpe_packet *packet)
{
uint8_t worker = 0;
- if (((struct task_base *)task)->flags & BASE_FLAG_LUT_QINQ_HASH) {
+ if (((struct task_base *)task)->flags & TBASE_FLAG_LUT_QINQ_HASH) {
// Load Balance on Hash of combination of cvlan and svlan
uint64_t qinq_net = packet->qd.qinq;
qinq_net = qinq_net & 0xFF0F0000FF0F0000; // Mask Proto and QoS bits
@@ -286,7 +286,7 @@ static inline uint8_t get_worker(struct task_lb_qinq *task, struct cpe_packet *p
worker = rte_hash_crc(&qinq_net,8,0) % task->nb_worker_threads;
}
plogx_dbg("Sending packet svlan=%x, cvlan=%x, pseudo_qinq=%lx to worker %d\n", rte_bswap16(0xFF0F & packet->qp.qinq_hdr.svlan.vlan_tci), rte_bswap16(0xFF0F & packet->qp.qinq_hdr.cvlan.vlan_tci), qinq_net, worker);
- } else if (((struct task_base *)task)->flags & BASE_FLAG_LUT_QINQ_RSS){
+ } else if (((struct task_base *)task)->flags & TBASE_FLAG_LUT_QINQ_RSS){
// Load Balance on rss of combination of cvlan and svlan
uint32_t qinq = (packet->qp.qinq_hdr.cvlan.vlan_tci & 0xFF0F) << 16;
uint32_t rss = toeplitz_hash((uint8_t *)&qinq, 4);
diff --git a/VNFs/DPPD-PROX/handle_master.c b/VNFs/DPPD-PROX/handle_master.c
index 1026a179..58240ba0 100644
--- a/VNFs/DPPD-PROX/handle_master.c
+++ b/VNFs/DPPD-PROX/handle_master.c
@@ -73,6 +73,8 @@ const char *actions_string[] = {
};
+int (*handle_ctrl_plane)(struct task_base *tbase, struct rte_mbuf **mbuf, uint16_t n_pkts) = NULL;
+
static struct my_arp_t arp_reply = {
.htype = 0x100,
.ptype = 8,
@@ -167,16 +169,17 @@ void master_init_vdev(struct task_base *tbase, uint8_t port_id, uint8_t core_id,
struct sockaddr_in dst, src;
src.sin_family = AF_INET;
- src.sin_addr.s_addr = prox_port_cfg[vdev_port].ip;
src.sin_port = rte_cpu_to_be_16(PROX_PSEUDO_PKT_PORT);
-
- int fd = socket(AF_INET, SOCK_DGRAM, 0);
- PROX_PANIC(fd < 0, "Failed to open socket(AF_INET, SOCK_DGRAM, 0)\n");
- prox_port_cfg[vdev_port].fd = fd;
- rc = bind(fd,(struct sockaddr *)&src, sizeof(struct sockaddr_in));
- PROX_PANIC(rc, "Failed to bind("IPv4_BYTES_FMT":%d): errno = %d (%s)\n", IPv4_BYTES(((uint8_t*)&src.sin_addr.s_addr)), src.sin_port, errno, strerror(errno));
- plog_info("DPDK port %d bound("IPv4_BYTES_FMT":%d) to fd %d\n", port_id, IPv4_BYTES(((uint8_t*)&src.sin_addr.s_addr)), src.sin_port, fd);
- fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_NONBLOCK);
+ for (int vlan_id = 0; vlan_id < prox_port_cfg[vdev_port].n_vlans; vlan_id++) {
+ src.sin_addr.s_addr = rte_be_to_cpu_32(prox_port_cfg[vdev_port].ip_addr[vlan_id].ip);
+ int fd = socket(AF_INET, SOCK_DGRAM, 0);
+ PROX_PANIC(fd < 0, "Failed to open socket(AF_INET, SOCK_DGRAM, 0)\n");
+ prox_port_cfg[vdev_port].fds[vlan_id] = fd;
+ rc = bind(fd,(struct sockaddr *)&src, sizeof(struct sockaddr_in));
+ PROX_PANIC(rc, "Failed to bind("IPv4_BYTES_FMT":%d): errno = %d (%s)\n", IPv4_BYTES(((uint8_t*)&src.sin_addr.s_addr)), src.sin_port, errno, strerror(errno));
+ plog_info("DPDK port %d bound("IPv4_BYTES_FMT":%d) to fd %d\n", port_id, IPv4_BYTES(((uint8_t*)&src.sin_addr.s_addr)), src.sin_port, fd);
+ fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_NONBLOCK);
+ }
task->max_vdev_id++;
}
}
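master_init_vdev() now opens one datagram socket per VLAN configured on the TAP port, binds it to that VLAN's IP address and makes it non-blocking, storing the descriptors in fds[] so that handle_message() can later pick the right socket by VLAN tag. A reduced, self-contained sketch of that per-address setup (hypothetical helper name, minimal error handling):

#include <arpa/inet.h>
#include <fcntl.h>
#include <stdint.h>
#include <sys/socket.h>
#include <unistd.h>

/* Bind a non-blocking UDP socket to ip:port (host byte order) and return the fd. */
static int bind_udp_nonblock(uint32_t ip, uint16_t port)
{
	struct sockaddr_in src = {0};
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;
	src.sin_family = AF_INET;
	src.sin_addr.s_addr = htonl(ip);
	src.sin_port = htons(port);
	if (bind(fd, (struct sockaddr *)&src, sizeof(src)) < 0) {
		close(fd);
		return -1;
	}
	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_NONBLOCK);  /* never block the master loop */
	return fd;
}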
@@ -258,7 +261,7 @@ static inline void handle_arp_request(struct task_base *tbase, struct rte_mbuf *
plogx_dbg("\tMaster handling ARP request for ip "IPv4_BYTES_FMT" on port %d which supports random ip\n", IP4(key.ip), key.port);
struct rte_ring *ring = task->internal_port_table[port].ring;
create_mac(arp, &mac);
- mbuf->ol_flags &= ~(PKT_TX_IP_CKSUM|PKT_TX_UDP_CKSUM);
+ mbuf->ol_flags &= ~(RTE_MBUF_F_TX_IP_CKSUM|RTE_MBUF_F_TX_UDP_CKSUM);
build_arp_reply(ether_hdr, &mac, arp);
tx_ring(tbase, ring, SEND_ARP_REPLY_FROM_MASTER, mbuf);
return;
@@ -273,7 +276,7 @@ static inline void handle_arp_request(struct task_base *tbase, struct rte_mbuf *
tx_drop(mbuf);
} else {
struct rte_ring *ring = task->internal_ip_table[ret].ring;
- mbuf->ol_flags &= ~(PKT_TX_IP_CKSUM|PKT_TX_UDP_CKSUM);
+ mbuf->ol_flags &= ~(RTE_MBUF_F_TX_IP_CKSUM|RTE_MBUF_F_TX_UDP_CKSUM);
build_arp_reply(ether_hdr, &task->internal_ip_table[ret].mac, arp);
tx_ring(tbase, ring, SEND_ARP_REPLY_FROM_MASTER, mbuf);
}
@@ -338,7 +341,7 @@ static inline void handle_unknown_ip(struct task_base *tbase, struct rte_mbuf *m
return;
}
// We send an ARP request even if one was just sent (and not yet answered) by another task
- mbuf->ol_flags &= ~(PKT_TX_IP_CKSUM|PKT_TX_UDP_CKSUM);
+ mbuf->ol_flags &= ~(RTE_MBUF_F_TX_IP_CKSUM|RTE_MBUF_F_TX_UDP_CKSUM);
build_arp_request(mbuf, &task->internal_port_table[port].mac, ip_dst, ip_src, vlan);
tx_ring(tbase, ring, SEND_ARP_REQUEST_FROM_MASTER, mbuf);
}
@@ -367,7 +370,7 @@ static inline void build_icmp_reply_message(struct task_base *tbase, struct rte_
tx_drop(mbuf);
} else {
struct rte_ring *ring = task->internal_ip_table[ret].ring;
- mbuf->ol_flags &= ~(PKT_TX_IP_CKSUM|PKT_TX_UDP_CKSUM);
+ mbuf->ol_flags &= ~(RTE_MBUF_F_TX_IP_CKSUM|RTE_MBUF_F_TX_UDP_CKSUM);
tx_ring(tbase, ring, SEND_ICMP_FROM_MASTER, mbuf);
}
}
@@ -375,7 +378,7 @@ static inline void build_icmp_reply_message(struct task_base *tbase, struct rte_
static inline void handle_icmp(struct task_base *tbase, struct rte_mbuf *mbuf)
{
struct task_master *task = (struct task_master *)tbase;
- uint8_t port_id = mbuf->port;
+ uint8_t port_id = get_port(mbuf);
struct port_table *port = &task->internal_port_table[port_id];
prox_rte_ether_hdr *hdr = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
if (hdr->ether_type != ETYPE_IPv4) {
@@ -401,7 +404,7 @@ static inline void handle_icmp(struct task_base *tbase, struct rte_mbuf *mbuf)
port->n_echo_req = 0;
port->last_echo_req_rcvd_tsc = rte_rdtsc();
}
- build_icmp_reply_message(tbase, mbuf);
+ return build_icmp_reply_message(tbase, mbuf);
} else if (type == PROX_RTE_IP_ICMP_ECHO_REPLY) {
port->n_echo_rep++;
if (rte_rdtsc() - port->last_echo_rep_rcvd_tsc > rte_get_tsc_hz()) {
@@ -418,20 +421,21 @@ static inline void handle_unknown_ip6(struct task_base *tbase, struct rte_mbuf *
{
struct task_master *task = (struct task_master *)tbase;
struct ether_hdr_arp *hdr_arp = rte_pktmbuf_mtod(mbuf, struct ether_hdr_arp *);
- uint8_t port = get_port(mbuf);
+ uint8_t port_id = get_port(mbuf);
struct ipv6_addr *ip_dst = ctrl_ring_get_ipv6_addr(mbuf);
+ uint16_t vlan = ctrl_ring_get_vlan(mbuf);
int ret1, ret2, i;
- plogx_dbg("\tMaster trying to find MAC of external IP "IPv6_BYTES_FMT" for port %d\n", IPv6_BYTES(ip_dst->bytes), port);
- if (unlikely(port >= PROX_MAX_PORTS)) {
- plogx_dbg("Port %d not found", port);
+ plogx_dbg("\tMaster trying to find MAC of external IP "IPv6_BYTES_FMT" for port %d\n", IPv6_BYTES(ip_dst->bytes), port_id);
+ if (unlikely(port_id >= PROX_MAX_PORTS)) {
+ plogx_dbg("Port %d not found", port_id);
tx_drop(mbuf);
return;
}
- struct ipv6_addr *local_ip_src = &task->internal_port_table[port].local_ipv6_addr;
- struct ipv6_addr *global_ip_src = &task->internal_port_table[port].global_ipv6_addr;
+ struct ipv6_addr *local_ip_src = &task->internal_port_table[port_id].local_ipv6_addr;
+ struct ipv6_addr *global_ip_src = &task->internal_port_table[port_id].global_ipv6_addr;
struct ipv6_addr *ip_src;
- if (memcmp(local_ip_src, ip_dst, 8) == 0)
+ if (memcmp(local_ip_src, ip_dst, prox_port_cfg[port_id].v6_mask_length) == 0)
ip_src = local_ip_src;
else if (memcmp(global_ip_src, &null_addr, 16))
ip_src = global_ip_src;
@@ -443,7 +447,7 @@ static inline void handle_unknown_ip6(struct task_base *tbase, struct rte_mbuf *
struct rte_ring *ring = task->ctrl_tx_rings[get_core(mbuf) * MAX_TASKS_PER_CORE + get_task(mbuf)];
if (ring == NULL) {
- plogx_dbg("Port %d not registered", port);
+ plogx_dbg("Port %d not registered", port_id);
tx_drop(mbuf);
return;
}
@@ -474,37 +478,33 @@ static inline void handle_unknown_ip6(struct task_base *tbase, struct rte_mbuf *
task->external_ip6_table[ret2].nb_requests++;
// Only needed for first request - but avoid test and copy the same 6 bytes
// In most cases we will only have one request per IP.
- //memcpy(&task->external_ip6_table[ret2].mac, &task->internal_port_table[port].mac, sizeof(prox_rte_ether_addr));
+ //memcpy(&task->external_ip6_table[ret2].mac, &task->internal_port_table[port_id].mac, sizeof(prox_rte_ether_addr));
}
// As timers are not handled by master, we might send an NS request even if one was just sent
// (and not yet answered) by another task
- build_neighbour_sollicitation(mbuf, &task->internal_port_table[port].mac, ip_dst, ip_src);
+ build_neighbour_sollicitation(mbuf, &task->internal_port_table[port_id].mac, ip_dst, ip_src, vlan);
tx_ring(tbase, ring, SEND_NDP_FROM_MASTER, mbuf);
}
-static inline void handle_rs(struct task_base *tbase, struct rte_mbuf *mbuf)
+static inline void handle_rs(struct task_base *tbase, struct rte_mbuf *mbuf, prox_rte_ipv6_hdr *ipv6_hdr, uint16_t vlan)
{
struct task_master *task = (struct task_master *)tbase;
- prox_rte_ether_hdr *hdr = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
- prox_rte_ipv6_hdr *ipv6_hdr = (prox_rte_ipv6_hdr *)(hdr + 1);
int i, ret;
uint8_t port = get_port(mbuf);
if (task->internal_port_table[port].flags & IPV6_ROUTER) {
plogx_dbg("\tMaster handling Router Solicitation from ip "IPv6_BYTES_FMT" on port %d\n", IPv6_BYTES(ipv6_hdr->src_addr), port);
struct rte_ring *ring = task->internal_port_table[port].ring;
- build_router_advertisement(mbuf, &prox_port_cfg[port].eth_addr, &task->internal_port_table[port].local_ipv6_addr, &task->internal_port_table[port].router_prefix);
+ build_router_advertisement(mbuf, &prox_port_cfg[port].eth_addr, &task->internal_port_table[port].local_ipv6_addr, &task->internal_port_table[port].router_prefix, vlan);
tx_ring(tbase, ring, SEND_NDP_FROM_MASTER, mbuf);
return;
}
}
-static inline void handle_ra(struct task_base *tbase, struct rte_mbuf *mbuf)
+static inline void handle_ra(struct task_base *tbase, struct rte_mbuf *mbuf, prox_rte_ipv6_hdr *ipv6_hdr, uint16_t vlan)
{
struct task_master *task = (struct task_master *)tbase;
- prox_rte_ether_hdr *hdr = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
- prox_rte_ipv6_hdr *ipv6_hdr = (prox_rte_ipv6_hdr *)(hdr + 1);
int i, ret, send = 0;
uint8_t port = get_port(mbuf);
struct rte_ring *ring = task->internal_port_table[port].ring;
@@ -559,11 +559,9 @@ static inline void handle_ra(struct task_base *tbase, struct rte_mbuf *mbuf)
tx_drop(mbuf);
}
-static inline void handle_ns(struct task_base *tbase, struct rte_mbuf *mbuf)
+static inline void handle_ns(struct task_base *tbase, struct rte_mbuf *mbuf, prox_rte_ipv6_hdr *ipv6_hdr, uint16_t vlan)
{
struct task_master *task = (struct task_master *)tbase;
- prox_rte_ether_hdr *hdr = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
- prox_rte_ipv6_hdr *ipv6_hdr = (prox_rte_ipv6_hdr *)(hdr + 1);
struct icmpv6_NS *neighbour_sollicitation = (struct icmpv6_NS *)(ipv6_hdr + 1);
int i, ret;
uint8_t port = get_port(mbuf);
@@ -606,7 +604,7 @@ static inline void handle_ns(struct task_base *tbase, struct rte_mbuf *mbuf)
plogx_dbg("\tMaster handling NS request for ip "IPv6_BYTES_FMT" on port %d which supports random ip\n", IPv6_BYTES(key.ip6.bytes), key.port);
struct rte_ring *ring = task->internal_port_table[port].ring;
create_mac_from_EUI(&key.ip6, &mac);
- build_neighbour_advertisement(tbase, mbuf, &mac, &task->internal_port_table[port].local_ipv6_addr, PROX_SOLLICITED);
+ build_neighbour_advertisement(tbase, mbuf, &mac, &task->internal_port_table[port].local_ipv6_addr, PROX_SOLLICITED, vlan);
tx_ring(tbase, ring, SEND_NDP_FROM_MASTER, mbuf);
return;
}
@@ -616,7 +614,7 @@ static inline void handle_ns(struct task_base *tbase, struct rte_mbuf *mbuf)
plogx_dbg("\tMaster handling NS request for ip "IPv6_BYTES_FMT" on port %d which supports random ip\n", IPv6_BYTES(key.ip6.bytes), key.port);
struct rte_ring *ring = task->internal_port_table[port].ring;
create_mac_from_EUI(&key.ip6, &mac);
- build_neighbour_advertisement(tbase, mbuf, &mac, &task->internal_port_table[port].global_ipv6_addr, PROX_SOLLICITED);
+ build_neighbour_advertisement(tbase, mbuf, &mac, &task->internal_port_table[port].global_ipv6_addr, PROX_SOLLICITED, vlan);
tx_ring(tbase, ring, SEND_NDP_FROM_MASTER, mbuf);
return;
}
@@ -629,16 +627,15 @@ static inline void handle_ns(struct task_base *tbase, struct rte_mbuf *mbuf)
tx_drop(mbuf);
} else {
struct rte_ring *ring = task->internal_ip6_table[ret].ring;
- build_neighbour_advertisement(tbase, mbuf, &task->internal_ip6_table[ret].mac, &key.ip6, PROX_SOLLICITED);
+ if (ring == NULL) return;
+ build_neighbour_advertisement(tbase, mbuf, &task->internal_ip6_table[ret].mac, &key.ip6, PROX_SOLLICITED, vlan);
tx_ring(tbase, ring, SEND_NDP_FROM_MASTER, mbuf);
}
}
-static inline void handle_na(struct task_base *tbase, struct rte_mbuf *mbuf)
+static inline void handle_na(struct task_base *tbase, struct rte_mbuf *mbuf, prox_rte_ipv6_hdr *ipv6_hdr, uint16_t vlan)
{
struct task_master *task = (struct task_master *)tbase;
- prox_rte_ether_hdr *hdr = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
- prox_rte_ipv6_hdr *ipv6_hdr = (prox_rte_ipv6_hdr *)(hdr + 1);
struct icmpv6_NA *neighbour_advertisement = (struct icmpv6_NA *)(ipv6_hdr + 1);
int i, ret;
uint8_t port = get_port(mbuf);
@@ -681,7 +678,7 @@ static inline void handle_na(struct task_base *tbase, struct rte_mbuf *mbuf)
}
if (target_address == NULL) {
- tx_drop(mbuf);
+ target_address = (uint8_t *)&neighbour_advertisement->destination_address;
}
struct ether_hdr_arp *hdr_arp = rte_pktmbuf_mtod(mbuf, struct ether_hdr_arp *);
struct ipv6_addr *key = &neighbour_advertisement->destination_address;
@@ -689,6 +686,7 @@ static inline void handle_na(struct task_base *tbase, struct rte_mbuf *mbuf)
ret = rte_hash_lookup(task->external_ip6_hash, (const void *)key);
if (unlikely(ret < 0)) {
// entry not found for this IP: we did not ask a request, delete the reply
+		plog_err("Unknown IP "IPv6_BYTES_FMT"\n", IPv6_BYTES(neighbour_advertisement->destination_address.bytes));
tx_drop(mbuf);
} else {
// entry found for this IP
@@ -703,6 +701,7 @@ static inline void handle_na(struct task_base *tbase, struct rte_mbuf *mbuf)
}
task->external_ip6_table[ret].nb_requests = 0;
} else {
+ plog_err("UNEXPECTED nb_requests == 0");
tx_drop(mbuf);
}
}
@@ -716,7 +715,7 @@ static inline void handle_message(struct task_base *tbase, struct rte_mbuf *mbuf
int command = get_command(mbuf);
uint8_t port = get_port(mbuf);
uint32_t ip;
- uint16_t vlan, ether_type;
+ uint16_t vlan = 0, ether_type;
uint8_t vdev_port = prox_port_cfg[port].dpdk_mapping;
plogx_dbg("\tMaster received %s (%x) from mbuf %p\n", actions_string[command], command, mbuf);
struct my_arp_t *arp;
@@ -790,6 +789,7 @@ static inline void handle_message(struct task_base *tbase, struct rte_mbuf *mbuf
struct ether_hdr_arp *hdr_arp = rte_pktmbuf_mtod(mbuf, struct ether_hdr_arp *);
uint32_t ip = get_ip(mbuf);
+ vlan = ctrl_ring_get_vlan(mbuf);
struct rte_ring *ring = task->ctrl_tx_rings[get_core(mbuf) * MAX_TASKS_PER_CORE + get_task(mbuf)];
// First check whether MAC address is not already in kernel MAC table.
@@ -810,12 +810,23 @@ static inline void handle_message(struct task_base *tbase, struct rte_mbuf *mbuf
dst.sin_family = AF_INET;
dst.sin_addr.s_addr = ip;
dst.sin_port = rte_cpu_to_be_16(PROX_PSEUDO_PKT_PORT);
- // TODO VLAN: find the right fd based on the VLAN
- int n = sendto(prox_port_cfg[vdev_port].fd, (char*)(&ip), 0, MSG_DONTROUTE, (struct sockaddr *)&dst, sizeof(struct sockaddr_in));
+
+ int vlan_id;
+ for (vlan_id = 0; vlan_id < prox_port_cfg[vdev_port].n_vlans; vlan_id++) {
+ if (prox_port_cfg[vdev_port].vlan_tags[vlan_id] == vlan)
+ break;
+ }
+ if (vlan_id >= prox_port_cfg[vdev_port].n_vlans) {
+ // Tag not found
+ plogx_info("\tDid not send to TAP IP "IPv4_BYTES_FMT" as wrong VLAN %d\n", IPv4_BYTES(((uint8_t*)&ip)), vlan);
+ tx_drop(mbuf);
+ break;
+ }
+ int n = sendto(prox_port_cfg[vdev_port].fds[vlan_id], (char*)(&ip), 0, MSG_DONTROUTE, (struct sockaddr *)&dst, sizeof(struct sockaddr_in));
if (n < 0) {
- plogx_info("\tFailed to send to TAP IP "IPv4_BYTES_FMT" using fd %d, error = %d (%s)\n", IPv4_BYTES(((uint8_t*)&ip)), prox_port_cfg[vdev_port].fd, errno, strerror(errno));
+ plogx_info("\tFailed to send to TAP IP "IPv4_BYTES_FMT" using fd %d, error = %d (%s)\n", IPv4_BYTES(((uint8_t*)&ip)), prox_port_cfg[vdev_port].fds[vlan_id], errno, strerror(errno));
} else
- plogx_dbg("\tSent %d bytes to TAP IP "IPv4_BYTES_FMT" using fd %d\n", n, IPv4_BYTES(((uint8_t*)&ip)), prox_port_cfg[vdev_port].fd);
+ plogx_dbg("\tSent %d bytes to TAP IP "IPv4_BYTES_FMT" using fd %d\n", n, IPv4_BYTES(((uint8_t*)&ip)), prox_port_cfg[vdev_port].fds[vlan_id]);
record_request(tbase, ip, port, ring);
tx_drop(mbuf);
@@ -828,10 +839,10 @@ static inline void handle_message(struct task_base *tbase, struct rte_mbuf *mbuf
break;
case NDP_PKT_FROM_NET_TO_MASTER:
ether_hdr = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
- prox_rte_ipv6_hdr *ipv6_hdr = (prox_rte_ipv6_hdr *)(ether_hdr + 1);
- if (unlikely((ether_hdr->ether_type != ETYPE_IPv6) || (ipv6_hdr->proto != ICMPv6))) {
+ prox_rte_ipv6_hdr *ipv6_hdr = prox_get_ipv6_hdr(ether_hdr, rte_pktmbuf_pkt_len(mbuf), &vlan);
+ if (unlikely((!ipv6_hdr) || (ipv6_hdr->proto != ICMPv6))) {
// Should not happen
- if (ether_hdr->ether_type != ETYPE_IPv6)
+ if (!ipv6_hdr)
plog_err("\tUnexpected message received: NDP_PKT_FROM_NET_TO_MASTER with ether_type %x\n", ether_hdr->ether_type);
else
plog_err("\tUnexpected message received: NDP_PKT_FROM_NET_TO_MASTER with ether_type %x and proto %x\n", ether_hdr->ether_type, ipv6_hdr->proto);
@@ -857,16 +868,16 @@ static inline void handle_message(struct task_base *tbase, struct rte_mbuf *mbuf
tx_drop(mbuf);
break;
case ICMPv6_RS:
- handle_rs(tbase, mbuf);
+ handle_rs(tbase, mbuf, ipv6_hdr, vlan);
break;
case ICMPv6_RA:
- handle_ra(tbase, mbuf);
+ handle_ra(tbase, mbuf, ipv6_hdr, vlan);
break;
case ICMPv6_NS:
- handle_ns(tbase, mbuf);
+ handle_ns(tbase, mbuf, ipv6_hdr, vlan);
break;
case ICMPv6_NA:
- handle_na(tbase, mbuf);
+ handle_na(tbase, mbuf, ipv6_hdr, vlan);
break;
case ICMPv6_RE:
plog_err("IPV6 ICMPV6 Redirect not handled\n");
@@ -898,6 +909,7 @@ void init_ctrl_plane(struct task_base *tbase)
.entries = n_entries,
.hash_func = rte_hash_crc,
.hash_func_init_val = 0,
+ .socket_id = socket_id,
};
if (prox_cfg.flags & DSF_L3_ENABLED) {
hash_params.key_len = sizeof(uint32_t);
@@ -963,7 +975,7 @@ void init_ctrl_plane(struct task_base *tbase)
struct sockaddr_nl sockaddr2;
memset(&sockaddr2, 0, sizeof(struct sockaddr_nl));
sockaddr2.nl_family = AF_NETLINK;
- sockaddr2.nl_groups = RTMGRP_IPV6_ROUTE | RTMGRP_IPV4_ROUTE | RTMGRP_NOTIFY;
+ sockaddr2.nl_groups = RTMGRP_IPV4_ROUTE | RTMGRP_NOTIFY;
rc = bind(fd, (struct sockaddr *)&sockaddr2, sizeof(struct sockaddr_nl));
PROX_PANIC(rc < 0, "Failed to bind to RTMGRP_NEIGH netlink group\n");
task->route_fds.fd = fd;
@@ -979,7 +991,7 @@ void init_ctrl_plane(struct task_base *tbase)
rte_socket_id(), 0);
PROX_PANIC(ret == NULL, "Failed to allocate ARP memory pool on socket %u with %u elements\n",
rte_socket_id(), NB_ARP_MBUF);
- plog_info("\t\tMempool %p (%s) size = %u * %u cache %u, socket %d\n", ret, name, NB_ARP_MBUF,
+ plog_info("\tMempool %p (%s) size = %u * %u cache %u, socket %d\n", ret, name, NB_ARP_MBUF,
ARP_MBUF_SIZE, NB_CACHE_ARP_MBUF, rte_socket_id());
tbase->l3.arp_nd_pool = ret;
}
@@ -1006,7 +1018,11 @@ static void handle_route_event(struct task_base *tbase)
struct rtmsg *rtmsg = (struct rtmsg *)NLMSG_DATA(nl_hdr);
int rtm_family = rtmsg->rtm_family;
- if ((rtm_family == AF_INET) && (rtmsg->rtm_table != RT_TABLE_MAIN) &&(rtmsg->rtm_table != RT_TABLE_LOCAL))
+ if (rtm_family != AF_INET) {
+ plog_warn("Unhandled non IPV4 routing message\n");
+ return;
+ }
+ if ((rtmsg->rtm_table != RT_TABLE_MAIN) && (rtmsg->rtm_table != RT_TABLE_LOCAL))
return;
int dst_len = rtmsg->rtm_dst_len;
@@ -1035,8 +1051,14 @@ static void handle_route_event(struct task_base *tbase)
}
int dpdk_vdev_port = -1;
for (int i = 0; i< prox_rte_eth_dev_count_avail(); i++) {
- if (strcmp(prox_port_cfg[i].name, interface_name) == 0)
- dpdk_vdev_port = i;
+ for (int vlan_id = 0; vlan_id < prox_port_cfg[i].n_vlans; vlan_id++) {
+ if (strcmp(prox_port_cfg[i].names[vlan_id], interface_name) == 0) {
+ dpdk_vdev_port = i;
+ break;
+ }
+ }
+ if (dpdk_vdev_port != -1)
+ break;
}
if (dpdk_vdev_port != -1) {
plogx_info("Received netlink message on tap interface %s for IP "IPv4_BYTES_FMT"/%d, Gateway "IPv4_BYTES_FMT"\n", interface_name, IP4(ip), dst_len, IP4(gw_ip));
diff --git a/VNFs/DPPD-PROX/handle_master.h b/VNFs/DPPD-PROX/handle_master.h
index 518906ed..dcd0a5f2 100644
--- a/VNFs/DPPD-PROX/handle_master.h
+++ b/VNFs/DPPD-PROX/handle_master.h
@@ -99,11 +99,11 @@ struct task_master {
struct pollfd route_fds;
};
-const char *actions_string[MAX_ACTIONS];
+extern const char *actions_string[MAX_ACTIONS];
void init_ctrl_plane(struct task_base *tbase);
-int (*handle_ctrl_plane)(struct task_base *tbase, struct rte_mbuf **mbuf, uint16_t n_pkts);
+extern int (*handle_ctrl_plane)(struct task_base *tbase, struct rte_mbuf **mbuf, uint16_t n_pkts);
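Turning these two file-scope objects into extern declarations, with the single definitions kept in handle_master.c, is most likely the usual fix for GCC 10+ defaulting to -fno-common: tentative definitions in a header included by several translation units otherwise fail to link with multiple-definition errors. The pattern in miniature (hypothetical names):

/* in a shared header: declaration only */
extern const char *names[3];

/* in exactly one .c file: the definition */
const char *names[3] = { "a", "b", "c" };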
static inline void tx_drop(struct rte_mbuf *mbuf)
{
diff --git a/VNFs/DPPD-PROX/handle_mirror.c b/VNFs/DPPD-PROX/handle_mirror.c
index 894ea799..73a5242c 100644
--- a/VNFs/DPPD-PROX/handle_mirror.c
+++ b/VNFs/DPPD-PROX/handle_mirror.c
@@ -24,6 +24,8 @@
#include "log.h"
#include "prox_port_cfg.h"
#include "quit.h"
+#include "prox_cksum.h"
+#include "prefetch.h"
/* Task that sends packets to multiple outputs. Note that in case of n
outputs, the output packet rate is n times the input packet
@@ -34,7 +36,9 @@
way to resolve this is to create deep copies of the packet. */
struct task_mirror {
struct task_base base;
- uint32_t n_dests;
+ uint32_t n_dests;
+ uint32_t multiplier;
+ uint32_t mirror_size;
};
struct task_mirror_copy {
@@ -55,14 +59,40 @@ static int handle_mirror_bulk(struct task_base *tbase, struct rte_mbuf **mbufs,
multiple times, the pointers are copied first. This copy is
used in each call to tx_pkt below. */
rte_memcpy(mbufs2, mbufs, sizeof(mbufs[0]) * n_pkts);
-
+ /* prefetch for optimization */
+ prox_rte_ether_hdr * hdr[MAX_PKT_BURST];
+ for (uint16_t j = 0; j < n_pkts; ++j) {
+ PREFETCH0(mbufs2[j]);
+ }
for (uint16_t j = 0; j < n_pkts; ++j) {
- rte_pktmbuf_refcnt_update(mbufs2[j], task->n_dests - 1);
+ hdr[j] = rte_pktmbuf_mtod(mbufs2[j], prox_rte_ether_hdr *);
+ PREFETCH0(hdr[j]);
+ }
+ for (uint16_t j = 0; j < n_pkts; ++j) {
+ rte_pktmbuf_refcnt_update(mbufs2[j], task->n_dests * task->multiplier - 1);
+ prox_rte_ipv4_hdr *pip = (prox_rte_ipv4_hdr *) (hdr[j] + 1);
+ if ((task->mirror_size != 0) && (hdr[j]->ether_type == ETYPE_IPv4) && ((pip->next_proto_id == IPPROTO_UDP) || (pip->next_proto_id == IPPROTO_TCP))) {
+ rte_pktmbuf_pkt_len(mbufs2[j]) = task->mirror_size;
+ rte_pktmbuf_data_len(mbufs2[j]) = task->mirror_size;
+ pip->total_length = rte_bswap16(task->mirror_size-sizeof(prox_rte_ether_hdr));
+ pip->hdr_checksum = 0;
+ prox_ip_cksum_sw(pip);
+ int l4_len = task->mirror_size - sizeof(prox_rte_ether_hdr) - sizeof(prox_rte_ipv4_hdr);
+ if (pip->next_proto_id == IPPROTO_UDP) {
+ prox_rte_udp_hdr *udp = (prox_rte_udp_hdr *)(((uint8_t *)pip) + sizeof(prox_rte_ipv4_hdr));
+ udp->dgram_len = rte_bswap16(l4_len);
+ prox_udp_cksum_sw(udp, l4_len, pip->src_addr, pip->dst_addr);
+ } else if (pip->next_proto_id == IPPROTO_TCP) {
+ prox_rte_tcp_hdr *tcp = (prox_rte_tcp_hdr *)(((uint8_t *)pip) + sizeof(prox_rte_ipv4_hdr));
+ prox_tcp_cksum_sw(tcp, l4_len, pip->src_addr, pip->dst_addr);
+ }
+ }
}
for (uint16_t j = 0; j < task->n_dests; ++j) {
memset(out, j, n_pkts);
-
- ret+= task->base.tx_pkt(&task->base, mbufs2, n_pkts, out);
+ for (uint16_t i = 0; i < task->multiplier; ++i) {
+ ret += task->base.tx_pkt(&task->base, mbufs2, n_pkts, out);
+ }
}
return ret;
}
@@ -110,8 +140,9 @@ static int handle_mirror_bulk_copy(struct task_base *tbase, struct rte_mbuf **mb
static void init_task_mirror(struct task_base *tbase, struct task_args *targ)
{
struct task_mirror *task = (struct task_mirror *)tbase;
-
task->n_dests = targ->nb_txports? targ->nb_txports : targ->nb_txrings;
+ task->multiplier = targ->multiplier? targ->multiplier : 1;
+ task->mirror_size = targ->mirror_size > 63? targ->mirror_size: 0;
}
static void init_task_mirror_copy(struct task_base *tbase, struct task_args *targ)
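[Editor's note] Two options are introduced here: multiplier (each destination now receives every packet multiplier times, hence the reference count is raised by n_dests * multiplier - 1 before the transmit loops) and mirror size (IPv4 UDP/TCP packets are truncated to that many bytes and the IP and L4 checksums are recomputed in software). Note that init_task_mirror only honours mirror size values above 63 bytes, so truncation below the minimum Ethernet frame size is silently disabled. The sketch below shows the same truncate-and-refix step using the stock DPDK helper for the IP header checksum; it assumes a single-segment mbuf and is not the PROX code, which uses its own prox_*_cksum_sw helpers.

    /* Sketch: shrink a single-segment mbuf to new_len bytes and refresh the
       IPv4 header checksum with the standard DPDK helper. */
    #include <rte_mbuf.h>
    #include <rte_ether.h>
    #include <rte_ip.h>

    static void truncate_ipv4(struct rte_mbuf *m, struct rte_ipv4_hdr *ip, uint16_t new_len)
    {
            rte_pktmbuf_data_len(m) = new_len;
            rte_pktmbuf_pkt_len(m) = new_len;
            ip->total_length = rte_cpu_to_be_16(new_len - sizeof(struct rte_ether_hdr));
            ip->hdr_checksum = 0;
            ip->hdr_checksum = rte_ipv4_cksum(ip);   /* header checksum only; L4 fixed separately */
    }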
diff --git a/VNFs/DPPD-PROX/handle_nat.c b/VNFs/DPPD-PROX/handle_nat.c
index 4cd2de22..ad0fcf45 100644
--- a/VNFs/DPPD-PROX/handle_nat.c
+++ b/VNFs/DPPD-PROX/handle_nat.c
@@ -47,7 +47,7 @@ struct task_nat {
struct pkt_eth_ipv4 {
prox_rte_ether_hdr ether_hdr;
prox_rte_ipv4_hdr ipv4_hdr;
-} __attribute__((packed));
+} __attribute__((packed)) __attribute__((__aligned__(2)));
static int handle_nat(struct task_nat *task, struct rte_mbuf *mbuf)
{
@@ -123,6 +123,7 @@ static int lua_to_hash_nat(struct lua_State *L, enum lua_place from, const char
.key_len = sizeof(ip_from),
.hash_func = rte_hash_crc,
.hash_func_init_val = 0,
+ .socket_id = socket,
};
ret_hash = rte_hash_create(&hash_params);
@@ -171,7 +172,7 @@ static void init_task_nat(struct task_base *tbase, struct task_args *targ)
PROX_PANIC(ret != 0, "Failed to load NAT table from lua:\n%s\n", get_lua_to_errors());
struct prox_port_cfg *port = find_reachable_port(targ);
if (port) {
- task->offload_crc = port->requested_tx_offload & (DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM);
+ task->offload_crc = port->requested_tx_offload & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_UDP_CKSUM);
}
}
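[Editor's note] Besides the offload-flag rename, the functional change in handle_nat.c is that the NAT lookup table is now created with an explicit socket_id, so its memory is allocated on the NUMA node of the core that uses it. A minimal sketch of the pattern (table name, key type and sizes are placeholders, not the PROX values):

    #include <rte_hash.h>
    #include <rte_hash_crc.h>
    #include <rte_lcore.h>

    static struct rte_hash *create_nat_hash(const char *name, uint32_t entries)
    {
            struct rte_hash_parameters params = {
                    .name = name,
                    .entries = entries,
                    .key_len = sizeof(uint32_t),      /* e.g. an IPv4 address as key */
                    .hash_func = rte_hash_crc,
                    .hash_func_init_val = 0,
                    .socket_id = rte_socket_id(),     /* allocate on the caller's NUMA node */
            };
            return rte_hash_create(&params);          /* NULL on failure */
    }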
diff --git a/VNFs/DPPD-PROX/handle_nsh.c b/VNFs/DPPD-PROX/handle_nsh.c
index 6d67f99d..a1df22fc 100644
--- a/VNFs/DPPD-PROX/handle_nsh.c
+++ b/VNFs/DPPD-PROX/handle_nsh.c
@@ -19,7 +19,7 @@
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_version.h>
-#if RTE_VERSION >= RTE_VERSION_NUM(19,11,0,0)
+#if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
#include <rte_vxlan.h>
#endif
@@ -68,26 +68,43 @@ static inline uint8_t handle_decap_nsh(__attribute__((unused)) struct task_decap
mbuf->data_len = (uint16_t)(mbuf->data_len - hdr_len);
mbuf->data_off += hdr_len;
mbuf->pkt_len = (uint32_t)(mbuf->pkt_len - hdr_len);
+#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
+ /* save length of header in the dynfield1 of rte_mbuf */
+ mbuf->dynfield1[0] = hdr_len;
+#else
/* save length of header in reserved 16bits of rte_mbuf */
mbuf->udata64 = hdr_len;
+#endif
}
else {
if (mbuf->data_len < VXLAN_GPE_HDR_SZ) {
+#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
+ mbuf->dynfield1[0] = 0;
+#else
mbuf->udata64 = 0;
+#endif
return 0;
}
/* check the UDP destination port */
udp_hdr = (prox_rte_udp_hdr *)(((unsigned char *)eth_hdr) + sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr));
if (udp_hdr->dst_port != VXLAN_GPE_NSH_TYPE) {
+#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
+ mbuf->dynfield1[0] = 0;
+#else
mbuf->udata64 = 0;
+#endif
return 0;
}
/* check the Next Protocol field in VxLAN-GPE header */
vxlan_gpe_hdr = (prox_rte_vxlan_gpe_hdr *)(((unsigned char *)eth_hdr) + sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + sizeof(prox_rte_udp_hdr));
if (vxlan_gpe_hdr->proto != VXLAN_GPE_NP) {
+#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
+ mbuf->dynfield1[0] = 0;
+#else
mbuf->udata64 = 0;
+#endif
return 0;
}
@@ -97,8 +114,13 @@ static inline uint8_t handle_decap_nsh(__attribute__((unused)) struct task_decap
mbuf->data_len = (uint16_t)(mbuf->data_len - hdr_len);
mbuf->data_off += hdr_len;
mbuf->pkt_len = (uint32_t)(mbuf->pkt_len - hdr_len);
+#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
+ /* save length of header in the dynfield1 of rte_mbuf */
+ mbuf->dynfield1[0] = hdr_len;
+#else
/* save length of header in reserved 16bits of rte_mbuf */
mbuf->udata64 = hdr_len;
+#endif
}
return 0;
@@ -143,14 +165,26 @@ static inline uint8_t handle_encap_nsh(__attribute__((unused)) struct task_encap
if (mbuf == NULL)
return 0;
+#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
+ if (mbuf->dynfield1[0] == 0)
+#else
if (mbuf->udata64 == 0)
+#endif
return 0;
+#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
+ /* use header length saved in dynfield1 of rte_mbuf to
+ "encapsulate" transport + NSH header by moving packet pointer */
+ mbuf->data_len = (uint16_t)(mbuf->data_len + mbuf->dynfield1[0]);
+ mbuf->data_off -= mbuf->dynfield1[0];
+ mbuf->pkt_len = (uint32_t)(mbuf->pkt_len + mbuf->dynfield1[0]);
+#else
/* use header length saved in reserved 16bits of rte_mbuf to
"encapsulate" transport + NSH header by moving packet pointer */
mbuf->data_len = (uint16_t)(mbuf->data_len + mbuf->udata64);
mbuf->data_off -= mbuf->udata64;
mbuf->pkt_len = (uint32_t)(mbuf->pkt_len + mbuf->udata64);
+#endif
eth_hdr = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
if (eth_hdr->ether_type == ETHER_NSH_TYPE) {
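[Editor's note] DPDK 20.11 removed the udata64 scratch field from rte_mbuf, which is why every store and load of the saved header length above is wrapped in a version check and redirected to dynfield1[0]. Writing dynfield1 directly works, but the documented alternative since 20.11 is to register a named dynamic field. Sketch below, with a hypothetical field name; this is not what the patch does, it is shown only for context.

    #include <stdint.h>
    #include <rte_mbuf.h>
    #include <rte_mbuf_dyn.h>

    static int hdr_len_offset = -1;   /* offset of our per-mbuf field, set once */

    /* call once after rte_eal_init() */
    static int register_hdr_len_field(void)
    {
            static const struct rte_mbuf_dynfield desc = {
                    .name = "prox_nsh_hdr_len",       /* hypothetical name */
                    .size = sizeof(uint32_t),
                    .align = __alignof__(uint32_t),
            };
            hdr_len_offset = rte_mbuf_dynfield_register(&desc);
            return hdr_len_offset;                    /* -1 on failure */
    }

    static inline void set_hdr_len(struct rte_mbuf *m, uint32_t len)
    {
            *RTE_MBUF_DYNFIELD(m, hdr_len_offset, uint32_t *) = len;
    }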
diff --git a/VNFs/DPPD-PROX/handle_qinq_decap4.c b/VNFs/DPPD-PROX/handle_qinq_decap4.c
index 94efbb1f..2a5bfc7f 100644
--- a/VNFs/DPPD-PROX/handle_qinq_decap4.c
+++ b/VNFs/DPPD-PROX/handle_qinq_decap4.c
@@ -148,7 +148,7 @@ static void init_task_qinq_decap4(struct task_base *tbase, struct task_args *tar
struct prox_port_cfg *port = find_reachable_port(targ);
if (port) {
- task->offload_crc = port->requested_tx_offload & (DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM);
+ task->offload_crc = port->requested_tx_offload & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_UDP_CKSUM);
}
// By default, calling this function 1K times per second => 64K ARP per second max
@@ -183,6 +183,10 @@ __attribute__((cold)) static void handle_error(struct rte_mbuf *mbuf)
svlan = rte_be_to_cpu_16(svlan & 0xFF0F);
cvlan = rte_be_to_cpu_16(cvlan & 0xFF0F);
+#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
+ plogx_err("Can't convert key %016lx qinq %d|%d (%x|%x) to gre_id, rss=%x flags=%lx, status_err_len=%x, L2Tag=%d type=%d\n",
+ key, svlan, cvlan, svlan, cvlan, mbuf->hash.rss, mbuf->ol_flags, mbuf->dynfield1[0], mbuf->vlan_tci_outer, mbuf->packet_type);
+#else
#if RTE_VERSION >= RTE_VERSION_NUM(2,1,0,0)
plogx_err("Can't convert key %016lx qinq %d|%d (%x|%x) to gre_id, rss=%x flags=%lx, status_err_len=%lx, L2Tag=%d type=%d\n",
key, svlan, cvlan, svlan, cvlan, mbuf->hash.rss, mbuf->ol_flags, mbuf->udata64, mbuf->vlan_tci_outer, mbuf->packet_type);
@@ -195,6 +199,7 @@ __attribute__((cold)) static void handle_error(struct rte_mbuf *mbuf)
key, svlan, cvlan, svlan, cvlan, mbuf->ol_flags, mbuf->reserved);
#endif
#endif
+#endif
#else
plogx_err("Can't convert ip %x to gre_id\n", rte_bswap32(packet->ipv4_hdr.src_addr));
#endif
diff --git a/VNFs/DPPD-PROX/handle_qinq_encap4.c b/VNFs/DPPD-PROX/handle_qinq_encap4.c
index ffd9356a..0b707b7a 100644
--- a/VNFs/DPPD-PROX/handle_qinq_encap4.c
+++ b/VNFs/DPPD-PROX/handle_qinq_encap4.c
@@ -152,7 +152,7 @@ static void init_task_qinq_encap4(struct task_base *tbase, struct task_args *tar
struct prox_port_cfg *port = find_reachable_port(targ);
if (port) {
- task->offload_crc = port->requested_tx_offload & (DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM);
+ task->offload_crc = port->requested_tx_offload & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_UDP_CKSUM);
}
/* TODO: check if it is not necessary to limit reverse mapping
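[Editor's note] The same rename recurs across the handlers: DEV_TX_OFFLOAD_* became RTE_ETH_TX_OFFLOAD_* when DPDK prefixed its public ethdev symbols (around release 21.11; the old names lingered for a while as deprecated aliases). The patch simply switches to the new names. If the code still had to build against pre-rename releases, a small shim such as the sketch below would avoid sprinkling version checks; noted here only as an illustration.

    #include <rte_ethdev.h>

    /* Map the new names onto the old macros when building against older DPDK. */
    #ifndef RTE_ETH_TX_OFFLOAD_IPV4_CKSUM
    #define RTE_ETH_TX_OFFLOAD_IPV4_CKSUM DEV_TX_OFFLOAD_IPV4_CKSUM
    #define RTE_ETH_TX_OFFLOAD_UDP_CKSUM  DEV_TX_OFFLOAD_UDP_CKSUM
    #endif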
diff --git a/VNFs/DPPD-PROX/handle_qos.c b/VNFs/DPPD-PROX/handle_qos.c
index 5af7a310..de9548f6 100644
--- a/VNFs/DPPD-PROX/handle_qos.c
+++ b/VNFs/DPPD-PROX/handle_qos.c
@@ -135,7 +135,11 @@ static void init_task_qos(struct task_base *tbase, struct task_args *targ)
PROX_PANIC(task->sched_port == NULL, "failed to create sched_port");
plog_info("number of pipes: %d\n\n", targ->qos_conf.port_params.n_pipes_per_subport);
+#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
+ int err = rte_sched_subport_config(task->sched_port, 0, targ->qos_conf.subport_params, 0);
+#else
int err = rte_sched_subport_config(task->sched_port, 0, targ->qos_conf.subport_params);
+#endif
PROX_PANIC(err != 0, "Failed setting up sched_port subport, error: %d", err);
/* only single subport and single pipe profile is supported */
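[Editor's note] rte_sched_subport_config gained a fourth argument (a subport profile id) in DPDK 20.11; the patch passes profile 0 on new releases and keeps the three-argument call otherwise. If the version check had to be repeated elsewhere, a thin wrapper like the hypothetical prox_subport_config below would keep it in one place:

    #include <rte_sched.h>
    #include <rte_version.h>

    static inline int prox_subport_config(struct rte_sched_port *port, uint32_t subport,
                    struct rte_sched_subport_params *params)
    {
    #if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
            return rte_sched_subport_config(port, subport, params, 0);  /* profile 0 assumed */
    #else
            return rte_sched_subport_config(port, subport, params);
    #endif
    }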
diff --git a/VNFs/DPPD-PROX/handle_swap.c b/VNFs/DPPD-PROX/handle_swap.c
index e097bc05..503af598 100644
--- a/VNFs/DPPD-PROX/handle_swap.c
+++ b/VNFs/DPPD-PROX/handle_swap.c
@@ -32,9 +32,17 @@
#include "prox_cksum.h"
#include "prox_compat.h"
+#define MAX_STORE_PKT_SIZE 2048
+
+struct packet {
+ unsigned int len;
+ unsigned char buf[MAX_STORE_PKT_SIZE];
+};
+
struct task_swap {
struct task_base base;
struct rte_mempool *igmp_pool;
+ uint32_t flags;
uint32_t runtime_flags;
uint32_t igmp_address;
uint8_t src_dst_mac[12];
@@ -44,34 +52,40 @@ struct task_swap {
uint64_t last_echo_rep_rcvd_tsc;
uint32_t n_echo_req;
uint32_t n_echo_rep;
+ uint32_t store_pkt_id;
+ uint32_t store_msk;
+ struct packet *store_buf;
+ FILE *fp;
};
#define NB_IGMP_MBUF 1024
#define IGMP_MBUF_SIZE 2048
#define NB_CACHE_IGMP_MBUF 256
+#define GENEVE_PORT 0xc117 // in be
+
static void write_src_and_dst_mac(struct task_swap *task, struct rte_mbuf *mbuf)
{
prox_rte_ether_hdr *hdr;
prox_rte_ether_addr mac;
- if (unlikely((task->runtime_flags & (TASK_ARG_DST_MAC_SET|TASK_ARG_SRC_MAC_SET)) == (TASK_ARG_DST_MAC_SET|TASK_ARG_SRC_MAC_SET))) {
+ if (unlikely((task->flags & (TASK_ARG_DST_MAC_SET|TASK_ARG_SRC_MAC_SET)) == (TASK_ARG_DST_MAC_SET|TASK_ARG_SRC_MAC_SET))) {
/* Source and Destination mac hardcoded */
hdr = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
rte_memcpy(hdr, task->src_dst_mac, sizeof(task->src_dst_mac));
} else {
hdr = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
- if (likely((task->runtime_flags & TASK_ARG_SRC_MAC_SET) == 0)) {
+ if (unlikely((task->flags & TASK_ARG_SRC_MAC_SET) == 0)) {
/* dst mac will be used as src mac */
prox_rte_ether_addr_copy(&hdr->d_addr, &mac);
}
- if (unlikely(task->runtime_flags & TASK_ARG_DST_MAC_SET))
+ if (unlikely(task->flags & TASK_ARG_DST_MAC_SET))
prox_rte_ether_addr_copy((prox_rte_ether_addr *)&task->src_dst_mac[0], &hdr->d_addr);
else
prox_rte_ether_addr_copy(&hdr->s_addr, &hdr->d_addr);
- if (unlikely(task->runtime_flags & TASK_ARG_SRC_MAC_SET)) {
+ if (likely(task->flags & TASK_ARG_SRC_MAC_SET)) {
prox_rte_ether_addr_copy((prox_rte_ether_addr *)&task->src_dst_mac[6], &hdr->s_addr);
} else {
prox_rte_ether_addr_copy(&mac, &hdr->s_addr);
@@ -136,11 +150,34 @@ static inline void build_igmp_message(struct task_base *tbase, struct rte_mbuf *
static void stop_swap(struct task_base *tbase)
{
+ uint32_t i, j;
struct task_swap *task = (struct task_swap *)tbase;
+
if (task->igmp_pool) {
rte_mempool_free(task->igmp_pool);
task->igmp_pool = NULL;
}
+
+ if (task->store_msk) {
+ for (i = task->store_pkt_id & task->store_msk; i < task->store_msk + 1; i++) {
+ if (task->store_buf[i].len) {
+ fprintf(task->fp, "%06d: ", i);
+ for (j = 0; j < task->store_buf[i].len; j++) {
+ fprintf(task->fp, "%02x ", task->store_buf[i].buf[j]);
+ }
+ fprintf(task->fp, "\n");
+ }
+ }
+ for (i = 0; i < (task->store_pkt_id & task->store_msk); i++) {
+ if (task->store_buf[i].len) {
+ fprintf(task->fp, "%06d: ", i);
+ for (j = 0; j < task->store_buf[i].len; j++) {
+ fprintf(task->fp, "%02x ", task->store_buf[i].buf[j]);
+ }
+ fprintf(task->fp, "\n");
+ }
+ }
+ }
}
static void handle_ipv6(struct task_swap *task, struct rte_mbuf *mbufs, prox_rte_ipv6_hdr *ipv6_hdr, uint8_t *out)
@@ -196,6 +233,9 @@ static int handle_swap_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, ui
struct igmpv2_hdr *pigmp;
prox_rte_icmp_hdr *picmp;
uint8_t type;
+ static int llc_printed = 0;
+ static int lldp_printed = 0;
+ static int geneve_printed = 0;
for (j = 0; j < n_pkts; ++j) {
PREFETCH0(mbufs[j]);
@@ -279,9 +319,31 @@ static int handle_swap_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, ui
handle_ipv6(task, mbufs[j], ipv6_hdr, &out[j]);
continue;
case ETYPE_LLDP:
+ if (!lldp_printed) {
+ plog_info("Discarding LLDP packets (only printed once)\n");
+ lldp_printed = 1;
+ }
out[j] = OUT_DISCARD;
continue;
default:
+ if ((rte_bswap16(hdr->ether_type) < 0x600) && (rte_bswap16(hdr->ether_type) >= 16)) {
+ // 802.3
+ struct prox_llc {
+ uint8_t dsap;
+ uint8_t lsap;
+ uint8_t control;
+ };
+ struct prox_llc *llc = (struct prox_llc *)(hdr + 1);
+ if ((llc->dsap == 0x42) && (llc->lsap == 0x42)) {
+ // STP Protocol
+ out[j] = OUT_DISCARD;
+ if (!llc_printed) {
+ plog_info("Discarding STP packets (only printed once)\n");
+ llc_printed = 1;
+ }
+ continue;
+ }
+ }
plog_warn("Unsupported ether_type 0x%x\n", hdr->ether_type);
out[j] = OUT_DISCARD;
continue;
@@ -320,10 +382,19 @@ static int handle_swap_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, ui
continue;
}
udp_hdr = (prox_rte_udp_hdr *)(ip_hdr + 1);
+ port = udp_hdr->dst_port;
ip_hdr->dst_addr = ip_hdr->src_addr;
ip_hdr->src_addr = ip;
- port = udp_hdr->dst_port;
+ if ((port == GENEVE_PORT) && (task->runtime_flags & TASK_DO_NOT_FWD_GENEVE)) {
+ if (!geneve_printed) {
+ plog_info("Discarding geneve (only printed once)\n");
+ geneve_printed = 1;
+ }
+ out[j] = OUT_DISCARD;
+ continue;
+ }
+
udp_hdr->dst_port = udp_hdr->src_port;
udp_hdr->src_port = port;
write_src_and_dst_mac(task, mbufs[j]);
@@ -393,6 +464,16 @@ static int handle_swap_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, ui
continue;
}
}
+ if (task->store_msk) {
+ for (int i = 0; i < n_pkts; i++) {
+ if (out[i] != OUT_DISCARD) {
+ hdr = rte_pktmbuf_mtod(mbufs[i], prox_rte_ether_hdr *);
+ memcpy(&task->store_buf[task->store_pkt_id & task->store_msk].buf, hdr, rte_pktmbuf_pkt_len(mbufs[i]));
+ task->store_buf[task->store_pkt_id & task->store_msk].len = rte_pktmbuf_pkt_len(mbufs[i]);
+ task->store_pkt_id++;
+ }
+ }
+ }
return task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
}
@@ -472,7 +553,8 @@ static void init_task_swap(struct task_base *tbase, struct task_args *targ)
plog_info("\t\tCore %d: src mac set from port\n", targ->lconf->id);
}
}
- task->runtime_flags = targ->flags;
+ task->flags = targ->flags;
+ task->runtime_flags = targ->runtime_flags;
task->igmp_address = rte_cpu_to_be_32(targ->igmp_address);
if (task->igmp_pool == NULL) {
static char name[] = "igmp0_pool";
@@ -490,7 +572,19 @@ static void init_task_swap(struct task_base *tbase, struct task_args *targ)
struct prox_port_cfg *port = find_reachable_port(targ);
if (port) {
- task->offload_crc = port->requested_tx_offload & (DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM);
+ task->offload_crc = port->requested_tx_offload & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_UDP_CKSUM);
+ }
+ task->store_pkt_id = 0;
+ if (targ->store_max) {
+ char filename[256];
+ sprintf(filename, "swap_buf_%02d_%02d", targ->lconf->id, targ->task);
+
+ task->store_msk = targ->store_max - 1;
+ task->store_buf = (struct packet *)malloc(sizeof(struct packet) * targ->store_max);
+ task->fp = fopen(filename, "w+");
+ PROX_PANIC(task->fp == NULL, "Unable to open %s\n", filename);
+ } else {
+ task->store_msk = 0;
}
}
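[Editor's note] The new store_max option turns the swap task into a lightweight capture tool: forwarded packets are copied into a circular buffer and hex-dumped to swap_buf_<core>_<task> when the task stops. Because store_msk is computed as store_max - 1 and used as a bit mask, store_max is assumed to be a power of two; stop_swap dumps from (store_pkt_id & store_msk) upward first so the oldest entry comes out before the buffer wrap. A minimal sketch of the ring-store pattern (illustrative names, not the PROX structures):

    #include <stdint.h>
    #include <string.h>

    #define MAX_STORE_PKT_SIZE 2048

    struct packet {
            unsigned int len;
            unsigned char buf[MAX_STORE_PKT_SIZE];
    };

    struct pkt_store {
            uint32_t id;           /* monotonically increasing packet counter */
            uint32_t msk;          /* store_max - 1, store_max a power of two */
            struct packet *buf;    /* store_max slots */
    };

    static void store_pkt(struct pkt_store *s, const void *data, uint32_t len)
    {
            struct packet *slot = &s->buf[s->id & s->msk];
            if (len > MAX_STORE_PKT_SIZE)
                    len = MAX_STORE_PKT_SIZE;   /* clamp added here; the patch copies pkt_len as-is */
            memcpy(slot->buf, data, len);
            slot->len = len;
            s->id++;
    }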
diff --git a/VNFs/DPPD-PROX/handle_tsc.c b/VNFs/DPPD-PROX/handle_tsc.c
index 245fe7a2..da0afea7 100644
--- a/VNFs/DPPD-PROX/handle_tsc.c
+++ b/VNFs/DPPD-PROX/handle_tsc.c
@@ -31,7 +31,11 @@ static int handle_bulk_tsc(struct task_base *tbase, struct rte_mbuf **mbufs, uin
const uint64_t rx_tsc = rte_rdtsc();
for (uint16_t j = 0; j < n_pkts; ++j)
+#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
+ memcpy(&mbufs[j]->dynfield1[0], &rx_tsc, sizeof(rx_tsc));
+#else
mbufs[j]->udata64 = rx_tsc;
+#endif
return task->base.tx_pkt(&task->base, mbufs, n_pkts, NULL);
}
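[Editor's note] handle_tsc stamps each mbuf with the 64-bit RX timestamp. On DPDK 20.11 and newer the value is memcpy'd into dynfield1, an array of 32-bit words, so it spans dynfield1[0] and dynfield1[1]; a consumer has to copy it back out the same way. Sketch (assumes DPDK >= 20.11):

    #include <stdint.h>
    #include <string.h>
    #include <rte_mbuf.h>

    static inline uint64_t read_rx_tsc(const struct rte_mbuf *m)
    {
            uint64_t tsc;
            memcpy(&tsc, &m->dynfield1[0], sizeof(tsc));  /* occupies two 32-bit slots */
            return tsc;
    }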
diff --git a/VNFs/DPPD-PROX/hash_utils.c b/VNFs/DPPD-PROX/hash_utils.c
index ad746d5c..3922ef0f 100644
--- a/VNFs/DPPD-PROX/hash_utils.c
+++ b/VNFs/DPPD-PROX/hash_utils.c
@@ -14,6 +14,11 @@
// limitations under the License.
*/
+#include <rte_common.h>
+#ifndef __rte_cache_aligned
+#include <rte_memory.h>
+#endif
+
#include <string.h>
#include <rte_hash_crc.h>
#include <rte_table_hash.h>
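[Editor's note] hash_utils.c now includes rte_common.h first and falls back to rte_memory.h when __rte_cache_aligned is still undefined, which keeps the file building across DPDK releases that provide the macro from different headers. The macro itself just cache-line-aligns a type so adjacent per-core entries never share a line (avoiding false sharing). Minimal usage sketch:

    #include <stdint.h>
    #include <rte_memory.h>   /* provides __rte_cache_aligned in most DPDK releases */

    struct per_core_counter {
            uint64_t hits;
            uint64_t misses;
    } __rte_cache_aligned;    /* padded/aligned to RTE_CACHE_LINE_SIZE */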
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/Dockerfile b/VNFs/DPPD-PROX/helper-scripts/rapid/Dockerfile
index c4a4b063..fef0fcaf 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/Dockerfile
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/Dockerfile
@@ -17,36 +17,101 @@
##################################################
# Build all components in separate builder image #
##################################################
-FROM centos:7 as builder
-ARG BUILD_DIR=/opt/rapid
+FROM ubuntu:20.04 as builder
-COPY ./port_info ${BUILD_DIR}/port_info
+ARG DPDK_VERSION=22.07
+ENV DPDK_VERSION=${DPDK_VERSION}
-COPY ./deploycentostools.sh ${BUILD_DIR}/
-RUN chmod +x ${BUILD_DIR}/deploycentostools.sh \
- && ${BUILD_DIR}/deploycentostools.sh -k deploy
+ARG BUILD_DIR="/opt/rapid"
+ENV BUILD_DIR=${BUILD_DIR}
+
+ENV DEBIAN_FRONTEND=noninteractive
+
+# Install Dependencies
+RUN apt update && apt -y install git wget gcc unzip libpcap-dev libncurses5-dev \
+ libedit-dev liblua5.3-dev linux-headers-generic iperf3 pciutils \
+ libnuma-dev vim tuna wireshark make driverctl openssh-server sudo \
+ meson python3-pyelftools pkg-config
+
+WORKDIR ${BUILD_DIR}
+
+# Install DPDK
+RUN wget http://fast.dpdk.org/rel/dpdk-${DPDK_VERSION}.tar.xz \
+ && tar -xf ./dpdk-${DPDK_VERSION}.tar.xz \
+ && cd dpdk-${DPDK_VERSION} \
+ && meson build -Dlibdir=lib/x86_64-linux-gnu -Denable_driver_sdk=true \
+ && ninja -C build install
+
+WORKDIR ${BUILD_DIR}
+
+# Install Prox
+RUN git clone https://gerrit.opnfv.org/gerrit/samplevnf \
+ && cd samplevnf/VNFs/DPPD-PROX \
+ && COMMIT_ID=$(git rev-parse HEAD) \
+ && echo "${COMMIT_ID}" > ${BUILD_DIR}/commit_id \
+ && meson build \
+ && ninja -C build \
+ && cp ${BUILD_DIR}/samplevnf/VNFs/DPPD-PROX/build/prox ${BUILD_DIR}/prox
+
+# Build and copy port info app
+WORKDIR ${BUILD_DIR}/samplevnf/VNFs/DPPD-PROX/helper-scripts/rapid/port_info
+RUN meson build \
+ && ninja -C build \
+ && cp ${BUILD_DIR}/samplevnf/VNFs/DPPD-PROX/helper-scripts/rapid/port_info/build/port_info_app ${BUILD_DIR}/port_info_app
+
+RUN ldconfig && pkg-config --modversion libdpdk > ${BUILD_DIR}/dpdk_version
+# Create Minimal Install
+RUN ldd ${BUILD_DIR}/prox | awk '$2 ~ /=>/ {print $3}' >> ${BUILD_DIR}/list_of_install_components \
+ && echo "${BUILD_DIR}/prox" >> ${BUILD_DIR}/list_of_install_components \
+ && echo "${BUILD_DIR}/port_info_app" >> ${BUILD_DIR}/list_of_install_components \
+ && echo "${BUILD_DIR}/commit_id" >> ${BUILD_DIR}/list_of_install_components \
+ && echo "${BUILD_DIR}/dpdk_version" >> ${BUILD_DIR}/list_of_install_components \
+ && find /usr/local/lib/x86_64-linux-gnu -not -path '*/\.*' >> ${BUILD_DIR}/list_of_install_components \
+ && tar -czvhf ${BUILD_DIR}/install_components.tgz -T ${BUILD_DIR}/list_of_install_components
#############################
# Create slim runtime image #
#############################
-FROM centos:7
+FROM ubuntu:20.04
+
+ARG BUILD_DIR="/opt/rapid"
+ENV BUILD_DIR=${BUILD_DIR}
+
+ENV DEBIAN_FRONTEND=noninteractive
-ARG BUILD_DIR=/opt/rapid
+# Install Runtime Dependencies
+RUN apt update -y
+# Install required dynamically linked libraries + required packages
+RUN apt -y install sudo openssh-server libatomic1
-COPY ./deploycentostools.sh ${BUILD_DIR}/
COPY --from=builder ${BUILD_DIR}/install_components.tgz ${BUILD_DIR}/install_components.tgz
-RUN chmod a+rwx ${BUILD_DIR} && chmod +x ${BUILD_DIR}/deploycentostools.sh \
- && ${BUILD_DIR}/deploycentostools.sh -k runtime_image
+WORKDIR /
+RUN tar -xvf ${BUILD_DIR}/install_components.tgz --skip-old-files
+RUN ldconfig
+RUN rm ${BUILD_DIR}/install_components.tgz
# Expose SSH and PROX ports
EXPOSE 22 8474
+RUN useradd -rm -d /home/rapid -s /bin/bash -g root -G sudo -u 1000 rapid \
+ && chmod 777 ${BUILD_DIR} \
+ && echo 'rapid:rapid' | chpasswd \
+ && mkdir /home/rapid/.ssh
+
# Copy SSH keys
-COPY ./rapid_rsa_key.pub /home/centos/.ssh/authorized_keys
+COPY ./rapid_rsa_key.pub /home/rapid/.ssh/authorized_keys
COPY ./rapid_rsa_key.pub /root/.ssh/authorized_keys
+RUN chown rapid:root /home/rapid/.ssh/authorized_keys \
+ && chmod 600 /home/rapid/.ssh/authorized_keys \
+ && chown root:root /root/.ssh/authorized_keys \
+ && chmod 600 /root/.ssh/authorized_keys
+
+#RUN apt-get clean && apt autoremove --purge
+RUN apt-get autoremove -y && apt-get clean all && rm -rf /var/cache/apt
+
# Copy startup script
COPY ./start.sh /start.sh
RUN chmod +x /start.sh
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/check_prox_system_setup.sh b/VNFs/DPPD-PROX/helper-scripts/rapid/check_prox_system_setup.sh
index 84e2f70f..3cf1113d 100755
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/check_prox_system_setup.sh
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/check_prox_system_setup.sh
@@ -1,6 +1,6 @@
#!/usr/bin/env bash
##
-## Copyright (c) 2010-2020 Intel Corporation
+## Copyright (c) 2010-2021 Intel Corporation
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
@@ -14,43 +14,65 @@
## See the License for the specific language governing permissions and
## limitations under the License.
##
+## This script should run after booting: see check-prox-system-setup.service
+
NCPUS="$(lscpu | egrep '^CPU\(s\):' | awk '{ print $2 }')"
MAXCOREID="$((NCPUS-1))"
-filename="/etc/tuned/realtime-virtual-guest-variables.conf"
-logfile="/opt/rapid/prox_system_setup.log"
-if [ -f "$filename" ]
+tuned_config="/etc/tuned/realtime-virtual-guest-variables.conf"
+log_file="/opt/rapid/prox_system_setup.log"
+system_ready="/opt/rapid/system_ready_for_rapid"
+tuned_done="/opt/rapid/tuned_done"
+after_boot_file="/opt/rapid/after_boot.sh"
+
+tuned_and_reboot () {
+ echo "Applying tuned profile">>$log_file
+ tuned-adm profile realtime-virtual-guest
+ touch "$tuned_done"
+ echo "Rebooting...">>$log_file
+ reboot
+ exit 0
+}
+
+if [ -f "$tuned_config" ]
then
while read -r line
do
case $line in
isolated_cores=1-$MAXCOREID*)
- echo "Isolated CPU(s) OK, no reboot: $line">>$logfile
- FILE=/opt/rapid/after_boot.sh
- if test -f "$FILE"; then
- ("$FILE")
- echo "Executing: $FILE">>$logfile
+ if test ! -f "$tuned_done"; then
+ tuned_and_reboot
fi
- touch /opt/rapid/system_ready_for_rapid
+ if test -f "$after_boot_file"; then
+ echo "Executing: $after_boot_file">>$log_file
+ ("$after_boot_file")
+ fi
+ echo "Isolated CPU(s) OK, no reboot: $line">>$log_file
+ ## rapid scripts will wait for the system_ready file to exist
+ ## Only then, they will be able to connect to the PROX instance
+ ## and start the testing
+ touch "$system_ready"
+ ## On some systems, we still need to use the igb_uio driver.
+ ## Example: good performance on AWS with the ENA interface.
+ ## Make sure that you change devbind.sh to use the preferred
+ ## driver. vfio is the default.
+ modprobe uio
+ insmod /opt/rapid/dpdk/build/kmod/igb_uio.ko wc_activate=1
exit 0
;;
isolated_cores=*)
- echo "Isolated CPU(s) NOK, change the config and reboot: $line">>$logfile
- sed -i "/^isolated_cores=.*/c\isolated_cores=1-$MAXCOREID" $filename
- tuned-adm profile realtime-virtual-guest
- reboot
- exit 0
+ echo "Isolated CPU(s) NOK: $line">>$log_file
+ sed -i "/^isolated_cores=.*/c\isolated_cores=1-$MAXCOREID" $tuned_config
+ tuned_and_reboot
;;
*)
echo "$line"
;;
esac
- done < "$filename"
- echo "isolated_cores=1-$MAXCOREID" >> $filename
- echo "No Isolated CPU(s) defined in config, line added: $line">>$logfile
- tuned-adm profile realtime-virtual-guest
- reboot
+ done < "$tuned_config"
+ echo "isolated_cores=1-$MAXCOREID" >> $tuned_config
+ echo "No Isolated CPU(s) defined in config, line added: isolated_cores=1-$MAXCOREID">>$log_file
+ tuned_and_reboot
else
- echo "$filename not found.">>$logfile
+ echo "$tuned_config not found.">>$log_file
fi
-
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/config_file b/VNFs/DPPD-PROX/helper-scripts/rapid/config_file
index f31ed25e..b5aeb3a9 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/config_file
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/config_file
@@ -3,6 +3,6 @@ cloud_name = openstackL6
stack_name = rapid
heat_template= openstack-rapid.yaml
heat_param = params_rapid.yaml
-keypair_name = rapid_key
user = centos
+dataplane_subnet_mask = 24
;push_gateway = http://192.168.36.61:9091/metrics/job/
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/configs/cgnat.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/cgnat.cfg
new file mode 100644
index 00000000..75267f35
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/cgnat.cfg
@@ -0,0 +1,81 @@
+;;
+;; Copyright (c) 2021 Intel Corporation
+;;
+;; Licensed under the Apache License, Version 2.0 (the "License");
+;; you may not use this file except in compliance with the License.
+;; You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+;;
+
+[lua]
+dofile("parameters.lua")
+public_start_ip = string.match(dest_ip1,"%d+\.%d+\.%d+\.")..2
+public_stop_ip = string.match(dest_ip1,"%d+\.%d+\.%d+\.")..20
+cgnat_table = {}
+cgnat_table.dynamic = {
+ {public_ip_range_start = ip(public_start_ip),public_ip_range_stop = ip(public_stop_ip), public_port = val_range(10,20000)},
+}
+lpm4 = {}
+lpm4.next_hops = {
+ {id = 0, port_id = 0, ip = ip("1.1.1.1"), mac = mac("00:00:00:00:00:01"), mpls = 0x212},
+}
+lpm4.routes = {};
+lpm4.routes[1] = {
+ cidr = {ip = ip(0), depth = 1},
+ next_hop_id = 0,
+}
+
+[eal options]
+-n=4 ; force number of memory channels
+no-output=no ; disable DPDK debug output
+eal=--proc-type auto ${eal}
+
+[port 0]
+name=if0
+mac=hardware
+vlan=yes
+vdev=internal_tap
+local ipv4=${local_ip1}
+
+[port 1]
+name=if1
+mac=hardware
+vlan=yes
+vdev=external_tap
+local ipv4=${local_ip2}
+
+[defaults]
+mempool size=8K
+
+[global]
+name=${name}
+
+[core $mcore]
+mode=master
+
+[core $cores]
+name=nat
+task=0
+mode=cgnat
+sub mode=l3
+private=yes
+nat table=cgnat_table
+route table=lpm4
+rx port=if0
+tx ports from routing table=if1
+
+task=1
+mode=cgnat
+sub mode=l3
+private=no
+nat table=cgnat_table
+route table=lpm4
+rx port=if1
+tx ports from routing table=if0
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/configs/esp.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/esp.cfg
new file mode 100644
index 00000000..31728daf
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/esp.cfg
@@ -0,0 +1,47 @@
+[lua]
+dofile("parameters.lua")
+
+[eal options]
+-n=4 ; force number of memory channels
+no-output=no ; disable DPDK debug output
+eal=--proc-type auto ${eal}
+
+[port 0]
+name=if0
+mac=hardware
+rx desc=2048
+tx desc=2048
+vlan=yes
+vdev=esp_tap
+local ipv4=$local_ip1
+
+[defaults]
+mempool size=64K
+
+[global]
+name=${name}
+
+[core $mcore]
+mode=master
+
+[core $cores]
+name=enc
+task=0
+mode=esp_enc
+sub mode=l3
+remote ipv4=$dest_ip1
+rx port=if0
+tx cores=$altcores task=0
+drop=yes
+
+
+[core $altcores]
+name=dec
+task=0
+mode=esp_dec
+sub mode=l3
+remote ipv4=$dest_ip1
+rx ring=yes
+tx port=if0
+drop=yes
+
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/gen.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/gen.cfg
index 1827395f..8d3f8581 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/gen.cfg
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/gen.cfg
@@ -41,7 +41,7 @@ mempool size=8K
name=${name}
heartbeat timeout=${heartbeat}
-[core 0]
+[core $mcore]
mode=master
[core $gencores]
@@ -50,7 +50,7 @@ task=0
mode=gen
sub mode=l3
tx port=p0
-bps=1250000000
+bps=1250000
pkt inline=00 00 00 00 00 00 00 00 00 00 00 00 08 00 45 00 00 2e 00 01 00 00 40 11 f7 7d ${local_hex_ip1} ${dest_hex_ip1} 0b b8 0b b9 00 1a 55 7b
pkt size=60
min bulk size=$mbs
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/gen_gw.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/gen_gw.cfg
index fc3b6a68..8a477e5f 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/gen_gw.cfg
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/gen_gw.cfg
@@ -40,7 +40,7 @@ mempool size=8K
name=${name}
heartbeat timeout=${heartbeat}
-[core 0]
+[core $mcore]
mode=master
[core $gencores]
@@ -49,7 +49,7 @@ task=0
mode=gen
sub mode=l3
tx port=p0
-bps=1250000000
+bps=1250000
pkt inline=00 00 00 00 00 00 00 00 00 00 00 00 08 00 45 00 00 2e 00 01 00 00 40 11 f7 7d ${local_hex_ip1} ${dest_hex_ip1} 0b b8 0b b9 00 1a 55 7b
pkt size=60
gateway ipv4=${gw_ip1}
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/genv6.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/genv6.cfg
index 650e284c..32fadbc7 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/genv6.cfg
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/genv6.cfg
@@ -38,7 +38,7 @@ mempool size=8K
name=${name}
heartbeat timeout=${heartbeat}
-[core 0]
+[core $mcore]
mode=master
[core $gencores]
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/impair.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/impair.cfg
index d9d86281..3eaf80e7 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/impair.cfg
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/impair.cfg
@@ -31,14 +31,13 @@ vlan=yes
vdev=impair_tap
local ipv4=${local_ip1}
-
[defaults]
mempool size=8K
[global]
name=${name}
-[core 0]
+[core $mcore]
mode=master
[core $cores]
@@ -49,4 +48,5 @@ sub mode=l3
rx port=if0
tx port=if0
delay us=1000
-probability=100
+proba delay=50
+proba no drop=100
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/irq.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/irq.cfg
index 4e9af96b..0f26e6eb 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/irq.cfg
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/irq.cfg
@@ -34,7 +34,7 @@ mempool size=8K
[global]
name=${name}
-[core 0]
+[core $mcore]
mode=master
[core $cores]
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/l2gen.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/l2gen.cfg
index e4c1c37e..3af0ac99 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/l2gen.cfg
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/l2gen.cfg
@@ -37,7 +37,7 @@ mempool size=8K
[global]
name=${name}
-[core 0]
+[core $mcore]
mode=master
[core $gencores]
@@ -48,8 +48,6 @@ tx port=p0
bps=1250000000
pkt inline=${dest_hex_mac1} 00 00 00 00 00 00 08 00 45 00 00 2e 00 01 00 00 40 11 f7 7d ${local_hex_ip1} ${dest_hex_ip1} 0b b8 0b b9 00 1a 55 7b
pkt size=60
-;gateway ipv4=${gw_ip1}
-local ipv4=${local_ip1}
min bulk size=$mbs
max bulk size=16
drop=yes
@@ -69,3 +67,4 @@ accuracy pos=46
packet id pos=50
signature=0x98765432
signature pos=56
+latency bucket size=${bucket_size_exp}
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/l2gen_bare.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/l2gen_bare.cfg
index ff9ef8b8..dc988969 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/l2gen_bare.cfg
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/l2gen_bare.cfg
@@ -37,7 +37,7 @@ mempool size=8K
[global]
name=${name}
-[core 0]
+[core $mcore]
mode=master
[core $gencores]
@@ -48,7 +48,6 @@ tx port=p0
bps=1250000000
pkt inline=${dest_hex_mac1} 00 00 00 00 00 00 08 00 45 00 00 2e 00 01 00 00 40 11 f7 7d ${local_hex_ip1} ${dest_hex_ip1} 0b b8 0b b9 00 1a 55 7b
pkt size=60
-local ipv4=${local_ip1}
min bulk size=$mbs
max bulk size=64
drop=yes
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/l2swap.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/l2swap.cfg
index 17396381..0ce3a1a3 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/l2swap.cfg
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/l2swap.cfg
@@ -35,7 +35,7 @@ mempool size=8K
[global]
name=${name}
-[core 0]
+[core $mcore]
mode=master
[core $cores]
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/configs/public_server.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/public_server.cfg
new file mode 100644
index 00000000..9ffd6e8f
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/public_server.cfg
@@ -0,0 +1,57 @@
+;;
+;; Copyright (c) 2010-2019 Intel Corporation
+;;
+;; Licensed under the Apache License, Version 2.0 (the "License");
+;; you may not use this file except in compliance with the License.
+;; You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+;;
+
+[lua]
+dofile("parameters.lua")
+
+[eal options]
+-n=4 ; force number of memory channels
+no-output=no ; disable DPDK debug output
+eal=--proc-type auto ${eal}
+
+[port 0]
+name=if0
+mac=hardware
+vlan=yes
+vdev=public_tap
+local ipv4=${local_ip1}
+
+[defaults]
+mempool size=8K
+
+[global]
+name=${name}
+
+[core $mcore]
+mode=master
+
+[core $cores]
+name=PublicServer
+task=0
+mode=swap
+sub mode=l3
+rx port=if0
+tx cores=${self}t1
+drop=no
+
+task=1
+mode=mirror
+sub mode=l3
+multiplier=2
+mirror size=300
+rx ring=yes
+tx port=if0
+drop=no
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/secgw1.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/secgw1.cfg
index 1897bbdc..d941e5eb 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/secgw1.cfg
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/secgw1.cfg
@@ -41,7 +41,7 @@ mempool size=16K
start time=20
name=${name}
-[core 0]
+[core $mcore]
mode=master
[core $cores]
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/secgw2.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/secgw2.cfg
index 2fe3291a..9aedc85d 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/secgw2.cfg
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/secgw2.cfg
@@ -41,7 +41,7 @@ mempool size=16K
start time=20
name=${name}
-[core 0]
+[core $mcore]
mode=master
[core $cores]
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/configs/setup.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/setup.cfg
new file mode 100644
index 00000000..f5ff5447
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/setup.cfg
@@ -0,0 +1,10 @@
+[metadata]
+name = rapidxt
+version = 1
+
+[files]
+packages = .
+
+[entry_points]
+xtesting.testcase =
+ rapidxt = rapidxt:RapidXt
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/swap.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/swap.cfg
index 0cca80c6..f66322a9 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/swap.cfg
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/swap.cfg
@@ -35,7 +35,7 @@ mempool size=8K
[global]
name=${name}
-[core 0]
+[core $mcore]
mode=master
[core $cores]
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/configs/swap_gw.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/swap_gw.cfg
new file mode 100644
index 00000000..abadfa64
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/swap_gw.cfg
@@ -0,0 +1,50 @@
+;;
+;; Copyright (c) 2010-2019 Intel Corporation
+;;
+;; Licensed under the Apache License, Version 2.0 (the "License");
+;; you may not use this file except in compliance with the License.
+;; You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+;;
+
+[lua]
+dofile("parameters.lua")
+
+[eal options]
+-n=4 ; force number of memory channels
+no-output=no ; disable DPDK debug output
+eal=--proc-type auto ${eal}
+
+[port 0]
+name=if0
+mac=hardware
+vlan=yes
+vdev=swap_tap
+local ipv4=${local_ip1}
+
+[defaults]
+mempool size=8K
+
+[global]
+name=${name}
+
+[core $mcore]
+mode=master
+
+[core $cores]
+name=swap
+task=0
+mode=swap
+sub mode=l3
+rx port=if0
+tx port=if0
+gateway ipv4=${gw_ip1}
+drop=no
+;arp update time=1
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/swapv6.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/swapv6.cfg
index e073f8f1..61c8a594 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/swapv6.cfg
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/swapv6.cfg
@@ -33,7 +33,7 @@ mempool size=8K
[global]
name=${name}
-[core 0]
+[core $mcore]
mode=master
[core $cores]
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/createrapid.py b/VNFs/DPPD-PROX/helper-scripts/rapid/createrapid.py
index 4644a028..af1da307 100755
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/createrapid.py
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/createrapid.py
@@ -32,8 +32,8 @@ class RapidStackManager(object):
options = config.options(section)
for option in options:
rapid_stack_params[option] = config.get(section, option)
- if 'push_gateway' not in rapid_stack_params.keys():
- rapid_stack_params['push_gateway'] = None
+ if 'dataplane_subnet_mask' not in rapid_stack_params.keys():
+ rapid_stack_params['dataplane_subnet_mask'] = 24
return (rapid_stack_params)
@staticmethod
@@ -42,25 +42,22 @@ class RapidStackManager(object):
stack_name = rapid_stack_params['stack_name']
heat_template = rapid_stack_params['heat_template']
heat_param = rapid_stack_params['heat_param']
- keypair_name = rapid_stack_params['keypair_name']
user = rapid_stack_params['user']
- push_gateway = rapid_stack_params['push_gateway']
+ dataplane_subnet_mask = rapid_stack_params['dataplane_subnet_mask']
deployment = StackDeployment(cloud_name)
- deployment.deploy(stack_name, keypair_name, heat_template, heat_param)
- deployment.generate_env_file(user, push_gateway)
+ deployment.deploy(stack_name, heat_template, heat_param)
+ deployment.generate_env_file(user, dataplane_subnet_mask)
def main():
rapid_stack_params = {}
RapidStackManager.parse_config(rapid_stack_params)
log_file = 'CREATE{}.log'.format(rapid_stack_params['stack_name'])
- RapidLog.log_init(log_file, 'DEBUG', 'INFO', '2020.05.05')
+ RapidLog.log_init(log_file, 'DEBUG', 'INFO', '2021.03.15')
#cloud_name = 'openstackL6'
#stack_name = 'rapid'
#heat_template = 'openstack-rapid.yaml'
#heat_param = 'params_rapid.yaml'
- #keypair_name = 'prox_key'
#user = 'centos'
- #push_gateway = None
RapidStackManager.deploy_stack(rapid_stack_params)
if __name__ == "__main__":
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/createrapidk8s.py b/VNFs/DPPD-PROX/helper-scripts/rapid/createrapidk8s.py
index 4285584d..c4667f1f 100755
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/createrapidk8s.py
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/createrapidk8s.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.7
+#!/usr/bin/env python3
##
## Copyright (c) 2019 Intel Corporation
@@ -17,7 +17,7 @@
##
import argparse
-from k8sdeployment import K8sDeployment
+from rapid_k8s_deployment import K8sDeployment
# Config file name for deployment creation
CREATE_CONFIG_FILE_NAME = "rapid.pods"
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/deploycentostools.sh b/VNFs/DPPD-PROX/helper-scripts/rapid/deploycentostools.sh
index 3837012e..a0fe7cb2 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/deploycentostools.sh
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/deploycentostools.sh
@@ -18,12 +18,6 @@
# Directory for package build
BUILD_DIR="/opt/rapid"
DPDK_VERSION="20.05"
-PROX_COMMIT="7c3217fc16"
-PROX_CHECKOUT="git checkout ${PROX_COMMIT}"
-## Next line is overruling the PROX_COMMIT and will replace the version with a very specific patch. Should be commented out
-## if you want to use a committed version of PROX with the COMMIT ID specified above
-##Following line has the commit for testing IMIX, IPV6, ... It is the merge of all PROX commits on May 27th 2020
-#PROX_CHECKOUT="git fetch \"https://gerrit.opnfv.org/gerrit/samplevnf\" refs/changes/23/70223/1 && git checkout FETCH_HEAD"
MULTI_BUFFER_LIB_VER="0.52"
export RTE_SDK="${BUILD_DIR}/dpdk-${DPDK_VERSION}"
export RTE_TARGET="x86_64-native-linuxapp-gcc"
@@ -50,7 +44,8 @@ function os_pkgs_install()
numactl-devel vim tuna openssl-devel wireshark \
make driverctl
- ${SUDO} wget https://www.nasm.us/pub/nasm/releasebuilds/2.14.02/linux/nasm-2.14.02-0.fc27.x86_64.rpm
+ ${SUDO} wget --no-check-certificate \
+ https://www.nasm.us/pub/nasm/releasebuilds/2.14.02/linux/nasm-2.14.02-0.fc27.x86_64.rpm
${SUDO} rpm -ivh nasm-2.14.02-0.fc27.x86_64.rpm
}
@@ -61,6 +56,12 @@ function k8s_os_pkgs_runtime_install()
# Install required dynamically linked libraries + required packages
${SUDO} yum install -y numactl-libs libpcap openssh openssh-server \
openssh-clients sudo
+
+ # Install additional packets for universal image
+ ${SUDO} yum install -y epel-release python3 kubernetes-client
+ ${SUDO} yum install -y python3-paramiko python3-future
+ ${SUDO} python3 -m pip install --upgrade pip
+ ${SUDO} pip3 install scp kubernetes
}
function os_cfg()
@@ -146,15 +147,15 @@ function dpdk_install()
pushd ${RTE_SDK} > /dev/null 2>&1
make config T=${RTE_TARGET}
# Starting from DPDK 20.05, the IGB_UIO driver is not compiled by default.
- # Uncomment the sed command to enable the driver compilation
- #${SUDO} sed -i 's/CONFIG_RTE_EAL_IGB_UIO=n/c\/CONFIG_RTE_EAL_IGB_UIO=y' ${RTE_SDK}/build/.config
+ # Uncomment the sed command to enable the driver compilation
+ #${SUDO} sed -i 's/CONFIG_RTE_EAL_IGB_UIO=n/c\/CONFIG_RTE_EAL_IGB_UIO=y' ${RTE_SDK}/build/.config
# For Kubernetes environment we use host vfio module
if [ "${K8S_ENV}" == "y" ]; then
sed -i 's/CONFIG_RTE_EAL_IGB_UIO=y/CONFIG_RTE_EAL_IGB_UIO=n/g' ${RTE_SDK}/build/.config
sed -i 's/CONFIG_RTE_LIBRTE_KNI=y/CONFIG_RTE_LIBRTE_KNI=n/g' ${RTE_SDK}/build/.config
sed -i 's/CONFIG_RTE_KNI_KMOD=y/CONFIG_RTE_KNI_KMOD=n/g' ${RTE_SDK}/build/.config
- fi
+ fi
# Compile with MB library
sed -i '/CONFIG_RTE_LIBRTE_PMD_AESNI_MB=n/c\CONFIG_RTE_LIBRTE_PMD_AESNI_MB=y' ${RTE_SDK}/build/.config
@@ -165,23 +166,26 @@ function dpdk_install()
function prox_compile()
{
- # Compile PROX
- pushd ${BUILD_DIR}/samplevnf/VNFs/DPPD-PROX
- make -j`getconf _NPROCESSORS_ONLN`
- ${SUDO} cp ${BUILD_DIR}/samplevnf/VNFs/DPPD-PROX/build/app/prox ${BUILD_DIR}/prox
- popd > /dev/null 2>&1
+ # Compile PROX
+ pushd ${BUILD_DIR}/samplevnf/VNFs/DPPD-PROX
+ COMMIT_ID=$(git rev-parse HEAD)
+ echo "${COMMIT_ID}" > ${BUILD_DIR}/commit_id
+ make -j`getconf _NPROCESSORS_ONLN`
+ ${SUDO} cp ${BUILD_DIR}/samplevnf/VNFs/DPPD-PROX/build/app/prox ${BUILD_DIR}/prox
+ popd > /dev/null 2>&1
}
function prox_install()
{
- # Clone and compile PROX
- pushd ${BUILD_DIR} > /dev/null 2>&1
- git clone https://git.opnfv.org/samplevnf
- pushd ${BUILD_DIR}/samplevnf/VNFs/DPPD-PROX > /dev/null 2>&1
- bash -c "${PROX_CHECKOUT}"
- popd > /dev/null 2>&1
- prox_compile
- popd > /dev/null 2>&1
+ # Clone PROX
+ pushd ${BUILD_DIR} > /dev/null 2>&1
+ git clone https://git.opnfv.org/samplevnf
+ cp -R ./samplevnf/VNFs/DPPD-PROX/helper-scripts/rapid ./src
+ popd > /dev/null 2>&1
+ prox_compile
+
+ # Clean build folder
+ rm -rf ${BUILD_DIR}/samplevnf
}
function port_info_build()
@@ -200,6 +204,7 @@ function create_minimal_install()
echo "${BUILD_DIR}/prox" >> ${BUILD_DIR}/list_of_install_components
echo "${BUILD_DIR}/port_info_app" >> ${BUILD_DIR}/list_of_install_components
+ echo "${BUILD_DIR}/commit_id" >> ${BUILD_DIR}/list_of_install_components
tar -czvhf ${BUILD_DIR}/install_components.tgz -T ${BUILD_DIR}/list_of_install_components
}
@@ -223,7 +228,7 @@ function k8s_runtime_image()
ldconfig
- #rm -rf ${BUILD_DIR}/install_components.tgz
+ rm -rf ${BUILD_DIR}/install_components.tgz
}
function print_usage()
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/devbind.sh b/VNFs/DPPD-PROX/helper-scripts/rapid/devbind.sh
index fe7a5d4f..0bde3cc2 100755
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/devbind.sh
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/devbind.sh
@@ -2,9 +2,11 @@ link="$(sudo ip -o link | grep MACADDRESS |cut -d":" -f 2)"
if [ -n "$link" ];
then
echo Need to bind
+ # Uncomment one of the following lines, depending on which driver
+ # you want to use: vfio-pci or igb_uio
#sudo /opt/rapid/dpdk/usertools/dpdk-devbind.py --force --bind igb_uio $(sudo /opt/rapid/dpdk/usertools/dpdk-devbind.py --status |grep $link | cut -d" " -f 1)
sudo driverctl set-override $(sudo ethtool -i $link |grep bus-info | cut -d" " -f 2) vfio-pci
else
- echo Assuming port is already bound to vfio-pci
+ echo Assuming port is already bound to DPDK poll mode driver
fi
exit 0
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/dockerimage.sh b/VNFs/DPPD-PROX/helper-scripts/rapid/dockerimage.sh
index e0f38ade..e2266e58 100755
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/dockerimage.sh
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/dockerimage.sh
@@ -16,7 +16,7 @@
##
PROX_DEPLOY_DIR="."
-PROX_IMAGE_NAME="prox_slim"
+PROX_IMAGE_NAME="rapid"
RSA_KEY_FILE_NAME="rapid_rsa_key"
DOCKERFILE="Dockerfile"
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/format.yaml b/VNFs/DPPD-PROX/helper-scripts/rapid/format.yaml
new file mode 100644
index 00000000..8dcb09ba
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/format.yaml
@@ -0,0 +1,105 @@
+;Format: PushGateway
+;Format: Xtesting
+;URL:
+ part1: http://testresults.opnfv.org/test/api/v1/results
+;URL:
+ part1: http://192.168.36.61:9091/metrics/job/
+ part2: test
+ part3: /instance/
+ part4: environment_file
+;FlowsizeTest:
+ Flows: Flows
+ Size: Size
+ RequestedSpeed: RequestedSpeed
+ CoreGenerated: pps_req_tx
+ SentByNIC: pps_tx
+ FwdBySUT: pps_sut_tx
+ RevByCore: pps_rx
+ AvgLatency: lat_avg
+ PCTLatency: lat_perc
+ MinLatency: lat_min
+ MaxLatency: lat_max
+ Sent: abs_tx
+ Received: abs_rx
+ Lost: abs_dropped
+ Misordered: mis_ordered
+ Extent: extent
+ Duplicated: duplicate
+FlowSizeTest:
+ Environment: environment_file
+ Test: test
+ Flows: Flows
+ Size: Size
+ Speed (Mpps):
+ RequestedSpeed: RequestedSpeed
+ CoreGenerated: pps_req_tx
+ SentByNIC: pps_tx
+ FwdBySUT: pps_sut_tx
+ RevByCore: pps_rx
+ Latency (usec):
+ AvgLatency: lat_avg
+ PCTLatency: lat_perc
+ MinLatency: lat_min
+ MaxLatency: lat_max
+ Distribution:
+ bucket_size: bucket_size
+ buckets: buckets
+ Absolute Packet Count:
+ Sent: abs_tx
+ Received: abs_rx
+ Lost: abs_dropped
+ Re-ordering:
+ Misordered: mis_ordered
+ Extent: extent
+ Duplicated: duplicate
+IrqTest:
+ Environment: environment_file
+ Test: test
+ Buckets: buckets
+ Machine_data: machine_data
+ImpairTest:
+ Environment: environment_file
+ Test: test
+ Flows: Flows
+ Size: Size
+ Speed (Mpps):
+ RequestedSpeed: RequestedSpeed
+ CoreGenerated: pps_req_tx
+ SentByNIC: pps_tx
+ FwdBySUT: pps_sut_tx
+ RevByCore: pps_rx
+ Latency (usec):
+ AvgLatency: lat_avg
+ PCTLatency: lat_perc
+ MinLatency: lat_min
+ MaxLatency: lat_max
+ Distribution:
+ bucket_size: bucket_size
+ buckets: buckets
+ Absolute Packet Count:
+ Sent: abs_tx
+ Received: abs_rx
+ Lost: abs_dropped
+ Re-ordering:
+ Misordered: mis_ordered
+ Extent: extent
+ Duplicated: duplicate
+CoreStatsTest:
+ Environment: environment_file
+ Test: test
+ PROXID: PROXID
+ StepSize: StepSize
+ Received: Received
+ Sent: Sent
+ NonDPReceived: NonDPReceived
+ NonDPSent: NonDPSent
+ Dropped: Dropped
+PortStatsTest:
+ Environment: environment_file
+ Test: test
+ PROXID: PROXID
+ StepSize: StepSize
+ Received: Received
+ Sent: Sent
+ NoMbufs: NoMbufs
+ iErrMiss: iErrMiss
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/helper.lua b/VNFs/DPPD-PROX/helper-scripts/rapid/helper.lua
index 237c385b..a5633409 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/helper.lua
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/helper.lua
@@ -21,6 +21,22 @@ function convertIPToHex(ip)
return "IP ADDRESS ERROR"
end
+ local chunks = {ip:match("^(%d+)%.(%d+)%.(%d+)%.(%d+)(/%d+)$")}
+ if #chunks == 5 then
+ for i,v in ipairs(chunks) do
+ if i < 5 then
+ if tonumber(v) > 255 then
+ print ("IPV4 ADDRESS ERROR: ", ip)
+ return "IPV4 ADDRESS ERROR"
+ end
+ address_chunks[#address_chunks + 1] = string.format ("%02x", v)
+ end
+ end
+ result = table.concat(address_chunks, " ")
+ print ("Hex IPV4: ", result)
+ return result
+ end
+
local chunks = {ip:match("^(%d+)%.(%d+)%.(%d+)%.(%d+)$")}
if #chunks == 4 then
for i,v in ipairs(chunks) do
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/machine.map b/VNFs/DPPD-PROX/helper-scripts/rapid/machine.map
index 1f7ce99d..38bc5a7e 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/machine.map
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/machine.map
@@ -28,3 +28,6 @@ machine_index=2
[TestM3]
machine_index=3
+
+[TestM4]
+machine_index=4
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/openstack-rapid.yaml b/VNFs/DPPD-PROX/helper-scripts/rapid/openstack-rapid.yaml
index 16df0874..1cc11e04 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/openstack-rapid.yaml
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/openstack-rapid.yaml
@@ -10,11 +10,12 @@ parameters:
public_net_name: {description: Public network to allocate (floating) IPs to VMs', type: string, default: admin_floating_net}
mgmt_net_name: {description: Name of PROX mgmt network to be created, type: string, default: admin_internal_net}
PROX_image: {description: Image name to use for PROX, type: string, default: rapidVM}
- PROX_key: {description: DO NOT CHANGE THIS DEFAULT KEY NAME, type: string, default: rapid_key}
+ PROX_key: {description: DO NOT CHANGE THIS DEFAULT KEY NAME, type: string, default: rapid_rsa_key}
my_availability_zone: {description: availability_zone for Hosting VMs, type: string, default: nova}
security_group: {description: Security Group to use, type: string, default: prox_security_group}
- PROXVM_count: {description: Total number of testVMs to create, type: number, default: 2}
- PROX2VM_count: {description: Total number of testVMs to create, type: number, default: 1}
+ PROXType1VM_count: {description: Total number of testVMs to create, type: number, default: 2}
+ PROXType2VM_count: {description: Total number of testVMs type 2 to create, type: number, default: 1}
+ PROXType3VM_count: {description: Total number of testVMs type 3 to create, type: number, default: 1}
# The following paramters are not used, but are here in case you want to also
# create the management and dataplane networks in this template
@@ -26,6 +27,7 @@ parameters:
data_net_cidr: {description: PROX private network CIDR,type: string, default: 30.30.1.0/24}
data_net_pool_start: {description: Start of private network IP address allocation pool, type: string, default: 30.30.1.100}
data_net_pool_end: {description: End of private network IP address allocation pool, type: string, default: 30.30.1.200}
+ data2_net_name: {description: Name of PROX private network 2 to be created, type: string, default: data2}
dns:
type: comma_delimited_list
label: DNS nameservers
@@ -33,11 +35,11 @@ parameters:
default: '8.8.8.8'
resources:
- PROXVMs:
+ PROXType1VMs:
type: OS::Heat::ResourceGroup
description: Group of PROX VMs according to specs described in this section
properties:
- count: { get_param: PROXVM_count }
+ count: { get_param: PROXType1VM_count }
resource_def:
type: rapid-openstack-server.yaml
properties:
@@ -52,14 +54,14 @@ resources:
PROX_config: {get_resource: MyConfig}
depends_on:
- MyConfig
-
- PROX2VMs:
+
+ PROXType2VMs:
type: OS::Heat::ResourceGroup
description: Group of PROX VMs according to specs described in this section
properties:
- count: { get_param: PROX2VM_count }
+ count: { get_param: PROXType2VM_count }
resource_def:
- type: rapid-openstack-server.yaml
+ type: rapid-openstack-server-2ports.yaml
properties:
PROX_availability_zone : {get_param: my_availability_zone}
PROX_security_group : {get_param: security_group}
@@ -69,10 +71,31 @@ resources:
PROX_public_net: {get_param: public_net_name}
PROX_mgmt_net_id: {get_param: mgmt_net_name}
PROX_data_net_id: {get_param: data_net_name}
+ PROX_data2_net_id: {get_param: data2_net_name}
+ PROX_config: {get_resource: MyConfig}
+ depends_on:
+ - MyConfig
+
+ PROXType3VMs:
+ type: OS::Heat::ResourceGroup
+ description: Group of PROX VMs according to specs described in this section
+ properties:
+ count: { get_param: PROXType3VM_count }
+ resource_def:
+ type: rapid-openstack-server.yaml
+ properties:
+ PROX_availability_zone : {get_param: my_availability_zone}
+ PROX_security_group : {get_param: security_group}
+ PROX_image: {get_param: PROX_image}
+ PROX_key: {get_param: PROX_key}
+ PROX_server_name: rapidType3VM-%index%
+ PROX_public_net: {get_param: public_net_name}
+ PROX_mgmt_net_id: {get_param: mgmt_net_name}
+ PROX_data_net_id: {get_param: data2_net_name}
PROX_config: {get_resource: MyConfig}
depends_on:
- MyConfig
-
+
MyConfig:
type: OS::Heat::CloudConfig
properties:
@@ -92,10 +115,12 @@ resources:
expire: False
write_files:
- path: /opt/rapid/after_boot_do_not_run.sh
+ # - path: /opt/rapid/after_boot.sh
# after_boot.sh is ran by check_prox_system_setup.sh, if it exists
# This can be used to fix some issues, like in the example below
# Remove this section or rename the file, if you do not want to run
# this after booting
+ # The code below is just an example of what could be ran after boot
content: |
OLDIFS="${IFS}"
IFS=$'\n'
@@ -104,6 +129,7 @@ resources:
for item in ${list}
do /bin/bash -c "sudo ip route del ${item}"
done
+ # Make sure to replace the IP address with your gateway
/bin/bash -c "sudo ip route add default via 10.6.6.1 dev eth0"
/bin/bash -c "echo nameserver 8.8.8.8 > /etc/resolv.conf"
IFS="${OLDIFS}"
@@ -112,26 +138,31 @@ resources:
outputs:
number_of_servers:
description: List of number or PROX instance
- value:
- - {get_param: PROXVM_count}
- - {get_param: PROX2VM_count}
+ value:
+ - {get_param: PROXType1VM_count}
+ - {get_param: PROXType2VM_count}
+ - {get_param: PROXType3VM_count}
server_name:
description: List of list of names of the PROX instances
- value:
- - {get_attr: [PROXVMs, name]}
- - {get_attr: [PROX2VMs, name]}
+ value:
+ - {get_attr: [PROXType1VMs, name]}
+ - {get_attr: [PROXType2VMs, name]}
+ - {get_attr: [PROXType3VMs, name]}
mngmt_ips:
description: List of list of Management IPs of the VMs
- value:
- - {get_attr: [PROXVMs, mngmt_ip]}
- - {get_attr: [PROX2VMs, mngmt_ip]}
+ value:
+ - {get_attr: [PROXType1VMs, mngmt_ip]}
+ - {get_attr: [PROXType2VMs, mngmt_ip]}
+ - {get_attr: [PROXType3VMs, mngmt_ip]}
data_plane_ips:
description: List of list of list of DataPlane IPs of the VMs
- value:
- - {get_attr: [PROXVMs, data_plane_ips]}
- - {get_attr: [PROX2VMs, data_plane_ips]}
+ value:
+ - {get_attr: [PROXType1VMs, data_plane_ips]}
+ - {get_attr: [PROXType2VMs, data_plane_ips]}
+ - {get_attr: [PROXType3VMs, data_plane_ips]}
data_plane_macs:
description: List of list of list of DataPlane MACs of the VMs
- value:
- - {get_attr: [PROXVMs, data_plane_mac]}
- - {get_attr: [PROX2VMs, data_plane_mac]}
+ value:
+ - {get_attr: [PROXType1VMs, data_plane_mac]}
+ - {get_attr: [PROXType2VMs, data_plane_mac]}
+ - {get_attr: [PROXType3VMs, data_plane_mac]}
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/params_rapid.yaml b/VNFs/DPPD-PROX/helper-scripts/rapid/params_rapid.yaml
index 6d48d19e..fbef2f54 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/params_rapid.yaml
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/params_rapid.yaml
@@ -1,6 +1,10 @@
parameters:
public_net_name: admin_floating_net
data_net_name: dataplane-network
- PROX_image: testrapidVM
+ PROX_image: rapidVM
+ PROX_key: rapid_rsa_key
my_availability_zone: nova
security_group: prox_security_group
+ PROXType1VM_count: 3
+ PROXType2VM_count: 0
+ PROXType3VM_count: 0
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/pod-rapid.yaml b/VNFs/DPPD-PROX/helper-scripts/rapid/pod-rapid.yaml
index 5ce09071..9e269f60 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/pod-rapid.yaml
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/pod-rapid.yaml
@@ -7,21 +7,24 @@ metadata:
spec:
containers:
- name: pod-rapid
- image: localhost:5000/prox_slim:latest
+ image: opnfv/rapid:latest
imagePullPolicy: Always
securityContext:
capabilities:
- add: ["IPC_LOCK"]
+ add: ["IPC_LOCK", "NET_ADMIN"]
volumeMounts:
- mountPath: /dev/hugepages
name: hugepages
resources:
requests:
- hugepages-2Mi: 512Mi
+ hugepages-2Mi: 1Gi
memory: 1Gi
+ cpu: 8
intel.com/intel_sriov_vfio: '1'
limits:
- hugepages-2Mi: 512Mi
+ hugepages-2Mi: 1Gi
+ memory: 1Gi
+ cpu: 8
intel.com/intel_sriov_vfio: '1'
volumes:
- name: hugepages
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/port_info/meson.build b/VNFs/DPPD-PROX/helper-scripts/rapid/port_info/meson.build
new file mode 100644
index 00000000..f2efd667
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/port_info/meson.build
@@ -0,0 +1,101 @@
+##
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+project('port-info', 'C',
+ version:
+ run_command(['git', 'describe',
+ '--abbrev=8', '--dirty', '--always']).stdout().strip(),
+ license: 'Apache',
+ default_options: ['buildtype=release', 'c_std=gnu99'],
+ meson_version: '>= 0.47'
+)
+
+cc = meson.get_compiler('c')
+
+# Configure options for prox
+# Grab the DPDK version here "manually" as it is not available in the dpdk_dep
+# object
+dpdk_version = run_command('pkg-config', '--modversion', 'libdpdk').stdout()
+
+
+cflags = [
+ '-DPROGRAM_NAME="port_info_app"',
+ '-fno-stack-protector',
+ '-DGRE_TP',
+ '-D_GNU_SOURCE'] # for PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP
+
+# Add configured cflags to arguments
+foreach arg: cflags
+ add_project_arguments(arg, language: 'c')
+endforeach
+
+# enable warning flags if they are supported by the compiler
+warning_flags = [
+ '-Wno-unused',
+ '-Wno-unused-parameter',
+ '-Wno-unused-result',
+ '-Wno-deprecated-declarations']
+
+foreach arg: warning_flags
+ if cc.has_argument(arg)
+ add_project_arguments(arg, language: 'c')
+ endif
+endforeach
+
+has_sym_args = [
+ [ 'HAVE_LIBEDIT_EL_RFUNC_T', 'histedit.h',
+ 'el_rfunc_t' ],
+]
+config = configuration_data()
+foreach arg:has_sym_args
+ config.set(arg[0], cc.has_header_symbol(arg[1], arg[2]))
+endforeach
+configure_file(output : 'libedit_autoconf.h', configuration : config)
+
+# All other dependencies
+dpdk_dep = dependency('libdpdk', required: true)
+tinfo_dep = dependency('tinfo', required: false)
+threads_dep = dependency('threads', required: true)
+pcap_dep = dependency('pcap', required: true)
+libedit_dep = dependency('libedit', required: true)
+math_dep = cc.find_library('m', required : false)
+dl_dep = cc.find_library('dl', required : true)
+
+deps = [dpdk_dep,
+ tinfo_dep,
+ threads_dep,
+ pcap_dep,
+ libedit_dep,
+ math_dep,
+ dl_dep]
+
+# Explicitly add these to the dependency list
+deps += [cc.find_library('rte_bus_pci', required: true)]
+deps += [cc.find_library('rte_bus_vdev', required: true)]
+
+if dpdk_version.version_compare('<20.11.0')
+deps += [cc.find_library('rte_pmd_ring', required: true)]
+else
+deps += [cc.find_library('rte_net_ring', required: true)]
+endif
+
+sources = files(
+ 'port_info.c')
+
+executable('port_info_app',
+ sources,
+ c_args: cflags,
+ dependencies: deps,
+ install: true)
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/port_info/port_info.c b/VNFs/DPPD-PROX/helper-scripts/rapid/port_info/port_info.c
index 79bd0c0b..917c0636 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/port_info/port_info.c
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/port_info/port_info.c
@@ -21,7 +21,11 @@
#include <rte_version.h>
static const uint16_t rx_rings = 1, tx_rings = 1;
+#if RTE_VERSION < RTE_VERSION_NUM(21,11,0,0)
static const struct rte_eth_conf port_conf = { .link_speeds = ETH_LINK_SPEED_AUTONEG };
+#else
+static const struct rte_eth_conf port_conf = { .link_speeds = RTE_ETH_LINK_SPEED_AUTONEG };
+#endif
static inline int
port_info(void)
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/prox_ctrl.py b/VNFs/DPPD-PROX/helper-scripts/rapid/prox_ctrl.py
index 3ee4e831..8754ebc4 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/prox_ctrl.py
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/prox_ctrl.py
@@ -25,48 +25,43 @@ import os
import time
import subprocess
import socket
-from rapid_log import RapidLog
+from rapid_log import RapidLog
+from rapid_sshclient import SSHClient
class prox_ctrl(object):
- def __init__(self, ip, key=None, user=None):
+ def __init__(self, ip, key=None, user=None, password = None):
self._ip = ip
self._key = key
self._user = user
- self._children = []
+ self._password = password
self._proxsock = []
-
- def __del__(self):
- self.close()
+ self._sshclient = SSHClient(ip = ip, user = user, password = password,
+ rsa_private_key = key, timeout = None)
def ip(self):
return self._ip
- def test_connect(self):
- """Simply try to run 'true' over ssh on remote system.
- On failure, raise RuntimeWarning exception when possibly worth
- retrying, and raise RuntimeError exception otherwise.
- """
- return self.run_cmd('test -e /opt/rapid/system_ready_for_rapid', True)
-
- def connect(self):
+ def test_connection(self):
attempts = 1
- RapidLog.debug("Trying to connect to instance which was just launched \
+ RapidLog.debug("Trying to connect to machine \
on %s, attempt: %d" % (self._ip, attempts))
while True:
try:
- self.test_connect()
- break
+ if (self.run_cmd('test -e /opt/rapid/system_ready_for_rapid \
+ && echo exists')):
+ break
+ time.sleep(2)
except RuntimeWarning as ex:
+ RapidLog.debug("RuntimeWarning %d:\n%s"
+ % (ex.returncode, ex.output.strip()))
attempts += 1
if attempts > 20:
RapidLog.exception("Failed to connect to instance after %d\
attempts:\n%s" % (attempts, ex))
- raise Exception("Failed to connect to instance after %d \
- attempts:\n%s" % (attempts, ex))
time.sleep(2)
- RapidLog.debug("Trying to connect to instance which was just \
- launched on %s, attempt: %d" % (self._ip, attempts))
- RapidLog.debug("Connected to instance on %s" % self._ip)
+ RapidLog.debug("Trying to connect to machine \
+ on %s, attempt: %d" % (self._ip, attempts))
+ RapidLog.debug("Connected to machine on %s" % self._ip)
def connect_socket(self):
attempts = 1
@@ -81,8 +76,6 @@ class prox_ctrl(object):
if attempts > 20:
RapidLog.exception("Failed to connect to PROX on %s after %d \
attempts" % (self._ip, attempts))
- raise Exception("Failed to connect to PROX on %s after %d \
- attempts" % (self._ip, attempts))
time.sleep(2)
RapidLog.debug("Trying to connect to PROX (just launched) on %s, \
attempt: %d" % (self._ip, attempts))
@@ -90,73 +83,12 @@ class prox_ctrl(object):
return sock
def close(self):
- """Must be called before program termination."""
for sock in self._proxsock:
sock.quit()
- children = len(self._children)
- if children == 0:
- return
- if children > 1:
- print('Waiting for %d child processes to complete ...' % children)
- for child in self._children:
- ret = os.waitpid(child[0], os.WNOHANG)
- if ret[0] == 0:
- print("Waiting for child process '%s' to complete ..."
- % child[1])
- ret = os.waitpid(child[0], 0)
- rc = ret[1]
- if os.WIFEXITED(rc):
- if os.WEXITSTATUS(rc) == 0:
- print("Child process '%s' completed successfully"
- % child[1])
- else:
- print("Child process '%s' returned exit status %d" % (
- child[1], os.WEXITSTATUS(rc)))
- elif os.WIFSIGNALED(rc):
- print("Child process '%s' exited on signal %d" % (
- child[1], os.WTERMSIG(rc)))
- else:
- print("Wait status for child process '%s' is 0x%04x" % (
- child[1], rc))
- def run_cmd(self, command, _connect=False):
- """Execute command over ssh on remote system.
- Wait for remote command completion.
- Return command output (combined stdout and stderr).
- _connect argument is reserved for connect() method.
- """
- cmd = self._build_ssh(command)
- try:
- return subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as ex:
- #if _connect and ex.returncode == 255:
- if _connect:
- raise RuntimeWarning(ex.output.strip())
- raise RuntimeError('ssh returned exit status %d:\n%s'
- % (ex.returncode, ex.output.strip()))
-
- def fork_cmd(self, command, name=None):
- """Execute command over ssh on remote system, in a child process.
- Do not wait for remote command completion.
- Return child process id.
- """
- if name is None:
- name = command
- cmd = self._build_ssh(command)
- pid = os.fork()
- if (pid != 0):
- # In the parent process
- self._children.append((pid, name))
- return pid
- # In the child process: use os._exit to terminate
- try:
- # Actually ignore output on success, but capture stderr on failure
- subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as ex:
- raise RuntimeError("Child process '%s' failed:\n"
- 'ssh returned exit status %d:\n%s'
- % (name, ex.returncode, ex.output.strip()))
- os._exit(0)
+ def run_cmd(self, command):
+ self._sshclient.run_cmd(command)
+ return self._sshclient.get_output()
def prox_sock(self, port=8474):
"""Connect to the PROX instance on remote system.
@@ -172,42 +104,13 @@ class prox_ctrl(object):
return None
def scp_put(self, src, dst):
- """Copy src file from local system to dst on remote system."""
- cmd = [ 'scp',
- '-B',
- '-oStrictHostKeyChecking=no',
- '-oUserKnownHostsFile=/dev/null',
- '-oLogLevel=ERROR' ]
- if self._key is not None:
- cmd.extend(['-i', self._key])
- cmd.append(src)
- remote = ''
- if self._user is not None:
- remote += self._user + '@'
- remote += self._ip + ':' + dst
- cmd.append(remote)
- try:
- # Actually ignore output on success, but capture stderr on failure
- subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as ex:
- raise RuntimeError('scp returned exit status %d:\n%s'
- % (ex.returncode, ex.output.strip()))
+ self._sshclient.scp_put(src, dst)
+ RapidLog.info("Copying from {} to {}:{}".format(src, self._ip, dst))
- def _build_ssh(self, command):
- cmd = [ 'ssh',
- '-oBatchMode=yes',
- '-oStrictHostKeyChecking=no',
- '-oUserKnownHostsFile=/dev/null',
- '-oLogLevel=ERROR' ]
- if self._key is not None:
- cmd.extend(['-i', self._key])
- remote = ''
- if self._user is not None:
- remote += self._user + '@'
- remote += self._ip
- cmd.append(remote)
- cmd.append(command)
- return cmd
+ def scp_get(self, src, dst):
+ self._sshclient.scp_get('/home/' + self._user + src, dst)
+ RapidLog.info("Copying from {}:/home/{}{} to {}".format(self._ip,
+ self._user, src, dst))
class prox_sock(object):
def __init__(self, sock):
@@ -215,11 +118,7 @@ class prox_sock(object):
self._rcvd = b''
def __del__(self):
- self.quit()
-
- def quit(self):
if self._sock is not None:
- self._send('quit')
self._sock.close()
self._sock = None
@@ -238,10 +137,14 @@ class prox_sock(object):
self._send('reset stats')
def lat_stats(self, cores, tasks=[0]):
- min_lat = 999999999
- max_lat = avg_lat = 0
+ result = {}
+ result['lat_min'] = 999999999
+ result['lat_max'] = result['lat_avg'] = 0
+ result['buckets'] = [0] * 128
+ result['mis_ordered'] = 0
+ result['extent'] = 0
+ result['duplicate'] = 0
number_tasks_returning_stats = 0
- buckets = [0] * 128
self._send('lat all stats %s %s' % (','.join(map(str, cores)),
','.join(map(str, tasks))))
for core in cores:
@@ -254,37 +157,42 @@ class prox_sock(object):
(potential incompatibility between scripts and PROX)")
raise Exception("lat stats error")
number_tasks_returning_stats += 1
- min_lat = min(int(stats[0]),min_lat)
- max_lat = max(int(stats[1]),max_lat)
- avg_lat += int(stats[2])
+ result['lat_min'] = min(int(stats[0]),result['lat_min'])
+ result['lat_max'] = max(int(stats[1]),result['lat_max'])
+ result['lat_avg'] += int(stats[2])
#min_since begin = int(stats[3])
#max_since_begin = int(stats[4])
- tsc = int(stats[5]) # Taking the last tsc as the timestamp since
- # PROX will return the same tsc for each
- # core/task combination
- hz = int(stats[6])
+ result['lat_tsc'] = int(stats[5])
+ # Taking the last tsc as the timestamp since
+ # PROX will return the same tsc for each
+ # core/task combination
+ result['lat_hz'] = int(stats[6])
#coreid = int(stats[7])
#taskid = int(stats[8])
+ result['mis_ordered'] += int(stats[9])
+ result['extent'] += int(stats[10])
+ result['duplicate'] += int(stats[11])
stats = self._recv().split(':')
if stats[0].startswith('error'):
RapidLog.critical("lat stats error: unexpected lat bucket \
reply (potential incompatibility between scripts \
and PROX)")
raise Exception("lat bucket reply error")
- buckets[0] = int(stats[1])
+ result['buckets'][0] = int(stats[1])
for i in range(1, 128):
stats = self._recv().split(':')
- buckets[i] = int(stats[1])
- avg_lat = old_div(avg_lat,number_tasks_returning_stats)
+ result['buckets'][i] += int(stats[1])
+ result['lat_avg'] = old_div(result['lat_avg'],
+ number_tasks_returning_stats)
self._send('stats latency(0).used')
used = float(self._recv())
self._send('stats latency(0).total')
total = float(self._recv())
- return (min_lat, max_lat, avg_lat, (old_div(used,total)), tsc, hz,
- buckets)
+ result['lat_used'] = old_div(used,total)
+ return (result)
def irq_stats(self, core, bucket, task=0):
- self._send('stats task.core(%s).task(%s).irq(%s)' %
+ self._send('stats task.core(%s).task(%s).irq(%s)' %
(core, task, bucket))
stats = self._recv().split(',')
return int(stats[0])
@@ -298,12 +206,12 @@ class prox_sock(object):
def core_stats(self, cores, tasks=[0]):
rx = tx = drop = tsc = hz = rx_non_dp = tx_non_dp = tx_fail = 0
- self._send('dp core stats %s %s' % (','.join(map(str, cores)),
+ self._send('dp core stats %s %s' % (','.join(map(str, cores)),
','.join(map(str, tasks))))
for core in cores:
for task in tasks:
stats = self._recv().split(',')
- if stats[0].startswith('error'):
+ if stats[0].startswith('error'):
if stats[0].startswith('error: invalid syntax'):
RapidLog.critical("dp core stats error: unexpected \
invalid syntax (potential incompatibility \
@@ -324,7 +232,7 @@ class prox_sock(object):
rx = tx = port_id = tsc = no_mbufs = errors = 0
self._send('multi port stats %s' % (','.join(map(str, ports))))
result = self._recv().split(';')
- if result[0].startswith('error'):
+ if result[0].startswith('error'):
RapidLog.critical("multi port stats error: unexpected invalid \
syntax (potential incompatibility between scripts and \
PROX)")
@@ -340,35 +248,46 @@ class prox_sock(object):
return rx, tx, no_mbufs, errors, tsc
def set_random(self, cores, task, offset, mask, length):
- self._send('set random %s %s %s %s %s' % (','.join(map(str, cores)),
+ self._send('set random %s %s %s %s %s' % (','.join(map(str, cores)),
task, offset, mask, length))
def set_size(self, cores, task, pkt_size):
- self._send('pkt_size %s %s %s' % (','.join(map(str, cores)), task,
+ self._send('pkt_size %s %s %s' % (','.join(map(str, cores)), task,
pkt_size))
def set_imix(self, cores, task, imix):
- self._send('imix %s %s %s' % (','.join(map(str, cores)), task,
+ self._send('imix %s %s %s' % (','.join(map(str, cores)), task,
','.join(map(str,imix))))
def set_value(self, cores, task, offset, value, length):
- self._send('set value %s %s %s %s %s' % (','.join(map(str, cores)),
+ self._send('set value %s %s %s %s %s' % (','.join(map(str, cores)),
task, offset, value, length))
+ def quit_prox(self):
+ self._send('quit')
+
def _send(self, cmd):
"""Append LF and send command to the PROX instance."""
if self._sock is None:
raise RuntimeError("PROX socket closed, cannot send '%s'" % cmd)
- self._sock.sendall(cmd.encode() + b'\n')
+ try:
+ self._sock.sendall(cmd.encode() + b'\n')
+ except ConnectionResetError as e:
+ RapidLog.error('Pipe reset by Prox instance: traffic too high?')
+ raise
def _recv(self):
"""Receive response from PROX instance, return it with LF removed."""
if self._sock is None:
raise RuntimeError("PROX socket closed, cannot receive anymore")
- pos = self._rcvd.find(b'\n')
- while pos == -1:
- self._rcvd += self._sock.recv(256)
+ try:
pos = self._rcvd.find(b'\n')
- rsp = self._rcvd[:pos]
- self._rcvd = self._rcvd[pos+1:]
+ while pos == -1:
+ self._rcvd += self._sock.recv(256)
+ pos = self._rcvd.find(b'\n')
+ rsp = self._rcvd[:pos]
+ self._rcvd = self._rcvd[pos+1:]
+ except ConnectionResetError as e:
+ RapidLog.error('Pipe reset by Prox instance: traffic too high?')
+ raise
return rsp.decode()
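The prox_ctrl rework above replaces the hand-rolled ssh/scp plumbing with an SSHClient wrapper and makes lat_stats() return a dict. A hedged usage sketch (not part of the patch; the IP address, user name and key file are placeholder values, and a reachable PROX instance is of course required):

# Usage sketch of the reworked prox_ctrl / prox_sock API shown in this diff.
from prox_ctrl import prox_ctrl

client = prox_ctrl(ip='192.168.30.11', user='rapid', key='rapid_rsa_key')
client.test_connection()              # retries until /opt/rapid/system_ready_for_rapid exists
sock = client.connect_socket()        # prox_sock talking to PROX (default port 8474)

stats = sock.lat_stats(cores=[1, 2])  # now a dict instead of a long tuple
print('avg latency:', stats['lat_avg'])
print('mis-ordered:', stats['mis_ordered'])
print('non-empty buckets:', sum(1 for b in stats['buckets'] if b))

sock.quit_prox()                      # new explicit way to ask PROX to quit
client.close()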
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/pyproject.toml b/VNFs/DPPD-PROX/helper-scripts/rapid/pyproject.toml
new file mode 100644
index 00000000..374b58cb
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/pyproject.toml
@@ -0,0 +1,6 @@
+[build-system]
+requires = [
+ "setuptools>=42",
+ "wheel"
+]
+build-backend = "setuptools.build_meta"
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid-openstack-server-2ports.yaml b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid-openstack-server-2ports.yaml
new file mode 100644
index 00000000..e1095fbd
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid-openstack-server-2ports.yaml
@@ -0,0 +1,94 @@
+heat_template_version: 2014-10-16
+
+description: Single server resource with 2 dataplane ports used by resource groups.
+
+parameters:
+ PROX_public_net:
+ type: string
+ PROX_mgmt_net_id:
+ type: string
+ PROX_data_net_id:
+ type: string
+ PROX_data2_net_id:
+ type: string
+ PROX_server_name:
+ type: string
+ PROX_availability_zone:
+ type: string
+ PROX_security_group:
+ type: string
+ PROX_image:
+ type: string
+ PROX_key:
+ type: string
+ PROX_config:
+ type: string
+
+resources:
+ PROX_instance:
+ type: OS::Nova::Server
+ properties:
+ name: { get_param: PROX_server_name }
+ availability_zone : {get_param: PROX_availability_zone}
+ flavor: {get_resource: PROX_flavor}
+ image: {get_param: PROX_image}
+ key_name: {get_param: PROX_key}
+ networks:
+ - port: {get_resource: mgmt_port }
+ - port: {get_resource: data_port }
+ - port: {get_resource: data2_port }
+ user_data: {get_param: PROX_config}
+ user_data_format: RAW
+
+ PROX_flavor:
+ type: OS::Nova::Flavor
+ properties:
+ ram: 4096
+ vcpus: 4
+ disk: 80
+ extra_specs: {"hw:mem_page_size": "large","hw:cpu_policy": "dedicated","hw:cpu_thread_policy":"isolate"}
+
+ mgmt_port:
+ type: OS::Neutron::Port
+ properties:
+ network_id: { get_param: PROX_mgmt_net_id }
+ security_groups:
+ - {get_param: PROX_security_group}
+
+ floating_ip:
+ type: OS::Neutron::FloatingIP
+ properties:
+ floating_network: {get_param: PROX_public_net}
+ port_id: {get_resource: mgmt_port}
+
+ data_port:
+ type: OS::Neutron::Port
+ properties:
+ network_id: { get_param: PROX_data_net_id }
+ security_groups:
+ - {get_param: PROX_security_group}
+
+ data2_port:
+ type: OS::Neutron::Port
+ properties:
+ network_id: { get_param: PROX_data2_net_id }
+ security_groups:
+ - {get_param: PROX_security_group}
+
+outputs:
+ name:
+ description: Name of the PROX instance
+ value: {get_attr: [PROX_instance, name]}
+ mngmt_ip:
+ description: Management IP of the VM
+ value: {get_attr: [floating_ip, floating_ip_address ]}
+ data_plane_ips:
+ description: List of DataPlane IPs of the VM
+ value:
+ - {get_attr: [data_port, fixed_ips, 0, ip_address]}
+ - {get_attr: [data2_port, fixed_ips, 0, ip_address]}
+ data_plane_mac:
+ description: List of DataPlane MACs of the VM
+ value:
+ - {get_attr: [data_port, mac_address]}
+ - {get_attr: [data2_port, mac_address]}
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid.pods b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid.pods
index 918125ae..cd54d507 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid.pods
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid.pods
@@ -15,16 +15,15 @@
##
[DEFAULT]
-total_number_of_pods=3
+total_number_of_pods=2
+namespace=rapid-testing
[POD1]
nodeSelector_hostname=k8s-node1
dp_ip=192.168.30.11
+dp_subnet=24
[POD2]
nodeSelector_hostname=k8s-node2
dp_ip=192.168.30.12
-
-[POD3]
-nodeSelector_hostname=k8s-node2
-dp_ip=192.168.30.13
+dp_subnet=24
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_cli.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_cli.py
index ac0518c3..d103deba 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_cli.py
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_cli.py
@@ -41,6 +41,7 @@ class RapidCli(object):
print(" --env ENVIRONMENT_NAME Parameters will be read from ENVIRONMENT_NAME. Default is %s."%test_params['environment_file'])
print(" --test TEST_NAME Test cases will be read from TEST_NAME. Default is %s."%test_params['test_file'])
print(" --map MACHINE_MAP_FILE Machine mapping will be read from MACHINE_MAP_FILE. Default is %s."%test_params['machine_map_file'])
+ print(" --map INDEX_LIST This parameter can also be a list of indices, e.g. [2,3]")
print(" --runtime Specify time in seconds for 1 test run")
print(" --configonly If this option is specified, only upload all config files to the VMs, do not run the tests")
print(" --log Specify logging level for log file output, default is DEBUG")
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_corestatstest.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_corestatstest.py
index dddd29c6..e6a7f517 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_corestatstest.py
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_corestatstest.py
@@ -27,14 +27,13 @@ class CoreStatsTest(RapidTest):
"""
Class to manage the corestatstesting
"""
- def __init__(self, test_param, runtime, pushgateway, environment_file, machines):
- super().__init__(test_param, runtime, pushgateway, environment_file)
- self.machines = machines
+ def __init__(self, test_param, runtime, testname, environment_file,
+ machines):
+ super().__init__(test_param, runtime, testname, environment_file)
+ self.machines = machines
def run(self):
- # fieldnames = ['PROXID','Time','Received','Sent','NonDPReceived','NonDPSent','Delta','NonDPDelta','Dropped']
- # writer = csv.DictWriter(data_csv_file, fieldnames=fieldnames)
- # writer.writeheader()
+ result_details = {'Details': 'Nothing'}
RapidLog.info("+------------------------------------------------------------------------------------------------------------------+")
RapidLog.info("| Measuring core statistics on 1 or more PROX instances |")
RapidLog.info("+-----------+-----------+------------+------------+------------+------------+------------+------------+------------+")
@@ -73,18 +72,19 @@ class CoreStatsTest(RapidTest):
old_tsc[i] = new_tsc[i]
tot_drop[i] = tot_drop[i] + tx - rx
RapidLog.info('|{:>10.0f}'.format(i)+ ' |{:>10.0f}'.format(duration)+' | ' + '{:>10.0f}'.format(rx) + ' | ' +'{:>10.0f}'.format(tx) + ' | '+'{:>10.0f}'.format(non_dp_rx)+' | '+'{:>10.0f}'.format(non_dp_tx)+' | ' + '{:>10.0f}'.format(tx-rx) + ' | '+ '{:>10.0f}'.format(non_dp_tx-non_dp_rx) + ' | '+'{:>10.0f}'.format(tot_drop[i]) +' |')
- # writer.writerow({'PROXID':i,'Time':duration,'Received':rx,'Sent':tx,'NonDPReceived':non_dp_rx,'NonDPSent':non_dp_tx,'Delta':tx-rx,'NonDPDelta':non_dp_tx-non_dp_rx,'Dropped':tot_drop[i]})
- if self.test['pushgateway']:
- URL = self.test['pushgateway'] + self.test['test']+ '/instance/' + self.test['environment_file'] + str(i)
- DATA = 'PROXID {}\nTime {}\n Received {}\nSent {}\nNonDPReceived {}\nNonDPSent {}\nDelta {}\nNonDPDelta {}\nDropped {}\n'.format(i,duration,rx,tx,non_dp_rx,non_dp_tx,tx-rx,non_dp_tx-non_dp_rx,tot_drop[i])
- HEADERS = {'X-Requested-With': 'Python requests', 'Content-type': 'text/xml'}
- response = requests.post(url=URL, data=DATA,headers=HEADERS)
- if (response.status_code != 202) and (response.status_code != 200):
- RapidLog.info('Cannot send metrics to {}'.format(URL))
- RapidLog.info(DATA)
+ result_details = {'test': self.test['test'],
+ 'environment_file': self.test['environment_file'],
+ 'PROXID': i,
+ 'StepSize': duration,
+ 'Received': rx,
+ 'Sent': tx,
+ 'NonDPReceived': non_dp_rx,
+ 'NonDPSent': non_dp_tx,
+ 'Dropped': tot_drop[i]}
+ result_details = self.post_data(result_details)
if machines_to_go == 0:
duration = duration - 1
machines_to_go = len (self.machines)
RapidLog.info("+-----------+-----------+------------+------------+------------+------------+------------+------------+------------+")
- return (True)
+ return (True, result_details)
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_defaults.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_defaults.py
index 2aa1acc1..27d2430d 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_defaults.py
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_defaults.py
@@ -21,14 +21,16 @@ class RapidDefaults(object):
Class to define the test defaults
"""
test_params = {
- 'version' : '2020.04.15', # Please do NOT change, used for debugging
+ 'version' : '2023.01.16', # Please do NOT change, used for debugging
'environment_file' : 'rapid.env', #Default string for environment
- 'test_file' : 'basicrapid.test', #Default string for test
+ 'test_file' : 'tests/basicrapid.test', #Default string for test
'machine_map_file' : 'machine.map', #Default string for machine map file
'loglevel' : 'DEBUG', # sets log level for writing to file
'screenloglevel' : 'INFO', # sets log level for writing to screen
'runtime' : 10, # time in seconds for 1 test run
 'configonly' : False, # If True, the system will upload all the necessary config files to the VMs, but not start PROX and the actual testing
'rundir' : '/opt/rapid', # Directory where to find the tools in the machines running PROX
+ 'resultsdir' : '.', # Directory where to store log files
+ 'sleep_time' : 2, # Sleep time between two loop iterations. Minimum is 2 seconds. Might be useful to let the SUT clean its caches
'lat_percentile' : 0.99
}
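Since test_params is a plain class attribute, callers can copy and override these defaults before a run; a minimal sketch (not part of the patch, the override values are arbitrary examples):

from copy import deepcopy
from rapid_defaults import RapidDefaults

test_params = deepcopy(RapidDefaults.test_params)
test_params['test_file'] = 'tests/basicrapid.test'  # default now lives under tests/
test_params['runtime'] = 30                         # seconds per test run
test_params['sleep_time'] = 5                       # >= 2, lets the SUT settle between iterations
test_params['resultsdir'] = '/tmp/rapid-results'    # where log files are written
print(test_params['version'])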
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_flowsizetest.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_flowsizetest.py
index da53742e..ea42fc9a 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_flowsizetest.py
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_flowsizetest.py
@@ -16,10 +16,9 @@
## See the License for the specific language governing permissions and
## limitations under the License.
##
-
import sys
import time
-import requests
+import copy
from math import ceil
from statistics import mean
from past.utils import old_div
@@ -32,13 +31,14 @@ class FlowSizeTest(RapidTest):
"""
Class to manage the flowsizetesting
"""
- def __init__(self, test_param, lat_percentile, runtime, pushgateway,
- environment_file, gen_machine, sut_machine, background_machines):
- super().__init__(test_param, runtime, pushgateway, environment_file)
+ def __init__(self, test_param, lat_percentile, runtime, testname,
+ environment_file, gen_machine, sut_machine, background_machines, sleep_time):
+ super().__init__(test_param, runtime, testname, environment_file)
self.gen_machine = gen_machine
self.sut_machine = sut_machine
self.background_machines = background_machines
self.test['lat_percentile'] = lat_percentile
+ self.test['sleep_time'] = sleep_time
if self.test['test'] == 'TST009test':
# This test implements some of the testing as defined in
# https://docbox.etsi.org/ISG/NFV/open/Publications_pdf/Specs-Reports/NFV-TST%20009v3.2.1%20-%20GS%20-%20NFVI_Benchmarks.pdf
@@ -51,23 +51,24 @@ class FlowSizeTest(RapidTest):
self.test['TST009_S']= []
for m in range(0, self.test['TST009_n']):
self.test['TST009_S'].append((m+1) * self.test['stepsize'])
- self.test['lat_avg_threshold'] = inf
- self.test['lat_perc_threshold'] = inf
- self.test['lat_max_threshold'] = inf
elif self.test['test'] == 'fixed_rate':
for key in['drop_rate_threshold','lat_avg_threshold',
- 'lat_perc_threshold','lat_max_threshold']:
+ 'lat_perc_threshold','lat_max_threshold','mis_ordered_threshold']:
self.test[key] = inf
def new_speed(self, speed,size,success):
if self.test['test'] == 'fixed_rate':
return (self.test['startspeed'])
+ elif self.test['test'] == 'increment_till_fail':
+ return (speed + self.test['step'])
elif 'TST009' in self.test.keys():
if success:
self.test['TST009_L'] = self.test['TST009_m'] + 1
else:
- self.test['TST009_R'] = max(self.test['TST009_m'] - 1, self.test['TST009_L'])
- self.test['TST009_m'] = int (old_div((self.test['TST009_L'] + self.test['TST009_R']),2))
+ self.test['TST009_R'] = max(self.test['TST009_m'] - 1,
+ self.test['TST009_L'])
+ self.test['TST009_m'] = int (old_div((self.test['TST009_L'] +
+ self.test['TST009_R']),2))
return (self.get_percentageof10Gbps(self.test['TST009_S'][self.test['TST009_m']],size))
else:
if success:
@@ -79,10 +80,13 @@ class FlowSizeTest(RapidTest):
def get_start_speed_and_init(self, size):
if self.test['test'] == 'fixed_rate':
return (self.test['startspeed'])
+ elif self.test['test'] == 'increment_till_fail':
+ return (self.test['startspeed'])
elif 'TST009' in self.test.keys():
self.test['TST009_L'] = 0
self.test['TST009_R'] = self.test['TST009_n'] - 1
- self.test['TST009_m'] = int(old_div((self.test['TST009_L'] + self.test['TST009_R']), 2))
+ self.test['TST009_m'] = int(old_div((self.test['TST009_L'] +
+ self.test['TST009_R']), 2))
return (self.get_percentageof10Gbps(self.test['TST009_S'][self.test['TST009_m']],size))
else:
self.test['minspeed'] = 0
@@ -97,163 +101,226 @@ class FlowSizeTest(RapidTest):
else:
return ((self.test['maxspeed'] - self.test['minspeed']) <= self.test['accuracy'])
+ def warm_up(self):
+ # Running at low speed to make sure the ARP messages can get through.
+ # Without it, the ARP messages could be dropped by an overloaded switch and the test would not give proper results.
+ # Note however that if the test steps ran for a very long time, the ARP entries would expire in the switch.
+ # PROX sends a new ARP request every second, so chances are very low that they will all fail to get through.
+ imix = self.test['warmupimix']
+ FLOWSIZE = self.test['warmupflowsize']
+ WARMUPSPEED = self.test['warmupspeed']
+ WARMUPTIME = self.test['warmuptime']
+
+ if WARMUPTIME == 0:
+ RapidLog.info(("Not Warming up"))
+ return
+
+ RapidLog.info(("Warming up during {} seconds..., packet size = {},"
+ " flows = {}, speed = {}").format(WARMUPTIME, imix, FLOWSIZE,
+ WARMUPSPEED))
+ self.gen_machine.set_generator_speed(WARMUPSPEED)
+ self.set_background_speed(self.background_machines, WARMUPSPEED)
+ self.gen_machine.set_udp_packet_size(imix)
+ self.set_background_size(self.background_machines, imix)
+ if FLOWSIZE:
+ _ = self.gen_machine.set_flows(FLOWSIZE)
+ self.set_background_flows(self.background_machines, FLOWSIZE)
+ self.gen_machine.start()
+ self.start_background_traffic(self.background_machines)
+ time.sleep(WARMUPTIME)
+ self.stop_background_traffic(self.background_machines)
+ self.gen_machine.stop()
+
def run(self):
- # global fieldnames
- # global writer
- # #fieldnames = ['Flows','PacketSize','Gbps','Mpps','AvgLatency','MaxLatency','PacketsDropped','PacketDropRate']
- # fieldnames = ['Flows','PacketSize','RequestedPPS','GeneratedPPS','SentPPS','ForwardedPPS','ReceivedPPS','AvgLatencyUSEC','MaxLatencyUSEC','Sent','Received','Lost','LostTotal']
- # writer = csv.DictWriter(data_csv_file, fieldnames=fieldnames)
- # writer.writeheader()
- self.gen_machine.start_latency_cores()
- TestPassed = True
+ result_details = {'Details': 'Nothing'}
+ TestResult = 0
+ end_data = {}
+ iteration_prefix = {}
+ self.warm_up()
for imix in self.test['imixs']:
size = mean(imix)
self.gen_machine.set_udp_packet_size(imix)
if self.background_machines:
- backgroundinfo = '{}Running {} x background traffic not represented in the table{}'.format(bcolors.FLASH,len(self.background_machines),bcolors.ENDC)
+ backgroundinfo = ('{}Running {} x background traffic not '
+ 'represented in the table{}').format(bcolors.FLASH,
+ len(self.background_machines),bcolors.ENDC)
else:
backgroundinfo = '{}{}'.format(bcolors.FLASH,bcolors.ENDC)
self.set_background_size(self.background_machines, imix)
- RapidLog.info("+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+")
- RapidLog.info('| UDP, {:>5} bytes, different number of flows by randomizing SRC & DST UDP port. {:116.116}|'.format(size, backgroundinfo))
- RapidLog.info("+--------+------------------+-------------+-------------+-------------+------------------------+----------+----------+----------+-----------+-----------+-----------+-----------+-------+----+")
- RapidLog.info('| Flows | Speed requested | Gen by core | Sent by NIC | Fwrd by SUT | Rec. by core | Avg. Lat.|{:.0f} Pcentil| Max. Lat.| Sent | Received | Lost | Total Lost|L.Ratio|Time|'.format(self.test['lat_percentile']*100))
- RapidLog.info("+--------+------------------+-------------+-------------+-------------+------------------------+----------+----------+----------+-----------+-----------+-----------+-----------+-------+----+")
+ RapidLog.info('+' + '-' * 200 + '+')
+ RapidLog.info(("| UDP, {:>5} bytes, different number of flows by "
+ "randomizing SRC & DST UDP port. {:128.128}|").
+ format(round(size), backgroundinfo))
+ RapidLog.info('+' + '-' * 8 + '+' + '-' * 18 + '+' + '-' * 13 +
+ '+' + '-' * 13 + '+' + '-' * 13 + '+' + '-' * 24 + '+' +
+ '-' * 10 + '+' + '-' * 10 + '+' + '-' * 10 + '+' + '-' * 11
+ + '+' + '-' * 11 + '+' + '-' * 11 + '+' + '-' * 11 + '+'
+ + '-' * 7 + '+' + '-' * 11 + '+' + '-' * 4 + '+')
+ RapidLog.info(('| Flows | Speed requested | Gen by core | Sent by'
+ ' NIC | Fwrd by SUT | Rec. by core | Avg. Lat.|{:.0f}'
+ ' Pcentil| Max. Lat.| Sent | Received | Lost | Total'
+ ' Lost|L.Ratio|Mis-ordered|Time').format(self.test['lat_percentile']*100))
+ RapidLog.info('+' + '-' * 8 + '+' + '-' * 18 + '+' + '-' * 13 +
+ '+' + '-' * 13 + '+' + '-' * 13 + '+' + '-' * 24 + '+' +
+ '-' * 10 + '+' + '-' * 10 + '+' + '-' * 10 + '+' + '-' * 11
+ + '+' + '-' * 11 + '+' + '-' * 11 + '+' + '-' * 11 + '+'
+ + '-' * 7 + '+' + '-' * 11 + '+' + '-' * 4 + '+')
for flow_number in self.test['flows']:
attempts = 0
self.gen_machine.reset_stats()
if self.sut_machine:
self.sut_machine.reset_stats()
- flow_number = self.gen_machine.set_flows(flow_number)
- self.set_background_flows(self.background_machines, flow_number)
- endspeed = None
+ if flow_number != 0:
+ flow_number = self.gen_machine.set_flows(flow_number)
+ self.set_background_flows(self.background_machines, flow_number)
+ end_data['speed'] = None
speed = self.get_start_speed_and_init(size)
while True:
attempts += 1
endwarning = False
- print(str(flow_number)+' flows: Measurement ongoing at speed: ' + str(round(speed,2)) + '% ',end='\r')
+ print('{} flows: Measurement ongoing at speed: {}%'.format(
+ str(flow_number), str(round(speed, 2))), end=' \r')
sys.stdout.flush()
- # Start generating packets at requested speed (in % of a 10Gb/s link)
- self.gen_machine.set_generator_speed(speed)
- self.set_background_speed(self.background_machines, speed)
- self.start_background_traffic(self.background_machines)
- # Get statistics now that the generation is stable and initial ARP messages are dealt with
- pps_req_tx,pps_tx,pps_sut_tx,pps_rx,lat_avg,lat_perc , lat_perc_max, lat_max, abs_tx,abs_rx,abs_dropped, abs_tx_fail, drop_rate, lat_min, lat_used, r, actual_duration = self.run_iteration(float(self.test['runtime']),flow_number,size,speed)
- self.stop_background_traffic(self.background_machines)
- if r > 1:
- retry_warning = bcolors.WARNING + ' {:1} retries needed'.format(r) + bcolors.ENDC
+ iteration_data = self.run_iteration(
+ float(self.test['runtime']),flow_number,size,speed)
+ if iteration_data['r'] > 1:
+ retry_warning = '{} {:1} retries needed{}'.format(
+ bcolors.WARNING, iteration_data['r'],
+ bcolors.ENDC)
else:
retry_warning = ''
- # Drop rate is expressed in percentage. lat_used is a ratio (0 to 1). The sum of these 2 should be 100%.
- # If the sum is lower than 95, it means that more than 5% of the latency measurements where dropped for accuracy reasons.
- if (drop_rate + lat_used * 100) < 95:
- lat_warning = bcolors.WARNING + ' Latency accuracy issue?: {:>3.0f}%'.format(lat_used*100) + bcolors.ENDC
+ # Drop rate is expressed in percentage. lat_used is a ratio
+ # (0 to 1). The sum of these 2 should be 100%.
+ # If the sum is lower than 95, it means that more than 5%
+ # of the latency measurements were dropped for accuracy
+ # reasons.
+ if (iteration_data['drop_rate'] +
+ iteration_data['lat_used'] * 100) < 95:
+ lat_warning = ('{} Latency accuracy issue?: {:>3.0f}%'
+ '{}').format(bcolors.WARNING,
+ iteration_data['lat_used'] * 100,
+ bcolors.ENDC)
else:
lat_warning = ''
+ iteration_prefix = {'speed' : bcolors.ENDC,
+ 'lat_avg' : bcolors.ENDC,
+ 'lat_perc' : bcolors.ENDC,
+ 'lat_max' : bcolors.ENDC,
+ 'abs_drop_rate' : bcolors.ENDC,
+ 'mis_ordered' : bcolors.ENDC,
+ 'drop_rate' : bcolors.ENDC}
if self.test['test'] == 'fixed_rate':
- endspeed = speed
- endpps_req_tx = None
- endpps_tx = None
- endpps_sut_tx = None
- endpps_rx = None
- endlat_avg = lat_avg
- endlat_perc = lat_perc
- endlat_perc_max = lat_perc_max
- endlat_max = lat_max
- endabs_dropped = abs_dropped
- enddrop_rate = drop_rate
- endabs_tx = abs_tx
- endabs_rx = abs_rx
+ end_data = copy.deepcopy(iteration_data)
+ end_prefix = copy.deepcopy(iteration_prefix)
if lat_warning or retry_warning:
- endwarning = '| | {:177.177} |'.format(retry_warning + lat_warning)
+ endwarning = '| | {:177.177} |'.format(
+ retry_warning + lat_warning)
success = True
- TestPassed = False # fixed rate testing cannot be True, it is just reported numbers every second
- speed_prefix = lat_avg_prefix = lat_perc_prefix = lat_max_prefix = abs_drop_rate_prefix = drop_rate_prefix = bcolors.ENDC
- # The following if statement is testing if we pass the success criteria of a certain drop rate, average latency and maximum latency below the threshold
- # The drop rate success can be achieved in 2 ways: either the drop rate is below a treshold, either we want that no packet has been lost during the test
+ # TestResult = TestResult + iteration_data['pps_rx']
+ # For fixed rate testing the result is not accumulated: we just
+ # report the pps received
+ # The following if statement is testing if we pass the
+ # success criteria of a certain drop rate, average latency
+ # and maximum latency below the threshold.
+ # The drop rate success can be achieved in 2 ways: either
+ # the drop rate is below a threshold, or no packet at all
+ # has been lost during the test.
# This can be specified by putting 0 in the .test file
- elif ((drop_rate < self.test['drop_rate_threshold']) or (abs_dropped==self.test['drop_rate_threshold']==0)) and (lat_avg< self.test['lat_avg_threshold']) and (lat_perc< self.test['lat_perc_threshold']) and (lat_max < self.test['lat_max_threshold']):
- if (old_div((self.get_pps(speed,size) - pps_tx),self.get_pps(speed,size)))>0.01:
- speed_prefix = bcolors.WARNING
- if abs_tx_fail > 0:
- gen_warning = bcolors.WARNING + ' Network limit?: requesting {:<.3f} Mpps and getting {:<.3f} Mpps - {} failed to be transmitted'.format(self.get_pps(speed,size), pps_tx, abs_tx_fail) + bcolors.ENDC
+ elif ((self.get_pps(speed,size) - iteration_data['pps_tx']) / self.get_pps(speed,size)) \
+ < self.test['generator_threshold'] and \
+ ((iteration_data['drop_rate'] < self.test['drop_rate_threshold']) or \
+ (iteration_data['abs_dropped']==self.test['drop_rate_threshold']==0)) and \
+ (iteration_data['lat_avg']< self.test['lat_avg_threshold']) and \
+ (iteration_data['lat_perc']< self.test['lat_perc_threshold']) and \
+ (iteration_data['lat_max'] < self.test['lat_max_threshold'] and \
+ iteration_data['mis_ordered'] <= self.test['mis_ordered_threshold']):
+ end_data = copy.deepcopy(iteration_data)
+ end_prefix = copy.deepcopy(iteration_prefix)
+ success = True
+ success_message=' SUCCESS'
+ if (old_div((self.get_pps(speed,size) - iteration_data['pps_tx']),self.get_pps(speed,size)))>0.01:
+ iteration_prefix['speed'] = bcolors.WARNING
+ if iteration_data['abs_tx_fail'] > 0:
+ gen_warning = bcolors.WARNING + ' Network limit?: requesting {:<.3f} Mpps and getting {:<.3f} Mpps - {} failed to be transmitted'.format(self.get_pps(speed,size), iteration_data['pps_tx'], iteration_data['abs_tx_fail']) + bcolors.ENDC
else:
- gen_warning = bcolors.WARNING + ' Generator limit?: requesting {:<.3f} Mpps and getting {:<.3f} Mpps'.format(self.get_pps(speed,size), pps_tx) + bcolors.ENDC
+ gen_warning = bcolors.WARNING + ' Generator limit?: requesting {:<.3f} Mpps and getting {:<.3f} Mpps'.format(self.get_pps(speed,size), iteration_data['pps_tx']) + bcolors.ENDC
+ endwarning = '| | {:186.186} |'.format(retry_warning + lat_warning + gen_warning)
+ RapidLog.debug(self.report_result(-attempts, size,
+ iteration_data, iteration_prefix) + success_message +
+ retry_warning + lat_warning + gen_warning)
+ break
else:
- speed_prefix = bcolors.ENDC
+ iteration_prefix['speed'] = bcolors.ENDC
gen_warning = ''
- endspeed = speed
- endspeed_prefix = speed_prefix
- endpps_req_tx = pps_req_tx
- endpps_tx = pps_tx
- endpps_sut_tx = pps_sut_tx
- endpps_rx = pps_rx
- endlat_avg = lat_avg
- endlat_perc = lat_perc
- endlat_perc_max = lat_perc_max
- endlat_max = lat_max
- endabs_dropped = None
- enddrop_rate = drop_rate
- endabs_tx = abs_tx
- endabs_rx = abs_rx
- if lat_warning or gen_warning or retry_warning:
- endwarning = '| | {:186.186} |'.format(retry_warning + lat_warning + gen_warning)
- success = True
- success_message=' SUCCESS'
- speed_prefix = lat_avg_prefix = lat_perc_prefix = lat_max_prefix = abs_drop_rate_prefix = drop_rate_prefix = bcolors.ENDC
- RapidLog.debug(self.report_result(-attempts,size,speed,pps_req_tx,pps_tx,pps_sut_tx,pps_rx,lat_avg,lat_perc,lat_perc_max,lat_max,abs_tx,abs_rx,abs_dropped,actual_duration,speed_prefix,lat_avg_prefix,lat_max_prefix,abs_drop_rate_prefix,drop_rate_prefix)+ success_message + retry_warning + lat_warning + gen_warning)
+ if lat_warning or retry_warning:
+ endwarning = '| | {:186.186} |'.format(retry_warning + lat_warning)
+ RapidLog.debug(self.report_result(-attempts, size,
+ iteration_data, iteration_prefix) + success_message +
+ retry_warning + lat_warning + gen_warning)
else:
success_message=' FAILED'
- abs_drop_rate_prefix = bcolors.ENDC
- if ((abs_dropped>0) and (self.test['drop_rate_threshold'] ==0)):
- abs_drop_rate_prefix = bcolors.FAIL
- if (drop_rate < self.test['drop_rate_threshold']):
- drop_rate_prefix = bcolors.ENDC
+ if ((iteration_data['abs_dropped']>0) and (self.test['drop_rate_threshold'] ==0)):
+ iteration_prefix['abs_drop_rate'] = bcolors.FAIL
+ if (iteration_data['drop_rate'] <= self.test['drop_rate_threshold']):
+ iteration_prefix['drop_rate'] = bcolors.ENDC
else:
- drop_rate_prefix = bcolors.FAIL
- if (lat_avg< self.test['lat_avg_threshold']):
- lat_avg_prefix = bcolors.ENDC
+ iteration_prefix['drop_rate'] = bcolors.FAIL
+ if (iteration_data['lat_avg']< self.test['lat_avg_threshold']):
+ iteration_prefix['lat_avg'] = bcolors.ENDC
else:
- lat_avg_prefix = bcolors.FAIL
- if (lat_perc< self.test['lat_perc_threshold']):
- lat_perc_prefix = bcolors.ENDC
+ iteration_prefix['lat_avg'] = bcolors.FAIL
+ if (iteration_data['lat_perc']< self.test['lat_perc_threshold']):
+ iteration_prefix['lat_perc'] = bcolors.ENDC
else:
- lat_perc_prefix = bcolors.FAIL
- if (lat_max< self.test['lat_max_threshold']):
- lat_max_prefix = bcolors.ENDC
+ iteration_prefix['lat_perc'] = bcolors.FAIL
+ if (iteration_data['lat_max']< self.test['lat_max_threshold']):
+ iteration_prefix['lat_max'] = bcolors.ENDC
else:
- lat_max_prefix = bcolors.FAIL
- if ((old_div((self.get_pps(speed,size) - pps_tx),self.get_pps(speed,size)))<0.001):
- speed_prefix = bcolors.ENDC
+ iteration_prefix['lat_max'] = bcolors.FAIL
+ if ((old_div((self.get_pps(speed,size) - iteration_data['pps_tx']),self.get_pps(speed,size)))<0.001):
+ iteration_prefix['speed'] = bcolors.ENDC
else:
- speed_prefix = bcolors.FAIL
+ iteration_prefix['speed'] = bcolors.FAIL
+ if (iteration_data['mis_ordered']< self.test['mis_ordered_threshold']):
+ iteration_prefix['mis_ordered'] = bcolors.ENDC
+ else:
+ iteration_prefix['mis_ordered'] = bcolors.FAIL
+
success = False
- RapidLog.debug(self.report_result(-attempts,size,speed,pps_req_tx,pps_tx,pps_sut_tx,pps_rx,lat_avg,lat_perc,lat_perc_max,lat_max,abs_tx,abs_rx,abs_dropped,actual_duration,speed_prefix,lat_avg_prefix,lat_perc_prefix,lat_max_prefix,abs_drop_rate_prefix,drop_rate_prefix)+ success_message + retry_warning + lat_warning)
+ RapidLog.debug(self.report_result(-attempts, size,
+ iteration_data, iteration_prefix) +
+ success_message + retry_warning + lat_warning)
speed = self.new_speed(speed, size, success)
- if self.resolution_achieved():
+ if self.test['test'] == 'increment_till_fail':
+ if not success:
+ break
+ elif self.resolution_achieved():
break
- if endspeed is not None:
- if TestPassed and (endpps_rx < self.test['pass_threshold']):
- TestPassed = False
- speed_prefix = lat_avg_prefix = lat_perc_prefix = lat_max_prefix = abs_drop_rate_prefix = drop_rate_prefix = bcolors.ENDC
- RapidLog.info(self.report_result(flow_number,size,endspeed,endpps_req_tx,endpps_tx,endpps_sut_tx,endpps_rx,endlat_avg,endlat_perc,endlat_perc_max,endlat_max,endabs_tx,endabs_rx,endabs_dropped,actual_duration,speed_prefix,lat_avg_prefix,lat_perc_prefix,lat_max_prefix,abs_drop_rate_prefix,drop_rate_prefix))
- if endwarning:
- RapidLog.info (endwarning)
- RapidLog.info("+--------+------------------+-------------+-------------+-------------+------------------------+----------+----------+----------+-----------+-----------+-----------+-----------+-------+----+")
- # writer.writerow({'Flows':flow_number,'PacketSize':(size+4),'RequestedPPS':self.get_pps(endspeed,size),'GeneratedPPS':endpps_req_tx,'SentPPS':endpps_tx,'ForwardedPPS':endpps_sut_tx,'ReceivedPPS':endpps_rx,'AvgLatencyUSEC':endlat_avg,'MaxLatencyUSEC':endlat_max,'Sent':endabs_tx,'Received':endabs_rx,'Lost':endabs_dropped,'LostTotal':endabs_dropped})
- if self.test['pushgateway']:
- URL = self.test['pushgateway'] + self.test['test']+ '/instance/' + self.test['environment_file']
- if endabs_dropped == None:
- ead = 0
- else:
- ead = endabs_dropped
- DATA = 'Flows {}\nPacketSize {}\nRequestedPPS {}\nGeneratedPPS {}\nSentPPS {}\nForwardedPPS {}\nReceivedPPS {}\nAvgLatencyUSEC {}\nMaxLatencyUSEC {}\nSent {}\nReceived {}\nLost {}\nLostTotal {}\n'.format(flow_number,size+4,self.get_pps(endspeed,size),endpps_req_tx,endpps_tx,endpps_sut_tx,endpps_rx,endlat_avg,endlat_max,endabs_tx,endabs_rx,ead,ead)
- HEADERS = {'X-Requested-With': 'Python requests', 'Content-type': 'text/xml'}
- response = requests.post(url=URL, data=DATA,headers=HEADERS)
- if (response.status_code != 202) and (response.status_code != 200):
- RapidLog.info('Cannot send metrics to {}'.format(URL))
- RapidLog.info(DATA)
- else:
- RapidLog.info('|{:>7}'.format(str(flow_number))+" | Speed 0 or close to 0")
- self.gen_machine.stop_latency_cores()
- return (TestPassed)
+ if end_data['speed'] is None:
+ end_data = iteration_data
+ end_prefix = iteration_prefix
+ RapidLog.info('|{:>7} | {:<177} |'.format("FAILED","Speed 0 or close to 0, data for last failed step below:"))
+ RapidLog.info(self.report_result(flow_number, size,
+ end_data, end_prefix))
+ if end_data['avg_bg_rate']:
+ tot_avg_rx_rate = end_data['pps_rx'] + (end_data['avg_bg_rate'] * len(self.background_machines))
+ endtotaltrafficrate = '| | Total amount of traffic received by all generators during this test: {:>4.3f} Gb/s {:7.3f} Mpps {} |'.format(RapidTest.get_speed(tot_avg_rx_rate,size) , tot_avg_rx_rate, ' '*84)
+ RapidLog.info (endtotaltrafficrate)
+ if endwarning:
+ RapidLog.info (endwarning)
+ if self.test['test'] != 'fixed_rate':
+ TestResult = TestResult + end_data['pps_rx']
+ end_data['test'] = self.test['testname']
+ end_data['environment_file'] = self.test['environment_file']
+ end_data['Flows'] = flow_number
+ end_data['Size'] = size
+ end_data['RequestedSpeed'] = RapidTest.get_pps(end_data['speed'] ,size)
+ result_details = self.post_data(end_data)
+ RapidLog.debug(result_details)
+ RapidLog.info('+' + '-' * 8 + '+' + '-' * 18 + '+' + '-' * 13 +
+ '+' + '-' * 13 + '+' + '-' * 13 + '+' + '-' * 24 + '+' +
+ '-' * 10 + '+' + '-' * 10 + '+' + '-' * 10 + '+' + '-' * 11
+ + '+' + '-' * 11 + '+' + '-' * 11 + '+' + '-' * 11 + '+'
+ + '+' + '-' * 11 + '+'
+ + '-' * 7 + '+' + '-' * 4 + '+')
+ return (TestResult, result_details)
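The new_speed()/get_start_speed_and_init() logic above performs a TST009-style binary search over the step rates in TST009_S. A standalone sketch of that idea (not the in-tree code: the limit, step size and stop condition are hypothetical, and the real code stops via resolution_achieved()):

def tst009_search(n=10, stepsize=10.0, limit=57.0):
    """Return the highest step rate that still passes, or None if every step fails."""
    S = [(m + 1) * stepsize for m in range(n)]  # candidate rates, e.g. % of 10Gb/s
    lo, hi = 0, n - 1
    best = None
    while lo <= hi:
        mid = (lo + hi) // 2
        success = S[mid] <= limit               # stand-in for the drop/latency criteria
        if success:
            best = S[mid]
            lo = mid + 1                        # passed: try higher rates
        else:
            hi = mid - 1                        # failed: try lower rates
    return best

print(tst009_search())  # -> 50.0 with the hypothetical values above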
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_generator_machine.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_generator_machine.py
index 8d7b4e33..e52b17db 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_generator_machine.py
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_generator_machine.py
@@ -17,7 +17,6 @@
##
from rapid_log import RapidLog
-from prox_ctrl import prox_ctrl
from rapid_machine import RapidMachine
from math import ceil, log2
@@ -35,7 +34,7 @@ class RandomPortBits(object):
 # throw exception since we need the first bit to be 1
# Otherwise, the randomization could results in all 0's
# and that might be an invalid UDP port and result in
- # packets begin discarded
+ # packets being discarded
src_number_of_random_bits = number_of_random_bits // 2
dst_number_of_random_bits = (number_of_random_bits -
src_number_of_random_bits)
@@ -43,32 +42,71 @@ class RandomPortBits(object):
src_number_of_random_bits)
dst_port_bitmap = '1000000000000000'.replace ('0','X',
dst_number_of_random_bits)
- return [src_port_bitmap, dst_port_bitmap, 1<<number_of_random_bits]
+ return [src_port_bitmap, dst_port_bitmap, 1 << number_of_random_bits]
class RapidGeneratorMachine(RapidMachine):
"""
- Class to deal with rapid configuration files
+ Class to deal with a generator PROX instance (VM, bare metal, container)
"""
+ def __init__(self, key, user, password, vim, rundir, resultsdir,
+ machine_params, configonly, ipv6):
+ mac_address_size = 6
+ ethertype_size = 2
+ FCS_size = 4
+ if ipv6:
+ ip_header_size = 40
+ self.ip_length_offset = 18
+ # In IPV6, the IP size is the size of the IP content
+ self.frame_size_minus_ip_size = (2 * mac_address_size +
+ ethertype_size + ip_header_size + FCS_size)
+ else:
+ ip_header_size = 20
+ self.ip_length_offset = 16
+ # In IPV4, the IP size is the size of the IP header + IP content
+ self.frame_size_minus_ip_size = (2 * mac_address_size +
+ ethertype_size + FCS_size)
+ self.frame_size_minus_udp_header_and_content = (2 * mac_address_size +
+ ethertype_size + ip_header_size + FCS_size )
+ udp_header_start_offset = (2 * mac_address_size + ethertype_size +
+ ip_header_size)
+ self.udp_source_port_offset = udp_header_start_offset
+ self.udp_dest_port_offset = udp_header_start_offset + 2
+ self.udp_length_offset = udp_header_start_offset + 4
+ self.ipv6 = ipv6
+ if 'bucket_size_exp' in machine_params.keys():
+ self.bucket_size_exp = machine_params['bucket_size_exp']
+ else:
+ self.bucket_size_exp = 11
+ super().__init__(key, user, password, vim, rundir, resultsdir,
+ machine_params, configonly)
+
def get_cores(self):
return (self.machine_params['gencores'] +
self.machine_params['latcores'])
- def generate_lua(self, vim):
+ def remap_all_cpus(self):
+ """Convert relative cpu ids for different parameters (gencores, latcores)
+ """
+ super().remap_all_cpus()
+
+ if self.cpu_mapping is None:
+ return
+
+ if 'gencores' in self.machine_params.keys():
+ cpus_remapped = super().remap_cpus(self.machine_params['gencores'])
+ RapidLog.debug('{} ({}): gencores {} remapped to {}'.format(self.name, self.ip, self.machine_params['gencores'], cpus_remapped))
+ self.machine_params['gencores'] = cpus_remapped
+
+ if 'latcores' in self.machine_params.keys():
+ cpus_remapped = super().remap_cpus(self.machine_params['latcores'])
+ RapidLog.debug('{} ({}): latcores {} remapped to {}'.format(self.name, self.ip, self.machine_params['latcores'], cpus_remapped))
+ self.machine_params['latcores'] = cpus_remapped
+
+ def generate_lua(self):
appendix = 'gencores="%s"\n'% ','.join(map(str,
self.machine_params['gencores']))
appendix = appendix + 'latcores="%s"\n'% ','.join(map(str,
self.machine_params['latcores']))
- if 'gw_vm' in self.machine_params.keys():
- for index, gw_ip in enumerate(self.machine_params['gw_ips'],
- start = 1):
- appendix = appendix + 'gw_ip{}="{}"\n'.format(index, gw_ip)
- appendix = (appendix +
- 'gw_hex_ip{}=convertIPToHex(gw_ip{})\n'.format(index,
- index))
- if 'bucket_size_exp' in self.machine_params.keys():
- self.bucket_size_exp = self.machine_params['bucket_size_exp']
- else:
- self.bucket_size_exp = 11
appendix = (appendix +
'bucket_size_exp="{}"\n'.format(self.bucket_size_exp))
if 'heartbeat' in self.machine_params.keys():
@@ -76,7 +114,7 @@ class RapidGeneratorMachine(RapidMachine):
'heartbeat="%s"\n'% self.machine_params['heartbeat'])
else:
appendix = appendix + 'heartbeat="60"\n'
- super().generate_lua(vim, appendix)
+ super().generate_lua(appendix)
def start_prox(self):
# Start the generator with the -e option so that the cores don't
@@ -100,15 +138,17 @@ class RapidGeneratorMachine(RapidMachine):
# The set_size function takes the PROX packet size as a parameter
self.socket.set_size(self.machine_params['gencores'], 0,
imix_frame_sizes[0] - 4)
- # 18 is the difference between the frame size and IP size =
- # size of (MAC addresses, ethertype and FCS)
- self.socket.set_value(self.machine_params['gencores'], 0, 16,
- imix_frame_sizes[0] - 18, 2)
- # 38 is the difference between the frame size and UDP size =
- # 18 + size of IP header (=20)
- self.socket.set_value(self.machine_params['gencores'], 0, 38,
- imix_frame_sizes[0] - 38, 2)
+ # Writing length in the ip header
+ self.socket.set_value(self.machine_params['gencores'], 0,
+ self.ip_length_offset, imix_frame_sizes[0] -
+ self.frame_size_minus_ip_size, 2)
+ # Writing length in the udp header
+ self.socket.set_value(self.machine_params['gencores'], 0,
+ self.udp_length_offset, imix_frame_sizes[0] -
+ self.frame_size_minus_udp_header_and_content, 2)
else:
+ if self.ipv6:
+ RapidLog.critical('IMIX not supported for IPV6')
prox_sizes = [frame_size - 4 for frame_size in imix_frame_sizes]
self.socket.set_imix(self.machine_params['gencores'], 0,
prox_sizes)
@@ -116,10 +156,10 @@ class RapidGeneratorMachine(RapidMachine):
def set_flows(self, number_of_flows):
source_port, destination_port, actualflows = RandomPortBits.get_bitmap(
number_of_flows)
- self.socket.set_random(self.machine_params['gencores'],0,34,
- source_port,2)
- self.socket.set_random(self.machine_params['gencores'],0,36,
- destination_port,2)
+ self.socket.set_random(self.machine_params['gencores'],0,
+ self.udp_source_port_offset, source_port,2)
+ self.socket.set_random(self.machine_params['gencores'],0,
+ self.udp_dest_port_offset, destination_port,2)
return actualflows
def start_gen_cores(self):
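For reference, the UDP port randomization used by set_flows() above builds PROX 'set random' bitmaps in which the leading bit stays 1 and the requested flow count is effectively rounded up to a power of two. A small sketch mirroring RandomPortBits.get_bitmap() (the ceil/log2 rounding is an assumption based on the module imports, not shown verbatim in this hunk):

from math import ceil, log2

def port_bitmaps(number_of_flows):
    # Assumed rounding: enough random bits to cover at least number_of_flows
    bits = int(ceil(log2(number_of_flows))) if number_of_flows > 1 else 0
    src_bits = bits // 2
    dst_bits = bits - src_bits
    # 16-bit port template: leading '1' keeps the port valid, 'X' marks a random bit
    src = '1000000000000000'.replace('0', 'X', src_bits)
    dst = '1000000000000000'.replace('0', 'X', dst_bits)
    return src, dst, 1 << bits  # bitmaps plus the actual number of flows generated

print(port_bitmaps(1000))  # ('1XXXXX0000000000', '1XXXXX0000000000', 1024)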
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/.helmignore b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/.helmignore
new file mode 100644
index 00000000..0e8a0eb3
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/Chart.yaml b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/Chart.yaml
new file mode 100644
index 00000000..4d210409
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: rapid
+description: A Helm chart for deploying RAPID test scripts and environment
+type: application
+version: 0.0.1
+appVersion: "1.0.0"
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/templates/deployment.yaml b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/templates/deployment.yaml
new file mode 100644
index 00000000..74fc6297
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/templates/deployment.yaml
@@ -0,0 +1,26 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: rapid-testing
+ namespace: {{ .Values.namespace }}
+ labels:
+ app: rapid-testing
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: rapid-testing
+ template:
+ metadata:
+ labels:
+ app: rapid-testing
+ spec:
+ serviceAccountName: rapid-testing-sa
+ containers:
+ - name: rapid-mgmt
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/templates/serviceaccount.yaml b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/templates/serviceaccount.yaml
new file mode 100644
index 00000000..7886ade3
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/templates/serviceaccount.yaml
@@ -0,0 +1,36 @@
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: {{ .Values.namespace }}
+
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: rapid-testing-sa
+ namespace: {{ .Values.namespace }}
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: rapid-testing-cr
+rules:
+- apiGroups: [""]
+ resources: ["pods", "pods/exec", "pods/status"]
+ verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: rapid-testing-crb
+subjects:
+- kind: ServiceAccount
+ name: rapid-testing-sa
+ namespace: {{ .Values.namespace }}
+roleRef:
+ kind: ClusterRole
+ name: rapid-testing-cr
+ apiGroup: rbac.authorization.k8s.io
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/values.yaml b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/values.yaml
new file mode 100644
index 00000000..76b8037a
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/values.yaml
@@ -0,0 +1,8 @@
+namespace: rapid-testing
+
+image:
+ repository: opnfv/rapid
+ tag: "latest"
+ pullPolicy: IfNotPresent
+
+nodeSelector: {}
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_impairtest.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_impairtest.py
index 82067295..3945cd8e 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_impairtest.py
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_impairtest.py
@@ -21,6 +21,7 @@ import sys
import time
import requests
from rapid_log import RapidLog
+from rapid_log import bcolors
from rapid_test import RapidTest
from statistics import mean
@@ -28,54 +29,80 @@ class ImpairTest(RapidTest):
"""
Class to manage the impair testing
"""
- def __init__(self, test_param, lat_percentile, runtime, pushgateway,
- environment_file, gen_machine, sut_machine):
- super().__init__(test_param, runtime, pushgateway, environment_file)
+ def __init__(self, test_param, lat_percentile, runtime, testname,
+ environment_file, gen_machine, sut_machine, background_machines):
+ super().__init__(test_param, runtime, testname, environment_file)
self.gen_machine = gen_machine
self.sut_machine = sut_machine
+ self.background_machines = background_machines
self.test['lat_percentile'] = lat_percentile
def run(self):
- # fieldnames = ['Flows','PacketSize','RequestedPPS','GeneratedPPS','SentPPS','ForwardedPPS','ReceivedPPS','AvgLatencyUSEC','MaxLatencyUSEC','Dropped','DropRate']
- # writer = csv.DictWriter(data_csv_file, fieldnames=fieldnames)
- # writer.writeheader()
+ result_details = {'Details': 'Nothing'}
imix = self.test['imix']
size = mean (imix)
flow_number = self.test['flowsize']
- attempts = 0
+ attempts = self.test['steps']
self.gen_machine.set_udp_packet_size(imix)
flow_number = self.gen_machine.set_flows(flow_number)
self.gen_machine.start_latency_cores()
- RapidLog.info("+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+")
- RapidLog.info("| Generator is sending UDP ({:>5} flow) packets ({:>5} bytes) to SUT via GW dropping and delaying packets. SUT sends packets back. Use ctrl-c to stop the test |".format(flow_number,size))
- RapidLog.info("+--------+------------------+-------------+-------------+-------------+------------------------+----------+----------+----------+-----------+-----------+-----------+-----------+-------+----+")
- RapidLog.info('| Test | Speed requested | Gen by core | Sent by NIC | Fwrd by SUT | Rec. by core | Avg. Lat.|{:.0f} Pcentil| Max. Lat.| Sent | Received | Lost | Total Lost|L.Ratio|Time|'.format(self.test['lat_percentile']*100))
- RapidLog.info("+--------+------------------+-------------+-------------+-------------+------------------------+----------+----------+----------+-----------+-----------+-----------+-----------+-------+----+")
-
+ RapidLog.info('+' + '-' * 188 + '+')
+ RapidLog.info(("| Generator is sending UDP ({:>5} flow) packets ({:>5}"
+ " bytes) to SUT via GW dropping and delaying packets. SUT sends "
+ "packets back.{:>60}").format(flow_number,round(size),'|'))
+ RapidLog.info('+' + '-' * 8 + '+' + '-' * 18 + '+' + '-' * 13 +
+ '+' + '-' * 13 + '+' + '-' * 13 + '+' + '-' * 24 + '+' +
+ '-' * 10 + '+' + '-' * 10 + '+' + '-' * 10 + '+' + '-' * 11
+ + '+' + '-' * 11 + '+' + '-' * 11 + '+' + '-' * 11 + '+'
+ + '-' * 7 + '+' + '-' * 4 + '+')
+ RapidLog.info(('| Test | Speed requested | Gen by core | Sent by NIC'
+ ' | Fwrd by SUT | Rec. by core | Avg. Lat.|{:.0f} Pcentil'
+ '| Max. Lat.| Sent | Received | Lost | Total Lost|'
+ 'L.Ratio|Time|').format(self.test['lat_percentile']*100))
+ RapidLog.info('+' + '-' * 8 + '+' + '-' * 18 + '+' + '-' * 13 +
+ '+' + '-' * 13 + '+' + '-' * 13 + '+' + '-' * 24 + '+' +
+ '-' * 10 + '+' + '-' * 10 + '+' + '-' * 10 + '+' + '-' * 11
+ + '+' + '-' * 11 + '+' + '-' * 11 + '+' + '-' * 11 + '+'
+ + '-' * 7 + '+' + '-' * 4 + '+')
speed = self.test['startspeed']
self.gen_machine.set_generator_speed(speed)
- while True:
- attempts += 1
+ while attempts:
+ attempts -= 1
print('Measurement ongoing at speed: ' + str(round(speed,2)) + '% ',end='\r')
sys.stdout.flush()
time.sleep(1)
             # Get statistics now that the generation is stable and no more ARP messages are being exchanged
- pps_req_tx,pps_tx,pps_sut_tx,pps_rx,lat_avg, lat_perc, lat_perc_max, lat_max, abs_tx, abs_rx, abs_dropped, abs_tx_fail, drop_rate, lat_min, lat_used, r, actual_duration = self.run_iteration(float(self.test['runtime']),flow_number,size,speed)
+ iteration_data = self.run_iteration(float(self.test['runtime']),flow_number,size,speed)
+ iteration_data['speed'] = speed
# Drop rate is expressed in percentage. lat_used is a ratio (0 to 1). The sum of these 2 should be 100%.
             # If the sum is lower than 95, it means that more than 5% of the latency measurements were dropped for accuracy reasons.
- if (drop_rate + lat_used * 100) < 95:
- lat_warning = bcolors.WARNING + ' Latency accuracy issue?: {:>3.0f}%'.format(lat_used*100) + bcolors.ENDC
+ if (iteration_data['drop_rate'] +
+ iteration_data['lat_used'] * 100) < 95:
+ lat_warning = ('{} Latency accuracy issue?: {:>3.0f}%'
+ '{}').format(bcolors.WARNING,
+ iteration_data['lat_used']*100, bcolors.ENDC)
else:
lat_warning = ''
- RapidLog.info(self.report_result(attempts,size,speed,pps_req_tx,pps_tx,pps_sut_tx,pps_rx,lat_avg,lat_perc,lat_perc_max,lat_max,abs_tx,abs_rx,abs_dropped,actual_duration))
-# writer.writerow({'Flows':flow_number,'PacketSize':(size+4),'RequestedPPS':self.get_pps(speed,size),'GeneratedPPS':pps_req_tx,'SentPPS':pps_tx,'ForwardedPPS':pps_sut_tx_str,'ReceivedPPS':pps_rx,'AvgLatencyUSEC':lat_avg,'MaxLatencyUSEC':lat_max,'Dropped':abs_dropped,'DropRate':drop_rate})
- if self.test['pushgateway']:
- URL = self.test['pushgateway'] + self.test['test'] + '/instance/' + self.test['environment_file']
- DATA = 'Flows {}\nPacketSize {}\nRequestedPPS {}\nGeneratedPPS {}\nSentPPS {}\nForwardedPPS {}\nReceivedPPS {}\nAvgLatencyUSEC {}\nMaxLatencyUSEC {}\nDropped {}\nDropRate {}\n'.format(flow_number,size+4,self.get_pps(speed,size),pps_req_tx,pps_tx,pps_sut_tx,pps_rx,lat_avg,lat_max,abs_dropped,drop_rate)
- HEADERS = {'X-Requested-With': 'Python requests', 'Content-type': 'text/xml'}
- response = requests.post(url=URL, data=DATA,headers=HEADERS)
- if (response.status_code != 202) and (response.status_code != 200):
- RapidLog.info('Cannot send metrics to {}'.format(URL))
- RapidLog.info(DATA)
+ iteration_prefix = {'speed' : '',
+ 'lat_avg' : '',
+ 'lat_perc' : '',
+ 'lat_max' : '',
+ 'abs_drop_rate' : '',
+ 'drop_rate' : ''}
+ RapidLog.info(self.report_result(attempts, size, iteration_data,
+ iteration_prefix))
+ iteration_data['test'] = self.test['testname']
+ iteration_data['environment_file'] = self.test['environment_file']
+ iteration_data['Flows'] = flow_number
+ iteration_data['Size'] = size
+ iteration_data['RequestedSpeed'] = RapidTest.get_pps(
+ iteration_data['speed'] ,size)
+ result_details = self.post_data(iteration_data)
+ RapidLog.debug(result_details)
+ RapidLog.info('+' + '-' * 8 + '+' + '-' * 18 + '+' + '-' * 13 +
+ '+' + '-' * 13 + '+' + '-' * 13 + '+' + '-' * 24 + '+' +
+ '-' * 10 + '+' + '-' * 10 + '+' + '-' * 10 + '+' + '-' * 11
+ + '+' + '-' * 11 + '+' + '-' * 11 + '+' + '-' * 11 + '+'
+ + '-' * 7 + '+' + '-' * 4 + '+')
self.gen_machine.stop_latency_cores()
- return (True)
+ return (True, result_details)
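
The latency-accuracy warning above boils down to a single comparison; a minimal sketch of the check described in the comments, with a hypothetical helper name and sample values:

    def latency_accuracy_warning(drop_rate, lat_used, threshold=95.0):
        """drop_rate is a percentage, lat_used a ratio; warn when more than
        roughly 5% of the latency samples were discarded."""
        return (drop_rate + lat_used * 100.0) < threshold

    print(latency_accuracy_warning(1.0, 0.90))   # True: 1 + 90 = 91 < 95
    print(latency_accuracy_warning(0.5, 0.99))   # False: 0.5 + 99 = 99.5
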
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_irqtest.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_irqtest.py
index feabe656..de7e6ae3 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_irqtest.py
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_irqtest.py
@@ -28,58 +28,79 @@ class IrqTest(RapidTest):
"""
Class to manage the irq testing
"""
- def __init__(self, test_param, runtime, pushgateway, environment_file,
+ def __init__(self, test_param, runtime, testname, environment_file,
machines):
- super().__init__(test_param, runtime, pushgateway, environment_file)
+ super().__init__(test_param, runtime, testname, environment_file)
self.machines = machines
def run(self):
- RapidLog.info("+----------------------------------------------------------------------------------------------------------------------------")
- RapidLog.info("| Measuring time probably spent dealing with an interrupt. Interrupting DPDK cores for more than 50us might be problematic ")
- RapidLog.info("| and result in packet loss. The first row shows the interrupted time buckets: first number is the bucket between 0us and ")
- RapidLog.info("| that number expressed in us and so on. The numbers in the other rows show how many times per second, the program was ")
- RapidLog.info("| interrupted for a time as specified by its bucket. '0' is printed when there are no interrupts in this bucket throughout ")
- RapidLog.info("| the duration of the test. 0.00 means there were interrupts in this bucket but very few. Due to rounding this shows as 0.00 ")
- RapidLog.info("+----------------------------------------------------------------------------------------------------------------------------")
+ RapidLog.info("+----------------------------------------------------------------------------------------------------------------------------+")
+ RapidLog.info("| Measuring time probably spent dealing with an interrupt. Interrupting DPDK cores for more than 50us might be problematic |")
+ RapidLog.info("| and result in packet loss. The first row shows the interrupted time buckets: first number is the bucket between 0us and |")
+ RapidLog.info("| that number expressed in us and so on. The numbers in the other rows show how many times per second, the program was |")
+ RapidLog.info("| interrupted for a time as specified by its bucket. '0' is printed when there are no interrupts in this bucket throughout |")
+ RapidLog.info("| the duration of the test. 0.00 means there were interrupts in this bucket but very few. Due to rounding this shows as 0.00 |")
+ RapidLog.info("+----------------------------------------------------------------------------------------------------------------------------+")
sys.stdout.flush()
+ max_loop_duration = 0
+ machine_details = {}
for machine in self.machines:
- buckets=machine.socket.show_irq_buckets(1)
+ buckets=machine.socket.show_irq_buckets(machine.get_cores()[0])
+ if max_loop_duration == 0:
+ # First time we go through the loop, we need to initialize
+ # result_details
+ result_details = {'test': self.test['testname'],
+ 'environment_file': self.test['environment_file'],
+ 'buckets': buckets}
print('Measurement ongoing ... ',end='\r')
- machine.stop()
- old_irq = [[0 for x in range(len(buckets)+1)] for y in range(len(machine.get_cores())+1)]
- irq = [[0 for x in range(len(buckets)+1)] for y in range(len(machine.get_cores())+1)]
- irq[0][0] = 'bucket us'
- for j,bucket in enumerate(buckets,start=1):
- irq[0][j] = '<'+ bucket
- irq[0][-1] = '>'+ buckets [-2]
- machine.start()
- time.sleep(2)
- for j,bucket in enumerate(buckets,start=1):
- for i,irqcore in enumerate(machine.get_cores(),start=1):
- old_irq[i][j] = machine.socket.irq_stats(irqcore,j-1)
+ machine.start() # PROX cores will be started within 0 to 1 seconds
+ # That is why we sleep a bit over 1 second to make sure all cores
+ # are started
+ time.sleep(1.2)
+ old_irq = [[0 for x in range(len(buckets))] for y in range(len(machine.get_cores()))]
+ irq = [[0 for x in range(len(buckets))] for y in range(len(machine.get_cores()))]
+ column_names = []
+ for bucket in buckets:
+ column_names.append('<{}'.format(bucket))
+ column_names[-1] = '>{}'.format(buckets[-2])
+ for j,bucket in enumerate(buckets):
+ for i,irqcore in enumerate(machine.get_cores()):
+ old_irq[i][j] = machine.socket.irq_stats(irqcore,j)
+            # Measurements in the loop above are updated by PROX every second.
+            # This means that taking the same measurement 0.5 seconds later
+            # might return the same data, or data from the next 1 s window.
time.sleep(float(self.test['runtime']))
- machine.stop()
- for i,irqcore in enumerate(machine.get_cores(),start=1):
- irq[i][0]='core %s'%irqcore
- for j,bucket in enumerate(buckets,start=1):
- diff = machine.socket.irq_stats(irqcore,j-1) - old_irq[i][j]
+ row_names = []
+ for i,irqcore in enumerate(machine.get_cores()):
+ row_names.append(irqcore)
+ for j,bucket in enumerate(buckets):
+ diff = machine.socket.irq_stats(irqcore,j) - old_irq[i][j]
if diff == 0:
irq[i][j] = '0'
else:
- irq[i][j] = str(round(old_div(diff,float(self.test['runtime'])), 2))
+ irq[i][j] = str(round(old_div(diff,
+ float(self.test['runtime'])), 2))
+ if max_loop_duration < int(bucket):
+ max_loop_duration = int(bucket)
+            # Measurements in the loop above are updated by PROX every second.
+            # This means that taking the same measurement 0.5 seconds later
+            # might return the same data, or data from the next 1 s window.
+            # Conclusion: we don't know the exact window size.
+            # Real measurement windows might be off by up to 1 second.
+            # This could be fixed in this script by checking this data every
+            # 0.5 seconds. Not implemented since we can also run this test for
+            # a longer time and decrease the error. The absolute number of
+            # interrupts is not that important.
+ machine.stop()
+ core_details = {}
RapidLog.info('Results for PROX instance %s'%machine.name)
- for row in irq:
- RapidLog.info(''.join(['{:>12}'.format(item) for item in row]))
- if self.test['pushgateway']:
- URL = self.test['pushgateway'] + self.test['test']+ '/instance/' + self.test['environment_file']
- HEADERS = {'X-Requested-With': 'Python requests', 'Content-type': 'text/xml'}
- #DATA = 'Machine {}\n'.format(machine.name)
- for i,irqcore in enumerate(machine.get_cores(),start=1):
- DATA = '{}\n'.format(irq[i][0])
- for j,bucket in enumerate(buckets,start=1):
- DATA = DATA + 'B{} {}\n'.format(irq[0][j].replace(">","M").replace("<","").replace(" ",""),irq[i][j])
- response = requests.post(url=URL, data=DATA,headers=HEADERS)
- if (response.status_code != 202) and (response.status_code != 200):
- RapidLog.info('Cannot send metrics to {}'.format(URL))
- RapidLog.info(DATA)
- return (True)
+ RapidLog.info('{:>12}'.format('bucket us') +
+ ''.join(['{:>12}'.format(item) for item in column_names]))
+ for j, row in enumerate(irq):
+ RapidLog.info('Core {:>7}'.format(row_names[j]) +
+ ''.join(['{:>12}'.format(item) for item in row]))
+ core_details['Core {}'.format(row_names[j])] = row
+ machine_details[machine.name] = core_details
+ result_details['machine_data'] = machine_details
+ result_details = self.post_data(result_details)
+ return (500000 - max_loop_duration, result_details)
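
PROX keeps cumulative per-core counters for every interrupt-duration bucket, so the test above samples them twice and divides the difference by the runtime. A minimal sketch of that computation; the helper name and counter values are hypothetical:

    def bucket_rates(old_counts, new_counts, runtime_s):
        """Interrupts per second for every bucket of one core."""
        return [round((new - old) / runtime_s, 2)
                for old, new in zip(old_counts, new_counts)]

    old = [120, 4, 0, 0]      # counters at the start of the run
    new = [1350, 9, 1, 0]     # counters 10 seconds later
    print(bucket_rates(old, new, 10.0))   # [123.0, 0.5, 0.1, 0.0]
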
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/k8sdeployment.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_k8s_deployment.py
index 5e921d46..1d1112f7 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/k8sdeployment.py
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_k8s_deployment.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python2.7
-
##
## Copyright (c) 2019-2020 Intel Corporation
##
@@ -18,11 +16,15 @@
import sys
from kubernetes import client, config
-import ConfigParser
+try:
+ import configparser
+except ImportError:
+ # Python 2.x fallback
+ import ConfigParser as configparser
import logging
from logging import handlers
-from pod import Pod
+from rapid_k8s_pod import Pod
class K8sDeployment:
"""Deployment class to create containers for test execution in Kubernetes
@@ -30,7 +32,7 @@ class K8sDeployment:
"""
LOG_FILE_NAME = "createrapidk8s.log"
SSH_PRIVATE_KEY = "./rapid_rsa_key"
- SSH_USER = "centos"
+ SSH_USER = "rapid"
POD_YAML_TEMPLATE_FILE_NAME = "pod-rapid.yaml"
@@ -38,6 +40,7 @@ class K8sDeployment:
_create_config = None
_runtime_config = None
_total_number_of_pods = 0
+ _namespace = "rapid-testing"
_pods = []
def __init__(self):
@@ -62,14 +65,18 @@ class K8sDeployment:
self._log.addHandler(console_handler)
# Initialize k8s plugin
- config.load_kube_config()
+ try:
+ config.load_kube_config()
+ except:
+ config.load_incluster_config()
+
Pod.k8s_CoreV1Api = client.CoreV1Api()
def load_create_config(self, config_file_name):
"""Read and parse configuration file for the test environment.
"""
self._log.info("Loading configuration file %s", config_file_name)
- self._create_config = ConfigParser.RawConfigParser()
+ self._create_config = configparser.RawConfigParser()
try:
self._create_config.read(config_file_name)
except Exception as e:
@@ -87,6 +94,15 @@ class K8sDeployment:
self._log.debug("Total number of pods %d" % self._total_number_of_pods)
+ if self._create_config.has_option("DEFAULT", "namespace"):
+ self._namespace = self._create_config.get(
+ "DEFAULT", "namespace")
+ else:
+ self._log.error("No option namespace in DEFAULT section")
+ return -1
+
+        self._log.debug("Using namespace %s" % self._namespace)
+
# Parse [PODx] sections
for i in range(1, int(self._total_number_of_pods) + 1):
# Search for POD name
@@ -95,7 +111,7 @@ class K8sDeployment:
pod_name = self._create_config.get(
"POD%d" % i, "name")
else:
- pod_name = "pod-rapid-%d" % i
+ pod_name = "prox-pod-%d" % i
# Search for POD hostname
if self._create_config.has_option("POD%d" % i,
@@ -105,6 +121,14 @@ class K8sDeployment:
else:
pod_nodeselector_hostname = None
+ # Search for POD spec
+ if self._create_config.has_option("POD%d" % i,
+ "spec_file_name"):
+ pod_spec_file_name = self._create_config.get(
+ "POD%d" % i, "spec_file_name")
+ else:
+ pod_spec_file_name = K8sDeployment.POD_YAML_TEMPLATE_FILE_NAME
+
# Search for POD dataplane static IP
if self._create_config.has_option("POD%d" % i,
"dp_ip"):
@@ -113,9 +137,19 @@ class K8sDeployment:
else:
pod_dp_ip = None
- pod = Pod(pod_name)
+ # Search for POD dataplane subnet
+ if self._create_config.has_option("POD%d" % i,
+ "dp_subnet"):
+ pod_dp_subnet = self._create_config.get(
+ "POD%d" % i, "dp_subnet")
+ else:
+ pod_dp_subnet = "24"
+
+ pod = Pod(pod_name, self._namespace)
pod.set_nodeselector(pod_nodeselector_hostname)
+ pod.set_spec_file_name(pod_spec_file_name)
pod.set_dp_ip(pod_dp_ip)
+ pod.set_dp_subnet(pod_dp_subnet)
pod.set_id(i)
# Add POD to the list of PODs which need to be created
@@ -132,7 +166,7 @@ class K8sDeployment:
# Create PODs using template from yaml file
for pod in self._pods:
self._log.info("Creating POD %s...", pod.get_name())
- pod.create_from_yaml(K8sDeployment.POD_YAML_TEMPLATE_FILE_NAME)
+ pod.create_from_yaml()
# Wait for PODs to start
for pod in self._pods:
@@ -142,11 +176,12 @@ class K8sDeployment:
for pod in self._pods:
pod.set_ssh_credentials(K8sDeployment.SSH_USER, K8sDeployment.SSH_PRIVATE_KEY)
pod.get_sriov_dev_mac()
+ pod.get_qat_dev()
def save_runtime_config(self, config_file_name):
self._log.info("Saving config %s for runrapid script...",
config_file_name)
- self._runtime_config = ConfigParser.RawConfigParser()
+ self._runtime_config = configparser.RawConfigParser()
# Section [DEFAULT]
# self._runtime_config.set("DEFAULT",
@@ -178,8 +213,13 @@ class K8sDeployment:
"dp_mac1", pod.get_dp_mac())
self._runtime_config.set("M%d" % pod.get_id(),
"dp_pci_dev", pod.get_dp_pci_dev())
+ if (pod.get_qat_pci_dev()):
+ for qat_index, qat_device in enumerate(pod.get_qat_pci_dev()):
+ self._runtime_config.set("M%d" % pod.get_id(),
+ "qat_pci_dev%d" % qat_index, qat_device)
self._runtime_config.set("M%d" % pod.get_id(),
- "dp_ip1", pod.get_dp_ip())
+ "dp_ip1", pod.get_dp_ip() + "/" +
+ pod.get_dp_subnet())
# Section [Varia]
self._runtime_config.add_section("Varia")
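
For reference, a minimal sketch of a create-config that the extended load_create_config() above would accept. The option names namespace, name, spec_file_name, dp_ip and dp_subnet come from the patch; the file name and the placement of total_number_of_pods in [DEFAULT] are assumptions:

    import configparser

    cfg = configparser.RawConfigParser()
    cfg.set('DEFAULT', 'namespace', 'rapid-testing')
    cfg.set('DEFAULT', 'total_number_of_pods', '2')
    cfg.add_section('POD1')
    cfg.set('POD1', 'name', 'prox-pod-1')
    cfg.set('POD1', 'spec_file_name', 'pod-rapid.yaml')
    cfg.set('POD1', 'dp_ip', '192.168.30.11')
    cfg.set('POD1', 'dp_subnet', '24')
    cfg.add_section('POD2')
    cfg.set('POD2', 'dp_ip', '192.168.30.12')  # name, spec file and subnet fall back to defaults
    with open('rapid.pods', 'w') as pods_file:
        cfg.write(pods_file)
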
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/pod.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_k8s_pod.py
index 61af9371..beaedd69 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/pod.py
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_k8s_pod.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python2.7
-
##
## Copyright (c) 2019 Intel Corporation
##
@@ -21,7 +19,7 @@ import time, yaml
import logging
from kubernetes import client, config
-from sshclient import SSHClient
+from rapid_sshclient import SSHClient
class Pod:
"""Class which represents test pods.
@@ -34,10 +32,12 @@ class Pod:
_name = "pod"
_namespace = "default"
_nodeSelector_hostname = None
+ _spec_filename = None
_last_status = None
_id = None
_admin_ip = None
_dp_ip = None
+ _dp_subnet = None
_ssh_client = None
@@ -50,6 +50,7 @@ class Pod:
self._name = name
self._namespace = namespace
self._ssh_client = SSHClient(logger_name = logger_name)
+ self.qat_vf = []
def __del__(self):
"""Destroy POD. Do a cleanup.
@@ -57,10 +58,11 @@ class Pod:
if self._ssh_client is not None:
self._ssh_client.disconnect()
- def create_from_yaml(self, file_name):
+ def create_from_yaml(self):
"""Load POD description from yaml file.
"""
- with open(path.join(path.dirname(__file__), file_name)) as yaml_file:
+ with open(path.join(path.dirname(__file__),
+ self._spec_filename)) as yaml_file:
self.body = yaml.safe_load(yaml_file)
self.body["metadata"]["name"] = self._name
@@ -68,14 +70,16 @@ class Pod:
if (self._nodeSelector_hostname is not None):
if ("nodeSelector" not in self.body["spec"]):
self.body["spec"]["nodeSelector"] = {}
- self.body["spec"]["nodeSelector"]["kubernetes.io/hostname"] = self._nodeSelector_hostname
+ self.body["spec"]["nodeSelector"]["kubernetes.io/hostname"] = \
+ self._nodeSelector_hostname
self._log.debug("Creating POD, body:\n%s" % self.body)
try:
self.k8s_CoreV1Api.create_namespaced_pod(body = self.body,
namespace = self._namespace)
except client.rest.ApiException as e:
- self._log.error("Couldn't create POD %s!\n%s\n" % (self._name, e))
+ self._log.error("Couldn't create POD %s!\n%s\n" % (self._name,
+ e))
def terminate(self):
"""Terminate POD. Close SSH connection.
@@ -130,12 +134,18 @@ class Pod:
def get_dp_ip(self):
return self._dp_ip
+ def get_dp_subnet(self):
+ return self._dp_subnet
+
def get_dp_mac(self):
return self._sriov_vf_mac
def get_dp_pci_dev(self):
return self._sriov_vf
+ def get_qat_pci_dev(self):
+ return self.qat_vf
+
def get_id(self):
return self._id
@@ -151,13 +161,35 @@ class Pod:
self._last_status = pod.status.phase
return self._last_status
+ def get_qat_dev(self):
+        """Get QAT devices, if any, assigned by the k8s QAT device plugin.
+ """
+ self._log.info("Checking assigned QAT VF for POD %s" % self._name)
+ ret = self._ssh_client.run_cmd("cat /opt/rapid/k8s_qat_device_plugin_envs")
+ if ret != 0:
+            self._log.error("Failed to check assigned QAT VF! "
+ "Error %s" % self._ssh_client.get_error())
+ return -1
+
+ cmd_output = self._ssh_client.get_output().decode("utf-8").rstrip()
+
+ if cmd_output:
+ self._log.debug("Before: Using QAT VF %s" % self.qat_vf)
+ self._log.debug("Environment variable %s" % cmd_output)
+ for line in cmd_output.splitlines():
+ self.qat_vf.append(line.split("=")[1])
+ self._log.debug("Using QAT VF %s" % self.qat_vf)
+ else:
+ self._log.debug("No QAT devices for this pod")
+ self.qat_vf = None
+
def get_sriov_dev_mac(self):
"""Get assigned by k8s SRIOV network device plugin SRIOV VF devices.
Return 0 in case of sucessfull configuration.
Otherwise return -1.
"""
self._log.info("Checking assigned SRIOV VF for POD %s" % self._name)
- ret = self._ssh_client.run_cmd("cat /opt/k8s_sriov_device_plugin_envs")
+ ret = self._ssh_client.run_cmd("cat /opt/rapid/k8s_sriov_device_plugin_envs")
if ret != 0:
self._log.error("Failed to check assigned SRIOV VF!"
"Error %s" % self._ssh_client.get_error())
@@ -171,8 +203,24 @@ class Pod:
self._sriov_vf = cmd_output.split(",")[0]
self._log.debug("Using first SRIOV VF %s" % self._sriov_vf)
- self._log.info("Getting MAC address for assigned SRIOV VF %s" % self._sriov_vf)
- self._ssh_client.run_cmd("sudo /opt/rapid/port_info_app -n 4 -w %s" % self._sriov_vf)
+ # find DPDK version
+ self._log.info("Checking DPDK version for POD %s" % self._name)
+ ret = self._ssh_client.run_cmd("cat /opt/rapid/dpdk_version")
+ if ret != 0:
+            self._log.error("Failed to check DPDK version! "
+ "Error %s" % self._ssh_client.get_error())
+ return -1
+ dpdk_version = self._ssh_client.get_output().decode("utf-8").rstrip()
+ self._log.debug("DPDK version %s" % dpdk_version)
+ if (dpdk_version >= '20.11.0'):
+ allow_parameter = 'allow'
+ else:
+ allow_parameter = 'pci-whitelist'
+
+ self._log.info("Getting MAC address for assigned SRIOV VF %s" % \
+ self._sriov_vf)
+        ret = self._ssh_client.run_cmd("sudo /opt/rapid/port_info_app -n 4 "
+            "--{} {}".format(allow_parameter, self._sriov_vf))
if ret != 0:
self._log.error("Failed to get MAC address!"
"Error %s" % self._ssh_client.get_error())
@@ -191,6 +239,9 @@ class Pod:
def set_dp_ip(self, dp_ip):
self._dp_ip = dp_ip
+ def set_dp_subnet(self, dp_subnet):
+ self._dp_subnet = dp_subnet
+
def set_id(self, pod_id):
self._id = pod_id
@@ -199,6 +250,11 @@ class Pod:
"""
self._nodeSelector_hostname = hostname
+ def set_spec_file_name(self, file_name):
+ """Set pod spec filename.
+ """
+ self._spec_filename = file_name
+
def set_ssh_credentials(self, user, rsa_private_key):
"""Set SSH credentials for the SSH connection to the POD.
"""
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_log.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_log.py
index 9c794586..1ad54273 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_log.py
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_log.py
@@ -42,56 +42,67 @@ class RapidLog(object):
@staticmethod
def log_init(log_file, loglevel, screenloglevel, version):
- # create formatters
- screen_formatter = logging.Formatter("%(message)s")
- file_formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
-
- # get a top-level logger,
- # set its log level,
- # BUT PREVENT IT from propagating messages to the root logger
- #
- log = logging.getLogger()
- numeric_level = getattr(logging, loglevel.upper(), None)
- if not isinstance(numeric_level, int):
- raise ValueError('Invalid log level: %s' % loglevel)
- log.setLevel(numeric_level)
- log.propagate = 0
-
- # create a console handler
- # and set its log level to the command-line option
- #
- console_handler = logging.StreamHandler(sys.stdout)
- #console_handler.setLevel(logging.INFO)
- numeric_screenlevel = getattr(logging, screenloglevel.upper(), None)
- if not isinstance(numeric_screenlevel, int):
- raise ValueError('Invalid screenlog level: %s' % screenloglevel)
- console_handler.setLevel(numeric_screenlevel)
- console_handler.setFormatter(screen_formatter)
-
- # create a file handler
- # and set its log level
- #
- file_handler = logging.handlers.RotatingFileHandler(log_file, backupCount=10)
- #file_handler = log.handlers.TimedRotatingFileHandler(log_file, 'D', 1, 5)
- file_handler.setLevel(numeric_level)
- file_handler.setFormatter(file_formatter)
-
- # add handlers to the logger
- #
- log.addHandler(file_handler)
- log.addHandler(console_handler)
-
- # Check if log exists and should therefore be rolled
- needRoll = os.path.isfile(log_file)
-
-
- # This is a stale log, so roll it
- if needRoll:
- # Add timestamp
- log.debug('\n---------\nLog closed on %s.\n---------\n' % time.asctime())
-
- # Roll over on application start
- log.handlers[0].doRollover()
+ log = logging.getLogger(__name__)
+ makeFileHandler = True
+ makeStreamHandler = True
+ if len(log.handlers) > 0:
+ for handler in log.handlers:
+ if isinstance(handler, logging.FileHandler):
+ makeFileHandler = False
+ elif isinstance(handler, logging.StreamHandler):
+ makeStreamHandler = False
+ if makeStreamHandler:
+ # create formatters
+ screen_formatter = logging.Formatter("%(message)s")
+ # create a console handler
+ # and set its log level to the command-line option
+ #
+ console_handler = logging.StreamHandler(sys.stdout)
+ #console_handler.setLevel(logging.INFO)
+ numeric_screenlevel = getattr(logging, screenloglevel.upper(), None)
+ if not isinstance(numeric_screenlevel, int):
+ raise ValueError('Invalid screenlog level: %s' % screenloglevel)
+ console_handler.setLevel(numeric_screenlevel)
+ console_handler.setFormatter(screen_formatter)
+ # add handler to the logger
+ #
+ log.addHandler(console_handler)
+ if makeFileHandler:
+ # create formatters
+ file_formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
+            # set the logger's log level,
+            # but prevent it from propagating messages to the root logger
+ #
+ numeric_level = getattr(logging, loglevel.upper(), None)
+ if not isinstance(numeric_level, int):
+ raise ValueError('Invalid log level: %s' % loglevel)
+ log.setLevel(numeric_level)
+ log.propagate = 0
+
+
+ # create a file handler
+ # and set its log level
+ #
+ file_handler = logging.handlers.RotatingFileHandler(log_file, backupCount=10)
+ file_handler.setLevel(numeric_level)
+ file_handler.setFormatter(file_formatter)
+
+ # add handler to the logger
+ #
+ log.addHandler(file_handler)
+
+ # Check if log exists and should therefore be rolled
+ needRoll = os.path.isfile(log_file)
+
+
+ # This is a stale log, so roll it
+ if needRoll:
+ # Add timestamp
+ log.debug('\n---------\nLog closed on %s.\n---------\n' % time.asctime())
+
+ # Roll over on application start
+ file_handler.doRollover()
# Add timestamp
log.debug('\n---------\nLog started on %s.\n---------\n' % time.asctime())
@@ -100,13 +111,21 @@ class RapidLog(object):
RapidLog.log = log
@staticmethod
+ def log_close():
+ for handler in RapidLog.log.handlers:
+ if isinstance(handler, logging.FileHandler):
+ handler.close()
+ RapidLog.log.removeHandler(handler)
+
+ @staticmethod
def exception(exception_info):
RapidLog.log.exception(exception_info)
- raise Exception(exception_info)
+ exit(1)
+ @staticmethod
def critical(critical_info):
RapidLog.log.critical(critical_info)
- raise Exception(critical_info)
+ exit(1)
@staticmethod
def error(error_info):
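
Because the reworked log_init() first inspects the handlers already attached to the logger, calling it more than once in the same interpreter no longer duplicates console or file output. A minimal usage sketch; the argument values are hypothetical:

    from rapid_log import RapidLog

    RapidLog.log_init('runrapid.log', 'DEBUG', 'INFO', '2021.03.15')
    RapidLog.log_init('runrapid.log', 'DEBUG', 'INFO', '2021.03.15')  # no duplicate handlers
    RapidLog.info('logged once per call, not twice')
    RapidLog.log_close()   # detach the file handler before a subsequent run
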
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_machine.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_machine.py
index 7a5ebd4b..47f858d0 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_machine.py
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_machine.py
@@ -18,43 +18,123 @@
from rapid_log import RapidLog
from prox_ctrl import prox_ctrl
+import os
import re
+import uuid
class RapidMachine(object):
"""
- Class to deal with rapid configuration files
+ Class to deal with a PROX instance (VM, bare metal, container)
"""
- def __init__(self, key, user, vim, rundir, machine_params):
+ def __init__(self, key, user, password, vim, rundir, resultsdir,
+ machine_params, configonly):
self.name = machine_params['name']
self.ip = machine_params['admin_ip']
self.key = key
self.user = user
+ self.password = password
self.rundir = rundir
+ self.resultsdir = resultsdir
self.dp_ports = []
self.dpdk_port_index = []
+ self.configonly = configonly
index = 1
while True:
ip_key = 'dp_ip{}'.format(index)
mac_key = 'dp_mac{}'.format(index)
- if ip_key in machine_params.keys() and mac_key in machine_params.keys():
- dp_port = {'ip': machine_params[ip_key], 'mac' : machine_params[mac_key]}
+ if ip_key in machine_params.keys():
+ if mac_key in machine_params.keys():
+ dp_port = {'ip': machine_params[ip_key], 'mac' : machine_params[mac_key]}
+ else:
+ dp_port = {'ip': machine_params[ip_key], 'mac' : None}
self.dp_ports.append(dict(dp_port))
self.dpdk_port_index.append(index - 1)
index += 1
else:
break
- self.rundir = rundir
self.machine_params = machine_params
- self._client = prox_ctrl(self.ip, self.key, self.user)
- self._client.connect()
- if vim in ['OpenStack']:
- self.devbind()
- self.generate_lua(vim)
- self._client.scp_put(self.machine_params['config_file'], '{}/{}'.format(self.rundir, machine_params['config_file']))
+ self.vim = vim
+ self.cpu_mapping = None
+ if 'config_file' in self.machine_params.keys():
+ PROXConfigfile = open (self.machine_params['config_file'], 'r')
+ PROXConfig = PROXConfigfile.read()
+ PROXConfigfile.close()
+            self.all_tasks_for_this_cfg = set(re.findall(r"task\s*=\s*(\d+)", PROXConfig))
def get_cores(self):
return (self.machine_params['cores'])
+    def expand_list_format(self, list_format):
+        """Expand a cpuset list-format string (comma-separated numbers and
+        ranges of numbers) into an explicit list of cpu ids. For more
+        information please see
+        https://man7.org/linux/man-pages/man7/cpuset.7.html
+        """
+        list_expanded = []
+        for num in list_format.split(','):
+            if '-' in num:
+                num_range = num.split('-')
+                list_expanded += range(int(num_range[0]), int(num_range[1]) + 1)
+            else:
+                list_expanded.append(int(num))
+        return list_expanded
+
+ def read_cpuset(self):
+        """Read the list of cpus on which we are allowed to execute
+ """
+ cpu_set_file = '/sys/fs/cgroup/cpuset.cpus'
+ cmd = 'test -e {0} && echo exists'.format(cpu_set_file)
+ if (self._client.run_cmd(cmd).decode().rstrip()):
+ cmd = 'cat {}'.format(cpu_set_file)
+ else:
+ cpu_set_file = '/sys/fs/cgroup/cpuset/cpuset.cpus'
+ cmd = 'test -e {0} && echo exists'.format(cpu_set_file)
+ if (self._client.run_cmd(cmd).decode().rstrip()):
+ cmd = 'cat {}'.format(cpu_set_file)
+ else:
+                RapidLog.critical('Cannot determine cpuset')
+ cpuset_cpus = self._client.run_cmd(cmd).decode().rstrip()
+ RapidLog.debug('{} ({}): Allocated cpuset: {}'.format(self.name, self.ip, cpuset_cpus))
+ self.cpu_mapping = self.expand_list_format(cpuset_cpus)
+ RapidLog.debug('{} ({}): Expanded cpuset: {}'.format(self.name, self.ip, self.cpu_mapping))
+
+ # Log CPU core mapping for user information
+ cpu_mapping_str = ''
+ for i in range(len(self.cpu_mapping)):
+ cpu_mapping_str = cpu_mapping_str + '[' + str(i) + '->' + str(self.cpu_mapping[i]) + '], '
+ cpu_mapping_str = cpu_mapping_str[:-2]
+ RapidLog.debug('{} ({}): CPU mapping: {}'.format(self.name, self.ip, cpu_mapping_str))
+
+ def remap_cpus(self, cpus):
+        """Convert the relative cpu ids passed in as parameter to the
+        corresponding cpu ids from the allocated cpuset list
+ """
+ cpus_remapped = []
+ for cpu in cpus:
+ cpus_remapped.append(self.cpu_mapping[cpu])
+ return cpus_remapped
+
+ def remap_all_cpus(self):
+        """Convert the relative cpu ids for the core parameters (mcore, cores, altcores)
+ """
+ if self.cpu_mapping is None:
+ RapidLog.debug('{} ({}): cpu mapping is not defined! Please check the configuration!'.format(self.name, self.ip))
+ return
+
+ if 'mcore' in self.machine_params.keys():
+ cpus_remapped = self.remap_cpus(self.machine_params['mcore'])
+ RapidLog.debug('{} ({}): mcore {} remapped to {}'.format(self.name, self.ip, self.machine_params['mcore'], cpus_remapped))
+ self.machine_params['mcore'] = cpus_remapped
+
+ if 'cores' in self.machine_params.keys():
+ cpus_remapped = self.remap_cpus(self.machine_params['cores'])
+ RapidLog.debug('{} ({}): cores {} remapped to {}'.format(self.name, self.ip, self.machine_params['cores'], cpus_remapped))
+ self.machine_params['cores'] = cpus_remapped
+
+ if 'altcores' in self.machine_params.keys():
+ cpus_remapped = self.remap_cpus(self.machine_params['altcores'])
+ RapidLog.debug('{} ({}): altcores {} remapped to {}'.format(self.name, self.ip, self.machine_params['altcores'], cpus_remapped))
+ self.machine_params['altcores'] = cpus_remapped
+
def devbind(self):
# Script to bind the right network interface to the poll mode driver
for index, dp_port in enumerate(self.dp_ports, start = 1):
@@ -63,14 +143,11 @@ class RapidMachine(object):
cmd = 'sed -i \'s/MACADDRESS/' + dp_port['mac'] + '/\' ' + DevBindFileName
result = self._client.run_cmd(cmd)
RapidLog.debug('devbind.sh MAC updated for port {} on {} {}'.format(index, self.name, result))
- result = self._client.run_cmd(DevBindFileName)
- RapidLog.debug('devbind.sh running for port {} on {} {}'.format(index, self.name, result))
-
- def generate_lua(self, vim, appendix = ''):
- PROXConfigfile = open (self.machine_params['config_file'], 'r')
- PROXConfig = PROXConfigfile.read()
- PROXConfigfile.close()
- self.all_tasks_for_this_cfg = set(re.findall("task\s*=\s*(\d+)",PROXConfig))
+ if ((not self.configonly) and self.machine_params['prox_launch_exit']):
+ result = self._client.run_cmd(DevBindFileName)
+ RapidLog.debug('devbind.sh running for port {} on {} {}'.format(index, self.name, result))
+
+ def generate_lua(self, appendix = ''):
self.LuaFileName = 'parameters-{}.lua'.format(self.ip)
with open(self.LuaFileName, "w") as LuaFile:
LuaFile.write('require "helper"\n')
@@ -78,29 +155,93 @@ class RapidMachine(object):
for index, dp_port in enumerate(self.dp_ports, start = 1):
LuaFile.write('local_ip{}="{}"\n'.format(index, dp_port['ip']))
LuaFile.write('local_hex_ip{}=convertIPToHex(local_ip{})\n'.format(index, index))
- if vim in ['kubernetes']:
- LuaFile.write("eal=\"--socket-mem=512,0 --file-prefix %s --pci-whitelist %s\"\n" % (self.name, self.machine_params['dp_pci_dev']))
+ if self.vim in ['kubernetes']:
+ cmd = 'cat /opt/rapid/dpdk_version'
+ dpdk_version = self._client.run_cmd(cmd).decode().rstrip()
+ if (dpdk_version >= '20.11.0'):
+ allow_parameter = 'allow'
+ else:
+ allow_parameter = 'pci-whitelist'
+ eal_line = 'eal=\"--file-prefix {}{} --{} {} --force-max-simd-bitwidth=512'.format(
+ self.name, str(uuid.uuid4()), allow_parameter,
+ self.machine_params['dp_pci_dev'])
+ looking_for_qat = True
+ index = 0
+ while (looking_for_qat):
+ if 'qat_pci_dev{}'.format(index) in self.machine_params:
+ eal_line += ' --{} {}'.format(allow_parameter,
+ self.machine_params['qat_pci_dev{}'.format(index)])
+ index += 1
+ else:
+ looking_for_qat = False
+ eal_line += '"\n'
+ LuaFile.write(eal_line)
else:
LuaFile.write("eal=\"\"\n")
+ if 'mcore' in self.machine_params.keys():
+ LuaFile.write('mcore="%s"\n'% ','.join(map(str,
+ self.machine_params['mcore'])))
if 'cores' in self.machine_params.keys():
- LuaFile.write('cores="%s"\n'% ','.join(map(str, self.machine_params['cores'])))
+ LuaFile.write('cores="%s"\n'% ','.join(map(str,
+ self.machine_params['cores'])))
+ if 'altcores' in self.machine_params.keys():
+ LuaFile.write('altcores="%s"\n'% ','.join(map(str,
+ self.machine_params['altcores'])))
if 'ports' in self.machine_params.keys():
- LuaFile.write('ports="%s"\n'% ','.join(map(str, self.machine_params['ports'])))
+ LuaFile.write('ports="%s"\n'% ','.join(map(str,
+ self.machine_params['ports'])))
if 'dest_ports' in self.machine_params.keys():
for index, dest_port in enumerate(self.machine_params['dest_ports'], start = 1):
LuaFile.write('dest_ip{}="{}"\n'.format(index, dest_port['ip']))
LuaFile.write('dest_hex_ip{}=convertIPToHex(dest_ip{})\n'.format(index, index))
- LuaFile.write('dest_hex_mac{}="{}"\n'.format(index , dest_port['mac'].replace(':',' ')))
+ if dest_port['mac']:
+ LuaFile.write('dest_hex_mac{}="{}"\n'.format(index ,
+ dest_port['mac'].replace(':',' ')))
+ if 'gw_vm' in self.machine_params.keys():
+ for index, gw_ip in enumerate(self.machine_params['gw_ips'],
+ start = 1):
+ LuaFile.write('gw_ip{}="{}"\n'.format(index, gw_ip))
+ LuaFile.write('gw_hex_ip{}=convertIPToHex(gw_ip{})\n'.
+ format(index, index))
LuaFile.write(appendix)
self._client.scp_put(self.LuaFileName, self.rundir + '/parameters.lua')
self._client.scp_put('helper.lua', self.rundir + '/helper.lua')
def start_prox(self, autostart=''):
- if self.machine_params['prox_launch_exit']:
- cmd = 'sudo {}/prox {} -t -o cli -f {}/{}'.format(self.rundir, autostart, self.rundir, self.machine_params['config_file'])
- result = self._client.fork_cmd(cmd, 'PROX Testing on {}'.format(self.name))
- RapidLog.debug("Starting PROX on {}: {}, {}".format(self.name, cmd, result))
- self.socket = self._client.connect_socket()
+ if self.machine_params['prox_socket']:
+ self._client = prox_ctrl(self.ip, self.key, self.user,
+ self.password)
+ self._client.test_connection()
+ if self.vim in ['OpenStack']:
+ self.devbind()
+ if self.vim in ['kubernetes']:
+ self.read_cpuset()
+ self.remap_all_cpus()
+            _, prox_config_file_name = os.path.split(
+                self.machine_params['config_file'])
+ if self.machine_params['prox_launch_exit']:
+ self.generate_lua()
+ self._client.scp_put(self.machine_params['config_file'], '{}/{}'.
+ format(self.rundir, prox_config_file_name))
+ if not self.configonly:
+ cmd = 'sudo {}/prox {} -t -o cli -f {}/{}'.format(self.rundir,
+ autostart, self.rundir, prox_config_file_name)
+ RapidLog.debug("Starting PROX on {}: {}".format(self.name,
+ cmd))
+ result = self._client.run_cmd(cmd)
+ RapidLog.debug("Finished PROX on {}: {}".format(self.name,
+ cmd))
+
+ def close_prox(self):
+ if (not self.configonly) and self.machine_params[
+ 'prox_socket'] and self.machine_params['prox_launch_exit']:
+ self.socket.quit_prox()
+ self._client.scp_get('/prox.log', '{}/{}.prox.log'.format(
+ self.resultsdir, self.name))
+
+ def connect_prox(self):
+ if self.machine_params['prox_socket']:
+ self.socket = self._client.connect_socket()
def start(self):
self.socket.start(self.get_cores())
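
A minimal sketch of the cpuset handling added above, assuming a container whose cgroup cpuset is "2-4,8": the kernel's list format is expanded into explicit cpu ids, after which the relative core numbers from the test config are remapped onto them:

    cpuset_cpus = '2-4,8'        # as read from /sys/fs/cgroup/cpuset.cpus
    cpu_mapping = []
    for num in cpuset_cpus.split(','):
        if '-' in num:
            lo, hi = num.split('-')
            cpu_mapping += range(int(lo), int(hi) + 1)
        else:
            cpu_mapping.append(int(num))
    print(cpu_mapping)           # [2, 3, 4, 8]

    relative_cores = [0, 1, 2]   # 'cores' from a TestM section
    print([cpu_mapping[c] for c in relative_cores])   # [2, 3, 4]
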
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_parser.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_parser.py
index df71811d..143323b8 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_parser.py
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_parser.py
@@ -24,6 +24,7 @@ except ImportError:
# Python 2.x fallback
import ConfigParser as configparser
import ast
+inf = float("inf")
class RapidConfigParser(object):
"""
@@ -33,26 +34,43 @@ class RapidConfigParser(object):
def parse_config(test_params):
testconfig = configparser.RawConfigParser()
testconfig.read(test_params['test_file'])
- test_params['required_number_of_test_machines'] = int(testconfig.get('TestParameters', 'total_number_of_test_machines'))
- test_params['number_of_tests'] = int(testconfig.get('TestParameters', 'number_of_tests'))
+ test_params['required_number_of_test_machines'] = int(testconfig.get(
+ 'TestParameters', 'total_number_of_test_machines'))
+ test_params['number_of_tests'] = int(testconfig.get('TestParameters',
+ 'number_of_tests'))
test_params['TestName'] = testconfig.get('TestParameters', 'name')
if testconfig.has_option('TestParameters', 'lat_percentile'):
- test_params['lat_percentile'] = old_div(float(testconfig.get('TestParameters', 'lat_percentile')),100.0)
+ test_params['lat_percentile'] = old_div(float(
+ testconfig.get('TestParameters', 'lat_percentile')),100.0)
else:
test_params['lat_percentile'] = 0.99
- RapidLog.info('Latency percentile at {:.0f}%'.format(test_params['lat_percentile']*100))
+ RapidLog.info('Latency percentile at {:.0f}%'.format(
+ test_params['lat_percentile']*100))
+ if testconfig.has_option('TestParameters', 'sleep_time'):
+ test_params['sleep_time'] = int(testconfig.get('TestParameters', 'sleep_time'))
+ if test_params['sleep_time'] < 2:
+ test_params['sleep_time'] = 2
+ else:
+ test_params['sleep_time'] = 2
+
+ if testconfig.has_option('TestParameters', 'ipv6'):
+ test_params['ipv6'] = testconfig.getboolean('TestParameters','ipv6')
+ else:
+ test_params['ipv6'] = False
config = configparser.RawConfigParser()
config.read(test_params['environment_file'])
test_params['vim_type'] = config.get('Varia', 'vim')
- test_params['key'] = config.get('ssh', 'key')
test_params['user'] = config.get('ssh', 'user')
- test_params['total_number_of_machines'] = int(config.get('rapid', 'total_number_of_machines'))
- #if config.has_option('TestParameters', 'pushgateway'):
- if config.has_option('Varia', 'pushgateway'):
- test_params['pushgateway'] = config.get('Varia', 'pushgateway')
- RapidLog.info('Measurements will be pushed to %s'%test_params['pushgateway'])
+ if config.has_option('ssh', 'key'):
+ test_params['key'] = config.get('ssh', 'key')
else:
- test_params['pushgateway'] = None
+ test_params['key'] = None
+ if config.has_option('ssh', 'password'):
+ test_params['password'] = config.get('ssh', 'password')
+ else:
+ test_params['password'] = None
+ test_params['total_number_of_machines'] = int(config.get('rapid',
+ 'total_number_of_machines'))
tests = []
test = {}
for test_index in range(1, test_params['number_of_tests']+1):
@@ -60,51 +78,86 @@ class RapidConfigParser(object):
section = 'test%d'%test_index
options = testconfig.options(section)
for option in options:
- if option in ['imix','imixs','flows']:
- test[option] = ast.literal_eval(testconfig.get(section, option))
-# test[option] = [int(i) for i in test[option]]
- elif option in ['maxframespersecondallingress','stepsize','flowsize']:
+ if option in ['imix','imixs','flows', 'warmupimix']:
+ test[option] = ast.literal_eval(testconfig.get(section,
+ option))
+ elif option in ['maxframespersecondallingress','stepsize',
+ 'flowsize','warmupflowsize','warmuptime', 'steps']:
test[option] = int(testconfig.get(section, option))
- elif option in ['startspeed','drop_rate_threshold','lat_avg_threshold','lat_perc_threshold','lat_max_threshold','accuracy','maxr','maxz','pass_threshold']:
+ elif option in ['startspeed', 'step', 'drop_rate_threshold',
+ 'generator_threshold','lat_avg_threshold','lat_perc_threshold',
+ 'lat_max_threshold','accuracy','maxr','maxz',
+ 'ramp_step','warmupspeed','mis_ordered_threshold']:
test[option] = float(testconfig.get(section, option))
else:
test[option] = testconfig.get(section, option)
tests.append(dict(test))
for test in tests:
- if test['test'] in ['flowsizetest','TST009test']:
+ if test['test'] in ['flowsizetest', 'TST009test', 'increment_till_fail']:
if 'drop_rate_threshold' not in test.keys():
test['drop_rate_threshold'] = 0
+ thresholds = ['generator_threshold','lat_avg_threshold', \
+ 'lat_perc_threshold','lat_max_threshold','mis_ordered_threshold']
+ for threshold in thresholds:
+ if threshold not in test.keys():
+ test[threshold] = inf
test_params['tests'] = tests
- if test_params['required_number_of_test_machines'] > test_params['total_number_of_machines']:
+ if test_params['required_number_of_test_machines'] > test_params[
+ 'total_number_of_machines']:
RapidLog.exception("Not enough VMs for this test: %d needed and only %d available" % (required_number_of_test_machines,total_number_of_machines))
raise Exception("Not enough VMs for this test: %d needed and only %d available" % (required_number_of_test_machines,total_number_of_machines))
+ map_info = test_params['machine_map_file'].strip('[]').split(',')
+ map_info_length = len(map_info)
+ # If map_info is a list where the first entry is numeric, we assume we
+ # are dealing with a list of machines and NOT the machine.map file
+ if map_info[0].isnumeric():
+ if map_info_length < test_params[
+ 'required_number_of_test_machines']:
+ RapidLog.exception('Not enough machine indices in --map \
+ parameter: {}. Needing {} entries'.format(map_info,
+ test_params['required_number_of_test_machines']))
+ machine_index = list(map(int,map_info))
+ else:
+ machine_map = configparser.RawConfigParser()
+ machine_map.read(test_params['machine_map_file'])
+ machine_index = []
+ for test_machine in range(1,
+ test_params['required_number_of_test_machines']+1):
+ machine_index.append(int(machine_map.get(
+ 'TestM%d'%test_machine, 'machine_index')))
machine_map = configparser.RawConfigParser()
machine_map.read(test_params['machine_map_file'])
machines = []
machine = {}
- for test_machine in range(1, test_params['required_number_of_test_machines']+1):
+ for test_machine in range(1, test_params[
+ 'required_number_of_test_machines']+1):
machine.clear()
- if not(testconfig.has_option('TestM%d'%test_machine, 'prox_socket') and not testconfig.getboolean('TestM%d'%test_machine, 'prox_socket')):
- section = 'TestM%d'%test_machine
- options = testconfig.options(section)
- for option in options:
- if option in ['prox_socket','prox_launch_exit','monitor']:
- machine[option] = testconfig.getboolean(section, option)
- elif option in ['cores', 'gencores','latcores']:
- machine[option] = ast.literal_eval(testconfig.get(section, option))
- else:
- machine[option] = testconfig.get(section, option)
- for key in ['prox_socket','prox_launch_exit']:
- if key not in machine.keys():
- machine[key] = True
- if 'monitor' not in machine.keys():
- machine['monitor'] = True
- index = int(machine_map.get('TestM%d'%test_machine, 'machine_index'))
- section = 'M%d'%index
- options = config.options(section)
- for option in options:
- machine[option] = config.get(section, option)
- machines.append(dict(machine))
+ section = 'TestM%d'%test_machine
+ options = testconfig.options(section)
+ for option in options:
+ if option in ['prox_socket','prox_launch_exit','monitor']:
+ machine[option] = testconfig.getboolean(section, option)
+ elif option in ['mcore', 'cores', 'gencores', 'latcores',
+ 'altcores']:
+ machine[option] = ast.literal_eval(testconfig.get(
+ section, option))
+ elif option in ['bucket_size_exp']:
+ machine[option] = int(testconfig.get(section, option))
+ if machine[option] < 11:
+ RapidLog.exception(
+ "Minimum Value for bucket_size_exp is 11")
+ else:
+ machine[option] = testconfig.get(section, option)
+ for key in ['prox_socket','prox_launch_exit']:
+ if key not in machine.keys():
+ machine[key] = True
+ if 'monitor' not in machine.keys():
+ machine['monitor'] = True
+ section = 'M%d'%machine_index[test_machine-1]
+ options = config.options(section)
+ for option in options:
+ machine[option] = config.get(section, option)
+ machines.append(dict(machine))
for machine in machines:
dp_ports = []
if 'dest_vm' in machine.keys():
@@ -112,10 +165,13 @@ class RapidConfigParser(object):
while True:
dp_ip_key = 'dp_ip{}'.format(index)
dp_mac_key = 'dp_mac{}'.format(index)
- if dp_ip_key in machines[int(machine['dest_vm'])-1].keys() and \
- dp_mac_key in machines[int(machine['dest_vm'])-1].keys():
- dp_port = {'ip': machines[int(machine['dest_vm'])-1][dp_ip_key],
- 'mac' : machines[int(machine['dest_vm'])-1][dp_mac_key]}
+ if dp_ip_key in machines[int(machine['dest_vm'])-1].keys():
+ if dp_mac_key in machines[int(machine['dest_vm'])-1].keys():
+ dp_port = {'ip': machines[int(machine['dest_vm'])-1][dp_ip_key],
+ 'mac' : machines[int(machine['dest_vm'])-1][dp_mac_key]}
+ else:
+ dp_port = {'ip': machines[int(machine['dest_vm'])-1][dp_ip_key],
+ 'mac' : None}
dp_ports.append(dict(dp_port))
index += 1
else:
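
The --map handling above accepts either a literal list of machine indices or the name of a machine.map file. A simplified sketch of that decision with hypothetical input values; error handling is reduced to a plain exception:

    def parse_map(machine_map_file, required):
        map_info = machine_map_file.strip('[]').split(',')
        if map_info[0].isnumeric():
            if len(map_info) < required:
                raise ValueError('need {} machine indices'.format(required))
            return list(map(int, map_info))
        return None   # fall back to the TestM sections of the .map file

    print(parse_map('[2,4,5]', 3))        # [2, 4, 5]
    print(parse_map('machine.map', 3))    # None -> read machine.map instead
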
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_portstatstest.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_portstatstest.py
index 6991e879..8157ddf2 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_portstatstest.py
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_portstatstest.py
@@ -27,15 +27,13 @@ class PortStatsTest(RapidTest):
"""
Class to manage the portstatstesting
"""
- def __init__(self, test_param, runtime, pushgateway, environment_file,
+ def __init__(self, test_param, runtime, testname, environment_file,
machines):
- super().__init__(test_param, runtime, pushgateway, environment_file)
+ super().__init__(test_param, runtime, testname, environment_file)
self.machines = machines
def run(self):
- # fieldnames = ['PROXID','Time','Received','Sent','NoMbufs','iErrMiss']
- # writer = csv.DictWriter(data_csv_file, fieldnames=fieldnames)
- # writer.writeheader()
+ result_details = {'Details': 'Nothing'}
RapidLog.info("+---------------------------------------------------------------------------+")
RapidLog.info("| Measuring port statistics on 1 or more PROX instances |")
RapidLog.info("+-----------+-----------+------------+------------+------------+------------+")
@@ -69,17 +67,17 @@ class PortStatsTest(RapidTest):
old_errors[i] = new_errors[i]
old_tsc[i] = new_tsc[i]
RapidLog.info('|{:>10.0f}'.format(i)+ ' |{:>10.0f}'.format(duration)+' | ' + '{:>10.0f}'.format(rx) + ' | ' +'{:>10.0f}'.format(tx) + ' | '+'{:>10.0f}'.format(no_mbufs)+' | '+'{:>10.0f}'.format(errors)+' |')
- # writer.writerow({'PROXID':i,'Time':duration,'Received':rx,'Sent':tx,'NoMbufs':no_mbufs,'iErrMiss':errors})
- if self.test['pushgateway']:
- URL = self.test['pushgateway'] + self.test['test'] + '/instance/' + self.test['environment_file'] + str(i)
- DATA = 'PROXID {}\nTime {}\n Received {}\nSent {}\nNoMbufs {}\niErrMiss {}\n'.format(i,duration,rx,tx,no_mbufs,errors)
- HEADERS = {'X-Requested-With': 'Python requests', 'Content-type': 'text/xml'}
- response = requests.post(url=URL, data=DATA,headers=HEADERS)
- if (response.status_code != 202) and (response.status_code != 200):
- RapidLog.info('Cannot send metrics to {}'.format(URL))
- RapidLog.info(DATA)
+ result_details = {'test': self.test['test'],
+ 'environment_file': self.test['environment_file'],
+ 'PROXID': i,
+ 'StepSize': duration,
+ 'Received': rx,
+ 'Sent': tx,
+ 'NoMbufs': no_mbufs,
+ 'iErrMiss': errors}
+ result_details = self.post_data(result_details)
if machines_to_go == 0:
duration = duration - 1
machines_to_go = len (self.machines)
RapidLog.info("+-----------+-----------+------------+------------+------------+------------+")
- return (True)
+ return (True, result_details)
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_rsa_key b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_rsa_key
new file mode 100644
index 00000000..6ecdb277
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_rsa_key
@@ -0,0 +1,49 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAACFwAAAAdzc2gtcn
+NhAAAAAwEAAQAAAgEArNsWTFD70ljjL+WnXc0GblN7KliciiuGS2Cg/tcP8zZHvzk8/lkR
+85EcXGpvYrHkTF1daZCbQUy3is0KvP27OholrxVv9HAn4BkA2ugWxp2FaePHKp0FBkMgup
+GHFVhzeg4hA4oFtjpaM95ATMcWTB++7nul6dW+f5/vhxzya5ypEg19ywtZmDooiXz6fWoa
+WgSqjy0NiLFoJEoNE5JYjz2XHTgBDKZ7Sr+oAto9/cOe3G5JsCyMFvCIIhrm/YIs8pwkqJ
+sPMEPg6DbG6P6S1YbnL6rM/BswVjp1IoWpPVbmZhDbhlNSk/4ZDIrMtbKBQPHP90Ku+C5i
+jY6ZNJ4gD7Cwm+ZLp4qdIqJoNoezmG8C0YvO8WvfMLRoyUChwSL3PmUGl02JdWJgYG/B37
+fJQbm80d6HOvAE5rvO5Z9dbwBvzZC0Yp5dX130OtNajpOhfBRN1qbIYYGgpIuLEgQUKC39
+/i1hGMNTOVDjJ4GNbiSUhUkbc64j0k2B+uYs947tfuwrotNumJIuDmwtqxUHwCuKNThUVh
+A3U1tblCWMS6ExVY4zawElXBT/preiAYaFlzFuYoHjzuWXN0WOv08tiRJL1lrfMis8Z9so
+fYc3qBSqlLgAsW5dtB5PMIy3JxXWqjFQIdgjlxWZ54Bu9t5fqPSggS+dNjDacl0v1e6ByB
+kAAAdQW2kXgltpF4IAAAAHc3NoLXJzYQAAAgEArNsWTFD70ljjL+WnXc0GblN7KliciiuG
+S2Cg/tcP8zZHvzk8/lkR85EcXGpvYrHkTF1daZCbQUy3is0KvP27OholrxVv9HAn4BkA2u
+gWxp2FaePHKp0FBkMgupGHFVhzeg4hA4oFtjpaM95ATMcWTB++7nul6dW+f5/vhxzya5yp
+Eg19ywtZmDooiXz6fWoaWgSqjy0NiLFoJEoNE5JYjz2XHTgBDKZ7Sr+oAto9/cOe3G5JsC
+yMFvCIIhrm/YIs8pwkqJsPMEPg6DbG6P6S1YbnL6rM/BswVjp1IoWpPVbmZhDbhlNSk/4Z
+DIrMtbKBQPHP90Ku+C5ijY6ZNJ4gD7Cwm+ZLp4qdIqJoNoezmG8C0YvO8WvfMLRoyUChwS
+L3PmUGl02JdWJgYG/B37fJQbm80d6HOvAE5rvO5Z9dbwBvzZC0Yp5dX130OtNajpOhfBRN
+1qbIYYGgpIuLEgQUKC39/i1hGMNTOVDjJ4GNbiSUhUkbc64j0k2B+uYs947tfuwrotNumJ
+IuDmwtqxUHwCuKNThUVhA3U1tblCWMS6ExVY4zawElXBT/preiAYaFlzFuYoHjzuWXN0WO
+v08tiRJL1lrfMis8Z9sofYc3qBSqlLgAsW5dtB5PMIy3JxXWqjFQIdgjlxWZ54Bu9t5fqP
+SggS+dNjDacl0v1e6ByBkAAAADAQABAAACABLHepSv96vSnFwHxzcZnyk9SJRBLECWmfB2
+fwcwtjrmGsVbopS/eIPNsBcaOR+v0+239v4RB80AWLBrtk7yAfU+AfoTiiY0SSC/lqgxrs
+fFNUlbxbeLd5BGmreqN9LJ2UHZZxzLUfOKQ2J/Mt0kg/ehO00Ngej1n8ydw5gaPPwT+QpN
+DO2SPhmbt+u3+D7H2DUPbLhBXMcM/xNyOBl4PMbTGifCfdqx+5MTX11v+GwpZIjuMnNBY7
+baSu/pnE7OZbO14wWuUugbd8PCr7mAbtNj5Jn5JGv/SDEWCMPHYauYVU+hZTgitUX+xRnn
+unXC/uffXYivZfLwlyRp6Zsd0r2z3dY+bjhZ/SBheAmP3FaKy4ZA1ggn7VHCM/RWywJJlP
+/xdKHWQs2j/kF+s84Z5+eb6r1p3xBS7Dv3Lt9KQPN/nLciJNWYwUHiVXo3BtFw4IRosP+k
+W4Km3bfmfs0yrgrAdypUeLHbD9fyYu/BjhdcDqCj9ntlxUnDfo4WQga1J1kY/5zUDOpVCV
+LYit6y4SCvFM1H8mIHX9n3jxEfs1fdx52OhcahfGc7Qg8EbMJFt3CqXcc4ErVkUxC61sWX
+7mfFqzp0eho1QrGU5a+1l9UaVTJhN1B0ruhEfdBm1FahcQ91ZEn2m6Wf1P0+RImI7m0cH1
+FZ0WDdX+DETUWNHr0BAAABAGEBn6UfyzTYtk/HWW8Px+ae60U4BJCcQ8m/ARSMGGLds2f3
+5NJjm6KliZJ+b7sdN4UYj2hm9zxjef+kwFXUEYmYVm16NufQRR1svF7YqLzNnOQ7eXluZS
+S3SEj1siziCveQ6kyLYrfedNtX/TErdR5SFqcbuanMzd7mqw1vMpejoEGKriSpYOSohsZW
+7Rkcej3XSR4jt5pzxfzUObcKrm5mWAYddINbflAYVswpT/LxNl7jduUsQd3Ul6fOBX4sBK
+rWYMv3Qo4z25oShqvWOJbvvQ1voTOiDF8LTOu60/YbbOfF116J6BcWTHbwe8z+Du8SxdVi
+1N4tFcadL7HqsZEAAAEBAN4ma7nbSI0fA3QM1IK9h5cN/h0qMk91Syh7+vFyNfe/DILFnJ
+0TGNaYhAow1jNMOQKeyEJOfuZkeMdR9/ohtfwSvzSJml/k0JV9aIZHehncZOMt93Gi6WtC
++Os2owyhcXMJN7MbKo1e3Ln21OyaAJi6TAdwSDivFSytvNCKoX8NncQu/UIPzNQVJcrvJn
+SZ+0AHFeuZVl9HgxZY1fUvIs24m9QnYH3HpMiYc2p8UT1hEOqq1bJpgKx9WHhj0fNCBsZ1
+6zTnCDa/HiDADHmlif6pyEu7nD+3MHAeGxS7LJjmMSvtbH/ltrYaz6wFSowlr/RiX7Z8pT
+Ib1lf7KPYulYUAAAEBAMcxzoKSEZt/eYz5w4h9Bs6tdBEBnmSzwni8P0DTv1q0sDan1g4Q
++Mcuo42lSXS9aTmfI+hJDRSuRraLE9xzmxUJ+R2bQkpOLgG6QOF1uU36ZtMoxtptII8pXT
+yQtIW2sHSz9Kgv16PFp98EaEfwzmdk/C8A6NxoGW7EpzAXzXZYLRSwgAr6wVE83jUsbIu5
+lAN6DG6vIm62PLsxmpDZuS5idQwxP8DP4itHMMRh2jE0+msQAWHRQ514nCTqeuy/ORbNSO
+4A1yMy1KxXBH6hQ/oE8ZXqtBqJ3CbINPEyuLK9PYj9e2zABoEOcXTaJcvmVve97xhhw6om
+zVgd4qw70oUAAAAVeWt5bHVsaW5AMGJkODI0NDk5MTYwAQIDBAUG
+-----END OPENSSH PRIVATE KEY-----
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_rsa_key.pub b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_rsa_key.pub
new file mode 100644
index 00000000..c735d178
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_rsa_key.pub
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCs2xZMUPvSWOMv5addzQZuU3sqWJyKK4ZLYKD+1w/zNke/OTz+WRHzkRxcam9iseRMXV1pkJtBTLeKzQq8/bs6GiWvFW/0cCfgGQDa6BbGnYVp48cqnQUGQyC6kYcVWHN6DiEDigW2Oloz3kBMxxZMH77ue6Xp1b5/n++HHPJrnKkSDX3LC1mYOiiJfPp9ahpaBKqPLQ2IsWgkSg0TkliPPZcdOAEMpntKv6gC2j39w57cbkmwLIwW8IgiGub9gizynCSomw8wQ+DoNsbo/pLVhucvqsz8GzBWOnUihak9VuZmENuGU1KT/hkMisy1soFA8c/3Qq74LmKNjpk0niAPsLCb5kunip0iomg2h7OYbwLRi87xa98wtGjJQKHBIvc+ZQaXTYl1YmBgb8Hft8lBubzR3oc68ATmu87ln11vAG/NkLRinl1fXfQ601qOk6F8FE3WpshhgaCki4sSBBQoLf3+LWEYw1M5UOMngY1uJJSFSRtzriPSTYH65iz3ju1+7Cui026Yki4ObC2rFQfAK4o1OFRWEDdTW1uUJYxLoTFVjjNrASVcFP+mt6IBhoWXMW5igePO5Zc3RY6/Ty2JEkvWWt8yKzxn2yh9hzeoFKqUuACxbl20Hk8wjLcnFdaqMVAh2COXFZnngG723l+o9KCBL502MNpyXS/V7oHIGQ== default@default
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/sshclient.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_sshclient.py
index c781271e..d8aeacc1 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/sshclient.py
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_sshclient.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python2.7
-
##
## Copyright (c) 2019 Intel Corporation
##
@@ -17,6 +15,7 @@
##
import paramiko
+from scp import SCPClient
import logging
class SSHClient:
@@ -34,9 +33,11 @@ class SSHClient:
_output = None
_error = None
- def __init__(self, ip=None, user=None, rsa_private_key=None, timeout=15, logger_name=None):
+ def __init__(self, ip=None, user=None, rsa_private_key=None, timeout=15,
+ logger_name=None, password = None):
self._ip = ip
self._user = user
+ self._password = password
self._rsa_private_key = rsa_private_key
self._timeout = timeout
@@ -45,19 +46,21 @@ class SSHClient:
self._connected = False
- def set_credentials(self, ip, user, rsa_private_key):
+ def set_credentials(self, ip, user, rsa_private_key, password = None):
self._ip = ip
self._user = user
+ self._password = password
self._rsa_private_key = rsa_private_key
def connect(self):
+
if self._connected:
if (self._log is not None):
self._log.debug("Already connected!")
return
-
if ((self._ip is None) or (self._user is None) or
- (self._rsa_private_key is None)):
+ ((self._rsa_private_key is None) ==
+ (self._password is None))):
if (self._log is not None):
self._log.error("Wrong parameter! IP %s, user %s, RSA private key %s"
% (self._ip, self._user, self._rsa_private_key))
@@ -66,10 +69,14 @@ class SSHClient:
self._ssh = paramiko.SSHClient()
self._ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
- private_key = paramiko.RSAKey.from_private_key_file(self._rsa_private_key)
+ if (self._rsa_private_key is not None):
+ private_key = paramiko.RSAKey.from_private_key_file(self._rsa_private_key)
+ else:
+ private_key = None
try:
- self._ssh.connect(hostname = self._ip, username = self._user, pkey = private_key)
+ self._ssh.connect(hostname = self._ip, username = self._user,
+ password = self._password, pkey = private_key)
except Exception as e:
if (self._log is not None):
self._log.error("Failed to connect to the host! IP %s, user %s, RSA private key %s\n%s"
@@ -106,6 +113,50 @@ class SSHClient:
return ret
+ def scp_put(self, src, dst):
+ self.connect()
+
+ if self._connected is not True:
+ return -1
+
+ try:
+ ret = 0
+ scp = SCPClient(self._ssh.get_transport())
+ scp.put(src, dst)
+            self._output = None
+            self._error = None
+ except Exception as e:
+ if (self._log is not None):
+ self._log.error("Failed to execute command! IP %s, cmd %s\n%s"
+ % (self._ip, cmd, e))
+ ret = -1
+
+ self.disconnect()
+
+ return ret
+
+ def scp_get(self, src, dst):
+ self.connect()
+
+ if self._connected is not True:
+ return -1
+
+ try:
+ ret = 0
+ scp = SCPClient(self._ssh.get_transport())
+ scp.get(src, dst)
+            self._output = None
+            self._error = None
+ except Exception as e:
+ if (self._log is not None):
+ self._log.error("Failed to execute command! IP %s, cmd %s\n%s"
+ % (self._ip, cmd, e))
+ ret = -1
+
+ self.disconnect()
+
+ return ret
+
def get_output(self):
return self._output
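A minimal usage sketch of the reworked client (the IP addresses, user name, password and file paths below are purely illustrative; the class, constructor parameters and scp_put() are taken from the diff above, and the scp package must be installed next to paramiko):

    from rapid_sshclient import SSHClient

    # Key-based login, as used by the rapid scripts by default.
    client = SSHClient(ip='192.168.1.10', user='rapid',
                       rsa_private_key='rapid_rsa_key')
    client.scp_put('parameters.lua', '/home/rapid/parameters.lua')

    # Password-based login is now also accepted; connect() requires exactly
    # one of rsa_private_key or password to be set and bails out otherwise.
    pw_client = SSHClient(ip='192.168.1.11', user='rapid', password='secret')
    pw_client.connect()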
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_test.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_test.py
index 0b0b2049..deba695f 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_test.py
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_test.py
@@ -17,25 +17,34 @@
## limitations under the License.
##
+import yaml
+import requests
import time
+import os
+import copy
from past.utils import old_div
from rapid_log import RapidLog
from rapid_log import bcolors
inf = float("inf")
+from datetime import datetime as dt
+
+_CURR_DIR = os.path.dirname(os.path.realpath(__file__))
class RapidTest(object):
"""
Class to manage the testing
"""
- def __init__(self, test_param, runtime, pushgateway, environment_file ):
+ def __init__(self, test_param, runtime, testname, environment_file ):
self.test = test_param
self.test['runtime'] = runtime
- self.test['pushgateway'] = pushgateway
+ self.test['testname'] = testname
self.test['environment_file'] = environment_file
if 'maxr' not in self.test.keys():
self.test['maxr'] = 1
if 'maxz' not in self.test.keys():
self.test['maxz'] = inf
+ with open(os.path.join(_CURR_DIR,'format.yaml')) as f:
+ self.data_format = yaml.load(f, Loader=yaml.FullLoader)
@staticmethod
def get_percentageof10Gbps(pps_speed,size):
@@ -91,122 +100,206 @@ class RapidTest(object):
machine.stop()
@staticmethod
- def report_result(flow_number, size, speed, pps_req_tx, pps_tx, pps_sut_tx,
- pps_rx, lat_avg, lat_perc, lat_perc_max, lat_max, tx, rx, tot_drop,
- elapsed_time,speed_prefix='', lat_avg_prefix='', lat_perc_prefix='',
- lat_max_prefix='', abs_drop_rate_prefix='', drop_rate_prefix=''):
+ def parse_data_format_dict(data_format, variables):
+ for k, v in data_format.items():
+ if type(v) is dict:
+ RapidTest.parse_data_format_dict(v, variables)
+ else:
+ if v in variables.keys():
+ data_format[k] = variables[v]
+
+ def post_data(self, variables):
+ test_type = type(self).__name__
+ var = copy.deepcopy(self.data_format)
+ self.parse_data_format_dict(var, variables)
+ if var.keys() >= {'URL', test_type, 'Format'}:
+ URL=''
+ for value in var['URL'].values():
+ URL = URL + value
+ HEADERS = {'X-Requested-With': 'Python requests', 'Content-type': 'application/rapid'}
+ if var['Format'] == 'PushGateway':
+ data = "\n".join("{} {}".format(k, v) for k, v in var[test_type].items()) + "\n"
+ response = requests.post(url=URL, data=data,headers=HEADERS)
+ elif var['Format'] == 'Xtesting':
+ data = var[test_type]
+ response = requests.post(url=URL, json=data)
+ if (response.status_code >= 300):
+ RapidLog.info('Cannot send metrics to {}'.format(URL))
+ RapidLog.info(data)
+ return (var[test_type])
+
+ @staticmethod
+ def report_result(flow_number, size, data, prefix):
if flow_number < 0:
flow_number_str = '| ({:>4}) |'.format(abs(flow_number))
else:
flow_number_str = '|{:>7} |'.format(flow_number)
- if pps_req_tx is None:
+ if data['pps_req_tx'] is None:
pps_req_tx_str = '{0: >14}'.format(' NA |')
else:
- pps_req_tx_str = '{:>7.3f} Mpps |'.format(pps_req_tx)
- if pps_tx is None:
+ pps_req_tx_str = '{:>7.3f} Mpps |'.format(data['pps_req_tx'])
+ if data['pps_tx'] is None:
pps_tx_str = '{0: >14}'.format(' NA |')
else:
- pps_tx_str = '{:>7.3f} Mpps |'.format(pps_tx)
- if pps_sut_tx is None:
+ pps_tx_str = '{:>7.3f} Mpps |'.format(data['pps_tx'])
+ if data['pps_sut_tx'] is None:
pps_sut_tx_str = '{0: >14}'.format(' NA |')
else:
- pps_sut_tx_str = '{:>7.3f} Mpps |'.format(pps_sut_tx)
- if pps_rx is None:
+ pps_sut_tx_str = '{:>7.3f} Mpps |'.format(data['pps_sut_tx'])
+ if data['pps_rx'] is None:
pps_rx_str = '{0: >25}'.format('NA |')
else:
- pps_rx_str = bcolors.OKBLUE + '{:>4.1f} Gb/s |{:7.3f} Mpps {}|'.format(RapidTest.get_speed(pps_rx,size),pps_rx,bcolors.ENDC)
- if tot_drop is None:
+ pps_rx_str = bcolors.OKBLUE + '{:>4.1f} Gb/s |{:7.3f} Mpps {}|'.format(
+ RapidTest.get_speed(data['pps_rx'],size),data['pps_rx'],bcolors.ENDC)
+ if data['abs_dropped'] is None:
tot_drop_str = ' | NA | '
else:
- tot_drop_str = ' | {:>9.0f} | '.format(tot_drop)
- if lat_perc is None:
- lat_perc_str = ' |{:^10.10}|'.format('NA')
- elif lat_perc_max == True:
- lat_perc_str = '|>{}{:>5.0f} us{} |'.format(lat_perc_prefix,float(lat_perc), bcolors.ENDC)
+ tot_drop_str = ' | {:>9.0f} | '.format(data['abs_dropped'])
+ if data['lat_perc'] is None:
+ lat_perc_str = '|{:^10.10}|'.format('NA')
+ elif data['lat_perc_max'] == True:
+ lat_perc_str = '|>{}{:>5.0f} us{} |'.format(prefix['lat_perc'],
+ float(data['lat_perc']), bcolors.ENDC)
else:
- lat_perc_str = '| {}{:>5.0f} us{} |'.format(lat_perc_prefix,float(lat_perc), bcolors.ENDC)
- if elapsed_time is None:
+ lat_perc_str = '| {}{:>5.0f} us{} |'.format(prefix['lat_perc'],
+ float(data['lat_perc']), bcolors.ENDC)
+ if data['actual_duration'] is None:
elapsed_time_str = ' NA |'
else:
- elapsed_time_str = '{:>3.0f} |'.format(elapsed_time)
- return(flow_number_str + '{:>5.1f}'.format(speed) + '% '+speed_prefix +'{:>6.3f}'.format(RapidTest.get_pps(speed,size)) + ' Mpps|'+ pps_req_tx_str + pps_tx_str + bcolors.ENDC + pps_sut_tx_str + pps_rx_str +lat_avg_prefix+ ' {:>6.0f}'.format(lat_avg)+' us'+lat_perc_str+lat_max_prefix+'{:>6.0f}'.format(lat_max)+' us | ' + '{:>9.0f}'.format(tx) + ' | {:>9.0f}'.format(rx) + ' | '+ abs_drop_rate_prefix+ '{:>9.0f}'.format(tx-rx) + tot_drop_str +drop_rate_prefix+ '{:>5.2f}'.format(old_div(float(tx-rx),tx)) +bcolors.ENDC+' |' + elapsed_time_str)
-
+ elapsed_time_str = '{:>3.0f} |'.format(data['actual_duration'])
+ if data['mis_ordered'] is None:
+ mis_ordered_str = ' NA '
+ else:
+ mis_ordered_str = '{:>9.0f} '.format(data['mis_ordered'])
+ return(flow_number_str + '{:>5.1f}'.format(data['speed']) + '% ' + prefix['speed']
+ + '{:>6.3f}'.format(RapidTest.get_pps(data['speed'],size)) + ' Mpps|' +
+ pps_req_tx_str + pps_tx_str + bcolors.ENDC + pps_sut_tx_str +
+ pps_rx_str + prefix['lat_avg'] + ' {:>6.0f}'.format(data['lat_avg']) +
+ ' us' + lat_perc_str +prefix['lat_max']+'{:>6.0f}'.format(data['lat_max'])
+ + ' us | ' + '{:>9.0f}'.format(data['abs_tx']) + ' | {:>9.0f}'.format(data['abs_rx']) +
+ ' | '+ prefix['abs_drop_rate']+ '{:>9.0f}'.format(data['abs_tx']-data['abs_rx']) +
+ tot_drop_str + prefix['drop_rate'] +
+ '{:>5.2f}'.format(100*old_div(float(data['abs_tx']-data['abs_rx']),data['abs_tx'])) + ' |' +
+ prefix['mis_ordered'] + mis_ordered_str + bcolors.ENDC +
+ ' |' + elapsed_time_str)
+
def run_iteration(self, requested_duration, flow_number, size, speed):
BUCKET_SIZE_EXP = self.gen_machine.bucket_size_exp
+ sleep_time = self.test['sleep_time']
LAT_PERCENTILE = self.test['lat_percentile']
- r = 0;
- sleep_time = 2
- while (r < self.test['maxr']):
+ iteration_data= {}
+ time_loop_data= {}
+ iteration_data['r'] = 0;
+
+ while (iteration_data['r'] < self.test['maxr']):
+ self.gen_machine.start_latency_cores()
time.sleep(sleep_time)
            # Sleep_time is needed to be able to do accurate measurements to check for packet loss. We need to make this time large enough so that we do not take the first measurement while some packets from the previous tests might still be in flight
t1_rx, t1_non_dp_rx, t1_tx, t1_non_dp_tx, t1_drop, t1_tx_fail, t1_tsc, abs_tsc_hz = self.gen_machine.core_stats()
t1_dp_rx = t1_rx - t1_non_dp_rx
t1_dp_tx = t1_tx - t1_non_dp_tx
+ self.gen_machine.set_generator_speed(0)
self.gen_machine.start_gen_cores()
+ self.set_background_speed(self.background_machines, 0)
+ self.start_background_traffic(self.background_machines)
+ if 'ramp_step' in self.test.keys():
+ ramp_speed = self.test['ramp_step']
+ else:
+ ramp_speed = speed
+ while ramp_speed < speed:
+ self.gen_machine.set_generator_speed(ramp_speed)
+ self.set_background_speed(self.background_machines, ramp_speed)
+ time.sleep(2)
+ ramp_speed = ramp_speed + self.test['ramp_step']
+ self.gen_machine.set_generator_speed(speed)
+ self.set_background_speed(self.background_machines, speed)
+ iteration_data['speed'] = speed
+ time_loop_data['speed'] = speed
time.sleep(2) ## Needs to be 2 seconds since this 1 sec is the time that PROX uses to refresh the stats. Note that this can be changed in PROX!! Don't do it.
+ start_bg_gen_stats = []
+ for bg_gen_machine in self.background_machines:
+ bg_rx, bg_non_dp_rx, bg_tx, bg_non_dp_tx, _, _, bg_tsc, _ = bg_gen_machine.core_stats()
+ bg_gen_stat = {
+ "bg_dp_rx" : bg_rx - bg_non_dp_rx,
+ "bg_dp_tx" : bg_tx - bg_non_dp_tx,
+ "bg_tsc" : bg_tsc
+ }
+ start_bg_gen_stats.append(dict(bg_gen_stat))
if self.sut_machine!= None:
t2_sut_rx, t2_sut_non_dp_rx, t2_sut_tx, t2_sut_non_dp_tx, t2_sut_drop, t2_sut_tx_fail, t2_sut_tsc, sut_tsc_hz = self.sut_machine.core_stats()
t2_rx, t2_non_dp_rx, t2_tx, t2_non_dp_tx, t2_drop, t2_tx_fail, t2_tsc, tsc_hz = self.gen_machine.core_stats()
tx = t2_tx - t1_tx
- dp_tx = tx - (t2_non_dp_tx - t1_non_dp_tx )
- dp_rx = t2_rx - t1_rx - (t2_non_dp_rx - t1_non_dp_rx)
- tot_dp_drop = dp_tx - dp_rx
+ iteration_data['abs_tx'] = tx - (t2_non_dp_tx - t1_non_dp_tx )
+ iteration_data['abs_rx'] = t2_rx - t1_rx - (t2_non_dp_rx - t1_non_dp_rx)
+ iteration_data['abs_dropped'] = iteration_data['abs_tx'] - iteration_data['abs_rx']
if tx == 0:
RapidLog.critical("TX = 0. Test interrupted since no packet has been sent.")
- if dp_tx == 0:
+ if iteration_data['abs_tx'] == 0:
RapidLog.critical("Only non-dataplane packets (e.g. ARP) sent. Test interrupted since no packet has been sent.")
# Ask PROX to calibrate the bucket size once we have a PROX function to do this.
# Measure latency statistics per second
- lat_min, lat_max, lat_avg, used_avg, t2_lat_tsc, lat_hz, buckets = self.gen_machine.lat_stats()
- lat_samples = sum(buckets)
+ iteration_data.update(self.gen_machine.lat_stats())
+ t2_lat_tsc = iteration_data['lat_tsc']
sample_count = 0
- for sample_percentile, bucket in enumerate(buckets,start=1):
+ for sample_percentile, bucket in enumerate(iteration_data['buckets'],start=1):
sample_count += bucket
- if sample_count > (lat_samples * LAT_PERCENTILE):
+ if sample_count > sum(iteration_data['buckets']) * LAT_PERCENTILE:
break
- percentile_max = (sample_percentile == len(buckets))
- sample_percentile = sample_percentile * float(2 ** BUCKET_SIZE_EXP) / (old_div(float(lat_hz),float(10**6)))
+ iteration_data['lat_perc_max'] = (sample_percentile == len(iteration_data['buckets']))
+ iteration_data['bucket_size'] = float(2 ** BUCKET_SIZE_EXP) / (old_div(float(iteration_data['lat_hz']),float(10**6)))
+ time_loop_data['bucket_size'] = iteration_data['bucket_size']
+ iteration_data['lat_perc'] = sample_percentile * iteration_data['bucket_size']
if self.test['test'] == 'fixed_rate':
- RapidLog.info(self.report_result(flow_number,size,speed,None,None,None,None,lat_avg,sample_percentile,percentile_max,lat_max, dp_tx, dp_rx , None, None))
+ iteration_data['pps_req_tx'] = None
+ iteration_data['pps_tx'] = None
+ iteration_data['pps_sut_tx'] = None
+ iteration_data['pps_rx'] = None
+ iteration_data['lat_perc'] = None
+ iteration_data['actual_duration'] = None
+ iteration_prefix = {'speed' : '',
+ 'lat_avg' : '',
+ 'lat_perc' : '',
+ 'lat_max' : '',
+ 'abs_drop_rate' : '',
+ 'mis_ordered' : '',
+ 'drop_rate' : ''}
+ RapidLog.info(self.report_result(flow_number, size,
+ iteration_data, iteration_prefix ))
tot_rx = tot_non_dp_rx = tot_tx = tot_non_dp_tx = tot_drop = 0
- lat_avg = used_avg = 0
- buckets_total = [0] * 128
- tot_lat_samples = 0
+ iteration_data['lat_avg'] = iteration_data['lat_used'] = 0
tot_lat_measurement_duration = float(0)
- tot_core_measurement_duration = float(0)
+ iteration_data['actual_duration'] = float(0)
tot_sut_core_measurement_duration = float(0)
tot_sut_rx = tot_sut_non_dp_rx = tot_sut_tx = tot_sut_non_dp_tx = tot_sut_drop = tot_sut_tx_fail = tot_sut_tsc = 0
lat_avail = core_avail = sut_avail = False
- while (tot_core_measurement_duration - float(requested_duration) <= 0.1) or (tot_lat_measurement_duration - float(requested_duration) <= 0.1):
+ while (iteration_data['actual_duration'] - float(requested_duration) <= 0.1) or (tot_lat_measurement_duration - float(requested_duration) <= 0.1):
time.sleep(0.5)
- lat_min_sample, lat_max_sample, lat_avg_sample, used_sample, t3_lat_tsc, lat_hz, buckets = self.gen_machine.lat_stats()
+ time_loop_data.update(self.gen_machine.lat_stats())
# Get statistics after some execution time
- if t3_lat_tsc != t2_lat_tsc:
- single_lat_measurement_duration = (t3_lat_tsc - t2_lat_tsc) * 1.0 / lat_hz # time difference between the 2 measurements, expressed in seconds.
+ if time_loop_data['lat_tsc'] != t2_lat_tsc:
+ single_lat_measurement_duration = (time_loop_data['lat_tsc'] - t2_lat_tsc) * 1.0 / time_loop_data['lat_hz'] # time difference between the 2 measurements, expressed in seconds.
            # A second has passed in between two lat_stats requests. Hence we need to process the results
tot_lat_measurement_duration = tot_lat_measurement_duration + single_lat_measurement_duration
- if lat_min > lat_min_sample:
- lat_min = lat_min_sample
- if lat_max < lat_max_sample:
- lat_max = lat_max_sample
- lat_avg = lat_avg + lat_avg_sample * single_lat_measurement_duration # Sometimes, There is more than 1 second between 2 lat_stats. Hence we will take the latest measurement
- used_avg = used_avg + used_sample * single_lat_measurement_duration # and give it more weigth.
- lat_samples = sum(buckets)
- tot_lat_samples += lat_samples
+ if iteration_data['lat_min'] > time_loop_data['lat_min']:
+ iteration_data['lat_min'] = time_loop_data['lat_min']
+ if iteration_data['lat_max'] < time_loop_data['lat_max']:
+ iteration_data['lat_max'] = time_loop_data['lat_max']
+                    iteration_data['lat_avg'] = iteration_data['lat_avg'] + time_loop_data['lat_avg'] * single_lat_measurement_duration # Sometimes, there is more than 1 second between 2 lat_stats. Hence we will take the latest measurement
+                    iteration_data['lat_used'] = iteration_data['lat_used'] + time_loop_data['lat_used'] * single_lat_measurement_duration # and give it more weight.
sample_count = 0
- for sample_percentile, bucket in enumerate(buckets,start=1):
+ for sample_percentile, bucket in enumerate(time_loop_data['buckets'],start=1):
sample_count += bucket
- if sample_count > lat_samples * LAT_PERCENTILE:
+ if sample_count > sum(time_loop_data['buckets']) * LAT_PERCENTILE:
break
- percentile_max = (sample_percentile == len(buckets))
- sample_percentile = sample_percentile * float(2 ** BUCKET_SIZE_EXP) / (old_div(float(lat_hz),float(10**6)))
- buckets_total = [buckets_total[i] + buckets[i] for i in range(len(buckets_total))]
- t2_lat_tsc = t3_lat_tsc
+ time_loop_data['lat_perc_max'] = (sample_percentile == len(time_loop_data['buckets']))
+ time_loop_data['lat_perc'] = sample_percentile * iteration_data['bucket_size']
+ iteration_data['buckets'] = [iteration_data['buckets'][i] + time_loop_data['buckets'][i] for i in range(len(iteration_data['buckets']))]
+ t2_lat_tsc = time_loop_data['lat_tsc']
lat_avail = True
t3_rx, t3_non_dp_rx, t3_tx, t3_non_dp_tx, t3_drop, t3_tx_fail, t3_tsc, tsc_hz = self.gen_machine.core_stats()
if t3_tsc != t2_tsc:
- single_core_measurement_duration = (t3_tsc - t2_tsc) * 1.0 / tsc_hz # time difference between the 2 measurements, expressed in seconds.
- tot_core_measurement_duration = tot_core_measurement_duration + single_core_measurement_duration
+ time_loop_data['actual_duration'] = (t3_tsc - t2_tsc) * 1.0 / tsc_hz # time difference between the 2 measurements, expressed in seconds.
+ iteration_data['actual_duration'] = iteration_data['actual_duration'] + time_loop_data['actual_duration']
delta_rx = t3_rx - t2_rx
tot_rx += delta_rx
delta_non_dp_rx = t3_non_dp_rx - t2_non_dp_rx
@@ -217,8 +310,8 @@ class RapidTest(object):
tot_non_dp_tx += delta_non_dp_tx
delta_dp_tx = delta_tx -delta_non_dp_tx
delta_dp_rx = delta_rx -delta_non_dp_rx
- delta_dp_drop = delta_dp_tx - delta_dp_rx
- tot_dp_drop += delta_dp_drop
+ time_loop_data['abs_dropped'] = delta_dp_tx - delta_dp_rx
+ iteration_data['abs_dropped'] += time_loop_data['abs_dropped']
delta_drop = t3_drop - t2_drop
tot_drop += delta_drop
t2_rx, t2_non_dp_rx, t2_tx, t2_non_dp_tx, t2_drop, t2_tx_fail, t2_tsc = t3_rx, t3_non_dp_rx, t3_tx, t3_non_dp_tx, t3_drop, t3_tx_fail, t3_tsc
@@ -226,7 +319,7 @@ class RapidTest(object):
if self.sut_machine!=None:
t3_sut_rx, t3_sut_non_dp_rx, t3_sut_tx, t3_sut_non_dp_tx, t3_sut_drop, t3_sut_tx_fail, t3_sut_tsc, sut_tsc_hz = self.sut_machine.core_stats()
if t3_sut_tsc != t2_sut_tsc:
- single_sut_core_measurement_duration = (t3_sut_tsc - t2_sut_tsc) * 1.0 / tsc_hz # time difference between the 2 measurements, expressed in seconds.
+ single_sut_core_measurement_duration = (t3_sut_tsc - t2_sut_tsc) * 1.0 / sut_tsc_hz # time difference between the 2 measurements, expressed in seconds.
tot_sut_core_measurement_duration = tot_sut_core_measurement_duration + single_sut_core_measurement_duration
tot_sut_rx += t3_sut_rx - t2_sut_rx
tot_sut_non_dp_rx += t3_sut_non_dp_rx - t2_sut_non_dp_rx
@@ -239,76 +332,110 @@ class RapidTest(object):
if self.test['test'] == 'fixed_rate':
if lat_avail == core_avail == True:
lat_avail = core_avail = False
- pps_req_tx = (delta_tx + delta_drop - delta_rx)/single_core_measurement_duration/1000000
- pps_tx = delta_tx/single_core_measurement_duration/1000000
+ time_loop_data['pps_req_tx'] = (delta_tx + delta_drop - delta_rx)/time_loop_data['actual_duration']/1000000
+ time_loop_data['pps_tx'] = delta_tx/time_loop_data['actual_duration']/1000000
if self.sut_machine != None and sut_avail:
- pps_sut_tx = delta_sut_tx/single_sut_core_measurement_duration/1000000
+ time_loop_data['pps_sut_tx'] = delta_sut_tx/single_sut_core_measurement_duration/1000000
sut_avail = False
else:
- pps_sut_tx = None
- pps_rx = delta_rx/single_core_measurement_duration/1000000
- RapidLog.info(self.report_result(flow_number, size,
- speed, pps_req_tx, pps_tx, pps_sut_tx, pps_rx,
- lat_avg_sample, sample_percentile, percentile_max,
- lat_max_sample, delta_dp_tx, delta_dp_rx,
- tot_dp_drop, single_core_measurement_duration))
+ time_loop_data['pps_sut_tx'] = None
+ time_loop_data['pps_rx'] = delta_rx/time_loop_data['actual_duration']/1000000
+ time_loop_data['abs_tx'] = delta_dp_tx
+ time_loop_data['abs_rx'] = delta_dp_rx
+ time_loop_prefix = {'speed' : '',
+ 'lat_avg' : '',
+ 'lat_perc' : '',
+ 'lat_max' : '',
+ 'abs_drop_rate' : '',
+ 'mis_ordered' : '',
+ 'drop_rate' : ''}
+ RapidLog.info(self.report_result(flow_number, size, time_loop_data,
+ time_loop_prefix))
+ time_loop_data['test'] = self.test['testname']
+ time_loop_data['environment_file'] = self.test['environment_file']
+ time_loop_data['Flows'] = flow_number
+ time_loop_data['Size'] = size
+ time_loop_data['RequestedSpeed'] = RapidTest.get_pps(speed, size)
+ _ = self.post_data(time_loop_data)
+ end_bg_gen_stats = []
+ for bg_gen_machine in self.background_machines:
+ bg_rx, bg_non_dp_rx, bg_tx, bg_non_dp_tx, _, _, bg_tsc, bg_hz = bg_gen_machine.core_stats()
+ bg_gen_stat = {"bg_dp_rx" : bg_rx - bg_non_dp_rx,
+ "bg_dp_tx" : bg_tx - bg_non_dp_tx,
+ "bg_tsc" : bg_tsc,
+ "bg_hz" : bg_hz
+ }
+ end_bg_gen_stats.append(dict(bg_gen_stat))
+ self.stop_background_traffic(self.background_machines)
+ i = 0
+ bg_rates =[]
+ while i < len(end_bg_gen_stats):
+ bg_rates.append(0.000001*(end_bg_gen_stats[i]['bg_dp_rx'] -
+ start_bg_gen_stats[i]['bg_dp_rx']) / ((end_bg_gen_stats[i]['bg_tsc'] -
+ start_bg_gen_stats[i]['bg_tsc']) * 1.0 / end_bg_gen_stats[i]['bg_hz']))
+ i += 1
+ if len(bg_rates):
+ iteration_data['avg_bg_rate'] = sum(bg_rates) / len(bg_rates)
+ RapidLog.debug('Average Background traffic rate: {:>7.3f} Mpps'.format(iteration_data['avg_bg_rate']))
+ else:
+ iteration_data['avg_bg_rate'] = None
#Stop generating
self.gen_machine.stop_gen_cores()
- r += 1
- lat_avg = old_div(lat_avg, float(tot_lat_measurement_duration))
- used_avg = old_div(used_avg, float(tot_lat_measurement_duration))
+ time.sleep(3.5)
+ self.gen_machine.stop_latency_cores()
+ iteration_data['r'] += 1
+ iteration_data['lat_avg'] = old_div(iteration_data['lat_avg'], float(tot_lat_measurement_duration))
+ iteration_data['lat_used'] = old_div(iteration_data['lat_used'], float(tot_lat_measurement_duration))
t4_tsc = t2_tsc
while t4_tsc == t2_tsc:
t4_rx, t4_non_dp_rx, t4_tx, t4_non_dp_tx, t4_drop, t4_tx_fail, t4_tsc, abs_tsc_hz = self.gen_machine.core_stats()
if self.test['test'] == 'fixed_rate':
- t4_lat_tsc = t2_lat_tsc
- while t4_lat_tsc == t2_lat_tsc:
- lat_min_sample, lat_max_sample, lat_avg_sample, used_sample, t4_lat_tsc, lat_hz, buckets = self.gen_machine.lat_stats()
+ iteration_data['lat_tsc'] = t2_lat_tsc
+ while iteration_data['lat_tsc'] == t2_lat_tsc:
+ iteration_data.update(self.gen_machine.lat_stats())
sample_count = 0
- lat_samples = sum(buckets)
- for percentile, bucket in enumerate(buckets,start=1):
+ for percentile, bucket in enumerate(iteration_data['buckets'],start=1):
sample_count += bucket
- if sample_count > lat_samples * LAT_PERCENTILE:
+ if sample_count > sum(iteration_data['buckets']) * LAT_PERCENTILE:
break
- percentile_max = (percentile == len(buckets))
- percentile = percentile * float(2 ** BUCKET_SIZE_EXP) / (old_div(float(lat_hz),float(10**6)))
- lat_max = lat_max_sample
- lat_avg = lat_avg_sample
+ iteration_data['lat_perc_max'] = (percentile == len(iteration_data['buckets']))
+ iteration_data['lat_perc'] = percentile * iteration_data['bucket_size']
delta_rx = t4_rx - t2_rx
delta_non_dp_rx = t4_non_dp_rx - t2_non_dp_rx
delta_tx = t4_tx - t2_tx
delta_non_dp_tx = t4_non_dp_tx - t2_non_dp_tx
delta_dp_tx = delta_tx -delta_non_dp_tx
delta_dp_rx = delta_rx -delta_non_dp_rx
- dp_tx = delta_dp_tx
- dp_rx = delta_dp_rx
- tot_dp_drop += delta_dp_tx - delta_dp_rx
- pps_req_tx = None
- pps_tx = None
- pps_sut_tx = None
- pps_rx = None
- drop_rate = 100.0*(dp_tx-dp_rx)/dp_tx
- tot_core_measurement_duration = None
+ iteration_data['abs_tx'] = delta_dp_tx
+ iteration_data['abs_rx'] = delta_dp_rx
+ iteration_data['abs_dropped'] += delta_dp_tx - delta_dp_rx
+ iteration_data['pps_req_tx'] = None
+ iteration_data['pps_tx'] = None
+ iteration_data['pps_sut_tx'] = None
+ iteration_data['drop_rate'] = 100.0*(iteration_data['abs_tx']-iteration_data['abs_rx'])/iteration_data['abs_tx']
+ iteration_data['actual_duration'] = None
break ## Not really needed since the while loop will stop when evaluating the value of r
else:
sample_count = 0
- for percentile, bucket in enumerate(buckets_total,start=1):
+ for percentile, bucket in enumerate(iteration_data['buckets'],start=1):
sample_count += bucket
- if sample_count > tot_lat_samples * LAT_PERCENTILE:
+ if sample_count > sum(iteration_data['buckets']) * LAT_PERCENTILE:
break
- percentile_max = (percentile == len(buckets_total))
- percentile = percentile * float(2 ** BUCKET_SIZE_EXP) / (old_div(float(lat_hz),float(10**6)))
- pps_req_tx = (tot_tx + tot_drop - tot_rx)/tot_core_measurement_duration/1000000.0 # tot_drop is all packets dropped by all tasks. This includes packets dropped at the generator task + packets dropped by the nop task. In steady state, this equals to the number of packets received by this VM
- pps_tx = tot_tx/tot_core_measurement_duration/1000000.0 # tot_tx is all generated packets actually accepted by the interface
- pps_rx = tot_rx/tot_core_measurement_duration/1000000.0 # tot_rx is all packets received by the nop task = all packets received in the gen VM
+ iteration_data['lat_perc_max'] = (percentile == len(iteration_data['buckets']))
+ iteration_data['lat_perc'] = percentile * iteration_data['bucket_size']
+ iteration_data['pps_req_tx'] = (tot_tx + tot_drop - tot_rx)/iteration_data['actual_duration']/1000000.0 # tot_drop is all packets dropped by all tasks. This includes packets dropped at the generator task + packets dropped by the nop task. In steady state, this equals to the number of packets received by this VM
+ iteration_data['pps_tx'] = tot_tx/iteration_data['actual_duration']/1000000.0 # tot_tx is all generated packets actually accepted by the interface
+ iteration_data['pps_rx'] = tot_rx/iteration_data['actual_duration']/1000000.0 # tot_rx is all packets received by the nop task = all packets received in the gen VM
if self.sut_machine != None and sut_avail:
- pps_sut_tx = tot_sut_tx / tot_sut_core_measurement_duration / 1000000.0
+ iteration_data['pps_sut_tx'] = tot_sut_tx / tot_sut_core_measurement_duration / 1000000.0
else:
- pps_sut_tx = None
- dp_tx = (t4_tx - t1_tx) - (t4_non_dp_tx - t1_non_dp_tx)
- dp_rx = (t4_rx - t1_rx) - (t4_non_dp_rx - t1_non_dp_rx)
- tot_dp_drop = dp_tx - dp_rx
- drop_rate = 100.0*tot_dp_drop/dp_tx
- if ((drop_rate < self.test['drop_rate_threshold']) or (tot_dp_drop == self.test['drop_rate_threshold'] ==0) or (tot_dp_drop > self.test['maxz'])):
+ iteration_data['pps_sut_tx'] = None
+ iteration_data['abs_tx'] = (t4_tx - t1_tx) - (t4_non_dp_tx - t1_non_dp_tx)
+ iteration_data['abs_rx'] = (t4_rx - t1_rx) - (t4_non_dp_rx - t1_non_dp_rx)
+ iteration_data['abs_dropped'] = iteration_data['abs_tx'] - iteration_data['abs_rx']
+ iteration_data['drop_rate'] = 100.0*iteration_data['abs_dropped']/iteration_data['abs_tx']
+ if ((iteration_data['drop_rate'] < self.test['drop_rate_threshold']) or (iteration_data['abs_dropped'] == self.test['drop_rate_threshold'] ==0) or (iteration_data['abs_dropped'] > self.test['maxz'])):
break
- return(pps_req_tx,pps_tx,pps_sut_tx,pps_rx,lat_avg,percentile,percentile_max,lat_max,dp_tx,dp_rx,tot_dp_drop,(t4_tx_fail - t1_tx_fail),drop_rate,lat_min,used_avg,r,tot_core_measurement_duration)
+ self.gen_machine.stop_latency_cores()
+ iteration_data['abs_tx_fail'] = t4_tx_fail - t1_tx_fail
+ return (iteration_data)
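To illustrate the new post_data() path above: parse_data_format_dict() walks a (possibly nested) template and replaces every leaf value that names a known variable with the measured value. The template below is hypothetical, not the shipped format.yaml; the 'FlowSizeTest' key matches type(self).__name__ for a FlowSizeTest run:

    from rapid_test import RapidTest

    template = {
        'URL': {'part1': 'http://localhost:9091/metrics/job/', 'part2': 'rapid'},
        'Format': 'PushGateway',
        'FlowSizeTest': {'Speed': 'speed', 'PacketsReceived': 'abs_rx'},
    }
    variables = {'speed': 10.0, 'abs_rx': 1234567}
    RapidTest.parse_data_format_dict(template, variables)
    # template['FlowSizeTest'] is now {'Speed': 10.0, 'PacketsReceived': 1234567};
    # post_data() then concatenates the 'URL' values and POSTs these metrics.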
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_warmuptest.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_warmuptest.py
index 55f07be4..a86ce806 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_warmuptest.py
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_warmuptest.py
@@ -35,17 +35,18 @@ class WarmupTest(RapidTest):
# If not doing this, the ARP message could be dropped by a switch in overload and then the test will not give proper results
    # Note however that if we were to run the test steps for a very long time, the ARP would expire in the switch.
    # PROX will send a new ARP request every second so chances are very low that they will all fail to get through
- imix = self.test['imix']
- FLOWSIZE = int(self.test['flowsize'])
- WARMUPSPEED = int(self.test['warmupspeed'])
- WARMUPTIME = int(self.test['warmuptime'])
+ imix = self.test['warmupimix']
+ FLOWSIZE = self.test['warmupflowsize']
+ WARMUPSPEED = self.test['warmupspeed']
+ WARMUPTIME = self.test['warmuptime']
self.gen_machine.set_generator_speed(WARMUPSPEED)
self.gen_machine.set_udp_packet_size(imix)
# gen_machine['socket'].set_value(gencores,0,56,1,1)
- _ = self.gen_machine.set_flows(FLOWSIZE)
+ if FLOWSIZE:
+ _ = self.gen_machine.set_flows(FLOWSIZE)
self.gen_machine.start()
time.sleep(WARMUPTIME)
self.gen_machine.stop()
# gen_machine['socket'].set_value(gencores,0,56,50,1)
time.sleep(WARMUPTIME)
- return (True)
+ return (True, None)
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapidxt.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapidxt.py
new file mode 100644
index 00000000..2f6b9443
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapidxt.py
@@ -0,0 +1,56 @@
+#!/usr/bin/python3
+
+##
+## Copyright (c) 2020 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+# pylint: disable=missing-docstring
+
+import json
+import os
+import sys
+import time
+
+from xtesting.core import testcase
+from runrapid import RapidTestManager
+from rapid_cli import RapidCli
+from rapid_log import RapidLog
+
+class RapidXt(testcase.TestCase):
+
+ def run(self, **kwargs):
+ try:
+ test_params = RapidTestManager.get_defaults()
+ for key in kwargs:
+ test_params[key] = kwargs[key]
+ os.makedirs(self.res_dir, exist_ok=True)
+ test_params['resultsdir'] = self.res_dir
+ _, test_file_name = os.path.split(test_params['test_file'])
+ _, environment_file_name = os.path.split(
+ test_params['environment_file'])
+ log_file = '{}/RUN{}.{}.log'.format(self.res_dir,
+ environment_file_name, test_file_name)
+ RapidLog.log_init(log_file, test_params['loglevel'],
+ test_params['screenloglevel'] , test_params['version'] )
+ test_manager = RapidTestManager()
+ self.start_time = time.time()
+ self.result, self.details = test_manager.run_tests(test_params)
+ self.stop_time = time.time()
+ RapidLog.log_close()
+
+ except Exception: # pylint: disable=broad-except
+ print("Unexpected error:", sys.exc_info()[0])
+ self.result = 0
+ self.stop_time = time.time()
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/runrapid.py b/VNFs/DPPD-PROX/helper-scripts/rapid/runrapid.py
index db4e969b..7ec270a1 100755
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/runrapid.py
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/runrapid.py
@@ -22,7 +22,10 @@ from __future__ import division
from future import standard_library
standard_library.install_aliases()
from builtins import object
+import os
import sys
+import concurrent.futures
+from concurrent.futures import ALL_COMPLETED
from rapid_cli import RapidCli
from rapid_log import RapidLog
from rapid_parser import RapidConfigParser
@@ -40,23 +43,42 @@ class RapidTestManager(object):
"""
RapidTestManager Class
"""
+ def __init__(self):
+ """
+ Init Function
+ """
+ self.machines = []
+
+ def __del__(self):
+ for machine in self.machines:
+ machine.close_prox()
+
@staticmethod
def get_defaults():
return (RapidDefaults.test_params)
- @staticmethod
- def run_tests(test_params):
+ def run_tests(self, test_params):
test_params = RapidConfigParser.parse_config(test_params)
- RapidLog.debug(test_params)
monitor_gen = monitor_sut = False
background_machines = []
sut_machine = gen_machine = None
- machines = []
+ configonly = test_params['configonly']
+ machine_names = []
+ machine_counter = {}
for machine_params in test_params['machines']:
+ if machine_params['name'] not in machine_names:
+ machine_names.append(machine_params['name'])
+ machine_counter[machine_params['name']] = 1
+ else:
+ machine_counter[machine_params['name']] += 1
+ machine_params['name'] = '{}_{}'.format(machine_params['name'],
+ machine_counter[machine_params['name']])
if 'gencores' in machine_params.keys():
machine = RapidGeneratorMachine(test_params['key'],
- test_params['user'], test_params['vim_type'],
- test_params['rundir'], machine_params)
+ test_params['user'], test_params['password'],
+ test_params['vim_type'], test_params['rundir'],
+ test_params['resultsdir'], machine_params, configonly,
+ test_params['ipv6'])
if machine_params['monitor']:
if monitor_gen:
RapidLog.exception("Can only monitor 1 generator")
@@ -68,55 +90,89 @@ class RapidTestManager(object):
background_machines.append(machine)
else:
machine = RapidMachine(test_params['key'], test_params['user'],
- test_params['vim_type'], test_params['rundir'],
- machine_params)
+ test_params['password'], test_params['vim_type'],
+ test_params['rundir'], test_params['resultsdir'],
+ machine_params, configonly)
if machine_params['monitor']:
if monitor_sut:
RapidLog.exception("Can only monitor 1 sut")
raise Exception("Can only monitor 1 sut")
else:
monitor_sut = True
- sut_machine = machine
- machines.append(machine)
- if test_params['configonly']:
- sys.exit()
- for machine in machines:
- machine.start_prox()
- result = True
- for test_param in test_params['tests']:
- RapidLog.info(test_param['test'])
- if test_param['test'] in ['flowsizetest', 'TST009test',
- 'fixed_rate']:
- test = FlowSizeTest(test_param, test_params['lat_percentile'],
- test_params['runtime'], test_params['pushgateway'],
- test_params['environment_file'], gen_machine,
- sut_machine, background_machines)
- elif test_param['test'] in ['corestats']:
- test = CoreStatsTest(test_param, test_params['runtime'],
- test_params['pushgateway'],
- test_params['environment_file'], machines)
- elif test_param['test'] in ['portstats']:
- test = PortStatsTest(test_param, test_params['runtime'],
- test_params['pushgateway'],
- test_params['environment_file'], machines)
- elif test_param['test'] in ['impairtest']:
- test = ImpairTest(test_param, test_params['lat_percentile'],
- test_params['runtime'], test_params['pushgateway'],
- test_params['environment_file'], gen_machine,
- sut_machine)
- elif test_param['test'] in ['irqtest']:
- test = IrqTest(test_param, test_params['runtime'],
- test_params['pushgateway'],
- test_params['environment_file'], machines)
- elif test_param['test'] in ['warmuptest']:
- test = WarmupTest(test_param, gen_machine)
- else:
- RapidLog.debug('Test name ({}) is not valid:'.format(
- test_param['test']))
- single_test_result = test.run()
- if not single_test_result:
- result = False
- return (result)
+ if machine_params['prox_socket']:
+ sut_machine = machine
+ self.machines.append(machine)
+ RapidLog.debug(test_params)
+ try:
+ prox_executor = concurrent.futures.ThreadPoolExecutor(max_workers=len(self.machines))
+ self.future_to_prox = {prox_executor.submit(machine.start_prox): machine for machine in self.machines}
+ if configonly:
+ concurrent.futures.wait(self.future_to_prox,return_when=ALL_COMPLETED)
+ sys.exit()
+ socket_executor = concurrent.futures.ThreadPoolExecutor(max_workers=len(self.machines))
+ future_to_connect_prox = {socket_executor.submit(machine.connect_prox): machine for machine in self.machines}
+ concurrent.futures.wait(future_to_connect_prox,return_when=ALL_COMPLETED)
+ result = 0
+ for test_param in test_params['tests']:
+ RapidLog.info(test_param['test'])
+ if test_param['test'] in ['flowsizetest', 'TST009test',
+ 'fixed_rate', 'increment_till_fail']:
+ test = FlowSizeTest(test_param,
+ test_params['lat_percentile'],
+ test_params['runtime'],
+ test_params['TestName'],
+ test_params['environment_file'],
+ gen_machine,
+ sut_machine, background_machines,
+ test_params['sleep_time'])
+ elif test_param['test'] in ['corestatstest']:
+ test = CoreStatsTest(test_param,
+ test_params['runtime'],
+ test_params['TestName'],
+ test_params['environment_file'],
+ self.machines)
+ elif test_param['test'] in ['portstatstest']:
+ test = PortStatsTest(test_param,
+ test_params['runtime'],
+ test_params['TestName'],
+ test_params['environment_file'],
+ self.machines)
+ elif test_param['test'] in ['impairtest']:
+ test = ImpairTest(test_param,
+ test_params['lat_percentile'],
+ test_params['runtime'],
+ test_params['TestName'],
+ test_params['environment_file'],
+ gen_machine,
+ sut_machine, background_machines)
+ elif test_param['test'] in ['irqtest']:
+ test = IrqTest(test_param,
+ test_params['runtime'],
+ test_params['TestName'],
+ test_params['environment_file'],
+ self.machines)
+ elif test_param['test'] in ['warmuptest']:
+ test = WarmupTest(test_param,
+ gen_machine)
+ else:
+ RapidLog.debug('Test name ({}) is not valid:'.format(
+ test_param['test']))
+ single_test_result, result_details = test.run()
+ result = result + single_test_result
+ for machine in self.machines:
+ machine.close_prox()
+ concurrent.futures.wait(self.future_to_prox,
+ return_when=ALL_COMPLETED)
+ except (ConnectionError, KeyboardInterrupt) as e:
+ result = result_details = None
+ socket_executor.shutdown(wait=False)
+ socket_executor._threads.clear()
+ prox_executor.shutdown(wait=False)
+ prox_executor._threads.clear()
+ concurrent.futures.thread._threads_queues.clear()
+ RapidLog.error("Test interrupted: {} {}".format(
+ type(e).__name__,e))
+ return (result, result_details)
def main():
"""Main function.
@@ -125,12 +181,19 @@ def main():
# When no cli is used, the process_cli can be replaced by code modifying
# test_params
test_params = RapidCli.process_cli(test_params)
- log_file = 'RUN{}.{}.log'.format(test_params['environment_file'],
- test_params['test_file'])
+ _, test_file_name = os.path.split(test_params['test_file'])
+ _, environment_file_name = os.path.split(test_params['environment_file'])
+ if 'resultsdir' in test_params:
+ res_dir = test_params['resultsdir']
+ log_file = '{}/RUN{}.{}.log'.format(res_dir,environment_file_name,
+ test_file_name)
+ else:
+ log_file = 'RUN{}.{}.log'.format(environment_file_name, test_file_name)
RapidLog.log_init(log_file, test_params['loglevel'],
test_params['screenloglevel'] , test_params['version'] )
- test_result = RapidTestManager.run_tests(test_params)
- RapidLog.info('Test result is : {}'.format(test_result))
+ test_manager = RapidTestManager()
+ test_result, _ = test_manager.run_tests(test_params)
+ RapidLog.log_close()
if __name__ == "__main__":
main()
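The reworked run_tests() above launches PROX on every machine in parallel and only starts testing once each PROX command socket is reachable. A stripped-down sketch of that pattern, with machine objects standing in for the real RapidMachine class:

    import concurrent.futures
    from concurrent.futures import ALL_COMPLETED

    def start_and_connect(machines):
        # start_prox() is submitted to a pool so the script is not blocked
        # while PROX runs; the futures are kept to wait on them at shutdown.
        prox_pool = concurrent.futures.ThreadPoolExecutor(max_workers=len(machines))
        future_to_prox = {prox_pool.submit(m.start_prox): m for m in machines}

        # connect_prox() returns once the PROX socket is reachable, so waiting
        # for all of these futures means every instance is ready for testing.
        sock_pool = concurrent.futures.ThreadPoolExecutor(max_workers=len(machines))
        connected = {sock_pool.submit(m.connect_prox): m for m in machines}
        concurrent.futures.wait(connected, return_when=ALL_COMPLETED)
        return future_to_prox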
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/setup.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/setup.cfg
new file mode 100644
index 00000000..bac49bd5
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/setup.cfg
@@ -0,0 +1,16 @@
+[metadata]
+name = rapidxt
+version = 1
+
+[files]
+packages = .
+package_dir = .
+
+[options.data_files]
+. = format.yaml
+
+[entry_points]
+xtesting.testcase =
+ rapidxt = rapidxt:RapidXt
+[options.packages.find]
+where = .
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/setup.py b/VNFs/DPPD-PROX/helper-scripts/rapid/setup.py
new file mode 100644
index 00000000..fa9d59ac
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/setup.py
@@ -0,0 +1,9 @@
+#!/usr/bin/env python
+
+# pylint: disable=missing-docstring
+
+import setuptools
+
+setuptools.setup(
+ setup_requires=['pbr>=2.0.0'],
+ pbr=True)
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/stackdeployment.py b/VNFs/DPPD-PROX/helper-scripts/rapid/stackdeployment.py
index 525cff1a..7038ab66 100755
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/stackdeployment.py
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/stackdeployment.py
@@ -75,7 +75,7 @@ class StackDeployment(object):
for name in server_group_output:
self.names.append(name)
- def print_paramDict(self, user, push_gateway):
+ def print_paramDict(self, user, dataplane_subnet_mask):
if not(len(self.dp_ips) == len(self.dp_macs) == len(self.mngmt_ips)):
sys.exit()
_ENV_FILE_DIR = os.path.dirname(os.path.realpath(__file__))
@@ -90,9 +90,11 @@ class StackDeployment(object):
env_file.write('admin_ip = {}\n'.format(str(self.mngmt_ips[count])))
if type(self.dp_ips[count]) == list:
for i, dp_ip in enumerate(self.dp_ips[count], start = 1):
- env_file.write('dp_ip{} = {}\n'.format(i, str(dp_ip)))
+ env_file.write('dp_ip{} = {}/{}\n'.format(i, str(dp_ip),
+ dataplane_subnet_mask))
else:
- env_file.write('dp_ip1 = {}\n'.format(str(self.dp_ips[count])))
+ env_file.write('dp_ip1 = {}/{}\n'.format(str(self.dp_ips[count]),
+ dataplane_subnet_mask))
if type(self.dp_macs[count]) == list:
for i, dp_mac in enumerate(self.dp_macs[count], start = 1):
env_file.write('dp_mac{} = {}\n'.format(i, str(dp_mac)))
@@ -100,23 +102,20 @@ class StackDeployment(object):
env_file.write('dp_mac1 = {}\n'.format(str(self.dp_macs[count])))
env_file.write('\n')
env_file.write('[ssh]\n')
- env_file.write('key = {}\n'.format(self.private_key_filename))
+ env_file.write('key = {}\n'.format(self.key_name))
env_file.write('user = {}\n'.format(user))
env_file.write('\n')
env_file.write('[Varia]\n')
env_file.write('vim = OpenStack\n')
env_file.write('stack = {}\n'.format(self.stack.stack_name))
- if push_gateway:
- env_file.write('pushgateway = {}\n'.format(push_gateway))
- def create_stack(self, stack_name, stack_file_path, param_file):
+ def create_stack(self, stack_name, stack_file_path, heat_parameters):
files, template = template_utils.process_template_path(stack_file_path)
- heat_parameters = open(param_file)
- temp_params = yaml.load(heat_parameters,Loader=yaml.BaseLoader)
- heat_parameters.close()
- stack_created = self.heatclient.stacks.create(stack_name=stack_name, template=template,
- parameters=temp_params["parameters"], files=files)
- stack = self.heatclient.stacks.get(stack_created['stack']['id'], resolve_outputs=True)
+ stack_created = self.heatclient.stacks.create(stack_name = stack_name,
+ template = template, parameters = heat_parameters,
+ files = files)
+ stack = self.heatclient.stacks.get(stack_created['stack']['id'],
+ resolve_outputs=True)
# Poll at 5 second intervals, until the status is no longer 'BUILD'
while stack.stack_status == 'CREATE_IN_PROGRESS':
print('waiting..')
@@ -128,11 +127,22 @@ class StackDeployment(object):
RapidLog.exception('Error in stack deployment')
def create_key(self):
- keypair = self.nova_client.keypairs.create(name=self.key_name)
+ if os.path.exists(self.key_name):
+ public_key_file = "{}.pub".format(self.key_name)
+ if not os.path.exists(public_key_file):
+ RapidLog.critical('Keypair {}.pub does not exist'.format(
+ self.key_name))
+ with open(public_key_file, mode='rb') as public_file:
+ public_key = public_file.read()
+ else:
+ public_key = None
+ keypair = self.nova_client.keypairs.create(name = self.key_name,
+ public_key = public_key)
# Create a file for writing that can only be read and written by owner
- fp = os.open(self.private_key_filename, os.O_WRONLY | os.O_CREAT, 0o600)
- with os.fdopen(fp, 'w') as f:
- f.write(keypair.private_key)
+ if not os.path.exists(self.key_name):
+ fp = os.open(self.key_name, os.O_WRONLY | os.O_CREAT, 0o600)
+ with os.fdopen(fp, 'w') as f:
+ f.write(keypair.private_key)
RapidLog.info('Keypair {} created'.format(self.key_name))
def IsDeployed(self, stack_name):
@@ -150,14 +160,18 @@ class StackDeployment(object):
return True
return False
- def deploy(self, stack_name, keypair_name, heat_template, heat_param):
- self.key_name = keypair_name
- self.private_key_filename = '{}.pem'.format(keypair_name)
+ def deploy(self, stack_name, heat_template, heat_param):
+ heat_parameters_file = open(heat_param)
+ heat_parameters = yaml.load(heat_parameters_file,
+ Loader=yaml.BaseLoader)['parameters']
+ heat_parameters_file.close()
+ self.key_name = heat_parameters['PROX_key']
if not self.IsDeployed(stack_name):
if not self.IsKey():
self.create_key()
- self.stack = self.create_stack(stack_name, heat_template, heat_param)
+ self.stack = self.create_stack(stack_name, heat_template,
+ heat_parameters)
- def generate_env_file(self, user = 'centos', push_gateway = None):
+ def generate_env_file(self, user = 'centos', dataplane_subnet_mask = '24'):
self.generate_paramDict()
- self.print_paramDict(user, push_gateway)
+ self.print_paramDict(user, dataplane_subnet_mask)
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/start.sh b/VNFs/DPPD-PROX/helper-scripts/rapid/start.sh
index 742983ec..78772dd2 100755
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/start.sh
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/start.sh
@@ -17,12 +17,27 @@
function save_k8s_envs()
{
- printenv | grep "PCIDEVICE_INTEL_COM" > /opt/k8s_sriov_device_plugin_envs
+ printenv | grep "PCIDEVICE" > /opt/rapid/k8s_sriov_device_plugin_envs
+ printenv | grep "QAT[0-9]" > /opt/rapid/k8s_qat_device_plugin_envs
+}
+
+function create_tun()
+{
+ mkdir -p /dev/net
+ mknod /dev/net/tun c 10 200
+ chmod 600 /dev/net/tun
}
save_k8s_envs
+create_tun
+
+# Ready for testing
+touch /opt/rapid/system_ready_for_rapid
# Start SSH server in background
-/usr/sbin/sshd
+echo "mkdir -p /var/run/sshd" >> /etc/rc.local
+service ssh start
+
+echo "rapid ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers
-exec sleep infinity
+sleep infinity
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/tests/README b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/README
new file mode 100644
index 00000000..9e26fdb1
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/README
@@ -0,0 +1,194 @@
+##
+## Copyright (c) 2021 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+# This README describes the format of all the rapid test files that you can
+# find in this directory.
+# These files can be specified as a parameter for the runrapid.py script, using
+# the --test [testfile] option. The default file name is specified in
+# rapid_defaults.py and is basicrapid.test.
+#
+# There are 3 types of sections in this config file:
+# - the [TestParameters] section, which defines how many [TestMx] sections and
+# how many [testy] sections need to be present in this file.
+# - at least one Test machine section [TestMx], where x is the index of the
+# Test machines, starting at index 1.
+# - at least one test definition section [testy], where y is the index of the
+# test to be run. The index starts at 1.
+
+[TestParameters]
+# The name of this test. Can be chosen freely to describe this test
+name = BasicSwapTesting
+
+# Defines how many different tests will be executed when running this test file.
+# This is usually set to 1. You need to define as many [testy] sections as
+# defined in this parameter.
+number_of_tests = 1
+
+# The next parameter defines how many PROX instances are needed to run this test.
+# You need to define as many [TestMx] sections as defined in this parameter.
+total_number_of_test_machines = 2
+
+# Some rapid tests are reporting the latency percentile statistics. This
+# parameter defines which latency percentile will be used for this test.
+lat_percentile = 99
+
+# When doing ipv6 testing, this parameter needs to be set to True, default is
+# False. This is used by the generator code to calculate the proper packet
+# header length offsets.
+ipv6 = True
+
+# The following section describes the role of the first Test Machine. Note that
+# the connection details for each PROX instance are defined in the environment
+# file (default: rapid.env). There is a --map parameter for runrapid.py that
+# specifies how the Test machines are mapped onto the available PROX instances.
+[TestM1]
+# Name can be freely chosen
+name = Generator
+# the PROX configuration file that will be used to start PROX on this Test
+# machine. This configuration file will define the role that PROX will play in
+# this Test machine.
+config_file = configs/gen.cfg
+# The values of the remaining parameters in this section are passed on to the
+# PROX configuration file through a file called parameters.lua
+#
+# The next parameter defines the destination Test machine index. This will be
+# used by a generator to define which destination MAC or IP addresses should be
+# used in the generated packets. Whether MAC or IP addresses are used is
+# determined by the use of l2 or l3.
+dest_vm = 2
+# The next parameter defines the GW Test machine index. This will be
+# used by a generator to define which GW MAC or IP addresses should be
+# used in the generated packets. Whether MAC or IP addresses are used is
+# determined by the use of l2 or l3.
+#gw_vm = 2
+# mcore defines which master core PROX will use. It is not advised to change
+# this. The PROX instances are optimized to use core 0 for the master and all
+# other cores for DPDK usage.
+mcore = [0]
+# gencores defines which cores will be used to generate packets. If the
+# generator is not able to generate enough packets, you might want to assign
+# more cores to the generator. Make sure not to use more cores in these
+# variables than you have available in your PROX instance.
+gencores = [1]
+# latcores defines the cores that will do the task of measuring latency,
+# reordering and other statistics.
+latcores = [3]
+# Non generator Test machines only require the cores parameter to find out on
+# which cores they need to place the PROX tasks.
+# cores = [1-3]
+# cores = [1,2,3]
+# The bucket_size_exp parameter is only needed for generator machines when
+# collecting percentile latency statistics. PROX assigns every packet to
+# one of the 128 latency buckets. The size of the latency buckets depends on
+# the processor frequency and this parameter, as shown in the formula below.
+# iteration_data['bucket_size'] = float(2 ** BUCKET_SIZE_EXP) /
+# (old_div(float(iteration_data['lat_hz']),float(10**6)))
+# The result expresses the width of each bucket in micro-seconds.
+# The minimum value (which is also the default value) for this parameter is 11.
+# For a processor with a frequency of 2Ghz, and a parameter of 11, this results
+# in a bucket size of 1.024 us. Since we have 128 buckets, the maximum latency
+# that can be stored in the buckets is in theory 128 * 1.024 = 131.072 us. We
+# will however place every measurement with a latency higher than 131.072 us in
+# the last bucket. When you are dealing with higher latency, you will have to
+# increase this parameter. Each time you increase this parameter by 1, you will
+# double the bucket size.
+#bucket_size_exp = 12
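+# Plugging the 2Ghz / bucket_size_exp = 11 example above into the formula:
+# 2**11 / (2000000000 / 10**6) = 2048 / 2000 = 1.024 us per bucket.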
+# We can only monitor one generator and one reflector (swap) Test machine.
+# Monitoring means that we will use the statistics coming from these Test
+# machines to report statistics and make decisions on the success of a test.
+# Test machines not playing a role in this process need to have the monitor
+# parameter set to false. You can only have 1 generator machine and 1 SUT Test
+# machine. The parameter can be set to false for background traffic Test
+# machines, GW Test machines, etc... Default is true
+#monitor = false
+# The prox_socket parameter instructs the rapid scripts to connect to the PROX
+# instance and collect statistics. Default is true. If set to false, we will
+# not collect any statistics from this machine.
+#prox_socket = false
+# The prox_launch_exit parameter instructs the script to actually start PROX at
+# the beginning of a test, and to stop it at the end. The default is true. You
+# can set this parameter to false in case you want to start PROX manually and
+# inspect the PROX UI, while the rapid scripts are driving the testing.
+#prox_launch_exit = false
+
+[TestM2]
+name = Swap
+config_file = configs/swap.cfg
+mcore = [0]
+cores = [1]
+#prox_socket = false
+#prox_launch_exit = false
+
+# The following section describes the first test that will run. You need at
+# least 1 test section. In most cases, you will only have one.
+[test1]
+# The test that we will run. A limited set of tests are available: you need to
+# select from the available tests as you can see in the runrapid.py code.
+# At the time of writing, we have the following tests
+# available: flowsizetest, TST009test, fixed_rate, increment_till_fail,
+# corestatstest, portstatstest, impairtest, irqtest, warmuptest
+test=flowsizetest
+# The next warmup parameters are used to warm up the system before the actual
+# test is started. This is to make sure ARP is being resolved in PROX and in the
+# underlying infrastructure so that this does not influence the results.
+# warmupflowsize instructs how many parallel flows need to be generated during
+# warmup
+warmupflowsize=512
+# Gives the imix packet sizes that will be used during warmup. It is a list of
+# packet sizes.
+warmupimix=[64, 300, 250, 250, 80]
+# The speed at which we will generate packets during the warmup phase. The speed
+# is expressed as a percentage of 10Gb/s. You could say this is expressed in
+# units of 100Mb/s.
+warmupspeed=1
+# warmuptime is the time this warmup phase will run. It is expressed in seconds.
+warmuptime=2
+# Each element in the imix list will result in a separate test. Each element
+# is in turn a list of packet sizes which will be used during one test
+# execution. If you only want to test 1 size, define a list with only one
+# element.
+imixs=[[64],[64,250,800,800]]
+# Each number of flows in the list needs to be a power of 2, with a maximum of
+# 2^30. If not a power of 2, we will use the lowest power of 2 that is larger
+# than the requested number of flows, e.g. 9 will result in 16 flows.
+# Each element in this list will result in a separate test.
+flows=[64,500000]
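+# For example, flows=[64,500000] runs one test with 64 flows and one with
+# 524288 flows, since 2^19 = 524288 is the lowest power of 2 above 500000.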
+# The drop_rate_threshold defines the maximum amount of packets that can be
+# dropped without declaring the test as failed. This number is expressed as a
+# percentage of the total amount of packets being sent by the generator. If this
+# number is set to 0, the test will only be declared successful if zero packets
+# were dropped during this test.
+drop_rate_threshold = 0.1
+# Setting one of the following thresholds to infinity (inf) results in the
+# criterion not being evaluated to rate the test as successful. The latency
+# thresholds are expressed in micro-seconds.
+lat_avg_threshold = 50
+lat_perc_threshold = 80
+lat_max_threshold = inf
+# When we run binary searches, we always try a new speed, halfway between the
+# last failed speed and the last successful speed (initially, we consider 0 as
+# that last successful speed). We stop doing this binary search when the
+# difference between the last speed and the new speed is less than what is
+# defined by accuracy, expressed in percentages.
+accuracy = 1
+# Speed at which we will start the binary search, expressed in percentage of
+# 10Gb/s.
+startspeed = 50
+# When using ramp_step, we will, at the beginning of each measurement, increase
+# the traffic slowly till we reach the requested speed. This can be used with
+# certain soft switches that reconfigure their resource usage based on the
+# actual traffic. In order not to influence the measurement, we then slowly go
+# to the requested traffic rate.
+#ramp_step = 1
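The comments above describe how flow counts are rounded and how the binary search converges. The sketch below restates both rules in Python purely as an illustration; it is not the runrapid.py implementation, and the test_passes() callback and the 100% initial upper bound for the first failed speed are assumptions.

```python
def round_up_flows(requested: int) -> int:
    """Round a requested flow count up to the next power of 2 (e.g. 9 -> 16)."""
    power = 1
    while power < requested:
        power *= 2
    return power


def binary_search(test_passes, startspeed: float, accuracy: float,
                  max_speed: float = 100.0) -> float:
    """Try speeds halfway between the last failed and the last successful speed
    (0 counts as the initial successful speed; max_speed is an assumed initial
    failed bound) and stop once the step between successive trial speeds is
    smaller than 'accuracy'. Speeds are in percent of 10Gb/s."""
    success, fail = 0.0, max_speed
    speed = startspeed
    while True:
        if test_passes(speed):   # hypothetical callback returning pass/fail
            success = speed
        else:
            fail = speed
        new_speed = (success + fail) / 2.0
        if abs(new_speed - speed) < accuracy:
            return success
        speed = new_speed
```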
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/TST009_Throughput.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/TST009_Throughput.test
index d931faa8..8b765e7d 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/TST009_Throughput.test
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/TST009_Throughput.test
@@ -1,5 +1,5 @@
##
-## Copyright (c) 2010-2020 Intel Corporation
+## Copyright (c) 2010-2021 Intel Corporation
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
@@ -13,40 +13,36 @@
## See the License for the specific language governing permissions and
## limitations under the License.
##
+# CHECK README IN THIS DIRECTORY FOR MORE EXPLANATION
+# ON PARAMETERS IN THIS FILE
[TestParameters]
-name = TST009Testing
-number_of_tests = 2
+name = Rapid_ETSINFV_TST009
+number_of_tests = 1
total_number_of_test_machines = 2
lat_percentile = 99
[TestM1]
name = Generator
-config_file = gen.cfg
+config_file = configs/gen.cfg
dest_vm = 2
+mcore = [0]
gencores = [1]
latcores = [3]
#bucket_size_exp = 12
[TestM2]
name = Swap
-config_file = swap.cfg
+config_file = configs/swap.cfg
+mcore = [0]
cores = [1]
[test1]
-test=warmuptest
-flowsize=512
-imix=[64]
+test=TST009test
+warmupflowsize=512
+warmupimix=[64]
warmupspeed=1
warmuptime=2
-
-[test2]
-test=TST009test
-# Following parameter defines the success criterium for the test.
-# When this test uses multiple combinations of packet size and flows,
-# all combinations must be meeting the same threshold
-# The threshold is expressed in Mpps
-pass_threshold=0.1
imixs=[[64],[128]]
# the number of flows in the list need to be powers of 2, max 2^20
# Select from following numbers: 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/tests/TST009_Throughput_64B_64F.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/TST009_Throughput_64B_64F.test
new file mode 100644
index 00000000..27794a12
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/TST009_Throughput_64B_64F.test
@@ -0,0 +1,57 @@
+##
+## Copyright (c) 2010-2021 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+# CHECK README IN THIS DIRECTORY FOR MORE EXPLANATION
+# ON PARAMETERS IN THIS FILE
+
+[TestParameters]
+name = Rapid_ETSINFV_TST009
+number_of_tests = 1
+total_number_of_test_machines = 2
+lat_percentile = 99
+
+[TestM1]
+name = Generator
+config_file = configs/gen.cfg
+dest_vm = 2
+mcore = [0]
+gencores = [1]
+latcores = [3]
+#bucket_size_exp = 12
+
+[TestM2]
+name = Swap
+config_file = configs/swap.cfg
+mcore = [0]
+cores = [1]
+
+[test1]
+test=TST009test
+warmupflowsize=512
+warmupimix=[64]
+warmupspeed=1
+warmuptime=2
+imixs=[[64]]
+# Each number of flows in the list needs to be a power of 2, max 2^20
+# Select from following numbers: 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576
+flows=[64]
+drop_rate_threshold = 0
+lat_avg_threshold = inf
+lat_perc_threshold = inf
+lat_max_threshold = inf
+MAXr = 3
+MAXz = 5000
+MAXFramesPerSecondAllIngress = 12000000
+StepSize = 10000
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/tests/TST009_Throughput_acaeab_16384F.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/TST009_Throughput_acaeab_16384F.test
new file mode 100644
index 00000000..69e4ebc7
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/TST009_Throughput_acaeab_16384F.test
@@ -0,0 +1,57 @@
+##
+## Copyright (c) 2010-2021 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+# CHECK README IN THIS DIRECTORY FOR MORE EXPLANATION
+# ON PARAMETERS IN THIS FILE
+
+[TestParameters]
+name = Rapid_ETSINFV_TST009
+number_of_tests = 1
+total_number_of_test_machines = 2
+lat_percentile = 99
+
+[TestM1]
+name = Generator
+config_file = configs/gen.cfg
+dest_vm = 2
+mcore = [0]
+gencores = [1]
+latcores = [3]
+#bucket_size_exp = 12
+
+[TestM2]
+name = Swap
+config_file = configs/swap.cfg
+mcore = [0]
+cores = [1]
+
+[test1]
+test=TST009test
+warmupflowsize=512
+warmupimix=[64]
+warmupspeed=1
+warmuptime=2
+imixs=[[64,256,64,1024,64,128]]
+# Each number of flows in the list needs to be a power of 2, max 2^20
+# Select from following numbers: 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576
+flows=[16384]
+drop_rate_threshold = 0
+lat_avg_threshold = 120
+lat_perc_threshold = 220
+lat_max_threshold = inf
+MAXr = 3
+MAXz = 5000
+MAXFramesPerSecondAllIngress = 12000000
+StepSize = 10000
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/tests/TST009ipV6.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/TST009ipV6.test
new file mode 100644
index 00000000..ff902de6
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/TST009ipV6.test
@@ -0,0 +1,61 @@
+##
+## Copyright (c) 2020-2021 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+# CHECK README IN THIS DIRECTORY FOR MORE EXPLANATION
+# ON PARAMETERS IN THIS FILE
+
+[TestParameters]
+name = BasicSwapTesting
+number_of_tests = 1
+total_number_of_test_machines = 2
+lat_percentile = 99
+ipv6 = True
+
+[TestM1]
+name = Generator
+config_file = configs/genv6.cfg
+dest_vm = 2
+mcore = [0]
+gencores = [1]
+latcores = [3]
+#bucket_size_exp = 12
+
+[TestM2]
+name = Swap
+config_file = configs/swapv6.cfg
+mcore = [0]
+cores = [1]
+#prox_socket = true
+#prox_launch_exit = true
+
+[test1]
+test=TST009test
+warmupflowsize=512
+warmupimix=[64]
+warmupspeed=1
+warmuptime=2
+# DO NOT USE IMIX FOR IPV6 TESTING. THE LIST OF IMIXS CAN ONLY CONTAIN LISTS
+# WITH ONE ELEMENT!!!
+# PACKET SIZE NEEDS TO BE AT LEAST 84 (66 + 18) FOR IPV6
+# 18 bytes needed for UDP LATENCY AND COUNTER CONTENT
+imixs=[[84],[128]]
+# Each number of flows in the list needs to be a power of 2, max 2^20
+# Select from following numbers: 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576
+flows=[8,1024]
+drop_rate_threshold = 0
+MAXr = 3
+MAXz = 5000
+MAXFramesPerSecondAllIngress = 12000000
+StepSize = 10000
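The 84-byte minimum quoted above (66 + 18) is consistent with imix sizes being full Ethernet frame sizes, 4-byte CRC included; that accounting is an assumption, and the quick check below also shows why 64 bytes is enough for the IPv4 tests.

```python
# Assumption: imix sizes are full Ethernet frame sizes, 4-byte CRC included.
ETH_HDR, CRC, UDP_HDR = 14, 4, 8
IPV4_HDR, IPV6_HDR = 20, 40
LATENCY_PAYLOAD = 18   # UDP payload carrying the latency timestamp and counter

min_ipv4_frame = ETH_HDR + IPV4_HDR + UDP_HDR + LATENCY_PAYLOAD + CRC  # = 64
min_ipv6_frame = ETH_HDR + IPV6_HDR + UDP_HDR + LATENCY_PAYLOAD + CRC  # = 84

assert min_ipv4_frame == 64 and min_ipv6_frame == 84
```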
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/bare.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/bare.test
index e827e974..803c65e7 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/bare.test
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/bare.test
@@ -16,30 +16,29 @@
[TestParameters]
name = BareTesting
-number_of_tests = 2
+number_of_tests = 1
total_number_of_test_machines = 2
[TestM1]
name = Generator
-config_file = l2gen_bare.cfg
+config_file = configs/l2gen_bare.cfg
dest_vm = 2
+mcore = [0]
gencores = [1]
latcores = [3]
[TestM2]
name = Swap
-config_file = l2swap.cfg
+config_file = configs/l2swap.cfg
+mcore = [0]
cores = [1]
[test1]
-test=warmuptest
-flowsize=512
-imix=[64]
+test=flowsizetest
+warmupflowsize=512
+warmupimix=[64]
warmupspeed=10
warmuptime=2
-
-[test2]
-test=flowsizetest
imixs=[[64],[128]]
# the number of flows in the list need to be powers of 2, max 2^20
# Select from following numbers: 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/basicrapid.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/basicrapid.test
index 80710f36..9874de47 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/basicrapid.test
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/basicrapid.test
@@ -1,5 +1,5 @@
##
-## Copyright (c) 2010-2020 Intel Corporation
+## Copyright (c) 2010-2021 Intel Corporation
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
@@ -13,42 +13,38 @@
## See the License for the specific language governing permissions and
## limitations under the License.
##
+# CHECK README IN THIS DIRECTORY FOR MORE EXPLANATION
+# ON PARAMETERS IN THIS FILE
[TestParameters]
name = BasicSwapTesting
-number_of_tests = 2
+number_of_tests = 1
total_number_of_test_machines = 2
lat_percentile = 99
[TestM1]
name = Generator
-config_file = gen.cfg
+config_file = configs/gen.cfg
dest_vm = 2
+mcore = [0]
gencores = [1]
latcores = [3]
#bucket_size_exp = 12
[TestM2]
name = Swap
-config_file = swap.cfg
+config_file = configs/swap.cfg
+mcore = [0]
cores = [1]
#prox_socket = true
#prox_launch_exit = true
[test1]
-test=warmuptest
-flowsize=512
-imix=[64]
+test=flowsizetest
+warmupflowsize=512
+warmupimix=[64]
warmupspeed=1
warmuptime=2
-
-[test2]
-test=flowsizetest
-# Following parameter defines the success criterium for the test.
-# When this test uses multiple combinations of packet size and flows,
-# all combinations must be meeting the same threshold
-# The threshold is expressed in Mpps
-pass_threshold=0.1
# Each element in the imix list will result in a separate test. Each element
# is on its turn a list of packet sizes which will be used during one test
# execution. If you only want to test 1 size, define a list with only one
@@ -66,3 +62,4 @@ lat_perc_threshold = 80
lat_max_threshold = inf
accuracy = 1
startspeed = 50
+#ramp_step = 1
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/tests/basicrapid_gw.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/basicrapid_gw.test
new file mode 100644
index 00000000..a876a049
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/basicrapid_gw.test
@@ -0,0 +1,73 @@
+##
+## Copyright (c) 2021 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+# CHECK README IN THIS DIRECTORY FOR MORE EXPLANATION
+# ON PARAMETERS IN THIS FILE
+
+[TestParameters]
+name = BasicSwapWithGatewayTesting
+number_of_tests = 1
+total_number_of_test_machines = 3
+lat_percentile = 99
+
+[TestM1]
+name = Generator
+config_file = configs/gen_gw.cfg
+gw_vm = 2
+dest_vm = 3
+mcore = [0]
+gencores = [1]
+latcores = [3]
+#bucket_size_exp = 12
+
+[TestM2]
+name = Gateway
+monitor = false
+prox_socket = false
+prox_launch_exit = false
+
+[TestM3]
+name = Swap
+config_file = configs/swap_gw.cfg
+gw_vm = 2
+mcore = [0]
+cores = [1]
+#prox_socket = true
+#prox_launch_exit = true
+
+[test1]
+test=flowsizetest
+warmupflowsize=512
+warmupimix=[64]
+warmupspeed=1
+warmuptime=2
+# Each element in the imix list will result in a separate test. Each element
+# is in turn a list of packet sizes which will be used during one test
+# execution. If you only want to test 1 size, define a list with only one
+# element.
+imixs=[[64],[64,250,800,800]]
+# Each number of flows in the list needs to be a power of 2, max 2^30
+# If not a power of 2, we will use the lowest power of 2 that is larger than
+# the requested number of flows. e.g. 9 will result in 16 flows
+flows=[64,500000]
+# Setting one of the following thresholds to infinity (inf)
+# results in the criterion not being evaluated to rate the test as successful
+drop_rate_threshold = 0.1
+lat_avg_threshold = 50
+lat_perc_threshold = 80
+lat_max_threshold = inf
+accuracy = 1
+startspeed = 50
+#ramp_step = 1
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/tests/cgnat.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/cgnat.test
new file mode 100644
index 00000000..927ecf35
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/cgnat.test
@@ -0,0 +1,63 @@
+##
+## Copyright (c) 2010-2021 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+# CHECK README IN THIS DIRECTORY FOR MORE EXPLANATION
+# ON PARAMETERS IN THIS FILE
+
+[TestParameters]
+name = CGNATTesting
+number_of_tests = 1
+total_number_of_test_machines = 3
+
+[TestM1]
+name = Generator
+config_file = configs/gen_gw.cfg
+dest_vm = 3
+gw_vm = 2
+mcore = [0]
+gencores = [1]
+latcores = [3]
+
+[TestM2]
+name = CGNAT
+config_file = configs/cgnat.cfg
+dest_vm = 3
+mcore = [0]
+cores = [1]
+monitor = false
+prox_socket = true
+prox_launch_exit = true
+
+[TestM3]
+name = PublicSide
+config_file = configs/public_server.cfg
+mcore = [0]
+cores = [1]
+
+[test1]
+test=flowsizetest
+warmupflowsize=512
+warmupimix=[64]
+warmupspeed=1
+warmuptime=2
+imixs=[[64]]
+# Each number of flows in the list needs to be a power of 2, max 2^20
+# Select from following numbers: 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576
+flows=[512]
+drop_rate_threshold = 0.1
+lat_avg_threshold = 500
+lat_max_threshold = 1000
+accuracy = 0.1
+startspeed = 10
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/corestats.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/corestats.test
index f29e8587..660f79b0 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/corestats.test
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/corestats.test
@@ -1,5 +1,5 @@
##
-## Copyright (c) 2010-2019 Intel Corporation
+## Copyright (c) 2010-2021 Intel Corporation
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
@@ -13,6 +13,8 @@
## See the License for the specific language governing permissions and
## limitations under the License.
##
+# CHECK README IN THIS DIRECTORY FOR MORE EXPLANATION
+# ON PARAMETERS IN THIS FILE
[TestParameters]
name = CoreStatistics
@@ -21,8 +23,9 @@ total_number_of_test_machines = 1
[TestM1]
name = Swap
-config_file = swap.cfg
+config_file = configs/swap.cfg
+mcore = [0]
cores = [1]
[test1]
-test=corestats
+test=corestatstest
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/tests/encrypt.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/encrypt.test
new file mode 100644
index 00000000..bc5e96b8
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/encrypt.test
@@ -0,0 +1,70 @@
+##
+## Copyright (c) 2023 luc.provoost@gmail.com
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+# CHECK README IN THIS DIRECTORY FOR MORE EXPLANATION
+# ON PARAMETERS IN THIS FILE
+
+[TestParameters]
+name = EncryptionDecryption
+number_of_tests = 1
+total_number_of_test_machines = 2
+lat_percentile = 99
+
+[TestM1]
+name = Generator
+config_file = configs/gen.cfg
+dest_vm = 2
+mcore = [0]
+gencores = [1]
+latcores = [3]
+bucket_size_exp = 16
+#prox_launch_exit = false
+
+[TestM2]
+name = Encrypt
+config_file = configs/esp.cfg
+dest_vm = 1
+mcore = [0]
+cores = [1]
+altcores=[2]
+#prox_socket = true
+#prox_launch_exit = false
+
+[test1]
+test=flowsizetest
+warmupflowsize=512
+warmupimix=[64]
+warmupspeed=1
+warmuptime=2
+# Each element in the imix list will result in a separate test. Each element
+# is in turn a list of packet sizes which will be used during one test
+# execution. If you only want to test 1 size, define a list with only one
+# element.
+#imixs=[[64],[64,250,800,800]]
+imixs=[[1500],[512],[256],[128]]
+# Each number of flows in the list needs to be a power of 2, max 2^30
+# If not a power of 2, we will use the lowest power of 2 that is larger than
+# the requested number of flows. e.g. 9 will result in 16 flows
+flows=[64]
+# Setting one of the following thresholds to infinity (inf)
+# results in the criterion not being evaluated to rate the test as successful
+drop_rate_threshold = 0.5
+lat_avg_threshold = inf
+lat_perc_threshold = inf
+lat_max_threshold = inf
+accuracy = 5
+startspeed = 250
+#ramp_step = 1
+
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/impair.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/impair.test
index a1d5c7be..898062c9 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/impair.test
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/impair.test
@@ -1,5 +1,5 @@
##
-## Copyright (c) 2010-2020 Intel Corporation
+## Copyright (c) 2010-2021 Intel Corporation
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
@@ -13,6 +13,8 @@
## See the License for the specific language governing permissions and
## limitations under the License.
##
+# CHECK README IN THIS DIRECTORY FOR MORE EXPLANATION
+# ON PARAMETERS IN THIS FILE
[TestParameters]
name = impairTesting
@@ -21,36 +23,40 @@ total_number_of_test_machines = 3
[TestM1]
name = Generator
-config_file = gen_gw.cfg
+config_file = configs/gen_gw.cfg
gw_vm = 2
dest_vm = 3
+mcore = [0]
gencores = [1]
latcores = [3]
[TestM2]
name = ImpairGW
-config_file = impair.cfg
+config_file = configs/impair.cfg
+mcore = [0]
cores = [1]
monitor = False
[TestM3]
name = Swap
-config_file = swap.cfg
+config_file = configs/swap.cfg
+mcore = [0]
cores = [1]
[test1]
test=warmuptest
-flowsize=1024
-imix=[64]
+warmupflowsize=1024
+warmupimix=[64]
warmupspeed=10
warmuptime=2
[test2]
test=impairtest
+steps=5
imix=[64]
flowsize=64
drop_rate_threshold = 0.1
lat_avg_threshold = 500
lat_max_threshold = 1000
accuracy = 0.1
-startspeed = 10
+startspeed = 5
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/tests/increment_till_fail.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/increment_till_fail.test
new file mode 100644
index 00000000..cb673de2
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/increment_till_fail.test
@@ -0,0 +1,64 @@
+##
+## Copyright (c) 2020-2021 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+# CHECK README IN THIS DIRECTORY FOR MORE EXPLANATION
+# ON PARAMETERS IN THIS FILE
+
+[TestParameters]
+name = IncrementTillFailTesting
+number_of_tests = 1
+total_number_of_test_machines = 2
+lat_percentile = 99
+
+[TestM1]
+name = Generator
+config_file = configs/gen.cfg
+dest_vm = 2
+mcore = [0]
+gencores = [1]
+latcores = [3]
+#bucket_size_exp = 12
+
+[TestM2]
+name = Swap
+config_file = configs/swap.cfg
+mcore = [0]
+cores = [1]
+#prox_socket = true
+#prox_launch_exit = true
+
+[test1]
+test=increment_till_fail
+warmupflowsize=512
+warmupimix=[64]
+warmupspeed=1
+warmuptime=2
+# Each element in the imix list will result in a separate test. Each element
+# is in turn a list of packet sizes which will be used during one test
+# execution. If you only want to test 1 size, define a list with only one
+# element.
+imixs=[[64],[64,250,800,800]]
+# Each number of flows in the list needs to be a power of 2, max 2^30
+# If not a power of 2, we will use the lowest power of 2 that is larger than
+# the requested number of flows. e.g. 9 will result in 16 flows
+flows=[64,500000]
+# Setting one of the following thresholds to infinity (inf)
+# results in the criterion not being evaluated to rate the test as successful
+drop_rate_threshold = 0.1
+lat_avg_threshold = 50
+lat_perc_threshold = 80
+lat_max_threshold = inf
+step = 0.5
+startspeed = 1
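The step and startspeed parameters above suggest a simple ramp: start at startspeed and keep adding step until the success criteria are no longer met. The sketch below only illustrates that idea with hypothetical names; it is not the runrapid.py code.

```python
def increment_till_fail(test_passes, startspeed: float, step: float,
                        max_speed: float = 100.0):
    """Return the last speed (percent of 10Gb/s) that still met the criteria,
    or None if even startspeed failed; test_passes is a hypothetical callback."""
    speed = startspeed
    last_good = None
    while speed <= max_speed and test_passes(speed):
        last_good = speed
        speed += step
    return last_good
```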
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/ipv6.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/ipv6.test
index 966c073a..f0330589 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/ipv6.test
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/ipv6.test
@@ -1,5 +1,5 @@
##
-## Copyright (c) 2020 Intel Corporation
+## Copyright (c) 2020-2021 Intel Corporation
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
@@ -13,44 +13,45 @@
## See the License for the specific language governing permissions and
## limitations under the License.
##
+# CHECK README IN THIS DIRECTORY FOR MORE EXPLANATION
+# ON PARAMETERS IN THIS FILE
[TestParameters]
name = BasicSwapTesting
-number_of_tests = 2
+number_of_tests = 1
total_number_of_test_machines = 2
lat_percentile = 99
+ipv6 = True
[TestM1]
name = Generator
-config_file = genv6.cfg
+config_file = configs/genv6.cfg
dest_vm = 2
+mcore = [0]
gencores = [1]
latcores = [3]
#bucket_size_exp = 12
[TestM2]
name = Swap
-config_file = swapv6.cfg
+config_file = configs/swapv6.cfg
+mcore = [0]
cores = [1]
#prox_socket = true
#prox_launch_exit = true
[test1]
-test=warmuptest
-flowsize=512
-imix=[64]
+test=flowsizetest
+warmupflowsize=512
+warmupimix=[84]
warmupspeed=1
warmuptime=2
-
-[test2]
-test=flowsizetest
-# Following parameter defines the success criterium for the test.
-# When this test uses multiple combinations of packet size and flows,
-# all combinations must be meeting the same threshold
-# The threshold is expressed in Mpps
-pass_threshold=0.1
-imixs=[[64], [128]]
-# the number of flows in the list need to be powers of 2, max 2^30
+# DO NOT USE IMIX FOR IPV6 TESTING. THE LIST OF IMIXS CAN ONLY CONTAIN LISTS
+# WITH ONE ELEMENT!!!
+# PACKET SIZE NEEDS TO BE AT LEAST 84 (66 + 18) FOR IPV6
+# 18 bytes needed for UDP LATENCY AND COUNTER CONTENT
+imixs=[[84],[250]]
+# Each number of flows in the list needs to be a power of 2, max 2^30
# If not a power of 2, we will use the lowest power of 2 that is larger than
# the requested number of flows. e.g. 9 will result in 16 flows
flows=[64,500000]
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/irq.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/irq.test
index e77ae032..77c9cbec 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/irq.test
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/irq.test
@@ -21,13 +21,15 @@ total_number_of_test_machines = 2
[TestM1]
name = InterruptTestMachine1
-config_file = irq.cfg
+config_file = configs/irq.cfg
+mcore = [0]
cores = [1,2,3]
monitor = False
[TestM2]
name = InterruptTestMachine2
-config_file = irq.cfg
+config_file = configs/irq.cfg
+mcore = [0]
cores = [1,2,3]
monitor = False
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/l2framerate.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/l2framerate.test
index e09b80f3..542fe634 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/l2framerate.test
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/l2framerate.test
@@ -1,5 +1,5 @@
##
-## Copyright (c) 2010-2020 Intel Corporation
+## Copyright (c) 2010-2021 Intel Corporation
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
@@ -13,6 +13,8 @@
## See the License for the specific language governing permissions and
## limitations under the License.
##
+# CHECK README IN THIS DIRECTORY FOR MORE EXPLANATION
+# ON PARAMETERS IN THIS FILE
[TestParameters]
name = L2BasicSwapTesting
@@ -21,23 +23,20 @@ total_number_of_test_machines = 2
[TestM1]
name = Generator
-config_file = l2gen.cfg
+config_file = configs/l2gen.cfg
dest_vm = 2
+mcore = [0]
gencores = [1]
latcores = [3]
[TestM2]
name = Swap
-config_file = l2swap.cfg
+config_file = configs/l2swap.cfg
+mcore = [0]
cores = [1]
[test1]
test=fixed_rate
-# Following parameter defines the success criterium for the test.
-# When this test uses multiple combinations of packet size and flows,
-# all combinations must be meeting the same threshold
-# The threshold is expressed in Mpps
-pass_threshold=0.1
startspeed = 10
imixs=[[256]]
flows=[64]
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/l2zeroloss.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/l2zeroloss.test
index 2f61df56..d3a2ba7c 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/l2zeroloss.test
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/l2zeroloss.test
@@ -1,5 +1,5 @@
##
-## Copyright (c) 2010-2019 Intel Corporation
+## Copyright (c) 2010-2021 Intel Corporation
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
@@ -13,38 +13,34 @@
## See the License for the specific language governing permissions and
## limitations under the License.
##
+# CHECK README IN THIS DIRECTORY FOR MORE EXPLANATION
+# ON PARAMETERS IN THIS FILE
[TestParameters]
name = L2BasicSwapTesting
-number_of_tests = 2
+number_of_tests = 1
total_number_of_test_machines = 2
[TestM1]
name = Generator
-config_file = l2gen.cfg
+config_file = configs/l2gen.cfg
dest_vm = 2
+mcore = [0]
gencores = [1]
latcores = [3]
[TestM2]
name = Swap
-config_file = l2swap.cfg
+config_file = configs/l2swap.cfg
+mcore = [0]
cores = [1]
[test1]
-test=warmuptest
-flowsize=512
-imix=[64]
+test=flowsizetest
+warmupflowsize=512
+warmupimix=[64]
warmupspeed=1
warmuptime=2
-
-[test2]
-test=flowsizetest
-# Following parameter defines the success criterium for the test.
-# When this test uses multiple combinations of packet size and flows,
-# all combinations must be meeting the same threshold
-# The threshold is expressed in Mpps
-pass_threshold=0.1
# Each element in the imix list will result in a separate test. Each element
# is on its turn a list of packet sizes which will be used during one test
# execution. If you only want to test 1 size, define a list with only one
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/l3framerate.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/l3framerate.test
index 1d890d13..f0db6b28 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/l3framerate.test
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/l3framerate.test
@@ -1,5 +1,5 @@
##
-## Copyright (c) 2010-2020 Intel Corporation
+## Copyright (c) 2010-2021 Intel Corporation
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
@@ -13,38 +13,34 @@
## See the License for the specific language governing permissions and
## limitations under the License.
##
+# CHECK README IN THIS DIRECTORY FOR MORE EXPLANATION
+# ON PARAMETERS IN THIS FILE
[TestParameters]
name = L3FrameRateTesting
-number_of_tests = 2
+number_of_tests = 1
total_number_of_test_machines = 2
[TestM1]
name = Generator
-config_file = gen.cfg
+config_file = configs/gen.cfg
dest_vm = 2
+mcore = [0]
gencores = [1]
latcores = [3]
[TestM2]
name = Swap
-config_file = swap.cfg
+config_file = configs/swap.cfg
+mcore = [0]
cores = [1]
[test1]
-test=warmuptest
-flowsize=64
-imix=[64]
+test=fixed_rate
+warmupflowsize=64
+warmupimix=[64]
warmupspeed=1
warmuptime=2
-
-[test2]
-test=fixed_rate
-# Following parameter defines the success criterium for the test.
-# When this test uses multiple combinations of packet size and flows,
-# all combinations must be meeting the same threshold
-# The threshold is expressed in Mpps
-pass_threshold=0.1
imixs=[[64],[128]]
# the number of flows in the list need to be powers of 2, max 2^20
# If not a power of 2, we will use the lowest power of 2 that is larger than
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/portstats.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/portstats.test
index 7bf99676..20d66209 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/portstats.test
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/portstats.test
@@ -1,5 +1,5 @@
##
-## Copyright (c) 2010-2019 Intel Corporation
+## Copyright (c) 2010-2021 Intel Corporation
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
@@ -13,6 +13,8 @@
## See the License for the specific language governing permissions and
## limitations under the License.
##
+# CHECK README IN THIS DIRECTORY FOR MORE EXPLANATION
+# ON PARAMETERS IN THIS FILE
[TestParameters]
name = PortStats
@@ -21,9 +23,10 @@ total_number_of_test_machines = 1
[TestM1]
name = Swap
-config_file = swap.cfg
+config_file = configs/swap.cfg
+mcore = [0]
cores = [1]
ports = [0]
[test1]
-test=portstats
+test=portstatstest
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/secgw.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/secgw.test
index b34f6642..e4bddad0 100644
--- a/VNFs/DPPD-PROX/helper-scripts/rapid/secgw.test
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/secgw.test
@@ -1,5 +1,5 @@
##
-## Copyright (c) 2010-2020 Intel Corporation
+## Copyright (c) 2010-2021 Intel Corporation
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
@@ -13,40 +13,42 @@
## See the License for the specific language governing permissions and
## limitations under the License.
##
+# CHECK README IN THIS DIRECTORY FOR MORE EXPLANATION
+# ON PARAMETERS IN THIS FILE
[TestParameters]
name = GWTesting
-number_of_tests = 2
+number_of_tests = 1
total_number_of_test_machines = 3
[TestM1]
name = Generator
-config_file = gen_gw.cfg
+config_file = configs/gen_gw.cfg
dest_vm = 3
gw_vm = 2
+mcore = [0]
gencores = [1]
latcores = [3]
[TestM2]
name = GW1
-config_file = secgw1.cfg
+config_file = configs/secgw1.cfg
dest_vm = 3
+mcore = [0]
cores = [1]
[TestM3]
name = GW2
-config_file = secgw2.cfg
+config_file = configs/secgw2.cfg
+mcore = [0]
cores = [1]
[test1]
-test=warmuptest
-flowsize=512
-imix=[64]
+test=flowsizetest
+warmupflowsize=512
+warmupimix=[64]
warmupspeed=1
warmuptime=2
-
-[test2]
-test=flowsizetest
imixs=[[64]]
# the number of flows in the list need to be powers of 2, max 2^20
# Select from following numbers: 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/xtesting/Dockerfile b/VNFs/DPPD-PROX/helper-scripts/rapid/xtesting/Dockerfile
new file mode 100644
index 00000000..8a092def
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/xtesting/Dockerfile
@@ -0,0 +1,28 @@
+##
+## Copyright (c) 2020-2021 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+FROM opnfv/xtesting
+
+RUN apk upgrade --update
+
+ENV RAPID_TEST=rapid_tst009_throughput
+
+RUN git clone https://git.opnfv.org/samplevnf /samplevnf
+WORKDIR /samplevnf/VNFs/DPPD-PROX/helper-scripts/rapid
+RUN chmod 400 /samplevnf/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_rsa_key
+COPY testcases.yaml /usr/lib/python3.8/site-packages/xtesting/ci/testcases.yaml
+RUN apk add python3-dev openssh-client && cd /samplevnf/VNFs/DPPD-PROX/helper-scripts/rapid/ && git init && pip3 install .
+CMD ["run_tests", "-t", "all"]
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/xtesting/site.yaml b/VNFs/DPPD-PROX/helper-scripts/rapid/xtesting/site.yaml
new file mode 100644
index 00000000..92fc7b4c
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/xtesting/site.yaml
@@ -0,0 +1,13 @@
+---
+- hosts:
+ - 127.0.0.1
+ roles:
+ - role: collivier.xtesting
+ project: rapidxt
+ repo: 127.0.0.1
+ dport: 5000
+ gerrit:
+ suites:
+ - container: rapidxt
+ tests:
+ - rapid_tst009
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/xtesting/testcases.yaml b/VNFs/DPPD-PROX/helper-scripts/rapid/xtesting/testcases.yaml
new file mode 100644
index 00000000..3cdda7d7
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/xtesting/testcases.yaml
@@ -0,0 +1,54 @@
+---
+tiers:
+ -
+ name: IRQ_rapid_benchmarking
+ order: 1
+ description: 'IRQ Rapid Testing'
+ testcases:
+ -
+ case_name: rapid_irq
+ project_name: rapidxt
+ criteria: 499500
+ # Criterion for irq is defined as 500000 - the maximum allowed interrupt time per PMD loop (in us)
+ blocking: true
+ clean_flag: false
+ description: 'IRQ test'
+ run:
+ name: rapidxt
+ args:
+ test_file: tests/irq.test
+ runtime: 5
+ environment_file: config/rapid.env
+ -
+ name: TST009_rapid_benchmarking
+ order: 2
+ description: 'TST009 Rapid Testing'
+ testcases:
+ -
+ case_name: rapid_tst009_64b_64f
+ project_name: rapidxt
+ criteria: 0.5
+ # Criterion for TST009 testing is defined as the minimum number of packets per second received by the generator, expressed in Mpps
+ blocking: true
+ clean_flag: false
+ description: 'TST009 test, 64 byte packets, 64 flows'
+ run:
+ name: rapidxt
+ args:
+ test_file: tests/TST009_Throughput_64B_64F.test
+ runtime: 5
+ environment_file: config/rapid.env
+ -
+ case_name: rapid_tst009_acaeab_16384f
+ project_name: rapidxt
+ criteria: 0.2
+ # Criterion for TST009 testing is defined as the minimum number of packets per second received by the generator, expressed in Mpps
+ blocking: true
+ clean_flag: false
+ description: 'TST009 test, imix acaeab, 16384 flows'
+ run:
+ name: rapidxt
+ args:
+ test_file: tests/TST009_Throughput_acaeab_16384F.test
+ runtime: 5
+ environment_file: config/rapid.env
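The criteria values above act as thresholds: the TST009 cases report a received rate in Mpps, and the IRQ criterion 499500 is stated as 500000 minus the maximum allowed interrupt time in us, so the result is presumably 500000 minus the measured worst interrupt time. The comparison itself is assumed here to be a simple greater-or-equal check; the snippet below is a hypothetical illustration, not rapidxt code.

```python
def meets_criteria(result: float, criteria: float) -> bool:
    """Assumed pass rule: the reported result must be at least the criterion."""
    return result >= criteria

# TST009 64B/64F case: e.g. 0.7 Mpps received against a 0.5 Mpps criterion.
assert meets_criteria(0.7, 0.5)
# IRQ case: 500000 - 100 us worst interrupt time against a 499500 criterion.
assert meets_criteria(500000 - 100, 499500)
```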
diff --git a/VNFs/DPPD-PROX/input_curses.c b/VNFs/DPPD-PROX/input_curses.c
index 4ea2e4a8..346b0e31 100644
--- a/VNFs/DPPD-PROX/input_curses.c
+++ b/VNFs/DPPD-PROX/input_curses.c
@@ -27,6 +27,7 @@
#include "cmd_parser.h"
#include "input_curses.h"
#include "histedit.h"
+
#include "libedit_autoconf.h"
static EditLine *el;
diff --git a/VNFs/DPPD-PROX/lconf.h b/VNFs/DPPD-PROX/lconf.h
index 8ac1112e..897e6b37 100644
--- a/VNFs/DPPD-PROX/lconf.h
+++ b/VNFs/DPPD-PROX/lconf.h
@@ -17,6 +17,11 @@
#ifndef _LCONF_H_
#define _LCONF_H_
+#include <rte_common.h>
+#ifndef __rte_cache_aligned
+#include <rte_memory.h>
+#endif
+
#include "task_init.h"
#include "stats.h"
@@ -100,8 +105,8 @@ static inline void lconf_flush_all_queues(struct lcore_cfg *lconf)
for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
task = lconf->tasks_all[task_id];
- if (!(task->flags & FLAG_TX_FLUSH) || (task->flags & FLAG_NEVER_FLUSH)) {
- task->flags |= FLAG_TX_FLUSH;
+ if (!(task->flags & TBASE_FLAG_TX_FLUSH) || (task->flags & TBASE_FLAG_NEVER_FLUSH)) {
+ task->flags |= TBASE_FLAG_TX_FLUSH;
continue;
}
lconf->flush_queues[task_id](task);
diff --git a/VNFs/DPPD-PROX/main.c b/VNFs/DPPD-PROX/main.c
index f6fa3e80..61abe6e6 100644
--- a/VNFs/DPPD-PROX/main.c
+++ b/VNFs/DPPD-PROX/main.c
@@ -54,6 +54,7 @@
#endif
uint8_t lb_nb_txrings = 0xff;
+extern const char *git_version;
struct rte_ring *ctrl_rings[RTE_MAX_LCORE*MAX_TASKS_PER_CORE];
static void __attribute__((noreturn)) prox_usage(const char *prgname)
@@ -115,7 +116,7 @@ static void check_mixed_normal_pipeline(void)
}
}
-static void check_zero_rx(void)
+static void check_no_rx(void)
{
struct lcore_cfg *lconf = NULL;
struct task_args *targ;
@@ -227,7 +228,7 @@ static void check_cfg_consistent(void)
{
check_nb_mbuf();
check_missing_rx();
- check_zero_rx();
+ check_no_rx();
check_mixed_normal_pipeline();
}
@@ -306,7 +307,7 @@ static void configure_if_tx_queues(struct task_args *targ, uint8_t socket)
}
#else
if (chain_flag_always_set(targ, TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS)) {
- prox_port_cfg[if_port].requested_tx_offload &= ~(DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM);
+ prox_port_cfg[if_port].requested_tx_offload &= ~(RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_UDP_CKSUM);
}
#endif
}
@@ -315,6 +316,49 @@ static void configure_if_tx_queues(struct task_args *targ, uint8_t socket)
static void configure_if_rx_queues(struct task_args *targ, uint8_t socket)
{
struct prox_port_cfg *port;
+ uint8_t port_used_counter[PROX_MAX_PORTS] = {0};
+ bool multiple_port_reference = false;
+ uint8_t total_number_of_queues = 0;
+ // Check how many times a port is referenced for this task
+ for (uint8_t i = 0; i < targ->nb_rxports; i++) {
+ uint8_t if_port = targ->rx_port_queue[i].port;
+ port_used_counter[if_port]++;
+ if (port_used_counter[if_port] > 1) {
+ multiple_port_reference = true;
+ port = &prox_port_cfg[if_port];
+ PROX_PANIC((port->all_rx_queues), "Multiple queues defined in rx port, but all_rx_queues also set for port %s\n", port->names[0]);
+ }
+ }
+ // If only referenced once, it is possible that we want to use all queues
+ // Therefore we will check all_rx_queues for that port
+ if (!multiple_port_reference) {
+ for (uint8_t i = 0; i < PROX_MAX_PORTS; i++) {
+ uint8_t if_port = targ->rx_port_queue[i].port;
+ if (port_used_counter[if_port]) {
+ port = &prox_port_cfg[if_port];
+ if (port->all_rx_queues) {
+ port_used_counter[if_port] = port->max_rxq;
+ total_number_of_queues += port->max_rxq;
+ plog_info("\tall_rx_queues for Port %s: %u rx_queues will be applied\n", port->names[0], port_used_counter[if_port]);
+ }
+ }
+ }
+ }
+ if (total_number_of_queues) {
+ PROX_PANIC((total_number_of_queues > PROX_MAX_PORTS), "%u queues using the all_rx_queues. PROX_MAX_PORTS is set to %u\n", total_number_of_queues, PROX_MAX_PORTS);
+ uint8_t index = 0;
+ for (uint8_t i = 0; i < PROX_MAX_PORTS; i++) {
+ if (port_used_counter[i]) {
+ for (uint8_t j = 0; j < port_used_counter[i]; j++) {
+ targ->rx_port_queue[index].port = i;
+ index ++;
+ }
+ port = &prox_port_cfg[i];
+ plog_info("\t\tConfiguring task to use port %s with %u rx_queues\n", port->names[0], port_used_counter[i]);
+ }
+ }
+ targ->nb_rxports = index;
+ }
for (int i = 0; i < targ->nb_rxports; i++) {
uint8_t if_port = targ->rx_port_queue[i].port;
@@ -381,12 +425,12 @@ static void configure_tx_queue_flags(void)
prox_port_cfg[if_port].tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOREFCOUNT;
}
#else
- /* Set the DEV_TX_OFFLOAD_MBUF_FAST_FREE flag if none of
+ /* Set the RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE flag if none of
the tasks up to the task transmitting to the port
use refcnt and per-queue all mbufs comes from the same mempool. */
if (chain_flag_never_set(targ, TASK_FEATURE_TXQ_FLAGS_REFCOUNT)) {
if (chain_flag_never_set(targ, TASK_FEATURE_TXQ_FLAGS_MULTIPLE_MEMPOOL))
- prox_port_cfg[if_port].requested_tx_offload |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ prox_port_cfg[if_port].requested_tx_offload |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
}
#endif
}
@@ -411,7 +455,7 @@ static void configure_multi_segments(void)
#else
// We enable "multi segment" if at least one task requires it in the chain of tasks.
if (chain_flag_sometimes_set(targ, TASK_FEATURE_TXQ_FLAGS_MULTSEGS)) {
- prox_port_cfg[if_port].requested_tx_offload |= DEV_TX_OFFLOAD_MULTI_SEGS;
+ prox_port_cfg[if_port].requested_tx_offload |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
}
#endif
}
@@ -812,7 +856,7 @@ static void setup_mempool_for_rx_task(struct lcore_cfg *lconf, struct task_args
sprintf(name, "core_%u_task_%u_pool", lconf->id, targ->id);
}
- snprintf(memzone_name, sizeof(memzone_name)-1, "MP_%s", targ->pool_name);
+ snprintf(memzone_name, sizeof(memzone_name), "MP_%.*s", (int)(sizeof(memzone_name)-4), targ->pool_name);
mz = rte_memzone_lookup(memzone_name);
if (mz != NULL) {
@@ -947,10 +991,10 @@ static void setup_all_task_structs(void)
while(prox_core_next(&lcore_id, 1) == 0) {
lconf = &lcore_cfg[lcore_id];
- plog_info("\tInitializing struct for core %d with %d task\n", lcore_id, lconf->n_tasks_all);
+ plog_info("\t*** Initializing core %d (%d task) ***\n", lcore_id, lconf->n_tasks_all);
for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
if (!task_is_master(&lconf->targs[task_id])) {
- plog_info("\tInitializing struct for core %d task %d\n", lcore_id, task_id);
+ plog_info("\t\tInitializing struct for core %d task %d\n", lcore_id, task_id);
lconf->targs[task_id].tmaster = tmaster;
lconf->tasks_all[task_id] = init_task_struct(&lconf->targs[task_id]);
}
@@ -1148,7 +1192,7 @@ static void set_term_env(void)
plog_info("\tncurses version = %d.%d (%s)\n", max_ver, min_ver, ncurses_version);
}
- if (((max_ver > 6) || ((max_ver == 6) && (min_ver >= 1))) && (strcmp(old_value, "xterm") == 0)) {
+ if ((old_value) && ((max_ver > 6) || ((max_ver == 6) && (min_ver >= 1))) && (strcmp(old_value, "xterm") == 0)) {
// On recent OSes such as RHEL 8.0, ncurses(6.1) introduced support
// for ECMA-48 repeat character control.
// Some terminal emulators use TERM=xterm but do not support this feature.
@@ -1175,6 +1219,7 @@ int main(int argc, char **argv)
plog_init(prox_cfg.log_name, prox_cfg.log_name_pid);
plog_info("=== " PROGRAM_NAME " %s ===\n", VERSION_STR());
plog_info("\tUsing DPDK %s\n", rte_version() + sizeof(RTE_VER_PREFIX));
+ plog_info("\tgit version %s\n", git_version);
set_term_env();
read_rdt_info();
diff --git a/VNFs/DPPD-PROX/meson.build b/VNFs/DPPD-PROX/meson.build
new file mode 100644
index 00000000..48251e8d
--- /dev/null
+++ b/VNFs/DPPD-PROX/meson.build
@@ -0,0 +1,206 @@
+##
+## Copyright (c) 2021 Heinrich Kuhn <heinrich.kuhn@corigine.com>
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+project('dppd-prox', 'C',
+ version:
+ run_command(['git', 'describe',
+ '--abbrev=8', '--dirty', '--always']).stdout().strip(),
+ license: 'Apache',
+ default_options: ['buildtype=release', 'c_std=gnu99'],
+ meson_version: '>= 0.47'
+)
+
+cc = meson.get_compiler('c')
+
+# Configure options for prox
+# Grab the DPDK version here "manually" as it is not available in the dpdk_dep
+# object
+dpdk_version = run_command('pkg-config', '--modversion', 'libdpdk').stdout()
+
+if get_option('bng_qinq').enabled()
+ add_project_arguments('-DUSE_QINQ', language: 'c')
+endif
+
+if get_option('mpls_routing').enabled()
+ add_project_arguments('-DMPLS_ROUTING', language: 'c')
+endif
+
+if get_option('prox_stats').enabled()
+ add_project_arguments('-DPROX_STATS', language: 'c')
+endif
+
+if get_option('hw_direct_stats').enabled()
+ add_project_arguments('-DPROX_HW_DIRECT_STATS', language: 'c')
+endif
+
+if get_option('dbg')
+ add_project_arguments('-ggdb', language: 'c')
+endif
+
+if get_option('log')
+ add_project_arguments('-DPROX_MAX_LOG_LVL=2', language: 'c')
+endif
+
+if get_option('gen_decap_ipv6_to_ipv4_cksum').enabled()
+ add_project_arguments('-DGEN_DECAP_IPV6_TO_IPV4_CKSUM', language: 'c')
+endif
+
+if get_option('crc') == 'soft'
+ add_project_arguments('-DSOFT_CRC', language: 'c')
+endif
+
+cflags = [
+ '-DPROGRAM_NAME="prox"',
+ '-fno-stack-protector',
+ '-DPROX_PREFETCH_OFFSET=2',
+ '-DLATENCY_PER_PACKET',
+ '-DLATENCY_HISTOGRAM',
+ '-DGRE_TP',
+ '-D_GNU_SOURCE'] # for PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP
+
+# Add configured cflags to arguments
+foreach arg: cflags
+ add_project_arguments(arg, language: 'c')
+endforeach
+
+# enable warning flags if they are supported by the compiler
+warning_flags = [
+ '-Wno-unused',
+ '-Wno-unused-parameter',
+ '-Wno-unused-result',
+ '-Wno-deprecated-declarations']
+
+foreach arg: warning_flags
+ if cc.has_argument(arg)
+ add_project_arguments(arg, language: 'c')
+ endif
+endforeach
+
+# Attempt to find a suitable lua and add to deps
+lua_versions = ['lua', 'lua5.2', 'lua5.3']
+foreach i:lua_versions
+ lua_dep = dependency(i, required: false)
+ if not lua_dep.found()
+ lua_dep = cc.find_library(i, required: false)
+ endif
+ if lua_dep.found()
+ break
+ endif
+endforeach
+if not lua_dep.found()
+ error('Suitable lua version not found')
+endif
+
+has_sym_args = [
+ [ 'HAVE_LIBEDIT_EL_RFUNC_T', 'histedit.h',
+ 'el_rfunc_t' ],
+]
+config = configuration_data()
+foreach arg:has_sym_args
+ config.set(arg[0], cc.has_header_symbol(arg[1], arg[2]))
+endforeach
+configure_file(output : 'libedit_autoconf.h', configuration : config)
+
+# All other dependencies
+dpdk_dep = dependency('libdpdk', required: true)
+tinfo_dep = dependency('tinfo', required: false)
+threads_dep = dependency('threads', required: true)
+pcap_dep = dependency('pcap', required: true)
+ncurses_dep = dependency('ncurses', required: true)
+ncursesw_dep = dependency('ncursesw', required: true)
+libedit_dep = dependency('libedit', required: true)
+math_dep = cc.find_library('m', required : false)
+dl_dep = cc.find_library('dl', required : true)
+
+deps = [dpdk_dep,
+ tinfo_dep,
+ threads_dep,
+ pcap_dep,
+ ncurses_dep,
+ ncursesw_dep,
+ libedit_dep,
+ math_dep,
+ dl_dep,
+ lua_dep]
+
+# Explicitly add these to the dependency list
+deps += [cc.find_library('rte_bus_pci', required: true)]
+deps += [cc.find_library('rte_bus_vdev', required: true)]
+
+if dpdk_version.version_compare('<20.11.0')
+deps += [cc.find_library('rte_pmd_ring', required: true)]
+else
+deps += [cc.find_library('rte_net_ring', required: true)]
+endif
+
+sources = files(
+ 'task_init.c', 'handle_aggregator.c', 'handle_nop.c', 'handle_irq.c',
+ 'handle_arp.c', 'handle_impair.c', 'handle_lat.c', 'handle_qos.c',
+ 'handle_qinq_decap4.c', 'handle_routing.c', 'handle_untag.c',
+ 'handle_mplstag.c', 'handle_qinq_decap6.c',
+ 'handle_lb_qinq.c', 'handle_lb_pos.c', 'handle_lb_net.c',
+ 'handle_qinq_encap4.c', 'handle_qinq_encap6.c', 'handle_classify.c',
+ 'handle_l2fwd.c', 'handle_swap.c', 'handle_police.c', 'handle_acl.c',
+ 'handle_gen.c', 'handle_master.c', 'packet_utils.c', 'handle_mirror.c',
+ 'handle_genl4.c', 'handle_ipv6_tunnel.c', 'handle_read.c',
+ 'handle_cgnat.c', 'handle_nat.c', 'handle_dump.c', 'handle_tsc.c',
+ 'handle_fm.c', 'handle_lb_5tuple.c', 'handle_blockudp.c', 'toeplitz.c',
+ 'thread_nop.c', 'thread_generic.c', 'prox_args.c', 'prox_cfg.c',
+ 'prox_cksum.c', 'prox_port_cfg.c', 'cfgfile.c', 'clock.c',
+ 'commands.c', 'cqm.c', 'msr.c', 'defaults.c', 'display.c',
+ 'display_latency.c', 'display_latency_distr.c', 'display_mempools.c',
+ 'display_ports.c', 'display_rings.c', 'display_priority.c',
+ 'display_pkt_len.c', 'display_l4gen.c', 'display_tasks.c',
+ 'display_irq.c', 'log.c', 'hash_utils.c', 'main.c', 'parse_utils.c',
+ 'file_utils.c', 'run.c', 'input_conn.c', 'input_curses.c', 'rx_pkt.c',
+ 'lconf.c', 'tx_pkt.c', 'expire_cpe.c', 'ip_subnet.c', 'stats_port.c',
+ 'stats_mempool.c', 'stats_ring.c', 'stats_l4gen.c', 'stats_latency.c',
+ 'stats_global.c', 'stats_core.c', 'stats_task.c', 'stats_prio.c',
+ 'stats_irq.c', 'cmd_parser.c', 'input.c', 'prox_shared.c',
+ 'prox_lua_types.c', 'genl4_bundle.c', 'heap.c', 'genl4_stream_tcp.c',
+ 'genl4_stream_udp.c', 'cdf.c', 'stats.c', 'stats_cons_log.c',
+ 'stats_cons_cli.c', 'stats_parser.c', 'hash_set.c', 'prox_lua.c',
+ 'prox_malloc.c', 'prox_ipv6.c', 'prox_compat.c', 'handle_nsh.c')
+
+sources += files('rw_reg.c')
+
+# Include a couple of source files depending on DPDK support
+if cc.find_library('rte_crypto_ipsec_mb', required: false).found()
+ add_project_arguments('-DRTE_LIBRTE_PMD_AESNI_MB', language: 'c')
+ sources += files('handle_esp.c')
+else
+ warning('Building w/o IPSEC support')
+endif
+
+if cc.find_library('rte_pipeline', required: false).found()
+ sources += files('handle_pf_acl.c', 'thread_pipeline.c')
+endif
+
+# Generate the git_version.c file and add to sources
+git_version = configuration_data()
+git_version.set('GIT_VERSION', '@0@'.format(meson.project_version()))
+git_version_c = configure_file(input: 'git_version.c.in',
+ output: 'git_version.c',
+ configuration: git_version)
+
+git_version_file = join_paths(meson.current_build_dir(), 'git_version.c')
+sources += files(git_version_file)
+
+executable('prox',
+ sources,
+ c_args: cflags,
+ dependencies: deps,
+ install: true)
diff --git a/VNFs/DPPD-PROX/meson_options.txt b/VNFs/DPPD-PROX/meson_options.txt
new file mode 100644
index 00000000..afc2be7e
--- /dev/null
+++ b/VNFs/DPPD-PROX/meson_options.txt
@@ -0,0 +1,9 @@
+#Keep the options sorted alphabetically
+option('bng_qinq', type: 'feature', value: 'enabled')
+option('crc', type: 'string', value: 'hard')
+option('dbg', type: 'boolean', value: false)
+option('gen_decap_ipv6_to_ipv4_cksum', type: 'feature', value: 'enabled')
+option('hw_direct_stats', type: 'feature', value: 'enabled')
+option('log', type: 'boolean', value: true)
+option('mpls_routing', type: 'feature', value: 'enabled')
+option('prox_stats', type: 'feature', value: 'enabled')
diff --git a/VNFs/DPPD-PROX/packet_utils.c b/VNFs/DPPD-PROX/packet_utils.c
index 9832a039..95ce7abc 100644
--- a/VNFs/DPPD-PROX/packet_utils.c
+++ b/VNFs/DPPD-PROX/packet_utils.c
@@ -46,7 +46,7 @@ static inline int find_ip(struct ether_hdr_arp *pkt, uint16_t len, uint32_t *ip_
*vlan = 0;
// Unstack VLAN tags
- while (((ether_type == ETYPE_8021ad) || (ether_type == ETYPE_VLAN)) && (l2_len + sizeof(prox_rte_vlan_hdr) < len)) {
+ while (((ether_type == ETYPE_VLAN) || (ether_type == ETYPE_8021ad)) && (l2_len + sizeof(prox_rte_vlan_hdr) < len)) {
vlan_hdr = (prox_rte_vlan_hdr *)((uint8_t *)pkt + l2_len);
l2_len +=4;
ether_type = vlan_hdr->eth_proto;
@@ -81,11 +81,11 @@ static inline int find_ip(struct ether_hdr_arp *pkt, uint16_t len, uint32_t *ip_
return -1;
}
-static inline struct ipv6_addr *find_ip6(prox_rte_ether_hdr *pkt, uint16_t len, struct ipv6_addr *ip_dst, uint16_t *vlan)
+static inline void find_vlan(struct ether_hdr_arp *pkt, uint16_t len, uint16_t *vlan)
{
prox_rte_vlan_hdr *vlan_hdr;
- prox_rte_ipv6_hdr *ip;
- uint16_t ether_type = pkt->ether_type;
+ prox_rte_ether_hdr *eth_hdr = (prox_rte_ether_hdr*)pkt;
+ uint16_t ether_type = eth_hdr->ether_type;
uint16_t l2_len = sizeof(prox_rte_ether_hdr);
*vlan = 0;
@@ -94,29 +94,29 @@ static inline struct ipv6_addr *find_ip6(prox_rte_ether_hdr *pkt, uint16_t len,
vlan_hdr = (prox_rte_vlan_hdr *)((uint8_t *)pkt + l2_len);
l2_len +=4;
ether_type = vlan_hdr->eth_proto;
- *vlan = rte_be_to_cpu_16(vlan_hdr->vlan_tci & 0xFF0F); // Store VLAN, or CVLAN if QinQ
+ *vlan = rte_be_to_cpu_16(vlan_hdr->vlan_tci & 0xFF0F); // Store VLAN, or CVLAN if QinQ
}
+}
- switch (ether_type) {
- case ETYPE_MPLSU:
- case ETYPE_MPLSM:
- // In case of MPLS, next hop MAC is based on MPLS, not destination IP
- l2_len = 0;
- break;
- case ETYPE_IPv4:
- case ETYPE_EoGRE:
- case ETYPE_ARP:
- l2_len = 0;
- break;
- case ETYPE_IPv6:
- break;
- default:
- l2_len = 0;
- plog_warn("Unsupported packet type %x - CRC might be wrong\n", ether_type);
- break;
- }
+static inline struct ipv6_addr *find_ip6(prox_rte_ether_hdr *pkt, uint16_t len, struct ipv6_addr *ip_dst, uint16_t *vlan)
+{
+ uint16_t ether_type = pkt->ether_type;
+ uint16_t l2_len = sizeof(prox_rte_ether_hdr);
+ *vlan = 0;
- if (l2_len && (l2_len + sizeof(prox_rte_ipv6_hdr) <= len)) {
+ if ((ether_type == ETYPE_VLAN) || (ether_type == ETYPE_8021ad)) {
+ prox_rte_vlan_hdr *vlan_hdr = (prox_rte_vlan_hdr *)((uint8_t *)pkt + l2_len);
+ ether_type = vlan_hdr->eth_proto;
+ l2_len +=4;
+ *vlan = rte_be_to_cpu_16(vlan_hdr->vlan_tci & 0xFF0F);
+ if (ether_type == ETYPE_VLAN) {
+ vlan_hdr = (prox_rte_vlan_hdr *)(vlan_hdr + 1);
+ ether_type = vlan_hdr->eth_proto;
+ l2_len +=4;
+ *vlan = rte_be_to_cpu_16(vlan_hdr->vlan_tci & 0xFF0F);
+ }
+ }
+ if ((ether_type == ETYPE_IPv6) && (l2_len + sizeof(prox_rte_ipv6_hdr) <= len)) {
prox_rte_ipv6_hdr *ip = (prox_rte_ipv6_hdr *)((uint8_t *)pkt + l2_len);
// TODO: implement LPM => replace ip_dst by next hop IP DST
memcpy(ip_dst, &ip->dst_addr, sizeof(struct ipv6_addr));
@@ -125,20 +125,38 @@ static inline struct ipv6_addr *find_ip6(prox_rte_ether_hdr *pkt, uint16_t len,
return NULL;
}
-static void send_unsollicited_neighbour_advertisement(struct task_base *tbase, struct task_args *targ)
+void send_unsollicited_neighbour_advertisement(struct task_base *tbase)
{
int ret;
uint8_t out = 0, port_id = tbase->l3.reachable_port_id;
- struct rte_mbuf *mbuf;
-
- ret = rte_mempool_get(tbase->l3.arp_nd_pool, (void **)&mbuf);
- if (likely(ret == 0)) {
- mbuf->port = port_id;
- build_neighbour_advertisement(tbase->l3.tmaster, mbuf, &prox_port_cfg[port_id].eth_addr, &targ->local_ipv6, PROX_UNSOLLICITED);
- tbase->aux->tx_ctrlplane_pkt(tbase, &mbuf, 1, &out);
- TASK_STATS_ADD_TX_NON_DP(&tbase->aux->stats, 1);
- } else {
- plog_err("Failed to get a mbuf from arp/ndp mempool\n");
+ struct rte_mbuf *mbuf = NULL;
+
+ if (*(__int128 *)(&tbase->l3.local_ipv6) != 0) {
+ ret = rte_mempool_get(tbase->l3.arp_nd_pool, (void **)&mbuf);
+ if (likely(ret == 0)) {
+ mbuf->port = port_id;
+ build_neighbour_advertisement(tbase->l3.tmaster, mbuf, &prox_port_cfg[port_id].eth_addr, &tbase->l3.local_ipv6, PROX_UNSOLLICITED, prox_port_cfg[port_id].vlan_tags[0]);
+ tbase->aux->tx_ctrlplane_pkt(tbase, &mbuf, 1, &out);
+ TASK_STATS_ADD_TX_NON_DP(&tbase->aux->stats, 1);
+ } else {
+ plog_err("Failed to get a mbuf from arp/ndp mempool\n");
+ return;
+ }
+ }
+ if (*(__int128 *)(&tbase->l3.global_ipv6) != 0) {
+ ret = rte_mempool_get(tbase->l3.arp_nd_pool, (void **)&mbuf);
+ if (likely(ret == 0)) {
+ mbuf->port = port_id;
+ build_neighbour_advertisement(tbase->l3.tmaster, mbuf, &prox_port_cfg[port_id].eth_addr, &tbase->l3.global_ipv6, PROX_UNSOLLICITED, prox_port_cfg[port_id].vlan_tags[0]);
+ tbase->aux->tx_ctrlplane_pkt(tbase, &mbuf, 1, &out);
+ TASK_STATS_ADD_TX_NON_DP(&tbase->aux->stats, 1);
+ } else {
+ plog_err("Failed to get a mbuf from arp/ndp mempool\n");
+ return;
+ }
+ }
+ if (mbuf == NULL) {
+		plog_err("No Neighbour Advertisement sent: no local or global IPv6 address configured\n");
}
}
@@ -151,7 +169,7 @@ static void send_router_sollicitation(struct task_base *tbase, struct task_args
ret = rte_mempool_get(tbase->l3.arp_nd_pool, (void **)&mbuf);
if (likely(ret == 0)) {
mbuf->port = port_id;
- build_router_sollicitation(mbuf, &prox_port_cfg[port_id].eth_addr, &targ->local_ipv6);
+ build_router_sollicitation(mbuf, &prox_port_cfg[port_id].eth_addr, &targ->local_ipv6, prox_port_cfg[port_id].vlan_tags[0]);
tbase->aux->tx_ctrlplane_pkt(tbase, &mbuf, 1, &out);
TASK_STATS_ADD_TX_NON_DP(&tbase->aux->stats, 1);
} else {
@@ -253,7 +271,10 @@ int write_dst_mac(struct task_base *tbase, struct rte_mbuf *mbuf, uint32_t *ip_d
}
// No Routing table specified: only a local ip and maybe a gateway
// Old default behavior: if a gw is specified, ALL packets go to this gateway (even those we could send w/o the gw
+
+ uint16_t len = rte_pktmbuf_pkt_len(mbuf);
if (l3->gw.ip) {
+ find_vlan(packet, len, vlan);
if (likely((l3->flags & FLAG_DST_MAC_KNOWN) && (tsc < l3->gw.arp_ndp_retransmit_timeout) && (tsc < l3->gw.reachable_timeout))) {
memcpy(mac, &l3->gw.mac, sizeof(prox_rte_ether_addr));
return SEND_MBUF;
@@ -278,7 +299,6 @@ int write_dst_mac(struct task_base *tbase, struct rte_mbuf *mbuf, uint32_t *ip_d
}
}
- uint16_t len = rte_pktmbuf_pkt_len(mbuf);
if (find_ip(packet, len, ip_dst, vlan) != 0) {
// Unable to find IP address => non IP packet => send it as it
return SEND_MBUF;
@@ -328,14 +348,13 @@ int write_dst_mac(struct task_base *tbase, struct rte_mbuf *mbuf, uint32_t *ip_d
return DROP_MBUF;
}
-int write_ip6_dst_mac(struct task_base *tbase, struct rte_mbuf *mbuf, struct ipv6_addr *ip_dst, uint16_t *vlan)
+int write_ip6_dst_mac(struct task_base *tbase, struct rte_mbuf *mbuf, struct ipv6_addr *ip_dst, uint16_t *vlan, uint64_t tsc)
{
const uint64_t hz = rte_get_tsc_hz();
prox_rte_ether_hdr *packet = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
prox_rte_ether_addr *mac = &packet->d_addr;
struct ipv6_addr *used_ip_src;
- uint64_t tsc = rte_rdtsc();
uint16_t len = rte_pktmbuf_pkt_len(mbuf);
struct ipv6_addr *pkt_src_ip6;
@@ -344,26 +363,35 @@ int write_ip6_dst_mac(struct task_base *tbase, struct rte_mbuf *mbuf, struct ipv
return SEND_MBUF;
}
struct l3_base *l3 = &(tbase->l3);
- if (memcmp(&l3->local_ipv6, ip_dst, 8) == 0) {
+
+ // Configure source IP
+ if (*(uint64_t *)(&l3->local_ipv6) == *(uint64_t *)ip_dst) {
// Same prefix as local -> use local
used_ip_src = &l3->local_ipv6;
- } else if (memcmp(&l3->global_ipv6 , &null_addr, 16) != 0) {
+ } else if (*(uint64_t *)(&l3->global_ipv6) == *(uint64_t *)ip_dst) {
+ // Same prefix as global -> use global
+ used_ip_src = &l3->global_ipv6;
+ } else if (*(__int128 *)(&l3->gw.ip6) != 0) {
+ used_ip_src = &l3->global_ipv6;
+ memcpy(ip_dst, &l3->gw.ip6, sizeof(struct ipv6_addr));
+ } else if (*(__int128 *)(&l3->global_ipv6) != 0) {
// Global IP is defined -> use it
used_ip_src = &l3->global_ipv6;
} else {
plog_info("Error as trying to send a packet to "IPv6_BYTES_FMT" using "IPv6_BYTES_FMT" (local)\n", IPv6_BYTES(ip_dst->bytes), IPv6_BYTES(l3->local_ipv6.bytes));
return DROP_MBUF;
}
+ rte_memcpy(pkt_src_ip6, used_ip_src, sizeof(struct ipv6_addr));
- memcpy(pkt_src_ip6, used_ip_src, sizeof(struct ipv6_addr));
+ // Configure dst mac
if (likely(l3->n_pkts < 4)) {
for (unsigned int idx = 0; idx < l3->n_pkts; idx++) {
- if (memcmp(ip_dst, &l3->optimized_arp_table[idx].ip6, sizeof(struct ipv6_addr)) == 0) {
+ if (*(__int128 *)ip_dst == *(__int128 *)(&l3->optimized_arp_table[idx].ip6)) {
// IP address already in table
if ((tsc < l3->optimized_arp_table[idx].arp_ndp_retransmit_timeout) && (tsc < l3->optimized_arp_table[idx].reachable_timeout)) {
// MAC address was recently updated in table, use it
// plog_dbg("Valid MAC address found => send packet\n");
- memcpy(mac, &l3->optimized_arp_table[idx].mac, sizeof(prox_rte_ether_addr));
+ rte_memcpy(mac, &l3->optimized_arp_table[idx].mac, sizeof(prox_rte_ether_addr));
return SEND_MBUF;
} else if (tsc > l3->optimized_arp_table[idx].arp_ndp_retransmit_timeout) {
// NDP not sent since a long time, send NDP
@@ -464,27 +492,29 @@ void task_init_l3(struct task_base *tbase, struct task_args *targ)
.key_len = sizeof(uint32_t),
.hash_func = rte_hash_crc,
.hash_func_init_val = 0,
+ .socket_id = socket_id,
};
if (targ->flags & TASK_ARG_L3) {
- plog_info("\tInitializing L3 (IPv4)\n");
+ plog_info("\t\tInitializing L3 (IPv4)\n");
tbase->l3.ip_hash = rte_hash_create(&hash_params);
PROX_PANIC(tbase->l3.ip_hash == NULL, "Failed to set up ip hash table\n");
hash_name[0]++;
}
if (targ->flags & TASK_ARG_NDP) {
- plog_info("\tInitializing NDP (IPv6)\n");
+ plog_info("\t\tInitializing NDP (IPv6)\n");
hash_params.key_len = sizeof(struct ipv6_addr);
tbase->l3.ip6_hash = rte_hash_create(&hash_params);
PROX_PANIC(tbase->l3.ip6_hash == NULL, "Failed to set up ip hash table\n");
}
tbase->l3.arp_table = (struct arp_table *)prox_zmalloc(n_entries * sizeof(struct arp_table), socket_id);
PROX_PANIC(tbase->l3.arp_table == NULL, "Failed to allocate memory for %u entries in arp/ndp table\n", n_entries);
- plog_info("\tarp/ndp table, with %d entries of size %ld\n", n_entries, sizeof(struct l3_base));
+ plog_info("\t\tarp/ndp table, with %d entries of size %ld\n", n_entries, sizeof(struct l3_base));
targ->lconf->ctrl_func_p[targ->task] = handle_ctrl_plane_pkts;
targ->lconf->ctrl_timeout = freq_to_tsc(targ->ctrl_freq);
tbase->l3.gw.ip = rte_cpu_to_be_32(targ->gateway_ipv4);
+ memcpy(&tbase->l3.gw.ip6, &targ->gateway_ipv6, sizeof(struct ipv6_addr));
tbase->flags |= TASK_L3;
tbase->l3.core_id = targ->lconf->id;
tbase->l3.task_id = targ->id;
@@ -511,26 +541,28 @@ void task_start_l3(struct task_base *tbase, struct task_args *targ)
if (port && (tbase->l3.arp_nd_pool == NULL)) {
static char name[] = "arp0_pool";
tbase->l3.reachable_port_id = port - prox_port_cfg;
- if ((targ->local_ipv4 && port->ip) && (targ->local_ipv4 != port->ip)) {
- PROX_PANIC(1, "local_ipv4 in core section ("IPv4_BYTES_FMT") differs from port section ("IPv4_BYTES_FMT")\n", IP4(rte_be_to_cpu_32(targ->local_ipv4)), IP4(rte_be_to_cpu_32(port->ip)));
+ if ((targ->local_ipv4 && port->ip_addr[0].ip) && (targ->local_ipv4 != port->ip_addr[0].ip)) {
+ PROX_PANIC(1, "local_ipv4 in core section ("IPv4_BYTES_FMT") differs from port section ("IPv4_BYTES_FMT")\n", IP4(rte_be_to_cpu_32(targ->local_ipv4)), IP4(rte_be_to_cpu_32(port->ip_addr[0].ip)));
}
- if ((targ->local_ipv4 && port->ip) && (targ->local_prefix != port->prefix)) {
- PROX_PANIC(1, "local_ipv4 prefix in core section (%d) differs from port section (%d)\n", targ->local_prefix, port->prefix);
+ if ((targ->local_ipv4 && port->ip_addr[0].ip) && (targ->local_prefix != port->ip_addr[0].prefix)) {
+ PROX_PANIC(1, "local_ipv4 prefix in core section (%d) differs from port section (%d)\n", targ->local_prefix, port->ip_addr[0].prefix);
}
- if (!targ->local_ipv4) {
- targ->local_ipv4 = port->ip;
- targ->local_prefix = port->prefix;
- plog_info("Setting core local_ipv4 from port %d local_ipv4 to "IPv4_BYTES_FMT"\n", tbase->l3.reachable_port_id, IP4(rte_be_to_cpu_32(port->ip)));
+ if (!port->ip_addr[0].ip && targ->local_ipv4) {
+ port->ip_addr[0].ip = targ->local_ipv4;
+ port->ip_addr[0].prefix = targ->local_prefix;
+ port->n_vlans = 1;
+ port->vlan_tags[0] = 0;
+ plog_info("Setting port local_ipv4 from core %d local_ipv4 to "IPv4_BYTES_FMT"\n", tbase->l3.reachable_port_id, IP4(rte_be_to_cpu_32(port->ip_addr[0].ip)));
}
- if (targ->local_ipv4) {
- tbase->l3.local_ipv4 = rte_be_to_cpu_32(targ->local_ipv4);
- register_ip_to_ctrl_plane(tbase->l3.tmaster, tbase->l3.local_ipv4, tbase->l3.reachable_port_id, targ->lconf->id, targ->id);
+ for (int vlan_id = 0; vlan_id < port->n_vlans; vlan_id++) {
+ if (port->ip_addr[vlan_id].ip)
+ register_ip_to_ctrl_plane(tbase->l3.tmaster, rte_be_to_cpu_32(port->ip_addr[vlan_id].ip), tbase->l3.reachable_port_id, targ->lconf->id, targ->id);
}
if (strcmp(targ->route_table, "") != 0) {
struct lpm4 *lpm;
int ret;
- PROX_PANIC(tbase->l3.local_ipv4 == 0, "missing local_ipv4 while route table is specified in L3 mode\n");
+ PROX_PANIC(port->n_vlans == 0, "missing local_ipv4 while route table is specified in L3 mode\n");
// LPM might be modified runtime => do not share with other cores
ret = lua_to_lpm4(prox_lua(), GLOBAL, targ->route_table, socket_id, &lpm);
@@ -553,10 +585,13 @@ void task_start_l3(struct task_base *tbase, struct task_args *targ)
}
plog_info("Using routing table %s in l3 mode, with %d gateways\n", targ->route_table, tbase->l3.nb_gws);
- // Last but one "next_hop_index" is not a gateway but direct routes
- tbase->l3.next_hops[tbase->l3.nb_gws].ip = 0;
- ret = rte_lpm_add(tbase->l3.ipv4_lpm, targ->local_ipv4, targ->local_prefix, tbase->l3.nb_gws++);
- PROX_PANIC(ret, "Failed to add local_ipv4 "IPv4_BYTES_FMT"/%d to lpm\n", IP4(tbase->l3.local_ipv4), targ->local_prefix);
+	// The last n_vlans "next_hop_index" entries before the default gw are direct routes, not gateways
+ for (int vlan_id = 0; vlan_id < port->n_vlans; vlan_id++) {
+ tbase->l3.next_hops[tbase->l3.nb_gws].ip = 0;
+ ret = rte_lpm_add(tbase->l3.ipv4_lpm, port->ip_addr[vlan_id].ip, port->ip_addr[vlan_id].prefix, tbase->l3.nb_gws++);
+ PROX_PANIC(ret, "Failed to add local_ipv4 "IPv4_BYTES_FMT"/%d to lpm\n", IP4(port->ip_addr[vlan_id].ip), port->ip_addr[vlan_id].prefix);
+ }
+
// Last "next_hop_index" is default gw
tbase->l3.next_hops[tbase->l3.nb_gws].ip = rte_bswap32(targ->gateway_ipv4);
if (targ->gateway_ipv4) {
@@ -569,7 +604,7 @@ void task_start_l3(struct task_base *tbase, struct task_args *targ)
// Create IPv6 addr if none were configured
if (targ->flags & TASK_ARG_NDP) {
- if (!memcmp(&targ->local_ipv6, &null_addr, 16)) {
+ if (!memcmp(&targ->local_ipv6, &null_addr, sizeof(struct ipv6_addr))) {
set_link_local(&targ->local_ipv6);
set_EUI(&targ->local_ipv6, &port->eth_addr);
}
@@ -607,7 +642,7 @@ void task_start_l3(struct task_base *tbase, struct task_args *targ)
}
if ((targ->flags & TASK_ARG_NDP) && (targ->flags & TASK_ARG_SEND_NA_AT_STARTUP)) {
plog_info("Sending unsollicited Neighbour Advertisement\n");
- send_unsollicited_neighbour_advertisement(tbase, targ);
+ send_unsollicited_neighbour_advertisement(tbase);
}
}
@@ -619,11 +654,6 @@ void task_set_gateway_ip(struct task_base *tbase, uint32_t ip)
tbase->flags &= ~FLAG_DST_MAC_KNOWN;
}
-void task_set_local_ip(struct task_base *tbase, uint32_t ip)
-{
- tbase->l3.local_ipv4 = ip;
-}
-
static void reset_arp_ndp_retransmit_timeout(struct l3_base *l3, uint32_t ip)
{
uint32_t idx;
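The reworked write_ip6_dst_mac() above selects the packet's IPv6 source address by comparing the first 64 bits of the destination against the local (link-local) and global addresses, then falls back to the configured IPv6 gateway. A standalone sketch of that decision follows; the ip6_t type, the select_ip6_src() name and the memcmp-based zero test (the patch uses __int128 loads) are illustrative only.

	/* Minimal sketch of the source-address selection done in write_ip6_dst_mac(). */
	#include <stdint.h>
	#include <string.h>

	typedef struct { uint8_t bytes[16]; } ip6_t;	/* stand-in for struct ipv6_addr */

	static int is_zero(const ip6_t *a)
	{
		ip6_t z = {{0}};
		return memcmp(a, &z, sizeof(z)) == 0;
	}

	static int same_prefix64(const ip6_t *a, const ip6_t *b)
	{
		return memcmp(a->bytes, b->bytes, 8) == 0;	/* compare the /64 prefix */
	}

	/* Returns the address to use as IPv6 source, or NULL to drop the packet.
	 * May rewrite *dst with the gateway, mirroring the patch. */
	static const ip6_t *select_ip6_src(const ip6_t *local, const ip6_t *global,
					   const ip6_t *gw, ip6_t *dst)
	{
		if (same_prefix64(local, dst))		/* on-link, link-local prefix */
			return local;
		if (same_prefix64(global, dst))		/* on-link, global prefix */
			return global;
		if (!is_zero(gw)) {			/* off-link: route via the IPv6 gateway */
			memcpy(dst, gw, sizeof(*dst));
			return global;
		}
		if (!is_zero(global))			/* last resort: a global address is defined */
			return global;
		return NULL;				/* nothing usable: caller drops the mbuf */
	}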
diff --git a/VNFs/DPPD-PROX/packet_utils.h b/VNFs/DPPD-PROX/packet_utils.h
index e1b262dc..ef15cd22 100644
--- a/VNFs/DPPD-PROX/packet_utils.h
+++ b/VNFs/DPPD-PROX/packet_utils.h
@@ -79,10 +79,11 @@ struct l3_base {
void task_init_l3(struct task_base *tbase, struct task_args *targ);
void task_start_l3(struct task_base *tbase, struct task_args *targ);
int write_dst_mac(struct task_base *tbase, struct rte_mbuf *mbuf, uint32_t *ip_dst, uint16_t *vlan, uint64_t **time, uint64_t tsc);
-int write_ip6_dst_mac(struct task_base *tbase, struct rte_mbuf *mbuf, struct ipv6_addr *ip_dst, uint16_t *vlan);
+int write_ip6_dst_mac(struct task_base *tbase, struct rte_mbuf *mbuf, struct ipv6_addr *ip_dst, uint16_t *vlan, uint64_t tsc);
void task_set_gateway_ip(struct task_base *tbase, uint32_t ip);
void task_set_local_ip(struct task_base *tbase, uint32_t ip);
void handle_ctrl_plane_pkts(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts);
+void send_unsollicited_neighbour_advertisement(struct task_base *tbase);
static inline void update_arp_ndp_retransmit_timeout(struct l3_base *l3, uint64_t *ptr, uint32_t base)
{
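The prototype change above goes together with dropping the rte_rdtsc() call inside write_ip6_dst_mac(): callers now sample the TSC once and pass it down. A hedged sketch of that calling pattern; handle_ipv6_burst() and the include set are assumptions, only the write_ip6_dst_mac() signature comes from the patch.

	#include <rte_cycles.h>
	#include <rte_mbuf.h>
	#include "prox_ipv6.h"		/* struct ipv6_addr (assumed location) */
	#include "packet_utils.h"	/* write_ip6_dst_mac() */

	static int handle_ipv6_burst(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
	{
		struct ipv6_addr ip_dst;
		uint16_t vlan;
		const uint64_t tsc = rte_rdtsc();	/* one timestamp shared by the whole burst */
		int verdict = 0;			/* last SEND_MBUF / DROP_MBUF verdict */

		for (uint16_t i = 0; i < n_pkts; i++)
			verdict = write_ip6_dst_mac(tbase, mbufs[i], &ip_dst, &vlan, tsc);
		return verdict;
	}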
diff --git a/VNFs/DPPD-PROX/parse_utils.c b/VNFs/DPPD-PROX/parse_utils.c
index 9ceb1c59..8d846fd3 100644
--- a/VNFs/DPPD-PROX/parse_utils.c
+++ b/VNFs/DPPD-PROX/parse_utils.c
@@ -846,6 +846,72 @@ int parse_task_set(struct core_task_set *cts, const char *str2)
return 0;
}
+int parse_ip_set(struct ip4_subnet *list, const char *str2, uint32_t max_list)
+{
+ char str[MAX_STR_LEN_PROC];
+ char *parts[MAX_STR_LEN_PROC];
+ int n = 0, rc;
+
+ if (parse_vars(str, sizeof(str), str2))
+ return -1;
+ int n_parts = rte_strsplit(str, strlen(str), parts, MAX_STR_LEN_PROC, ',');
+ for (int i = 0; i < n_parts; i++) {
+ if ((rc = parse_ip4_and_prefix(&list[i], parts[i])) < 0) {
+ set_errf("Unable to parse ip4/prefix");
+ return -1;
+ }
+ }
+ return 0;
+}
+
+int parse_int_set(uint32_t *list, const char *str2, uint32_t max_list)
+{
+ char str[MAX_STR_LEN_PROC];
+ char *parts[MAX_STR_LEN_PROC];
+ uint32_t n = 0;
+
+ if (parse_vars(str, sizeof(str), str2))
+ return -1;
+
+ int n_parts = rte_strsplit(str, strlen(str), parts, MAX_STR_LEN_PROC, ',');
+ for (int i = 0; i < n_parts; i++) {
+ char *cur_part = parts[i];
+ char *sub_parts[3];
+ int n_sub_parts = rte_strsplit(cur_part, strlen(cur_part), sub_parts, 3, '-');
+ uint32_t n1, n2;
+ int ret = 0;
+
+ if (n_sub_parts == 1) {
+ if (n >= max_list - 1) {
+ set_errf("Too many entries\n");
+ return -1;
+ }
+ if (parse_int(&list[n], sub_parts[0]))
+ return -1;
+ n++;
+ } else if (n_sub_parts == 2) {
+ if (parse_int(&n1, sub_parts[0]))
+ return -1;
+ if (parse_int(&n2, sub_parts[1]))
+ return -1;
+ if (n + n2 - n1 >= max_list) {
+ set_errf("Too many entries\n");
+ return -1;
+ }
+ for (uint32_t j = n1; j < n2; j++) {
+ list[n++] = j;
+ }
+ } else if (n_sub_parts >= 3) {
+			set_errf("Multiple '-' characters found in range syntax");
+ return -1;
+ } else {
+ set_errf("Invalid list syntax");
+ return -1;
+ }
+ }
+ return 0;
+}
+
int parse_list_set(uint32_t *list, const char *str2, uint32_t max_list)
{
char str[MAX_STR_LEN_PROC];
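parse_int_set() above accepts a comma-separated list of values, where each element may be a single integer or an "n1-n2" range. A standalone sketch of that syntax using strtoul instead of PROX's parse_int/rte_strsplit helpers; note that, as written in the patch, the upper bound of a range is exclusive (the loop runs while j < n2).

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <stdint.h>

	static int parse_int_set_sketch(uint32_t *list, size_t max_list, const char *in)
	{
		char buf[256], *save = NULL;
		size_t n = 0;

		snprintf(buf, sizeof(buf), "%s", in);
		for (char *tok = strtok_r(buf, ",", &save); tok; tok = strtok_r(NULL, ",", &save)) {
			char *dash = strchr(tok, '-');
			uint32_t n1 = strtoul(tok, NULL, 0);
			uint32_t n2 = dash ? strtoul(dash + 1, NULL, 0) : n1 + 1;
			for (uint32_t j = n1; j < n2 && n < max_list; j++)
				list[n++] = j;
		}
		return (int)n;	/* number of entries written */
	}

	int main(void)
	{
		uint32_t vlans[8];
		int n = parse_int_set_sketch(vlans, 8, "100,200-203");
		for (int i = 0; i < n; i++)
			printf("%u ", vlans[i]);	/* prints: 100 200 201 202 */
		putchar('\n');
		return 0;
	}

This is the syntax now usable for the "vlan tag" port option, which is parsed with parse_int_set() further down in prox_args.c.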
diff --git a/VNFs/DPPD-PROX/parse_utils.h b/VNFs/DPPD-PROX/parse_utils.h
index 5671e377..03c03188 100644
--- a/VNFs/DPPD-PROX/parse_utils.h
+++ b/VNFs/DPPD-PROX/parse_utils.h
@@ -64,6 +64,8 @@ int parse_bool(uint32_t* val, const char *str);
int parse_flag(uint32_t* val, uint32_t flag, const char *str);
int parse_list_set(uint32_t *list, const char *str, uint32_t max_limit);
+int parse_ip_set(struct ip4_subnet *list, const char *str2, uint32_t max_list);
+int parse_int_set(uint32_t *list, const char *str2, uint32_t max_list);
int parse_task_set(struct core_task_set *val, const char *str);
diff --git a/VNFs/DPPD-PROX/prox_args.c b/VNFs/DPPD-PROX/prox_args.c
index d89481ab..dc212494 100644
--- a/VNFs/DPPD-PROX/prox_args.c
+++ b/VNFs/DPPD-PROX/prox_args.c
@@ -38,9 +38,10 @@
#include "defines.h"
#include "prox_ipv6.h"
#include "prox_compat.h"
+#include "ip_subnet.h"
#define MAX_RTE_ARGV 64
-#define MAX_ARG_LEN 64
+#define MAX_ARG_LEN 256
struct cfg_depr {
const char *opt;
@@ -531,7 +532,7 @@ static int get_port_cfg(unsigned sindex, char *str, void *data)
}
else if (STR_EQ(str, "name")) {
uint32_t val;
- prox_strncpy(cfg->name, pkey, MAX_NAME_SIZE);
+ prox_strncpy(cfg->names[0], pkey, MAX_NAME_SIZE);
PROX_ASSERT(cur_if < PROX_MAX_PORTS);
return add_port_name(cur_if, pkey);
}
@@ -541,6 +542,16 @@ static int get_port_cfg(unsigned sindex, char *str, void *data)
else if (STR_EQ(str, "tx desc")) {
return parse_int(&cfg->n_txd, pkey);
}
+ else if (STR_EQ(str, "ipv6 mask length")) {
+ return parse_int(&cfg->v6_mask_length, pkey);
+ }
+ else if (STR_EQ(str, "all_rx_queues")) {
+ uint32_t val;
+ if (parse_bool(&val, pkey)) {
+ return -1;
+ }
+ cfg->all_rx_queues = val;
+ }
else if (STR_EQ(str, "promiscuous")) {
uint32_t val;
if (parse_bool(&val, pkey)) {
@@ -568,15 +579,18 @@ static int get_port_cfg(unsigned sindex, char *str, void *data)
cfg->lsc_val = val;
}
else if (STR_EQ(str, "local ipv4")) {
- struct ip4_subnet cidr;
- if (parse_ip4_and_prefix(&cidr, pkey) != 0) {
- cfg->prefix = 24;
- return parse_ip(&cfg->ip, pkey);
- } else {
- cfg->ip = cidr.ip;
- cfg->prefix = cidr.prefix;
- return 0;
+ if (parse_ip_set(cfg->ip_addr, pkey, PROX_MAX_VLAN_TAGS) != 0) {
+			cfg->ip_addr[0].prefix = 24;
+ return parse_ip(&cfg->ip_addr[0].ip, pkey);
+ }
+ return 0;
+ }
+ else if (STR_EQ(str, "virtual")) {
+ uint32_t val;
+ if (parse_bool(&val, pkey)) {
+ return -1;
}
+ cfg->virtual = val;
}
else if (STR_EQ(str, "vdev")) {
prox_strncpy(cfg->vdev, pkey, MAX_NAME_SIZE);
@@ -596,23 +610,23 @@ static int get_port_cfg(unsigned sindex, char *str, void *data)
if (parse_bool(&val, pkey)) {
return -1;
}
-#if defined(DEV_RX_OFFLOAD_CRC_STRIP)
+#if defined(RTE_ETH_RX_OFFLOAD_CRC_STRIP)
if (val)
- cfg->requested_rx_offload |= DEV_RX_OFFLOAD_CRC_STRIP;
+ cfg->requested_rx_offload |= RTE_ETH_RX_OFFLOAD_CRC_STRIP;
else
- cfg->requested_rx_offload &= ~DEV_RX_OFFLOAD_CRC_STRIP;
+ cfg->requested_rx_offload &= ~RTE_ETH_RX_OFFLOAD_CRC_STRIP;
#else
-#if defined (DEV_RX_OFFLOAD_KEEP_CRC)
+#if defined (RTE_ETH_RX_OFFLOAD_KEEP_CRC)
if (val)
- cfg->requested_rx_offload &= ~DEV_RX_OFFLOAD_KEEP_CRC;
+ cfg->requested_rx_offload &= ~RTE_ETH_RX_OFFLOAD_KEEP_CRC;
else
+ cfg->requested_rx_offload |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
#endif
- cfg->requested_rx_offload |= DEV_RX_OFFLOAD_KEEP_CRC;
#endif
}
else if (STR_EQ(str, "vlan tag")) {
- return parse_int(&cfg->vlan_tag, pkey);
+ return parse_int_set(cfg->vlan_tags, pkey, sizeof(cfg->vlan_tags) / sizeof(cfg->vlan_tags[0]));
}
else if (STR_EQ(str, "vlan")) {
#if RTE_VERSION >= RTE_VERSION_NUM(18,8,0,1)
@@ -621,11 +635,11 @@ static int get_port_cfg(unsigned sindex, char *str, void *data)
return -1;
}
if (val) {
- cfg->requested_rx_offload |= DEV_RX_OFFLOAD_VLAN_STRIP;
- cfg->requested_tx_offload |= DEV_TX_OFFLOAD_VLAN_INSERT;
+ cfg->requested_rx_offload |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+ cfg->requested_tx_offload |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
} else {
- cfg->requested_rx_offload &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
- cfg->requested_tx_offload &= ~DEV_TX_OFFLOAD_VLAN_INSERT;
+ cfg->requested_rx_offload &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+ cfg->requested_tx_offload &= ~RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
}
#else
plog_warn("vlan option not supported : update DPDK at least to 18.08 to support this option\n");
@@ -641,10 +655,14 @@ static int get_port_cfg(unsigned sindex, char *str, void *data)
// A frame of 1526 bytes (1500 bytes mtu, 14 bytes hdr, 4 bytes crc and 8 bytes vlan)
// should not be considered as a jumbo frame. However rte_ethdev.c considers that
// the max_rx_pkt_len for a non jumbo frame is 1518
+#if RTE_VERSION < RTE_VERSION_NUM(21,11,0,0)
cfg->port_conf.rxmode.max_rx_pkt_len = cfg->mtu + PROX_RTE_ETHER_HDR_LEN + PROX_RTE_ETHER_CRC_LEN;
- if (cfg->port_conf.rxmode.max_rx_pkt_len > PROX_RTE_ETHER_MAX_LEN) {
- cfg->requested_rx_offload |= DEV_RX_OFFLOAD_JUMBO_FRAME;
- }
+ if (cfg->port_conf.rxmode.max_rx_pkt_len > PROX_RTE_ETHER_MAX_LEN)
+#else
+ cfg->port_conf.rxmode.mtu = cfg->mtu;
+ if (cfg->port_conf.rxmode.mtu > PROX_MTU)
+#endif
+ cfg->requested_rx_offload |= RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
}
}
@@ -654,8 +672,8 @@ static int get_port_cfg(unsigned sindex, char *str, void *data)
return -1;
}
if (val) {
- cfg->port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
- cfg->port_conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IPV4;
+ cfg->port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
+ cfg->port_conf.rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_IPV4;
}
}
else if (STR_EQ(str, "rx_ring")) {
@@ -909,6 +927,9 @@ static int get_core_cfg(unsigned sindex, char *str, void *data)
return parse_flag(&targ->runtime_flags, TASK_FP_HANDLE_ARP, pkey);
}
+ if (STR_EQ(str, "do not forward geneve")) {
+ return parse_flag(&targ->runtime_flags, TASK_DO_NOT_FWD_GENEVE, pkey);
+ }
/* Using tx port name, only a _single_ port can be assigned to a task. */
if (STR_EQ(str, "tx port")) {
if (targ->nb_txports > 0) {
@@ -1008,6 +1029,18 @@ static int get_core_cfg(unsigned sindex, char *str, void *data)
if (STR_EQ(str, "random")) {
return parse_str(targ->rand_str[targ->n_rand_str++], pkey, sizeof(targ->rand_str[0]));
}
+ if (STR_EQ(str, "range")) {
+ int rc = parse_range(&targ->range[targ->n_ranges].min, &targ->range[targ->n_ranges].max, pkey);
+ targ->n_ranges++;
+ return rc;
+ }
+ if (STR_EQ(str, "range_offset")) {
+ if (targ->n_ranges == 0) {
+ set_errf("No range defined previously (use range=...)");
+ return -1;
+ }
+ return parse_int(&targ->range[targ->n_ranges - 1].offset, pkey);
+ }
if (STR_EQ(str, "rand_offset")) {
if (targ->n_rand_str == 0) {
set_errf("No random defined previously (use random=...)");
@@ -1118,6 +1151,9 @@ static int get_core_cfg(unsigned sindex, char *str, void *data)
if (STR_EQ(str, "latency buffer size")) {
return parse_int(&targ->latency_buffer_size, pkey);
}
+ if (STR_EQ(str, "loss buffer size")) {
+ return parse_int(&targ->loss_buffer_size, pkey);
+ }
if (STR_EQ(str, "accuracy pos")) {
return parse_int(&targ->accur_pos, pkey);
}
@@ -1134,7 +1170,16 @@ static int get_core_cfg(unsigned sindex, char *str, void *data)
if (STR_EQ(str, "packet id pos")) {
return parse_int(&targ->packet_id_pos, pkey);
}
- if (STR_EQ(str, "probability")) {
+ if (STR_EQ(str, "flow id pos")) {
+ return parse_int(&targ->flow_id_pos, pkey);
+ }
+ if (STR_EQ(str, "packet id in flow pos")) {
+ return parse_int(&targ->packet_id_in_flow_pos, pkey);
+ }
+ if (STR_EQ(str, "flow count")) {
+ return parse_int(&targ->flow_count, pkey);
+ }
+	if (STR_EQ(str, "probability")) { // deprecated - use "proba no drop" instead
float probability;
int rc = parse_float(&probability, pkey);
if (probability == 0) {
@@ -1144,9 +1189,44 @@ static int get_core_cfg(unsigned sindex, char *str, void *data)
set_errf("Probability must be < 100\n");
return -1;
}
- targ->probability = probability * 10000;
+ targ->probability_no_drop = probability * 10000;
return rc;
}
+ if (STR_EQ(str, "proba no drop")) {
+ float probability;
+ int rc = parse_float(&probability, pkey);
+ if (probability == 0) {
+ set_errf("probability no drop must be != 0\n");
+ return -1;
+ } else if (probability > 100.0) {
+ set_errf("Probability must be < 100\n");
+ return -1;
+ }
+ targ->probability_no_drop = probability * 10000;
+ return rc;
+ }
+ if (STR_EQ(str, "proba delay")) {
+ float probability;
+ int rc = parse_float(&probability, pkey);
+ if (probability > 100.0) {
+ set_errf("Probability must be < 100\n");
+ return -1;
+ }
+ targ->probability_delay = probability * 10000;
+ return rc;
+ }
+#if RTE_VERSION >= RTE_VERSION_NUM(19,11,0,0)
+ if (STR_EQ(str, "proba duplicate")) {
+ float probability;
+ int rc = parse_float(&probability, pkey);
+ if (probability > 100.0) {
+ set_errf("probability duplicate must be < 100\n");
+ return -1;
+ }
+ targ->probability_duplicate = probability * 10000;
+ return rc;
+ }
+#endif
if (STR_EQ(str, "concur conn")) {
return parse_int(&targ->n_concur_conn, pkey);
}
@@ -1480,6 +1560,11 @@ static int get_core_cfg(unsigned sindex, char *str, void *data)
}
return 0;
}
+ if (STR_EQ(str, "gateway ipv6")) { /* Gateway IP address used when generating */
+ if ((targ->flags & TASK_ARG_NDP) == 0)
+ plog_warn("gateway ipv6 configured but NDP sub mode not enabled\n");
+ return parse_ip6(&targ->gateway_ipv6, pkey);
+ }
if (STR_EQ(str, "local ipv4")) { /* source IP address to be used for packets */
struct ip4_subnet cidr;
if (parse_ip4_and_prefix(&cidr, pkey) != 0) {
@@ -1536,6 +1621,8 @@ static int get_core_cfg(unsigned sindex, char *str, void *data)
return parse_int(&targ->arp_ndp_retransmit_timeout, pkey);
if (STR_EQ(str, "number of packets"))
return parse_int(&targ->n_pkts, pkey);
+ if (STR_EQ(str, "store size"))
+ return parse_int(&targ->store_max, pkey);
if (STR_EQ(str, "pipes")) {
uint32_t val;
int err = parse_int(&val, pkey);
@@ -1555,7 +1642,7 @@ static int get_core_cfg(unsigned sindex, char *str, void *data)
if (err) {
return -1;
}
-#if RTE_VERSION >= RTE_VERSION_NUM(19,11,0,0)
+#if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
targ->qos_conf.subport_params[0].qsize[0] = val;
targ->qos_conf.subport_params[0].qsize[1] = val;
targ->qos_conf.subport_params[0].qsize[2] = val;
@@ -1569,46 +1656,70 @@ static int get_core_cfg(unsigned sindex, char *str, void *data)
return 0;
}
if (STR_EQ(str, "subport tb rate")) {
-#if RTE_VERSION >= RTE_VERSION_NUM(19,11,0,0)
+#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
+ return parse_u64(&targ->qos_conf.port_params.subport_profiles->tb_rate, pkey);
+#else
+#if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
return parse_u64(&targ->qos_conf.subport_params[0].tb_rate, pkey);
#else
return parse_int(&targ->qos_conf.subport_params[0].tb_rate, pkey);
#endif
+#endif
}
if (STR_EQ(str, "subport tb size")) {
-#if RTE_VERSION >= RTE_VERSION_NUM(19,11,0,0)
+#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
+ return parse_u64(&targ->qos_conf.port_params.subport_profiles->tb_size, pkey);
+#else
+#if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
return parse_u64(&targ->qos_conf.subport_params[0].tb_size, pkey);
#else
return parse_int(&targ->qos_conf.subport_params[0].tb_size, pkey);
#endif
+#endif
}
if (STR_EQ(str, "subport tc 0 rate")) {
-#if RTE_VERSION >= RTE_VERSION_NUM(19,11,0,0)
+#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
+ return parse_u64(&targ->qos_conf.port_params.subport_profiles->tc_rate[0], pkey);
+#else
+#if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
return parse_u64(&targ->qos_conf.subport_params[0].tc_rate[0], pkey);
#else
return parse_int(&targ->qos_conf.subport_params[0].tc_rate[0], pkey);
#endif
+#endif
}
if (STR_EQ(str, "subport tc 1 rate")) {
-#if RTE_VERSION >= RTE_VERSION_NUM(19,11,0,0)
+#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
+ return parse_u64(&targ->qos_conf.port_params.subport_profiles->tc_rate[1], pkey);
+#else
+#if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
return parse_u64(&targ->qos_conf.subport_params[0].tc_rate[1], pkey);
#else
return parse_int(&targ->qos_conf.subport_params[0].tc_rate[1], pkey);
#endif
+#endif
}
if (STR_EQ(str, "subport tc 2 rate")) {
-#if RTE_VERSION >= RTE_VERSION_NUM(19,11,0,0)
+#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
+ return parse_u64(&targ->qos_conf.port_params.subport_profiles->tc_rate[2], pkey);
+#else
+#if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
return parse_u64(&targ->qos_conf.subport_params[0].tc_rate[2], pkey);
#else
return parse_int(&targ->qos_conf.subport_params[0].tc_rate[2], pkey);
#endif
+#endif
}
if (STR_EQ(str, "subport tc 3 rate")) {
-#if RTE_VERSION >= RTE_VERSION_NUM(19,11,0,0)
+#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
+ return parse_u64(&targ->qos_conf.port_params.subport_profiles->tc_rate[3], pkey);
+#else
+#if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
return parse_u64(&targ->qos_conf.subport_params[0].tc_rate[3], pkey);
#else
return parse_int(&targ->qos_conf.subport_params[0].tc_rate[3], pkey);
#endif
+#endif
}
if (STR_EQ(str, "subport tc rate")) {
@@ -1618,29 +1729,40 @@ static int get_core_cfg(unsigned sindex, char *str, void *data)
return -1;
}
+#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
+ targ->qos_conf.port_params.subport_profiles->tc_rate[0] = val;
+ targ->qos_conf.port_params.subport_profiles->tc_rate[1] = val;
+ targ->qos_conf.port_params.subport_profiles->tc_rate[2] = val;
+ targ->qos_conf.port_params.subport_profiles->tc_rate[3] = val;
+#else
targ->qos_conf.subport_params[0].tc_rate[0] = val;
targ->qos_conf.subport_params[0].tc_rate[1] = val;
targ->qos_conf.subport_params[0].tc_rate[2] = val;
targ->qos_conf.subport_params[0].tc_rate[3] = val;
+#endif
return 0;
}
if (STR_EQ(str, "subport tc period")) {
-#if RTE_VERSION >= RTE_VERSION_NUM(19,11,0,0)
+#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
+ return parse_u64(&targ->qos_conf.port_params.subport_profiles->tc_period, pkey);
+#else
+#if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
return parse_u64(&targ->qos_conf.subport_params[0].tc_period, pkey);
#else
return parse_int(&targ->qos_conf.subport_params[0].tc_period, pkey);
#endif
+#endif
}
if (STR_EQ(str, "pipe tb rate")) {
-#if RTE_VERSION >= RTE_VERSION_NUM(19,11,0,0)
+#if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
return parse_u64(&targ->qos_conf.pipe_params[0].tb_rate, pkey);
#else
return parse_int(&targ->qos_conf.pipe_params[0].tb_rate, pkey);
#endif
}
if (STR_EQ(str, "pipe tb size")) {
-#if RTE_VERSION >= RTE_VERSION_NUM(19,11,0,0)
+#if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
return parse_u64(&targ->qos_conf.pipe_params[0].tb_size, pkey);
#else
return parse_int(&targ->qos_conf.pipe_params[0].tb_size, pkey);
@@ -1660,35 +1782,35 @@ static int get_core_cfg(unsigned sindex, char *str, void *data)
return 0;
}
if (STR_EQ(str, "pipe tc 0 rate")) {
-#if RTE_VERSION >= RTE_VERSION_NUM(19,11,0,0)
+#if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
return parse_u64(&targ->qos_conf.pipe_params[0].tc_rate[0], pkey);
#else
return parse_int(&targ->qos_conf.pipe_params[0].tc_rate[0], pkey);
#endif
}
if (STR_EQ(str, "pipe tc 1 rate")) {
-#if RTE_VERSION >= RTE_VERSION_NUM(19,11,0,0)
+#if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
return parse_u64(&targ->qos_conf.pipe_params[0].tc_rate[1], pkey);
#else
return parse_int(&targ->qos_conf.pipe_params[0].tc_rate[1], pkey);
#endif
}
if (STR_EQ(str, "pipe tc 2 rate")) {
-#if RTE_VERSION >= RTE_VERSION_NUM(19,11,0,0)
+#if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
return parse_u64(&targ->qos_conf.pipe_params[0].tc_rate[2], pkey);
#else
return parse_int(&targ->qos_conf.pipe_params[0].tc_rate[2], pkey);
#endif
}
if (STR_EQ(str, "pipe tc 3 rate")) {
-#if RTE_VERSION >= RTE_VERSION_NUM(19,11,0,0)
+#if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
return parse_u64(&targ->qos_conf.pipe_params[0].tc_rate[3], pkey);
#else
return parse_int(&targ->qos_conf.pipe_params[0].tc_rate[3], pkey);
#endif
}
if (STR_EQ(str, "pipe tc period")) {
-#if RTE_VERSION >= RTE_VERSION_NUM(19,11,0,0)
+#if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
return parse_u64(&targ->qos_conf.pipe_params[0].tc_period, pkey);
#else
return parse_int(&targ->qos_conf.pipe_params[0].tc_period, pkey);
@@ -1785,31 +1907,41 @@ static int get_core_cfg(unsigned sindex, char *str, void *data)
return parse_int(&targ->n_max_rules, pkey);
}
- if (STR_EQ(str, "tunnel hop limit")) {
- uint32_t val;
- int err = parse_int(&val, pkey);
- if (err) {
- return -1;
- }
- targ->tunnel_hop_limit = val;
- return 0;
- }
+ if (STR_EQ(str, "tunnel hop limit")) {
+ uint32_t val;
+ int err = parse_int(&val, pkey);
+ if (err) {
+ return -1;
+ }
+ targ->tunnel_hop_limit = val;
+ return 0;
+ }
- if (STR_EQ(str, "lookup port mask")) {
- uint32_t val;
- int err = parse_int(&val, pkey);
- if (err) {
- return -1;
- }
- targ->lookup_port_mask = val;
- return 0;
- }
+ if (STR_EQ(str, "lookup port mask")) {
+ uint32_t val;
+ int err = parse_int(&val, pkey);
+ if (err) {
+ return -1;
+ }
+ targ->lookup_port_mask = val;
+ return 0;
+ }
if (STR_EQ(str, "irq debug")) {
parse_int(&targ->irq_debug, pkey);
return 0;
}
+ if (STR_EQ(str, "multiplier")) {
+ parse_int(&targ->multiplier, pkey);
+ return 0;
+ }
+
+ if (STR_EQ(str, "mirror size")) {
+ parse_int(&targ->mirror_size, pkey);
+ return 0;
+ }
+
set_errf("Option '%s' is not known", str);
/* fail on unknown keys */
return -1;
@@ -2168,10 +2300,14 @@ int prox_setup_rte(const char *prog_name)
sprintf(rte_arg[++argc], "-c%s", tmp);
rte_argv[argc] = rte_arg[argc];
#if RTE_VERSION >= RTE_VERSION_NUM(1,8,0,0)
+ uint32_t master_core = prox_cfg.master;
if (prox_cfg.flags & DSF_USE_DUMMY_CPU_TOPO)
- sprintf(rte_arg[++argc], "--master-lcore=%u", 0);
- else
- sprintf(rte_arg[++argc], "--master-lcore=%u", prox_cfg.master);
+ master_core = 0;
+#if RTE_VERSION < RTE_VERSION_NUM(21,11,0,0)
+ sprintf(rte_arg[++argc], "--master-lcore=%u", master_core);
+#else
+ sprintf(rte_arg[++argc], "--main-lcore=%u", master_core);
+#endif
rte_argv[argc] = rte_arg[argc];
#else
/* For old DPDK versions, the master core had to be the first
@@ -2234,7 +2370,7 @@ int prox_setup_rte(const char *prog_name)
if (ptr) {
*ptr++ = '\0';
}
- strcpy(rte_arg[++argc], ptr2);
+ prox_strncpy(rte_arg[++argc], ptr2, MAX_ARG_LEN);
rte_argv[argc] = rte_arg[argc];
}
}
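The "mtu" handling above splits on DPDK 21.11, where rxmode.max_rx_pkt_len was removed in favour of rxmode.mtu. A compile-time sketch of that split, pulled out of get_port_cfg(); the PROX_* constants and the jumbo-frame bit below are stand-ins for the values defined in PROX headers and prox_compat.h.

	#include <rte_version.h>
	#include <rte_ethdev.h>

	/* Placeholder values; the real constants live in PROX headers / prox_compat.h. */
	#ifndef PROX_RTE_ETHER_HDR_LEN
	#define PROX_RTE_ETHER_HDR_LEN 14
	#define PROX_RTE_ETHER_CRC_LEN 4
	#define PROX_RTE_ETHER_MAX_LEN 1518
	#endif
	#ifndef PROX_MTU
	#define PROX_MTU (PROX_RTE_ETHER_MAX_LEN - PROX_RTE_ETHER_HDR_LEN - PROX_RTE_ETHER_CRC_LEN)
	#endif
	#ifndef RTE_ETH_RX_OFFLOAD_JUMBO_FRAME	/* removed upstream in 21.11; shimmed in prox_compat.h */
	#define RTE_ETH_RX_OFFLOAD_JUMBO_FRAME (1ULL << 11)
	#endif

	static void set_mtu_sketch(struct rte_eth_conf *conf, uint64_t *rx_offload, uint16_t mtu)
	{
	#if RTE_VERSION < RTE_VERSION_NUM(21, 11, 0, 0)
		/* Old API: configure the max RX frame length and flag jumbo frames explicitly. */
		conf->rxmode.max_rx_pkt_len = mtu + PROX_RTE_ETHER_HDR_LEN + PROX_RTE_ETHER_CRC_LEN;
		if (conf->rxmode.max_rx_pkt_len > PROX_RTE_ETHER_MAX_LEN)
			*rx_offload |= RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
	#else
		/* 21.11+: rxmode carries the MTU directly. */
		conf->rxmode.mtu = mtu;
		if (conf->rxmode.mtu > PROX_MTU)
			*rx_offload |= RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
	#endif
	}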
diff --git a/VNFs/DPPD-PROX/prox_cksum.c b/VNFs/DPPD-PROX/prox_cksum.c
index 11d37a6c..401191f6 100644
--- a/VNFs/DPPD-PROX/prox_cksum.c
+++ b/VNFs/DPPD-PROX/prox_cksum.c
@@ -91,14 +91,14 @@ static inline void prox_write_tcp_pseudo_hdr(prox_rte_tcp_hdr *tcp, uint16_t len
inline void prox_ip_udp_cksum(struct rte_mbuf *mbuf, prox_rte_ipv4_hdr *pip, uint16_t l2_len, uint16_t l3_len, int cksum_offload)
{
- prox_ip_cksum(mbuf, pip, l2_len, l3_len, cksum_offload & DEV_TX_OFFLOAD_IPV4_CKSUM);
+ prox_ip_cksum(mbuf, pip, l2_len, l3_len, cksum_offload & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM);
uint32_t l4_len = rte_bswap16(pip->total_length) - l3_len;
if (pip->next_proto_id == IPPROTO_UDP) {
prox_rte_udp_hdr *udp = (prox_rte_udp_hdr *)(((uint8_t*)pip) + l3_len);
#ifndef SOFT_CRC
- if (cksum_offload & DEV_TX_OFFLOAD_UDP_CKSUM) {
- mbuf->ol_flags |= PKT_TX_UDP_CKSUM;
+ if (cksum_offload & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) {
+ mbuf->ol_flags |= RTE_MBUF_F_TX_UDP_CKSUM;
prox_write_udp_pseudo_hdr(udp, l4_len, pip->src_addr, pip->dst_addr);
} else
#endif
@@ -106,9 +106,9 @@ inline void prox_ip_udp_cksum(struct rte_mbuf *mbuf, prox_rte_ipv4_hdr *pip, uin
} else if (pip->next_proto_id == IPPROTO_TCP) {
prox_rte_tcp_hdr *tcp = (prox_rte_tcp_hdr *)(((uint8_t*)pip) + l3_len);
#ifndef SOFT_CRC
- if (cksum_offload & DEV_TX_OFFLOAD_TCP_CKSUM) {
+ if (cksum_offload & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) {
prox_write_tcp_pseudo_hdr(tcp, l4_len, pip->src_addr, pip->dst_addr);
- mbuf->ol_flags |= PKT_TX_UDP_CKSUM;
+ mbuf->ol_flags |= RTE_MBUF_F_TX_UDP_CKSUM;
} else
#endif
prox_tcp_cksum_sw(tcp, l4_len, pip->src_addr, pip->dst_addr);
diff --git a/VNFs/DPPD-PROX/prox_cksum.h b/VNFs/DPPD-PROX/prox_cksum.h
index 03be595a..d4ac5a6b 100644
--- a/VNFs/DPPD-PROX/prox_cksum.h
+++ b/VNFs/DPPD-PROX/prox_cksum.h
@@ -42,7 +42,7 @@ static void prox_ip_cksum_hw(struct rte_mbuf *mbuf, uint16_t l2_len, uint16_t l3
#else
mbuf->tx_offload = CALC_TX_OL(l2_len, l3_len);
#endif
- mbuf->ol_flags |= PKT_TX_IP_CKSUM;
+ mbuf->ol_flags |= RTE_MBUF_F_TX_IP_CKSUM;
}
void prox_ip_cksum_sw(prox_rte_ipv4_hdr *buf);
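For context on the flag rename above: prox_ip_cksum_hw() requests hardware IPv4 header checksum computation through mbuf offload flags. A minimal standalone sketch of that request using the post-21.11 names (prox_compat.h maps them back on older DPDK); the function name and the additional RTE_MBUF_F_TX_IPV4 flag are illustrative, PROX's own helper only sets the checksum flag.

	#include <rte_mbuf.h>
	#include <rte_ip.h>

	static void request_ip_cksum_offload(struct rte_mbuf *m, struct rte_ipv4_hdr *ip,
					     uint16_t l2_len, uint16_t l3_len)
	{
		ip->hdr_checksum = 0;		/* must be zeroed for HW offload */
		m->l2_len = l2_len;		/* header lengths guide the NIC */
		m->l3_len = l3_len;
		/* was PKT_TX_IP_CKSUM / PKT_TX_IPV4 before DPDK 21.11 */
		m->ol_flags |= RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_IPV4;
	}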
diff --git a/VNFs/DPPD-PROX/prox_compat.h b/VNFs/DPPD-PROX/prox_compat.h
index 404ce9ed..d4b7f247 100644
--- a/VNFs/DPPD-PROX/prox_compat.h
+++ b/VNFs/DPPD-PROX/prox_compat.h
@@ -153,16 +153,6 @@ static void *prox_rte_table_create(struct prox_rte_table_params *params, int soc
#define rte_cryptodev_sym_get_private_session_size rte_cryptodev_get_private_session_size
#endif
-#ifndef DEV_RX_OFFLOAD_JUMBO_FRAME
-#define DEV_RX_OFFLOAD_JUMBO_FRAME 0x00000800
-#endif
-
-#ifndef DEV_RX_OFFLOAD_KEEP_CRC
-#ifndef DEV_RX_OFFLOAD_CRC_STRIP
-#define DEV_RX_OFFLOAD_CRC_STRIP 0x00001000
-#endif
-#endif
-
#if RTE_VERSION < RTE_VERSION_NUM(19,2,0,0)
#define RTE_COLOR_GREEN e_RTE_METER_GREEN
#define RTE_COLOR_YELLOW e_RTE_METER_YELLOW
@@ -210,7 +200,7 @@ typedef struct icmp_hdr prox_rte_icmp_hdr;
#define PROX_RTE_IS_IPV4_MCAST IS_IPV4_MCAST
#define prox_rte_is_same_ether_addr is_same_ether_addr
#define prox_rte_is_zero_ether_addr is_zero_ether_addr
-#else
+#else // >= 19.08
#define PROX_RTE_ETHER_CRC_LEN RTE_ETHER_CRC_LEN
#define PROX_RTE_ETHER_MIN_LEN RTE_ETHER_MIN_LEN
@@ -229,7 +219,16 @@ typedef struct icmp_hdr prox_rte_icmp_hdr;
typedef struct rte_ipv6_hdr prox_rte_ipv6_hdr;
typedef struct rte_ipv4_hdr prox_rte_ipv4_hdr;
typedef struct rte_ether_addr prox_rte_ether_addr;
+#if RTE_VERSION < RTE_VERSION_NUM(21,11,0,0)
typedef struct rte_ether_hdr prox_rte_ether_hdr;
+#else
+typedef struct prox_rte_ether_hdr
+{
+ struct rte_ether_addr d_addr; /**< Destination address. */
+ struct rte_ether_addr s_addr; /**< Source address. */
+ rte_be16_t ether_type; /**< Frame type. */
+} __rte_aligned(2) prox_rte_ether_hdr;
+#endif
typedef struct rte_vlan_hdr prox_rte_vlan_hdr;
typedef struct rte_vxlan_gpe_hdr prox_rte_vxlan_gpe_hdr;
typedef struct rte_udp_hdr prox_rte_udp_hdr;
@@ -286,4 +285,378 @@ static int prox_rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pa
#define prox_rte_eth_dev_count_avail() rte_eth_dev_count_avail()
#endif
+// Compatibility defines: PROX uses the newer RTE_* names; on older DPDK releases map them back to the deprecated symbols
+
+#if RTE_VERSION < RTE_VERSION_NUM(20,11,0,0)
+#define SKIP_MAIN SKIP_MASTER
+#define CALL_MAIN CALL_MASTER
+#define RTE_DEVTYPE_ALLOWED RTE_DEVTYPE_WHITELISTED_PCI
+#define RTE_DEVTYPE_BLOCKED RTE_DEVTYPE_BLACKLISTED_PCI
+#define RTE_LCORE_FOREACH_WORKER RTE_LCORE_FOREACH_SLAVE
+#if RTE_VERSION >= RTE_VERSION_NUM(17,8,0,0)
+#define RTE_DEV_ALLOWED RTE_DEV_WHITELISTED
+#define RTE_DEV_BLOCKED RTE_DEV_BLACKLISTED
+#define RTE_BUS_SCAN_ALLOWLIST RTE_BUS_SCAN_WHITELIST
+#define RTE_BUS_SCAN_BLOCKLIST RTE_BUS_SCAN_BLACKLIST
+#endif
+#endif
+
+#if RTE_VERSION < RTE_VERSION_NUM(21,5,0,0)
+#define RTE_PCI_ANY_ID PCI_ANY_ID
+#define PKT_RX_OUTER_IP_CKSUM_BAD PKT_RX_EIP_CKSUM_BAD
+#endif
+
+#if RTE_VERSION < RTE_VERSION_NUM(21,11,0,0)
+#define RTE_MEMPOOL_HEADER_SIZE MEMPOOL_HEADER_SIZE
+#define RTE_MBUF_F_RX_RSS_HASH PKT_RX_RSS_HASH
+#define RTE_MBUF_F_RX_FDIR PKT_RX_FDIR
+#define RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD PKT_RX_OUTER_IP_CKSUM_BAD
+#define RTE_MBUF_F_RX_IP_CKSUM_BAD PKT_RX_IP_CKSUM_BAD
+#define RTE_MBUF_F_RX_L4_CKSUM_BAD PKT_RX_L4_CKSUM_BAD
+#define RTE_MBUF_F_RX_IEEE1588_PTP PKT_RX_IEEE1588_PTP
+#define RTE_MBUF_F_RX_IEEE1588_TMST PKT_RX_IEEE1588_TMST
+#define RTE_MBUF_F_RX_FDIR_ID PKT_RX_FDIR_ID
+#define RTE_MBUF_F_RX_FDIR_FLX PKT_RX_FDIR_FLX
+#define RTE_MBUF_F_TX_QINQ PKT_TX_QINQ_PKT
+#define RTE_MBUF_F_TX_TCP_SEG PKT_TX_TCP_SEG
+#define RTE_MBUF_F_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
+#define RTE_MBUF_F_TX_L4_NO_CKSUM PKT_TX_L4_NO_CKSUM
+#define RTE_MBUF_F_TX_TCP_CKSUM PKT_TX_TCP_CKSUM
+#define RTE_MBUF_F_TX_SCTP_CKSUM PKT_TX_SCTP_CKSUM
+#define RTE_MBUF_F_TX_UDP_CKSUM PKT_TX_UDP_CKSUM
+#define RTE_MBUF_F_TX_L4_MASK PKT_TX_L4_MASK
+#define RTE_MBUF_F_TX_IP_CKSUM PKT_TX_IP_CKSUM
+#define RTE_MBUF_F_TX_IPV4 PKT_TX_IPV4
+#define RTE_MBUF_F_TX_IPV6 PKT_TX_IPV6
+#define RTE_MBUF_F_TX_VLAN PKT_TX_VLAN_PKT
+#define RTE_MBUF_F_TX_OUTER_IP_CKSUM PKT_TX_OUTER_IP_CKSUM
+#define RTE_MBUF_F_TX_OUTER_IPV4 PKT_TX_OUTER_IPV4
+#define RTE_MBUF_F_TX_OUTER_IPV6 PKT_TX_OUTER_IPV6
+#define RTE_MBUF_F_INDIRECT IND_ATTACHED_MBUF
+#define RTE_ETH_LINK_SPEED_AUTONEG ETH_LINK_SPEED_AUTONEG
+#define RTE_ETH_LINK_SPEED_FIXED ETH_LINK_SPEED_FIXED
+#define RTE_ETH_LINK_SPEED_10M_HD ETH_LINK_SPEED_10M_HD
+#define RTE_ETH_LINK_SPEED_10M ETH_LINK_SPEED_10M
+#define RTE_ETH_LINK_SPEED_100M_HD ETH_LINK_SPEED_100M_HD
+#define RTE_ETH_LINK_SPEED_100M ETH_LINK_SPEED_100M
+#define RTE_ETH_LINK_SPEED_1G ETH_LINK_SPEED_1G
+#define RTE_ETH_LINK_SPEED_2_5G ETH_LINK_SPEED_2_5G
+#define RTE_ETH_LINK_SPEED_5G ETH_LINK_SPEED_5G
+#define RTE_ETH_LINK_SPEED_10G ETH_LINK_SPEED_10G
+#define RTE_ETH_LINK_SPEED_20G ETH_LINK_SPEED_20G
+#define RTE_ETH_LINK_SPEED_25G ETH_LINK_SPEED_25G
+#define RTE_ETH_LINK_SPEED_40G ETH_LINK_SPEED_40G
+#define RTE_ETH_LINK_SPEED_50G ETH_LINK_SPEED_50G
+#define RTE_ETH_LINK_SPEED_56G ETH_LINK_SPEED_56G
+#define RTE_ETH_LINK_SPEED_100G ETH_LINK_SPEED_100G
+#define RTE_ETH_SPEED_NUM_NONE ETH_SPEED_NUM_NONE
+#define RTE_ETH_SPEED_NUM_10M ETH_SPEED_NUM_10M
+#define RTE_ETH_SPEED_NUM_100M ETH_SPEED_NUM_100M
+#define RTE_ETH_SPEED_NUM_1G ETH_SPEED_NUM_1G
+#define RTE_ETH_SPEED_NUM_2_5G ETH_SPEED_NUM_2_5G
+#define RTE_ETH_SPEED_NUM_5G ETH_SPEED_NUM_5G
+#define RTE_ETH_SPEED_NUM_10G ETH_SPEED_NUM_10G
+#define RTE_ETH_SPEED_NUM_20G ETH_SPEED_NUM_20G
+#define RTE_ETH_SPEED_NUM_25G ETH_SPEED_NUM_25G
+#define RTE_ETH_SPEED_NUM_40G ETH_SPEED_NUM_40G
+#define RTE_ETH_SPEED_NUM_50G ETH_SPEED_NUM_50G
+#define RTE_ETH_SPEED_NUM_56G ETH_SPEED_NUM_56G
+#define RTE_ETH_SPEED_NUM_100G ETH_SPEED_NUM_100G
+#define RTE_ETH_LINK_HALF_DUPLEX ETH_LINK_HALF_DUPLEX
+#define RTE_ETH_LINK_FULL_DUPLEX ETH_LINK_FULL_DUPLEX
+#define RTE_ETH_LINK_DOWN ETH_LINK_DOWN
+#define RTE_ETH_LINK_UP ETH_LINK_UP
+#define RTE_ETH_LINK_FIXED ETH_LINK_FIXED
+#define RTE_ETH_LINK_AUTONEG ETH_LINK_AUTONEG
+#define RTE_ETH_MQ_RX_RSS_FLAG ETH_MQ_RX_RSS_FLAG
+#define RTE_ETH_MQ_RX_DCB_FLAG ETH_MQ_RX_DCB_FLAG
+#define RTE_ETH_MQ_RX_VMDQ_FLAG ETH_MQ_RX_VMDQ_FLAG
+#define RTE_ETH_MQ_RX_NONE ETH_MQ_RX_NONE
+#define RTE_ETH_MQ_RX_RSS ETH_MQ_RX_RSS
+#define RTE_ETH_MQ_RX_DCB ETH_MQ_RX_DCB
+#define RTE_ETH_MQ_RX_DCB_RSS ETH_MQ_RX_DCB_RSS
+#define RTE_ETH_MQ_RX_VMDQ_ONLY ETH_MQ_RX_VMDQ_ONLY
+#define RTE_ETH_MQ_RX_VMDQ_RSS ETH_MQ_RX_VMDQ_RSS
+#define RTE_ETH_MQ_RX_VMDQ_DCB ETH_MQ_RX_VMDQ_DCB
+#define RTE_ETH_MQ_RX_VMDQ_DCB_RSS ETH_MQ_RX_VMDQ_DCB_RSS
+#define RTE_ETH_MQ_TX_NONE ETH_MQ_TX_NONE
+#define RTE_ETH_MQ_TX_DCB ETH_MQ_TX_DCB
+#define RTE_ETH_MQ_TX_VMDQ_DCB ETH_MQ_TX_VMDQ_DCB
+#define RTE_ETH_MQ_TX_VMDQ_ONLY ETH_MQ_TX_VMDQ_ONLY
+#define RTE_ETH_VLAN_TYPE_UNKNOWN ETH_VLAN_TYPE_UNKNOWN
+#define RTE_ETH_VLAN_TYPE_INNER ETH_VLAN_TYPE_INNER
+#define RTE_ETH_VLAN_TYPE_OUTER ETH_VLAN_TYPE_OUTER
+#define RTE_ETH_VLAN_TYPE_MAX ETH_VLAN_TYPE_MAX
+#define RTE_ETH_RSS_IPV4 ETH_RSS_IPV4
+#define RTE_ETH_RSS_FRAG_IPV4 ETH_RSS_FRAG_IPV4
+#define RTE_ETH_RSS_NONFRAG_IPV4_TCP ETH_RSS_NONFRAG_IPV4_TCP
+#define RTE_ETH_RSS_NONFRAG_IPV4_UDP ETH_RSS_NONFRAG_IPV4_UDP
+#define RTE_ETH_RSS_NONFRAG_IPV4_SCTP ETH_RSS_NONFRAG_IPV4_SCTP
+#define RTE_ETH_RSS_NONFRAG_IPV4_OTHER ETH_RSS_NONFRAG_IPV4_OTHER
+#define RTE_ETH_RSS_IPV6 ETH_RSS_IPV6
+#define RTE_ETH_RSS_FRAG_IPV6 ETH_RSS_FRAG_IPV6
+#define RTE_ETH_RSS_NONFRAG_IPV6_TCP ETH_RSS_NONFRAG_IPV6_TCP
+#define RTE_ETH_RSS_NONFRAG_IPV6_UDP ETH_RSS_NONFRAG_IPV6_UDP
+#define RTE_ETH_RSS_NONFRAG_IPV6_SCTP ETH_RSS_NONFRAG_IPV6_SCTP
+#define RTE_ETH_RSS_NONFRAG_IPV6_OTHER ETH_RSS_NONFRAG_IPV6_OTHER
+#define RTE_ETH_RSS_L2_PAYLOAD ETH_RSS_L2_PAYLOAD
+#define RTE_ETH_RSS_IPV6_EX ETH_RSS_IPV6_EX
+#define RTE_ETH_RSS_IPV6_TCP_EX ETH_RSS_IPV6_TCP_EX
+#define RTE_ETH_RSS_IPV6_UDP_EX ETH_RSS_IPV6_UDP_EX
+#define RTE_ETH_RSS_IP ETH_RSS_IP
+#define RTE_ETH_RSS_UDP ETH_RSS_UDP
+#define RTE_ETH_RSS_TCP ETH_RSS_TCP
+#define RTE_ETH_RSS_SCTP ETH_RSS_SCTP
+#define RTE_ETH_RSS_PROTO_MASK ETH_RSS_PROTO_MASK
+#define RTE_ETH_RSS_RETA_SIZE_64 ETH_RSS_RETA_SIZE_64
+#define RTE_ETH_RSS_RETA_SIZE_128 ETH_RSS_RETA_SIZE_128
+#define RTE_ETH_RSS_RETA_SIZE_512 ETH_RSS_RETA_SIZE_512
+#define RTE_ETH_RETA_GROUP_SIZE RTE_RETA_GROUP_SIZE
+#define RTE_ETH_VMDQ_MAX_VLAN_FILTERS ETH_VMDQ_MAX_VLAN_FILTERS
+#define RTE_ETH_DCB_NUM_USER_PRIORITIES ETH_DCB_NUM_USER_PRIORITIES
+#define RTE_ETH_VMDQ_DCB_NUM_QUEUES ETH_VMDQ_DCB_NUM_QUEUES
+#define RTE_ETH_DCB_NUM_QUEUES ETH_DCB_NUM_QUEUES
+#define RTE_ETH_DCB_PG_SUPPORT ETH_DCB_PG_SUPPORT
+#define RTE_ETH_DCB_PFC_SUPPORT ETH_DCB_PFC_SUPPORT
+#define RTE_ETH_VLAN_STRIP_OFFLOAD ETH_VLAN_STRIP_OFFLOAD
+#define RTE_ETH_VLAN_FILTER_OFFLOAD ETH_VLAN_FILTER_OFFLOAD
+#define RTE_ETH_VLAN_EXTEND_OFFLOAD ETH_VLAN_EXTEND_OFFLOAD
+#define RTE_ETH_VLAN_STRIP_MASK ETH_VLAN_STRIP_MASK
+#define RTE_ETH_VLAN_FILTER_MASK ETH_VLAN_FILTER_MASK
+#define RTE_ETH_VLAN_EXTEND_MASK ETH_VLAN_EXTEND_MASK
+#define RTE_ETH_VLAN_ID_MAX ETH_VLAN_ID_MAX
+#define RTE_ETH_NUM_RECEIVE_MAC_ADDR ETH_NUM_RECEIVE_MAC_ADDR
+#define RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY ETH_VMDQ_NUM_UC_HASH_ARRAY
+#define RTE_ETH_VMDQ_ACCEPT_UNTAG ETH_VMDQ_ACCEPT_UNTAG
+#define RTE_ETH_VMDQ_ACCEPT_HASH_MC ETH_VMDQ_ACCEPT_HASH_MC
+#define RTE_ETH_VMDQ_ACCEPT_HASH_UC ETH_VMDQ_ACCEPT_HASH_UC
+#define RTE_ETH_VMDQ_ACCEPT_BROADCAST ETH_VMDQ_ACCEPT_BROADCAST
+#define RTE_ETH_VMDQ_ACCEPT_MULTICAST ETH_VMDQ_ACCEPT_MULTICAST
+#define RTE_ETH_4_TCS ETH_4_TCS
+#define RTE_ETH_8_TCS ETH_8_TCS
+#define RTE_ETH_8_POOLS ETH_8_POOLS
+#define RTE_ETH_16_POOLS ETH_16_POOLS
+#define RTE_ETH_32_POOLS ETH_32_POOLS
+#define RTE_ETH_64_POOLS ETH_64_POOLS
+#define RTE_ETH_FC_NONE RTE_FC_NONE
+#define RTE_ETH_FC_RX_PAUSE RTE_FC_RX_PAUSE
+#define RTE_ETH_FC_TX_PAUSE RTE_FC_TX_PAUSE
+#define RTE_ETH_FC_FULL RTE_FC_FULL
+#define RTE_ETH_TUNNEL_TYPE_NONE RTE_TUNNEL_TYPE_NONE
+#define RTE_ETH_TUNNEL_TYPE_VXLAN RTE_TUNNEL_TYPE_VXLAN
+#define RTE_ETH_TUNNEL_TYPE_GENEVE RTE_TUNNEL_TYPE_GENEVE
+#define RTE_ETH_TUNNEL_TYPE_TEREDO RTE_TUNNEL_TYPE_TEREDO
+#define RTE_ETH_TUNNEL_TYPE_NVGRE RTE_TUNNEL_TYPE_NVGRE
+#define RTE_ETH_TUNNEL_TYPE_IP_IN_GRE RTE_TUNNEL_TYPE_IP_IN_GRE
+#define RTE_ETH_L2_TUNNEL_TYPE_E_TAG RTE_L2_TUNNEL_TYPE_E_TAG
+#define RTE_ETH_TUNNEL_TYPE_MAX RTE_TUNNEL_TYPE_MAX
+#define RTE_ETH_FDIR_PBALLOC_64K RTE_FDIR_PBALLOC_64K
+#define RTE_ETH_FDIR_PBALLOC_128K RTE_FDIR_PBALLOC_128K
+#define RTE_ETH_FDIR_PBALLOC_256K RTE_FDIR_PBALLOC_256K
+#define RTE_ETH_RX_OFFLOAD_VLAN_STRIP DEV_RX_OFFLOAD_VLAN_STRIP
+#define RTE_ETH_RX_OFFLOAD_IPV4_CKSUM DEV_RX_OFFLOAD_IPV4_CKSUM
+#define RTE_ETH_RX_OFFLOAD_UDP_CKSUM DEV_RX_OFFLOAD_UDP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_TCP_CKSUM DEV_RX_OFFLOAD_TCP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_TCP_LRO DEV_RX_OFFLOAD_TCP_LRO
+#define RTE_ETH_RX_OFFLOAD_QINQ_STRIP DEV_RX_OFFLOAD_QINQ_STRIP
+#define RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM
+#define RTE_ETH_TX_OFFLOAD_VLAN_INSERT DEV_TX_OFFLOAD_VLAN_INSERT
+#define RTE_ETH_TX_OFFLOAD_IPV4_CKSUM DEV_TX_OFFLOAD_IPV4_CKSUM
+#define RTE_ETH_TX_OFFLOAD_UDP_CKSUM DEV_TX_OFFLOAD_UDP_CKSUM
+#define RTE_ETH_TX_OFFLOAD_TCP_CKSUM DEV_TX_OFFLOAD_TCP_CKSUM
+#define RTE_ETH_TX_OFFLOAD_SCTP_CKSUM DEV_TX_OFFLOAD_SCTP_CKSUM
+#define RTE_ETH_TX_OFFLOAD_TCP_TSO DEV_TX_OFFLOAD_TCP_TSO
+#define RTE_ETH_TX_OFFLOAD_UDP_TSO DEV_TX_OFFLOAD_UDP_TSO
+#define RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM
+#define RTE_ETH_TX_OFFLOAD_QINQ_INSERT DEV_TX_OFFLOAD_QINQ_INSERT
+#define RTE_ETH_DCB_NUM_TCS ETH_DCB_NUM_TCS
+#define RTE_ETH_MAX_VMDQ_POOL ETH_MAX_VMDQ_POOL
+#if RTE_VERSION >= RTE_VERSION_NUM(16,7,0,0)
+#define RTE_MEMPOOL_REGISTER_OPS MEMPOOL_REGISTER_OPS
+#define RTE_MBUF_F_RX_VLAN_STRIPPED PKT_RX_VLAN_STRIPPED
+#define RTE_MBUF_F_RX_QINQ_STRIPPED PKT_RX_QINQ_STRIPPED
+#define RTE_ETH_RSS_PORT ETH_RSS_PORT
+#define RTE_ETH_RSS_VXLAN ETH_RSS_VXLAN
+#define RTE_ETH_RSS_GENEVE ETH_RSS_GENEVE
+#define RTE_ETH_RSS_NVGRE ETH_RSS_NVGRE
+#define RTE_ETH_RSS_TUNNEL ETH_RSS_TUNNEL
+#define RTE_ETH_RSS_RETA_SIZE_256 ETH_RSS_RETA_SIZE_256
+#endif
+#if RTE_VERSION >= RTE_VERSION_NUM(16,11,0,0)
+#define RTE_MBUF_F_RX_IP_CKSUM_MASK PKT_RX_IP_CKSUM_MASK
+#define RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN PKT_RX_IP_CKSUM_UNKNOWN
+#define RTE_MBUF_F_RX_IP_CKSUM_GOOD PKT_RX_IP_CKSUM_GOOD
+#define RTE_MBUF_F_RX_IP_CKSUM_NONE PKT_RX_IP_CKSUM_NONE
+#define RTE_MBUF_F_RX_L4_CKSUM_MASK PKT_RX_L4_CKSUM_MASK
+#define RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN PKT_RX_L4_CKSUM_UNKNOWN
+#define RTE_MBUF_F_RX_L4_CKSUM_GOOD PKT_RX_L4_CKSUM_GOOD
+#define RTE_MBUF_F_RX_L4_CKSUM_NONE PKT_RX_L4_CKSUM_NONE
+#define RTE_MBUF_F_RX_LRO PKT_RX_LRO
+#define RTE_MBUF_F_TX_TUNNEL_VXLAN PKT_TX_TUNNEL_VXLAN
+#define RTE_MBUF_F_TX_TUNNEL_GRE PKT_TX_TUNNEL_GRE
+#define RTE_MBUF_F_TX_TUNNEL_IPIP PKT_TX_TUNNEL_IPIP
+#define RTE_MBUF_F_TX_TUNNEL_GENEVE PKT_TX_TUNNEL_GENEVE
+#define RTE_MBUF_F_TX_TUNNEL_MASK PKT_TX_TUNNEL_MASK
+#define RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO DEV_TX_OFFLOAD_VXLAN_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO DEV_TX_OFFLOAD_GRE_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO DEV_TX_OFFLOAD_IPIP_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO DEV_TX_OFFLOAD_GENEVE_TNL_TSO
+#endif
+#if RTE_VERSION >= RTE_VERSION_NUM(17,2,0,0)
+#define RTE_MBUF_F_TX_MACSEC PKT_TX_MACSEC
+#define RTE_MBUF_F_TX_OFFLOAD_MASK PKT_TX_OFFLOAD_MASK
+#define RTE_ETH_RX_OFFLOAD_MACSEC_STRIP DEV_RX_OFFLOAD_MACSEC_STRIP
+#define RTE_ETH_TX_OFFLOAD_MACSEC_INSERT DEV_TX_OFFLOAD_MACSEC_INSERT
+#endif
+#if RTE_VERSION >= RTE_VERSION_NUM(17,8,0,0)
+#define RTE_MBUF_F_TX_TUNNEL_MPLSINUDP PKT_TX_TUNNEL_MPLSINUDP
+#define RTE_ETH_TX_OFFLOAD_MT_LOCKFREE DEV_TX_OFFLOAD_MT_LOCKFREE
+#endif
+#if RTE_VERSION >= RTE_VERSION_NUM(17,11,0,0)
+#define RTE_MBUF_F_RX_VLAN PKT_RX_VLAN
+#define RTE_MBUF_F_RX_SEC_OFFLOAD PKT_RX_SEC_OFFLOAD
+#define RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED PKT_RX_SEC_OFFLOAD_FAILED
+#define RTE_MBUF_F_RX_QINQ PKT_RX_QINQ
+#define RTE_MBUF_F_TX_SEC_OFFLOAD PKT_TX_SEC_OFFLOAD
+#define RTE_ETH_RX_OFFLOAD_HEADER_SPLIT DEV_RX_OFFLOAD_HEADER_SPLIT
+#define RTE_ETH_RX_OFFLOAD_VLAN_FILTER DEV_RX_OFFLOAD_VLAN_FILTER
+#define RTE_ETH_RX_OFFLOAD_VLAN_EXTEND DEV_RX_OFFLOAD_VLAN_EXTEND
+#define RTE_ETH_RX_OFFLOAD_SCATTER DEV_RX_OFFLOAD_SCATTER
+#define RTE_ETH_RX_OFFLOAD_TIMESTAMP DEV_RX_OFFLOAD_TIMESTAMP
+#define RTE_ETH_RX_OFFLOAD_SECURITY DEV_RX_OFFLOAD_SECURITY
+#define RTE_ETH_RX_OFFLOAD_CHECKSUM DEV_RX_OFFLOAD_CHECKSUM
+#define RTE_ETH_RX_OFFLOAD_VLAN DEV_RX_OFFLOAD_VLAN
+#define RTE_ETH_TX_OFFLOAD_MULTI_SEGS DEV_TX_OFFLOAD_MULTI_SEGS
+#define RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE DEV_TX_OFFLOAD_MBUF_FAST_FREE
+#define RTE_ETH_TX_OFFLOAD_SECURITY DEV_TX_OFFLOAD_SECURITY
+#endif
+#if RTE_VERSION >= RTE_VERSION_NUM(18,2,0,0)
+#define RTE_MBUF_F_TX_UDP_SEG PKT_TX_UDP_SEG
+#endif
+#if RTE_VERSION >= RTE_VERSION_NUM(18,5,0,0)
+#define RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE PKT_TX_TUNNEL_VXLAN_GPE
+#define RTE_MBUF_F_TX_TUNNEL_IP PKT_TX_TUNNEL_IP
+#define RTE_MBUF_F_TX_TUNNEL_UDP PKT_TX_TUNNEL_UDP
+#define RTE_MBUF_F_EXTERNAL EXT_ATTACHED_MBUF
+#define RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO DEV_TX_OFFLOAD_UDP_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_IP_TNL_TSO DEV_TX_OFFLOAD_IP_TNL_TSO
+#endif
+#if RTE_VERSION >= RTE_VERSION_NUM(18,11,0,0)
+#define RTE_MBUF_F_RX_OUTER_L4_CKSUM_MASK PKT_RX_OUTER_L4_CKSUM_MASK
+#define RTE_MBUF_F_RX_OUTER_L4_CKSUM_UNKNOWN PKT_RX_OUTER_L4_CKSUM_UNKNOWN
+#define RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD PKT_RX_OUTER_L4_CKSUM_BAD
+#define RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD PKT_RX_OUTER_L4_CKSUM_GOOD
+#define RTE_MBUF_F_RX_OUTER_L4_CKSUM_INVALID PKT_RX_OUTER_L4_CKSUM_INVALID
+#define RTE_MBUF_F_TX_OUTER_UDP_CKSUM PKT_TX_OUTER_UDP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_SCTP_CKSUM DEV_RX_OFFLOAD_SCTP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM DEV_RX_OFFLOAD_OUTER_UDP_CKSUM
+#define RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM DEV_TX_OFFLOAD_OUTER_UDP_CKSUM
+#endif
+#if RTE_VERSION >= RTE_VERSION_NUM(19,5,0,0)
+#define RTE_ETH_TUNNEL_TYPE_VXLAN_GPE RTE_TUNNEL_TYPE_VXLAN_GPE
+#endif
+#if RTE_VERSION >= RTE_VERSION_NUM(19,8,0,0)
+#define RTE_ETH_QINQ_STRIP_OFFLOAD ETH_QINQ_STRIP_OFFLOAD
+#define RTE_ETH_QINQ_STRIP_MASK ETH_QINQ_STRIP_MASK
+#endif
+#if RTE_VERSION >= RTE_VERSION_NUM(19,11,0,0)
+#define RTE_MBUF_DYNFLAG_RX_METADATA PKT_RX_DYNF_METADATA
+#define RTE_MBUF_DYNFLAG_TX_METADATA PKT_TX_DYNF_METADATA
+#define RTE_MBUF_F_FIRST_FREE PKT_FIRST_FREE
+#define RTE_MBUF_F_LAST_FREE PKT_LAST_FREE
+#define RTE_MBUF_F_TX_TUNNEL_GTP PKT_TX_TUNNEL_GTP
+#define RTE_ETH_RSS_GTPU ETH_RSS_GTPU
+#define RTE_ETH_RSS_L3_SRC_ONLY ETH_RSS_L3_SRC_ONLY
+#define RTE_ETH_RSS_L3_DST_ONLY ETH_RSS_L3_DST_ONLY
+#define RTE_ETH_RSS_L4_SRC_ONLY ETH_RSS_L4_SRC_ONLY
+#define RTE_ETH_RSS_L4_DST_ONLY ETH_RSS_L4_DST_ONLY
+#define RTE_ETH_RX_OFFLOAD_RSS_HASH DEV_RX_OFFLOAD_RSS_HASH
+#endif
+#if RTE_VERSION >= RTE_VERSION_NUM(20,5,0,0)
+#define RTE_ETH_LINK_SPEED_200G ETH_LINK_SPEED_200G
+#define RTE_ETH_SPEED_NUM_200G ETH_SPEED_NUM_200G
+#define RTE_ETH_RSS_ETH ETH_RSS_ETH
+#define RTE_ETH_RSS_S_VLAN ETH_RSS_S_VLAN
+#define RTE_ETH_RSS_C_VLAN ETH_RSS_C_VLAN
+#define RTE_ETH_RSS_ESP ETH_RSS_ESP
+#define RTE_ETH_RSS_AH ETH_RSS_AH
+#define RTE_ETH_RSS_L2TPV3 ETH_RSS_L2TPV3
+#define RTE_ETH_RSS_PFCP ETH_RSS_PFCP
+#define RTE_ETH_RSS_L2_SRC_ONLY ETH_RSS_L2_SRC_ONLY
+#define RTE_ETH_RSS_L2_DST_ONLY ETH_RSS_L2_DST_ONLY
+#define RTE_ETH_RSS_VLAN ETH_RSS_VLAN
+#endif
+#if RTE_VERSION >= RTE_VERSION_NUM(20,8,0,0)
+#define RTE_ETH_RSS_PPPOE ETH_RSS_PPPOE
+#define RTE_ETH_RSS_IPV6_PRE32 ETH_RSS_IPV6_PRE32
+#define RTE_ETH_RSS_IPV6_PRE40 ETH_RSS_IPV6_PRE40
+#define RTE_ETH_RSS_IPV6_PRE48 ETH_RSS_IPV6_PRE48
+#define RTE_ETH_RSS_IPV6_PRE56 ETH_RSS_IPV6_PRE56
+#define RTE_ETH_RSS_IPV6_PRE64 ETH_RSS_IPV6_PRE64
+#define RTE_ETH_RSS_IPV6_PRE96 ETH_RSS_IPV6_PRE96
+#define RTE_ETH_RSS_IPV6_PRE32_UDP ETH_RSS_IPV6_PRE32_UDP
+#define RTE_ETH_RSS_IPV6_PRE40_UDP ETH_RSS_IPV6_PRE40_UDP
+#define RTE_ETH_RSS_IPV6_PRE48_UDP ETH_RSS_IPV6_PRE48_UDP
+#define RTE_ETH_RSS_IPV6_PRE56_UDP ETH_RSS_IPV6_PRE56_UDP
+#define RTE_ETH_RSS_IPV6_PRE64_UDP ETH_RSS_IPV6_PRE64_UDP
+#define RTE_ETH_RSS_IPV6_PRE96_UDP ETH_RSS_IPV6_PRE96_UDP
+#define RTE_ETH_RSS_IPV6_PRE32_TCP ETH_RSS_IPV6_PRE32_TCP
+#define RTE_ETH_RSS_IPV6_PRE40_TCP ETH_RSS_IPV6_PRE40_TCP
+#define RTE_ETH_RSS_IPV6_PRE48_TCP ETH_RSS_IPV6_PRE48_TCP
+#define RTE_ETH_RSS_IPV6_PRE56_TCP ETH_RSS_IPV6_PRE56_TCP
+#define RTE_ETH_RSS_IPV6_PRE64_TCP ETH_RSS_IPV6_PRE64_TCP
+#define RTE_ETH_RSS_IPV6_PRE96_TCP ETH_RSS_IPV6_PRE96_TCP
+#define RTE_ETH_RSS_IPV6_PRE32_SCTP ETH_RSS_IPV6_PRE32_SCTP
+#define RTE_ETH_RSS_IPV6_PRE40_SCTP ETH_RSS_IPV6_PRE40_SCTP
+#define RTE_ETH_RSS_IPV6_PRE48_SCTP ETH_RSS_IPV6_PRE48_SCTP
+#define RTE_ETH_RSS_IPV6_PRE56_SCTP ETH_RSS_IPV6_PRE56_SCTP
+#define RTE_ETH_RSS_IPV6_PRE64_SCTP ETH_RSS_IPV6_PRE64_SCTP
+#define RTE_ETH_RSS_IPV6_PRE96_SCTP ETH_RSS_IPV6_PRE96_SCTP
+#define RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP
+#endif
+#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
+#define RTE_ETH_SPEED_NUM_UNKNOWN ETH_SPEED_NUM_UNKNOWN
+#define RTE_ETH_RSS_ECPRI ETH_RSS_ECPRI
+#define RTE_ETH_RSS_LEVEL_PMD_DEFAULT ETH_RSS_LEVEL_PMD_DEFAULT
+#define RTE_ETH_RSS_LEVEL_OUTERMOST ETH_RSS_LEVEL_OUTERMOST
+#define RTE_ETH_RSS_LEVEL_INNERMOST ETH_RSS_LEVEL_INNERMOST
+#define RTE_ETH_RSS_LEVEL_MASK ETH_RSS_LEVEL_MASK
+#define RTE_ETH_RSS_LEVEL ETH_RSS_LEVEL
+#endif
+#if RTE_VERSION >= RTE_VERSION_NUM(21,2,0,0)
+#define RTE_ETH_RSS_MPLS ETH_RSS_MPLS
+#define RTE_ETH_TUNNEL_TYPE_ECPRI RTE_TUNNEL_TYPE_ECPRI
+#endif
+
+#ifndef DEV_RX_OFFLOAD_JUMBO_FRAME
+#define RTE_ETH_RX_OFFLOAD_JUMBO_FRAME 0x00000800
+#else
+#define RTE_ETH_RX_OFFLOAD_JUMBO_FRAME DEV_RX_OFFLOAD_JUMBO_FRAME
+#endif
+
+#ifndef DEV_RX_OFFLOAD_KEEP_CRC
+#ifndef DEV_RX_OFFLOAD_CRC_STRIP
+#define RTE_ETH_RX_OFFLOAD_CRC_STRIP 0x00001000
+#else
+#define RTE_ETH_RX_OFFLOAD_CRC_STRIP DEV_RX_OFFLOAD_CRC_STRIP
+#endif
+#define RTE_ETH_RX_OFFLOAD_KEEP_CRC _force_error_if_defined_
+#undef RTE_ETH_RX_OFFLOAD_KEEP_CRC
+
+#else
+#ifndef DEV_RX_OFFLOAD_CRC_STRIP
+#define RTE_ETH_RX_OFFLOAD_CRC_STRIP _force_error_if_defined_
+#undef RTE_ETH_RX_OFFLOAD_CRC_STRIP
+#else
+#define RTE_ETH_RX_OFFLOAD_CRC_STRIP DEV_RX_OFFLOAD_CRC_STRIP
+#endif
+#define RTE_ETH_RX_OFFLOAD_KEEP_CRC DEV_RX_OFFLOAD_KEEP_CRC
+#endif
+
+#else // >= 21.11
+#define RTE_ETH_RX_OFFLOAD_JUMBO_FRAME RTE_BIT64(11)
+#define RTE_ETH_RX_OFFLOAD_CRC_STRIP _force_error_if_defined_
+#undef RTE_ETH_RX_OFFLOAD_CRC_STRIP
+#endif
+
#endif // _PROX_COMPAT_H
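The large block above back-defines the newer RTE_* names on older DPDK releases so the rest of PROX can be written against a single set of identifiers. A minimal illustration of the pattern with one offload name taken from the patch; enable_vlan_strip() is hypothetical.

	#include <rte_version.h>
	#include <rte_ethdev.h>

	#if RTE_VERSION < RTE_VERSION_NUM(21, 11, 0, 0)
	#ifndef RTE_ETH_RX_OFFLOAD_VLAN_STRIP
	#define RTE_ETH_RX_OFFLOAD_VLAN_STRIP DEV_RX_OFFLOAD_VLAN_STRIP
	#endif
	#endif

	static inline void enable_vlan_strip(struct rte_eth_conf *conf)
	{
		/* The same source line builds on old and new DPDK. */
		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
	}

Gating each group of defines on the release that introduced the old symbol keeps the shim from referencing macros that do not exist yet on very old DPDK versions.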
diff --git a/VNFs/DPPD-PROX/prox_globals.h b/VNFs/DPPD-PROX/prox_globals.h
index b09f3a52..7463ded5 100644
--- a/VNFs/DPPD-PROX/prox_globals.h
+++ b/VNFs/DPPD-PROX/prox_globals.h
@@ -18,6 +18,7 @@
#define MAX_TASKS_PER_CORE 8
#define MAX_SOCKETS 64
#define MAX_NAME_SIZE 64
+#define MAX_NAME_BUFFER_SIZE 128
#define MAX_PROTOCOLS 3
#define MAX_RINGS_PER_TASK (MAX_WT_PER_LB*MAX_PROTOCOLS)
#define MAX_WT_PER_LB 64
diff --git a/VNFs/DPPD-PROX/prox_ipv6.c b/VNFs/DPPD-PROX/prox_ipv6.c
index 9425f4a0..f8ec147f 100644
--- a/VNFs/DPPD-PROX/prox_ipv6.c
+++ b/VNFs/DPPD-PROX/prox_ipv6.c
@@ -119,17 +119,34 @@ void create_mac_from_EUI(struct ipv6_addr *ipv6_addr, prox_rte_ether_addr *mac)
mac->addr_bytes[0] = mac->addr_bytes[0] ^ 0x02;
memcpy(&mac->addr_bytes[3], &ipv6_addr->bytes[13], 3);
}
-void build_router_advertisement(struct rte_mbuf *mbuf, prox_rte_ether_addr *s_addr, struct ipv6_addr *ipv6_s_addr, struct ipv6_addr *router_prefix)
+
+static inline prox_rte_ipv6_hdr *prox_set_vlan_ipv6(prox_rte_ether_hdr *peth, uint16_t vlan)
+{
+ prox_rte_ipv6_hdr *ipv6_hdr;
+
+ if (vlan) {
+ prox_rte_vlan_hdr *vlan_hdr = (prox_rte_vlan_hdr *)(peth + 1);
+ ipv6_hdr = (prox_rte_ipv6_hdr *)(vlan_hdr + 1);
+ peth->ether_type = ETYPE_VLAN;
+ vlan_hdr->eth_proto = ETYPE_IPv6;
+ vlan_hdr->vlan_tci = rte_cpu_to_be_16(vlan);
+ } else {
+ ipv6_hdr = (prox_rte_ipv6_hdr *)(peth + 1);
+ peth->ether_type = ETYPE_IPv6;
+ }
+ return ipv6_hdr;
+}
+
+void build_router_advertisement(struct rte_mbuf *mbuf, prox_rte_ether_addr *s_addr, struct ipv6_addr *ipv6_s_addr, struct ipv6_addr *router_prefix, uint16_t vlan)
{
prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
init_mbuf_seg(mbuf);
- mbuf->ol_flags &= ~(PKT_TX_IP_CKSUM|PKT_TX_UDP_CKSUM); // Software calculates the checksum
+ mbuf->ol_flags &= ~(RTE_MBUF_F_TX_IP_CKSUM|RTE_MBUF_F_TX_UDP_CKSUM); // Software calculates the checksum
memcpy(peth->d_addr.addr_bytes, &prox_cfg.all_nodes_mac_addr, sizeof(prox_rte_ether_addr));
memcpy(peth->s_addr.addr_bytes, s_addr, sizeof(prox_rte_ether_addr));
- peth->ether_type = ETYPE_IPv6;
- prox_rte_ipv6_hdr *ipv6_hdr = (prox_rte_ipv6_hdr *)(peth + 1);
+ prox_rte_ipv6_hdr *ipv6_hdr = prox_set_vlan_ipv6(peth, vlan);
ipv6_hdr->vtc_flow = 0x00000060;
ipv6_hdr->payload_len = rte_cpu_to_be_16(sizeof(struct icmpv6_RA) + sizeof(struct icmpv6_prefix_option));
ipv6_hdr->proto = ICMPv6;
@@ -165,22 +182,21 @@ void build_router_advertisement(struct rte_mbuf *mbuf, prox_rte_ether_addr *s_ad
router_advertisement->checksum = rte_ipv6_udptcp_cksum(ipv6_hdr, router_advertisement);
uint16_t pktlen = rte_be_to_cpu_16(ipv6_hdr->payload_len) + sizeof(prox_rte_ipv6_hdr) + sizeof(prox_rte_ether_hdr);
- rte_pktmbuf_pkt_len(mbuf) = pktlen;
- rte_pktmbuf_data_len(mbuf) = pktlen;
+ rte_pktmbuf_pkt_len(mbuf) = pktlen + (vlan ? 4 : 0);
+ rte_pktmbuf_data_len(mbuf) = pktlen + (vlan ? 4 : 0);
}
-void build_router_sollicitation(struct rte_mbuf *mbuf, prox_rte_ether_addr *s_addr, struct ipv6_addr *ipv6_s_addr)
+void build_router_sollicitation(struct rte_mbuf *mbuf, prox_rte_ether_addr *s_addr, struct ipv6_addr *ipv6_s_addr, uint16_t vlan)
{
prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
init_mbuf_seg(mbuf);
- mbuf->ol_flags &= ~(PKT_TX_IP_CKSUM|PKT_TX_UDP_CKSUM); // Software calculates the checksum
+ mbuf->ol_flags &= ~(RTE_MBUF_F_TX_IP_CKSUM|RTE_MBUF_F_TX_UDP_CKSUM); // Software calculates the checksum
memcpy(peth->d_addr.addr_bytes, &prox_cfg.all_routers_mac_addr, sizeof(prox_rte_ether_addr));
memcpy(peth->s_addr.addr_bytes, s_addr, sizeof(prox_rte_ether_addr));
- peth->ether_type = ETYPE_IPv6;
- prox_rte_ipv6_hdr *ipv6_hdr = (prox_rte_ipv6_hdr *)(peth + 1);
+ prox_rte_ipv6_hdr *ipv6_hdr = prox_set_vlan_ipv6(peth, vlan);
ipv6_hdr->vtc_flow = 0x00000060;
ipv6_hdr->payload_len = rte_cpu_to_be_16(sizeof(struct icmpv6_RS));
ipv6_hdr->proto = ICMPv6;
@@ -198,24 +214,24 @@ void build_router_sollicitation(struct rte_mbuf *mbuf, prox_rte_ether_addr *s_ad
router_sollicitation->checksum = 0;
router_sollicitation->checksum = rte_ipv6_udptcp_cksum(ipv6_hdr, router_sollicitation);
uint16_t pktlen = rte_be_to_cpu_16(ipv6_hdr->payload_len) + sizeof(prox_rte_ipv6_hdr) + sizeof(prox_rte_ether_hdr);
- rte_pktmbuf_pkt_len(mbuf) = pktlen;
- rte_pktmbuf_data_len(mbuf) = pktlen;
+ rte_pktmbuf_pkt_len(mbuf) = pktlen + (vlan ? 4 : 0);
+ rte_pktmbuf_data_len(mbuf) = pktlen + (vlan ? 4 : 0);
}
-void build_neighbour_sollicitation(struct rte_mbuf *mbuf, prox_rte_ether_addr *s_addr, struct ipv6_addr *dst, struct ipv6_addr *src)
+void build_neighbour_sollicitation(struct rte_mbuf *mbuf, prox_rte_ether_addr *s_addr, struct ipv6_addr *dst, struct ipv6_addr *src, uint16_t vlan)
{
prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
prox_rte_ether_addr mac_dst;
set_mcast_mac_from_ipv6(&mac_dst, dst);
init_mbuf_seg(mbuf);
- mbuf->ol_flags &= ~(PKT_TX_IP_CKSUM|PKT_TX_UDP_CKSUM); // Software calculates the checksum
+ mbuf->ol_flags &= ~(RTE_MBUF_F_TX_IP_CKSUM|RTE_MBUF_F_TX_UDP_CKSUM); // Software calculates the checksum
memcpy(peth->d_addr.addr_bytes, &mac_dst, sizeof(prox_rte_ether_addr));
memcpy(peth->s_addr.addr_bytes, s_addr, sizeof(prox_rte_ether_addr));
- peth->ether_type = ETYPE_IPv6;
- prox_rte_ipv6_hdr *ipv6_hdr = (prox_rte_ipv6_hdr *)(peth + 1);
+ prox_rte_ipv6_hdr *ipv6_hdr = prox_set_vlan_ipv6(peth, vlan);
+
ipv6_hdr->vtc_flow = 0x00000060;
ipv6_hdr->payload_len = rte_cpu_to_be_16(sizeof(struct icmpv6_NS));
ipv6_hdr->proto = ICMPv6;
@@ -235,20 +251,21 @@ void build_neighbour_sollicitation(struct rte_mbuf *mbuf, prox_rte_ether_addr *s
neighbour_sollicitation->checksum = rte_ipv6_udptcp_cksum(ipv6_hdr, neighbour_sollicitation);
uint16_t pktlen = rte_be_to_cpu_16(ipv6_hdr->payload_len) + sizeof(prox_rte_ipv6_hdr) + sizeof(prox_rte_ether_hdr);
- rte_pktmbuf_pkt_len(mbuf) = pktlen;
- rte_pktmbuf_data_len(mbuf) = pktlen;
+ rte_pktmbuf_pkt_len(mbuf) = pktlen + (vlan ? 4 : 0);
+ rte_pktmbuf_data_len(mbuf) = pktlen + (vlan ? 4 : 0);
}
-void build_neighbour_advertisement(struct task_base *tbase, struct rte_mbuf *mbuf, prox_rte_ether_addr *target, struct ipv6_addr *src_ipv6_addr, int sollicited)
+void build_neighbour_advertisement(struct task_base *tbase, struct rte_mbuf *mbuf, prox_rte_ether_addr *target, struct ipv6_addr *src_ipv6_addr, int sollicited, uint16_t vlan)
{
struct task_master *task = (struct task_master *)tbase;
prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
- prox_rte_ipv6_hdr *ipv6_hdr = (prox_rte_ipv6_hdr *)(peth + 1);
uint8_t port_id = get_port(mbuf);
init_mbuf_seg(mbuf);
- mbuf->ol_flags &= ~(PKT_TX_IP_CKSUM|PKT_TX_UDP_CKSUM); // Software calculates the checksum
+ mbuf->ol_flags &= ~(RTE_MBUF_F_TX_IP_CKSUM|RTE_MBUF_F_TX_UDP_CKSUM); // Software calculates the checksum
+
+ prox_rte_ipv6_hdr *ipv6_hdr = prox_set_vlan_ipv6(peth, vlan);
// If source mac is null, use all_nodes_mac_addr.
if ((!sollicited) || (memcmp(peth->s_addr.addr_bytes, &null_addr, sizeof(struct ipv6_addr)) == 0)) {
@@ -260,7 +277,6 @@ void build_neighbour_advertisement(struct task_base *tbase, struct rte_mbuf *mbu
}
memcpy(peth->s_addr.addr_bytes, &task->internal_port_table[port_id].mac, sizeof(prox_rte_ether_addr));
- peth->ether_type = ETYPE_IPv6;
ipv6_hdr->vtc_flow = 0x00000060;
ipv6_hdr->payload_len = rte_cpu_to_be_16(sizeof(struct icmpv6_NA));
@@ -297,6 +313,27 @@ void build_neighbour_advertisement(struct task_base *tbase, struct rte_mbuf *mbu
neighbour_advertisement->checksum = 0;
neighbour_advertisement->checksum = rte_ipv6_udptcp_cksum(ipv6_hdr, neighbour_advertisement);
uint16_t pktlen = rte_be_to_cpu_16(ipv6_hdr->payload_len) + sizeof(prox_rte_ipv6_hdr) + sizeof(prox_rte_ether_hdr);
- rte_pktmbuf_pkt_len(mbuf) = pktlen;
- rte_pktmbuf_data_len(mbuf) = pktlen;
+ rte_pktmbuf_pkt_len(mbuf) = pktlen + (vlan ? 4 : 0);
+ rte_pktmbuf_data_len(mbuf) = pktlen + (vlan ? 4 : 0);
+}
+
+prox_rte_ipv6_hdr *prox_get_ipv6_hdr(prox_rte_ether_hdr *hdr, uint16_t len, uint16_t *vlan)
+{
+ prox_rte_vlan_hdr *vlan_hdr;
+ prox_rte_ipv6_hdr *ipv6_hdr;
+ uint16_t ether_type = hdr->ether_type;
+ uint16_t l2_len = sizeof(prox_rte_ether_hdr);
+ ipv6_hdr = (prox_rte_ipv6_hdr *)(hdr + 1);
+
+ while (((ether_type == ETYPE_8021ad) || (ether_type == ETYPE_VLAN)) && (l2_len + sizeof(prox_rte_vlan_hdr) < len)) {
+ vlan_hdr = (prox_rte_vlan_hdr *)((uint8_t *)hdr + l2_len);
+ l2_len +=4;
+ ether_type = vlan_hdr->eth_proto;
+ *vlan = rte_be_to_cpu_16(vlan_hdr->vlan_tci & 0xFF0F);
+ ipv6_hdr = (prox_rte_ipv6_hdr *)(vlan_hdr + 1);
+ }
+ if (ether_type == ETYPE_IPv6)
+ return ipv6_hdr;
+ else
+ return NULL;
}
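
prox_get_ipv6_hdr() walks over any number of stacked 802.1Q/802.1ad tags (as long as they fit within the given frame length), reports the innermost VLAN ID through *vlan and returns NULL when the payload is not IPv6. A minimal sketch of a caller, in the spirit of the handle_ndp() change to rx_pkt.c further down in this patch (is_icmpv6_frame is hypothetical; the usual PROX headers are assumed):

#include <rte_mbuf.h>
#include "prox_ipv6.h"

/* Returns 1 when the frame carries ICMPv6, possibly behind one or more VLAN tags. */
static int is_icmpv6_frame(struct rte_mbuf *mbuf, uint16_t *vlan)
{
	*vlan = 0;	/* prox_get_ipv6_hdr() only writes *vlan when a tag is present */
	prox_rte_ether_hdr *hdr = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
	prox_rte_ipv6_hdr *ipv6_hdr = prox_get_ipv6_hdr(hdr, rte_pktmbuf_pkt_len(mbuf), vlan);
	return (ipv6_hdr != NULL) && (ipv6_hdr->proto == ICMPv6);
}
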
diff --git a/VNFs/DPPD-PROX/prox_ipv6.h b/VNFs/DPPD-PROX/prox_ipv6.h
index 48030054..e2ae7d61 100644
--- a/VNFs/DPPD-PROX/prox_ipv6.h
+++ b/VNFs/DPPD-PROX/prox_ipv6.h
@@ -132,9 +132,10 @@ void set_EUI(struct ipv6_addr *ipv6_addr, prox_rte_ether_addr *mac);
void create_mac_from_EUI(struct ipv6_addr *ipv6_addr, prox_rte_ether_addr *mac);
struct task_base;
-void build_router_sollicitation(struct rte_mbuf *mbuf, prox_rte_ether_addr *s_addr, struct ipv6_addr *ipv6_s_addr);
-void build_router_advertisement(struct rte_mbuf *mbuf, prox_rte_ether_addr *s_addr, struct ipv6_addr *ipv6_s_addr, struct ipv6_addr *router_prefix);
-void build_neighbour_sollicitation(struct rte_mbuf *mbuf, prox_rte_ether_addr *s_addr, struct ipv6_addr *dst, struct ipv6_addr *src);
-void build_neighbour_advertisement(struct task_base *tbase, struct rte_mbuf *mbuf, prox_rte_ether_addr *target_mac, struct ipv6_addr *ipv6_addr, int sollicited);
+prox_rte_ipv6_hdr *prox_get_ipv6_hdr(prox_rte_ether_hdr *hdr, uint16_t len, uint16_t *vlan);
+void build_router_sollicitation(struct rte_mbuf *mbuf, prox_rte_ether_addr *s_addr, struct ipv6_addr *ipv6_s_addr, uint16_t vlan);
+void build_router_advertisement(struct rte_mbuf *mbuf, prox_rte_ether_addr *s_addr, struct ipv6_addr *ipv6_s_addr, struct ipv6_addr *router_prefix, uint16_t vlan);
+void build_neighbour_sollicitation(struct rte_mbuf *mbuf, prox_rte_ether_addr *s_addr, struct ipv6_addr *dst, struct ipv6_addr *src, uint16_t vlan);
+void build_neighbour_advertisement(struct task_base *tbase, struct rte_mbuf *mbuf, prox_rte_ether_addr *target_mac, struct ipv6_addr *ipv6_addr, int sollicited, uint16_t vlan);
#endif /* _PROX_IP_V6_H_ */
diff --git a/VNFs/DPPD-PROX/prox_lua_types.h b/VNFs/DPPD-PROX/prox_lua_types.h
index ce6bd9d2..83cc73cd 100644
--- a/VNFs/DPPD-PROX/prox_lua_types.h
+++ b/VNFs/DPPD-PROX/prox_lua_types.h
@@ -73,7 +73,7 @@ struct ipv6_tun_binding_entry {
prox_rte_ether_addr next_hop_mac; // mac addr of next hop towards lwB4
uint32_t public_ipv4; // Public IPv4 address
uint16_t public_port; // Public base port (together with port mask, defines the Port Set)
-} __attribute__((__packed__));
+} __attribute__((__packed__)) __attribute__((__aligned__(2)));
struct ipv6_tun_binding_table {
uint32_t num_binding_entries;
diff --git a/VNFs/DPPD-PROX/prox_port_cfg.c b/VNFs/DPPD-PROX/prox_port_cfg.c
index b8b80a12..3b7f778d 100644
--- a/VNFs/DPPD-PROX/prox_port_cfg.c
+++ b/VNFs/DPPD-PROX/prox_port_cfg.c
@@ -173,13 +173,14 @@ static inline uint32_t get_netmask(uint8_t prefix)
return rte_cpu_to_be_32(~((1 << (32 - prefix)) - 1));
}
-static void set_ip_address(char *devname, uint32_t *ip, uint8_t prefix)
+static void set_ip_address(char *devname, uint32_t ip, uint8_t prefix)
{
struct ifreq ifreq;
struct sockaddr_in in_addr;
int fd, rc;
uint32_t netmask = get_netmask(prefix);
plog_info("Setting netmask to %x\n", netmask);
+ uint32_t ip_cpu = rte_be_to_cpu_32(ip);
fd = socket(AF_INET, SOCK_DGRAM, 0);
@@ -187,12 +188,12 @@ static void set_ip_address(char *devname, uint32_t *ip, uint8_t prefix)
memset(&in_addr, 0, sizeof(struct sockaddr_in));
in_addr.sin_family = AF_INET;
- in_addr.sin_addr = *(struct in_addr *)ip;
+ in_addr.sin_addr = *(struct in_addr *)&ip_cpu;
- strncpy(ifreq.ifr_name, devname, IFNAMSIZ);
+ prox_strncpy(ifreq.ifr_name, devname, IFNAMSIZ);
ifreq.ifr_addr = *(struct sockaddr *)&in_addr;
rc = ioctl(fd, SIOCSIFADDR, &ifreq);
- PROX_PANIC(rc < 0, "Failed to set IP address %x on device %s: error = %d (%s)\n", *ip, devname, errno, strerror(errno));
+ PROX_PANIC(rc < 0, "Failed to set IP address %x on device %s: error = %d (%s)\n", ip_cpu, devname, errno, strerror(errno));
in_addr.sin_addr = *(struct in_addr *)&netmask;
ifreq.ifr_netmask = *(struct sockaddr *)&in_addr;
@@ -210,15 +211,26 @@ void init_rte_dev(int use_dummy_devices)
const struct rte_pci_device *pci_dev;
for (uint8_t port_id = 0; port_id < PROX_MAX_PORTS; ++port_id) {
+ if (prox_port_cfg[port_id].active && (prox_port_cfg[port_id].virtual == 0) && (port_id >= prox_rte_eth_dev_count_avail())) {
+ PROX_PANIC(1, "port %u used but only %u available\n", port_id, prox_rte_eth_dev_count_avail());
+ }
+ }
+ for (uint8_t port_id = 0; port_id < PROX_MAX_PORTS; ++port_id) {
if (!prox_port_cfg[port_id].active) {
continue;
}
struct prox_port_cfg* port_cfg = &prox_port_cfg[port_id];
+
+ prox_port_cfg[port_id].n_vlans = 0;
+ while ((prox_port_cfg[port_id].n_vlans < PROX_MAX_VLAN_TAGS) && (prox_port_cfg[port_id].vlan_tags[prox_port_cfg[port_id].n_vlans])) {
+ prox_port_cfg[port_id].n_vlans++;
+ }
+
if (port_cfg->vdev[0]) {
- char name[MAX_NAME_SIZE], tap[MAX_NAME_SIZE];
+ char name[MAX_NAME_BUFFER_SIZE], tap[MAX_NAME_SIZE];
snprintf(tap, MAX_NAME_SIZE, "net_tap%d", port_id);
#if (RTE_VERSION > RTE_VERSION_NUM(17,5,0,1))
- snprintf(name, MAX_NAME_SIZE, "iface=%s", port_cfg->vdev);
+ snprintf(name, MAX_NAME_BUFFER_SIZE, "iface=%s", port_cfg->vdev);
rc = rte_vdev_init(tap, name);
#else
PROX_PANIC(1, "vdev not supported in DPDK < 17.05\n");
@@ -231,20 +243,30 @@ void init_rte_dev(int use_dummy_devices)
prox_port_cfg[vdev_port_id].active = 1;
prox_port_cfg[vdev_port_id].dpdk_mapping = port_id;
prox_port_cfg[vdev_port_id].n_txq = 1;
+ prox_port_cfg[vdev_port_id].n_vlans = prox_port_cfg[port_id].n_vlans;
- if (prox_port_cfg[port_id].vlan_tag) {
+ for (uint32_t tag_id = 0; tag_id < prox_port_cfg[port_id].n_vlans; tag_id++) {
+ prox_port_cfg[vdev_port_id].vlan_tags[tag_id] = prox_port_cfg[port_id].vlan_tags[tag_id];
char command[1024];
- snprintf(prox_port_cfg[vdev_port_id].name, MAX_NAME_SIZE, "%s_%d", port_cfg->vdev, prox_port_cfg[port_id].vlan_tag);
- sprintf(command, "ip link add link %s name %s type vlan id %d", port_cfg->vdev, prox_port_cfg[vdev_port_id].name, prox_port_cfg[port_id].vlan_tag);
+ snprintf(prox_port_cfg[vdev_port_id].names[tag_id], MAX_NAME_BUFFER_SIZE, "%s_%d", port_cfg->vdev, prox_port_cfg[port_id].vlan_tags[tag_id]);
+ sprintf(command, "ip link add link %s name %s type vlan id %d", port_cfg->vdev, prox_port_cfg[vdev_port_id].names[tag_id], prox_port_cfg[port_id].vlan_tags[tag_id]);
system(command);
- plog_info("Running %s\n", command);
- plog_info("Using vlan tag %d - added device %s\n", prox_port_cfg[port_id].vlan_tag, prox_port_cfg[vdev_port_id].name);
- } else
- strncpy(prox_port_cfg[vdev_port_id].name, port_cfg->vdev, MAX_NAME_SIZE);
+ plog_info("\tRunning %s\n", command);
+ plog_info("\tUsing vlan tag %d - added device %s\n", prox_port_cfg[port_id].vlan_tags[tag_id], prox_port_cfg[vdev_port_id].names[tag_id]);
+ }
+ if (prox_port_cfg[port_id].n_vlans == 0) {
+ strncpy(prox_port_cfg[vdev_port_id].names[0], port_cfg->vdev, MAX_NAME_SIZE);
+ prox_port_cfg[vdev_port_id].n_vlans = 1;
+ prox_port_cfg[vdev_port_id].vlan_tags[0] = 0;
+ }
prox_port_cfg[port_id].dpdk_mapping = vdev_port_id;
- prox_port_cfg[vdev_port_id].ip = rte_be_to_cpu_32(prox_port_cfg[port_id].ip);
- prox_port_cfg[vdev_port_id].prefix = prox_port_cfg[port_id].prefix;
+ uint32_t i = 0;
+ while ((i < PROX_MAX_VLAN_TAGS) && (prox_port_cfg[port_id].ip_addr[i].ip)) {
+ prox_port_cfg[vdev_port_id].ip_addr[i].ip = prox_port_cfg[port_id].ip_addr[i].ip;
+ prox_port_cfg[vdev_port_id].ip_addr[i].prefix = prox_port_cfg[port_id].ip_addr[i].prefix;
+ i++;
+ }
prox_port_cfg[vdev_port_id].type = prox_port_cfg[port_id].type;
if (prox_port_cfg[vdev_port_id].type == PROX_PORT_MAC_HW) {
// If DPDK port MAC set to HW, then make sure the vdev has the same MAC as DPDK port
@@ -255,6 +277,10 @@ void init_rte_dev(int use_dummy_devices)
} else
memcpy(&prox_port_cfg[vdev_port_id].eth_addr, &prox_port_cfg[port_id].eth_addr, sizeof(prox_port_cfg[port_id].eth_addr));
}
+ if (prox_port_cfg[port_id].n_vlans == 0) {
+ prox_port_cfg[port_id].n_vlans = 1;
+ prox_port_cfg[port_id].vlan_tags[0] = 0;
+ }
}
nb_ports = prox_rte_eth_dev_count_avail();
/* get available ports configuration */
@@ -359,6 +385,11 @@ void init_rte_dev(int use_dummy_devices)
if ((ptr = strstr(port_cfg->short_name, "_pmd")) != NULL) {
*ptr = '\x0';
}
+ // Set socket for vdev device identical to socket of corresponding port
+ if (prox_port_cfg[port_id].is_vdev) {
+ prox_port_cfg[port_id].socket = prox_port_cfg[prox_port_cfg[port_id].dpdk_mapping].socket;
+ continue;
+ }
#if RTE_VERSION < RTE_VERSION_NUM(18,5,0,0)
pci_dev = dev_info.pci_dev;
@@ -388,20 +419,20 @@ void init_rte_dev(int use_dummy_devices)
}
// In DPDK 18.08 vmxnet3 reports it supports IPV4 checksum, but packets does not go through when IPv4 cksum is enabled
- if ((!strcmp(port_cfg->short_name, "vmxnet3")) && (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM)) {
+ if ((!strcmp(port_cfg->short_name, "vmxnet3")) && (port_cfg->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)) {
plog_info("\t\tDisabling IPV4 cksum on vmxnet3\n");
- port_cfg->disabled_tx_offload |= DEV_TX_OFFLOAD_IPV4_CKSUM;
+ port_cfg->disabled_tx_offload |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
}
- if ((!strcmp(port_cfg->short_name, "vmxnet3")) && (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM)) {
+ if ((!strcmp(port_cfg->short_name, "vmxnet3")) && (port_cfg->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)) {
plog_info("\t\tDisabling UDP cksum on vmxnet3\n");
- port_cfg->disabled_tx_offload |= DEV_TX_OFFLOAD_UDP_CKSUM;
+ port_cfg->disabled_tx_offload |= RTE_ETH_TX_OFFLOAD_UDP_CKSUM;
}
// Some OVS versions reports that they support UDP offload and no IPv4 offload, but fails when UDP offload is enabled
if ((!strcmp(port_cfg->short_name, "virtio")) &&
- ((port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) == 0) &&
- (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM)) {
+ ((port_cfg->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) == 0) &&
+ (port_cfg->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)) {
plog_info("\t\tDisabling UDP cksum on virtio\n");
- port_cfg->disabled_tx_offload |= DEV_TX_OFFLOAD_UDP_CKSUM;
+ port_cfg->disabled_tx_offload |= RTE_ETH_TX_OFFLOAD_UDP_CKSUM;
}
}
}
@@ -425,7 +456,7 @@ uint8_t init_rte_ring_dev(void)
struct rte_ring* tx_ring = rte_ring_lookup(port_cfg->tx_ring);
PROX_PANIC(tx_ring == NULL, "Ring %s not found for port %d!\n", port_cfg->tx_ring, port_id);
- int ret = rte_eth_from_rings(port_cfg->name, &rx_ring, 1, &tx_ring, 1, rte_socket_id());
+ int ret = rte_eth_from_rings(port_cfg->names[0], &rx_ring, 1, &tx_ring, 1, rte_socket_id());
PROX_PANIC(ret != 0, "Failed to create eth_dev from rings for port %d\n", port_id);
port_cfg->port_conf.intr_conf.lsc = 0; /* Link state interrupt not supported for ring-backed ports */
@@ -443,7 +474,7 @@ static void print_port_capa(struct prox_port_cfg *port_cfg)
port_id = port_cfg - prox_port_cfg;
plog_info("\t*** Initializing port %u ***\n", port_id);
- plog_info("\t\tPort name is set to %s\n", port_cfg->name);
+ plog_info("\t\tPort name is set to %s\n", port_cfg->names[0]);
plog_info("\t\tPort max RX/TX queue is %u/%u\n", port_cfg->max_rxq, port_cfg->max_txq);
plog_info("\t\tPort driver is %s\n", port_cfg->driver_name);
#if RTE_VERSION >= RTE_VERSION_NUM(16,4,0,0)
@@ -455,84 +486,86 @@ static void print_port_capa(struct prox_port_cfg *port_cfg)
#if RTE_VERSION >= RTE_VERSION_NUM(18,8,0,1)
plog_info("\t\tRX offload capa = 0x%lx = ", port_cfg->dev_info.rx_offload_capa);
- if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (port_cfg->dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
plog_info("VLAN STRIP | ");
- if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM)
+ if (port_cfg->dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM)
plog_info("IPV4 CKSUM | ");
- if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM)
+ if (port_cfg->dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_UDP_CKSUM)
plog_info("UDP CKSUM | ");
- if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM)
+ if (port_cfg->dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
plog_info("TCP CKSUM | ");
- if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
+ if (port_cfg->dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TCP_LRO)
plog_info("TCP LRO | ");
- if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP)
+ if (port_cfg->dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
plog_info("QINQ STRIP | ");
- if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)
+ if (port_cfg->dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM)
plog_info("OUTER_IPV4_CKSUM | ");
- if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_MACSEC_STRIP)
+ if (port_cfg->dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_MACSEC_STRIP)
plog_info("MACSEC STRIP | ");
- if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_HEADER_SPLIT)
+#if defined(RTE_ETH_RX_OFFLOAD_HEADER_SPLIT)
+ if (port_cfg->dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_HEADER_SPLIT)
plog_info("HEADER SPLIT | ");
- if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_FILTER)
+#endif
+ if (port_cfg->dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
plog_info("VLAN FILTER | ");
- if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_EXTEND)
+ if (port_cfg->dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
plog_info("VLAN EXTEND | ");
- if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME)
+ if (port_cfg->dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME)
plog_info("JUMBO FRAME | ");
-#if defined(DEV_RX_OFFLOAD_CRC_STRIP)
- if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_CRC_STRIP)
+#if defined(RTE_ETH_RX_OFFLOAD_CRC_STRIP)
+ if (port_cfg->dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_CRC_STRIP)
plog_info("CRC STRIP | ");
#endif
-#if defined(DEV_RX_OFFLOAD_KEEP_CRC)
- if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_KEEP_CRC)
+#if defined(RTE_ETH_RX_OFFLOAD_KEEP_CRC)
+ if (port_cfg->dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
plog_info("KEEP CRC | ");
#endif
- if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SCATTER)
+ if (port_cfg->dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_SCATTER)
plog_info("SCATTER | ");
- if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP)
+ if (port_cfg->dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
plog_info("TIMESTAMP | ");
- if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY)
+ if (port_cfg->dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_SECURITY)
plog_info("SECURITY ");
plog_info("\n");
plog_info("\t\tTX offload capa = 0x%lx = ", port_cfg->dev_info.tx_offload_capa);
- if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT)
+ if (port_cfg->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
plog_info("VLAN INSERT | ");
- if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM)
+ if (port_cfg->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
plog_info("IPV4 CKSUM | ");
- if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM)
+ if (port_cfg->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)
plog_info("UDP CKSUM | ");
- if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM)
+ if (port_cfg->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
plog_info("TCP CKSUM | ");
- if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM)
+ if (port_cfg->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)
plog_info("SCTP CKSUM | ");
- if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO)
+ if (port_cfg->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_TSO)
plog_info("TCP TS0 | ");
- if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TSO)
+ if (port_cfg->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_UDP_TSO)
plog_info("UDP TSO | ");
- if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
+ if (port_cfg->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)
plog_info("OUTER IPV4 CKSUM | ");
- if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT)
+ if (port_cfg->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
plog_info("QINQ INSERT | ");
- if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO)
+ if (port_cfg->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO)
plog_info("VLAN TNL TSO | ");
- if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO)
+ if (port_cfg->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO)
plog_info("GRE TNL TSO | ");
- if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO)
+ if (port_cfg->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO)
plog_info("IPIP TNL TSO | ");
- if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO)
+ if (port_cfg->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO)
plog_info("GENEVE TNL TSO | ");
- if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MACSEC_INSERT)
+ if (port_cfg->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT)
plog_info("MACSEC INSERT | ");
- if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MT_LOCKFREE)
+ if (port_cfg->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MT_LOCKFREE)
plog_info("MT LOCKFREE | ");
- if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MULTI_SEGS)
+ if (port_cfg->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
plog_info("MULTI SEG | ");
- if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SECURITY)
+ if (port_cfg->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_SECURITY)
plog_info("SECURITY | ");
- if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO)
+ if (port_cfg->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO)
plog_info("UDP TNL TSO | ");
- if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO)
+ if (port_cfg->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IP_TNL_TSO)
plog_info("IP TNL TSO | ");
plog_info("\n");
@@ -557,30 +590,30 @@ static void get_max_link_speed(struct prox_port_cfg *port_cfg)
// or rte_eth_link_get_nowait) might be reported too late
// and might result in wrong exrapolation, and hence should not be used
// for extrapolation purposes
- if (port_cfg->dev_info.speed_capa & ETH_LINK_SPEED_100G)
- port_cfg->max_link_speed = ETH_SPEED_NUM_100G;
- else if (port_cfg->dev_info.speed_capa & ETH_LINK_SPEED_56G)
- port_cfg->max_link_speed = ETH_SPEED_NUM_56G;
- else if (port_cfg->dev_info.speed_capa & ETH_LINK_SPEED_50G)
- port_cfg->max_link_speed = ETH_SPEED_NUM_50G;
- else if (port_cfg->dev_info.speed_capa & ETH_LINK_SPEED_40G)
- port_cfg->max_link_speed = ETH_SPEED_NUM_40G;
- else if (port_cfg->dev_info.speed_capa & ETH_LINK_SPEED_25G)
- port_cfg->max_link_speed = ETH_SPEED_NUM_25G;
- else if (port_cfg->dev_info.speed_capa & ETH_LINK_SPEED_20G)
- port_cfg->max_link_speed = ETH_SPEED_NUM_20G;
- else if (port_cfg->dev_info.speed_capa & ETH_LINK_SPEED_10G)
- port_cfg->max_link_speed = ETH_SPEED_NUM_10G;
- else if (port_cfg->dev_info.speed_capa & ETH_LINK_SPEED_5G)
- port_cfg->max_link_speed = ETH_SPEED_NUM_5G;
- else if (port_cfg->dev_info.speed_capa & ETH_LINK_SPEED_2_5G)
- port_cfg->max_link_speed = ETH_SPEED_NUM_2_5G;
- else if (port_cfg->dev_info.speed_capa & ETH_LINK_SPEED_1G)
- port_cfg->max_link_speed = ETH_SPEED_NUM_1G;
- else if (port_cfg->dev_info.speed_capa & (ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M))
- port_cfg->max_link_speed = ETH_SPEED_NUM_100M;
- else if (port_cfg->dev_info.speed_capa & (ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M))
- port_cfg->max_link_speed = ETH_SPEED_NUM_10M;
+ if (port_cfg->dev_info.speed_capa & RTE_ETH_LINK_SPEED_100G)
+ port_cfg->max_link_speed = RTE_ETH_SPEED_NUM_100G;
+ else if (port_cfg->dev_info.speed_capa & RTE_ETH_LINK_SPEED_56G)
+ port_cfg->max_link_speed = RTE_ETH_SPEED_NUM_56G;
+ else if (port_cfg->dev_info.speed_capa & RTE_ETH_LINK_SPEED_50G)
+ port_cfg->max_link_speed = RTE_ETH_SPEED_NUM_50G;
+ else if (port_cfg->dev_info.speed_capa & RTE_ETH_LINK_SPEED_40G)
+ port_cfg->max_link_speed = RTE_ETH_SPEED_NUM_40G;
+ else if (port_cfg->dev_info.speed_capa & RTE_ETH_LINK_SPEED_25G)
+ port_cfg->max_link_speed = RTE_ETH_SPEED_NUM_25G;
+ else if (port_cfg->dev_info.speed_capa & RTE_ETH_LINK_SPEED_20G)
+ port_cfg->max_link_speed = RTE_ETH_SPEED_NUM_20G;
+ else if (port_cfg->dev_info.speed_capa & RTE_ETH_LINK_SPEED_10G)
+ port_cfg->max_link_speed = RTE_ETH_SPEED_NUM_10G;
+ else if (port_cfg->dev_info.speed_capa & RTE_ETH_LINK_SPEED_5G)
+ port_cfg->max_link_speed = RTE_ETH_SPEED_NUM_5G;
+ else if (port_cfg->dev_info.speed_capa & RTE_ETH_LINK_SPEED_2_5G)
+ port_cfg->max_link_speed = RTE_ETH_SPEED_NUM_2_5G;
+ else if (port_cfg->dev_info.speed_capa & RTE_ETH_LINK_SPEED_1G)
+ port_cfg->max_link_speed = RTE_ETH_SPEED_NUM_1G;
+ else if (port_cfg->dev_info.speed_capa & (RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M))
+ port_cfg->max_link_speed = RTE_ETH_SPEED_NUM_100M;
+ else if (port_cfg->dev_info.speed_capa & (RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M))
+ port_cfg->max_link_speed = RTE_ETH_SPEED_NUM_10M;
}
#endif
@@ -638,48 +671,54 @@ static void init_port(struct prox_port_cfg *port_cfg)
if (port_cfg->n_rxq > 1) {
// Enable RSS if multiple receive queues
- port_cfg->port_conf.rxmode.mq_mode |= ETH_MQ_RX_RSS;
- port_cfg->port_conf.rx_adv_conf.rss_conf.rss_key = toeplitz_init_key;
- port_cfg->port_conf.rx_adv_conf.rss_conf.rss_key_len = TOEPLITZ_KEY_LEN;
+ if (strcmp(port_cfg->short_name, "virtio")) {
+ port_cfg->port_conf.rxmode.mq_mode |= RTE_ETH_MQ_RX_RSS;
+ port_cfg->port_conf.rx_adv_conf.rss_conf.rss_key = toeplitz_init_key;
+ port_cfg->port_conf.rx_adv_conf.rss_conf.rss_key_len = TOEPLITZ_KEY_LEN;
#if RTE_VERSION >= RTE_VERSION_NUM(2,0,0,0)
- port_cfg->port_conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP|ETH_RSS_UDP;
+ port_cfg->port_conf.rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_IP|RTE_ETH_RSS_UDP;
#else
- port_cfg->port_conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IPV4|ETH_RSS_NONF_IPV4_UDP;
+ port_cfg->port_conf.rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_IPV4|ETH_RSS_NONF_IPV4_UDP;
#endif
+ }
}
// Make sure that the requested RSS offload is supported by the PMD
#if RTE_VERSION >= RTE_VERSION_NUM(2,0,0,0)
port_cfg->port_conf.rx_adv_conf.rss_conf.rss_hf &= port_cfg->dev_info.flow_type_rss_offloads;
#endif
- plog_info("\t\t Enabling RSS rss_hf = 0x%lx (requested 0x%llx, supported 0x%lx)\n", port_cfg->port_conf.rx_adv_conf.rss_conf.rss_hf, ETH_RSS_IP|ETH_RSS_UDP, port_cfg->dev_info.flow_type_rss_offloads);
+ if (strcmp(port_cfg->short_name, "virtio")) {
+ plog_info("\t\t Enabling RSS rss_hf = 0x%lx (requested 0x%llx, supported 0x%lx)\n", port_cfg->port_conf.rx_adv_conf.rss_conf.rss_hf, RTE_ETH_RSS_IP|RTE_ETH_RSS_UDP, port_cfg->dev_info.flow_type_rss_offloads);
+ } else {
+ plog_info("\t\t Not enabling RSS on virtio port");
+ }
// rxmode such as hw src strip
#if RTE_VERSION >= RTE_VERSION_NUM(18,8,0,1)
-#if defined (DEV_RX_OFFLOAD_CRC_STRIP)
- CONFIGURE_RX_OFFLOAD(DEV_RX_OFFLOAD_CRC_STRIP);
+#if defined (RTE_ETH_RX_OFFLOAD_CRC_STRIP)
+ CONFIGURE_RX_OFFLOAD(RTE_ETH_RX_OFFLOAD_CRC_STRIP);
#endif
-#if defined (DEV_RX_OFFLOAD_KEEP_CRC)
- CONFIGURE_RX_OFFLOAD(DEV_RX_OFFLOAD_KEEP_CRC);
+#if defined (RTE_ETH_RX_OFFLOAD_KEEP_CRC)
+ CONFIGURE_RX_OFFLOAD(RTE_ETH_RX_OFFLOAD_KEEP_CRC);
#endif
- CONFIGURE_RX_OFFLOAD(DEV_RX_OFFLOAD_JUMBO_FRAME);
- CONFIGURE_RX_OFFLOAD(DEV_RX_OFFLOAD_VLAN_STRIP);
+ CONFIGURE_RX_OFFLOAD(RTE_ETH_RX_OFFLOAD_JUMBO_FRAME);
+ CONFIGURE_RX_OFFLOAD(RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
#else
- if (port_cfg->requested_rx_offload & DEV_RX_OFFLOAD_CRC_STRIP) {
+ if (port_cfg->requested_rx_offload & RTE_ETH_RX_OFFLOAD_CRC_STRIP) {
port_cfg->port_conf.rxmode.hw_strip_crc = 1;
}
- if (port_cfg->requested_rx_offload & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+ if (port_cfg->requested_rx_offload & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME) {
port_cfg->port_conf.rxmode.jumbo_frame = 1;
}
#endif
// IPV4, UDP, SCTP Checksums
#if RTE_VERSION >= RTE_VERSION_NUM(18,8,0,1)
- CONFIGURE_TX_OFFLOAD(DEV_TX_OFFLOAD_IPV4_CKSUM);
- CONFIGURE_TX_OFFLOAD(DEV_TX_OFFLOAD_UDP_CKSUM);
- CONFIGURE_TX_OFFLOAD(DEV_TX_OFFLOAD_VLAN_INSERT);
+ CONFIGURE_TX_OFFLOAD(RTE_ETH_TX_OFFLOAD_IPV4_CKSUM);
+ CONFIGURE_TX_OFFLOAD(RTE_ETH_TX_OFFLOAD_UDP_CKSUM);
+ CONFIGURE_TX_OFFLOAD(RTE_ETH_TX_OFFLOAD_VLAN_INSERT);
#else
- if ((port_cfg->dev_info.tx_offload_capa & (DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM)) == 0) {
+ if ((port_cfg->dev_info.tx_offload_capa & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_UDP_CKSUM)) == 0) {
port_cfg->tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOOFFLOADS;
plog_info("\t\tDisabling TX offloads as pmd reports that it does not support them)\n");
}
@@ -690,7 +729,7 @@ static void init_port(struct prox_port_cfg *port_cfg)
#endif
// Multi Segments
#if RTE_VERSION >= RTE_VERSION_NUM(18,8,0,1)
- CONFIGURE_TX_OFFLOAD(DEV_TX_OFFLOAD_MULTI_SEGS);
+ CONFIGURE_TX_OFFLOAD(RTE_ETH_TX_OFFLOAD_MULTI_SEGS);
#else
if (!strcmp(port_cfg->short_name, "vmxnet3")) {
port_cfg->tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
@@ -708,7 +747,7 @@ static void init_port(struct prox_port_cfg *port_cfg)
// Refcount
#if RTE_VERSION >= RTE_VERSION_NUM(18,8,0,1)
- CONFIGURE_TX_OFFLOAD(DEV_TX_OFFLOAD_MBUF_FAST_FREE);
+ CONFIGURE_TX_OFFLOAD(RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE);
#else
if (port_cfg->tx_conf.txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT)
plog_info("\t\tEnabling No refcnt on port %d\n", port_id);
@@ -796,8 +835,10 @@ static void init_port(struct prox_port_cfg *port_cfg)
PROX_PANIC(ret < 0, "\n\t\t\trte_eth_dev_start() failed on port %u: error %d\n", port_id, ret);
plog_info(" done: ");
- if ((prox_port_cfg[port_id].ip) && (prox_port_cfg[port_id].is_vdev)) {
- set_ip_address(prox_port_cfg[port_id].name, &prox_port_cfg[port_id].ip, prox_port_cfg[port_id].prefix);
+ if (prox_port_cfg[port_id].is_vdev) {
+ for (int vlan_id = 0; vlan_id < prox_port_cfg[port_id].n_vlans; vlan_id++) {
+ set_ip_address(prox_port_cfg[port_id].names[vlan_id], prox_port_cfg[port_id].ip_addr[vlan_id].ip, prox_port_cfg[port_id].ip_addr[vlan_id].prefix);
+ }
}
/* Getting link status can be done without waiting if Link
State Interrupt is enabled since in that case, if the link
@@ -814,7 +855,7 @@ static void init_port(struct prox_port_cfg *port_cfg)
if (link.link_status) {
plog_info("Link Up - speed %'u Mbps - %s\n",
link.link_speed,
- (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+ (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
"full-duplex" : "half-duplex");
}
else {
diff --git a/VNFs/DPPD-PROX/prox_port_cfg.h b/VNFs/DPPD-PROX/prox_port_cfg.h
index 9d025999..82d58f76 100644
--- a/VNFs/DPPD-PROX/prox_port_cfg.h
+++ b/VNFs/DPPD-PROX/prox_port_cfg.h
@@ -21,19 +21,25 @@
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_version.h>
+#if RTE_VERSION >= RTE_VERSION_NUM(22,11,0,0)
+#include <bus_pci_driver.h> // Please configure DPDK with meson option -Denable_driver_sdk=true
+#else
#if RTE_VERSION >= RTE_VERSION_NUM(17,11,0,0)
#include <rte_bus_pci.h>
#endif
+#endif
#include <rte_pci.h>
#include "prox_compat.h"
#include "prox_globals.h"
+#include "ip_subnet.h"
enum addr_type {PROX_PORT_MAC_HW, PROX_PORT_MAC_SET, PROX_PORT_MAC_RAND};
#define IPV4_CKSUM 1
#define UDP_CKSUM 2
#define NB_MCAST_ADDR 16
+#define PROX_MAX_VLAN_TAGS 256
struct prox_port_cfg {
struct rte_mempool *pool[32]; /* Rx/Tx mempool */
@@ -55,7 +61,7 @@ struct prox_port_cfg {
uint32_t mtu;
enum addr_type type;
prox_rte_ether_addr eth_addr; /* port MAC address */
- char name[MAX_NAME_SIZE];
+ char names[PROX_MAX_VLAN_TAGS][MAX_NAME_BUFFER_SIZE];
char vdev[MAX_NAME_SIZE];
char short_name[MAX_NAME_SIZE];
char driver_name[MAX_NAME_SIZE];
@@ -82,11 +88,14 @@ struct prox_port_cfg {
uint8_t available;
prox_rte_ether_addr mc_addr[NB_MCAST_ADDR];
int dpdk_mapping;
- uint32_t ip;
- int fd;
- uint32_t vlan_tag;
- uint8_t prefix;
+ struct ip4_subnet ip_addr[PROX_MAX_VLAN_TAGS];
+ int fds[PROX_MAX_VLAN_TAGS];
+ uint32_t vlan_tags[PROX_MAX_VLAN_TAGS];
uint8_t is_vdev;
+ uint8_t virtual;
+ uint8_t all_rx_queues;
+ uint16_t n_vlans;
+ uint32_t v6_mask_length;
};
extern rte_atomic32_t lsc;
diff --git a/VNFs/DPPD-PROX/prox_shared.c b/VNFs/DPPD-PROX/prox_shared.c
index 52f4eb18..de26441d 100644
--- a/VNFs/DPPD-PROX/prox_shared.c
+++ b/VNFs/DPPD-PROX/prox_shared.c
@@ -55,6 +55,7 @@ static void prox_sh_create_hash(struct prox_shared *ps, size_t size)
{
param.entries = size;
param.name = get_sh_name();
+ param.socket_id = rte_socket_id();
ps->hash = rte_hash_create(&param);
PROX_PANIC(ps->hash == NULL, "Failed to create hash table for shared data");
ps->size = size;
diff --git a/VNFs/DPPD-PROX/qinq.h b/VNFs/DPPD-PROX/qinq.h
index 1d11114d..03c89b9b 100644
--- a/VNFs/DPPD-PROX/qinq.h
+++ b/VNFs/DPPD-PROX/qinq.h
@@ -36,6 +36,6 @@ struct qinq_hdr {
struct my_vlan_hdr svlan;
struct my_vlan_hdr cvlan;
uint16_t ether_type;
-} __attribute__((packed));
+} __attribute__((packed)) __attribute__((__aligned__(2)));
#endif /* _QINQ_H_ */
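
Both here and in prox_lua_types.h above, __aligned__(2) is added next to packed: the byte-exact wire layout is unchanged, but the structure is now guaranteed to start on an even address, matching the 2-byte alignment that DPDK's Ethernet address and header types expect and avoiding unaligned-access complaints from newer compilers. The same pattern on a small, hypothetical struct:

#include <stdint.h>

struct example_tag {
	uint16_t tpid;	/* outer ether type, e.g. 0x88a8 or 0x8100 */
	uint16_t tci;	/* PCP/DEI/VLAN-ID */
} __attribute__((packed)) __attribute__((__aligned__(2)));

_Static_assert(sizeof(struct example_tag) == 4, "wire size unchanged by aligned(2)");
_Static_assert(_Alignof(struct example_tag) == 2, "2-byte minimum alignment restored");
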
diff --git a/VNFs/DPPD-PROX/rw_reg.c b/VNFs/DPPD-PROX/rw_reg.c
index a0e59085..b4f6c214 100644
--- a/VNFs/DPPD-PROX/rw_reg.c
+++ b/VNFs/DPPD-PROX/rw_reg.c
@@ -14,6 +14,10 @@
// limitations under the License.
*/
+#include <rte_version.h>
+#if RTE_VERSION >= RTE_VERSION_NUM(21,11,0,0)
+#include <ethdev_driver.h> // Please configure DPDK with meson option -Denable_driver_sdk=true
+#endif
#include <rte_ethdev.h>
#include "rw_reg.h"
diff --git a/VNFs/DPPD-PROX/rx_pkt.c b/VNFs/DPPD-PROX/rx_pkt.c
index 1fd5ca85..e1756cb3 100644
--- a/VNFs/DPPD-PROX/rx_pkt.c
+++ b/VNFs/DPPD-PROX/rx_pkt.c
@@ -180,9 +180,11 @@ static inline int handle_l3(struct task_base *tbase, uint16_t nb_rx, struct rte_
static inline int handle_ndp(struct task_base *tbase, uint16_t nb_rx, struct rte_mbuf ***mbufs_ptr)
{
struct rte_mbuf **mbufs = *mbufs_ptr;
+ prox_rte_ipv6_hdr *ipv6_hdr;
int i;
prox_rte_ether_hdr *hdr[MAX_PKT_BURST];
int skip = 0;
+ uint16_t vlan = 0;
for (i = 0; i < nb_rx; i++) {
PREFETCH0(mbufs[i]);
@@ -192,8 +194,8 @@ static inline int handle_ndp(struct task_base *tbase, uint16_t nb_rx, struct rte
PREFETCH0(hdr[i]);
}
for (i = 0; i < nb_rx; i++) {
- prox_rte_ipv6_hdr *ipv6_hdr = (prox_rte_ipv6_hdr *)(hdr[i] + 1);
- if (unlikely((hdr[i]->ether_type == ETYPE_IPv6) && (ipv6_hdr->proto == ICMPv6))) {
+ ipv6_hdr = prox_get_ipv6_hdr(hdr[i], rte_pktmbuf_pkt_len(mbufs[i]), &vlan);
+ if (unlikely((ipv6_hdr) && (ipv6_hdr->proto == ICMPv6))) {
dump_l3(tbase, mbufs[i]);
tx_ring(tbase, tbase->l3.ctrl_plane_ring, NDP_PKT_FROM_NET_TO_MASTER, mbufs[i]);
skip++;
diff --git a/VNFs/DPPD-PROX/stats_irq.h b/VNFs/DPPD-PROX/stats_irq.h
index 71ff80f7..9a3f6c2f 100644
--- a/VNFs/DPPD-PROX/stats_irq.h
+++ b/VNFs/DPPD-PROX/stats_irq.h
@@ -51,7 +51,7 @@ struct irq_task_stats {
struct irq_rt_stats *stats;
};
-uint64_t irq_bucket_maxtime_cycles[IRQ_BUCKETS_COUNT];
+extern uint64_t irq_bucket_maxtime_cycles[IRQ_BUCKETS_COUNT];
extern uint64_t irq_bucket_maxtime_micro[];
void stats_irq_reset(void);
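
Declaring irq_bucket_maxtime_cycles extern removes the tentative definition from the header: with GCC 10+ defaulting to -fno-common, a non-extern array in a header included from several .c files causes multiple-definition link errors. Exactly one translation unit must now own the definition, presumably stats_irq.c, along these lines:

/* In a single .c file (presumably stats_irq.c): */
#include "stats_irq.h"

uint64_t irq_bucket_maxtime_cycles[IRQ_BUCKETS_COUNT];
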
diff --git a/VNFs/DPPD-PROX/stats_latency.c b/VNFs/DPPD-PROX/stats_latency.c
index 58bad6fa..5b2989df 100644
--- a/VNFs/DPPD-PROX/stats_latency.c
+++ b/VNFs/DPPD-PROX/stats_latency.c
@@ -228,6 +228,9 @@ static void stats_latency_from_lat_test(struct stats_latency *dst, struct lat_te
dst->tot_packets = src->tot_pkts;
dst->tot_all_packets = src->tot_all_pkts;
dst->lost_packets = src->lost_packets;
+ dst->mis_ordered = src->mis_ordered;
+ dst->extent = src->extent;
+ dst->duplicate = src->duplicate;
}
static void stats_latency_update_entry(struct stats_latency_manager_entry *entry)
diff --git a/VNFs/DPPD-PROX/stats_latency.h b/VNFs/DPPD-PROX/stats_latency.h
index 32f3ba34..833bbff4 100644
--- a/VNFs/DPPD-PROX/stats_latency.h
+++ b/VNFs/DPPD-PROX/stats_latency.h
@@ -29,6 +29,9 @@ struct stats_latency {
struct time_unit accuracy_limit;
uint64_t lost_packets;
+ uint64_t mis_ordered;
+ uint64_t extent;
+ uint64_t duplicate;
uint64_t tot_packets;
uint64_t tot_all_packets;
};
diff --git a/VNFs/DPPD-PROX/stats_port.c b/VNFs/DPPD-PROX/stats_port.c
index 124c849e..fb6cf10a 100644
--- a/VNFs/DPPD-PROX/stats_port.c
+++ b/VNFs/DPPD-PROX/stats_port.c
@@ -18,6 +18,9 @@
#include <stdio.h>
#include <rte_version.h>
+#if RTE_VERSION >= RTE_VERSION_NUM(21,11,0,0)
+#include <ethdev_driver.h> // Please configure DPDK with meson option -Denable_driver_sdk=true
+#endif
#include <rte_ethdev.h>
#include <rte_cycles.h>
#include <rte_byteorder.h>
@@ -289,16 +292,16 @@ static void nic_read_stats(uint8_t port_id)
dropped by the nic". Note that in case CRC
is stripped on ixgbe, the CRC bytes are not
counted. */
-#if defined (DEV_RX_OFFLOAD_CRC_STRIP)
- if (prox_port_cfg[port_id].requested_rx_offload & DEV_RX_OFFLOAD_CRC_STRIP)
+#if defined (RTE_ETH_RX_OFFLOAD_CRC_STRIP)
+ if (prox_port_cfg[port_id].requested_rx_offload & RTE_ETH_RX_OFFLOAD_CRC_STRIP)
stats->rx_bytes = eth_stat.ibytes +
(24 * eth_stat.ipackets - 20 * (eth_stat.ierrors + eth_stat.imissed));
else
stats->rx_bytes = eth_stat.ibytes +
(20 * eth_stat.ipackets - 20 * (eth_stat.ierrors + eth_stat.imissed));
#else
-#if defined (DEV_RX_OFFLOAD_KEEP_CRC)
- if (prox_port_cfg[port_id].requested_rx_offload & DEV_RX_OFFLOAD_KEEP_CRC)
+#if defined (RTE_ETH_RX_OFFLOAD_KEEP_CRC)
+ if (prox_port_cfg[port_id].requested_rx_offload & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
stats->rx_bytes = eth_stat.ibytes +
(20 * eth_stat.ipackets - 20 * (eth_stat.ierrors + eth_stat.imissed));
else
diff --git a/VNFs/DPPD-PROX/stats_task.h b/VNFs/DPPD-PROX/stats_task.h
index 7dc54eab..001ebbc7 100644
--- a/VNFs/DPPD-PROX/stats_task.h
+++ b/VNFs/DPPD-PROX/stats_task.h
@@ -17,6 +17,11 @@
#ifndef _STATS_TASK_H_
#define _STATS_TASK_H_
+#include <rte_common.h>
+#ifndef __rte_cache_aligned
+#include <rte_memory.h>
+#endif
+
#include <inttypes.h>
#include "clock.h"
diff --git a/VNFs/DPPD-PROX/task_base.h b/VNFs/DPPD-PROX/task_base.h
index df876e9a..89e5bb9d 100644
--- a/VNFs/DPPD-PROX/task_base.h
+++ b/VNFs/DPPD-PROX/task_base.h
@@ -36,6 +36,7 @@
#define TASK_FP_HANDLE_ARP 0x0040
#define TASK_TX_CRC 0x0080
#define TASK_L3 0x0100
+#define TASK_DO_NOT_FWD_GENEVE 0x0200
// flag_features 64 bits
#define TASK_FEATURE_ROUTING 0x0001
@@ -55,11 +56,11 @@
#define TASK_FEATURE_RX_ALL 0x8000
#define TASK_FEATURE_TXQ_FLAGS_MULTIPLE_MEMPOOL 0x20000
-#define FLAG_TX_FLUSH 0x01
-#define FLAG_NEVER_FLUSH 0x02
+#define TBASE_FLAG_TX_FLUSH 0x01
+#define TBASE_FLAG_NEVER_FLUSH 0x02
// Task specific flags
-#define BASE_FLAG_LUT_QINQ_HASH 0x08
-#define BASE_FLAG_LUT_QINQ_RSS 0x10
+#define TBASE_FLAG_LUT_QINQ_HASH 0x08
+#define TBASE_FLAG_LUT_QINQ_RSS 0x10
#define OUT_DISCARD 0xFF
#define OUT_HANDLED 0xFE
diff --git a/VNFs/DPPD-PROX/task_init.c b/VNFs/DPPD-PROX/task_init.c
index fc12eae7..97f7188c 100644
--- a/VNFs/DPPD-PROX/task_init.c
+++ b/VNFs/DPPD-PROX/task_init.c
@@ -302,7 +302,7 @@ static size_t init_rx_tx_rings_ports(struct task_args *targ, struct task_base *t
tbase->tx_pkt = targ->nb_txrings ? tx_pkt_no_drop_never_discard_sw1 : tx_pkt_no_drop_never_discard_hw1_lat_opt;
}
if ((targ->nb_txrings) || ((targ->task_init->flag_features & TASK_FEATURE_THROUGHPUT_OPT) == 0))
- tbase->flags |= FLAG_NEVER_FLUSH;
+ tbase->flags |= TBASE_FLAG_NEVER_FLUSH;
else
targ->lconf->flush_queues[targ->task] = flush_function(targ);
}
@@ -316,7 +316,7 @@ static size_t init_rx_tx_rings_ports(struct task_args *targ, struct task_base *t
else {
tbase->tx_pkt = targ->nb_txrings ? tx_pkt_no_drop_sw1 : tx_pkt_no_drop_hw1;
}
- tbase->flags |= FLAG_NEVER_FLUSH;
+ tbase->flags |= TBASE_FLAG_NEVER_FLUSH;
}
}
else {
@@ -352,7 +352,7 @@ struct task_base *init_task_struct(struct task_args *targ)
offset += t->size;
if (targ->nb_txrings == 0 && targ->nb_txports == 0)
- tbase->flags |= FLAG_NEVER_FLUSH;
+ tbase->flags |= TBASE_FLAG_NEVER_FLUSH;
offset = init_rx_tx_rings_ports(targ, tbase, offset);
tbase->aux = (struct task_base_aux *)(((uint8_t *)tbase) + offset);
@@ -366,7 +366,7 @@ struct task_base *init_task_struct(struct task_args *targ)
tbase->handle_bulk = t->handle;
if (targ->flags & (TASK_ARG_L3|TASK_ARG_NDP)) {
- plog_info("\tTask (%d,%d) configured in L3/NDP mode\n", targ->lconf->id, targ->id);
+ plog_info("\t\tTask (%d,%d) configured in L3/NDP mode\n", targ->lconf->id, targ->id);
tbase->l3.ctrl_plane_ring = targ->ctrl_plane_ring;
if (targ->nb_txports != 0) {
tbase->aux->tx_pkt_l2 = tbase->tx_pkt;
diff --git a/VNFs/DPPD-PROX/task_init.h b/VNFs/DPPD-PROX/task_init.h
index 33b912e8..53bfaf35 100644
--- a/VNFs/DPPD-PROX/task_init.h
+++ b/VNFs/DPPD-PROX/task_init.h
@@ -100,6 +100,14 @@ enum police_action {
ACT_INVALID = 4
};
+struct range {
+ uint32_t min;
+ uint32_t value;
+ uint32_t max;
+ uint32_t offset;
+ uint8_t range_len;
+};
+
/* Configuration for task that is only used during startup. */
struct task_args {
struct task_base *tbase;
@@ -191,10 +199,15 @@ struct task_args {
/* gen related*/
uint64_t rate_bps;
uint32_t n_rand_str;
- char rand_str[64][64];
+ uint32_t n_ranges;
uint32_t rand_offset[64];
+ char rand_str[64][64];
+ struct range range[64];
char pcap_file[256];
uint32_t accur_pos;
+ uint32_t flow_id_pos;
+ uint32_t packet_id_in_flow_pos;
+ uint32_t flow_count;
uint32_t sig_pos;
uint32_t sig;
uint32_t lat_pos;
@@ -204,7 +217,9 @@ struct task_args {
uint32_t lat_enabled;
uint32_t pkt_size;
uint8_t pkt_inline[MAX_PKT_SIZE];
- uint32_t probability;
+ uint32_t probability_no_drop;
+ uint32_t probability_duplicate;
+ uint32_t probability_delay;
char nat_table[256];
uint32_t use_src;
char route_table[256];
@@ -244,6 +259,10 @@ struct task_args {
uint32_t igmp_address;
uint32_t imix_nb_pkts;
uint32_t imix_pkt_sizes[MAX_IMIX_PKTS];
+ uint32_t multiplier;
+ uint32_t mirror_size;
+ uint32_t store_max;
+ uint32_t loss_buffer_size;
};
/* Return the first port that is reachable through the task. If the
diff --git a/VNFs/DPPD-PROX/thread_generic.c b/VNFs/DPPD-PROX/thread_generic.c
index 14fb943e..39964dea 100644
--- a/VNFs/DPPD-PROX/thread_generic.c
+++ b/VNFs/DPPD-PROX/thread_generic.c
@@ -213,7 +213,6 @@ int thread_generic(struct lcore_cfg *lconf)
next[task_id] = t->handle_bulk(t, mbufs, nb_rx);
}
}
-
}
}
return 0;
diff --git a/VNFs/DPPD-PROX/toeplitz.c b/VNFs/DPPD-PROX/toeplitz.c
index 62424579..a9f7e585 100644
--- a/VNFs/DPPD-PROX/toeplitz.c
+++ b/VNFs/DPPD-PROX/toeplitz.c
@@ -25,9 +25,7 @@ uint8_t toeplitz_init_key[TOEPLITZ_KEY_LEN] =
0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
- 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00
+ 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
};
uint32_t toeplitz_hash(uint8_t *buf_p, int buflen)
diff --git a/VNFs/DPPD-PROX/toeplitz.h b/VNFs/DPPD-PROX/toeplitz.h
index f24ae766..7cf052ae 100644
--- a/VNFs/DPPD-PROX/toeplitz.h
+++ b/VNFs/DPPD-PROX/toeplitz.h
@@ -17,7 +17,7 @@
#ifndef _TOEPLITZ_H_
#define _TOEPLITZ_H_
-#define TOEPLITZ_KEY_LEN 52
+#define TOEPLITZ_KEY_LEN 40
extern uint8_t toeplitz_init_key[TOEPLITZ_KEY_LEN];
uint32_t toeplitz_hash(uint8_t *buf_p, int buflen);
#endif
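
Trimming the Toeplitz key from 52 to 40 bytes drops the trailing zero padding removed in toeplitz.c above and leaves the classic 40-byte Toeplitz key; the key is consumed exactly as before. A condensed sketch of that consumption, mirroring init_port() earlier in this patch (setup_rss and its port_conf argument are hypothetical; prox_compat.h is assumed to map the RTE_ETH_* names on older DPDK, as the compat additions in this patch suggest):

#include <rte_ethdev.h>
#include "prox_compat.h"
#include "toeplitz.h"

static void setup_rss(struct rte_eth_conf *port_conf)
{
	port_conf->rxmode.mq_mode |= RTE_ETH_MQ_RX_RSS;
	port_conf->rx_adv_conf.rss_conf.rss_key = toeplitz_init_key;
	port_conf->rx_adv_conf.rss_conf.rss_key_len = TOEPLITZ_KEY_LEN;	/* now 40 bytes */
	port_conf->rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP;
}
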
diff --git a/VNFs/DPPD-PROX/tx_pkt.c b/VNFs/DPPD-PROX/tx_pkt.c
index f45516ec..cd62cc54 100644
--- a/VNFs/DPPD-PROX/tx_pkt.c
+++ b/VNFs/DPPD-PROX/tx_pkt.c
@@ -61,17 +61,17 @@ void store_packet(struct task_base *tbase, struct rte_mbuf *mbuf)
int tx_pkt_ndp(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out)
{
- // TODO NDP
struct ipv6_addr ip_dst;
int first = 0, ret, ok = 0, rc;
const struct port_queue *port_queue = &tbase->tx_params_hw.tx_port_queue[0];
struct rte_mbuf *mbuf = NULL; // used when one need to send both an ARP and a mbuf
uint16_t vlan;
+ uint64_t tsc = rte_rdtsc();
for (int j = 0; j < n_pkts; j++) {
if ((out) && (out[j] >= OUT_HANDLED))
continue;
- if (unlikely((rc = write_ip6_dst_mac(tbase, mbufs[j], &ip_dst, &vlan)) != SEND_MBUF)) {
+ if (unlikely((rc = write_ip6_dst_mac(tbase, mbufs[j], &ip_dst, &vlan, tsc)) != SEND_MBUF)) {
if (j - first) {
ret = tbase->aux->tx_pkt_l2(tbase, mbufs + first, j - first, out);
ok += ret;
@@ -286,7 +286,7 @@ void flush_queues_hw(struct task_base *tbase)
}
}
- tbase->flags &= ~FLAG_TX_FLUSH;
+ tbase->flags &= ~TBASE_FLAG_TX_FLUSH;
}
void flush_queues_sw(struct task_base *tbase)
@@ -303,7 +303,7 @@ void flush_queues_sw(struct task_base *tbase)
ring_enq_drop(tbase->tx_params_sw.tx_rings[i], tbase->ws_mbuf->mbuf[i] + (cons & WS_MBUF_MASK), prod - cons, tbase);
}
}
- tbase->flags &= ~FLAG_TX_FLUSH;
+ tbase->flags &= ~TBASE_FLAG_TX_FLUSH;
}
void flush_queues_no_drop_hw(struct task_base *tbase)
@@ -321,7 +321,7 @@ void flush_queues_no_drop_hw(struct task_base *tbase)
}
}
- tbase->flags &= ~FLAG_TX_FLUSH;
+ tbase->flags &= ~TBASE_FLAG_TX_FLUSH;
}
void flush_queues_no_drop_sw(struct task_base *tbase)
@@ -338,7 +338,7 @@ void flush_queues_no_drop_sw(struct task_base *tbase)
ring_enq_no_drop(tbase->tx_params_sw.tx_rings[i], tbase->ws_mbuf->mbuf[i] + (cons & WS_MBUF_MASK), prod - cons, tbase);
}
}
- tbase->flags &= ~FLAG_TX_FLUSH;
+ tbase->flags &= ~TBASE_FLAG_TX_FLUSH;
}
/* "try" functions try to send packets to sw/hw w/o failing or blocking;
@@ -427,7 +427,7 @@ int tx_pkt_no_drop_never_discard_hw1_thrpt_opt(struct task_base *tbase, struct r
cons = tbase->ws_mbuf->idx[0].cons;
if ((uint16_t)(prod - cons)){
- tbase->flags &= ~FLAG_TX_FLUSH;
+ tbase->flags &= ~TBASE_FLAG_TX_FLUSH;
tbase->ws_mbuf->idx[0].prod = 0;
tbase->ws_mbuf->idx[0].cons = 0;
ret+= txhw_no_drop(&tbase->tx_params_hw.tx_port_queue[0], tbase->ws_mbuf->mbuf[0] + (cons & WS_MBUF_MASK), (uint16_t)(prod - cons), tbase);
@@ -456,7 +456,7 @@ int tx_pkt_never_discard_hw1_thrpt_opt(struct task_base *tbase, struct rte_mbuf
cons = tbase->ws_mbuf->idx[0].cons;
if ((uint16_t)(prod - cons)){
- tbase->flags &= ~FLAG_TX_FLUSH;
+ tbase->flags &= ~TBASE_FLAG_TX_FLUSH;
tbase->ws_mbuf->idx[0].prod = 0;
tbase->ws_mbuf->idx[0].cons = 0;
ret+= txhw_drop(&tbase->tx_params_hw.tx_port_queue[0], tbase->ws_mbuf->mbuf[0] + (cons & WS_MBUF_MASK), (uint16_t)(prod - cons), tbase);
@@ -595,7 +595,7 @@ int tx_pkt_no_drop_hw(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t
cons = tbase->ws_mbuf->idx[i].cons;
if (((uint16_t)(prod - cons)) >= MAX_PKT_BURST) {
- tbase->flags &= ~FLAG_TX_FLUSH;
+ tbase->flags &= ~TBASE_FLAG_TX_FLUSH;
tbase->ws_mbuf->idx[i].cons = cons + MAX_PKT_BURST;
ret+= txhw_no_drop(&tbase->tx_params_hw.tx_port_queue[i], tbase->ws_mbuf->mbuf[i] + (cons & WS_MBUF_MASK), MAX_PKT_BURST, tbase);
}
@@ -616,7 +616,7 @@ int tx_pkt_no_drop_sw(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t
cons = tbase->ws_mbuf->idx[i].cons;
if (((uint16_t)(prod - cons)) >= MAX_PKT_BURST) {
- tbase->flags &= ~FLAG_TX_FLUSH;
+ tbase->flags &= ~TBASE_FLAG_TX_FLUSH;
tbase->ws_mbuf->idx[i].cons = cons + MAX_PKT_BURST;
ret += ring_enq_no_drop(tbase->tx_params_sw.tx_rings[i], tbase->ws_mbuf->mbuf[i] + (cons & WS_MBUF_MASK), MAX_PKT_BURST, tbase);
}
@@ -637,7 +637,7 @@ int tx_pkt_hw(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts,
cons = tbase->ws_mbuf->idx[i].cons;
if (((uint16_t)(prod - cons)) >= MAX_PKT_BURST) {
- tbase->flags &= ~FLAG_TX_FLUSH;
+ tbase->flags &= ~TBASE_FLAG_TX_FLUSH;
tbase->ws_mbuf->idx[i].cons = cons + MAX_PKT_BURST;
ret += txhw_drop(&tbase->tx_params_hw.tx_port_queue[i], tbase->ws_mbuf->mbuf[i] + (cons & WS_MBUF_MASK), MAX_PKT_BURST, tbase);
}
@@ -657,7 +657,7 @@ int tx_pkt_sw(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts,
cons = tbase->ws_mbuf->idx[i].cons;
if (((uint16_t)(prod - cons)) >= MAX_PKT_BURST) {
- tbase->flags &= ~FLAG_TX_FLUSH;
+ tbase->flags &= ~TBASE_FLAG_TX_FLUSH;
tbase->ws_mbuf->idx[i].cons = cons + MAX_PKT_BURST;
ret+= ring_enq_drop(tbase->tx_params_sw.tx_rings[i], tbase->ws_mbuf->mbuf[i] + (cons & WS_MBUF_MASK), MAX_PKT_BURST, tbase);
}
diff --git a/VNFs/DPPD-PROX/version.h b/VNFs/DPPD-PROX/version.h
index fe5fcbfc..355a5dcb 100644
--- a/VNFs/DPPD-PROX/version.h
+++ b/VNFs/DPPD-PROX/version.h
@@ -19,7 +19,7 @@
/* PROGRAM_NAME defined through Makefile */
#define VERSION_MAJOR 0 // Pre-production
-#define VERSION_MINOR 2005 // 20.05 i.e. May 2020
+#define VERSION_MINOR 2212 // 22.12 i.e. December 2022
#define VERSION_REV 0
static inline char *VERSION_STR(void)
diff --git a/docs/conf.py b/docs/conf.py
index 3c4453e7..4821d0aa 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -1 +1,4 @@
from docs_conf.conf import *
+linkcheck_ignore = [
+ r'https://trex-tgn.cisco.com/',
+ ]
diff --git a/docs/index.rst b/docs/index.rst
index 1376a69f..ccf2d8ad 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -14,6 +14,4 @@ OPNFV Samplevnf
:maxdepth: 1
release/release-notes/index
- release/results/index
- testing/developer/index
testing/user/userguide/index
diff --git a/docs/release/release-notes/release-notes.rst b/docs/release/release-notes/release-notes.rst
index 6ff1679d..f9daae50 100644
--- a/docs/release/release-notes/release-notes.rst
+++ b/docs/release/release-notes/release-notes.rst
@@ -3,61 +3,18 @@
.. http://creativecommons.org/licenses/by/4.0
.. (c) OPNFV, Intel Corporation and others.
-=======
-License
-=======
-OPNFV release note for SampleVNF Docs
-are licensed under a Creative Commons Attribution 4.0 International License.
-You should have received a copy of the license along with this.
-If not, see <http://creativecommons.org/licenses/by/4.0/>.
-:
-
-The *SampleVNFs*, the *SampleVNF test cases* are opensource software,
-licensed under the terms of the Apache License, Version 2.0.
-
-==========================================
-OPNFV Hunter Release Note for SampleVNF
-==========================================
-
-.. toctree::
- :maxdepth: 2
-
-.. _SampleVNF: https://wiki.opnfv.org/SAM
-
-.. _Yardstick: https://wiki.opnfv.org/yardstick
-
-.. _NFV-TST001: http://www.etsi.org/deliver/etsi_gs/NFV-TST/001_099/001/01.01.01_60/gs_NFV-TST001v010101p.pdf
-
-
-Abstract
-========
-
-This document describes the release note of SampleVNF project.
-
-
-Version History
-===============
-
-+----------------+--------------------+---------------------------------+
-| *Date* | *Version* | *Comment* |
-| | | |
-+----------------+--------------------+---------------------------------+
-| "May 10 2019" | 8.0 | SampleVNF for Hunter release |
-| | | |
-+----------------+--------------------+---------------------------------+
-
-
-Important Notes
-===============
-
-The software delivered in the OPNFV SampleVNF_ Project, comprising the
-*SampleVNF VNFs* and performance test case are part of OPNFV Yardstick_
-Project is a realization of the methodology in ETSI-ISG NFV-TST001_.
+OPNFV Jerma Release
+===================
+* The only supported test VNF in this release for dataplane benchmarking purposes is PROX
+* PROX supports DPDK versions up to 20.05
+* Introduces the ability to run cloud-configured dataplane networking benchmarks using
+  ETSI NFV TST009 standard methods
+* Test automation using X-testing
OPNFV Hunter Release
-======================
+====================
This Hunter release provides *SampleVNF* as a approx VNF repository for
VNF/NFVI testing, characterization and OPNFV feature testing, automated on
@@ -73,7 +30,7 @@ OPNFV platform, including:
* Results
-* Automated SampleVNF test suit in OPNFV Yardstick_ Project
+* Automated SampleVNF test suite in OPNFV Yardstick Project
* SampleVNF source code
@@ -85,7 +42,7 @@ For Hunter release, the *SampleVNF* supported:
| *CGNAPT* | Carrier Grade Network Address and port Translation .5.0 | v0.1.0 |
+----------------+---------------------------------------------------------+-------------------+
| *Prox* | Packet pROcessing eXecution engine | v0.40.0 |
-| | acts as traffic generator, L3FWD, L2FWD, BNG etc | |
+| | acts as traffic generator, L3FWD, L2FWD, BNG etc | |
+----------------+---------------------------------------------------------+-------------------+
| *vACL* | Access Control List | v0.1.0 |
+----------------+---------------------------------------------------------+-------------------+
@@ -111,17 +68,19 @@ testing:
* Network - rfc2544, rfc3511, latency, http_test etc
-The *SampleVNF* is developed in the OPNFV community, by the SampleVNF_ team.
+The *SampleVNF* is developed in the OPNFV community, by the SampleVNF team.
The *Network Service Benchmarking* SampleVNF Characterization Testing tool is a part of the
Yardstick Project.
.. note:: The test case description template used for the SampleVNF in yardstick
- test cases is based on the document ETSI-ISG NFV-TST001_; the results report template
+ test cases is based on the document `ETSI GS NFV-TST 001`_; the results report template
used for the SampleVNF test results is based on the IEEE Std 829-2008.
+.. _ETSI GS NFV-TST 001: https://portal.etsi.org/webapp/workprogram/Report_WorkItem.asp?WKI_ID=46009
+
Release Data
-============
+------------
+--------------------------------------+--------------------------------------+
| **Project** | SampleVNF |
@@ -141,6 +100,7 @@ Release Data
+--------------------------------------+--------------------------------------+
| **Purpose of the delivery** | Hunter alignment to Released |
| | bug-fixes for the following: |
+| | |
| | - Memory leak |
| | - minimum latency |
| | - Increase default mbuf size and |
@@ -151,10 +111,10 @@ Release Data
Deliverables
-============
+------------
Documents
----------
+^^^^^^^^^
- User Guide: http://artifacts.opnfv.org/samplevnf/docs/testing_user_userguide/index.html
@@ -162,7 +122,7 @@ Documents
Software Deliverables
----------------------
+^^^^^^^^^^^^^^^^^^^^^
- The SampleVNF Docker image: To be added
@@ -184,7 +144,7 @@ Software Deliverables
+---------------------+-------------------------------------------------------+
Document Version Changes
-------------------------
+^^^^^^^^^^^^^^^^^^^^^^^^
This is the first version of the SampleVNF in OPNFV.
It includes the following documentation updates:
@@ -197,7 +157,7 @@ It includes the following documentation updates:
Feature additions
------------------
+^^^^^^^^^^^^^^^^^
- Support for DPDK 18.05 and DPDK 18.08
- Add support for counting non dataplane related packets
@@ -222,7 +182,7 @@ Bug fixes:
Known Issues/Faults
--------------------
+^^^^^^^^^^^^^^^^^^^
- Huge page freeing needs to be handled properly while running the application else it might
cause system crash. Known issue from DPDK.
- UDP Replay is used to capture throughput for dynamic cgnapt
@@ -232,37 +192,37 @@ Known Issues/Faults
- Rest API uses port 80, make sure other webservices are stopped before using SampleVNF RestAPI.
Corrected Faults
-----------------
+^^^^^^^^^^^^^^^^
Hunter 8.2:
-+----------------------------+-------------------------------------------------------------------+
-| **JIRA REFERENCE** | **DESCRIPTION** |
-+----------------------------+-------------------------------------------------------------------+
-| SAMPLEVNF-129 | Support for DPDK 18.05 and DPDK 18.08 |
-+----------------------------+-------------------------------------------------------------------+
-| SAMPLEVNF-130 | Add support for counting non dataplane related packets |
-+----------------------------+-------------------------------------------------------------------+
-| SAMPLEVNF-131 | test improvements and fixes for image creation |
-+----------------------------+-------------------------------------------------------------------+
-| SAMPLEVNF-132 | Local Documentation Builds |
-+----------------------------+-------------------------------------------------------------------+
-| SAMPLEVNF-133 | Improve l3fwd performance |
-+----------------------------+-------------------------------------------------------------------+
-| SAMPLEVNF-134 | Enable the local cache mac address |
-+----------------------------+-------------------------------------------------------------------+
-| SAMPLEVNF-135 | Initial support for DPDK 18.05 |
-+----------------------------+-------------------------------------------------------------------+
++----------------------------+----------------------------------------------------------------------+
+| **JIRA REFERENCE** | **DESCRIPTION** |
++----------------------------+----------------------------------------------------------------------+
+| SAMPLEVNF-129 | Support for DPDK 18.05 and DPDK 18.08 |
++----------------------------+----------------------------------------------------------------------+
+| SAMPLEVNF-130 | Add support for counting non dataplane related packets |
++----------------------------+----------------------------------------------------------------------+
+| SAMPLEVNF-131 | test improvements and fixes for image creation |
++----------------------------+----------------------------------------------------------------------+
+| SAMPLEVNF-132 | Local Documentation Builds |
++----------------------------+----------------------------------------------------------------------+
+| SAMPLEVNF-133 | Improve l3fwd performance |
++----------------------------+----------------------------------------------------------------------+
+| SAMPLEVNF-134 | Enable the local cache mac address |
++----------------------------+----------------------------------------------------------------------+
+| SAMPLEVNF-135 | Initial support for DPDK 18.05 |
++----------------------------+----------------------------------------------------------------------+
| SAMPLEVNF-136 | Adding centos.json to be used with packer to generate a VM with PROX|
-+----------------------------+-------------------------------------------------------------------+
-| SAMPLEVNF-137 | Adding support for Ubuntu 17.20... |
-+----------------------------+-------------------------------------------------------------------+
-| SAMPLEVNF-138 | Get multiple port stats simultaneously |
-+----------------------------+-------------------------------------------------------------------+
-| SAMPLEVNF-139 | Increase default mbuf size and code simplification/cleanup |
-+----------------------------+-------------------------------------------------------------------+
-| SAMPLEVNF-140 | update from src port in the pvt/pub handler |
-+----------------------------+-------------------------------------------------------------------+
++----------------------------+----------------------------------------------------------------------+
+| SAMPLEVNF-137 | Adding support for Ubuntu 17.20... |
++----------------------------+----------------------------------------------------------------------+
+| SAMPLEVNF-138 | Get multiple port stats simultaneously |
++----------------------------+----------------------------------------------------------------------+
+| SAMPLEVNF-139 | Increase default mbuf size and code simplification/cleanup |
++----------------------------+----------------------------------------------------------------------+
+| SAMPLEVNF-140 | update from src port in the pvt/pub handler |
++----------------------------+----------------------------------------------------------------------+
@@ -286,7 +246,7 @@ Bug Fix Jira:
+----------------------------+-------------------------------------------------------------------+
Hunter known restrictions/issues
-====================================
+--------------------------------
+-----------+-----------+----------------------------------------------+
| Installer | Scenario | Issue |
+===========+===========+==============================================+
@@ -295,226 +255,7 @@ Hunter known restrictions/issues
Open JIRA tickets
-=================
-
-+----------------------------+------------------------------------------------+
-| **JIRA REFERENCE** | **DESCRIPTION** |
-| | |
-+----------------------------+------------------------------------------------+
-| | |
-| | |
-+----------------------------+------------------------------------------------+
-
-
-Useful links
-============
-
- - wiki project page: https://wiki.opnfv.org/display/SAM
-
- - wiki SampleVNF Hunter release planing page: https://wiki.opnfv.org/display/SAM/G+-+Release+SampleVNF+planning
-
- - SampleVNF repo: https://git.opnfv.org/cgit/samplevnf
-
- - SampleVNF IRC chanel: #opnfv-samplevnf
-| SAMPLEVNF- | PROX support for dpdk 18,05 |
-+----------------------------+-------------------------------------------------------------------+
-| SAMPLEVNF- | PROX support for dpdk 18,05 |
-+----------------------------+-------------------------------------------------------------------+
-
-
-
-
-Bug Fix Jira:
-
-+----------------------------+-------------------------------------------------------------------+
-| **JIRA REFERENCE** | **DESCRIPTION** |
-+----------------------------+-------------------------------------------------------------------+
-| SAMPLEVNF- | Fix samplevnf perf issues |
-+----------------------------+-------------------------------------------------------------------+
-
-Hunter known restrictions/issues
-====================================
-+-----------+-----------+----------------------------------------------+
-| Installer | Scenario | Issue |
-+===========+===========+==============================================+
-| | | |
-+-----------+-----------+----------------------------------------------+
-Open JIRA tickets
-=================
+-----------------
+----------------------------+------------------------------------------------+
| **JIRA REFERENCE** | **DESCRIPTION** |
@@ -526,55 +267,12 @@ Open JIRA tickets
Useful links
-============
+------------
- - wiki project page: https://wiki.opnfv.org/display/SAM
+ - wiki project page: https://wiki-old.opnfv.org/display/SAM
- wiki SampleVNF Hunter release planing page: https://wiki.opnfv.org/display/SAM/G+-+Release+SampleVNF+planning
- - SampleVNF repo: https://git.opnfv.org/cgit/samplevnf
+ - SampleVNF repo: https://git.opnfv.org/samplevnf/
- SampleVNF IRC chanel: #opnfv-samplevnf
diff --git a/docs/release/results/overview.rst b/docs/release/results/overview.rst
index df04d327..5a2f2b8a 100644
--- a/docs/release/results/overview.rst
+++ b/docs/release/results/overview.rst
@@ -6,7 +6,7 @@
SampleVNF test tesult document overview
=======================================
-.. _`SampleVNF user guide`: artifacts.opnfv.org/samplevnf/docs/userguide/index.html
+.. _`SampleVNF user guide`: http://artifacts.opnfv.org/samplevnf/docs/testing_user_userguide/index.html
This document provides an overview of the results of test cases developed by
the OPNFV SampleVNF Project & test cases executed part of yardstick
diff --git a/docs/testing/developer/design/02-Get_started_Guide.rst b/docs/testing/developer/design/02-Get_started_Guide.rst
index c8f35ed3..2a9806b5 100644
--- a/docs/testing/developer/design/02-Get_started_Guide.rst
+++ b/docs/testing/developer/design/02-Get_started_Guide.rst
@@ -6,7 +6,7 @@
====================================
Get started as a SampleVNF developer
-===================================
+====================================
.. _SampleVNF: https://wiki.opnfv.org/samplevnf
.. _Gerrit: https://www.gerritcodereview.com/
diff --git a/docs/testing/developer/design/04-SampleVNF_Design.rst b/docs/testing/developer/design/04-SampleVNF_Design.rst
index a3332e27..f813a297 100644
--- a/docs/testing/developer/design/04-SampleVNF_Design.rst
+++ b/docs/testing/developer/design/04-SampleVNF_Design.rst
@@ -348,7 +348,7 @@ transmit takes packets from worker thread in a dedicated ring and sent to the
hardware queue.
Master pipeline
-^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^
This component does not process any packets and should configure with Core 0,
to save cores for other components which processes traffic. The component
is responsible for:
@@ -359,7 +359,7 @@ is responsible for:
4. ARP and ICMP are handled here.
Load Balancer pipeline
-^^^^^^^^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^^^^^^^^
Load balancer is part of the Multi-Threaded CGMAPT release which distributes
the flows to Multiple ACL worker threads.
@@ -371,7 +371,7 @@ affinity of flows to worker threads.
Tuple can be modified/configured using configuration file
vCGNAPT - Static
-------------------
+----------------
The vCGNAPT component performs translation of private IP & port to public IP &
port at egress side and public IP & port to private IP & port at Ingress side
@@ -383,7 +383,7 @@ match will be taken a default action. The default action may result in drop of
the packets.
vCGNAPT- Dynamic
------------------
+----------------
The vCGNAPT component performs translation of private IP & port to public IP &
port at egress side and public IP & port to private IP & port at Ingress side
@@ -399,11 +399,13 @@ Dynamic vCGNAPT acts as static one too, we can do NAT entries statically.
Static NAT entries port range must not conflict to dynamic NAT port range.
vCGNAPT Static Topology
-----------------------
+-----------------------
-IXIA(Port 0)-->(Port 0)VNF(Port 1)-->(Port 1) IXIA
+IXIA(Port 0)-->(Port 0)VNF(Port 1)-->(Port 1)IXIA
operation:
+
Egress --> The packets sent out from ixia(port 0) will be CGNAPTed to ixia(port 1).
+
Igress --> The packets sent out from ixia(port 1) will be CGNAPTed to ixia(port 0).
vCGNAPT Dynamic Topology (UDP_REPLAY)
@@ -411,9 +413,11 @@ vCGNAPT Dynamic Topology (UDP_REPLAY)
IXIA(Port 0)-->(Port 0)VNF(Port 1)-->(Port 0)UDP_REPLAY
operation:
+
Egress --> The packets sent out from ixia will be CGNAPTed to L3FWD/L4REPLAY.
+
Ingress --> The L4REPLAY upon reception of packets (Private to Public Network),
- will immediately replay back the traffic to IXIA interface. (Pub -->Priv).
+ will immediately replay back the traffic to IXIA interface. (Pub -->Priv).
How to run L4Replay
-------------------
@@ -431,7 +435,7 @@ vACL - Design
=============
Introduction
---------------
+------------
This application implements Access Control List (ACL). ACL is typically used
for rule based policy enforcement. It restricts access to a destination IP
address/port based on various header fields, such as source IP address/port,
@@ -439,12 +443,12 @@ destination IP address/port and protocol. It is built on top of DPDK and uses
the packet framework infrastructure.
Scope
-------
+-----
This application provides a standalone DPDK based high performance ACL Virtual
Network Function implementation.
High Level Design
-------------------
+-----------------
The ACL Filter performs bulk filtering of incoming packets based on rules in
current ruleset, discarding any packets not permitted by the rules. The
mechanisms needed for building the rule database and performing lookups are
@@ -460,12 +464,12 @@ The Input and Output FIFOs will be implemented using DPDK Ring Buffers.
The DPDK ACL example:
-http://dpdk.org/doc/guides/sample_app_ug/l3_forward_access_ctrl.html
+http://doc.dpdk.org/guides/sample_app_ug/l3_forward.html
#figure-ipv4-acl-rule contains a suitable syntax and parser for ACL rules.
Components of ACL
-------------------
+-----------------
In ACL, each component is constructed as a packet framework. It includes
Master pipeline component, driver, load balancer pipeline component and ACL
worker pipeline component. A pipeline framework is a collection of input ports,
@@ -607,27 +611,33 @@ Edge Router has the following functionalities in Upstream.
Update the packet color in MPLS EXP field in each MPLS header.
Components of vPE
--------------------
+-----------------
The vPE has downstream and upstream pipelines controlled by Master component.
-Edge router processes two different types of traffic through pipelines
-I. Downstream (Core-to-Customer)
- 1. Receives TCP traffic from core
- 2. Routes the packet based on the routing rules
- 3. Performs traffic scheduling based on the traffic profile
- a. Qos scheduling is performed using token bucket algorithm
- SVLAN, CVLAN, DSCP fields are used to determine transmission priority.
- 4. Appends QinQ label in each outgoing packet.
-II. Upstream (Customer-to-Core)
- 1. Receives QinQ labelled TCP packets from Customer
- 2. Removes the QinQ label
- 3. Classifies the flow using QinQ label and apply Qos metering
- a. 1st stage Qos metering is performed with flow ID using trTCM algorithm
- b. 2nd stage Qos metering is performed with flow ID and traffic class using
- trTCM algorithm
- c. traffic class maps to DSCP field in the packet.
- 4. Routes the packet based on the routing rules
- 5. Appends two MPLS labels in each outgoing packet.
+Edge router processes two different types of traffic through pipelines:
+
+I) Downstream (Core-to-Customer)
+
+ 1. Receives TCP traffic from core
+ 2. Routes the packet based on the routing rules
+ 3. Performs traffic scheduling based on the traffic profile
+
+ a. Qos scheduling is performed using token bucket algorithm.
+ SVLAN, CVLAN, DSCP fields are used to determine transmission priority.
+ 4. Appends QinQ label in each outgoing packet.
+
+II) Upstream (Customer-to-Core)
+
+ 1. Receives QinQ labelled TCP packets from Customer
+ 2. Removes the QinQ label
+ 3. Classifies the flow using QinQ label and apply Qos metering
+
+ a. 1st stage Qos metering is performed with flow ID using trTCM algorithm
+ b. 2nd stage Qos metering is performed with flow ID and traffic class using
+ trTCM algorithm
+ c. traffic class maps to DSCP field in the packet.
+ 4. Routes the packet based on the routing rules
+ 5. Appends two MPLS labels in each outgoing packet.
Master Component
^^^^^^^^^^^^^^^^
@@ -635,7 +645,8 @@ Master Component
The Master component is part of all the IP Pipeline applications. This
component does not process any packets and should configure with Core0,
to save cores for other components which processes traffic. The component
-is responsible for
+is responsible for:
+
1. Initializing each component of the Pipeline application in different threads
2. Providing CLI shell for the user
3. Propagating the commands from user to the corresponding components.
@@ -656,7 +667,7 @@ To run the VNF, execute the following:
Prox - Packet pROcessing eXecution engine
-==========================================
+=========================================
Introduction
------------
diff --git a/docs/testing/developer/requirements/03-Requirements.rst b/docs/testing/developer/requirements/03-Requirements.rst
index 25798606..97b1813f 100644
--- a/docs/testing/developer/requirements/03-Requirements.rst
+++ b/docs/testing/developer/requirements/03-Requirements.rst
@@ -13,7 +13,7 @@ Requirements
.. _SampleVNF: https://wiki.opnfv.org/samplevnf
.. _Technical_Briefs: https://wiki.opnfv.org/display/SAM/Technical+Briefs+of+VNFs
-Supported Test setup:
+Supported Test setup
--------------------
The device under test (DUT) consists of a system following
diff --git a/docs/testing/user/userguide/01-introduction.rst b/docs/testing/user/userguide/01-introduction.rst
index 10c0161f..4ddde201 100755..100644
--- a/docs/testing/user/userguide/01-introduction.rst
+++ b/docs/testing/user/userguide/01-introduction.rst
@@ -9,30 +9,16 @@ Introduction
**Welcome to SampleVNF's documentation !**
-.. _Pharos: https://wiki.opnfv.org/pharos
-.. _SampleVNF: https://wiki.opnfv.org/samplevnf
-.. _Technical_Briefs: https://wiki.opnfv.org/display/SAM/Technical+Briefs+of+VNFs
-SampleVNF_ is an OPNFV Project.
-
-The project's goal is to provides a placeholder for various sample VNF
+The project's goal was to provide a placeholder for various sample VNF
(Virtual Network Function (:term:`VNF`)) development which includes example
reference architecture and optimization methods related to VNF/Network service
-for high performance VNFs. This project provides benefits to other OPNFV
-projects like Functest, Models, yardstick etc to perform real life
-use-case based testing and VNF/ Network Function Virtualization Infrastructure
-(:term:`NFVI`) characterization for the same.
-
-The Project's scope to create a repository of sample VNFs to help VNF
-benchmarking and NFVI characterization with real world traffic and host a
-common development environment for developing the VNF using optimized libraries.
-Also, develop a test framework in yardstick to enable VNF/NFVI verification.
-
-*SampleVNF* is used in OPNFV for characterization of NFVI/VNF on OPNFV infrastructure
-and some of the OPNFV features.
+for high performance VNFs.
+Today, we only maintain PROX and rapid scripts as part of this project
+to perform Network Function Virtualization Infrastructure
+(:term:`NFVI`) characterization.
-.. seealso:: Pharos_ for information on OPNFV community labs and this
- Technical_Briefs_ for an overview of *SampleVNF*
+*SampleVNF* is used in OPNFV for characterization of NFVI.
About This Document
@@ -44,24 +30,8 @@ This document consists of the following chapters:
project's background and describes the structure of this document.
* Chapter :doc:`02-methodology` describes the methodology implemented by the
- *SampleVNF* Project for :term:`VNF` and :term:`NFVI` verification.
-
-* Chapter :doc:`03-architecture` provides information on the software architecture
- of *SampleVNF*.
-
-* Chapter :doc:`04-installation` provides instructions to install *SampleVNF*.
-
-* Chapter :doc:`05-How_to_run_SampleVNFs` provides example on how installing and running *SampleVNF*.
-
-* Chapter :doc:`06-How_to_use_REST_api` provides info on how to run REST API *SampleVNF*.
-
-* Chapter :doc:`07-Config_files` provides info *SampleVNF* configuration.
-
-* Chapter :doc:`08-CLI_Commands_Reference` provides info on CLI commands supported by *SampleVNF*
-
-Contact SampleVNF
-=================
+ *SampleVNF* Project for :term:`NFVI` verification.
-Feedback? `Contact us`_
+* Chapter :doc:`03-installation` provides instructions to install *SampleVNF*.
-.. _Contact us: opnfv-users@lists.opnfv.org
+* Chapter :doc:`04-running_the_test` shows how to run the dataplane testing.
diff --git a/docs/testing/user/userguide/01-prox_documentation.rst b/docs/testing/user/userguide/01-prox_documentation.rst
new file mode 100644
index 00000000..12c740da
--- /dev/null
+++ b/docs/testing/user/userguide/01-prox_documentation.rst
@@ -0,0 +1,4 @@
+Testing with PROX
+=================
+The PROX documentation can be found in `Prox - Packet pROcessing eXecution engine <https://wiki-old.opnfv.org/x/AAa9>`_.
+How to use PROX with the rapid python scripts is described in `Rapid scripting <https://wiki-old.opnfv.org/x/OwM-Ag>`_.
diff --git a/docs/testing/user/userguide/02-methodology.rst b/docs/testing/user/userguide/02-methodology.rst
index 01cbb276..e5a7d383 100644
--- a/docs/testing/user/userguide/02-methodology.rst
+++ b/docs/testing/user/userguide/02-methodology.rst
@@ -6,81 +6,68 @@
===========
Methodology
===========
+.. _NFV-TST009: https://docbox.etsi.org/ISG/NFV/open/Publications_pdf/Specs-Reports/NFV-TST%20009v3.2.1%20-%20GS%20-%20NFVI_Benchmarks.pdf
Abstract
========
This chapter describes the methodology/overview of SampleVNF project from
-the perspective of a :term:`VNF` and :term:`NFVI` Characterization
+the perspective of :term:`NFVI` characterization.
Overview
========
-This project provides a placeholder for various sample VNF (Virtual Network Function (:term:`VNF`))
-development which includes example reference architecture and optimization methods
-related to VNF/Network service for high performance VNFs.
+This project covers the dataplane benchmarking for Network Function Virtualization
+Infrastructure (:term:`NFVI`) using the PROX tool, according to ETSI GS NFV-TST009_.
-The sample VNFs are Open Source approximations* of Telco grade :term:`VNF`
-using optimized VNF + NFVi Infrastructure libraries, with Performance Characterization of Sample† Traffic Flows.
-• * Not a commercial product. Encourage the community to contribute and close the feature gaps.
-• † No Vendor/Proprietary Workloads
+The test execution and reporting are driven by the Xtesting framework and are fully automated.
-ETSI-NFV
-========
-
-.. _NFV-TST001: http://www.etsi.org/deliver/etsi_gs/NFV-TST/001_099/001/01.01.01_60/gs_NFV-TST001v010101p.pdf
-.. _SampleVNFtst: https://wiki.opnfv.org/display/SAM/Technical+Briefs+of+VNFs
-.. _Yardstick_NSB: http://artifacts.opnfv.org/yardstick/docs/testing_user_userguide/index.html#document-13-nsb-overview
-
-SampleVNF Test Infrastructure (NSB (Yardstick_NSB_))in yardstick helps to facilitate
-consistent/repeatable methodologies for characterizing & validating the
-sample VNFs (:term:`VNF`) through OPEN SOURCE VNF approximations.
-
-Network Service Benchmarking in yardstick framework follows ETSI GS NFV-TST001_
-to verify/characterize both :term:`NFVI` & :term:`VNF`
-
-The document ETSI GS NFV-TST001_, "Pre-deployment Testing; Report on Validation
-of NFV Environments and Services", recommends methods for pre-deployment
-testing of the functional components of an NFV environment.
+When executing the tests, traffic will be sent between 2 or more PROX VMs and all metrics
+will be collected in the Xtesting database.
+The placement of the test VMs (in which the PROX tool is running) can be controlled by
+Heat stacks, but can also be done through other means. This will be explained in the chapter
+covering the PROX instance deployment, and needs to be done prior to the test execution.
-The SampleVNF project implements the methodology described in chapter 13 of Yardstick_NSB_,
-"Pre-deployment validation of NFV infrastructure".
+The PROX tool is a DPDK based application optimized for high throughput packet handling.
+As such, we will not measure limitations imposed by the tool, but the capacity of the
+NFVI. In the rare case that the PROX tool would impose a limit, a warning will be logged.
-The methodology consists in decomposing the typical :term:`VNF` work-load
-performance metrics into a number of characteristics/performance vectors, which
-each can be represented by distinct test-cases.
-
-.. seealso:: SampleVNFtst_ for material on alignment ETSI TST001 and SampleVNF.
+ETSI-NFV
+========
+The document ETSI GS NFV-TST009_, "Specification of Networking Benchmarks and
+Measurement Methods for NFVI", specifies vendor-agnostic definitions of performance
+metrics and the associated methods of measurement for Benchmarking networks supported
+in the NFVI. Throughput, latency, packet loss and delay variation will be measured.
+The delay variation is not represented by the Frame Delay Variation (FDV) as defined in
+the specification, but by the average latency, the 99th percentile latency, the maximum
+latency and the complete latency distribution histogram.
Metrics
=======
-The metrics, as defined by ETSI GS NFV-TST001, are shown in
-:ref:`Table1 <table2_1>`.
+The metrics, as reported by the tool, and aligned with the definitions in ETSI GS NFV-TST009_,
+are shown in :ref:`Table1 <table2_1>`.
.. _table2_1:
-**Table 1 - Performance/Speed Metrics**
-
-+---------+-------------------------------------------------------------------+
-| Category| Performance/Speed |
-| | |
-+---------+-------------------------------------------------------------------+
-| Network | * Throughput per NFVI node (frames/byte per second) |
-| | * Throughput provided to a VM (frames/byte per second) |
-| | * Latency per traffic flow |
-| | * Latency between VMs |
-| | * Latency between NFVI nodes |
-| | * Packet delay variation (jitter) between VMs |
-| | * Packet delay variation (jitter) between NFVI nodes |
-| | * RFC 3511 benchmark |
-| | |
-+---------+-------------------------------------------------------------------+
+**Table 1 - Network Metrics**
+
++-----------------+----------------------------------------------------------------+
+| Measurement     | Description                                                    |
+|                 |                                                                |
++-----------------+----------------------------------------------------------------+
+| Throughput      | Maximum amount of traffic that can be sent between 2 VM       |
+|                 | instances, within the allowed packet loss requirements.       |
+|                 | Results are expressed in Mpps and in Gb/s.                     |
++-----------------+----------------------------------------------------------------+
+| Latency         | 99th percentile round trip latency, expressed in micro-seconds |
+|                 | Note that you can also specify the n-th percentile.            |
++-----------------+----------------------------------------------------------------+
+| Delay Variation | Average latency, maximum latency and the latency histogram.    |
++-----------------+----------------------------------------------------------------+
+| Loss            | Packets per second that were lost on their round trip between  |
+|                 | VMs. Total packet loss numbers are also reported.              |
++-----------------+----------------------------------------------------------------+
.. note:: The description in this OPNFV document is intended as a reference for
- users to understand the scope of the SampleVNF Project and the
- deliverables of the SampleVNF framework. For complete description of
- the methodology, please refer to the ETSI document.
-
-.. rubric:: Footnotes
-.. [1] To be included in future deliveries.
-
+ users to execute the benchmarking. For complete description of the methodology,
+ please refer to the ETSI document.
diff --git a/docs/testing/user/userguide/03-architecture.rst b/docs/testing/user/userguide/03-architecture.rst
index 08e1b2f2..bdc51d3f 100755..100644
--- a/docs/testing/user/userguide/03-architecture.rst
+++ b/docs/testing/user/userguide/03-architecture.rst
@@ -37,8 +37,8 @@ validating the sample VNFs through OPEN SOURCE VNF approximations and test tools
The VNFs belongs to this project are never meant for field deployment.
All the VNF source code part of this project requires Apache License Version 2.0.
-Supported deployment:
-----------------------
+Supported deployment
+--------------------
* Bare-Metal - All VNFs can run on a Bare-Metal DUT
* Standalone Virtualization(SV): All VNFs can run on SV like VPP as switch, ovs,
ovs-dpdk, srioc
@@ -47,7 +47,6 @@ Supported deployment:
VNF supported
-------------
- Carrier Grade Network Address Translation (CG-NAT) VNF
- ::
The Carrier Grade Network Address and port Translation (vCG-NAPT) is a
VNF approximation extending the life of the service providers IPv4 network
infrastructure and mitigate IPv4 address exhaustion by using address and
@@ -55,23 +54,19 @@ VNF supported
It also supports the connectivity between the IPv6 access network to
IPv4 data network using the IPv6 to IPv4 address translation and vice versa.
- Firewall (vFW) VNF
- ::
The Virtual Firewall (vFW) is a VNF approximation serving as a state full
L3/L4 packet filter with connection tracking enabled for TCP, UDP and ICMP.
The VNF could be a part of Network Services (industry use-cases) deployed
to secure the enterprise network from un-trusted network.
- Access Control List (vACL) VNF
- ::
The vACL vNF is implemented as a DPDK application using VNF Infrastructure
Library (VIL). The VIL implements common VNF internal, optimized for
Intel Architecture functions like load balancing between cores, IPv4/IPv6
stack features, and interface to NFV infrastructure like OVS or SRIOV.
- UDP_Replay
- ::
The UDP Replay is implemented as a DPDK application using VNF Infrastructure
Library (VIL). Performs as a refelector of all the traffic on given port.
- Prox - Packet pROcessing eXecution engine.
- ::
Packet pROcessing eXecution Engine (PROX) which is a DPDK application.
PROX can do operations on packets in a highly configurable manner.
The PROX application is also displaying performance statistics that can
@@ -142,14 +137,15 @@ The following features were verified by SampleVNF test cases:
Test Framework
--------------
-.. _Yardstick_NSB: http://artifacts.opnfv.org/yardstick/docs/testing_user_userguide/index.html#document-13-nsb-overview
+.. _Yardstick_NSB: http://artifacts.opnfv.org/yardstick/docs/testing_user_userguide/index.html#document-11-nsb-overview
+.. _ETSI GS NFV-TST 001: https://portal.etsi.org/webapp/workprogram/Report_WorkItem.asp?WKI_ID=46009
SampleVNF Test Infrastructure (NSB (Yardstick_NSB_)) in yardstick helps to facilitate
consistent/repeatable methodologies for characterizing & validating the
sample VNFs (:term:`VNF`) through OPEN SOURCE VNF approximations.
-Network Service Benchmarking in yardstick framework follows ETSI GS NFV-TST001_
+Network Service Benchmarking in yardstick framework follows `ETSI GS NFV-TST 001`_
to verify/characterize both :term:`NFVI` & :term:`VNF`
For more inforamtion refer, Yardstick_NSB_
diff --git a/docs/testing/user/userguide/03-installation.rst b/docs/testing/user/userguide/03-installation.rst
new file mode 100644
index 00000000..4407b276
--- /dev/null
+++ b/docs/testing/user/userguide/03-installation.rst
@@ -0,0 +1,162 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International
+.. License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) OPNFV, Intel Corporation and others.
+
+SampleVNF Installation
+======================
+.. _RapidScripting: https://wiki.opnfv.org/display/SAM/Rapid+scripting
+.. _XtestingDocumentation: https://xtesting.readthedocs.io/en/latest/
+
+Abstract
+--------
+The installation procedures described below will result in the deployment of
+all SW components needed to run the benchmarking procedures as defined in ETSI
+GS NFV-TST009 on top of an NFVI instance that is the subject of this characterization.
+Xtesting in combination with the rapid scripts and the PROX tool will be used to achieve this.
+
+The steps needed to run the benchmarking are:
+ 1) Identify a machine on which you will install the containers to support the testing
+ 2) Clone the samplevnf project on that machine
+ 3) Deploy the testing VMs (hosting PROX tool) (Or containers)
+ 4) Deploy your own Xtesting toolchain.
+ 5) Build the test container that will drive the TST009 testing
+ 6) Publish your container on your local repository
+ 7) Execute the testing
+
+In this chapter, we will cover the first 6 installation steps.
+
+Prerequisites
+-------------
+
+Supported Test setup
+^^^^^^^^^^^^^^^^^^^^
+The device under test (DUT) is an NFVI instance on which we can deploy PROX instances.
+A PROX instance is a machine that:
+
+ * has a management interface that can be reached from the test container
+ * has one or more data plane interfaces on a dataplane network.
+ * can be a container, a VM or a bare metal machine. We just need to be able to ssh into the
+ PROX machine from the test container.
+ * is optimized for data plane traffic.
+ * will measure the throughput that is offered through its dataplane interface(s)
+
+There are no requirements on the NFVI instance itself. Of course, the measured throughput will
+depend heavily on the NFVI characteristics.
+In this release, we are supporting an automated deployment of the PROX instance on an NFVI that
+provides the OpenStack Heat interface. You could also deploy the PROX instances using other
+mechanisms. As long as you provide the necessary files describing these instances, the execution
+of the test can also be done automatically (steps 4-7) and hence be executed on different DUTs,
+e.g. VMWare, K8s, bare metal, ...
+
+Below is the basic picture of the deployment needed for the testing.
+
+.. image:: images/rapid.png
+ :width: 800px
+ :alt: supported topology
+
+Different test scenarios can now be executed by deploying the PROX machines on different systems:
+
+ * The generator machine could be deployed on a well-defined compute node that has network access
+   to the other nodes through the TOR. The generated traffic is very similar to external traffic.
+ * The Generator and the Swap instance could be on the same compute node to test E-W traffic between
+   2 instances on the same compute node
+ * The Generator and the Swap instance could be on different compute nodes
+
+Many VMs can be deployed before the test is running: each test case can then use different pairs of
+PROX instances to test all the above scenarios.
+
+Hardware & Software Ingredients
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The only requirement is to have the PROX instances running. There are no stringent requirements to be able
+to run the test. Of course, the dataplane performance will heavily depend on the underlying NFVI HW & SW.
+
+Installation Steps
+------------------
+
+Step 1: Identify a machine on which you will install the containers
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+This machine will need enough resources to install the Xtesting framework and needs to be enabled
+for containers.
+From a network point of view, it will need to have access to the PROX instances: that means it will need
+to be able to ssh into these machines and that the network also needs to allow for TCP port 8474 traffic.
+
+When using the automation to create the VM through the Heat Stack API, this machine also needs to be able
+to execute the OpenStack API. Alternatively, the creation of the VMs can be executed on another machine, but
+this will involve some manual file copying.
+
+Step 2: Clone the samplevnf project on that machine
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: console
+
+ git clone https://git.opnfv.org/samplevnf
+
+Go to the relevant directory in this repository: samplevnf/VNFs/DPPD-PROX/helper-scripts/rapid/
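+
+For example, starting from the directory in which the repository was cloned:
+
+.. code-block:: console
+
+    cd samplevnf/VNFs/DPPD-PROX/helper-scripts/rapid/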
+
+Step 3: Deploy the testing VMs
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+In this step, we will be deploying 2 or more instances that host the PROX tool. At the end of this step,
+the instances will be running and an environment file (default name: rapid.env) will be created. This file
+will have all information needed to run the actual test. You can do this step manually on all kinds of
+platforms (OpenStack, VMWare, K8s, bare metal, ...), but the automation tools described in the rest of this
+paragraph use OpenStack Heat yaml files.
+First, a PROX qcow2 image needs to be downloaded.
+
+.. code-block:: console
+
+ wget http://artifacts.opnfv.org/samplevnf/jerma/prox_jerma.qcow2
+
+This image can also be created manually by following the instructions in RapidScripting_,
+in the section "Creating an image".
+Now upload this image to OpenStack:
+
+.. code-block:: console
+
+ openstack image create --disk-format qcow2 --container-format bare --file prox_jerma.qcow2 rapidVM
+
+Now run createrapid.sh to create the stack. This process takes the config_file as input. Details can be found in
+RapidScripting_, in the section "Deploying the VMs"
+
+.. code-block:: console
+
+ ./createrapid.sh
+
+At the end of this step, VMs should be running and the rapid.env and rapid_key.pem files should be available.
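+
+As an optional sanity check (assuming the default file names were kept), you can
+verify that these files exist and that a PROX instance is reachable on its
+management address, which should be listed in rapid.env. Replace <prox-mgmt-ip>
+below with such an address; it is only a placeholder used for illustration.
+
+.. code-block:: console
+
+    # check that createrapid.sh produced the environment and key files
+    ls -l rapid.env rapid_key.pem
+    # check that the PROX management port (TCP 8474) is reachable, e.g. with netcat
+    nc -zv <prox-mgmt-ip> 8474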
+
+Step 4: Deploy your own Xtesting toolchain
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Install Xtesting as described in XtestingDocumentation_.
+First go to the xtesting directory in samplevnf/VNFs/DPPD-PROX/helper-scripts/rapid/xtesting (this was cloned
+in step 2).
+
+.. code-block:: console
+
+ virtualenv xtesting
+ . xtesting/bin/activate
+ pip install ansible
+ ansible-galaxy install collivier.xtesting
+ ansible-playbook site.yaml
+ deactivate
+ rm -r xtesting
+
+Step 5: Build the test container that will drive the TST009 testing
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Go to the directory samplevnf/VNFs/DPPD-PROX/helper-scripts/rapid/xtesting
+While building this container, some files will be copied into the container image. Two of these files
+are generated by Step 3: rapid.env and rapid_key.pem, and reside in samplevnf/VNFs/DPPD-PROX/helper-scripts/rapid/.
+Please copy them into the xtesting directory.
+The 3rd file that will be copied is testcases.yaml.
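+
+Copying the two generated files can be done as follows, assuming they were left
+in their default location from Step 3 and that you are in the xtesting
+directory:
+
+.. code-block:: console
+
+    cp ../rapid.env ../rapid_key.pem .
+
+Then build the test container: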
+
+.. code-block:: console
+
+ docker build -t 127.0.0.1:5000/rapidxt .
+
+Step 6: Publish your container on your local repository
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: console
+
+ docker push 127.0.0.1:5000/rapidxt
+
+You are now ready to execute the testing.
diff --git a/docs/testing/user/userguide/04-installation.rst b/docs/testing/user/userguide/04-installation.rst
deleted file mode 100644
index e54243cb..00000000
--- a/docs/testing/user/userguide/04-installation.rst
+++ /dev/null
@@ -1,230 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International
-.. License.
-.. http://creativecommons.org/licenses/by/4.0
-.. (c) OPNFV, Intel Corporation and others.
-
-SampleVNF Installation
-======================
-
-Abstract
---------
-
-This project provides a placeholder for various sample VNF
-(Virtual Network Function (:term:`VNF`)) development which includes example
-reference architecture and optimization methods related to VNF/Network service
-for high performance VNFs.
-The sample VNFs are Open Source approximations* of Telco grade VNF’s using
-optimized VNF + NFVi Infrastructure libraries, with Performance Characterization
-of Sample† Traffic Flows.
-
-::
-
- * Not a commercial product. Encourage the community to contribute and close the feature gaps.
- † No Vendor/Proprietary Workloads
-
-SampleVNF supports installation directly in Ubuntu. The installation procedure
-are detailed in the sections below.
-
-The steps needed to run SampleVNF are:
- 1) Install and Build SampleVNF.
- 2) Deploy the VNF on the target and modify the config based on the Network under test
- 3) Run the traffic generator to generate the traffic.
-
-Prerequisites
--------------
-
-Supported Test setup
-^^^^^^^^^^^^^^^^^^^^^
-The device under test (DUT) consists of a system following;
- * A single or dual processor and PCH chip, except for System on Chip (SoC) cases
- * DRAM memory size and frequency (normally single DIMM per channel)
- * Specific Intel Network Interface Cards (NICs)
- * BIOS settings noting those that updated from the basic settings
- * DPDK build configuration settings, and commands used for tests
-Connected to the DUT is an IXIA* or Software Traffic generator like pktgen or TRex,
-simulation platform to generate packet traffic to the DUT ports and
-determine the throughput/latency at the tester side.
-
-Below are the supported/tested (:term:`VNF`) deployment type.
-
-.. image:: images/deploy_type.png
- :width: 800px
- :alt: SampleVNF supported topology
-
-Hardware & Software Ingredients
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-SUT requirements:
-
-::
-
- +-----------+------------------+
- | Item | Description |
- +-----------+------------------+
- | Memory | Min 20GB |
- +-----------+------------------+
- | NICs | 2 x 10G |
- +-----------+------------------+
- | OS | Ubuntu 16.04 LTS |
- +-----------+------------------+
- | kernel | 4.4.0-34-generic|
- +-----------+------------------+
- | DPDK | 17.02 |
- +-----------+------------------+
-
-Boot and BIOS settings:
-
-::
-
- +------------------+---------------------------------------------------+
- | Boot settings | default_hugepagesz=1G hugepagesz=1G hugepages=16 |
- | | hugepagesz=2M hugepages=2048 isolcpus=1-11,22-33 |
- | | nohz_full=1-11,22-33 rcu_nocbs=1-11,22-33 |
- | | Note: nohz_full and rcu_nocbs is to disable Linux*|
- | | kernel interrupts, and it’s import |
- +------------------+---------------------------------------------------+
- |BIOS | CPU Power and Performance Policy <Performance> |
- | | CPU C-state Disabled |
- | | CPU P-state Disabled |
- | | Enhanced Intel® Speedstep® Tech Disabled |
- | | Hyper-Threading Technology (If supported) Enable |
- | | Virtualization Techology Enable |
- | | Coherency Enable |
- | | Turbo Boost Disabled |
- +------------------+---------------------------------------------------+
-
-Network Topology for testing VNFs
----------------------------------
-The ethernet cables should be connected between traffic generator and the VNF server (BM,
-SRIOV or OVS) setup based on the test profile.
-
-The connectivity could be
-
-1) Single port pair : One pair ports used for traffic
-
-::
-
- e.g. Single port pair link0 and link1 of VNF are used
- TG:port 0 <------> VNF:Port 0
- TG:port 1 <------> VNF:Port 1
-
- For correalted traffic, use below configuration
- TG_1:port 0 <------> VNF:Port 0
- VNF:Port 1 <------> TG_2:port 0 (UDP Replay)
- (TG_2(UDP_Replay) reflects all the traffic on the given port)
-
-2) Multi port pair : More than one pair of traffic
-
-::
-
- e.g. Two port pair link 0, link1, link2 and link3 of VNF are used
- TG:port 0 <------> VNF:Port 0
- TG:port 1 <------> VNF:Port 1
- TG:port 2 <------> VNF:Port 2
- TG:port 3 <------> VNF:Port 3
-
- For correalted traffic, use below configuration
- TG_1:port 0 <------> VNF:Port 0
- VNF:Port 1 <------> TG_2:port 0 (UDP Replay)
- TG_1:port 1 <------> VNF:Port 2
- VNF:Port 3 <------> TG_2:port 1 (UDP Replay)
- (TG_2(UDP_Replay) reflects all the traffic on the given port)
-
-* Bare-Metal
- Refer: http://fast.dpdk.org/doc/pdf-guides/ to setup the DUT for VNF to run
-
-* Standalone Virtualization - PHY-VM-PHY
- * SRIOV
- Refer below link to setup sriov
- https://software.intel.com/en-us/articles/using-sr-iov-to-share-an-ethernet-port-among-multiple-vms
-
- * OVS_DPDK
- Refer below link to setup ovs-dpdk
- http://docs.openvswitch.org/en/latest/intro/install/general/
- http://docs.openvswitch.org/en/latest/intro/install/dpdk/
-
- * Openstack
- Use any OPNFV installer to deploy the openstack.
-
-
-Build VNFs on the DUT:
-----------------------
-
-1) Clone sampleVNF project repository - git clone https://git.opnfv.org/samplevnf
-
-Auto Build - Using script to build VNFs
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- * Interactive options:
- ::
-
- ./tools/vnf_build.sh -i
- Follow the steps in the screen from option [1] –> [10] and
- select option [9] to build the vnfs.
- It will automatically download selected DPDK version and any
- required patches and will setup everything and build VNFs.
-
- Options [8], If RestAPI feature is needed install 'civetweb'
-
- Following are the options for setup:
- ----------------------------------------------------------
- Step 1: Environment setup.
- ----------------------------------------------------------
- [1] Check OS and network connection
- [2] Select DPDK RTE version
-
- ----------------------------------------------------------
- Step 2: Download and Install
- ----------------------------------------------------------
- [3] Agree to download
- [4] Download packages
- [5] Download DPDK zip
- [6] Build and Install DPDK
- [7] Setup hugepages
- [8] Download and Build civetweb
-
- ----------------------------------------------------------
- Step 3: Build VNFs
- ----------------------------------------------------------
- [9] Build all VNFs (vACL, vCGNAPT, vFW, UDP_Replay, DPPD-PROX)
-
- [10] Exit Script
-
- * non-Interactive options:
- ::
- ./tools/vnf_build.sh -s -d=<dpdk version eg 17.02>
-
-Manual Build
-^^^^^^^^^^^^
-
- ::
-
- 1. Download DPDK supported version from dpdk.org
- * http://dpdk.org/browse/dpdk/snapshot/dpdk-$DPDK_RTE_VER.zip
- * unzip dpdk-$DPDK_RTE_VER.zip and apply dpdk patches only in case of 16.04 (Not required for other DPDK versions)
- * cd dpdk
- * make config T=x86_64-native-linuxapp-gcc O=x86_64-native-linuxapp-gcc
- * cd x86_64-native-linuxapp-gcc
- * make -j
- 2. Add this to Go to /etc/default/grub configuration file to setup hugepages.
- * Append “default_hugepagesz=1G hugepagesz=1G hugepages=8 hugepagesz=2M hugepages=2048” to the GRUB_CMDLINE_LINUX entry.
- 3. Setup Environment Variable
- * export RTE_SDK=<samplevnf>/dpdk
- * export RTE_TARGET=x86_64-native-linuxapp-gcc
- * export VNF_CORE=<samplevnf> or using ./tools/setenv.sh
- 4. Build SampleVNFs e.g, vACL
- * cd <samplevnf>/VNFs/vACL
- * make clean
- * make
- * The vACL executable will be created at the following location
- <samplevnf>/VNFs/vACL/build/vACL
-
-2) Standalone virtualization/Openstack:
-
- Build VM image from script in yardstick
- ::
- 1) git clone https://git.opnfv.org/samplevnf
- 2) cd samplevnf and run
- ./tools/samplevnf-img-dpdk-samplevnf-modify tools/ubuntu-server-cloudimg-samplevnf-modify.sh
- Image available in: /tmp/workspace/samplevnf/xenial-server-cloudimg-amd64-disk1.img
-
-To run VNFs. Please refer chapter `05-How_to_run_SampleVNFs.rst`
diff --git a/docs/testing/user/userguide/04-running_the_test.rst b/docs/testing/user/userguide/04-running_the_test.rst
new file mode 100644
index 00000000..3d3a1e6c
--- /dev/null
+++ b/docs/testing/user/userguide/04-running_the_test.rst
@@ -0,0 +1,226 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International
+.. License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) OPNFV, Intel Corporation and others.
+
+================
+Running the test
+================
+.. _NFV-TST009: https://docbox.etsi.org/ISG/NFV/open/Publications_pdf/Specs-Reports/NFV-TST%20009v3.2.1%20-%20GS%20-%20NFVI_Benchmarks.pdf
+.. _TST009_Throughput_64B_64F.test: https://github.com/opnfv/samplevnf/blob/master/VNFs/DPPD-PROX/helper-scripts/rapid/tests/TST009_Throughput_64B_64F.test
+.. _rapid_location: https://github.com/opnfv/samplevnf/blob/master/VNFs/DPPD-PROX/helper-scripts/rapid/
+
+Overview
+--------
+A default test will be run automatically when you launch the testing. The
+details and definition of that test are defined in file
+TST009_Throughput_64B_64F.test_.
+
+We will discuss the sections of such a test file and how this can be changed to
+accommodate the testing you want to execute. This will be done by creating your
+own test file and making sure it becomes part of your testcases.yaml, as will
+be shown below.
+
+As the name of the default test file suggests, the test will find the
+throughput, latency and packet loss according to NFV-TST009_, for packets that
+are 64 bytes long and for 64 different flows.
+
+Test File Description
+---------------------
+The test file has multiple sections. The first section is a generic section
+called TestParameters. Then there are 1 or more sections describing the test
+machines we will be using in the test. The sections are named TestMx, where x
+is a number (starting with 1). The tests to be executed are described in a
+section called testy, where y is the number of the test to be executed,
+starting with 1. In this automated testing driven by Xtesting, we will
+typically only run 1 test.
+
+TestParameters
+^^^^^^^^^^^^^^
+In this section, the name of the test is specified. This is only used in the
+reporting and has no influence on the actual testing.
+
+.. code-block:: console
+
+ name = Rapid_ETSINFV_TST009
+
+The number of tests that will be executed by this run and that will be described
+in the [testy] sections is defined by the number_of_tests parameter. In the
+Xtesting framework that we are using here, this will typically be set to 1.
+
+.. code-block:: console
+
+ number_of_tests = 1
+
+The total number of machines to be used in this testing will be defined by the
+parameter total_number_of_test_machines. The function that these machines have
+in this test will be described in the [TestMx] section. Typically, this number
+will be set to 2, but many more machines can participate in a test.
+
+.. code-block:: console
+
+ total_number_of_test_machines = 2
+
+lat_percentile is a variable that sets which percentile to use during the
+course of this test. This will be used to report the percentile round trip
+latency and is a better measurement for the high latencies during this test than
+the maximum latency which will also be reported. Note that we also report the
+total round trip latency histogram.
+
+.. code-block:: console
+
+ lat_percentile = 99
+
+
+TestMx
+^^^^^^
+In the TestMx sections, where x denotes the index of the machine, the function
+of the machine in the testing will be described. The machine can be defined as
+a generator, or as a packet reflector (swap function). The machines can be any
+machine that is created upfront (See step 3 of the installation steps). Other
+functions can also be executed by the test machines and examples of test files
+can be found in rapid_location_.
+
+The first parameter is the name of the machine and is only used for referencing
+the machine. This will be the name of the PROX instance and will be shown in
+case you run the PROX UI. In this automated testing, this will not be
+visible.
+
+The config_file parameter defines which PROX config file is used by the PROX
+program and what PROX will be
+doing. For a generator, this will typically be gen.cfg. Multiple cfg files
+exist in the rapid_location_.
+
+The dest_vm parameter is used by a generator to find out to
+which VM it needs to send the packets. In the example below, the packets will be
+sent to TestM2.
+
+The gencores parameter defines a list of cores to be used for the generator tasks.
+Note that if you specify more than 1 core, the interface will need to support as
+many tx queues as there are generator cores.
+
+The latcores parameter specifies a
+list of cores to be used by the latency measurement tasks. You need as many rx
+queues on the interface as specified in the latcores parameter.
+
+The default value for the
+bucket_size_exp parameter is 12. It is also its minimum value. In case most of
+the latency measurements in the histogram are falling in the last bucket, this
+number needs to be increased. Every time you increase this number by 1, the
+bucket size for the latency histogram is multiplied by 2. There are 128 buckets
+in the histogram.
+
+cores is a parameter that will be used by non-generator configurations that
+don't need a distinction between generator and latency cores (e.g. swap.cfg).
+
+Changing these parameters requires in-depth knowledge of the PROX tool and is
+not something to start with.
+
+.. code-block:: console
+
+ name = Generator
+ config_file = gen.cfg
+ dest_vm = 2
+ gencores = [1]
+ latcores = [3]
+ #bucket_size_exp = 12
+
+testy
+^^^^^
+In the testy sections, where y denotes the index of the test, the test that will
+be executed on the machines specified in the TestMx sections is described.
+Using Xtesting, we will typically only run 1 test.
+The test parameter defines which test needs to be run. This is a hardcoded
+string and can only be one of the following: ['flowsizetest', 'TST009test',
+'fixed_rate', 'increment_till_fail', 'corestats', 'portstats', 'impairtest',
+'irqtest', 'warmuptest']. In this project, we will use the TST009test test.
+For examples of the other tests, please check out the other test files in
+rapid_location_.
+
+The pass_threshold parameter defines the success criterion for the test. When
+the test uses multiple combinations of packet size and flows, all combinations
+must meet the same threshold. If one of the combinations fails, the test
+will be reported as failed.
+The threshold is expressed in Mpps.
+
+The imixs parameter defines the packet sizes that will be used. Each element in
+the imixs list will result in a separate test. Each element is in turn a list
+of packet sizes which will be used during one test execution. If you only want
+to test 1 imix size, define imixs with only one element. For each element in
+the imixs list, the generator will iterate over the packet lengths and send them
+out in the order specified in the list. An example of an imix list is [128,
+256, 64, 64, 128]. In this case, 40% of the packets will have a size of 64
+bytes, 40% will have a packet size of 128 bytes and 20% will have a packet size
+of 256 bytes. When using this with Xtesting, we will typically only use 1 imix.
+When results are needed for more sizes, create a specific test file per size
+and launch the different tests using Xtesting.
+
+The flows parameter is a list of flow sizes. For each flow size, a test will be
+run with the specified number of flows. The flow size needs to be a power of 2,
+with a maximum of 2^30. If it is not a power of 2, the lowest power of 2 that is
+larger than the requested number of flows will be used; e.g. 9 results in 16 flows.
+Same remark as for the imixs parameter: we will only use one element in the
+flows list. When more flows need to be tested, create a different test file and
+launch it using Xtesting.
+
+The drop_rate_threshold parameter specifies the maximum ratio of packets that
+can be dropped while still considering the test run as successful. Note that a
+value of 0 means absolute zero packet loss: even if we lose 1 packet during a
+certain step in a test run, it will be marked as failed.
+
+The lat_avg_threshold, lat_perc_threshold and lat_max_threshold parameters
+define the maximum acceptable round trip latency to mark a test step as
+successful. You can set this threshold for the average, the percentile and the
+maximum latency. Which percentile is used is defined in the TestParameters
+section. All these thresholds are expressed in microseconds. You can also set
+the value to inf, which means the threshold will never be reached and hence is
+not used to decide whether the run is successful or not.
+
+The MAXr, MAXz, MAXFramesPerSecondAllIngress and StepSize parameters are defined in
+NFV-TST009_ and are used to control the binary search algorithm.
+
+The ramp_step variable controls the ramping of the generated traffic. When not
+specified, the requested traffic for each step in the testing will be applied
+immediately. If specified, the generator will slowly ramp up to the requested
+speed, increasing the traffic each second by the value specified in this
+parameter, until it reaches the requested speed. This parameter is expressed in
+units of 100 Mb/s.
+
+.. code-block:: console
+
+ pass_threshold=0.001
+ imixs=[[128, 256, 64, 64, 128]]
+ flows=[64]
+ drop_rate_threshold = 0
+ lat_avg_threshold = inf
+ lat_perc_threshold = inf
+ lat_max_threshold = inf
+ MAXr = 3
+ MAXz = 5000
+ MAXFramesPerSecondAllIngress = 12000000
+ StepSize = 10000
+ #ramp_step = 1
+
+Modifying the test
+------------------
+In case you want to modify the parameters as specified in
+TST009_Throughput_64B_64F.test_, it is best to create your own test file. Your
+test file will need to be uploaded to the test container. Hence you will have to
+rebuild your container and add an extra copy command to the Dockerfile so that
+your new test file will be available in the container (see the example below).
+Then you will need to modify the testcases.yaml file. One of the args that you
+can specify is the test_file. Put your newly created test file as the new value
+for this argument.
+Now build and publish your test container as specified in steps 5 & 6 of the
+installation procedure.
+
+Note that arguments other than test_file can be specified in testcases.yaml.
+For a list of arguments, please check out the test_params dictionary in
+rapid_defaults.py, which you can find in rapid_location_.
+It is advised not to change these parameters unless you have an in-depth
+knowledge of the code.
+The only 2 arguments that can be changed are the test_file, which was already
+discussed, and the runtime argument. This argument defines how long each test
+run will take and is expressed in seconds (see the example below).
diff --git a/docs/testing/user/userguide/05-How_to_run_SampleVNFs.rst b/docs/testing/user/userguide/05-How_to_run_SampleVNFs.rst
index 7ba25fe1..28da0ebd 100644
--- a/docs/testing/user/userguide/05-How_to_run_SampleVNFs.rst
+++ b/docs/testing/user/userguide/05-How_to_run_SampleVNFs.rst
@@ -17,6 +17,7 @@ The device under test (DUT) consists of a system following;
* Specific Intel Network Interface Cards (NICs)
* BIOS settings noting those that updated from the basic settings
* DPDK build configuration settings, and commands used for tests
+
Connected to the DUT is an IXIA* or Software Traffic generator like pktgen or TRex,
simulation platform to generate packet traffic to the DUT ports and
determine the throughput/latency at the tester side.
@@ -103,17 +104,16 @@ The connectivity could be
(TG_2(UDP_Replay) reflects all the traffic on the given port)
* Bare-Metal
- Refer: http://fast.dpdk.org/doc/pdf-guides/ to setup the DUT for VNF to run
+ Refer: http://fast.dpdk.org/doc/pdf-guides/ to setup the DUT for VNF to run
* Standalone Virtualization - PHY-VM-PHY
+
* SRIOV
- Refer below link to setup sriov
- https://software.intel.com/en-us/articles/using-sr-iov-to-share-an-ethernet-port-among-multiple-vms
+ https://software.intel.com/en-us/articles/using-sr-iov-to-share-an-ethernet-port-among-multiple-vms
* OVS_DPDK
- Refer below link to setup ovs-dpdk
- http://docs.openvswitch.org/en/latest/intro/install/general/
- http://docs.openvswitch.org/en/latest/intro/install/dpdk/
+ http://docs.openvswitch.org/en/latest/intro/install/general/
+ http://docs.openvswitch.org/en/latest/intro/install/dpdk/
* Openstack
Use any OPNFV installer to deploy the openstack.
@@ -132,19 +132,21 @@ Step 0: Preparing hardware connection
Step 1: Setting up Traffic generator (TRex)
TRex Software preparations
- **************************
* Install the OS (Bare metal Linux, not VM!)
* Obtain the latest TRex package: wget https://trex-tgn.cisco.com/trex/release/latest
* Untar the package: tar -xzf latest
* Change dir to unzipped TRex
* Create config file using command: sudo python dpdk_setup_ports.py -i
+
In case of Ubuntu 16 need python3
+
See paragraph config creation for detailed step-by-step
+
(Refer: https://trex-tgn.cisco.com/trex/doc/trex_stateless_bench.html)
Build SampleVNFs
------------------
+----------------
Step 2: Procedure to build SampleVNFs
@@ -487,7 +489,7 @@ step 4: Run Test using traffic geneator
UDP_Replay - How to run
-----------------------------------------
+-----------------------
Step 3: Bind the datapath ports to DPDK
@@ -532,7 +534,7 @@ step 4: Run Test using traffic geneator
For more details refer: https://trex-tgn.cisco.com/trex/doc/trex_stateless_bench.html
PROX - How to run
-------------------
+-----------------
Description
^^^^^^^^^^^
@@ -654,7 +656,7 @@ PROX COMMANDS AND SCREENS
+----------------------------------------------+---------------------------------------------------------------------------+----------------------------+
| version | Show version | |
+----------------------------------------------+---------------------------------------------------------------------------+----------------------------+
- | port_stats <port id> | Print rate for no_mbufs, ierrors, rx_bytes, tx_bytes, rx_pkts, | |
+ | port_stats <port id> | Print rate for no_mbufs, ierrors, rx_bytes, tx_bytes, rx_pkts, | |
| | tx_pkts and totals for RX, TX, no_mbufs ierrors for port <port id> | |
+----------------------------------------------+---------------------------------------------------------------------------+----------------------------+
@@ -941,7 +943,7 @@ PROX Compiation installation
* cd samplevnf
* export RTE_SDK=`pwd`/dpdk
* export RTE_TARGET=x86_64-native-linuxapp-gcc
-* git clone http://dpdk.org/git/dpdk
+* git clone git://dpdk.org/dpdk
* cd dpdk
* git checkout v17.05
* make install T=$RTE_TARGET
diff --git a/docs/testing/user/userguide/06-How_to_use_REST_api.rst b/docs/testing/user/userguide/06-How_to_use_REST_api.rst
index b8c0cbea..ba768d78 100644
--- a/docs/testing/user/userguide/06-How_to_use_REST_api.rst
+++ b/docs/testing/user/userguide/06-How_to_use_REST_api.rst
@@ -3,12 +3,12 @@
.. http://creativecommons.org/licenses/by/4.0
.. (c) opnfv, national center of scientific research "demokritos" and others.
-========================================================
+========
REST API
-========================================================
+========
Introduction
----------------
+------------
As the internet industry progresses creating REST API becomes more concrete
with emerging best Practices. RESTful web services don’t follow a prescribed
standard except fpr the protocol that is used which is HTTP, its important
@@ -26,7 +26,7 @@ Here are important points to be considered:
always same no matter how many times these operations are invoked.
* PUT and POST operation are nearly same with the difference lying
only in the result where PUT operation is idempotent and POST
- operation can cause different result.
+ operation can cause different result.
REST API in SampleVNF
@@ -45,7 +45,7 @@ REST api on VNF’s will help adapting with the new automation techniques
being adapted in yardstick.
Web server integration with VNF’s
-----------------------------------
+---------------------------------
In order to implement REST api’s in VNF one of the first task is to
identify a simple web server that needs to be integrated with VNF’s.
@@ -150,7 +150,7 @@ API Usage
---------
Run time Usage
-^^^^^^^^^^^^^^
+==============
An application(say vFW) with REST API support is run as follows
with just PORT MASK as input. The following environment variables
@@ -182,6 +182,7 @@ samplevnf directory).
2. Check the Link IP's using the REST API (vCGNAPT/vACL/vFW)
::
+
e.g curl <IP>/vnf/config/link
This would indicate the number of links enabled. You should enable all the links
@@ -194,6 +195,7 @@ samplevnf directory).
3. Now that links are enabled we can configure IP's using link method as follows (vCGNAPT/vACL/vFW)
::
+
e.g curl -X POST -H "Content-Type:application/json" -d '{"ipv4":"<IP to be configured>","depth":"24"}'
http://<IP>/vnf/config/link/0
curl -X POST -H "Content-Type:application/json" -d '{"ipv4":"IP to be configured","depth":"24"}'
@@ -207,6 +209,7 @@ samplevnf directory).
4. Adding arp entries we can use this method (vCGNAPT/vACL/vFW)
::
+
/vnf/config/arp
e.g
@@ -220,15 +223,17 @@ samplevnf directory).
5. Adding route entries we can use this method (vCGNAPT/vACL/vFW)
::
+
/vnf/config/route
e.g curl -X POST -H "Content-Type:application/json" -d '{"type":"net", "depth":"8", "nhipv4":"202.16.100.20",
- "portid":"0"}' http://10.223.166.240/vnf/config/route
+ "portid":"0"}' http://10.223.166.240/vnf/config/route
curl -X POST -H "Content-Type:application/json" -d '{"type":"net", "depth":8", "nhipv4":"172.16.100.20",
"portid":"1"}' http://10.223.166.240/vnf/config/route
5. In order to load the rules a script file needs to be posting a script.(vACL/vFW)
::
+
/vnf/config/rules/load
Typical example for loading a script file is shown below
@@ -239,12 +244,14 @@ samplevnf directory).
6. The following REST api's for runtime configuring through a script (vCGNAPT Only)
::
+
/vnf/config/rules/clear
/vnf/config/nat
/vnf/config/nat/load
7. For debug purpose following REST API's could be used as described above.(vCGNAPT/vACL/vFW)
::
+
/vnf/dbg
e.g curl http://10.223.166.240/vnf/config/dbg
@@ -258,10 +265,12 @@ samplevnf directory).
8. For stats we can use the following method (vCGNAPT/vACL/vFW)
::
+
/vnf/stats
e.g curl <IP>/vnf/stats
9. For quittiong the application (vCGNAPT/vACL/vFW)
::
+
/vnf/quit
e.g curl <IP>/vnf/quit
diff --git a/docs/testing/user/userguide/07-Config_files.rst b/docs/testing/user/userguide/07-Config_files.rst
index d5564e8d..f96462e1 100644
--- a/docs/testing/user/userguide/07-Config_files.rst
+++ b/docs/testing/user/userguide/07-Config_files.rst
@@ -380,7 +380,7 @@ This configuration doesn't require LOADB and TXRX pipelines
vACL Config files
-----------------
+-----------------
The reference configuration files explained here are for Software and Hardware
loadbalancing with IPv4 traffic type and single port pair.
diff --git a/docs/testing/user/userguide/images/rapid.png b/docs/testing/user/userguide/images/rapid.png
new file mode 100644
index 00000000..1c9b05bd
--- /dev/null
+++ b/docs/testing/user/userguide/images/rapid.png
Binary files differ
diff --git a/docs/testing/user/userguide/index.rst b/docs/testing/user/userguide/index.rst
index 8d797627..5cc2c5e1 100644
--- a/docs/testing/user/userguide/index.rst
+++ b/docs/testing/user/userguide/index.rst
@@ -10,15 +10,8 @@ SampleVNF User Guide
.. toctree::
:maxdepth: 4
- :numbered:
- 01-introduction
- 02-methodology
- 03-architecture
- 04-installation
- 05-How_to_run_SampleVNFs
- 06-How_to_use_REST_api
- 07-Config_files
- 08-CLI_Commands_Reference
- glossary
- references
+ 01-introduction.rst
+ 02-methodology.rst
+ 03-installation.rst
+ 04-running_the_test.rst
diff --git a/docs/testing/user/userguide/references.rst b/docs/testing/user/userguide/references.rst
index 30f6e604..f00a872c 100644
--- a/docs/testing/user/userguide/references.rst
+++ b/docs/testing/user/userguide/references.rst
@@ -11,8 +11,8 @@ References
OPNFV
=====
-* Yardstick wiki: https://wiki.opnfv.org/yardstick
-* SampleVNF wiki: https://wiki.opnfv.org/samplevnf
+* Yardstick wiki: https://wiki-old.opnfv.org/display/yardstick
+* SampleVNF wiki: https://wiki-old.opnfv.org/display/SAM
References used in Test Cases
=============================
@@ -22,7 +22,7 @@ References used in Test Cases
* DPDK: http://dpdk.org
* DPDK supported NICs: http://dpdk.org/doc/nics
* fdisk: http://www.tldp.org/HOWTO/Partition/fdisk_partitioning.html
-* fio: http://www.bluestop.org/fio/HOWTO.txt
+* fio: https://github.com/axboe/fio
* free: http://manpages.ubuntu.com/manpages/trusty/en/man1/free.1.html
* iperf3: https://iperf.fr/
* Lmbench man-pages: http://manpages.ubuntu.com/manpages/trusty/lat_mem_rd.8.html
diff --git a/rapidvm/README.rst b/rapidvm/README.rst
new file mode 100644
index 00000000..9ab02f10
--- /dev/null
+++ b/rapidvm/README.rst
@@ -0,0 +1,38 @@
+RAPID VM IMAGE
+++++++++++++++
+
+This repo builds a CentOS 7 image with DPDK and PROX installed.
+Optimizations for DPDK will also be applied.
+
+BUILD INSTRUCTIONS
+==================
+
+Build the image
+---------------
+- cd dib
+- update the version number for the image (if needed) by modifying __version__ in build-image.sh
+- setup your http_proxy if needed
+- bash build-image.sh
+
+IMAGE INSTANCE AND CONFIG
+=========================
+
+VM Requirements
+---------------
+The instance must be launched with:
+- 1 network interface for the management network
+- at least 1 interface for the dataplane networks
+- at least 4 vCPUs
+- 4 GB RAM
+- cpu pinning set to exclusive
+
+Auto-configuration
+------------------
+The rapid scripts will configure the PROX instances and drive the testing.
+
+
+Hardcoded Username and Password
+--------------------------------
+In case of problems, you can ssh into the VM:
+- Username: rapid
+- Password: rapid
diff --git a/rapidvm/dib/build-image.sh b/rapidvm/dib/build-image.sh
new file mode 100755
index 00000000..23fe17ca
--- /dev/null
+++ b/rapidvm/dib/build-image.sh
@@ -0,0 +1,99 @@
+#!/usr/bin/env bash
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# A shell script to build the PROX VM image using diskimage-builder
+#
+usage() {
+ echo "Usage: $0 [-i image_name] [-g gs-url] [-v]"
+ echo " -i image_appendix image name to be pushed to google storage)"
+ echo " -g gs_url url to store the image"
+ echo " -v verify only (build but do not push to google storage)"
+ echo " -w cache cache directory for disk-image-create"
+ exit 1
+}
+
+# set -e
+#default values
+image_appendix="test"
+workspace="/home/jenkins-ci/opnfv/slave_root/workspace"
+gs_url="artifacts.opnfv.org/samplevnf/images"
+verify_only=0
+while getopts i:g:vw: flag
+do
+ case "${flag}" in
+ i) image_appendix=${OPTARG};;
+ g) gs_url=${OPTARG};;
+ v) verify_only=1;;
+ w) workspace=${OPTARG};;
+ *) usage;exit 1;;
+ esac
+done
+echo "gs_url: $gs_url";
+echo "Verify only: $verify_only";
+image_name=rapid-${image_appendix}
+echo "image name: $image_name.qcow2"
+echo "workspace: $workspace"
+
+# Install diskimage-builder (and gsutil) in a Python virtual environment
+python3 -m venv dib-rapid-venv
+. dib-rapid-venv/bin/activate
+pip3 install --upgrade pip
+pip3 install six
+pip3 install diskimage-builder
+pip3 install gsutil
+
+echo "Checking if image exists in google storage..."
+if command -v gsutil >/dev/null; then
+ if gsutil -q stat gs://$gs_url/$image_name.qcow2; then
+ echo "Image already exists at http://$gs_url/$image_name.qcow2"
+ fi
+ echo "Starting build..."
+ echo
+else
+ echo "Cannot check image availability in OPNFV artifact repository (gsutil not available)"
+fi
+
+# Add rapid elements directory to the DIB elements path
+export ELEMENTS_PATH=`pwd`/elements
+# canned user/password for direct login
+export DIB_DEV_USER_USERNAME=prox
+export DIB_DEV_USER_PASSWORD=prox
+export DIB_DEV_USER_PWDLESS_SUDO=Y
+# Set the cloud-init data sources to be used (Ec2, ConfigDrive and OpenStack)
+export DIB_CLOUD_INIT_DATASOURCES="Ec2, ConfigDrive, OpenStack"
+# Use ELRepo to have latest kernel
+export DIB_USE_ELREPO_KERNEL=True
+echo "Building $image_name.qcow2..."
+cache=$workspace/cache
+mkdir -p $cache
+time disk-image-create -o $image_name --image-cache $cache centos7 cloud-init rapid vm
+
+ls -l $image_name.qcow2
+
+
+if [ $verify_only -eq 1 ]; then
+ echo "Image verification SUCCESS"
+ echo "NO upload to google storage (-v)"
+else
+ if command -v gsutil >/dev/null; then
+ echo "Uploading $image_name.qcow2..."
+ gsutil cp $image_name.qcow2 gs://$gs_url/$image_name.qcow2
+ echo "You can access image at http://$gs_url/$image_name.qcow2"
+ else
+ echo "Cannot upload new image to the OPNFV artifact repository (gsutil not available)"
+ exit 1
+ fi
+fi
+deactivate
+rm -r dib-rapid-venv
diff --git a/rapidvm/dib/elements/rapid/element-deps b/rapidvm/dib/elements/rapid/element-deps
new file mode 100644
index 00000000..c6be0aa3
--- /dev/null
+++ b/rapidvm/dib/elements/rapid/element-deps
@@ -0,0 +1,5 @@
+vm
+cloud-init-datasources
+install-static
+package-installs
+devuser
diff --git a/rapidvm/dib/elements/rapid/package-installs.yaml b/rapidvm/dib/elements/rapid/package-installs.yaml
new file mode 100644
index 00000000..8b3a3cf3
--- /dev/null
+++ b/rapidvm/dib/elements/rapid/package-installs.yaml
@@ -0,0 +1,20 @@
+deltarpm:
+yum-utils:
+git:
+wget:
+gcc:
+unzip:
+libpcap-devel:
+ncurses-devel:
+libedit-devel:
+lua-devel:
+kernel-devel:
+iperf3:
+pciutils:
+numactl-devel:
+vim:
+tuna:
+openssl-devel:
+wireshark:
+make:
+driverctl:
diff --git a/rapidvm/dib/elements/rapid/post-install.d/40-mlib b/rapidvm/dib/elements/rapid/post-install.d/40-mlib
new file mode 100755
index 00000000..34dc1b9c
--- /dev/null
+++ b/rapidvm/dib/elements/rapid/post-install.d/40-mlib
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+#
+# Copyright (c) 2021 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+MULTI_BUFFER_LIB_VER="0.52"
+BUILD_DIR="/opt/rapid"
+export AESNI_MULTI_BUFFER_LIB_PATH="${BUILD_DIR}/intel-ipsec-mb-${MULTI_BUFFER_LIB_VER}"
+# Downloading the Multi-buffer library. Note that the version to download is linked to the DPDK version being used
+pushd ${BUILD_DIR} > /dev/null 2>&1
+wget https://www.nasm.us/pub/nasm/releasebuilds/2.14.02/linux/nasm-2.14.02-0.fc27.x86_64.rpm
+rpm -ivh nasm-2.14.02-0.fc27.x86_64.rpm
+wget https://github.com/01org/intel-ipsec-mb/archive/v${MULTI_BUFFER_LIB_VER}.zip
+unzip v${MULTI_BUFFER_LIB_VER}.zip
+pushd ${AESNI_MULTI_BUFFER_LIB_PATH}
+make -j`getconf _NPROCESSORS_ONLN`
+make install
+popd > /dev/null 2>&1
+popd > /dev/null 2>&1
diff --git a/rapidvm/dib/elements/rapid/post-install.d/50-compile-dpdk b/rapidvm/dib/elements/rapid/post-install.d/50-compile-dpdk
new file mode 100755
index 00000000..6a7fdf36
--- /dev/null
+++ b/rapidvm/dib/elements/rapid/post-install.d/50-compile-dpdk
@@ -0,0 +1,38 @@
+#!/usr/bin/env bash
+#
+# Copyright (c) 2021 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# pick up the kernel version for the target image
+BUILD_DIR="/opt/rapid"
+export RTE_SDK="${BUILD_DIR}/dpdk"
+export RTE_TARGET="x86_64-native-linuxapp-gcc"
+
+LATEST_KERNEL_INSTALLED=`ls -v1 /lib/modules/ | tail -1`
+export RTE_KERNELDIR="/lib/modules/${LATEST_KERNEL_INSTALLED}/build"
+
+pushd ${RTE_SDK} > /dev/null 2>&1
+make config T=${RTE_TARGET}
+# Starting from DPDK 20.05, the IGB_UIO driver is not compiled by default.
+# The sed command below enables the IGB_UIO driver compilation
+sed -i 's/CONFIG_RTE_EAL_IGB_UIO=n/CONFIG_RTE_EAL_IGB_UIO=y/g' ${RTE_SDK}/build/.config
+#sed -i 's/CONFIG_RTE_LIBRTE_PMD_AESNI_MB=n/CONFIG_RTE_LIBRTE_PMD_AESNI_MB=y/g' ${RTE_SDK}/build/.config
+sed -i 's/CONFIG_RTE_APP_TEST=y/CONFIG_RTE_APP_TEST=n/g' ${RTE_SDK}/build/.config
+sed -i 's/CONFIG_RTE_TEST_PMD=y/CONFIG_RTE_TEST_PMD=n/g' ${RTE_SDK}/build/.config
+sed -i 's/CONFIG_RTE_TEST_BBDEV=y/CONFIG_RTE_TEST_BBDEV=n/g' ${RTE_SDK}/build/.config
+sed -i 's/CONFIG_RTE_APP_COMPRESS_PERF=y/CONFIG_RTE_APP_COMPRESS_PERF=n/g' ${RTE_SDK}/build/.config
+sed -i 's/CONFIG_RTE_APP_CRYPTO_PERF=y/CONFIG_RTE_APP_CRYPTO_PERF=n/g' ${RTE_SDK}/build/.config
+#sed -i 's/CONFIG_RTE_APP_EVENTDEV=y/CONFIG_RTE_APP_EVENTDEV=n/g' ${RTE_SDK}/build/.config
+make -j`getconf _NPROCESSORS_ONLN`
+popd > /dev/null 2>&1
diff --git a/rapidvm/dib/elements/rapid/post-install.d/60-compile-prox b/rapidvm/dib/elements/rapid/post-install.d/60-compile-prox
new file mode 100755
index 00000000..ebb87fd8
--- /dev/null
+++ b/rapidvm/dib/elements/rapid/post-install.d/60-compile-prox
@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+#
+# Copyright (c) 2021 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+BUILD_DIR="/opt/rapid"
+export RTE_SDK="${BUILD_DIR}/dpdk"
+export RTE_TARGET="build"
+pushd ${BUILD_DIR}/samplevnf/VNFs/DPPD-PROX > /dev/null 2>&1
+make -j`getconf _NPROCESSORS_ONLN`
+cp ${BUILD_DIR}/samplevnf/VNFs/DPPD-PROX/build/app/prox ${BUILD_DIR}/prox
+cp helper-scripts/rapid/check_prox_system_setup.sh ${BUILD_DIR}
+cp helper-scripts/rapid/check-prox-system-setup.service ${BUILD_DIR}
+cp helper-scripts/rapid/sharkproxlog.sh ${BUILD_DIR}
+cp helper-scripts/rapid/deploycentostools.sh ${BUILD_DIR}
+cp helper-scripts/rapid/rapid_rsa_key.pub ${BUILD_DIR}
+popd > /dev/null 2>&1
diff --git a/rapidvm/dib/elements/rapid/post-install.d/70-os-cfg b/rapidvm/dib/elements/rapid/post-install.d/70-os-cfg
new file mode 100755
index 00000000..5171a32b
--- /dev/null
+++ b/rapidvm/dib/elements/rapid/post-install.d/70-os-cfg
@@ -0,0 +1,50 @@
+#!/usr/bin/env bash
+#
+# Copyright (c) 2021 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+BUILD_DIR="/opt/rapid"
+# huge pages to be used by DPDK
+sh -c '(echo "vm.nr_hugepages = 1024") > /etc/sysctl.conf'
+
+sh -c '(echo "options vfio enable_unsafe_noiommu_mode=1") > /etc/modprobe.d/vfio.conf'
+sh -c '(echo "vfio") > /etc/modules-load.d/vfio.conf'
+sh -c '(echo "vfio-pci") > /etc/modules-load.d/vfio.conf'
+# Enabling tuned with the realtime-virtual-guest profile
+pushd ${BUILD_DIR} > /dev/null 2>&1
+wget http://linuxsoft.cern.ch/cern/centos/7/rt/x86_64/Packages/tuned-profiles-realtime-2.9.0-1.el7_5.2.noarch.rpm
+wget http://linuxsoft.cern.ch/cern/centos/7/rt/x86_64/Packages/tuned-profiles-nfv-guest-2.9.0-1.el7_5.2.noarch.rpm
+# Install with --nodeps. The latest CentOS cloud images come with a tuned version higher than 2.8. These 2 packages however
+# do not depend on v2.8 and also work with tuned 2.9. Need to be careful in the future
+rpm -ivh ${BUILD_DIR}/tuned-profiles-realtime-2.9.0-1.el7_5.2.noarch.rpm --nodeps
+rpm -ivh ${BUILD_DIR}/tuned-profiles-nfv-guest-2.9.0-1.el7_5.2.noarch.rpm --nodeps
+# Although we do not know how many cores the VM will have when it is deployed for real testing, we already put a number for the
+# isolated CPUs so we can start the realtime-virtual-guest profile. If we don't, that command will fail.
+# When the VM is instantiated, the check_kernel_params service will check the real number of cores available to this VM
+# and update realtime-virtual-guest-variables.conf accordingly.
+echo "isolated_cores=1-3" | tee -a /etc/tuned/realtime-virtual-guest-variables.conf
+# The actual tuned-adm profile is now done in check_prox_system_setup.sh and is started through
+# the check-prox-system-setup.service. This will happen when the system is booting.
+
+# Install the check-prox-system-setup service to make sure that the grub cmd line has the right cpus in isolcpus. The actual number of CPUs
+# assigned to this VM depends on the flavor used. We don't know at this time what that will be.
+chmod +x ${BUILD_DIR}/check_prox_system_setup.sh
+mv ${BUILD_DIR}/check_prox_system_setup.sh /usr/local/libexec/
+mv ${BUILD_DIR}/check-prox-system-setup.service /etc/systemd/system/
+# systemctl daemon-reload will be skipped when building the image with disk-image-builder; that is OK
+systemctl daemon-reload
+systemctl enable check-prox-system-setup.service
+# Add the default rapid key as an authorized key for the rapid user
+cat ${BUILD_DIR}/rapid_rsa_key.pub >> /home/rapid/.ssh/authorized_keys
+popd > /dev/null 2>&1
diff --git a/rapidvm/dib/elements/rapid/post-install.d/80-change-permissions b/rapidvm/dib/elements/rapid/post-install.d/80-change-permissions
new file mode 100755
index 00000000..86368431
--- /dev/null
+++ b/rapidvm/dib/elements/rapid/post-install.d/80-change-permissions
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+#
+# Copyright (c) 2021 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+BUILD_DIR="/opt/rapid"
+chmod ugo+rwx ${BUILD_DIR}
diff --git a/rapidvm/dib/elements/rapid/post-install.d/81-clean-rpms b/rapidvm/dib/elements/rapid/post-install.d/81-clean-rpms
new file mode 100755
index 00000000..0fc166e3
--- /dev/null
+++ b/rapidvm/dib/elements/rapid/post-install.d/81-clean-rpms
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+#
+# Copyright (c) 2021 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+BUILD_DIR="/opt/rapid"
+rm ${BUILD_DIR}/tuned-profiles-realtime-2.9.0-1.el7_5.2.noarch.rpm
+rm ${BUILD_DIR}/tuned-profiles-nfv-guest-2.9.0-1.el7_5.2.noarch.rpm
+rm ${BUILD_DIR}/nasm-2.14.02-0.fc27.x86_64.rpm
diff --git a/rapidvm/dib/elements/rapid/source-repository-dpdk b/rapidvm/dib/elements/rapid/source-repository-dpdk
new file mode 100644
index 00000000..ce19a904
--- /dev/null
+++ b/rapidvm/dib/elements/rapid/source-repository-dpdk
@@ -0,0 +1 @@
+dpdk tar /opt/rapid/dpdk http://fast.dpdk.org/rel/dpdk-20.05.tar.gz *
diff --git a/rapidvm/dib/elements/rapid/source-repository-samplevnf b/rapidvm/dib/elements/rapid/source-repository-samplevnf
new file mode 100644
index 00000000..80331875
--- /dev/null
+++ b/rapidvm/dib/elements/rapid/source-repository-samplevnf
@@ -0,0 +1 @@
+samplevnf git /opt/rapid/samplevnf https://git.opnfv.org/samplevnf
diff --git a/tox.ini b/tox.ini
index 69aa1893..840ce6a3 100644
--- a/tox.ini
+++ b/tox.ini
@@ -6,6 +6,7 @@ envlist =
skipsdist = true
[testenv:docs]
+basepython = python3
deps = -rdocs/requirements.txt
commands =
sphinx-build -b html -n -d {envtmpdir}/doctrees ./docs/ {toxinidir}/docs/_build/html
@@ -13,5 +14,6 @@ commands =
whitelist_externals = echo
[testenv:docs-linkcheck]
+basepython = python3
deps = -rdocs/requirements.txt
commands = sphinx-build -b linkcheck -d {envtmpdir}/doctrees ./docs/ {toxinidir}/docs/_build/linkcheck