From 83c4a7885d7c32901ff7335bdbef9d8d9f4417d7 Mon Sep 17 00:00:00 2001 From: fmenguy Date: Mon, 16 Nov 2020 17:01:33 +0100 Subject: NFVBENCH-196: New NFVbench image for generator part (nfvbench and TRex codes inside VM) Change-Id: Id919a1b9dbf5e04b198d022432ed5c64232a27ba Signed-off-by: fmenguy --- nfvbenchvm/README.rst | 202 +++++++++++++++- nfvbenchvm/dib/build-image.sh | 191 +++++++++------ .../nfvbenchvm/finalise.d/51-add-cpu-isolation | 5 +- .../dib/elements/nfvbenchvm/package-installs.yaml | 7 +- .../nfvbenchvm/post-install.d/02-testpmd-script | 6 +- .../post-install.d/03-add-execute-attribute | 4 - .../nfvbenchvm/post-install.d/03-copy-rc-local | 8 + .../post-install.d/04-add-execute-attribute | 7 + .../nfvbenchvm/post-install.d/50-pip-package | 6 +- .../nfvbenchvm/post-install.d/51-cloudcfg-edit | 3 + .../nfvbenchvm/post-install.d/52-nfvbench-script | 31 +++ .../nfvbenchvm/post-install.d/53-sshd-script | 4 + .../elements/nfvbenchvm/post-install.d/99-cleanup | 2 +- .../nfvbenchvm/static/etc/modprobe.d/vfio.conf | 1 + .../static/etc/modules-load.d/vfio-pci.conf | 1 + .../nfvbenchvm/static/etc/profile.d/nfvbench.sh | 1 + .../elements/nfvbenchvm/static/etc/rc.d/rc.local | 249 ------------------- .../nfvbenchvm/static/etc/rc.d/rc.local.generator | 109 +++++++++ .../nfvbenchvm/static/etc/rc.d/rc.local.loopvm | 247 +++++++++++++++++++ .../static/etc/systemd/system/nfvbench.service | 12 + .../static/nfvbench/configure-nfvbench.sh | 263 +++++++++++++++++++++ .../nfvbenchvm/static/nfvbench/nfvbench.conf | 25 ++ .../nfvbenchvm/static/nfvbench/start-nfvbench.sh | 51 ++++ 23 files changed, 1095 insertions(+), 340 deletions(-) delete mode 100755 nfvbenchvm/dib/elements/nfvbenchvm/post-install.d/03-add-execute-attribute create mode 100644 nfvbenchvm/dib/elements/nfvbenchvm/post-install.d/03-copy-rc-local create mode 100644 nfvbenchvm/dib/elements/nfvbenchvm/post-install.d/04-add-execute-attribute create mode 100644 nfvbenchvm/dib/elements/nfvbenchvm/post-install.d/52-nfvbench-script create mode 100644 nfvbenchvm/dib/elements/nfvbenchvm/post-install.d/53-sshd-script create mode 100644 nfvbenchvm/dib/elements/nfvbenchvm/static/etc/modprobe.d/vfio.conf create mode 100644 nfvbenchvm/dib/elements/nfvbenchvm/static/etc/modules-load.d/vfio-pci.conf create mode 100644 nfvbenchvm/dib/elements/nfvbenchvm/static/etc/profile.d/nfvbench.sh delete mode 100644 nfvbenchvm/dib/elements/nfvbenchvm/static/etc/rc.d/rc.local create mode 100644 nfvbenchvm/dib/elements/nfvbenchvm/static/etc/rc.d/rc.local.generator create mode 100644 nfvbenchvm/dib/elements/nfvbenchvm/static/etc/rc.d/rc.local.loopvm create mode 100644 nfvbenchvm/dib/elements/nfvbenchvm/static/etc/systemd/system/nfvbench.service create mode 100644 nfvbenchvm/dib/elements/nfvbenchvm/static/nfvbench/configure-nfvbench.sh create mode 100644 nfvbenchvm/dib/elements/nfvbenchvm/static/nfvbench/nfvbench.conf create mode 100644 nfvbenchvm/dib/elements/nfvbenchvm/static/nfvbench/start-nfvbench.sh (limited to 'nfvbenchvm') diff --git a/nfvbenchvm/README.rst b/nfvbenchvm/README.rst index 1bf0bbf..2e12bf4 100644 --- a/nfvbenchvm/README.rst +++ b/nfvbenchvm/README.rst @@ -1,7 +1,9 @@ -NFVBENCH VM IMAGE FOR OPENSTACK -+++++++++++++++++++++++++++++++ +NFVBENCH VM IMAGES FOR OPENSTACK +++++++++++++++++++++++++++++++++ -This repo will build a centos 7 image with testpmd and VPP installed. 
+This repo will build two centos 7 images with:
+ - testpmd and VPP installed for the loop VM use case
+ - NFVbench and TRex installed for the generator VM use case
 The VM will come with a pre-canned user/password: nfvbench/nfvbench

 BUILD INSTRUCTIONS
 ==================
@@ -20,10 +22,15 @@ Build the image
 - cd dib
 - update the version number for the image (if needed) by modifying __version__ in build-image.sh
 - setup your http_proxy if needed
-- bash build-image.sh
+- to build the loop VM image only:
+  - `bash build-image.sh -l`
+- to build the generator VM image only:
+  - `bash build-image.sh -g`
+- to build both images:
+  - `bash build-image.sh`

-IMAGE INSTANCE AND CONFIG
-=========================
+LOOP VM IMAGE INSTANCE AND CONFIG
+=================================

 Interface Requirements
 ----------------------
@@ -84,3 +91,186 @@ Hardcoded Username and Password
 - Username: nfvbench
 - Password: nfvbench
+
+GENERATOR IMAGE INSTANCE AND CONFIG
+===================================
+
+Interface Requirements
+----------------------
+The instance must be launched using OpenStack with 2 network interfaces for dataplane traffic (using the SR-IOV function) and 1 management interface to control nfvbench.
+For best performance, the dataplane interfaces should use a `vnic_type` set to `direct-physical` (or `direct` if a physical function is not possible)
+and a flavor with:
+
+- 6 vCPU
+- 8 GB RAM
+- cpu pinning set to exclusive
+
+.. note:: For the management interface: any interface type can be used. This interface requires a routable IP (through a floating IP or direct) and access to the OpenStack APIs.
+.. note:: CPU pinning: 1 core is dedicated to the guest OS and the NFVbench process; the other provided cores are used by TRex.
+
+Template of a generator profile using CPU pinning:
+
+.. code-block:: bash
+
+    generator_profile:
+        - name: {{name}}
+          tool: {{tool}}
+          ip: {{ip}}
+          zmq_pub_port: {{zmq_pub_port}}
+          zmq_rpc_port: {{zmq_rpc_port}}
+          software_mode: {{software_mode}}
+          cores: {{CORES}}
+          platform:
+            master_thread_id: '0'
+            latency_thread_id: '1'
+            dual_if:
+              - socket: 0
+                threads: [{{CORE_THREADS}}]
+
+          interfaces:
+            - port: 0
+              pci: "{{PCI_ADDRESS_1}}"
+              switch:
+            - port: 1
+              pci: "{{PCI_ADDRESS_2}}"
+              switch:
+          intf_speed:
+
+.. note:: The `CORE_THREADS` value is determined automatically based on the cores available on the VM, starting from core 2 up to the last available worker core.
+
+Auto-configuration
+------------------
+The nfvbench VM will automatically find the two virtual interfaces to use for the dataplane, based on MAC addresses or OpenStack port names (see the config section below).
+This applies to the management interface as well.
+
+nfvbenchvm Config
+-----------------
+The nfvbenchvm config file is located at ``/etc/nfvbenchvm.conf``.
+
+Example of configuration:
+
+.. code-block:: bash
+
+    ACTION=e2e
+    LOOPBACK_INTF_MAC1=FA:16:3E:A2:30:41
+    LOOPBACK_INTF_MAC2=FA:16:3E:10:DA:10
+    E2E_INTF_MAC1=FA:16:3E:B0:E2:43
+    E2E_INTF_MAC2=FA:16:3E:D3:6A:FC
+
+.. note:: The `ACTION` parameter is not mandatory but allows NFVbench to be started with the appropriate ports (loopback or e2e).
+.. note:: The sets of MAC parameters cannot be used in parallel, as only one NFVbench/TRex process is running.
+.. note:: Switching between the `loopback` and `e2e` actions can be done manually by running `/nfvbench/start-nfvbench.sh` with the appropriate `action` keyword. This script will restart NFVbench with the matching set of MAC addresses.
+
+nfvbenchvm config file with management interface:
+
+.. code-block:: bash
+
+    ACTION=e2e
+    LOOPBACK_INTF_MAC1=FA:16:3E:A2:30:41
+    LOOPBACK_INTF_MAC2=FA:16:3E:10:DA:10
+    INTF_MAC_MGMT=FA:16:3E:06:11:8A
+    INTF_MGMT_CIDR=172.20.56.228/2
+    INTF_MGMT_IP_GW=172.20.56.225
+
+.. note:: The `INTF_MGMT_IP_GW` and `INTF_MGMT_CIDR` parameters are used by the VM to automatically configure the virtual interface and route, to allow external access through SSH.
+
+
+When using pre-created direct-physical ports on OpenStack, MAC address values are only known once the VM is deployed. In this case, you can pass the port names in the config:
+
+.. code-block:: bash
+
+    LOOPBACK_PORT_NAME1=nfvbench-pf1
+    LOOPBACK_PORT_NAME2=nfvbench-pf2
+    E2E_PORT_NAME1=nfvbench-pf1
+    E2E_PORT_NAME2=nfvbench-pf3
+    INTF_MAC_MGMT=FA:16:3E:06:11:8A
+    INTF_MGMT_CIDR=172.20.56.228/2
+    INTF_MGMT_IP_GW=172.20.56.225
+    DNS_SERVERS=8.8.8.8,dns.server.com
+
+.. note:: A management interface is required to automatically find the virtual interface to use according to the MAC address provided (see the `INTF_MAC_MGMT` parameter).
+.. note:: The NFVbench VM will call the OpenStack API through the management interface to retrieve the MAC addresses of these ports.
+.. note:: If the OpenStack API requires host name resolution, add the `DNS_SERVERS` parameter with the IPs or names of the DNS servers (multiple servers can be added, separated by `,`).
+
+Control nfvbenchvm VM and run test
+----------------------------------
+
+By default, NFVbench will be started in server mode (`--server`) and will act as an API server.
+
+The NFVbench VM will be accessible through SSH or HTTP using the management interface IP.
+
+The NFVbench API endpoint is: `http://<management_ip>:<port>`
+
+.. note:: by default the port value is 7555
+
+Get NFVbench status
+^^^^^^^^^^^^^^^^^^^
+
+To check that NFVbench is up and running, use this REST request:
+
+.. code-block:: bash
+
+    curl -XGET '<management_ip>:<port>/status'
+
+Example of answer:
+
+.. code-block:: bash
+
+    {
+      "error_message": "nfvbench run still pending",
+      "status": "PENDING"
+    }
+
+Start NFVbench test
+^^^^^^^^^^^^^^^^^^^
+
+To start a test run using the NFVbench API, use this type of REST request:
+
+.. code-block:: bash
+
+    curl -XPOST '<management_ip>:<port>/start_run' -H "Content-Type: application/json" -d @nfvbenchconfig.json
+
+Example of the response when the submission is successful:
+
+.. code-block:: bash
+
+    {
+      "error_message": "NFVbench run still pending",
+      "request_id": "42cccb7effdc43caa47f722f0ca8ec96",
+      "status": "PENDING"
+    }
+
+Connect to the VM using SSH keypair
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If a key pair is provided at VM creation, you can use it to log in to the VM with the `cloud-user` username:
+
+.. code-block:: bash
+
+    ssh -i key.pem cloud-user@<management_ip>
+
+
+Connect to VM using SSH username/password
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The VM is accessible over SSH using the hardcoded username and password (see below):
+
+.. code-block:: bash
+
+    ssh nfvbench@<management_ip>
+
+
+Launching nfvbenchvm VM
+-----------------------
+
+Normally this image will be deployed using an Ansible role, and the required configuration will be automatically generated and pushed to the VM by Ansible.
+If launched manually, users have full control to configure and run NFVbench via the VNC console.
+
+To check if NFVbench is running, you can run this command in the VNC console:
+
+.. 
code-block:: bash + + sudo screen -r nfvbench + + +Hardcoded Username and Password +-------------------------------- +- Username: nfvbench +- Password: nfvbench + diff --git a/nfvbenchvm/dib/build-image.sh b/nfvbenchvm/dib/build-image.sh index 87c1169..e4b468a 100755 --- a/nfvbenchvm/dib/build-image.sh +++ b/nfvbenchvm/dib/build-image.sh @@ -1,100 +1,149 @@ #!/bin/bash # -# A shell script to build the VPP VM image using diskinage-builder +# A shell script to build the VPP VM image or NFVbench+TRex VM image using diskinage-builder # # The following packages must be installed prior to using this script: # sudo apt-get -y install python-virtualenv qemu-utils kpartx usage() { - echo "Usage: $0 [-v]" + echo "Usage: $0 [-l] [-g] [-v]" + echo " -l build NFVbench loop VM image" + echo " -g build NFVbench generator image" echo " -v verify only (build but do not push to google storage)" exit 1 } -# Takes only 1 optional argument -if [ $# -gt 1 ]; then - usage -fi verify_only=0 +generator_only=0 +loopvm_only=0 +__prefix__="" +# ---------------------------------------------------------------------------- +# Parse command line options and configure the script +# ---------------------------------------------------------------------------- + +while getopts ":hglv" opt; do + case $opt in + h) + usage + exit 0 + ;; + g) + generator_only=1 + ;; + l) + loopvm_only=1 + ;; + v) + verify_only=1 + ;; + ?) + usage + exit 1 + ;; + esac +done -if [ $# -eq 1 ]; then - if [ $1 = "-v" ]; then - verify_only=1 - else - usage - fi -fi set -e # Artifact URL gs_url=artifacts.opnfv.org/nfvbench/images # image version number -__version__=0.12 -image_name=nfvbenchvm_centos-$__version__ - -# if image exists skip building -echo "Checking if image exists in google storage..." -if command -v gsutil >/dev/null; then - if gsutil -q stat gs://$gs_url/$image_name.qcow2; then - echo "Image already exists at http://$gs_url/$image_name.qcow2" - echo "Build is skipped" - exit 0 - fi - echo "Image does not exist in google storage, starting build..." - echo -else - echo "Cannot check image availability in OPNFV artifact repository (gsutil not available)" -fi - -# check if image is already built locally -if [ -f $image_name.qcow2 ]; then - echo "Image $image_name.qcow2 already exists locally" -else - - # install diskimage-builder - if [ -d dib-venv ]; then - . dib-venv/bin/activate +__version__=0.13 +loopvm_image_name=nfvbenchvm_centos-$__version__ +generator_image_name=nfvbenchvm_centos-generator-$__version__ + +function build_image { + # if image exists skip building + echo "Checking if image exists in google storage..." + if command -v gsutil >/dev/null; then + if gsutil -q stat gs://$gs_url/$1.qcow2; then + echo "Image already exists at http://$gs_url/$1.qcow2" + echo "Build is skipped" + exit 0 + fi + echo "Image does not exist in google storage, starting build..." + echo else - virtualenv dib-venv - . 
dib-venv/bin/activate - pip install diskimage-builder + echo "Cannot check image availability in OPNFV artifact repository (gsutil not available)" fi - # Add nfvbenchvm_centos elements directory to the DIB elements path - export ELEMENTS_PATH=`pwd`/elements - - # canned user/password for direct login - export DIB_DEV_USER_USERNAME=nfvbench - export DIB_DEV_USER_PASSWORD=nfvbench - export DIB_DEV_USER_PWDLESS_SUDO=Y - - # Set the data sources to have ConfigDrive only - export DIB_CLOUD_INIT_DATASOURCES="ConfigDrive" - - # Configure VPP REPO - export DIB_YUM_REPO_CONF=$ELEMENTS_PATH/nfvbenchvm/fdio-release.repo + # check if image is already built locally + if [ -f $1.qcow2 ]; then + echo "Image $1.qcow2 already exists locally" + else - # Use ELRepo to have latest kernel - export DIB_USE_ELREPO_KERNEL=True + # install diskimage-builder + if [ -d dib-venv ]; then + . dib-venv/bin/activate + else + virtualenv dib-venv + . dib-venv/bin/activate + pip install diskimage-builder + fi + + # Add nfvbenchvm_centos elements directory to the DIB elements path + export ELEMENTS_PATH=`pwd`/elements + + # canned user/password for direct login + export DIB_DEV_USER_USERNAME=nfvbench + export DIB_DEV_USER_PASSWORD=nfvbench + export DIB_DEV_USER_PWDLESS_SUDO=Y + + # Set the data sources to have ConfigDrive only + export DIB_CLOUD_INIT_DATASOURCES="ConfigDrive" + + # Configure VPP REPO + export DIB_YUM_REPO_CONF=$ELEMENTS_PATH/nfvbenchvm/fdio-release.repo + + # Use ELRepo to have latest kernel + # only for loop vm image + if [ $1 = $loopvm_image_name ]; then + export DIB_USE_ELREPO_KERNEL=True + export DIB_DEV_IMAGE=loopvm + else + export DIB_USE_ELREPO_KERNEL=False + export DIB_DEV_IMAGE=generator + # get current git branch to build image with current code + export GIT_BRANCH=$(git rev-parse --abbrev-ref HEAD) + # retrieve TREX_VER from Dockerfile + export TREX_VER=$(awk '/ENV TREX_VER/ {print $3}' ../../docker/Dockerfile | sed 's/"//g' | sed 's/\r//g') + fi + + echo "Building $1.qcow2..." + time disk-image-create -o $1 centos7 nfvbenchvm + fi - echo "Building $image_name.qcow2..." - time disk-image-create -o $image_name centos7 nfvbenchvm -fi + ls -l $1.qcow2 -ls -l $image_name.qcow2 + if [ $verify_only -eq 1 ]; then + echo "Image verification SUCCESS" + echo "NO upload to google storage (-v)" + else + if command -v gsutil >/dev/null; then + echo "Uploading $1.qcow2..." + gsutil cp $1.qcow2 gs://$gs_url/$1.qcow2 + echo "You can access to image at http://$gs_url/$1.qcow2" + else + echo "Cannot upload new image to the OPNFV artifact repository (gsutil not available)" + exit 1 + fi + fi +} -if [ $verify_only -eq 1 ]; then - echo "Image verification SUCCESS" - echo "NO upload to google storage (-v)" +if [ ! $generator_only -eq 1 ] && [ ! $loopvm_only -eq 1 ]; then + echo "Build loop VM image" + build_image $loopvm_image_name + echo "Build generator image" + build_image $generator_image_name else - if command -v gsutil >/dev/null; then - echo "Uploading $image_name.qcow2..." 
- gsutil cp $image_name.qcow2 gs://$gs_url/$image_name.qcow2 - echo "You can access to image at http://$gs_url/$image_name.qcow2" - else - echo "Cannot upload new image to the OPNFV artifact repository (gsutil not available)" - exit 1 + if [ $loopvm_only -eq 1 ]; then + echo "Build loop VM image" + build_image $loopvm_image_name + fi + if [ $generator_only -eq 1 ]; then + echo "Build generator image" + build_image $generator_image_name fi -fi +fi \ No newline at end of file diff --git a/nfvbenchvm/dib/elements/nfvbenchvm/finalise.d/51-add-cpu-isolation b/nfvbenchvm/dib/elements/nfvbenchvm/finalise.d/51-add-cpu-isolation index f81485d..12ffcdc 100755 --- a/nfvbenchvm/dib/elements/nfvbenchvm/finalise.d/51-add-cpu-isolation +++ b/nfvbenchvm/dib/elements/nfvbenchvm/finalise.d/51-add-cpu-isolation @@ -1,4 +1,5 @@ #!/bin/bash -grubby --update-kernel=ALL --args="isolcpus=1 rcu_nocbs=1 nohz=on nohz_full=1 nmi_watchdog=0" -grubby --update-kernel=ALL --args="default_hugepagesz=1G hugepagesz=1G hugepages=2" +grubby --update-kernel=ALL --args="isolcpus=1-7 rcu_nocbs=1 nohz=on nohz_full=1 nmi_watchdog=0" +grubby --update-kernel=ALL --args="default_hugepagesz=1G hugepagesz=1G hugepages=4" +grubby --update-kernel=ALL --args="intel_iommu=on iommu=pt" diff --git a/nfvbenchvm/dib/elements/nfvbenchvm/package-installs.yaml b/nfvbenchvm/dib/elements/nfvbenchvm/package-installs.yaml index a5868fa..60efec1 100644 --- a/nfvbenchvm/dib/elements/nfvbenchvm/package-installs.yaml +++ b/nfvbenchvm/dib/elements/nfvbenchvm/package-installs.yaml @@ -4,7 +4,6 @@ tuna: wget: screen: telnet: -python-devel: libyaml-devel: numactl-libs: numactl-devel: @@ -14,4 +13,8 @@ vpp-config: kernel-firmware: kernel-headers: kernel-devel: -openssh-server: \ No newline at end of file +openssh-server: +dpdk-tools: +git: +python3-dev: +libpython3.6-dev: \ No newline at end of file diff --git a/nfvbenchvm/dib/elements/nfvbenchvm/post-install.d/02-testpmd-script b/nfvbenchvm/dib/elements/nfvbenchvm/post-install.d/02-testpmd-script index 6f133d1..f18ed60 100755 --- a/nfvbenchvm/dib/elements/nfvbenchvm/post-install.d/02-testpmd-script +++ b/nfvbenchvm/dib/elements/nfvbenchvm/post-install.d/02-testpmd-script @@ -1,5 +1,9 @@ #!/bin/bash +if [ $DIB_DEV_IMAGE != "loopvm" ]; then + exit 0 +fi + DPDK=dpdk-18.02 DPDK_UNTAR=dpdk-18.02 @@ -15,8 +19,6 @@ export RTE_KERNELDIR=/lib/modules/$kernel_version/build export ARCH=x86 make -j4 install T=x86_64-native-linuxapp-gcc -cp usertools/dpdk-devbind.py ../dpdk -# cp tools/dpdk_nic_bind.py ../dpdk/dpdk-devbind.py cp x86_64-native-linuxapp-gcc/app/testpmd ../dpdk cp x86_64-native-linuxapp-gcc/kmod/igb_uio.ko ../dpdk echo "set promisc all off" > /dpdk/testpmd_cmd.txt diff --git a/nfvbenchvm/dib/elements/nfvbenchvm/post-install.d/03-add-execute-attribute b/nfvbenchvm/dib/elements/nfvbenchvm/post-install.d/03-add-execute-attribute deleted file mode 100755 index a13ab95..0000000 --- a/nfvbenchvm/dib/elements/nfvbenchvm/post-install.d/03-add-execute-attribute +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash - -chmod +x /etc/rc.d/rc.local -chmod +x /etc/sysconfig/network-scripts/ifcfg-eth0 diff --git a/nfvbenchvm/dib/elements/nfvbenchvm/post-install.d/03-copy-rc-local b/nfvbenchvm/dib/elements/nfvbenchvm/post-install.d/03-copy-rc-local new file mode 100644 index 0000000..3311530 --- /dev/null +++ b/nfvbenchvm/dib/elements/nfvbenchvm/post-install.d/03-copy-rc-local @@ -0,0 +1,8 @@ +#!/bin/bash + +# set accurate rc.local file corresponding to current image built +if [ $DIB_DEV_IMAGE = "loopvm" ]; then + mv 
/etc/rc.d/rc.local.loopvm /etc/rc.d/rc.local +else + mv /etc/rc.d/rc.local.generator /etc/rc.d/rc.local +fi \ No newline at end of file diff --git a/nfvbenchvm/dib/elements/nfvbenchvm/post-install.d/04-add-execute-attribute b/nfvbenchvm/dib/elements/nfvbenchvm/post-install.d/04-add-execute-attribute new file mode 100644 index 0000000..666d9dc --- /dev/null +++ b/nfvbenchvm/dib/elements/nfvbenchvm/post-install.d/04-add-execute-attribute @@ -0,0 +1,7 @@ +#!/bin/bash + +chmod +x /etc/rc.d/rc.local +chmod +x /etc/sysconfig/network-scripts/ifcfg-eth0 +chmod +x /etc/profile.d/nfvbench.sh +chmod +x /nfvbench/configure-nfvbench.sh +chmod +x /nfvbench/start-nfvbench.sh diff --git a/nfvbenchvm/dib/elements/nfvbenchvm/post-install.d/50-pip-package b/nfvbenchvm/dib/elements/nfvbenchvm/post-install.d/50-pip-package index 2d74ff5..9707841 100755 --- a/nfvbenchvm/dib/elements/nfvbenchvm/post-install.d/50-pip-package +++ b/nfvbenchvm/dib/elements/nfvbenchvm/post-install.d/50-pip-package @@ -2,7 +2,7 @@ cd /tmp wget https://bootstrap.pypa.io/get-pip.py -python get-pip.py +python3 get-pip.py -pip install setuptools wheel -pip install pyyaml +pip3 install setuptools wheel pbr +pip3 install pyyaml diff --git a/nfvbenchvm/dib/elements/nfvbenchvm/post-install.d/51-cloudcfg-edit b/nfvbenchvm/dib/elements/nfvbenchvm/post-install.d/51-cloudcfg-edit index dc51030..1a0dec9 100755 --- a/nfvbenchvm/dib/elements/nfvbenchvm/post-install.d/51-cloudcfg-edit +++ b/nfvbenchvm/dib/elements/nfvbenchvm/post-install.d/51-cloudcfg-edit @@ -7,6 +7,9 @@ user = "cloud-user" with open(cloudcfg) as f: cfg = yaml.safe_load(f) +# allow SSH password auth +cfg['ssh_pwauth'] = "1" + try: if cfg['system_info']['default_user']['name']: synver = "2" diff --git a/nfvbenchvm/dib/elements/nfvbenchvm/post-install.d/52-nfvbench-script b/nfvbenchvm/dib/elements/nfvbenchvm/post-install.d/52-nfvbench-script new file mode 100644 index 0000000..9162952 --- /dev/null +++ b/nfvbenchvm/dib/elements/nfvbenchvm/post-install.d/52-nfvbench-script @@ -0,0 +1,31 @@ +#!/bin/bash + +if [ $DIB_DEV_IMAGE != "generator" ]; then + exit 0 +fi + +# TRex installation +mkdir -p /opt/trex +mkdir /var/log/nfvbench + +wget --no-cache --no-check-certificate https://trex-tgn.cisco.com/trex/release/$TREX_VER.tar.gz +tar xzf $TREX_VER.tar.gz -C /opt/trex +rm -f /$TREX_VER.tar.gz +rm -f /opt/trex/$TREX_VER/trex_client_$TREX_VER.tar.gz +cp -a /opt/trex/$TREX_VER/automation/trex_control_plane/interactive/trex /usr/local/lib/python3.6/site-packages/ +rm -rf /opt/trex/$TREX_VER/automation/trex_control_plane/interactive/trex + +# NFVbench installation +cd /opt +git clone https://gerrit.opnfv.org/gerrit/nfvbench +cd nfvbench/ +pip3 install . 
--use-deprecated=legacy-resolver +cp xtesting/testcases.yaml /usr/local/lib/python3.6/site-packages/xtesting/ci/testcases.yaml +python3 ./docker/cleanup_generators.py +rm -rf /opt/nfvbench/.git +rm -rf /opt/nfvbench/nfvbench +# symlink to NFVbench sources +ln -s /usr/local/lib/python3.6/site-packages/nfvbench /opt/nfvbench/nfvbench +# persist env variables +echo "export TREX_VER=\"$TREX_VER\"" >> /etc/profile.d/nfvbench.sh +echo "export TREX_EXT_LIBS=\"/opt/trex/$TREX_VER/external_libs\"" >> /etc/profile.d/nfvbench.sh diff --git a/nfvbenchvm/dib/elements/nfvbenchvm/post-install.d/53-sshd-script b/nfvbenchvm/dib/elements/nfvbenchvm/post-install.d/53-sshd-script new file mode 100644 index 0000000..64e8877 --- /dev/null +++ b/nfvbenchvm/dib/elements/nfvbenchvm/post-install.d/53-sshd-script @@ -0,0 +1,4 @@ +#!/bin/bash + +# Set UseDNS no value in sshd_config to reduce time to connect +echo "UseDNS no" >> /etc/ssh/sshd_config \ No newline at end of file diff --git a/nfvbenchvm/dib/elements/nfvbenchvm/post-install.d/99-cleanup b/nfvbenchvm/dib/elements/nfvbenchvm/post-install.d/99-cleanup index 14e9f27..e48ca52 100755 --- a/nfvbenchvm/dib/elements/nfvbenchvm/post-install.d/99-cleanup +++ b/nfvbenchvm/dib/elements/nfvbenchvm/post-install.d/99-cleanup @@ -1,3 +1,3 @@ #!/bin/bash -yum erase -y python-devel libyaml-devel numactl-devel kernel-devel kernel-headers kernel-lt-headers kernel-lt-devel gcc +yum erase -y python-devel libyaml-devel numactl-devel kernel-devel kernel-headers kernel-lt-headers kernel-lt-devel gcc git python3-dev libpython3.6-dev diff --git a/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/modprobe.d/vfio.conf b/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/modprobe.d/vfio.conf new file mode 100644 index 0000000..f32633f --- /dev/null +++ b/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/modprobe.d/vfio.conf @@ -0,0 +1 @@ +options vfio enable_unsafe_noiommu_mode=1 diff --git a/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/modules-load.d/vfio-pci.conf b/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/modules-load.d/vfio-pci.conf new file mode 100644 index 0000000..7ce4214 --- /dev/null +++ b/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/modules-load.d/vfio-pci.conf @@ -0,0 +1 @@ +vfio-pci diff --git a/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/profile.d/nfvbench.sh b/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/profile.d/nfvbench.sh new file mode 100644 index 0000000..a9bf588 --- /dev/null +++ b/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/profile.d/nfvbench.sh @@ -0,0 +1 @@ +#!/bin/bash diff --git a/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/rc.d/rc.local b/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/rc.d/rc.local deleted file mode 100644 index d69cd0e..0000000 --- a/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/rc.d/rc.local +++ /dev/null @@ -1,249 +0,0 @@ -#!/bin/bash - -touch /var/lock/subsys/local - -# Waiting for cloud-init to generate $TESTPMD_CONF, retry 60 seconds -NFVBENCH_CONF=/etc/nfvbenchvm.conf -retry=30 -until [ $retry -eq 0 ]; do - if [ -f $NFVBENCH_CONF ]; then break; fi - retry=$[$retry-1] - sleep 2 -done -if [ ! -f $NFVBENCH_CONF ]; then - exit 0 -fi - -# Parse and obtain all configurations -echo "Generating configurations for forwarder..." -eval $(cat $NFVBENCH_CONF) -touch /nfvbench_configured.flag - -# WE assume there are at least 2 cores available for the VM -CPU_CORES=$(grep -c ^processor /proc/cpuinfo) - -# We need at least 1 admin core. 
-if [ $CPU_CORES -le 2 ]; then - ADMIN_CORES=1 -else - # If the number of cores is even we - # reserve 2 cores for admin (second being idle) so the number of - # workers is either 1 (if CPU_CORES is 2) or always even - if (( $CPU_CORES % 2 )); then - ADMIN_CORES=1 - else - ADMIN_CORES=2 - fi -fi -# 2 vcpus: AW (core 0: Admin, core 1: Worker) -# 3 vcpus: AWW (core 0: Admin, core 1,2: Worker) -# 4 vcpus: AWWU (core 0: Admin, core 1,2: Worker, core 3: Unused) -# 5 vcpus: AWWWW -# 6 vcpus: AWWWWU -WORKER_CORES=$(expr $CPU_CORES - $ADMIN_CORES) -# worker cores are all cores except the admin core (core 0) and the eventual unused core -# AW -> 1 -# AWW -> 1,2 -# AWWU -> 1,2 -WORKER_CORE_LIST=$(seq -s, $ADMIN_CORES $WORKER_CORES) -# always use all cores -CORE_MASK=0x$(echo "obase=16; 2 ^ $CPU_CORES - 1" | bc) - -logger "NFVBENCHVM: CPU_CORES=$CPU_CORES, ADMIN_CORES=$ADMIN_CORES, WORKER_CORES=$WORKER_CORES ($WORKER_CORE_LIST)" - -# CPU isolation optimizations -echo 1 > /sys/bus/workqueue/devices/writeback/cpumask -echo 1 > /sys/devices/virtual/workqueue/cpumask -echo 1 > /proc/irq/default_smp_affinity -for irq in `ls /proc/irq/`; do - if [ -f /proc/irq/$irq/smp_affinity ]; then - echo 1 > /proc/irq/$irq/smp_affinity - fi -done - -# Isolate all cores that are reserved for workers -tuna -c $WORKER_CORE_LIST --isolate - -NET_PATH=/sys/class/net - -get_pci_address() { - # device mapping for CentOS Linux 7: - # lspci: - # 00.03.0 Ethernet controller: Red Hat, Inc. Virtio network device - # 00.04.0 Ethernet controller: Red Hat, Inc. Virtio network device - # /sys/class/net: - # /sys/class/net/eth0 -> ../../devices/pci0000:00/0000:00:03.0/virtio0/net/eth0 - # /sys/class/net/eth1 -> ../../devices/pci0000:00/0000:00:04.0/virtio1/net/eth1 - - mac=$1 - for f in $(ls $NET_PATH/); do - if grep -q "$mac" $NET_PATH/$f/address; then - pci_addr=$(readlink $NET_PATH/$f | cut -d "/" -f5) - # some virtual interfaces match on MAC and do not have a PCI address - if [ "$pci_addr" -a "$pci_addr" != "N/A" ]; then - # Found matching interface - logger "NFVBENCHVM: found interface $f ($pci_addr) matching $mac" - break - else - pci_addr="" - fi - fi; - done - if [ -z "$pci_addr" ]; then - echo "ERROR: Cannot find pci address for MAC $mac" >&2 - logger "NFVBENCHVM ERROR: Cannot find pci address for MAC $mac" - return 1 - fi - echo $pci_addr - return 0 -} - -get_eth_port() { - # device mapping for CentOS Linux 7: - # lspci: - # 00.03.0 Ethernet controller: Red Hat, Inc. Virtio network device - # 00.04.0 Ethernet controller: Red Hat, Inc. 
Virtio network device - # /sys/class/net: - # /sys/class/net/eth0 -> ../../devices/pci0000:00/0000:00:03.0/virtio0/net/eth0 - # /sys/class/net/eth1 -> ../../devices/pci0000:00/0000:00:04.0/virtio1/net/eth1 - - mac=$1 - for f in $(ls $NET_PATH/); do - if grep -q "$mac" $NET_PATH/$f/address; then - eth_port=$(readlink $NET_PATH/$f | cut -d "/" -f8) - # some virtual interfaces match on MAC and do not have a PCI address - if [ "$eth_port" -a "$eth_port" != "N/A" ]; then - # Found matching interface - logger "NFVBENCHVM: found interface $f ($eth_port) matching $mac" - break - else - eth_port="" - fi - fi; - done - if [ -z "$eth_port" ]; then - echo "ERROR: Cannot find eth port for MAC $mac" >&2 - logger "NFVBENCHVM ERROR: Cannot find eth port for MAC $mac" - return 1 - fi - echo $eth_port - return 0 -} - -# Set VM MANAGEMENT port up and running -if [ $INTF_MGMT_CIDR ] && [ $INTF_MGMT_IP_GW ]; then - if [ $INTF_MAC_MGMT ]; then - ETH_PORT=$(get_eth_port $INTF_MAC_MGMT) - else - ETH_PORT="eth0" - fi - ip addr add $INTF_MGMT_CIDR dev $ETH_PORT - ip link set $ETH_PORT up - ip route add default via $INTF_MGMT_IP_GW dev $ETH_PORT -else - echo "INFO: VM management IP Addresses missing in $NFVBENCH_CONF" -fi - -# Set dynamically interfaces mac values, if VM is spawn without using NFVBench -# and management interface is used on eth0 -if [ -z "$INTF_MAC1" ] && [ -z "$INTF_MAC2" ]; then - INTF_MAC1=$(ip l show eth1 | grep -o -Ei '([a-fA-F0-9:]{17}|[a-fA-F0-9]{12}$)' | head -1) - INTF_MAC2=$(ip l show eth2 | grep -o -Ei '([a-fA-F0-9:]{17}|[a-fA-F0-9]{12}$)' | head -1) -fi - - -# Sometimes the interfaces on the loopback VM will use different drivers, e.g. -# one from vswitch which is virtio based, one is from SRIOV VF. In this case, -# we have to make sure the forwarder uses them in the right order, which is -# especially important if the VM is in a PVVP chain. -if [ $INTF_MAC1 ] && [ $INTF_MAC2 ]; then - PCI_ADDRESS_1=$(get_pci_address $INTF_MAC1) - PCI_ADDRESS_2=$(get_pci_address $INTF_MAC2) -else - echo "ERROR: VM MAC Addresses missing in $NFVBENCH_CONF" - logger "NFVBENCHVM ERROR: VM MAC Addresses missing in $NFVBENCH_CONF" -fi - -if [ $PCI_ADDRESS_1 ] && [ $PCI_ADDRESS_2 ]; then - logger "NFVBENCHVM: Using pci $PCI_ADDRESS_1 ($INTF_MAC1)" - logger "NFVBENCHVM: Using pci $PCI_ADDRESS_2 ($INTF_MAC2)" - # Configure the forwarder - if [ -z "`lsmod | grep igb_uio`" ]; then - modprobe uio - insmod /dpdk/igb_uio.ko - fi - if [ "$FORWARDER" == "testpmd" ]; then - echo "Configuring testpmd..." - # Binding ports to DPDK - /dpdk/dpdk-devbind.py -b igb_uio $PCI_ADDRESS_1 - /dpdk/dpdk-devbind.py -b igb_uio $PCI_ADDRESS_2 - screen -dmSL testpmd /dpdk/testpmd \ - -c $CORE_MASK \ - -n 4 \ - -- \ - --nb-ports=2 \ - --burst=32 \ - --txd=256 \ - --rxd=1024 \ - --eth-peer=0,$TG_MAC1 \ - --eth-peer=1,$TG_MAC2 \ - --forward-mode=mac \ - --nb-cores=$WORKER_CORES \ - --txq=$VIF_MQ_SIZE \ - --rxq=$VIF_MQ_SIZE \ - --max-pkt-len=9000 \ - --cmdline-file=/dpdk/testpmd_cmd.txt - echo "testpmd running in screen 'testpmd'" - logger "NFVBENCHVM: testpmd running in screen 'testpmd'" - else - echo "Configuring vpp..." 
- cp /vpp/startup.conf /etc/vpp/startup.conf - cp /vpp/vm.conf /etc/vpp/vm.conf - - sed -i "s/{{PCI_ADDRESS_1}}/$PCI_ADDRESS_1/g" /etc/vpp/startup.conf - sed -i "s/{{PCI_ADDRESS_2}}/$PCI_ADDRESS_2/g" /etc/vpp/startup.conf - sed -i "s/{{WORKER_CORES}}/$WORKER_CORES/g" /etc/vpp/startup.conf - sed -i "s/{{VIF_MQ_SIZE}}/${VIF_MQ_SIZE}/g" /etc/vpp/startup.conf - sed -i "s/{{NUM_MBUFS}}/${NUM_MBUFS}/g" /etc/vpp/startup.conf - service vpp start - sleep 10 - - INTFS=`vppctl show int | grep Ethernet | xargs` - INTF_1=`echo $INTFS | awk '{ print $1 }'` - INTF_2=`echo $INTFS | awk '{ print $4 }'` - if [ -z "${TG_MAC1}" ]; then - # vm.conf does not support lines commented with #, so - # we need to remove the line to set the static ARP entry. - sed -i "/{{TG_MAC1}}/d" /etc/vpp/vm.conf - else - sed -i "s/{{TG_MAC1}}/${TG_MAC1}/g" /etc/vpp/vm.conf - fi - if [ -z "${TG_MAC2}" ]; then - sed -i "/{{TG_MAC2}}/d" /etc/vpp/vm.conf - else - sed -i "s/{{TG_MAC2}}/${TG_MAC2}/g" /etc/vpp/vm.conf - fi - sed -i "s/{{INTF_1}}/${INTF_1//\//\/}/g" /etc/vpp/vm.conf - sed -i "s/{{INTF_2}}/${INTF_2//\//\/}/g" /etc/vpp/vm.conf - sed -i "s/{{VNF_GATEWAY1_CIDR}}/${VNF_GATEWAY1_CIDR//\//\/}/g" /etc/vpp/vm.conf - sed -i "s/{{VNF_GATEWAY2_CIDR}}/${VNF_GATEWAY2_CIDR//\//\/}/g" /etc/vpp/vm.conf - sed -i "s/{{TG_NET1}}/${TG_NET1//\//\/}/g" /etc/vpp/vm.conf - sed -i "s/{{TG_NET2}}/${TG_NET2//\//\/}/g" /etc/vpp/vm.conf - sed -i "s/{{TG_GATEWAY1_IP}}/${TG_GATEWAY1_IP}/g" /etc/vpp/vm.conf - sed -i "s/{{TG_GATEWAY2_IP}}/${TG_GATEWAY2_IP}/g" /etc/vpp/vm.conf - service vpp restart - logger "NFVBENCHVM: vpp service restarted" - fi -else - echo "ERROR: Cannot find PCI Address from MAC" - echo "$INTF_MAC1: $PCI_ADDRESS_1" - echo "$INTF_MAC2: $PCI_ADDRESS_2" - logger "NFVBENCHVM ERROR: Cannot find PCI Address from MAC" -fi - -# Set SSH config -logger $(cat /etc/ssh/sshd_config | grep "PasswordAuthentication") -sed -i 's/PasswordAuthentication no/#PasswordAuthentication no/g' /etc/ssh/sshd_config -sed -i 's/#PasswordAuthentication yes/PasswordAuthentication yes/g' /etc/ssh/sshd_config -logger $(cat /etc/ssh/sshd_config | grep "PasswordAuthentication") -service sshd restart diff --git a/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/rc.d/rc.local.generator b/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/rc.d/rc.local.generator new file mode 100644 index 0000000..0746fd6 --- /dev/null +++ b/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/rc.d/rc.local.generator @@ -0,0 +1,109 @@ +#!/bin/bash + +touch /var/lock/subsys/local + +# Waiting for cloud-init to generate $NFVBENCH_CONF, retry 60 seconds +NFVBENCH_CONF=/etc/nfvbenchvm.conf +OPENRC=/etc/nfvbench/openrc +retry=30 +until [ $retry -eq 0 ]; do + if [ -f $NFVBENCH_CONF ]; then break; fi + retry=$[$retry-1] + sleep 2 +done +if [ ! -f $NFVBENCH_CONF ]; then + exit 0 +fi + +# Parse and obtain all configurations +echo "Generating configurations for NFVbench and TRex..." 
+eval $(cat $NFVBENCH_CONF) +touch /nfvbench_configured.flag + +if [ -f $OPENRC ]; then + source $OPENRC +fi + +# Add DNS entry +if [ $DNS_SERVERS ]; then + IFS="," read -a dns <<< $DNS_SERVERS + for d in "${dns[@]}"; do + echo "nameserver $d" >> /etc/resolv.conf + done +fi + +# CPU isolation optimizations +echo 1 > /sys/bus/workqueue/devices/writeback/cpumask +echo 1 > /sys/devices/virtual/workqueue/cpumask +echo 1 > /proc/irq/default_smp_affinity +for irq in `ls /proc/irq/`; do + if [ -f /proc/irq/$irq/smp_affinity ]; then + echo 1 > /proc/irq/$irq/smp_affinity + fi +done + +NET_PATH=/sys/class/net + +get_eth_port() { + # device mapping for CentOS Linux 7: + # lspci: + # 00.03.0 Ethernet controller: Red Hat, Inc. Virtio network device + # 00.04.0 Ethernet controller: Red Hat, Inc. Virtio network device + # /sys/class/net: + # /sys/class/net/eth0 -> ../../devices/pci0000:00/0000:00:03.0/virtio0/net/eth0 + # /sys/class/net/eth1 -> ../../devices/pci0000:00/0000:00:04.0/virtio1/net/eth1 + + mac=$1 + for f in $(ls $NET_PATH/); do + if grep -q "$mac" $NET_PATH/$f/address; then + eth_port=$(readlink $NET_PATH/$f | cut -d "/" -f8) + # some virtual interfaces match on MAC and do not have a PCI address + if [ "$eth_port" -a "$eth_port" != "N/A" ]; then + # Found matching interface + logger "NFVBENCHVM: found interface $f ($eth_port) matching $mac" + break + else + eth_port="" + fi + fi; + done + if [ -z "$eth_port" ]; then + echo "ERROR: Cannot find eth port for MAC $mac" >&2 + logger "NFVBENCHVM ERROR: Cannot find eth port for MAC $mac" + return 1 + fi + echo $eth_port + return 0 +} + +# Set VM MANAGEMENT port up and running +if [ $INTF_MGMT_CIDR ] && [ $INTF_MGMT_IP_GW ]; then + if [ $INTF_MAC_MGMT ]; then + ETH_PORT=$(get_eth_port $INTF_MAC_MGMT) + elif [ -f $OPENRC ] && [ "$PORT_MGMT_NAME" ]; then + $INTF_MAC_MGMT=$(openstack port list | grep $PORT_MGMT_NAME | grep -o -Ei '([a-fA-F0-9:]{17}|[a-fA-F0-9]{12}$)' | head -1) + ETH_PORT=$(get_eth_port $INTF_MAC_MGMT) + else + ETH_PORT="" + fi + if [ -z "$ETH_PORT" ]; then + echo "ERROR: Cannot find eth port for management port" >&2 + logger "NFVBENCHVM ERROR: Cannot find eth port for management port" + return 1 + fi + ip addr add $INTF_MGMT_CIDR dev $ETH_PORT + ip link set $ETH_PORT up + ip route add default via $INTF_MGMT_IP_GW dev $ETH_PORT +else + echo "INFO: VM management IP Addresses missing in $NFVBENCH_CONF" +fi + +/nfvbench/configure-nfvbench.sh + +if [ $ACTION ]; then + /nfvbench/start-nfvbench.sh $ACTION +else + /nfvbench/start-nfvbench.sh +fi + +exit 0 \ No newline at end of file diff --git a/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/rc.d/rc.local.loopvm b/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/rc.d/rc.local.loopvm new file mode 100644 index 0000000..a83e7ba --- /dev/null +++ b/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/rc.d/rc.local.loopvm @@ -0,0 +1,247 @@ +#!/bin/bash + +touch /var/lock/subsys/local + +# Waiting for cloud-init to generate $NFVBENCH_CONF, retry 60 seconds +NFVBENCH_CONF=/etc/nfvbenchvm.conf +retry=30 +until [ $retry -eq 0 ]; do + if [ -f $NFVBENCH_CONF ]; then break; fi + retry=$[$retry-1] + sleep 2 +done +if [ ! -f $NFVBENCH_CONF ]; then + exit 0 +fi + +# Parse and obtain all configurations +echo "Generating configurations for forwarder..." +eval $(cat $NFVBENCH_CONF) +touch /nfvbench_configured.flag + +# WE assume there are at least 2 cores available for the VM +CPU_CORES=$(grep -c ^processor /proc/cpuinfo) + +# We need at least 1 admin core. 
+if [ $CPU_CORES -le 2 ]; then + ADMIN_CORES=1 +else + # If the number of cores is even we + # reserve 2 cores for admin (second being idle) so the number of + # workers is either 1 (if CPU_CORES is 2) or always even + if (( $CPU_CORES % 2 )); then + ADMIN_CORES=1 + else + ADMIN_CORES=2 + fi +fi +# 2 vcpus: AW (core 0: Admin, core 1: Worker) +# 3 vcpus: AWW (core 0: Admin, core 1,2: Worker) +# 4 vcpus: AWWU (core 0: Admin, core 1,2: Worker, core 3: Unused) +# 5 vcpus: AWWWW +# 6 vcpus: AWWWWU +WORKER_CORES=$(expr $CPU_CORES - $ADMIN_CORES) +# worker cores are all cores except the admin core (core 0) and the eventual unused core +# AW -> 1 +# AWW -> 1,2 +# AWWU -> 1,2 +WORKER_CORE_LIST=$(seq -s, $ADMIN_CORES $WORKER_CORES) +# always use all cores +CORE_MASK=0x$(echo "obase=16; 2 ^ $CPU_CORES - 1" | bc) + +logger "NFVBENCHVM: CPU_CORES=$CPU_CORES, ADMIN_CORES=$ADMIN_CORES, WORKER_CORES=$WORKER_CORES ($WORKER_CORE_LIST)" + +# CPU isolation optimizations +echo 1 > /sys/bus/workqueue/devices/writeback/cpumask +echo 1 > /sys/devices/virtual/workqueue/cpumask +echo 1 > /proc/irq/default_smp_affinity +for irq in `ls /proc/irq/`; do + if [ -f /proc/irq/$irq/smp_affinity ]; then + echo 1 > /proc/irq/$irq/smp_affinity + fi +done + +# Isolate all cores that are reserved for workers +tuna -c $WORKER_CORE_LIST --isolate + +NET_PATH=/sys/class/net + +get_pci_address() { + # device mapping for CentOS Linux 7: + # lspci: + # 00.03.0 Ethernet controller: Red Hat, Inc. Virtio network device + # 00.04.0 Ethernet controller: Red Hat, Inc. Virtio network device + # /sys/class/net: + # /sys/class/net/eth0 -> ../../devices/pci0000:00/0000:00:03.0/virtio0/net/eth0 + # /sys/class/net/eth1 -> ../../devices/pci0000:00/0000:00:04.0/virtio1/net/eth1 + + mac=$1 + for f in $(ls $NET_PATH/); do + if grep -q "$mac" $NET_PATH/$f/address; then + pci_addr=$(readlink $NET_PATH/$f | cut -d "/" -f5) + # some virtual interfaces match on MAC and do not have a PCI address + if [ "$pci_addr" -a "$pci_addr" != "N/A" ]; then + # Found matching interface + logger "NFVBENCHVM: found interface $f ($pci_addr) matching $mac" + break + else + pci_addr="" + fi + fi; + done + if [ -z "$pci_addr" ]; then + echo "ERROR: Cannot find pci address for MAC $mac" >&2 + logger "NFVBENCHVM ERROR: Cannot find pci address for MAC $mac" + return 1 + fi + echo $pci_addr + return 0 +} + +get_eth_port() { + # device mapping for CentOS Linux 7: + # lspci: + # 00.03.0 Ethernet controller: Red Hat, Inc. Virtio network device + # 00.04.0 Ethernet controller: Red Hat, Inc. 
Virtio network device + # /sys/class/net: + # /sys/class/net/eth0 -> ../../devices/pci0000:00/0000:00:03.0/virtio0/net/eth0 + # /sys/class/net/eth1 -> ../../devices/pci0000:00/0000:00:04.0/virtio1/net/eth1 + + mac=$1 + for f in $(ls $NET_PATH/); do + if grep -q "$mac" $NET_PATH/$f/address; then + eth_port=$(readlink $NET_PATH/$f | cut -d "/" -f8) + # some virtual interfaces match on MAC and do not have a PCI address + if [ "$eth_port" -a "$eth_port" != "N/A" ]; then + # Found matching interface + logger "NFVBENCHVM: found interface $f ($eth_port) matching $mac" + break + else + eth_port="" + fi + fi; + done + if [ -z "$eth_port" ]; then + echo "ERROR: Cannot find eth port for MAC $mac" >&2 + logger "NFVBENCHVM ERROR: Cannot find eth port for MAC $mac" + return 1 + fi + echo $eth_port + return 0 +} + +# Set VM MANAGEMENT port up and running +if [ $INTF_MGMT_CIDR ] && [ $INTF_MGMT_IP_GW ]; then + if [ $INTF_MAC_MGMT ]; then + ETH_PORT=$(get_eth_port $INTF_MAC_MGMT) + else + ETH_PORT="eth0" + fi + ip addr add $INTF_MGMT_CIDR dev $ETH_PORT + ip link set $ETH_PORT up + ip route add default via $INTF_MGMT_IP_GW dev $ETH_PORT +else + echo "INFO: VM management IP Addresses missing in $NFVBENCH_CONF" +fi + +# Set dynamically interfaces mac values, if VM is spawn without using NFVBench +# and management interface is used on eth0 +if [ -z "$INTF_MAC1" ] && [ -z "$INTF_MAC2" ]; then + INTF_MAC1=$(ip l show eth1 | grep -o -Ei '([a-fA-F0-9:]{17}|[a-fA-F0-9]{12}$)' | head -1) + INTF_MAC2=$(ip l show eth2 | grep -o -Ei '([a-fA-F0-9:]{17}|[a-fA-F0-9]{12}$)' | head -1) +fi + + +# Sometimes the interfaces on the loopback VM will use different drivers, e.g. +# one from vswitch which is virtio based, one is from SRIOV VF. In this case, +# we have to make sure the forwarder uses them in the right order, which is +# especially important if the VM is in a PVVP chain. +if [ $INTF_MAC1 ] && [ $INTF_MAC2 ]; then + PCI_ADDRESS_1=$(get_pci_address $INTF_MAC1) + PCI_ADDRESS_2=$(get_pci_address $INTF_MAC2) +else + echo "ERROR: VM MAC Addresses missing in $NFVBENCH_CONF" + logger "NFVBENCHVM ERROR: VM MAC Addresses missing in $NFVBENCH_CONF" +fi + +if [ $PCI_ADDRESS_1 ] && [ $PCI_ADDRESS_2 ]; then + logger "NFVBENCHVM: Using pci $PCI_ADDRESS_1 ($INTF_MAC1)" + logger "NFVBENCHVM: Using pci $PCI_ADDRESS_2 ($INTF_MAC2)" + # Configure the forwarder + if [ -z "`lsmod | grep igb_uio`" ]; then + modprobe uio + insmod /dpdk/igb_uio.ko + fi + if [ "$FORWARDER" == "testpmd" ]; then + echo "Configuring testpmd..." + # Binding ports to DPDK + dpdk-devbind -b igb_uio $PCI_ADDRESS_1 + dpdk-devbind -b igb_uio $PCI_ADDRESS_2 + screen -dmSL testpmd /dpdk/testpmd \ + -c $CORE_MASK \ + -n 4 \ + -- \ + --nb-ports=2 \ + --burst=32 \ + --txd=256 \ + --rxd=1024 \ + --eth-peer=0,$TG_MAC1 \ + --eth-peer=1,$TG_MAC2 \ + --forward-mode=mac \ + --nb-cores=$WORKER_CORES \ + --txq=$VIF_MQ_SIZE \ + --rxq=$VIF_MQ_SIZE \ + --max-pkt-len=9000 \ + --cmdline-file=/dpdk/testpmd_cmd.txt + echo "testpmd running in screen 'testpmd'" + logger "NFVBENCHVM: testpmd running in screen 'testpmd'" + elif [ "$FORWARDER" == "vpp" ]; then + echo "Configuring vpp..." 
+ cp /vpp/startup.conf /etc/vpp/startup.conf + cp /vpp/vm.conf /etc/vpp/vm.conf + + sed -i "s/{{PCI_ADDRESS_1}}/$PCI_ADDRESS_1/g" /etc/vpp/startup.conf + sed -i "s/{{PCI_ADDRESS_2}}/$PCI_ADDRESS_2/g" /etc/vpp/startup.conf + sed -i "s/{{WORKER_CORES}}/$WORKER_CORES/g" /etc/vpp/startup.conf + sed -i "s/{{VIF_MQ_SIZE}}/${VIF_MQ_SIZE}/g" /etc/vpp/startup.conf + sed -i "s/{{NUM_MBUFS}}/${NUM_MBUFS}/g" /etc/vpp/startup.conf + service vpp start + sleep 10 + + INTFS=`vppctl show int | grep Ethernet | xargs` + INTF_1=`echo $INTFS | awk '{ print $1 }'` + INTF_2=`echo $INTFS | awk '{ print $4 }'` + if [ -z "${TG_MAC1}" ]; then + # vm.conf does not support lines commented with #, so + # we need to remove the line to set the static ARP entry. + sed -i "/{{TG_MAC1}}/d" /etc/vpp/vm.conf + else + sed -i "s/{{TG_MAC1}}/${TG_MAC1}/g" /etc/vpp/vm.conf + fi + if [ -z "${TG_MAC2}" ]; then + sed -i "/{{TG_MAC2}}/d" /etc/vpp/vm.conf + else + sed -i "s/{{TG_MAC2}}/${TG_MAC2}/g" /etc/vpp/vm.conf + fi + sed -i "s/{{INTF_1}}/${INTF_1//\//\/}/g" /etc/vpp/vm.conf + sed -i "s/{{INTF_2}}/${INTF_2//\//\/}/g" /etc/vpp/vm.conf + sed -i "s/{{VNF_GATEWAY1_CIDR}}/${VNF_GATEWAY1_CIDR//\//\/}/g" /etc/vpp/vm.conf + sed -i "s/{{VNF_GATEWAY2_CIDR}}/${VNF_GATEWAY2_CIDR//\//\/}/g" /etc/vpp/vm.conf + sed -i "s/{{TG_NET1}}/${TG_NET1//\//\/}/g" /etc/vpp/vm.conf + sed -i "s/{{TG_NET2}}/${TG_NET2//\//\/}/g" /etc/vpp/vm.conf + sed -i "s/{{TG_GATEWAY1_IP}}/${TG_GATEWAY1_IP}/g" /etc/vpp/vm.conf + sed -i "s/{{TG_GATEWAY2_IP}}/${TG_GATEWAY2_IP}/g" /etc/vpp/vm.conf + service vpp restart + logger "NFVBENCHVM: vpp service restarted" + else + echo "ERROR: Unknown forwarder value. Accepted values: testpmd or vpp" + exit 1 + fi +else + echo "ERROR: Cannot find PCI Address from MAC" + echo "$INTF_MAC1: $PCI_ADDRESS_1" + echo "$INTF_MAC2: $PCI_ADDRESS_2" + logger "NFVBENCHVM ERROR: Cannot find PCI Address from MAC" +fi + +exit 0 \ No newline at end of file diff --git a/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/systemd/system/nfvbench.service b/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/systemd/system/nfvbench.service new file mode 100644 index 0000000..e952070 --- /dev/null +++ b/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/systemd/system/nfvbench.service @@ -0,0 +1,12 @@ +[Unit] +Description=nfvbench service +After=network.target + +[Service] +Type=forking +User=root +RemainAfterExit=yes +ExecStart=/bin/bash -a -c "source /etc/profile.d/nfvbench.sh && /usr/bin/screen -dmSL nfvbench /usr/local/bin/nfvbench -c /etc/nfvbench/nfvbench.conf --server" + +[Install] +WantedBy=multi-user.target diff --git a/nfvbenchvm/dib/elements/nfvbenchvm/static/nfvbench/configure-nfvbench.sh b/nfvbenchvm/dib/elements/nfvbenchvm/static/nfvbench/configure-nfvbench.sh new file mode 100644 index 0000000..5ec584b --- /dev/null +++ b/nfvbenchvm/dib/elements/nfvbenchvm/static/nfvbench/configure-nfvbench.sh @@ -0,0 +1,263 @@ +#!/bin/bash + +set -e + +NFVBENCH_CONF=/etc/nfvbenchvm.conf +E2E_CFG=/etc/nfvbench/e2e.cfg +LOOPBACK_CFG=/etc/nfvbench/loopback.cfg +NFVBENCH_CFG=/etc/nfvbench/nfvbench.cfg +OPENRC=/etc/nfvbench/openrc + +# Parse and obtain all configurations +eval $(cat $NFVBENCH_CONF) + +if [ -f $OPENRC ]; then + source $OPENRC +fi + +# WE assume there are at least 2 cores available for the VM +CPU_CORES=$(grep -c ^processor /proc/cpuinfo) + +# We need at least 2 admin cores (one master and another latency). 
+if [ $CPU_CORES -le 3 ]; then + ADMIN_CORES=2 +else + # If the number of cores is even we + # reserve 3 cores for admin (third being idle) so the number of + # workers is either 1 (if CPU_CORES is 4) or always even + if (( $CPU_CORES % 2 )); then + ADMIN_CORES=2 + else + ADMIN_CORES=3 + fi +fi +# 2 vcpus: AW (core 0: Admin, core 1: Worker) +# 3 vcpus: AWW (core 0: Admin, core 1,2: Worker) +# 4 vcpus: AWWU (core 0: Admin, core 1,2: Worker, core 3: Unused) +# 5 vcpus: AWWWW +# 6 vcpus: AWWWWU +WORKER_CORES=$(expr $CPU_CORES - $ADMIN_CORES) +# worker cores are all cores except the admin core (core 0) and the eventual unused core +# AW -> 1 +# AWW -> 1,2 +# AWWU -> 1,2 +WORKER_CORE_LIST=$(seq -s, $ADMIN_CORES $WORKER_CORES) +# always use all cores +CORE_MASK=0x$(echo "obase=16; 2 ^ $CPU_CORES - 1" | bc) + +logger "NFVBENCHVM: CPU_CORES=$CPU_CORES, ADMIN_CORES=$ADMIN_CORES, WORKER_CORES=$WORKER_CORES ($WORKER_CORE_LIST)" + +# Isolate all cores that are reserved for workers +tuna -c $WORKER_CORE_LIST --isolate + +NET_PATH=/sys/class/net + +get_pci_address() { + # device mapping for CentOS Linux 7: + # lspci: + # 00.03.0 Ethernet controller: Red Hat, Inc. Virtio network device + # 00.04.0 Ethernet controller: Red Hat, Inc. Virtio network device + # /sys/class/net: + # /sys/class/net/eth0 -> ../../devices/pci0000:00/0000:00:03.0/virtio0/net/eth0 + # /sys/class/net/eth1 -> ../../devices/pci0000:00/0000:00:04.0/virtio1/net/eth1 + + mac=$1 + for f in $(ls $NET_PATH/); do + if grep -q "$mac" $NET_PATH/$f/address; then + pci_addr=$(readlink $NET_PATH/$f | cut -d "/" -f5) + # some virtual interfaces match on MAC and do not have a PCI address + if [ "$pci_addr" -a "$pci_addr" != "N/A" ]; then + # Found matching interface + logger "NFVBENCHVM: found interface $f ($pci_addr) matching $mac" + break + else + pci_addr="" + fi + fi; + done + if [ -z "$pci_addr" ]; then + echo "ERROR: Cannot find pci address for MAC $mac" >&2 + logger "NFVBENCHVM ERROR: Cannot find pci address for MAC $mac" + return 1 + fi + echo $pci_addr + return 0 +} + +get_interfaces_mac_values(){ + # Set dynamically interfaces mac values, if VM is spawn with SRIOV PF ports + # and openstack API are accessible + if [ -z "$LOOPBACK_INTF_MAC1" ] && [ -z "$LOOPBACK_INTF_MAC2" ]; then + if [ -f $OPENRC ] && [ "$LOOPBACK_PORT_NAME1" ] && [ "$LOOPBACK_PORT_NAME2" ]; then + LOOPBACK_INTF_MAC1=$(openstack port list | grep $LOOPBACK_PORT_NAME1 | grep -o -Ei '([a-fA-F0-9:]{17}|[a-fA-F0-9]{12}$)' | head -1) + LOOPBACK_INTF_MAC2=$(openstack port list | grep $LOOPBACK_PORT_NAME2 | grep -o -Ei '([a-fA-F0-9:]{17}|[a-fA-F0-9]{12}$)' | head -1) + fi + fi + if [ -z "$E2E_INTF_MAC1" ] && [ -z "$E2E_INTF_MAC2" ]; then + if [ -f $OPENRC ] && [ "$E2E_PORT_NAME1" ] && [ "$E2E_PORT_NAME2" ]; then + E2E_INTF_MAC1=$(openstack port list | grep $E2E_PORT_NAME1 | grep -o -Ei '([a-fA-F0-9:]{17}|[a-fA-F0-9]{12}$)' | head -1) + E2E_INTF_MAC2=$(openstack port list | grep $E2E_PORT_NAME2 | grep -o -Ei '([a-fA-F0-9:]{17}|[a-fA-F0-9]{12}$)' | head -1) + fi + fi + if [ -z "$INTF_MAC1" ] && [ -z "$INTF_MAC2" ]; then + if [ -f $OPENRC ] && [ "$PORT_NAME1" ] && [ "$PORT_NAME2" ]; then + INTF_MAC1=$(openstack port list | grep $PORT_NAME1 | grep -o -Ei '([a-fA-F0-9:]{17}|[a-fA-F0-9]{12}$)' | head -1) + INTF_MAC2=$(openstack port list | grep $PORT_NAME2 | grep -o -Ei '([a-fA-F0-9:]{17}|[a-fA-F0-9]{12}$)' | head -1) + fi + fi +} + +get_interfaces_pci_address(){ + # Sometimes the interfaces on the generator VM will use different physical networks. 
In this case, + # we have to make sure the generator uses them in the right order. + if [ $LOOPBACK_INTF_MAC1 ] && [ $LOOPBACK_INTF_MAC2 ]; then + LOOPBACK_PCI_ADDRESS_1=$(get_pci_address $LOOPBACK_INTF_MAC1) + LOOPBACK_PCI_ADDRESS_2=$(get_pci_address $LOOPBACK_INTF_MAC2) + + echo LOOPBACK_PCI_ADDRESS_1=$LOOPBACK_PCI_ADDRESS_1 >> $NFVBENCH_CONF + echo LOOPBACK_PCI_ADDRESS_2=$LOOPBACK_PCI_ADDRESS_2 >> $NFVBENCH_CONF + fi + if [ $E2E_INTF_MAC1 ] && [ $E2E_INTF_MAC2 ]; then + E2E_PCI_ADDRESS_1=$(get_pci_address $E2E_INTF_MAC1) + E2E_PCI_ADDRESS_2=$(get_pci_address $E2E_INTF_MAC2) + + echo E2E_PCI_ADDRESS_1=$E2E_PCI_ADDRESS_1 >> $NFVBENCH_CONF + echo E2E_PCI_ADDRESS_2=$E2E_PCI_ADDRESS_2 >> $NFVBENCH_CONF + fi + if [ $INTF_MAC1 ] && [ $INTF_MAC2 ]; then + PCI_ADDRESS_1=$(get_pci_address $INTF_MAC1) + PCI_ADDRESS_2=$(get_pci_address $INTF_MAC2) + + echo PCI_ADDRESS_1=$PCI_ADDRESS_1 >> $NFVBENCH_CONF + echo PCI_ADDRESS_2=$PCI_ADDRESS_2 >> $NFVBENCH_CONF + fi +} + +bind_interfaces(){ + if [ $LOOPBACK_PCI_ADDRESS_1 ]; then + dpdk-devbind -b vfio-pci $LOOPBACK_PCI_ADDRESS_1 + fi + if [ $LOOPBACK_PCI_ADDRESS_2 ]; then + dpdk-devbind -b vfio-pci $LOOPBACK_PCI_ADDRESS_2 + fi + if [ $E2E_PCI_ADDRESS_1 ]; then + dpdk-devbind -b vfio-pci $E2E_PCI_ADDRESS_1 + fi + if [ $E2E_PCI_ADDRESS_2 ]; then + dpdk-devbind -b vfio-pci $E2E_PCI_ADDRESS_2 + fi + if [ $PCI_ADDRESS_1 ]; then + dpdk-devbind -b vfio-pci $PCI_ADDRESS_1 + fi + if [ $PCI_ADDRESS_2 ]; then + dpdk-devbind -b vfio-pci $PCI_ADDRESS_2 + fi +} + +configure_loopback_mode(){ + if [ $LOOPBACK_PCI_ADDRESS_1 ] && [ $LOOPBACK_PCI_ADDRESS_2 ]; then + logger "NFVBENCHVM: loopback - Using pci $LOOPBACK_PCI_ADDRESS_1 ($LOOPBACK_INTF_MAC1)" + logger "NFVBENCHVM: loopback - Using pci $LOOPBACK_PCI_ADDRESS_2 ($LOOPBACK_INTF_MAC2)" + + echo "Configuring nfvbench and TRex for loopback mode..." + # execute env script to avoid no ENV in screen and a nfvbench error + source /etc/profile.d/nfvbench.sh + sed -i "s/{{PCI_ADDRESS_1}}/$LOOPBACK_PCI_ADDRESS_1/g" /etc/nfvbench/loopback.cfg + sed -i "s/{{PCI_ADDRESS_2}}/$LOOPBACK_PCI_ADDRESS_2/g" /etc/nfvbench/loopback.cfg + sed -i "s/{{CORES}}/$WORKER_CORES/g" /etc/nfvbench/loopback.cfg + CORE_THREADS=$(seq -s, 2 $((2+$WORKER_CORES))) + sed -i "s/{{CORE_THREADS}}/$CORE_THREADS/g" /etc/nfvbench/loopback.cfg + else + echo "ERROR: Cannot find PCI Address from MAC" + echo "$LOOPBACK_INTF_MAC1: $LOOPBACK_PCI_ADDRESS_1" + echo "$LOOPBACK_INTF_MAC2: $LOOPBACK_PCI_ADDRESS_2" + logger "NFVBENCHVM ERROR: Cannot find PCI Address from MAC (loopback mode)" + fi + +} + +configure_e2e_mode(){ + if [ $E2E_PCI_ADDRESS_1 ] && [ $E2E_PCI_ADDRESS_2 ]; then + logger "NFVBENCHVM: e2e - Using pci $E2E_PCI_ADDRESS_1 ($E2E_INTF_MAC1)" + logger "NFVBENCHVM: e2e - Using pci $E2E_PCI_ADDRESS_2 ($E2E_INTF_MAC2)" + + echo "Configuring nfvbench and TRex for e2e mode..." 
+ # execute env script to avoid no ENV in screen and a nfvbench error + source /etc/profile.d/nfvbench.sh + sed -i "s/{{PCI_ADDRESS_1}}/$E2E_PCI_ADDRESS_1/g" /etc/nfvbench/e2e.cfg + sed -i "s/{{PCI_ADDRESS_2}}/$E2E_PCI_ADDRESS_2/g" /etc/nfvbench/e2e.cfg + sed -i "s/{{CORES}}/$WORKER_CORES/g" /etc/nfvbench/e2e.cfg + CORE_THREADS=$(seq -s, 2 $((2+$WORKER_CORES))) + sed -i "s/{{CORE_THREADS}}/$CORE_THREADS/g" /etc/nfvbench/e2e.cfg + else + echo "ERROR: Cannot find PCI Address from MAC" + echo "$E2E_INTF_MAC1: $E2E_PCI_ADDRESS_1" + echo "$E2E_INTF_MAC2: $E2E_PCI_ADDRESS_2" + logger "NFVBENCHVM ERROR: Cannot find PCI Address from MAC (e2e mode)" + fi +} + +configure_nfvbench(){ + if [ $PCI_ADDRESS_1 ] && [ $PCI_ADDRESS_2 ]; then + logger "NFVBENCHVM: Using pci $PCI_ADDRESS_1 ($INTF_MAC1)" + logger "NFVBENCHVM: Using pci $PCI_ADDRESS_2 ($INTF_MAC2)" + + echo "Configuring nfvbench and TRex..." + # execute env script to avoid no ENV in screen and a nfvbench error + source /etc/profile.d/nfvbench.sh + + if [ $DEFAULT ]; then + cp /nfvbench/nfvbench.conf /etc/nfvbench/nfvbench.cfg + fi + sed -i "s/{{PCI_ADDRESS_1}}/$PCI_ADDRESS_1/g" /etc/nfvbench/nfvbench.cfg + sed -i "s/{{PCI_ADDRESS_2}}/$PCI_ADDRESS_2/g" /etc/nfvbench/nfvbench.cfg + sed -i "s/{{CORES}}/$WORKER_CORES/g" /etc/nfvbench/nfvbench.cfg + CORE_THREADS=$(seq -s, 2 $((2+$WORKER_CORES))) + sed -i "s/{{CORE_THREADS}}/$CORE_THREADS/g" /etc/nfvbench/nfvbench.cfg + + else + echo "ERROR: Cannot find PCI Address from MAC" + echo "$INTF_MAC1: $PCI_ADDRESS_1" + echo "$INTF_MAC2: $PCI_ADDRESS_2" + logger "NFVBENCHVM ERROR: Cannot find PCI Address from MAC" + fi +} + +# Check if config files are provided by config drive (CLI command) or Ansible script +# and configure NFVbench accordingly to these files +if [ -f $E2E_CFG ]; then + if [ -z $E2E_PCI_ADDRESS_1 ] && [ -z $E2E_PCI_ADDRESS_2 ]; then + get_interfaces_mac_values + get_interfaces_pci_address + bind_interfaces + fi + configure_e2e_mode +fi +if [ -f $LOOPBACK_CFG ]; then + if [ -z $LOOPBACK_PCI_ADDRESS_1 ] && [ -z $LOOPBACK_PCI_ADDRESS_2 ]; then + get_interfaces_mac_values + get_interfaces_pci_address + bind_interfaces + fi + configure_loopback_mode +fi +# if nfvbench.cfg is provided by config drive (CLI command) or Ansible script +# configure nfvbench using this file otherwise untemplate default config if no file exists +if [ -f $NFVBENCH_CFG ]; then + if [ -z $PCI_ADDRESS_1 ] && [ -z $PCI_ADDRESS_2 ]; then + get_interfaces_mac_values + get_interfaces_pci_address + bind_interfaces + fi + configure_nfvbench +elif [ ! -f $E2E_CFG ] && [ ! 
-f $LOOPBACK_CFG ]; then + if [ -z $PCI_ADDRESS_1 ] && [ -z $PCI_ADDRESS_2 ]; then + get_interfaces_mac_values + get_interfaces_pci_address + bind_interfaces + fi + DEFAULT=true + configure_nfvbench +fi + +exit 0 \ No newline at end of file diff --git a/nfvbenchvm/dib/elements/nfvbenchvm/static/nfvbench/nfvbench.conf b/nfvbenchvm/dib/elements/nfvbenchvm/static/nfvbench/nfvbench.conf new file mode 100644 index 0000000..c1ca23e --- /dev/null +++ b/nfvbenchvm/dib/elements/nfvbenchvm/static/nfvbench/nfvbench.conf @@ -0,0 +1,25 @@ +traffic_generator: + generator_profile: + - name: trex-local + tool: TRex + ip: 127.0.0.1 + zmq_pub_port: 4500 + zmq_rpc_port: 4501 + software_mode: false + + cores: {{CORES}} + platform: + master_thread_id: '0' + latency_thread_id: '1' + dual_if: + - socket: 0 + threads: [{{CORE_THREADS}}] + + interfaces: + - port: 0 + pci: "{{PCI_ADDRESS_1}}" + switch: + - port: 1 + pci: "{{PCI_ADDRESS_2}}" + switch: + intf_speed: \ No newline at end of file diff --git a/nfvbenchvm/dib/elements/nfvbenchvm/static/nfvbench/start-nfvbench.sh b/nfvbenchvm/dib/elements/nfvbenchvm/static/nfvbench/start-nfvbench.sh new file mode 100644 index 0000000..1f6fa28 --- /dev/null +++ b/nfvbenchvm/dib/elements/nfvbenchvm/static/nfvbench/start-nfvbench.sh @@ -0,0 +1,51 @@ +#!/bin/bash + + +restart_nfvbench_service(){ + service nfvbench restart + echo "NFVbench running in screen 'nfvbench'" + logger "NFVBENCHVM: NFVbench running in screen 'nfvbench'" +} + +start_nfvbench(){ + ln -sfn /etc/nfvbench/nfvbench.cfg /etc/nfvbench/nfvbench.conf + restart_nfvbench_service +} + +start_nfvbench_e2e_mode(){ + ln -sfn /etc/nfvbench/e2e.cfg /etc/nfvbench/nfvbench.conf + restart_nfvbench_service +} + +start_nfvbench_loopback_mode(){ + ln -sfn /etc/nfvbench/loopback.cfg /etc/nfvbench/nfvbench.conf + restart_nfvbench_service +} + +usage() { + echo "Usage: $0 action" + echo "action (optional):" + echo "e2e start NFVbench with E2E config file" + echo "loopback start NFVbench with loopback config file" + echo "" + echo "If no action is given NFVbench will start with default config file" + exit 1 +} + +# ---------------------------------------------------------------------------- +# Parse command line options and configure the script +# ---------------------------------------------------------------------------- +if [ "$#" -lt 1 ]; then + start_nfvbench + exit 0 +else + if [ $1 = "e2e" ]; then + start_nfvbench_e2e_mode + exit 0 + elif [ $1 = "loopback" ]; then + start_nfvbench_loopback_mode + exit 0 + else + usage + fi +fi -- cgit 1.2.3-korg
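A usage sketch to complement the REST examples in the README above: it submits a run to the generator VM and polls the server until the reported status is no longer PENDING. The `<management_ip>` placeholder, the default port 7555, the `/start_run` and `/status` endpoints and the `nfvbenchconfig.json` payload file come from the README; the polling loop, its 10-second interval and the grep-based check on the JSON text are illustrative assumptions.

.. code-block:: bash

    #!/bin/bash
    # Hypothetical client-side helper: submit an NFVbench run and wait for it to leave PENDING.
    # <management_ip> is the routable management IP of the generator VM; 7555 is the default server port.
    NFVBENCH_URL="http://<management_ip>:7555"

    # Submit the run (same request as documented in the README)
    curl -XPOST "$NFVBENCH_URL/start_run" -H "Content-Type: application/json" -d @nfvbenchconfig.json

    # Poll /status every 10 seconds; the server answers with a JSON "status" field
    while curl -s -XGET "$NFVBENCH_URL/status" | grep -q '"status": "PENDING"'; do
        sleep 10
    done
    echo "NFVbench run finished (status no longer PENDING)"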