-rw-r--r--  ansible/build_vm.yml | 44
-rw-r--r--  ansible/group_vars/all.yml | 9
-rw-r--r--  ansible/install-inventory.ini | 20
-rw-r--r--  ansible/install.yaml | 79
-rw-r--r--  ansible/roles/build_yardstick_image/tasks/post_build.yml | 21
-rw-r--r--  ansible/roles/build_yardstick_image/tasks/pre_build.yml | 12
-rw-r--r--  ansible/roles/install_image_dependencies/defaults/main.yml | 2
-rw-r--r--  dashboard/RFC2544_2Port.json | 76
-rw-r--r--  dashboard/RFC2544_2Port_Multiframesize.json | 63
-rw-r--r--  docs/testing/user/userguide/13-nsb-installation.rst | 252
-rw-r--r--  docs/testing/user/userguide/14-nsb-operation.rst | 34
-rw-r--r--  docs/testing/user/userguide/nsb/nsb-list-of-tcs.rst | 1
-rw-r--r--  docs/testing/user/userguide/nsb/tc_vpp_baremetal_crypto_ipsec.rst | 113
-rwxr-xr-x  yardstick/network_services/vnf_generic/vnf/vcmts_vnf.py | 273
-rwxr-xr-x  yardstick/tests/unit/network_services/vnf_generic/vnf/test_vcmts_vnf.py | 651
15 files changed, 1539 insertions, 111 deletions
diff --git a/ansible/build_vm.yml b/ansible/build_vm.yml
new file mode 100644
index 000000000..9774f9ff5
--- /dev/null
+++ b/ansible/build_vm.yml
@@ -0,0 +1,44 @@
+- name: Prepare to build VM
+ hosts: jumphost
+ become: yes
+ vars:
+ img_prop_item: "{{ IMG_PROPERTY }}"
+ img_arch: "{{ YARD_IMG_ARCH }}"
+
+ tasks:
+ - name: Include pre-build
+ include_role:
+ name: build_yardstick_image
+ tasks_from: pre_build.yml
+ when:
+ - installation_mode != inst_mode_container
+
+
+- name: Build VM in chroot
+ hosts: chroot_image
+ connection: chroot
+ become: yes
+ vars:
+ img_property: "{{ IMG_PROPERTY }}"
+ environment: "{{ proxy_env }}"
+
+ tasks:
+ - name: Include image build
+ include_role:
+ name: build_yardstick_image
+ tasks_from: "cloudimg_modify_{{ img_property }}.yml"
+ when:
+ - installation_mode != inst_mode_container
+
+
+- name: Clear up after VM is built
+ hosts: jumphost
+ become: yes
+
+ tasks:
+ - name: Include post-build
+ include_role:
+ name: build_yardstick_image
+ tasks_from: post_build.yml
+ when:
+ - installation_mode != inst_mode_container
diff --git a/ansible/group_vars/all.yml b/ansible/group_vars/all.yml
index cd12bf02e..e94d24023 100644
--- a/ansible/group_vars/all.yml
+++ b/ansible/group_vars/all.yml
@@ -7,7 +7,16 @@ release: "{{ OS_RELEASE | default('xenial') }}"
normal_image_file: "{{ workspace }}/yardstick-image.img"
nsb_image_file: "{{ workspace }}/yardstick-nsb-image.img"
ubuntu_image_file: /tmp/workspace/yardstick/yardstick-trusty-server.raw
+arch_amd64: "amd64"
+arch_arm64: "arm64"
+inst_mode_baremetal: "baremetal"
+inst_mode_container: "container"
+inst_mode_container_pull: "container_pull"
+ubuntu_archive:
+ "amd64": "http://archive.ubuntu.com/ubuntu/"
+ "arm64": "http://ports.ubuntu.com/ubuntu-ports/"
installation_mode: "{{ INSTALLATION_MODE | default('baremetal') }}"
+yardstick_dir: "{{ YARDSTICK_DIR | default('/home/opnfv/repos/yardstick') }}"
proxy_env:
PATH: /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/root/bin
http_proxy: "{{ lookup('env', 'http_proxy') }}"
diff --git a/ansible/install-inventory.ini b/ansible/install-inventory.ini
index bcd57db65..07d2c13f4 100644
--- a/ansible/install-inventory.ini
+++ b/ansible/install-inventory.ini
@@ -8,24 +8,20 @@ localhost ansible_connection=local
[yardstick:children]
jumphost
-[yardstick-standalone]
-# standalone-node ansible_host=192.168.2.51 ansible_user=ubuntu ansible_ssh_pass=password ansible_connection=ssh
-
[yardstick-baremetal]
-# baremetal-node ansible_host=192.168.2.52 ansible_user=ubuntu ansible_ssh_pass=password ansible_connection=ssh
+# baremetal-node ansible_host=192.168.2.51 ansible_user=ubuntu ansible_ssh_pass=password ansible_connection=ssh
+
+[yardstick-standalone]
+# standalone-node ansible_host=192.168.2.52 ansible_user=ubuntu ansible_ssh_pass=password ansible_connection=ssh
[all:vars]
-arch_amd64=amd64
-arch_arm64=arm64
-inst_mode_baremetal=baremetal
-inst_mode_container=container
-inst_mode_container_pull=container_pull
-ubuntu_archive={"amd64": "http://archive.ubuntu.com/ubuntu/", "arm64": "http://ports.ubuntu.com/ubuntu-ports/"}
-# When IMG_PROPERTY is passed neither normal nor nsb set "path_to_vm=/path/to/image" to add it to OpenStack
-# path_to_img=/tmp/workspace/yardstick-image.img
# Uncomment credentials below if needed
# ansible_user=root
# ansible_ssh_pass=root
+# ansible_ssh_private_key_file=/root/.ssh/id_rsa
+
+# When the IMG_PROPERTY passed is neither normal nor nsb, set "path_to_vm=/path/to/image" to add the image to OpenStack
+# path_to_img=/tmp/workspace/yardstick-image.img
# List of CPUs to be isolated (not used by default)
# Grub line will be extended with: "isolcpus=<ISOL_CPUS> nohz=on nohz_full=<ISOL_CPUS> rcu_nocbs=1<ISOL_CPUS>"
diff --git a/ansible/install.yaml b/ansible/install.yaml
index 558c48609..a78a11f68 100644
--- a/ansible/install.yaml
+++ b/ansible/install.yaml
@@ -15,7 +15,6 @@
- hosts: jumphost
become: yes
vars:
- yardstick_dir: "{{ YARDSTICK_DIR | default('/home/opnfv/repos/yardstick') }}"
virtual_environment: "{{ VIRTUAL_ENVIRONMENT | default(False) }}"
nsb_dir: "{{ NSB_DIR | default('/opt/nsb_bin/') }}"
@@ -105,54 +104,59 @@
- docker
- barometer_collectd
-
- name: Prepare to build VM
- hosts: jumphost
+ hosts: yardstick-standalone
become: yes
vars:
img_prop_item: "{{ IMG_PROPERTY }}"
img_arch: "{{ YARD_IMG_ARCH }}"
-
- tasks:
- - name: Include pre-build
- include_role:
- name: build_yardstick_image
- tasks_from: pre_build.yml
- when:
- - installation_mode != inst_mode_container
- - IMG_PROPERTY == 'nsb' or IMG_PROPERTY == 'normal'
-
-
-- name: Build VM in chroot
- hosts: chroot_image
- connection: chroot
- become: yes
- vars:
- img_property: "{{ IMG_PROPERTY }}"
environment: "{{ proxy_env }}"
tasks:
- - name: Include image build
- include_role:
- name: build_yardstick_image
- tasks_from: "cloudimg_modify_{{ img_property }}.yml"
+ - file:
+ dest: /tmp/ansible
+ state: directory
+ mode: 0755
+
+ - name: Copy ansible folder to remote DUT
+ copy:
+ src: "."
+ dest: /tmp/ansible
+
+ - name: Install DUT related packages
+ apt:
+ name: "{{ packages }}"
+ vars:
+ packages:
+ - qemu-kvm
+ - libvirt-bin
+ - bridge-utils
+ - fping
+ - genisoimage
+
+ # There is a bug in the Ansible easy_install module on Ubuntu 16.04.
+ # Refer https://github.com/ansible/ansible/issues/23534
+ - name: Install pip
+ shell: easy_install -U pip
when:
- - installation_mode != inst_mode_container
- - IMG_PROPERTY == 'nsb' or IMG_PROPERTY == 'normal'
+ - ansible_distribution == 'Ubuntu'
+ - ansible_distribution_major_version|int <= 16
+ - name: Install python-pip
+ action: "{{ ansible_pkg_mgr }} name=python-pip state=present"
+ when:
+ - ansible_distribution == 'Ubuntu'
+ - ansible_distribution_major_version|int >= 17
-- name: Clear up after VM is built
- hosts: jumphost
- become: yes
+ - name: Update pip ansible docker
+ pip:
+ name: "ansible==2.5.5"
- tasks:
- - name: Include post-build
- include_role:
- name: build_yardstick_image
- tasks_from: post_build.yml
- when:
- - installation_mode != inst_mode_container
- - IMG_PROPERTY == 'nsb' or IMG_PROPERTY == 'normal'
+ # Workaround: Ansible does not support the chroot connection on a remote server
+ - name: Run ansible on remote
+ shell: "ansible-playbook -e OS_RELEASE={{ OS_RELEASE }} -e IMAGE_PROPERTY={{ IMAGE_PROPERTY }} -e YARD_IMAGE_ARCH={{ YARD_IMAGE_ARCH }} -i install-inventory.ini build_vm.yml -vv"
+ args:
+ chdir: "/tmp/ansible"
- name: Add OpenStack variables, image
@@ -216,6 +220,5 @@
volumes:
- "{{ openrc_file|default('/dev/null') }}:/etc/yardstick/openstack.creds:ro"
- /var/run/docker.sock:/var/run/docker.sock
- - /opt:/opt
- /etc/localtime:/etc/localtime:ro
when: installation_mode == inst_mode_container_pull
diff --git a/ansible/roles/build_yardstick_image/tasks/post_build.yml b/ansible/roles/build_yardstick_image/tasks/post_build.yml
index c6888f8df..abbf57c03 100644
--- a/ansible/roles/build_yardstick_image/tasks/post_build.yml
+++ b/ansible/roles/build_yardstick_image/tasks/post_build.yml
@@ -35,6 +35,10 @@
state: unmounted
- mount:
+ name: "{{ mountdir }}/run"
+ state: unmounted
+
+- mount:
name: "{{ mountdir }}"
state: unmounted
@@ -44,3 +48,20 @@
- debug:
msg: "yardstick image = {{ imgfile }}"
+
+- set_fact:
+ imgdest: "/var/lib/libvirt/images/{{ imgfile | basename}}"
+ name: "{{ (imgfile | basename | splitext)[0] }}"
+ ext: "{{ (imgfile | basename | splitext)[1] }}"
+
+- name: Verify if imgfile exists in libvirt images
+ stat:
+ path: "{{ imgdest }}"
+ register: imgdest_stat
+
+- set_fact:
+ imgdest: "/var/lib/libvirt/images/{{ name }}_autogen{{ ext }}"
+ when: imgdest_stat.stat.exists
+
+- name: Copy image to libvirt images
+ shell: "cp {{ imgfile }} {{ imgdest }}"
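
As an aside, the collision-avoiding copy performed by the stat/set_fact/cp tasks
above can be read as the following plain-Python sketch; the helper name and the
default images directory are illustrative only and are not part of the patch:

    import os
    import shutil

    def copy_to_libvirt_images(imgfile, images_dir="/var/lib/libvirt/images"):
        """Copy imgfile into images_dir, appending _autogen if the name is taken."""
        name, ext = os.path.splitext(os.path.basename(imgfile))
        imgdest = os.path.join(images_dir, name + ext)
        if os.path.exists(imgdest):
            # Mirror the set_fact fallback: keep the existing image untouched
            imgdest = os.path.join(images_dir, name + "_autogen" + ext)
        shutil.copy(imgfile, imgdest)
        return imgdest
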
diff --git a/ansible/roles/build_yardstick_image/tasks/pre_build.yml b/ansible/roles/build_yardstick_image/tasks/pre_build.yml
index 2dae38060..3ac8e90e9 100644
--- a/ansible/roles/build_yardstick_image/tasks/pre_build.yml
+++ b/ansible/roles/build_yardstick_image/tasks/pre_build.yml
@@ -45,6 +45,7 @@
with_items:
# order matters
- "{{ mountdir }}/proc"
+ - "{{ mountdir }}/run"
- "{{ mountdir }}"
- "/mnt/{{ release }}"
@@ -178,6 +179,17 @@
fstab: "{{ fake_fstab }}"
state: mounted
+- name: mount chroot /run
+ mount:
+ src: /run
+ name: "{{ mountdir }}/run"
+ fstype: tmpfs
+ opts: bind
+ # !!!!!!! this is required otherwise we add entries to /etc/fstab
+ # and prevent the system from booting
+ fstab: "{{ fake_fstab }}"
+ state: mounted
+
- name: if arm copy qemu-aarch64-static into chroot
copy:
src: /usr/bin/qemu-aarch64-static
diff --git a/ansible/roles/install_image_dependencies/defaults/main.yml b/ansible/roles/install_image_dependencies/defaults/main.yml
index 42951bf6d..558e68a9b 100644
--- a/ansible/roles/install_image_dependencies/defaults/main.yml
+++ b/ansible/roles/install_image_dependencies/defaults/main.yml
@@ -30,6 +30,7 @@ install_dependencies:
- libxss-dev
- expect
- libnuma-dev
+ - curl
RedHat:
- bc
- fio
@@ -50,3 +51,4 @@ install_dependencies:
- sysstat
- unzip
- python-devel
+ - curl
diff --git a/dashboard/RFC2544_2Port.json b/dashboard/RFC2544_2Port.json
index e6f3265f9..de9448cee 100644
--- a/dashboard/RFC2544_2Port.json
+++ b/dashboard/RFC2544_2Port.json
@@ -1,14 +1,50 @@
{
+ "__inputs": [
+ {
+ "name": "DS_YARDSTICK",
+ "label": "yardstick",
+ "description": "",
+ "type": "datasource",
+ "pluginId": "influxdb",
+ "pluginName": "InfluxDB"
+ }
+ ],
+ "__requires": [
+ {
+ "type": "grafana",
+ "id": "grafana",
+ "name": "Grafana",
+ "version": "4.4.3"
+ },
+ {
+ "type": "panel",
+ "id": "graph",
+ "name": "Graph",
+ "version": ""
+ },
+ {
+ "type": "datasource",
+ "id": "influxdb",
+ "name": "InfluxDB",
+ "version": "1.0.0"
+ },
+ {
+ "type": "panel",
+ "id": "table",
+ "name": "Table",
+ "version": ""
+ }
+ ],
"annotations": {
"list": [
{
- "datasource": "yardstick",
+ "datasource": "${DS_YARDSTICK}",
"enable": true,
"hide": false,
"iconColor": "rgb(248, 255, 0)",
"limit": 100,
"name": "status",
- "query": "SELECT tg__0.collect_stats.Status FROM \"tc_heat_rfc2544_ipv4_1rule_1flow_trex\" WHERE \"tg__0.collect_stats.Status\"='Success' AND task_id='$task_id'",
+ "query": "SELECT tg__0.collect_stats.Status FROM $test_name WHERE \"tg__0.collect_stats.Status\"='Success' AND task_id='$task_id'",
"showIn": 0,
"titleColumn": "Status",
"type": "alert"
@@ -19,7 +55,7 @@
"gnetId": null,
"graphTooltip": 0,
"hideControls": false,
- "id": 6,
+ "id": null,
"links": [],
"refresh": false,
"rows": [
@@ -32,7 +68,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "yardstick",
+ "datasource": "${DS_YARDSTICK}",
"fill": 0,
"hideTimeOverride": true,
"id": 3,
@@ -66,8 +102,8 @@
"measurement": "/^$test_name$/",
"orderByTime": "ASC",
"policy": "default",
- "query": "SELECT \"tg__0.collect_stats.xe0.InBytes\", \"tg__0.collect_stats.xe0.OutBytes\", \"tg__0.collect_stats.xe1.InBytes\", \"tg__0.collect_stats.xe1.OutBytes\" FROM /^$test_name$/ WHERE \"task_id\" =~ /^$task_id$/ AND \"tg__0.collect_stats.PktSize\" =~ /^$framesize$/ AND $timeFilter",
- "rawQuery": false,
+ "query": "SELECT \"tg__0.collect_stats.xe0.InBytes\", \"tg__0.collect_stats.xe0.OutBytes\", \"tg__0.collect_stats.xe1.InBytes\", \"tg__0.collect_stats.xe1.OutBytes\" FROM /^$test_name$/ WHERE \"task_id\" =~ /^$task_id$/ AND $timeFilter",
+ "rawQuery": true,
"refId": "A",
"resultFormat": "time_series",
"select": [
@@ -166,7 +202,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "yardstick",
+ "datasource": "${DS_YARDSTICK}",
"fill": 1,
"id": 1,
"legend": {
@@ -196,7 +232,7 @@
"alias": "$col",
"dsType": "influxdb",
"groupBy": [],
- "measurement": "tc_heat_rfc2544_ipv4_1rule_1flow_trex",
+ "measurement": "/^$test_name$/",
"orderByTime": "ASC",
"policy": "default",
"refId": "A",
@@ -273,7 +309,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": null,
+ "datasource": "${DS_YARDSTICK}",
"fill": 1,
"id": 5,
"legend": {
@@ -528,11 +564,8 @@
"list": [
{
"allValue": null,
- "current": {
- "text": "tc_heat_rfc2544_ipv4_1rule_1flow_trex",
- "value": "tc_heat_rfc2544_ipv4_1rule_1flow_trex"
- },
- "datasource": "yardstick",
+ "current": {},
+ "datasource": "${DS_YARDSTICK}",
"hide": 0,
"includeAll": false,
"label": "test_name",
@@ -551,18 +584,15 @@
},
{
"allValue": null,
- "current": {
- "text": "004b5387-74b3-4cf0-9597-5198db9e2731",
- "value": "004b5387-74b3-4cf0-9597-5198db9e2731"
- },
- "datasource": "yardstick",
+ "current": {},
+ "datasource": "${DS_YARDSTICK}",
"hide": 0,
"includeAll": false,
"label": "task_id",
"multi": false,
"name": "task_id",
"options": [],
- "query": "SHOW TAG VALUES FROM $test_name WITH KEY = \"task_id\" ",
+ "query": "SHOW TAG VALUES FROM $test_name WITH KEY = \"task_id\" ",
"refresh": 2,
"regex": "",
"sort": 0,
@@ -575,8 +605,8 @@
]
},
"time": {
- "from": "2019-03-05T12:44:02.829Z",
- "to": "2019-03-05T13:32:27.585Z"
+ "from": "2019-03-06T13:54:13.610Z",
+ "to": "2019-03-06T13:56:59.693Z"
},
"timepicker": {
"refresh_intervals": [
@@ -605,5 +635,5 @@
},
"timezone": "",
"title": "RFC2544",
- "version": 2
+ "version": 4
}
diff --git a/dashboard/RFC2544_2Port_Multiframesize.json b/dashboard/RFC2544_2Port_Multiframesize.json
index f08cf3d47..2d8e9522f 100644
--- a/dashboard/RFC2544_2Port_Multiframesize.json
+++ b/dashboard/RFC2544_2Port_Multiframesize.json
@@ -1,14 +1,44 @@
{
+ "__inputs": [
+ {
+ "name": "DS_YARDSTICK",
+ "label": "yardstick",
+ "description": "",
+ "type": "datasource",
+ "pluginId": "influxdb",
+ "pluginName": "InfluxDB"
+ }
+ ],
+ "__requires": [
+ {
+ "type": "grafana",
+ "id": "grafana",
+ "name": "Grafana",
+ "version": "4.4.3"
+ },
+ {
+ "type": "panel",
+ "id": "graph",
+ "name": "Graph",
+ "version": ""
+ },
+ {
+ "type": "datasource",
+ "id": "influxdb",
+ "name": "InfluxDB",
+ "version": "1.0.0"
+ }
+ ],
"annotations": {
"list": [
{
- "datasource": "yardstick",
+ "datasource": "${DS_YARDSTICK}",
"enable": true,
"hide": false,
"iconColor": "rgb(248, 255, 0)",
"limit": 100,
"name": "status",
- "query": "SELECT tg__0.collect_stats.Status FROM \"tc_heat_rfc2544_ipv4_1rule_1flow_trex\" WHERE \"tg__0.collect_stats.Status\"='Success' AND task_id='$task_id'",
+ "query": "SELECT tg__0.collect_stats.Status FROM $test_name WHERE \"tg__0.collect_stats.Status\"='Success' AND task_id='$task_id'",
"showIn": 0,
"titleColumn": "Status",
"type": "alert"
@@ -19,7 +49,7 @@
"gnetId": null,
"graphTooltip": 0,
"hideControls": false,
- "id": 2,
+ "id": null,
"links": [],
"refresh": false,
"rows": [
@@ -32,7 +62,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "yardstick",
+ "datasource": "${DS_YARDSTICK}",
"fill": 0,
"hideTimeOverride": true,
"id": 3,
@@ -172,7 +202,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "yardstick",
+ "datasource": "${DS_YARDSTICK}",
"fill": 1,
"id": 1,
"legend": {
@@ -202,7 +232,7 @@
"alias": "$col",
"dsType": "influxdb",
"groupBy": [],
- "measurement": "tc_heat_rfc2544_ipv4_1rule_1flow_trex",
+ "measurement": "/^$test_name$/",
"orderByTime": "ASC",
"policy": "default",
"refId": "A",
@@ -285,7 +315,7 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": null,
+ "datasource": "${DS_YARDSTICK}",
"fill": 1,
"id": 5,
"legend": {
@@ -408,7 +438,7 @@
"bars": true,
"dashLength": 10,
"dashes": false,
- "datasource": null,
+ "datasource": "${DS_YARDSTICK}",
"fill": 1,
"id": 4,
"legend": {
@@ -723,11 +753,8 @@
"list": [
{
"allValue": null,
- "current": {
- "text": "tc_heat_rfc2544_ipv4_1rule_1flow_trex",
- "value": "tc_heat_rfc2544_ipv4_1rule_1flow_trex"
- },
- "datasource": "yardstick",
+ "current": {},
+ "datasource": "${DS_YARDSTICK}",
"hide": 0,
"includeAll": false,
"label": "test_name",
@@ -746,11 +773,8 @@
},
{
"allValue": null,
- "current": {
- "text": "fdb337ec-11ea-410f-b45e-83f30edb7590",
- "value": "fdb337ec-11ea-410f-b45e-83f30edb7590"
- },
- "datasource": "yardstick",
+ "current": {},
+ "datasource": "${DS_YARDSTICK}",
"hide": 0,
"includeAll": false,
"label": "task_id",
@@ -770,7 +794,6 @@
{
"allValue": null,
"current": {
- "tags": [],
"text": "64B + 128B + 512B",
"value": [
"64B",
@@ -881,5 +904,5 @@
},
"timezone": "",
"title": "RFC2544 Multi framesize",
- "version": 14
+ "version": 15
}
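
With the hard-coded ``yardstick`` datasource replaced by the ``${DS_YARDSTICK}``
input and the ``__inputs``/``__requires`` blocks added, both dashboards can be
imported into any Grafana instance and bound to a local InfluxDB datasource at
import time. A hedged sketch of doing this through Grafana's dashboard import
HTTP API follows; the URL, credentials and datasource name are assumptions for
a default local setup, not values taken from this patch:

    import json
    import requests

    GRAFANA_URL = "http://127.0.0.1:3000"   # assumed local Grafana
    GRAFANA_AUTH = ("admin", "admin")       # assumed default credentials

    def import_dashboard(path, datasource_name="yardstick"):
        with open(path) as f:
            dashboard = json.load(f)
        payload = {
            "dashboard": dashboard,
            "overwrite": True,
            # Bind the DS_YARDSTICK input declared in "__inputs" to an
            # InfluxDB datasource that already exists in Grafana.
            "inputs": [{
                "name": "DS_YARDSTICK",
                "type": "datasource",
                "pluginId": "influxdb",
                "value": datasource_name,
            }],
        }
        resp = requests.post(GRAFANA_URL + "/api/dashboards/import",
                             json=payload, auth=GRAFANA_AUTH)
        resp.raise_for_status()
        return resp.json()

    # Example: import_dashboard("dashboard/RFC2544_2Port.json")
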
diff --git a/docs/testing/user/userguide/13-nsb-installation.rst b/docs/testing/user/userguide/13-nsb-installation.rst
index 0487dad9a..694521d2b 100644
--- a/docs/testing/user/userguide/13-nsb-installation.rst
+++ b/docs/testing/user/userguide/13-nsb-installation.rst
@@ -21,6 +21,7 @@ NSB Installation
.. _OVS-DPDK: http://docs.openvswitch.org/en/latest/intro/install/dpdk/
.. _devstack: https://docs.openstack.org/devstack/pike/>
+.. _OVS-DPDK-versions: http://docs.openvswitch.org/en/latest/faq/releases/
Abstract
--------
@@ -95,9 +96,10 @@ The ``nsb_setup.sh`` allows to:
1. Install Yardstick in specified mode: bare metal or container.
Refer :doc:`04-installation`.
2. Install package dependencies on remote servers used as traffic generator or
- sample VNF. Add such servers to ``install-inventory.ini`` file to either
+ sample VNF. Install DPDK, sample VNFs, TREX, collectd.
+ Add such servers to ``install-inventory.ini`` file to either
``yardstick-standalone`` or ``yardstick-baremetal`` server groups.
- Configures IOMMU, hugepages, open file limits, CPU isolation, etc.
+ It configures IOMMU, hugepages, open file limits, CPU isolation, etc.
3. Build VM image either nsb or normal. The nsb VM image is used to run
Yardstick sample VNF tests, like vFW, vACL, vCGNAPT, etc.
The normal VM image is used to run Yardstick ping tests in OpenStack context.
@@ -136,21 +138,25 @@ Modify the Yardstick installation inventory used by Ansible::
[yardstick:children]
jumphost
- [yardstick-standalone]
- standalone ansible_host=192.168.2.51 ansible_connection=ssh
-
[yardstick-baremetal]
- baremetal ansible_host=192.168.2.52 ansible_connection=ssh
+ baremetal ansible_host=192.168.2.51 ansible_connection=ssh
+
+ [yardstick-standalone]
+ standalone ansible_host=192.168.2.52 ansible_connection=ssh
[all:vars]
- arch_amd64=amd64
- arch_arm64=arm64
- inst_mode_baremetal=baremetal
- inst_mode_container=container
- inst_mode_container_pull=container_pull
- ubuntu_archive={"amd64": "http://archive.ubuntu.com/ubuntu/", "arm64": "http://ports.ubuntu.com/ubuntu-ports/"}
- ansible_user=root
- ansible_ssh_pass=root # OR ansible_ssh_private_key_file=/root/.ssh/id_rsa
+ # Uncomment credentials below if needed
+ ansible_user=root
+ ansible_ssh_pass=root
+ # ansible_ssh_private_key_file=/root/.ssh/id_rsa
+ # When the IMG_PROPERTY passed is neither normal nor nsb, set
+ # "path_to_vm=/path/to/image" to add the image to OpenStack
+ # path_to_img=/tmp/workspace/yardstick-image.img
+
+ # List of CPUs to be isolated (not used by default)
+ # Grub line will be extended with:
+ # "isolcpus=<ISOL_CPUS> nohz=on nohz_full=<ISOL_CPUS> rcu_nocbs=1<ISOL_CPUS>"
+ # ISOL_CPUS=2-27,30-55 # physical CPUs for all NUMA nodes, four CPUs reserved
.. warning::
@@ -178,14 +184,18 @@ Modify the Yardstick installation inventory used by Ansible::
.. note::
CPU isolation can be applied to the remote servers, like:
- ISOL_CPUS=2-27,30-55
- Uncomment and modify accordingly in ``install-inventory.ini`` file.
+ ISOL_CPUS=2-27,30-55. Uncomment and modify accordingly in
+ ``install-inventory.ini`` file.
By default ``nsb_setup.sh`` pulls Yardstick image based on Ubuntu 16.04 from
docker hub and starts container, builds NSB VM image based on Ubuntu 16.04,
installs packages to the servers given in ``yardstick-standalone`` and
``yardstick-baremetal`` host groups.
+To pull the Yardstick container image based on Ubuntu 18, run::
+
+ ./nsb_setup.sh -i opnfv/yardstick-ubuntu-18.04:latest
+
To change default behavior modify parameters for ``install.yaml`` in
``nsb_setup.sh`` file.
@@ -196,11 +206,15 @@ To execute an installation for a **BareMetal** or a **Standalone context**::
./nsb_setup.sh
-
To execute an installation for an **OpenStack** context::
./nsb_setup.sh <path to admin-openrc.sh>
+.. note::
+
+ Yardstick may not be operational after a kernel update of the Linux distribution
+ if it was installed before the update. Run ``nsb_setup.sh`` again to resolve this.
+
.. warning::
The Yardstick VM image (NSB or normal) cannot be built inside a VM.
@@ -217,11 +231,75 @@ execute::
docker exec -it yardstick bash
+.. note::
+
+ It may be necessary to configure the tty in the Docker container to extend the
+ command line character length, for example:
+
+ stty rows 58 cols 234
+
It will also automatically download all the packages needed for NSB Testing
-setup. Refer chapter :doc:`04-installation` for more on Docker
+setup. Refer to chapter :doc:`04-installation` for more on Docker.
**Install Yardstick using Docker (recommended)**
+Bare Metal context example
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Let's assume there are three servers acting as the TG, the sample VNF DUT and the
+jump host.
+
+Perform the following steps to install NSB:
+
+1. Clone the Yardstick repo to the jump host.
+2. Add the TG and DUT servers to the ``yardstick-baremetal`` group in the
+ ``install-inventory.ini`` file to install NSB and its dependencies. Install
+ Python on the servers.
+3. Start the deployment using the Docker image based on Ubuntu 16:
+
+.. code-block:: console
+
+ ./nsb_setup.sh
+
+4. Reboot the bare metal servers.
+5. Enter the Yardstick container, modify the pod yaml file and run the tests.
+
+Standalone context example for Ubuntu 18
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Let's assume there are three servers acting as the TG, the sample VNF DUT and the
+jump host. Ubuntu 18 is installed on all servers.
+
+Perform the following steps to install NSB:
+
+1. Clone the Yardstick repo to the jump host.
+2. Add the TG server to the ``yardstick-baremetal`` group in the
+ ``install-inventory.ini`` file to install NSB and its dependencies.
+ Add the server where the VM with the sample VNF will be deployed to the
+ ``yardstick-standalone`` group in the ``install-inventory.ini`` file.
+ The target VM image named ``yardstick-nsb-image.img`` will be placed in
+ ``/var/lib/libvirt/images/``.
+ Install Python on the servers.
+3. Modify ``nsb_setup.sh`` on the jump host:
+
+.. code-block:: console
+
+ ansible-playbook \
+ -e IMAGE_PROPERTY='nsb' \
+ -e OS_RELEASE='bionic' \
+ -e INSTALLATION_MODE='container_pull' \
+ -e YARD_IMAGE_ARCH='amd64' ${extra_args} \
+ -i install-inventory.ini install.yaml
+
+4. Start the deployment with the Yardstick Docker image based on Ubuntu 18:
+
+.. code-block:: console
+
+ ./nsb_setup.sh -i opnfv/yardstick-ubuntu-18.04:latest -o <openrc_file>
+
+5. Reboot the servers.
+6. Enter the Yardstick container, modify the pod yaml file and run the tests.
+
+
System Topology
---------------
@@ -881,6 +959,144 @@ Update contexts section
cidr: '152.16.40.10/24'
gateway_ip: '152.16.100.20'
+OVS-DPDK configuration options
+++++++++++++++++++++++++++++++
+
+A number of configuration options are available for the OVS-DPDK context in a
+test case. They are mostly used for performance tuning.
+
+OVS-DPDK properties:
+''''''''''''''''''''
+
+OVS-DPDK properties example under *ovs_properties* section:
+
+ .. code-block:: console
+
+ ovs_properties:
+ version:
+ ovs: 2.8.1
+ dpdk: 17.05.2
+ pmd_threads: 4
+ pmd_cpu_mask: "0x3c"
+ ram:
+ socket_0: 2048
+ socket_1: 2048
+ queues: 2
+ vpath: "/usr/local"
+ max_idle: 30000
+ lcore_mask: 0x02
+ dpdk_pmd-rxq-affinity:
+ 0: "0:2,1:2"
+ 1: "0:2,1:2"
+ 2: "0:3,1:3"
+ 3: "0:3,1:3"
+ vhost_pmd-rxq-affinity:
+ 0: "0:3,1:3"
+ 1: "0:3,1:3"
+ 2: "0:4,1:4"
+ 3: "0:4,1:4"
+
+OVS-DPDK properties description:
+
+ +-------------------------+-------------------------------------------------+
+ | Parameters | Detail |
+ +=========================+=================================================+
+ | version || Version of OVS and DPDK to be installed |
+ | || There is a relation between OVS and DPDK |
+ | | version which can be found at |
+ | | `OVS-DPDK-versions`_ |
+ | || By default OVS: 2.6.0, DPDK: 16.07.2 |
+ +-------------------------+-------------------------------------------------+
+ | lcore_mask || Core bitmask used during DPDK initialization |
+ | | where the non-datapath OVS-DPDK threads such |
+ | | as handler and revalidator threads run |
+ +-------------------------+-------------------------------------------------+
+ | pmd_cpu_mask || Core bitmask that sets which cores are used by |
+ | || OVS-DPDK for datapath packet processing |
+ +-------------------------+-------------------------------------------------+
+ | pmd_threads || Number of PMD threads used by OVS-DPDK for |
+ | | datapath |
+ | || This core mask is evaluated in Yardstick |
+ | || It will be used if pmd_cpu_mask is not given |
+ | || Default is 2 |
+ +-------------------------+-------------------------------------------------+
+ | ram || Amount of RAM to be used for each socket, MB |
+ | || Default is 2048 MB |
+ +-------------------------+-------------------------------------------------+
+ | queues || Number of RX queues used for DPDK physical |
+ | | interface |
+ +-------------------------+-------------------------------------------------+
+ | dpdk_pmd-rxq-affinity || RX queue assignment to PMD threads for DPDK |
+ | || e.g.: <port number> : <queue-id>:<core-id> |
+ +-------------------------+-------------------------------------------------+
+ | vhost_pmd-rxq-affinity || RX queue assignment to PMD threads for vhost |
+ | || e.g.: <port number> : <queue-id>:<core-id> |
+ +-------------------------+-------------------------------------------------+
+ | vpath || User path for openvswitch files |
+ | || Default is ``/usr/local`` |
+ +-------------------------+-------------------------------------------------+
+ | max_idle || The maximum time that idle flows will remain |
+ | | cached in the datapath, ms |
+ +-------------------------+-------------------------------------------------+
+
+
+VM image properties
+'''''''''''''''''''
+
+VM image properties example under *flavor* section:
+
+ .. code-block:: console
+
+ flavor:
+ images: <path>
+ ram: 8192
+ extra_specs:
+ machine_type: 'pc-i440fx-xenial'
+ hw:cpu_sockets: 1
+ hw:cpu_cores: 6
+ hw:cpu_threads: 2
+ hw_socket: 0
+ cputune: |
+ <cputune>
+ <vcpupin vcpu="0" cpuset="7"/>
+ <vcpupin vcpu="1" cpuset="8"/>
+ ...
+ <vcpupin vcpu="11" cpuset="18"/>
+ <emulatorpin cpuset="11"/>
+ </cputune>
+
+VM image properties description:
+
+ +-------------------------+-------------------------------------------------+
+ | Parameters | Detail |
+ +=========================+=================================================+
+ | images || Path to the VM image generated by |
+ | | ``nsb_setup.sh`` |
+ | || Default path is ``/var/lib/libvirt/images/`` |
+ | || Default file name ``yardstick-nsb-image.img`` |
+ | | or ``yardstick-image.img`` |
+ +-------------------------+-------------------------------------------------+
+ | ram || Amount of RAM to be used for VM |
+ | || Default is 4096 MB |
+ +-------------------------+-------------------------------------------------+
+ | hw:cpu_sockets || Number of sockets provided to the guest VM |
+ | || Default is 1 |
+ +-------------------------+-------------------------------------------------+
+ | hw:cpu_cores || Number of cores provided to the guest VM |
+ | || Default is 2 |
+ +-------------------------+-------------------------------------------------+
+ | hw:cpu_threads || Number of threads provided to the guest VM |
+ | || Default is 2 |
+ +-------------------------+-------------------------------------------------+
+ | hw_socket || Generate vcpu cpuset from given HW socket |
+ | || Default is 0 |
+ +-------------------------+-------------------------------------------------+
+ | cputune || Maps virtual cpu with logical cpu |
+ +-------------------------+-------------------------------------------------+
+ | machine_type || Machine type to be emulated in VM |
+ | || Default is 'pc-i440fx-xenial' |
+ +-------------------------+-------------------------------------------------+
+
OpenStack with SR-IOV support
-----------------------------
diff --git a/docs/testing/user/userguide/14-nsb-operation.rst b/docs/testing/user/userguide/14-nsb-operation.rst
index 941a0bb65..69ffb8a3b 100644
--- a/docs/testing/user/userguide/14-nsb-operation.rst
+++ b/docs/testing/user/userguide/14-nsb-operation.rst
@@ -640,3 +640,37 @@ A testcase can be started with the following command as an example:
.. code-block:: bash
yardstick task start /yardstick/samples/vnf_samples/nsut/vpe/tc_baremetal_rfc2544_ipv4_1flow_64B_ixia.yaml
+
+Preparing test run of vIPSEC test case
+--------------------------------------
+
+Location of vIPSEC test cases: ``samples/vnf_samples/nsut/ipsec/``.
+
+Before running a specific vIPSEC test case using NSB, the following dependencies
+have to be preinstalled and properly configured:
+
+- VPP
+
+.. code-block:: console
+
+ export UBUNTU="xenial"
+ export RELEASE=".stable.1810"
+ sudo rm /etc/apt/sources.list.d/99fd.io.list
+ echo "deb [trusted=yes] https://nexus.fd.io/content/repositories/fd.io$RELEASE.ubuntu.$UBUNTU.main/ ./" | sudo tee -a /etc/apt/sources.list.d/99fd.io.list
+ sudo apt-get update
+ sudo apt-get install vpp vpp-lib vpp-plugin vpp-dbg vpp-dev vpp-api-java vpp-api-python vpp-api-lua
+
+- VAT templates
+
+ VAT templates are required for the VPP API.
+
+.. code-block:: console
+
+ mkdir -p /opt/nsb_bin/vpp/templates/
+ echo 'exec trace add dpdk-input 50' > /opt/nsb_bin/vpp/templates/enable_dpdk_traces.vat
+ echo 'exec trace add vhost-user-input 50' > /opt/nsb_bin/vpp/templates/enable_vhost_user_traces.vat
+ echo 'exec trace add memif-input 50' > /opt/nsb_bin/vpp/templates/enable_memif_traces.vat
+ cat > /opt/nsb_bin/vpp/templates/dump_interfaces.vat << EOL
+ sw_interface_dump
+ dump_interface_table
+ quit
+ EOL
diff --git a/docs/testing/user/userguide/nsb/nsb-list-of-tcs.rst b/docs/testing/user/userguide/nsb/nsb-list-of-tcs.rst
index 6c18c7d89..a578216da 100644
--- a/docs/testing/user/userguide/nsb/nsb-list-of-tcs.rst
+++ b/docs/testing/user/userguide/nsb/nsb-list-of-tcs.rst
@@ -36,3 +36,4 @@ NSB PROX Test Case Descriptions
tc_vfw_rfc2544
tc_vfw_rfc2544_correlated
tc_vfw_rfc3511
+ tc_vpp_baremetal_crypto_ipsec
diff --git a/docs/testing/user/userguide/nsb/tc_vpp_baremetal_crypto_ipsec.rst b/docs/testing/user/userguide/nsb/tc_vpp_baremetal_crypto_ipsec.rst
new file mode 100644
index 000000000..6a4a37697
--- /dev/null
+++ b/docs/testing/user/userguide/nsb/tc_vpp_baremetal_crypto_ipsec.rst
@@ -0,0 +1,113 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International
+.. License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) OPNFV, 2019 Viosoft Corporation.
+
+***********************************************
+Yardstick Test Case Description: NSB VPP IPSEC
+***********************************************
+
++------------------------------------------------------------------------------+
+|NSB VPP test for vIPSEC characterization |
+| |
++--------------+---------------------------------------------------------------+
+|test case id | tc_baremetal_rfc2544_ipv4_{crypto_dev}_{crypto_alg} |
+| | |
+| | * crypto_dev = HW_cryptodev or SW_cryptodev; |
+| | * crypto_alg = aes-gcm or cbc-sha1; |
+| | |
++--------------+---------------------------------------------------------------+
+|metric | * Network Throughput NDR or PDR; |
+| | * Connections Per Second (CPS); |
+| | * Latency; |
+| | * Number of tunnels; |
+| | * TG Packets Out; |
+| | * TG Packets In; |
+| | * VNF Packets Out; |
+| | * VNF Packets In; |
+| | * Dropped packets; |
+| | |
++--------------+---------------------------------------------------------------+
+|test purpose | IPv4 IPsec tunnel mode performance test: |
+| | |
+| | * Finds and reports throughput NDR (Non Drop Rate) with zero |
+| | packet loss tolerance or throughput PDR (Partial Drop Rate) |
+| | with non-zero packet loss tolerance (LT) expressed in |
+| | number of packets transmitted. |
+| | |
+| | * The IPSEC test cases are implemented to run in baremetal |
+| | |
++--------------+---------------------------------------------------------------+
+|configuration | The IPSEC test cases are listed below: |
+| | |
+| | * tc_baremetal_rfc2544_ipv4_hw_aesgcm_IMIX_trex.yaml |
+| | * tc_baremetal_rfc2544_ipv4_hw_aesgcm_trex.yaml |
+| | * tc_baremetal_rfc2544_ipv4_hw_cbcsha1_IMIX_trex.yaml |
+| | * tc_baremetal_rfc2544_ipv4_hw_cbcsha1_trex.yaml |
+| | * tc_baremetal_rfc2544_ipv4_sw_aesgcm_IMIX_trex.yaml |
+| | * tc_baremetal_rfc2544_ipv4_sw_aesgcm_trex.yaml |
+| | * tc_baremetal_rfc2544_ipv4_sw_cbcsha1_IMIX_trex.yaml |
+| | * tc_baremetal_rfc2544_ipv4_sw_cbcsha1_trex.yaml |
+| | |
+| | Test duration is set as 500sec for each test. |
+| | Packet size set as 64 bytes or higher. |
+| | Number of tunnels set as 1 or higher. |
+| | Number of connections set as 1 or higher |
+| | These can be configured |
+| | |
++--------------+---------------------------------------------------------------+
+|test tool | Vector Packet Processing (VPP) |
+| | The VPP platform is an extensible framework that provides |
+| | out-of-the-box production quality switch/router functionality.|
+| | Its high performance, proven technology, its modularity and, |
+| | flexibility and rich feature set |
+| | |
++--------------+---------------------------------------------------------------+
+|applicability | This VPP IPSEC test cases can be configured with different: |
+| | |
+| | * packet sizes; |
+| | * test durations; |
+| | * tolerated loss; |
+| | * crypto device type; |
+| | * number of physical cores; |
+| | * number of tunnels; |
+| | * number of connections; |
+| | * encryption algorithms - integrity algorithm; |
+| | |
+| | Default values exist. |
+| | |
++--------------+---------------------------------------------------------------+
+|pre-test | For Baremetal test cases, VPP and DPDK must be installed in |
+|conditions | the hosts where the test is executed. The pod.yaml file must |
+| | have the necessary system and NIC information |
+| | |
++--------------+---------------------------------------------------------------+
+|test sequence | description and expected result |
+| | |
++--------------+---------------------------------------------------------------+
+|step 1 | For Baremetal test: The TG and VNF are started on the hosts |
+| | based on the pod file. |
+| | |
++--------------+---------------------------------------------------------------+
+|step 2 | Yardstick is connected with the TG and VNF by using ssh. |
+| | The test will resolve the topology and instantiate the VNF |
+| | and TG and collect the KPI's/metrics. |
+| | |
++--------------+---------------------------------------------------------------+
+|step 3 | Test packets are generated by TG on links to DUTs. If the |
+| | number of dropped packets is more than the tolerated loss |
+| | the line rate or throughput is halved. This is done until |
+| | the dropped packets are within an acceptable tolerated loss. |
+| | |
+| | The KPI is the number of packets per second for a packet size |
+| | specified in the test case with an accepted minimal packet |
+| | loss for the default configuration. |
+| | |
++--------------+---------------------------------------------------------------+
+|step 4 | In Baremetal test: The test quits the application and unbind |
+| | the DPDK ports. |
+| | |
++--------------+---------------------------------------------------------------+
+|test verdict | The test case will achieve a Throughput with an accepted |
+| | minimal tolerated packet loss. |
++--------------+---------------------------------------------------------------+
\ No newline at end of file
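
Step 3 of the test sequence above describes the RFC 2544 style rate search: the
offered rate is halved until the measured loss is within the tolerated loss. The
sketch below is only an illustration of that idea, not the Yardstick
implementation; ``send_at_rate`` is a hypothetical helper returning the
(sent, received) packet counts for one trial at a given rate:

    def find_throughput(line_rate_pps, tolerated_loss, send_at_rate,
                        min_rate_pps=1000):
        """Halve the offered rate until packet loss is within tolerance."""
        rate = float(line_rate_pps)
        while rate > min_rate_pps:
            sent, received = send_at_rate(rate)
            loss = (sent - received) / float(sent)
            if loss <= tolerated_loss:
                return rate          # dropped packets within tolerated loss
            rate = rate / 2.0        # halve the offered rate and retry
        return min_rate_pps
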
diff --git a/yardstick/network_services/vnf_generic/vnf/vcmts_vnf.py b/yardstick/network_services/vnf_generic/vnf/vcmts_vnf.py
new file mode 100755
index 000000000..0b48ef4e9
--- /dev/null
+++ b/yardstick/network_services/vnf_generic/vnf/vcmts_vnf.py
@@ -0,0 +1,273 @@
+# Copyright (c) 2019 Viosoft Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import os
+import yaml
+
+from influxdb import InfluxDBClient
+
+from yardstick.network_services.vnf_generic.vnf.sample_vnf import SetupEnvHelper
+from yardstick.common import constants
+from yardstick.common import exceptions
+from yardstick.network_services.vnf_generic.vnf.base import GenericVNF
+from yardstick.network_services.vnf_generic.vnf.sample_vnf import ScenarioHelper
+from yardstick.network_services.vnf_generic.vnf.vnf_ssh_helper import VnfSshHelper
+from yardstick.network_services.utils import get_nsb_option
+
+
+LOG = logging.getLogger(__name__)
+
+
+class InfluxDBHelper(object):
+
+ INITIAL_VALUE = 'now() - 1m'
+
+ def __init__(self, vcmts_influxdb_ip, vcmts_influxdb_port):
+ self._vcmts_influxdb_ip = vcmts_influxdb_ip
+ self._vcmts_influxdb_port = vcmts_influxdb_port
+ self._last_upstream_rx = self.INITIAL_VALUE
+ self._last_values_time = dict()
+
+ def start(self):
+ self._read_client = InfluxDBClient(host=self._vcmts_influxdb_ip,
+ port=self._vcmts_influxdb_port,
+ database='collectd')
+ self._write_client = InfluxDBClient(host=constants.INFLUXDB_IP,
+ port=constants.INFLUXDB_PORT,
+ database='collectd')
+
+ def _get_last_value_time(self, measurement):
+ if measurement in self._last_values_time:
+ return self._last_values_time[measurement]
+ return self.INITIAL_VALUE
+
+ def _set_last_value_time(self, measurement, time):
+ self._last_values_time[measurement] = "'" + time + "'"
+
+ def _query_measurement(self, measurement):
+ # There is a delay before influxdb flushes the data
+ query = "SELECT * FROM " + measurement + " WHERE time > " \
+ + self._get_last_value_time(measurement) \
+ + " ORDER BY time ASC;"
+ query_result = self._read_client.query(query)
+ if len(query_result.keys()) == 0:
+ return None
+ return query_result.get_points(measurement)
+
+ def _rw_measurment(self, measurement, columns):
+ query_result = self._query_measurement(measurement)
+ if query_result is None:
+ return
+
+ points_to_write = list()
+ for entry in query_result:
+ point = {
+ "measurement": measurement,
+ "tags": {
+ "type": entry['type'],
+ "host": entry['host']
+ },
+ "time": entry['time'],
+ "fields": {}
+ }
+
+ for column in columns:
+ if column == 'value':
+ point["fields"][column] = float(entry[column])
+ else:
+ point["fields"][column] = entry[column]
+
+ points_to_write.append(point)
+ self._set_last_value_time(measurement, entry['time'])
+
+ # Write the points to yardstick database
+ if self._write_client.write_points(points_to_write):
+ LOG.debug("%d new points written to '%s' measurement",
+ len(points_to_write), measurement)
+
+ def copy_kpi(self):
+ self._rw_measurment("cpu_value", ["instance", "type_instance", "value"])
+ self._rw_measurment("cpufreq_value", ["type_instance", "value"])
+ self._rw_measurment("downstream_rx", ["value"])
+ self._rw_measurment("downstream_tx", ["value"])
+ self._rw_measurment("downstream_value", ["value"])
+ self._rw_measurment("ds_per_cm_value", ["instance", "value"])
+ self._rw_measurment("intel_rdt_value", ["instance", "type_instance", "value"])
+ self._rw_measurment("turbostat_value", ["instance", "type_instance", "value"])
+ self._rw_measurment("upstream_rx", ["value"])
+ self._rw_measurment("upstream_tx", ["value"])
+ self._rw_measurment("upstream_value", ["value"])
+
+
+class VcmtsdSetupEnvHelper(SetupEnvHelper):
+
+ BASE_PARAMETERS = "export LD_LIBRARY_PATH=/opt/collectd/lib:;"\
+ + "export CMK_PROC_FS=/host/proc;"
+
+ def build_us_parameters(self, pod_cfg):
+ return self.BASE_PARAMETERS + " " \
+ + " /opt/bin/cmk isolate --conf-dir=/etc/cmk" \
+ + " --socket-id=" + pod_cfg['cpu_socket_id'] \
+ + " --pool=shared" \
+ + " /vcmts-config/run_upstream.sh " + pod_cfg['sg_id'] \
+ + " " + pod_cfg['ds_core_type'] \
+ + " " + pod_cfg['num_ofdm'] + "ofdm" \
+ + " " + pod_cfg['num_subs'] + "cm" \
+ + " " + pod_cfg['cm_crypto'] \
+ + " " + pod_cfg['qat'] \
+ + " " + pod_cfg['net_us'] \
+ + " " + pod_cfg['power_mgmt']
+
+ def build_ds_parameters(self, pod_cfg):
+ return self.BASE_PARAMETERS + " " \
+ + " /opt/bin/cmk isolate --conf-dir=/etc/cmk" \
+ + " --socket-id=" + pod_cfg['cpu_socket_id'] \
+ + " --pool=" + pod_cfg['ds_core_type'] \
+ + " /vcmts-config/run_downstream.sh " + pod_cfg['sg_id'] \
+ + " " + pod_cfg['ds_core_type'] \
+ + " " + pod_cfg['ds_core_pool_index'] \
+ + " " + pod_cfg['num_ofdm'] + "ofdm" \
+ + " " + pod_cfg['num_subs'] + "cm" \
+ + " " + pod_cfg['cm_crypto'] \
+ + " " + pod_cfg['qat'] \
+ + " " + pod_cfg['net_ds'] \
+ + " " + pod_cfg['power_mgmt']
+
+ def build_cmd(self, stream_dir, pod_cfg):
+ if stream_dir == 'ds':
+ return self.build_ds_parameters(pod_cfg)
+ else:
+ return self.build_us_parameters(pod_cfg)
+
+ def run_vcmtsd(self, stream_dir, pod_cfg):
+ cmd = self.build_cmd(stream_dir, pod_cfg)
+ LOG.debug("Executing %s", cmd)
+ self.ssh_helper.send_command(cmd)
+
+ def setup_vnf_environment(self):
+ pass
+
+
+class VcmtsVNF(GenericVNF):
+
+ RUN_WAIT = 4
+
+ def __init__(self, name, vnfd):
+ super(VcmtsVNF, self).__init__(name, vnfd)
+ self.name = name
+ self.bin_path = get_nsb_option('bin_path', '')
+ self.scenario_helper = ScenarioHelper(self.name)
+ self.ssh_helper = VnfSshHelper(self.vnfd_helper.mgmt_interface, self.bin_path)
+
+ self.setup_helper = VcmtsdSetupEnvHelper(self.vnfd_helper,
+ self.ssh_helper,
+ self.scenario_helper)
+
+ def extract_pod_cfg(self, vcmts_pods_cfg, sg_id):
+ for pod_cfg in vcmts_pods_cfg:
+ if pod_cfg['sg_id'] == sg_id:
+ return pod_cfg
+
+ def instantiate(self, scenario_cfg, context_cfg):
+ self._update_collectd_options(scenario_cfg, context_cfg)
+ self.scenario_helper.scenario_cfg = scenario_cfg
+ self.context_cfg = context_cfg
+
+ options = scenario_cfg.get('options', {})
+
+ try:
+ self.vcmts_influxdb_ip = options['vcmts_influxdb_ip']
+ self.vcmts_influxdb_port = options['vcmts_influxdb_port']
+ except KeyError:
+ raise KeyError("Missing destination InfluxDB details in scenario" \
+ " section of the task definition file")
+
+ try:
+ vcmtsd_values_filepath = options['vcmtsd_values']
+ except KeyError:
+ raise KeyError("Missing vcmtsd_values key in scenario options" \
+ "section of the task definition file")
+
+ if not os.path.isfile(vcmtsd_values_filepath):
+ raise RuntimeError("The vcmtsd_values file path provided " \
+ "does not exists")
+
+ # The yaml_loader.py (SafeLoader) underlying regex has an issue
+ # with reading PCI addresses (they get parsed as floats), so the
+ # BaseLoader is used here.
+ with open(vcmtsd_values_filepath) as stream:
+ vcmtsd_values = yaml.load(stream, Loader=yaml.BaseLoader)
+
+ if vcmtsd_values is None:
+ raise RuntimeError("Error reading vcmtsd_values file provided (" +
+ vcmtsd_values_filepath + ")")
+
+ vnf_options = options.get(self.name, {})
+ sg_id = str(vnf_options['sg_id'])
+ stream_dir = vnf_options['stream_dir']
+
+ try:
+ vcmts_pods_cfg = vcmtsd_values['topology']['vcmts_pods']
+ except KeyError:
+ raise KeyError("Missing vcmts_pods key in the " \
+ "vcmtsd_values file provided")
+
+ pod_cfg = self.extract_pod_cfg(vcmts_pods_cfg, sg_id)
+ if pod_cfg is None:
+ raise exceptions.IncorrectConfig(error_msg="Service group " + sg_id + " not found")
+
+ self.setup_helper.run_vcmtsd(stream_dir, pod_cfg)
+
+ def _update_collectd_options(self, scenario_cfg, context_cfg):
+ scenario_options = scenario_cfg.get('options', {})
+ generic_options = scenario_options.get('collectd', {})
+ scenario_node_options = scenario_options.get(self.name, {})\
+ .get('collectd', {})
+ context_node_options = context_cfg.get('nodes', {})\
+ .get(self.name, {}).get('collectd', {})
+
+ options = generic_options
+ self._update_options(options, scenario_node_options)
+ self._update_options(options, context_node_options)
+
+ self.setup_helper.collectd_options = options
+
+ def _update_options(self, options, additional_options):
+ for k, v in additional_options.items():
+ if isinstance(v, dict) and k in options:
+ options[k].update(v)
+ else:
+ options[k] = v
+
+ def wait_for_instantiate(self):
+ pass
+
+ def terminate(self):
+ pass
+
+ def scale(self, flavor=""):
+ pass
+
+ def collect_kpi(self):
+ self.influxdb_helper.copy_kpi()
+ return {"n/a": "n/a"}
+
+ def start_collect(self):
+ self.influxdb_helper = InfluxDBHelper(self.vcmts_influxdb_ip,
+ self.vcmts_influxdb_port)
+ self.influxdb_helper.start()
+
+ def stop_collect(self):
+ pass
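
The KPI path of this VNF is a relay: ``start_collect()`` opens a read client
against the vCMTS collectd InfluxDB and a write client against the local
Yardstick InfluxDB, and each ``collect_kpi()`` call forwards only the points
newer than the previous copy. A minimal usage sketch follows, assuming the
InfluxDB address and port from the sample scenario options and an environment
where this module is importable:

    import time

    from yardstick.network_services.vnf_generic.vnf.vcmts_vnf import InfluxDBHelper

    helper = InfluxDBHelper("10.80.5.150", 8086)   # vCMTS collectd InfluxDB (assumed)
    helper.start()                                 # opens the read and write clients

    for _ in range(10):
        helper.copy_kpi()        # forwards points newer than the last copy
        time.sleep(10)
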
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vcmts_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vcmts_vnf.py
new file mode 100755
index 000000000..11e3d6e17
--- /dev/null
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vcmts_vnf.py
@@ -0,0 +1,651 @@
+# Copyright (c) 2019 Viosoft Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+import mock
+import copy
+import os
+
+from yardstick.tests.unit.network_services.vnf_generic.vnf.test_base import mock_ssh
+from yardstick.network_services.vnf_generic.vnf.base import VnfdHelper
+from yardstick.network_services.vnf_generic.vnf import vcmts_vnf
+from yardstick.common import exceptions
+
+from influxdb.resultset import ResultSet
+
+NAME = "vnf__0"
+
+
+class TestInfluxDBHelper(unittest.TestCase):
+
+ def test___init__(self):
+ influxdb_helper = vcmts_vnf.InfluxDBHelper("localhost", 8086)
+ self.assertEqual(influxdb_helper._vcmts_influxdb_ip, "localhost")
+ self.assertEqual(influxdb_helper._vcmts_influxdb_port, 8086)
+ self.assertIsNotNone(influxdb_helper._last_upstream_rx)
+ self.assertIsNotNone(influxdb_helper._last_values_time)
+
+ def test_start(self):
+ influxdb_helper = vcmts_vnf.InfluxDBHelper("localhost", 8086)
+ influxdb_helper.start()
+ self.assertIsNotNone(influxdb_helper._read_client)
+ self.assertIsNotNone(influxdb_helper._write_client)
+
+ def test__get_last_value_time(self):
+ influxdb_helper = vcmts_vnf.InfluxDBHelper("localhost", 8086)
+ self.assertEqual(influxdb_helper._get_last_value_time('cpu_value'),
+ vcmts_vnf.InfluxDBHelper.INITIAL_VALUE)
+
+ influxdb_helper._last_values_time['cpu_value'] = "RANDOM"
+ self.assertEqual(influxdb_helper._get_last_value_time('cpu_value'),
+ "RANDOM")
+
+ def test__set_last_value_time(self):
+ influxdb_helper = vcmts_vnf.InfluxDBHelper("localhost", 8086)
+ influxdb_helper._set_last_value_time('cpu_value', '00:00')
+ self.assertEqual(influxdb_helper._last_values_time['cpu_value'],
+ "'00:00'")
+
+ def test__query_measurement(self):
+ influxdb_helper = vcmts_vnf.InfluxDBHelper("localhost", 8086)
+ influxdb_helper._read_client = mock.MagicMock()
+
+ resulted_generator = mock.MagicMock()
+ resulted_generator.keys.return_value = []
+ influxdb_helper._read_client.query.return_value = resulted_generator
+ query_result = influxdb_helper._query_measurement('cpu_value')
+ self.assertIsNone(query_result)
+
+ resulted_generator = mock.MagicMock()
+ resulted_generator.keys.return_value = ["", ""]
+ resulted_generator.get_points.return_value = ResultSet({"":""})
+ influxdb_helper._read_client.query.return_value = resulted_generator
+ query_result = influxdb_helper._query_measurement('cpu_value')
+ self.assertIsNotNone(query_result)
+
+ def test__rw_measurment(self):
+ influxdb_helper = vcmts_vnf.InfluxDBHelper("localhost", 8086)
+ influxdb_helper._query_measurement = mock.MagicMock()
+ influxdb_helper._query_measurement.return_value = None
+ influxdb_helper._rw_measurment('cpu_value', [])
+ self.assertEqual(len(influxdb_helper._last_values_time), 0)
+
+ entry = {
+ "type":"type",
+ "host":"host",
+ "time":"time",
+ "id": "1",
+ "value": "1.0"
+ }
+ influxdb_helper._query_measurement.return_value = [entry]
+ influxdb_helper._write_client = mock.MagicMock()
+ influxdb_helper._rw_measurment('cpu_value', ["id", "value"])
+ self.assertEqual(len(influxdb_helper._last_values_time), 1)
+ influxdb_helper._write_client.write_points.assert_called_once()
+
+ def test_copy_kpi(self):
+ influxdb_helper = vcmts_vnf.InfluxDBHelper("localhost", 8086)
+ influxdb_helper._rw_measurment = mock.MagicMock()
+ influxdb_helper.copy_kpi()
+ influxdb_helper._rw_measurment.assert_called()
+
+
+class TestVcmtsdSetupEnvHelper(unittest.TestCase):
+ POD_CFG = {
+ "cm_crypto": "aes",
+ "cpu_socket_id": "0",
+ "ds_core_pool_index": "2",
+ "ds_core_type": "exclusive",
+ "net_ds": "1a:02.1",
+ "net_us": "1a:02.0",
+ "num_ofdm": "1",
+ "num_subs": "100",
+ "power_mgmt": "pm_on",
+ "qat": "qat_off",
+ "service_group_config": "",
+ "sg_id": "0",
+ "vcmtsd_image": "vcmts-d:perf"
+ }
+
+ OPTIONS = {
+ "pktgen_values": "/tmp/pktgen_values.yaml",
+ "tg__0": {
+ "pktgen_id": 0
+ },
+ "vcmts_influxdb_ip": "10.80.5.150",
+ "vcmts_influxdb_port": 8086,
+ "vcmtsd_values": "/tmp/vcmtsd_values.yaml",
+ "vnf__0": {
+ "sg_id": 0,
+ "stream_dir": "us"
+ },
+ "vnf__1": {
+ "sg_id": 0,
+ "stream_dir": "ds"
+ }
+ }
+
+ def setUp(self):
+ vnfd_helper = VnfdHelper(
+ TestVcmtsVNF.VNFD['vnfd:vnfd-catalog']['vnfd'][0])
+ ssh_helper = mock.Mock()
+ scenario_helper = mock.Mock()
+ scenario_helper.options = self.OPTIONS
+
+ self.setup_helper = vcmts_vnf.VcmtsdSetupEnvHelper(
+ vnfd_helper, ssh_helper, scenario_helper)
+
+ def _build_us_parameters(self):
+ return vcmts_vnf.VcmtsdSetupEnvHelper.BASE_PARAMETERS + " " \
+ + " /opt/bin/cmk isolate --conf-dir=/etc/cmk" \
+ + " --socket-id=" + str(self.POD_CFG['cpu_socket_id']) \
+ + " --pool=shared" \
+ + " /vcmts-config/run_upstream.sh " + self.POD_CFG['sg_id'] \
+ + " " + self.POD_CFG['ds_core_type'] \
+ + " " + str(self.POD_CFG['num_ofdm']) + "ofdm" \
+ + " " + str(self.POD_CFG['num_subs']) + "cm" \
+ + " " + self.POD_CFG['cm_crypto'] \
+ + " " + self.POD_CFG['qat'] \
+ + " " + self.POD_CFG['net_us'] \
+ + " " + self.POD_CFG['power_mgmt']
+
+ def test_build_us_parameters(self):
+ constructed = self._build_us_parameters()
+ result = self.setup_helper.build_us_parameters(self.POD_CFG)
+ self.assertEqual(constructed, result)
+
+ def _build_ds_parameters(self):
+ return vcmts_vnf.VcmtsdSetupEnvHelper.BASE_PARAMETERS + " " \
+ + " /opt/bin/cmk isolate --conf-dir=/etc/cmk" \
+ + " --socket-id=" + str(self.POD_CFG['cpu_socket_id']) \
+ + " --pool=" + self.POD_CFG['ds_core_type'] \
+ + " /vcmts-config/run_downstream.sh " + self.POD_CFG['sg_id'] \
+ + " " + self.POD_CFG['ds_core_type'] \
+ + " " + str(self.POD_CFG['ds_core_pool_index']) \
+ + " " + str(self.POD_CFG['num_ofdm']) + "ofdm" \
+ + " " + str(self.POD_CFG['num_subs']) + "cm" \
+ + " " + self.POD_CFG['cm_crypto'] \
+ + " " + self.POD_CFG['qat'] \
+ + " " + self.POD_CFG['net_ds'] \
+ + " " + self.POD_CFG['power_mgmt']
+
+ def test_build_ds_parameters(self):
+ constructed = self._build_ds_parameters()
+ result = self.setup_helper.build_ds_parameters(self.POD_CFG)
+ self.assertEqual(constructed, result)
+
+ def test_build_cmd(self):
+ us_constructed = self._build_us_parameters()
+ us_result = self.setup_helper.build_cmd('us', self.POD_CFG)
+ self.assertEqual(us_constructed, us_result)
+ ds_constructed = self._build_ds_parameters()
+ ds_result = self.setup_helper.build_cmd('ds', self.POD_CFG)
+ self.assertEqual(ds_constructed, ds_result)
+
+ def test_run_vcmtsd(self):
+ us_constructed = self._build_us_parameters()
+
+ vnfd_helper = VnfdHelper(
+ TestVcmtsVNF.VNFD['vnfd:vnfd-catalog']['vnfd'][0])
+ ssh_helper = mock.MagicMock()
+ scenario_helper = mock.Mock()
+ scenario_helper.options = self.OPTIONS
+
+ setup_helper = vcmts_vnf.VcmtsdSetupEnvHelper(
+ vnfd_helper, ssh_helper, scenario_helper)
+
+ setup_helper.run_vcmtsd('us', self.POD_CFG)
+ ssh_helper.send_command.assert_called_with(us_constructed)
+
+ def test_setup_vnf_environment(self):
+ self.assertIsNone(self.setup_helper.setup_vnf_environment())
+
+class TestVcmtsVNF(unittest.TestCase):
+
+ VNFD = {'vnfd:vnfd-catalog':
+ {'vnfd':
+ [{
+ "benchmark": {
+ "kpi": [
+ "upstream/bits_per_second"
+ ]
+ },
+ "connection-point": [
+ {
+ "name": "xe0",
+ "type": "VPORT"
+ },
+ {
+ "name": "xe1",
+ "type": "VPORT"
+ }
+ ],
+ "description": "vCMTS Upstream-Downstream Kubernetes",
+ "id": "VcmtsVNF",
+ "mgmt-interface": {
+ "ip": "192.168.100.35",
+ "key_filename": "/tmp/yardstick_key-81dcca91",
+ "user": "root",
+ "vdu-id": "vcmtsvnf-kubernetes"
+ },
+ "name": "vcmtsvnf",
+ "short-name": "vcmtsvnf",
+ "vdu": [
+ {
+ "description": "vCMTS Upstream-Downstream Kubernetes",
+ "external-interface": [],
+ "id": "vcmtsvnf-kubernetes",
+ "name": "vcmtsvnf-kubernetes"
+ }
+ ],
+ "vm-flavor": {
+ "memory-mb": "4096",
+ "vcpu-count": "4"
+ }
+ }]
+ }
+ }
+
+ POD_CFG = [
+ {
+ "cm_crypto": "aes",
+ "cpu_socket_id": "0",
+ "ds_core_pool_index": "2",
+ "ds_core_type": "exclusive",
+ "net_ds": "1a:02.1",
+ "net_us": "1a:02.0",
+ "num_ofdm": "1",
+ "num_subs": "100",
+ "power_mgmt": "pm_on",
+ "qat": "qat_off",
+ "service_group_config": "",
+ "sg_id": "0",
+ "vcmtsd_image": "vcmts-d:perf"
+ },
+ ]
+
+ SCENARIO_CFG = {
+ "nodes": {
+ "tg__0": "pktgen0-k8syardstick-afae18b2",
+ "vnf__0": "vnf0us-k8syardstick-afae18b2",
+ "vnf__1": "vnf0ds-k8syardstick-afae18b2"
+ },
+ "options": {
+ "pktgen_values": "/tmp/pktgen_values.yaml",
+ "tg__0": {
+ "pktgen_id": 0
+ },
+ "vcmts_influxdb_ip": "10.80.5.150",
+ "vcmts_influxdb_port": 8086,
+ "vcmtsd_values": "/tmp/vcmtsd_values.yaml",
+ "vnf__0": {
+ "sg_id": 0,
+ "stream_dir": "us"
+ },
+ "vnf__1": {
+ "sg_id": 0,
+ "stream_dir": "ds"
+ }
+ },
+ "task_id": "afae18b2-9902-477f-8128-49afde7c3040",
+ "task_path": "samples/vnf_samples/nsut/cmts",
+ "tc": "tc_vcmts_k8s_pktgen",
+ "topology": "k8s_vcmts_topology.yaml",
+ "traffic_profile": "../../traffic_profiles/fixed.yaml",
+ "type": "NSPerf"
+ }
+
+ CONTEXT_CFG = {
+ "networks": {
+ "flannel": {
+ "name": "flannel"
+ },
+ "xe0": {
+ "name": "xe0"
+ },
+ "xe1": {
+ "name": "xe1"
+ }
+ },
+ "nodes": {
+ "tg__0": {
+ "VNF model": "../../vnf_descriptors/tg_vcmts_tpl.yaml",
+ "interfaces": {
+ "flannel": {
+ "local_ip": "192.168.24.110",
+ "local_mac": None,
+ "network_name": "flannel"
+ },
+ "xe0": {
+ "local_ip": "192.168.24.110",
+ "local_mac": None,
+ "network_name": "xe0"
+ },
+ "xe1": {
+ "local_ip": "192.168.24.110",
+ "local_mac": None,
+ "network_name": "xe1"
+ }
+ },
+ "ip": "192.168.24.110",
+ "key_filename": "/tmp/yardstick_key-afae18b2",
+ "member-vnf-index": "1",
+ "name": "pktgen0-k8syardstick-afae18b2",
+ "private_ip": "192.168.24.110",
+ "service_ports": [
+ {
+ "name": "ssh",
+ "node_port": 17153,
+ "port": 22,
+ "protocol": "TCP",
+ "target_port": 22
+ },
+ {
+ "name": "lua",
+ "node_port": 51250,
+ "port": 22022,
+ "protocol": "TCP",
+ "target_port": 22022
+ }
+ ],
+ "ssh_port": 17153,
+ "user": "root",
+ "vnfd-id-ref": "tg__0"
+ },
+ "vnf__0": {
+ "VNF model": "../../vnf_descriptors/vnf_vcmts_tpl.yaml",
+ "interfaces": {
+ "flannel": {
+ "local_ip": "192.168.100.53",
+ "local_mac": None,
+ "network_name": "flannel"
+ },
+ "xe0": {
+ "local_ip": "192.168.100.53",
+ "local_mac": None,
+ "network_name": "xe0"
+ },
+ "xe1": {
+ "local_ip": "192.168.100.53",
+ "local_mac": None,
+ "network_name": "xe1"
+ }
+ },
+ "ip": "192.168.100.53",
+ "key_filename": "/tmp/yardstick_key-afae18b2",
+ "member-vnf-index": "3",
+ "name": "vnf0us-k8syardstick-afae18b2",
+ "private_ip": "192.168.100.53",
+ "service_ports": [
+ {
+ "name": "ssh",
+ "node_port": 34027,
+ "port": 22,
+ "protocol": "TCP",
+ "target_port": 22
+ },
+ {
+ "name": "lua",
+ "node_port": 32580,
+ "port": 22022,
+ "protocol": "TCP",
+ "target_port": 22022
+ }
+ ],
+ "ssh_port": 34027,
+ "user": "root",
+ "vnfd-id-ref": "vnf__0"
+ },
+ "vnf__1": {
+ "VNF model": "../../vnf_descriptors/vnf_vcmts_tpl.yaml",
+ "interfaces": {
+ "flannel": {
+ "local_ip": "192.168.100.52",
+ "local_mac": None,
+ "network_name": "flannel"
+ },
+ "xe0": {
+ "local_ip": "192.168.100.52",
+ "local_mac": None,
+ "network_name": "xe0"
+ },
+ "xe1": {
+ "local_ip": "192.168.100.52",
+ "local_mac": None,
+ "network_name": "xe1"
+ }
+ },
+ "ip": "192.168.100.52",
+ "key_filename": "/tmp/yardstick_key-afae18b2",
+ "member-vnf-index": "4",
+ "name": "vnf0ds-k8syardstick-afae18b2",
+ "private_ip": "192.168.100.52",
+ "service_ports": [
+ {
+ "name": "ssh",
+ "node_port": 58661,
+ "port": 22,
+ "protocol": "TCP",
+ "target_port": 22
+ },
+ {
+ "name": "lua",
+ "node_port": 58233,
+ "port": 22022,
+ "protocol": "TCP",
+ "target_port": 22022
+ }
+ ],
+ "ssh_port": 58661,
+ "user": "root",
+ "vnfd-id-ref": "vnf__1"
+ },
+ }
+ }
+
+ VCMTSD_VALUES_PATH = "/tmp/vcmtsd_values.yaml"
+
+ VCMTSD_VALUES = \
+ "serviceAccount: cmk-serviceaccount\n" \
+ "topology:\n" \
+ " vcmts_replicas: 16\n" \
+ " vcmts_pods:\n" \
+ " - service_group_config:\n" \
+ " sg_id: 0\n" \
+ " net_us: 18:02.0\n" \
+ " net_ds: 18:02.1\n" \
+ " num_ofdm: 4\n" \
+ " num_subs: 300\n" \
+ " cm_crypto: aes\n" \
+ " qat: qat_off\n" \
+ " power_mgmt: pm_on\n" \
+ " cpu_socket_id: 0\n" \
+ " ds_core_type: exclusive\n" \
+ " ds_core_pool_index: 0\n" \
+ " vcmtsd_image: vcmts-d:feat"
+
+ VCMTSD_VALUES_INCOMPLETE = \
+ "serviceAccount: cmk-serviceaccount\n" \
+ "topology:\n" \
+ " vcmts_replicas: 16"
+
+ def setUp(self):
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ self.vnf = vcmts_vnf.VcmtsVNF(NAME, vnfd)
+
+ def test___init__(self, *args):
+ self.assertIsNotNone(self.vnf.setup_helper)
+
+ def test_extract_pod_cfg(self):
+ pod_cfg = self.vnf.extract_pod_cfg(self.POD_CFG, "0")
+ self.assertIsNotNone(pod_cfg)
+ self.assertEqual(pod_cfg['sg_id'], '0')
+ pod_cfg = self.vnf.extract_pod_cfg(self.POD_CFG, "1")
+ self.assertIsNone(pod_cfg)
+
+ def test_instantiate_missing_influxdb_info(self):
+ err_scenario_cfg = copy.deepcopy(self.SCENARIO_CFG)
+ err_scenario_cfg['options'].pop('vcmts_influxdb_ip', None)
+ with self.assertRaises(KeyError):
+ self.vnf.instantiate(err_scenario_cfg, self.CONTEXT_CFG)
+
+ def test_instantiate_missing_vcmtsd_values_file(self):
+ if os.path.isfile(self.VCMTSD_VALUES_PATH):
+ os.remove(self.VCMTSD_VALUES_PATH)
+ err_scenario_cfg = copy.deepcopy(self.SCENARIO_CFG)
+ err_scenario_cfg['options']['vcmtsd_values'] = self.VCMTSD_VALUES_PATH
+ with self.assertRaises(RuntimeError):
+ self.vnf.instantiate(err_scenario_cfg, self.CONTEXT_CFG)
+
+ def test_instantiate_empty_vcmtsd_values_file(self):
+ yaml_sample = open(self.VCMTSD_VALUES_PATH, 'w')
+ yaml_sample.write("")
+ yaml_sample.close()
+
+ err_scenario_cfg = copy.deepcopy(self.SCENARIO_CFG)
+ err_scenario_cfg['options']['vcmtsd_values'] = self.VCMTSD_VALUES_PATH
+ with self.assertRaises(RuntimeError):
+ self.vnf.instantiate(err_scenario_cfg, self.CONTEXT_CFG)
+
+ if os.path.isfile(self.VCMTSD_VALUES_PATH):
+ os.remove(self.VCMTSD_VALUES_PATH)
+
+ def test_instantiate_missing_vcmtsd_values_key(self):
+ err_scenario_cfg = copy.deepcopy(self.SCENARIO_CFG)
+ err_scenario_cfg['options'].pop('vcmtsd_values', None)
+ with self.assertRaises(KeyError):
+ self.vnf.instantiate(err_scenario_cfg, self.CONTEXT_CFG)
+
+ def test_instantiate_invalid_vcmtsd_values(self):
+ yaml_sample = open(self.VCMTSD_VALUES_PATH, 'w')
+ yaml_sample.write(self.VCMTSD_VALUES_INCOMPLETE)
+ yaml_sample.close()
+
+ err_scenario_cfg = copy.deepcopy(self.SCENARIO_CFG)
+ with self.assertRaises(KeyError):
+ self.vnf.instantiate(err_scenario_cfg, self.CONTEXT_CFG)
+
+ if os.path.isfile(self.VCMTSD_VALUES_PATH):
+ os.remove(self.VCMTSD_VALUES_PATH)
+
+ def test_instantiate_invalid_sg_id(self):
+ yaml_sample = open(self.VCMTSD_VALUES_PATH, 'w')
+ yaml_sample.write(self.VCMTSD_VALUES)
+ yaml_sample.close()
+
+ err_scenario_cfg = copy.deepcopy(self.SCENARIO_CFG)
+ err_scenario_cfg['options'][NAME]['sg_id'] = 8
+ with self.assertRaises(exceptions.IncorrectConfig):
+ self.vnf.instantiate(err_scenario_cfg, self.CONTEXT_CFG)
+
+ if os.path.isfile(self.VCMTSD_VALUES_PATH):
+ os.remove(self.VCMTSD_VALUES_PATH)
+
+ @mock.patch('yardstick.network_services.vnf_generic.vnf.vcmts_vnf.VnfSshHelper')
+ def test_instantiate_all_valid(self, ssh, *args):
+ mock_ssh(ssh)
+
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ vnf = vcmts_vnf.VcmtsVNF(NAME, vnfd)
+
+ yaml_sample = open(self.VCMTSD_VALUES_PATH, 'w')
+ yaml_sample.write(self.VCMTSD_VALUES)
+ yaml_sample.close()
+
+ vnf.instantiate(self.SCENARIO_CFG, self.CONTEXT_CFG)
+ self.assertEqual(vnf.vcmts_influxdb_ip, "10.80.5.150")
+ self.assertEqual(vnf.vcmts_influxdb_port, 8086)
+
+ if os.path.isfile(self.VCMTSD_VALUES_PATH):
+ os.remove(self.VCMTSD_VALUES_PATH)
+
+ def test__update_collectd_options(self):
+ scenario_cfg = {'options':
+ {'collectd':
+ {'interval': 3,
+ 'plugins':
+ {'plugin3': {'param': 3}}},
+ 'vnf__0':
+ {'collectd':
+ {'interval': 2,
+ 'plugins':
+ {'plugin3': {'param': 2},
+ 'plugin2': {'param': 2}}}}}}
+ context_cfg = {'nodes':
+ {'vnf__0':
+ {'collectd':
+ {'interval': 1,
+ 'plugins':
+ {'plugin3': {'param': 1},
+ 'plugin2': {'param': 1},
+ 'plugin1': {'param': 1}}}}}}
+ expected = {'interval': 1,
+ 'plugins':
+ {'plugin3': {'param': 1},
+ 'plugin2': {'param': 1},
+ 'plugin1': {'param': 1}}}
+
+ self.vnf._update_collectd_options(scenario_cfg, context_cfg)
+ self.assertEqual(self.vnf.setup_helper.collectd_options, expected)
+
+ def test__update_options(self):
+ options1 = {'interval': 1,
+ 'param1': 'value1',
+ 'plugins':
+ {'plugin3': {'param': 3},
+ 'plugin2': {'param': 1},
+ 'plugin1': {'param': 1}}}
+ options2 = {'interval': 2,
+ 'param2': 'value2',
+ 'plugins':
+ {'plugin4': {'param': 4},
+ 'plugin2': {'param': 2},
+ 'plugin1': {'param': 2}}}
+ expected = {'interval': 1,
+ 'param1': 'value1',
+ 'param2': 'value2',
+ 'plugins':
+ {'plugin4': {'param': 4},
+ 'plugin3': {'param': 3},
+ 'plugin2': {'param': 1},
+ 'plugin1': {'param': 1}}}
+
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ vnf = vcmts_vnf.VcmtsVNF('vnf1', vnfd)
+ vnf._update_options(options2, options1)
+ self.assertEqual(options2, expected)
+
+ def test_wait_for_instantiate(self):
+ self.assertIsNone(self.vnf.wait_for_instantiate())
+
+ def test_terminate(self):
+ self.assertIsNone(self.vnf.terminate())
+
+ def test_scale(self):
+ self.assertIsNone(self.vnf.scale())
+
+ def test_collect_kpi(self):
+ self.vnf.influxdb_helper = mock.MagicMock()
+ self.vnf.collect_kpi()
+ self.vnf.influxdb_helper.copy_kpi.assert_called_once()
+
+ def test_start_collect(self):
+ self.vnf.vcmts_influxdb_ip = "localhost"
+ self.vnf.vcmts_influxdb_port = 8800
+
+ self.assertIsNone(self.vnf.start_collect())
+ self.assertIsNotNone(self.vnf.influxdb_helper)
+
+ def test_stop_collect(self):
+ self.assertIsNone(self.vnf.stop_collect())