-rw-r--r--  .gitignore | 3
-rw-r--r--  3rd_party/ixia/ixnetrfc2544.tcl | 192
-rw-r--r--  3rd_party/ixia/ixnetrfc2544_bad_l2_crc.tcl | 6632
-rwxr-xr-x  3rd_party/ixia/ixnetrfc2544v2.tcl | 4
-rwxr-xr-x  3rd_party/ixia/ixnetrfc2544v2_random_ip_crc.tcl | 4
-rwxr-xr-x  3rd_party/ixia/ixnetrfc2544v2_random_udp_crc.tcl | 4
-rwxr-xr-x  3rd_party/ixia/pass_fail.tcl | 28
-rw-r--r--  INFO.yaml | 67
-rwxr-xr-x  check | 4
-rwxr-xr-x  ci/build-vsperf.sh | 37
-rw-r--r--  conf/00_common.conf | 34
-rwxr-xr-x  conf/01_testcases.conf | 146
-rw-r--r--  conf/02_vswitch.conf | 29
-rw-r--r--  conf/03_traffic.conf | 131
-rw-r--r--  conf/04_vnf.conf | 29
-rw-r--r--  conf/05_collector.conf | 51
-rw-r--r--  conf/07_loadgen.conf | 18
-rw-r--r--  conf/08_llcmanagement.conf | 62
-rw-r--r--  conf/10_custom.conf | 7
-rw-r--r--  conf/11_openstack.conf | 43
-rw-r--r--  conf/12_k8s.conf | 41
-rw-r--r--  conf/__init__.py | 20
-rw-r--r--  conf/integration/01_testcases.conf | 598
-rw-r--r--  conf/integration/01a_testcases_l34_vxlan.conf | 18
-rw-r--r--  conf/integration/01b_dpdk_regression_tests.conf | 260
-rw-r--r--  conf/integration/01c_trex_vm_tests.conf | 182
-rw-r--r--  conf/integration/02_vswitch.conf | 6
-rw-r--r--  conf/kubernetes/01_testcases.conf | 12
-rw-r--r--  core/component_factory.py | 50
-rwxr-xr-x  core/loader/loader.py | 37
-rw-r--r--  core/loader/loader_servant.py | 4
-rw-r--r--  core/pktfwd_controller.py | 16
-rw-r--r--  core/pod_controller.py | 93
-rw-r--r--  core/results/results_constants.py | 13
-rw-r--r--  core/traffic_controller.py | 9
-rw-r--r--  core/traffic_controller_rfc2544.py | 5
-rw-r--r--  core/traffic_controller_rfc2889.py | 2
-rw-r--r--  core/vnf_controller.py | 3
-rw-r--r--  core/vswitch_controller.py | 44
-rw-r--r--  core/vswitch_controller_clean.py | 34
-rw-r--r--  core/vswitch_controller_op2p.py | 66
-rw-r--r--  core/vswitch_controller_p2p.py | 139
-rw-r--r--  core/vswitch_controller_ptunp.py | 68
-rw-r--r--  core/vswitch_controller_pxp.py | 142
-rw-r--r--  docs/conf.py | 6
-rw-r--r--  docs/conf.yaml | 3
-rw-r--r--  docs/index.rst | 24
-rw-r--r--  docs/k8s/index.rst | 40
-rw-r--r--  docs/lma/index.rst | 18
-rw-r--r--  docs/lma/logs/devguide.rst | 145
-rw-r--r--  docs/lma/logs/images/elasticsearch.png | bin 0 -> 36046 bytes
-rw-r--r--  docs/lma/logs/images/fluentd-cs.png | bin 0 -> 40226 bytes
-rw-r--r--  docs/lma/logs/images/fluentd-ss.png | bin 0 -> 18331 bytes
-rw-r--r--  docs/lma/logs/images/nginx.png | bin 0 -> 36737 bytes
-rw-r--r--  docs/lma/logs/images/setup.png | bin 0 -> 43503 bytes
-rw-r--r--  docs/lma/logs/userguide.rst | 386
-rw-r--r--  docs/lma/metrics/devguide.rst | 469
-rw-r--r--  docs/lma/metrics/images/dataflow.png | bin 0 -> 42443 bytes
-rw-r--r--  docs/lma/metrics/images/setup.png | bin 0 -> 15019 bytes
-rw-r--r--  docs/lma/metrics/userguide.rst | 226
-rw-r--r--  docs/openstack/index.rst | 39
-rw-r--r--  docs/release/release-notes/release-notes.rst | 189
-rw-r--r--  docs/requirements.txt | 2
-rw-r--r--  docs/testing/developer/devguide/design/trafficgen_integration_guide.rst | 17
-rw-r--r--  docs/testing/developer/devguide/design/vswitchperf_design.rst | 99
-rw-r--r--  docs/testing/developer/devguide/index.rst | 6
-rw-r--r--  docs/testing/developer/devguide/requirements/ietf_draft/rfc8204-vsperf-bmwg-vswitch-opnfv.rst | 2
-rw-r--r--  docs/testing/developer/devguide/requirements/vswitchperf_ltd.rst | 124
-rw-r--r--  docs/testing/developer/devguide/requirements/vswitchperf_ltp.rst | 20
-rw-r--r--  docs/testing/developer/devguide/results/scenario.rst | 2
-rw-r--r--  docs/testing/user/configguide/index.rst | 5
-rw-r--r--  docs/testing/user/configguide/installation.rst | 24
-rw-r--r--  docs/testing/user/configguide/tools.rst | 227
-rw-r--r--  docs/testing/user/configguide/trafficgen.rst | 189
-rw-r--r--  docs/testing/user/userguide/index.rst | 2
-rw-r--r--  docs/testing/user/userguide/integration.rst | 12
-rw-r--r--  docs/testing/user/userguide/testlist.rst | 58
-rw-r--r--  docs/testing/user/userguide/teststeps.rst | 37
-rw-r--r--  docs/testing/user/userguide/testusage.rst | 186
-rw-r--r--  docs/testing/user/userguide/trafficcapture.rst | 297
-rw-r--r--  docs/xtesting/index.rst | 85
-rwxr-xr-x  docs/xtesting/vsperf-xtesting.png | bin 0 -> 93202 bytes
-rw-r--r--  pods/__init__.py | 19
-rw-r--r--  pods/papi/__init__.py | 19
-rw-r--r--  pods/papi/papi.py | 143
-rw-r--r--  pods/pod/__init__.py | 18
-rw-r--r--  pods/pod/pod.py | 63
-rw-r--r--  pylintrc | 2
-rw-r--r--  requirements.txt | 32
-rw-r--r--  src/__init__.py | 1
-rwxr-xr-x  src/dpdk/Makefile | 4
-rw-r--r--  src/dpdk/dpdk.py | 4
-rw-r--r--  src/dpdk/testpmd_proc.py | 6
-rw-r--r--  src/ovs/dpctl.py | 2
-rw-r--r--  src/ovs/ofctl.py | 37
-rw-r--r--  src/package-list.mk | 10
-rw-r--r--  src/trex/Makefile | 2
-rw-r--r--  systems/README.md | 4
-rwxr-xr-x  systems/build_base_machine.sh | 31
-rwxr-xr-x  systems/centos/build_base_machine.sh | 13
-rwxr-xr-x  systems/centos/prepare_python_env.sh | 5
-rwxr-xr-x  systems/debian/build_base_machine.sh | 39
-rwxr-xr-x  systems/debian/prepare_python_env.sh | 28
-rw-r--r--  systems/fedora/24/build_base_machine.sh | 1
-rw-r--r--  systems/fedora/24/prepare_python_env.sh | 3
-rw-r--r--  systems/fedora/25/build_base_machine.sh | 1
-rw-r--r--  systems/fedora/25/prepare_python_env.sh | 1
-rw-r--r--  systems/fedora/26/build_base_machine.sh | 1
-rw-r--r--  systems/fedora/26/prepare_python_env.sh | 1
-rwxr-xr-x  systems/opensuse/42.2/build_base_machine.sh | 1
-rwxr-xr-x  systems/opensuse/42.2/prepare_python_env.sh | 1
-rwxr-xr-x  systems/opensuse/42.3/build_base_machine.sh | 1
-rwxr-xr-x  systems/opensuse/42.3/prepare_python_env.sh | 1
-rwxr-xr-x  systems/opensuse/prepare_python_env.sh | 1
-rwxr-xr-x  systems/rhel/7.2/build_base_machine.sh | 35
-rwxr-xr-x  systems/rhel/7.2/prepare_python_env.sh | 9
-rwxr-xr-x  systems/rhel/7.3/build_base_machine.sh | 35
-rwxr-xr-x  systems/rhel/7.3/prepare_python_env.sh | 7
-rwxr-xr-x  systems/rhel/7.5/build_base_machine.sh | 111
-rwxr-xr-x  systems/rhel/7.5/prepare_python_env.sh | 28
-rwxr-xr-x  systems/sles/15/build_base_machine.sh | 10
-rwxr-xr-x  systems/sles/15/prepare_python_env.sh | 1
-rwxr-xr-x  systems/ubuntu/14.04/build_base_machine.sh | 3
-rwxr-xr-x  systems/ubuntu/14.04/prepare_python_env.sh | 3
-rwxr-xr-x  systems/ubuntu/build_base_machine.sh | 1
-rw-r--r--  testcases/__init__.py | 1
-rw-r--r--  testcases/integration.py | 2
-rw-r--r--  testcases/k8s_performance.py | 39
-rw-r--r--  testcases/performance.py | 2
-rw-r--r--  testcases/testcase.py | 146
-rwxr-xr-x  tools/collectors/cadvisor/__init__.py | 17
-rw-r--r--  tools/collectors/cadvisor/cadvisor.py | 218
-rwxr-xr-x  tools/collectors/collectd/__init__.py | 17
-rw-r--r--  tools/collectors/collectd/collectd.py | 294
-rw-r--r--  tools/collectors/collectd/collectd_bucky.py | 770
-rwxr-xr-x  tools/collectors/multicmd/__init__.py | 17
-rw-r--r--  tools/collectors/multicmd/multicmd.py | 138
-rw-r--r--  tools/collectors/sysmetrics/pidstat.py | 52
-rw-r--r--  tools/confgenwizard/__init__.py | 0
-rw-r--r--  tools/confgenwizard/nicinfo.py | 236
-rw-r--r--  tools/confgenwizard/vsperfwiz.py | 736
-rw-r--r--  tools/docker/client/__init__.py | 1
-rw-r--r--  tools/docker/client/vsperf_client.py | 771
-rw-r--r--  tools/docker/client/vsperfclient.conf | 39
-rw-r--r--  tools/docker/deployment/auto/controller/Dockerfile | 23
-rw-r--r--  tools/docker/deployment/auto/controller/list.env | 14
-rw-r--r--  tools/docker/deployment/auto/controller/vsperf/__init__.py | 1
-rw-r--r--  tools/docker/deployment/auto/controller/vsperf/collectd.conf | 49
-rw-r--r--  tools/docker/deployment/auto/controller/vsperf/trex_cfg.yaml | 20
-rw-r--r--  tools/docker/deployment/auto/controller/vsperf/vsperf_controller.py | 392
-rw-r--r--  tools/docker/deployment/auto/docker-compose.yml | 22
-rw-r--r--  tools/docker/deployment/interactive/controller/Dockerfile | 21
-rw-r--r--  tools/docker/deployment/interactive/controller/vsperf/__init__.py | 1
-rw-r--r--  tools/docker/deployment/interactive/controller/vsperf/vsperf_controller.py | 360
-rw-r--r--  tools/docker/deployment/interactive/docker-compose.yml | 21
-rw-r--r--  tools/docker/docs/architecture.txt | 70
-rw-r--r--  tools/docker/docs/client.rst | 99
-rw-r--r--  tools/docker/docs/test.rst | 86
-rw-r--r--  tools/docker/libs/proto/__init__.py | 1
-rwxr-xr-x  tools/docker/libs/proto/vsperf.proto | 109
-rw-r--r--  tools/docker/libs/utils/__init__.py | 1
-rw-r--r--  tools/docker/libs/utils/exceptions.py | 65
-rw-r--r--  tools/docker/libs/utils/ssh.py | 546
-rw-r--r--  tools/docker/libs/utils/utils.py | 41
-rwxr-xr-x  tools/docker/prepare.sh | 33
-rw-r--r--  tools/docker/results/README.md | 48
-rw-r--r--  tools/docker/results/docker-compose.yml | 80
-rw-r--r--  tools/docker/results/grafana/dashboards/container_metrics_dashboard.json | 1291
-rw-r--r--  tools/docker/results/jupyter/Dockerfile | 16
-rw-r--r--  tools/docker/results/logstash/pipeline/02-beats-input.conf | 6
-rw-r--r--  tools/docker/results/logstash/pipeline/20-collectd-input.conf | 14
-rw-r--r--  tools/docker/results/logstash/pipeline/30-output.conf | 7
-rw-r--r--  tools/docker/results/notebooks/testresult-analysis.ipynb | 783
-rw-r--r--  tools/docker/results/resultsdb/cases.json | 1
-rw-r--r--  tools/docker/results/resultsdb/init_db.py | 110
-rw-r--r--  tools/docker/results/resultsdb/pods.json | 382
-rw-r--r--  tools/docker/results/resultsdb/projects.json | 8
-rw-r--r--  tools/docker/testcontrol/auto/controller/Dockerfile | 23
-rw-r--r--  tools/docker/testcontrol/auto/controller/list.env | 13
-rw-r--r--  tools/docker/testcontrol/auto/controller/vsperf/__init__.py | 1
-rw-r--r--  tools/docker/testcontrol/auto/controller/vsperf/vsperf.conf | 21
-rw-r--r--  tools/docker/testcontrol/auto/controller/vsperf/vsperf_controller.py | 469
-rw-r--r--  tools/docker/testcontrol/auto/docker-compose.yml | 22
-rw-r--r--  tools/docker/testcontrol/interactive/controller/Dockerfile | 22
-rw-r--r--  tools/docker/testcontrol/interactive/controller/vsperf/__init__.py | 1
-rw-r--r--  tools/docker/testcontrol/interactive/controller/vsperf/output.txt | 1
-rw-r--r--  tools/docker/testcontrol/interactive/controller/vsperf/vsperf_controller.py | 706
-rw-r--r--  tools/docker/testcontrol/interactive/docker-compose.yml | 20
-rw-r--r--  tools/docker/vsperf/Dockerfile | 37
-rw-r--r--  tools/functions.py | 2
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/.ansible-lint | 3
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/README.md | 60
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/ansible.cfg | 9
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/hosts | 5
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/k8sclustermanagement.yml | 4
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/defaults/main.yml | 28
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/configMap-sriov-device-plugin.yaml | 20
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/kube-flannel-daemonset.yml | 606
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/multus-daemonset.yml | 251
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/ovs-daemonset.yml | 101
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/sriov-cni-daemonset.yaml | 47
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/sriov-device-plugin-daemonset.yaml | 127
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/userspace-daemonset.yml | 46
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-flannel.yml | 8
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-k8s-master.yml | 22
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-k8s-workers-drain.yml | 8
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-k8s-workers-reset.yml | 11
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-kubevirt-ovs.yml | 8
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-multus.yml | 8
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-sriov.yml | 30
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-userspace.yml | 8
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/cni-pre-deploy.yml | 17
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/configure_master_node.yml | 14
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-flannel.yml | 11
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-kubevirt-ovs.yml | 12
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-multus.yml | 10
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-sriov.yml | 26
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-userspace.yml | 13
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/foldersettings.yml | 10
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/main.yml | 83
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/workers.yml | 15
-rw-r--r--  tools/llc_management/__init__.py | 17
-rw-r--r--  tools/llc_management/rmd.py | 198
-rw-r--r--  tools/lma/ansible-client/ansible.cfg | 17
-rw-r--r--  tools/lma/ansible-client/hosts | 2
-rw-r--r--  tools/lma/ansible-client/playbooks/clean.yaml | 25
-rw-r--r--  tools/lma/ansible-client/playbooks/setup.yaml | 28
-rw-r--r--  tools/lma/ansible-client/roles/clean-collectd/main.yml | 44
-rw-r--r--  tools/lma/ansible-client/roles/clean-td-agent/tasks/main.yml | 28
-rw-r--r--  tools/lma/ansible-client/roles/collectd/files/collectd.conf.j2 | 44
-rw-r--r--  tools/lma/ansible-client/roles/collectd/tasks/main.yml | 60
-rw-r--r--  tools/lma/ansible-client/roles/td-agent/files/td-agent.conf | 63
-rw-r--r--  tools/lma/ansible-client/roles/td-agent/tasks/main.yml | 30
-rw-r--r--  tools/lma/ansible-server/ansible.cfg | 17
-rw-r--r--  tools/lma/ansible-server/group_vars/all.yml | 27
-rw-r--r--  tools/lma/ansible-server/hosts | 12
-rw-r--r--  tools/lma/ansible-server/playbooks/clean.yaml | 52
-rw-r--r--  tools/lma/ansible-server/playbooks/setup.yaml | 44
-rw-r--r--  tools/lma/ansible-server/roles/clean-k8s-cluster/tasks/main.yml | 34
-rw-r--r--  tools/lma/ansible-server/roles/clean-k8s-pre/tasks/main.yml | 65
-rw-r--r--  tools/lma/ansible-server/roles/clean-k8s-worker-reset/tasks/main.yml | 26
-rw-r--r--  tools/lma/ansible-server/roles/clean-logging/tasks/main.yml | 193
-rw-r--r--  tools/lma/ansible-server/roles/clean-monitoring/tasks/main.yml | 48
-rw-r--r--  tools/lma/ansible-server/roles/clean-nfs/tasks/main.yml | 44
-rw-r--r--  tools/lma/ansible-server/roles/k8s-master/tasks/main.yml | 49
-rw-r--r--  tools/lma/ansible-server/roles/k8s-pre/tasks/main.yml | 72
-rw-r--r--  tools/lma/ansible-server/roles/k8s-worker/tasks/main.yml | 24
-rw-r--r--  tools/lma/ansible-server/roles/logging/files/elastalert/ealert-conf-cm.yaml | 48
-rw-r--r--  tools/lma/ansible-server/roles/logging/files/elastalert/ealert-key-cm.yaml | 68
-rw-r--r--  tools/lma/ansible-server/roles/logging/files/elastalert/ealert-rule-cm.yaml | 132
-rw-r--r--  tools/lma/ansible-server/roles/logging/files/elastalert/elastalert.yaml | 76
-rw-r--r--  tools/lma/ansible-server/roles/logging/files/elasticsearch/elasticsearch.yaml | 231
-rw-r--r--  tools/lma/ansible-server/roles/logging/files/elasticsearch/user-secret.yaml | 23
-rw-r--r--  tools/lma/ansible-server/roles/logging/files/fluentd/fluent-cm.yaml | 525
-rw-r--r--  tools/lma/ansible-server/roles/logging/files/fluentd/fluent-service.yaml | 34
-rw-r--r--  tools/lma/ansible-server/roles/logging/files/fluentd/fluent.yaml | 65
-rw-r--r--  tools/lma/ansible-server/roles/logging/files/kibana/kibana.yaml | 23
-rw-r--r--  tools/lma/ansible-server/roles/logging/files/namespace.yaml | 17
-rw-r--r--  tools/lma/ansible-server/roles/logging/files/nginx/nginx-conf-cm.yaml | 36
-rw-r--r--  tools/lma/ansible-server/roles/logging/files/nginx/nginx-key-cm.yaml | 68
-rw-r--r--  tools/lma/ansible-server/roles/logging/files/nginx/nginx-service.yaml | 28
-rw-r--r--  tools/lma/ansible-server/roles/logging/files/nginx/nginx.yaml | 58
-rw-r--r--  tools/lma/ansible-server/roles/logging/files/persistentVolume.yaml | 105
-rw-r--r--  tools/lma/ansible-server/roles/logging/files/storageClass.yaml | 73
-rw-r--r--  tools/lma/ansible-server/roles/logging/tasks/main.yml | 165
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/alertmanager/alertmanager-config.yaml | 37
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/alertmanager/alertmanager-deployment.yaml | 62
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/alertmanager/alertmanager-service.yaml | 41
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/alertmanager/alertmanager1-deployment.yaml | 62
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/alertmanager/alertmanager1-service.yaml | 42
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/cadvisor/cadvisor-deamonset.yaml | 79
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/cadvisor/cadvisor-service.yaml | 30
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/collectd-exporter/collectd-exporter-deployment.yaml | 51
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/collectd-exporter/collectd-exporter-service.yaml | 35
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/grafana/grafana-datasource-config.yaml | 35
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/grafana/grafana-deployment.yaml | 68
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/grafana/grafana-pv.yaml | 31
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/grafana/grafana-pvc.yaml | 33
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/grafana/grafana-service.yaml | 36
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/kube-state-metrics/kube-state-metrics-deployment.yaml | 36
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/kube-state-metrics/kube-state-metrics-service.yaml | 26
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/monitoring-namespace.yaml | 18
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/node-exporter/nodeexporter-daemonset.yaml | 80
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/node-exporter/nodeexporter-service.yaml | 33
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/prometheus/main-prometheus-service.yaml | 35
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus-config.yaml | 609
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus-deployment.yaml | 73
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus-pv.yaml | 30
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus-pvc.yaml | 33
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus-service.yaml | 34
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus1-deployment.yaml | 73
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus1-service.yaml | 35
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/tasks/main.yml | 273
-rw-r--r--  tools/lma/ansible-server/roles/nfs/tasks/main.yml | 42
-rw-r--r--  tools/lma/jupyter-notebooks/Causation-Analysis.ipynb | 784
-rw-r--r--  tools/lma/logs/dockerfile/elastalert/Dockerfile | 23
-rw-r--r--  tools/lma/logs/dockerfile/fluentd/Dockerfile | 23
-rw-r--r--  tools/lma/logs/jupyter-notebooks/Trend-Analysis.ipynb | 308
-rw-r--r--  tools/lma/metrics/dashboard/cpu_usage_using.json | 750
-rw-r--r--  tools/lma/metrics/dashboard/memory_using.json | 337
-rw-r--r--  tools/lma/metrics/dashboard/ovs_stats_using.json | 854
-rw-r--r--  tools/lma/metrics/dashboard/rdt_using.json | 833
-rw-r--r--  tools/lma/metrics/jupyter-notebooks/Analysis-Monitoring-K8S.ipynb | 644
-rw-r--r--  tools/lma/metrics/jupyter-notebooks/Analysis-Monitoring-Local.ipynb | 913
-rw-r--r--  tools/lma/yamllintrc | 25
-rw-r--r--  tools/load_gen/stress_ng/stress_ng.py | 3
-rw-r--r--  tools/load_gen/stressorvm/__init__.py | 16
-rw-r--r--  tools/load_gen/stressorvm/stressor_vm.py | 155
-rw-r--r--  tools/md-testvnf/config.json | 11
-rw-r--r--  tools/md-testvnf/http/ks.cfg | 88
-rw-r--r--  tools/md-testvnf/playbook.yml | 36
-rw-r--r--  tools/md-testvnf/scripts/ansible.sh | 7
-rwxr-xr-x  tools/md-testvnf/scripts/deploycentostools.sh | 364
-rw-r--r--  tools/md-testvnf/scripts/sshConfig.sh | 10
-rw-r--r--  tools/md-testvnf/testVNF_image.json | 72
-rw-r--r--  tools/module_manager.py | 2
-rw-r--r--  tools/namespace.py | 9
-rw-r--r--  tools/networkcard.py | 2
-rw-r--r--  tools/os_deploy_tgen/__init__.py | 17
-rw-r--r--  tools/os_deploy_tgen/osclients/__init__.py | 17
-rw-r--r--  tools/os_deploy_tgen/osclients/glance.py | 34
-rwxr-xr-x  tools/os_deploy_tgen/osclients/heat.py | 156
-rw-r--r--  tools/os_deploy_tgen/osclients/neutron.py | 34
-rw-r--r--  tools/os_deploy_tgen/osclients/nova.py | 213
-rw-r--r--  tools/os_deploy_tgen/osclients/openstack.py | 82
-rw-r--r--  tools/os_deploy_tgen/osdt.py | 601
-rw-r--r--  tools/os_deploy_tgen/templates/hotfiles.md | 13
-rw-r--r--  tools/os_deploy_tgen/templates/l2.hot | 89
-rw-r--r--  tools/os_deploy_tgen/templates/l2_1c_1i.yaml | 8
-rw-r--r--  tools/os_deploy_tgen/templates/l2_1c_2i.yaml | 10
-rw-r--r--  tools/os_deploy_tgen/templates/l2_2c_2i.yaml | 10
-rw-r--r--  tools/os_deploy_tgen/templates/l2_old.hot | 93
-rw-r--r--  tools/os_deploy_tgen/templates/l2fip.hot | 122
-rw-r--r--  tools/os_deploy_tgen/templates/l2up.hot | 126
-rw-r--r--  tools/os_deploy_tgen/templates/l3.hot | 125
-rw-r--r--  tools/os_deploy_tgen/templates/l3_1c_2i.yaml | 11
-rw-r--r--  tools/os_deploy_tgen/templates/l3_2c_2i.yaml | 11
-rw-r--r--  tools/os_deploy_tgen/templates/scenario.yaml | 44
-rw-r--r--  tools/os_deploy_tgen/utilities/__init__.py | 17
-rw-r--r--  tools/os_deploy_tgen/utilities/utils.py | 183
-rwxr-xr-x  tools/pkt_gen/dummy/dummy.py | 20
-rwxr-xr-x  tools/pkt_gen/ixia/ixia.py | 41
-rwxr-xr-x  tools/pkt_gen/ixnet/ixnet.py | 96
-rw-r--r--  tools/pkt_gen/moongen/moongen.py | 45
-rw-r--r--  tools/pkt_gen/testcenter/testcenter-rfc2544-rest.py | 270
-rw-r--r--  tools/pkt_gen/testcenter/testcenter.py | 55
-rwxr-xr-x  tools/pkt_gen/trafficgen/trafficgen.py | 5
-rw-r--r--  tools/pkt_gen/trex/trex_client.py (renamed from tools/pkt_gen/trex/trex.py) | 423
-rw-r--r--  tools/pkt_gen/xena/XenaDriver.py | 155
-rw-r--r--  tools/pkt_gen/xena/json/xena_json.py | 124
-rwxr-xr-x  tools/pkt_gen/xena/xena.py | 32
-rw-r--r--  tools/report/report.py | 1
-rw-r--r--  tools/report/report_foot.rst | 4
-rw-r--r--  tools/report/report_rst.jinja | 4
-rw-r--r--  tools/systeminfo.py | 4
-rw-r--r--  tools/tasks.py | 7
-rw-r--r--  tools/teststepstools.py | 10
-rw-r--r--  tools/veth.py | 7
-rw-r--r--  tox.ini | 17
-rw-r--r--  vnfs/__init__.py | 1
-rw-r--r--  vnfs/qemu/__init__.py | 1
-rw-r--r--  vnfs/qemu/qemu.py | 46
-rw-r--r--  vnfs/vnf/__init__.py | 2
-rw-r--r--  vnfs/vnf/vnf.py | 4
-rwxr-xr-x  vsperf | 308
-rw-r--r--  vswitches/__init__.py | 1
-rw-r--r--  vswitches/ovs.py | 247
-rw-r--r--  vswitches/ovs_dpdk_vhost.py | 32
-rw-r--r--  vswitches/ovs_vanilla.py | 25
-rw-r--r--  vswitches/vpp_dpdk_vhost.py | 138
-rw-r--r--  vswitches/vswitch.py | 59
-rw-r--r--  xtesting/baremetal/Dockerfile | 36
-rw-r--r--  xtesting/baremetal/exceptions.py | 65
-rw-r--r--  xtesting/baremetal/requirements.txt | 2
-rw-r--r--  xtesting/baremetal/setup.cfg | 10
-rw-r--r--  xtesting/baremetal/setup.py | 9
-rw-r--r--  xtesting/baremetal/site.yml | 13
-rw-r--r--  xtesting/baremetal/ssh.py | 546
-rw-r--r--  xtesting/baremetal/testcases.yaml | 16
-rw-r--r--  xtesting/baremetal/utils.py | 41
-rw-r--r--  xtesting/baremetal/vsperf.conf | 21
-rw-r--r--  xtesting/baremetal/vsperf_controller.py | 194
-rw-r--r--  xtesting/openstack/Dockerfile | 61
-rw-r--r--  xtesting/openstack/cloud.rc | 10
-rw-r--r--  xtesting/openstack/setup.cfg | 10
-rw-r--r--  xtesting/openstack/setup.py | 9
-rw-r--r--  xtesting/openstack/site.yml | 13
-rw-r--r--  xtesting/openstack/testcases.yaml | 19
-rw-r--r--  xtesting/openstack/vsperfostack.conf | 80
-rwxr-xr-x  xtesting/openstack/vsperfostack.py | 85
390 files changed, 39053 insertions, 2006 deletions
diff --git a/.gitignore b/.gitignore
index 97dbd7d0..c162c945 100644
--- a/.gitignore
+++ b/.gitignore
@@ -47,7 +47,6 @@ coverage.xml
*.log
# Sphinx documentation
-docs/_build/
# PyBuilder
target/
@@ -58,7 +57,6 @@ target/
*~
.*.sw?
-/docs_build/
/docs_output/
/releng/
/src/dpdk/dpdk/
@@ -78,3 +76,4 @@ tags
!/src/l2fwd/LICENSE.txt
!/src/l2fwd/Makefile
+docs/_build/*
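The reshuffled ignore rule is worth a note: a pattern ending in '/' (the removed docs/_build/) excludes the directory itself, so git never descends into it and negation patterns such as the !/src/l2fwd/... entries above could never re-include anything beneath it. The added docs/_build/* form excludes the directory's contents individually, which keeps negations usable. A minimal sketch in .gitignore syntax; the !docs/_build/.gitkeep line is a hypothetical illustration, not part of this commit:

    # ignore everything inside the build directory, not the directory itself
    docs/_build/*
    # hypothetical re-include; only effective with the '*' form above
    !docs/_build/.gitkeep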
diff --git a/3rd_party/ixia/ixnetrfc2544.tcl b/3rd_party/ixia/ixnetrfc2544.tcl
index c47e8fc1..fbc05f95 100644
--- a/3rd_party/ixia/ixnetrfc2544.tcl
+++ b/3rd_party/ixia/ixnetrfc2544.tcl
@@ -1,7 +1,7 @@
#!/usr/bin/env tclsh
# Copyright (c) 2014, Ixia
-# Copyright (c) 2015-2017, Intel Corporation
+# Copyright (c) 2015-2018, Intel Corporation, Tieto
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -43,7 +43,7 @@ lappend auto_path [list $lib_path]
# verify that the IXIA chassis spec is given
-set reqVars [list "machine" "port" "user" "chassis" "card" "port1" "port2" "output_dir" "bidir"]
+set reqVars [list "machine" "port" "user" "chassis_east" "card_east" "port_east" "chassis_west" "card_west" "port_west" "output_dir" "bidir" "frame_size_list"]
set rfc2544test ""
foreach var $reqVars {
@@ -59,6 +59,7 @@ foreach var $reqVars {
set ::IxNserver $machine
set ::IxNport $port
set ::biDirect $bidir
+set frameSizeList $frame_size_list
# change to windows path format and append directory
set output_dir [string map {"/" "\\"} $output_dir]
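With this change the script's required-variable contract grows: callers must now supply east/west chassis coordinates and a frame size list instead of a single chassis with port1/port2. A minimal sketch of a caller-side parameter file covering the new reqVars list (all values below are illustrative only; in practice VSPERF's ixnet.py generates the equivalent assignments before sourcing this script):

    # Hypothetical params.tcl sourced ahead of ixnetrfc2544.tcl; values are examples.
    set machine       "10.0.0.5"          ;# IxNetwork TCL server host
    set port          9127                ;# IxNetwork TCL server port
    set user          "vsperf"
    set chassis_east  "10.0.0.10"
    set card_east     1
    set port_east     1
    set chassis_west  "10.0.0.11"
    set card_west     1
    set port_west     2
    set output_dir    "c:/ixia_results"   ;# converted to windows path format by the script
    set bidir         0
    set frame_size_list [list 64 128 512 1024 1518]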
@@ -66,14 +67,17 @@ set output_dir "$output_dir\\rfctests"
puts "Output directory is $output_dir"
proc startRfc2544Test { testSpec trafficSpec } {
# Start RFC2544 quicktest.
# Configure global variables. See documentation on 'global' for more
# information on why this is necessary
# https://www.tcl.tk/man/tcl8.5/tutorial/Tcl13.html
global rfc2544test
+ global qt
+ global frameSizeList
global sg_rfc2544throughput
global sg_rfc2544back2back
+ global output_dir
# Suffix for stack names
# This variable should be incremented after setting sg_stack like:
@@ -90,13 +94,16 @@ proc startRfc2544Test { testSpec trafficSpec } {
set duration [dict get $testSpec duration]
# check if only one tgen port is requested
- if {($::port1 == $::port2)} {
- set twoPorts 0
- set selfDestined True
- } else {
- set twoPorts 1
- set selfDestined False
- }
+ set twoPorts 1
+ set selfDestined False
+ if {($::chassis_east == $::chassis_west)} {
+ if {($::card_east == $::card_west)} {
+ if {($::port_east == $::port_west)} {
+ set twoPorts 0
+ set selfDestined True
+ }}
+ }
+
# RFC2544 to IXIA terminology mapping (it affects Ixia configuration inside this script):
# Test => Trial
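For readers tracing the new east/west logic: the three nested ifs in the hunk above simply test whether both traffic-generator endpoints name the same chassis, card and port, i.e. a single-port loopback. A minimal equivalent sketch in Tcl, assuming the same globals set by the caller:

    # Sketch only: the nested ifs collapse into one conjunction.
    if { $::chassis_east == $::chassis_west
         && $::card_east == $::card_west
         && $::port_east == $::port_west } {
        set twoPorts 0
        set selfDestined True
    } else {
        set twoPorts 1
        set selfDestined False
    }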
@@ -163,30 +170,18 @@ proc startRfc2544Test { testSpec trafficSpec } {
set trafficSpec_vlan [dict get $trafficSpec vlan]
set frameSize [dict get $trafficSpec_l2 framesize]
- set srcMac [dict get $trafficSpec_l2 srcmac]
+ set srcMac [dict get $trafficSpec_l2 srcmac]
set dstMac [dict get $trafficSpec_l2 dstmac]
+ set srcPort [dict get $trafficSpec_l4 srcport]
+ set dstPort [dict get $trafficSpec_l4 dstport]
set proto [dict get $trafficSpec_l3 proto]
set srcIp [dict get $trafficSpec_l3 srcip]
set dstIp [dict get $trafficSpec_l3 dstip]
+ set vlanEnabled [dict get $trafficSpec_vlan enabled]
+ set l3Enabled [dict get $trafficSpec_l3 enabled]
+ set l4Enabled [dict get $trafficSpec_l4 enabled]
- set srcPort [dict get $trafficSpec_l4 srcport]
- set dstPort [dict get $trafficSpec_l4 dstport]
-
- set l3Enabled [dict get $trafficSpec_l3 enabled]
- set l4Enabled [dict get $trafficSpec_l4 enabled]
- set vlanEnabled [dict get $trafficSpec_vlan enabled]
-
- if {$vlanEnabled == 1 } {
- # these keys won't exist if vlan wasn't enabled
- set vlanId [dict get $trafficSpec_vlan id]
- set vlanUserPrio [dict get $trafficSpec_vlan priority]
- set vlanCfi [dict get $trafficSpec_vlan cfi]
- } else {
- set vlanId 0
- set vlanUserPrio 0
- set vlanCfi 0
- }
if {$frameSize < 68 } {
if {$rfc2544TestType == "back2back"} {
@@ -281,7 +276,7 @@ proc startRfc2544Test { testSpec trafficSpec } {
-csvLogPollIntervalMultiplier 1 \
-pollInterval 2 \
-guardrailEnabled True \
- -enableCsvLogging False \
+ -enableCsvLogging False\
-dataStorePollingIntervalMultiplier 1 \
-maxNumberOfStatsPerCustomGraph 16 \
-additionalFcoeStat1 fcoeInvalidDelimiter \
@@ -373,7 +368,7 @@ proc startRfc2544Test { testSpec trafficSpec } {
-useDefaultRootPath False \
-outputRootPath $::output_dir
sg_commit
- set sg_top [lindex [ixNet remapIds $sg_top] 0]
+ #set sg_top [lindex [ixNet remapIds $sg_top] 0]
set ixNetSG_Stack(0) $sg_top
###
@@ -1154,21 +1149,30 @@ proc startRfc2544Test { testSpec trafficSpec } {
-masterChassis {} \
-sequenceId 1 \
-cableLength 0 \
- -hostname $::chassis
+ -hostname $::chassis_east
+ sg_commit
+ set sg_chassis1 [ixNet add $ixNetSG_Stack(0)/availableHardware chassis]
+ ixNet setMultiAttrs $sg_chassis1 \
+ -masterChassis {} \
+ -sequenceId 2 \
+ -cableLength 0 \
+ -hostname $::chassis_west
sg_commit
set sg_chassis [lindex [ixNet remapIds $sg_chassis] 0]
set ixNetSG_Stack(1) $sg_chassis
+ set sg_chassis1 [lindex [ixNet remapIds $sg_chassis1] 0]
+ set ixNetSG_Stack(4) $sg_chassis1
#
- # configuring the object that corresponds to /availableHardware/chassis/card
+ # configuring the object that corresponds to /availableHardware/chassis/card_east
#
- set sg_card $ixNetSG_Stack(1)/card:$::card
- ixNet setMultiAttrs $sg_card \
+ set sg_card_east $ixNetSG_Stack(1)/card:$::card_east
+ ixNet setMultiAttrs $sg_card_east \
-aggregationMode normal
sg_commit
- set sg_card [lindex [ixNet remapIds $sg_card] 0]
- set ixNetSG_ref(19) $sg_card
- set ixNetSG_Stack(2) $sg_card
+ set sg_card_east [lindex [ixNet remapIds $sg_card_east] 0]
+ set ixNetSG_ref(19) $sg_card_east
+ set ixNetSG_Stack(2) $sg_card_east
#
# configuring the object that corresponds to /availableHardware/chassis/card/aggregation:1
@@ -1206,11 +1210,24 @@ proc startRfc2544Test { testSpec trafficSpec } {
sg_commit
set sg_aggregation [lindex [ixNet remapIds $sg_aggregation] 0]
ixNet setMultiAttrs $ixNetSG_ref(2) \
- -connectedTo $ixNetSG_ref(19)/port:$::port1
+ -connectedTo $ixNetSG_ref(19)/port:$::port_east
sg_commit
+
+ #
+ # configuring the object that corresponds to /availableHardware/chassis/card_west
+ #
+ puts "ixNetSG_Stack(4) is $ixNetSG_Stack(4)"
+ set sg_card_west $ixNetSG_Stack(4)/card:$::card_west
+ ixNet setMultiAttrs $sg_card_west \
+ -aggregationMode normal
+ sg_commit
+ set sg_card_west [lindex [ixNet remapIds $sg_card_west] 0]
+ set ixNetSG_ref(20) $sg_card_west
+ set ixNetSG_Stack(4) $sg_card_west
+
if {$twoPorts} {
ixNet setMultiAttrs $ixNetSG_ref(10) \
- -connectedTo $ixNetSG_ref(19)/port:$::port2
+ -connectedTo $ixNetSG_ref(20)/port:$::port_west
sg_commit
}
sg_commit
@@ -1353,7 +1370,7 @@ proc startRfc2544Test { testSpec trafficSpec } {
-destinationMacMode manual
ixNet setMultiAttrs $sg_configElement/frameSize \
-weightedPairs {} \
- -fixedSize 64 \
+ -fixedSize $frameSizeList \
-incrementFrom 64 \
-randomMin 64 \
-randomMax 1518 \
@@ -2971,7 +2988,7 @@ proc startRfc2544Test { testSpec trafficSpec } {
-trackingEnabled False \
-valueType $L4ValueType \
-activeFieldChoice False \
- -startValue {0} \
+ -startValue $dstPort \
-countValue $L4CountValue
sg_commit
set sg_field [lindex [ixNet remapIds $sg_field] 0]
@@ -3080,7 +3097,7 @@ proc startRfc2544Test { testSpec trafficSpec } {
ixNet setMultiAttrs $sg_tracking \
-offset 0 \
-oneToOneMesh False \
- -trackBy {} \
+ -trackBy {trackingenabled0} \
-values {} \
-fieldWidth thirtyTwoBits \
-protocolOffset {Root.0}
@@ -6276,12 +6293,16 @@ proc startRfc2544Test { testSpec trafficSpec } {
#
if {$rfc2544TestType == "throughput"} {
set sg_rfc2544throughput [ixNet add $ixNetSG_Stack(0)/quickTest rfc2544throughput]
+ ixNet commit
ixNet setMultiAttrs $sg_rfc2544throughput \
-name {QuickTest1} \
-mode existingMode \
-inputParameters {{}}
+ ixNet commit
+ set sizes [join $frameSizeList ","]
+ set sg_rfc2544throughput [lindex [ixNet remapIds $sg_rfc2544throughput] 0]
ixNet setMultiAttrs $sg_rfc2544throughput/testConfig \
- -protocolItem {} \
+ -protocolItem [list ] \
-enableMinFrameSize True \
-framesize $frameSize \
-reportTputRateUnit mbps \
@@ -6293,7 +6314,7 @@ proc startRfc2544Test { testSpec trafficSpec } {
-tolerance 0 \
-frameLossUnit {0} \
-staggeredStart False \
- -framesizeList $frameSize \
+ -framesizeList $sizes \
-frameSizeMode custom \
-rateSelect percentMaxRate \
-percentMaxRate 100 \
@@ -6318,7 +6339,7 @@ proc startRfc2544Test { testSpec trafficSpec } {
-txDelay 2 \
-delayAfterTransmit 2 \
-minRandomFrameSize 64 \
- -maxRandomFrameSize 1518 \
+ -maxRandomFrameSize 128 \
-countRandomFrameSize 1 \
-minIncrementFrameSize 64 \
-stepIncrementFrameSize 64 \
@@ -6415,9 +6436,9 @@ proc startRfc2544Test { testSpec trafficSpec } {
-dataErrorThresholdValue 0 \
-dataErrorThresholdMode average
sg_commit
+ ixNet commit
set sg_rfc2544throughput [lindex [ixNet remapIds $sg_rfc2544throughput] 0]
set ixNetSG_Stack(1) $sg_rfc2544throughput
-
#
# configuring the object that corresponds to /quickTest/rfc2544throughput:1/protocols
#
@@ -6438,6 +6459,12 @@ proc startRfc2544Test { testSpec trafficSpec } {
-includeMode inTest \
-itemType trafficItem
sg_commit
+
+ #
+ # configuring the results folder that corresponds to /quickTest/rfc2544throughput:1
+ #
+ ixNet setAttr $sg_rfc2544throughput -resultPath $output_dir
+ ixNet commit
set sg_trafficSelection [lindex [ixNet remapIds $sg_trafficSelection] 0]
ixNet commit
@@ -6466,7 +6493,7 @@ proc startRfc2544Test { testSpec trafficSpec } {
-tolerance 0 \
-frameLossUnit {0} \
-staggeredStart False \
- -framesizeList $frameSize \
+ -framesizeList [list $frameSize] \
-frameSizeMode custom \
-rateSelect percentMaxRate \
-percentMaxRate 100 \
@@ -6611,14 +6638,74 @@ proc startRfc2544Test { testSpec trafficSpec } {
}
ixNet exec apply $rfc2544test
after 5000
-
#
# starting the RFC2544 Throughput test
#
puts "Starting test..."
ixNet exec start $rfc2544test
+ puts "Checking if [ixNet getA $rfc2544test -name] started...."
+ set count 0
+ while { [ixNet getA $rfc2544test/results -isRunning] eq false } {
+ after 1000
+ if { $count > 60 } { error "QT failed to start after 1 minute" }
+ incr count
+ }
+ puts "Looking for statistics"
+ set results_file_name "Traffic Item Statistics"
+ set results_file_path [getResultFile $results_file_name]
+ return $results_file_path
}
+proc getResultFile { viewName } {
+ global output_dir
+ puts "Sleeping 20 seconds to have $viewName view"
+ after 20000
+ set root [ixNet getRoot]
+ set views [ixNet getList $root/statistics view]
+ foreach view $views {
+ if { [ixNet getA $view -caption] eq $viewName } {
+ set trafficView $view
+ break
+ }
+ }
+ puts "Checking that the $viewName view is ready"
+ set count 0
+ while { [ixNet getA $trafficView/data -isReady] eq false } {
+ after 1000
+ if { $count > 2 } { break }
+ incr count
+ }
+ puts "Success! $viewName view is ready! "
+ puts "Changing the CSV path"
+ set setAttr [ixNet setA $root/statistics -csvFilePath $output_dir]
+ if { $setAttr != "::ixNet::OK"} {
+ error "Error"
+ }
+ ixNet commit
+ puts "Enabling CSV logging"
+ set setAttr [ixNet setA $trafficView -enableCsvLogging True]
+ if { $setAttr != "::ixNet::OK"} {
+ error "Error"
+ }
+ ixNet commit
+ puts "Enabled CSV logging"
+ puts "Getting CSV file name for $trafficView view"
+ set csv_path [ixNet getA $root/statistics -csvFilePath]
+ set csv_name [ixNet getA $trafficView -csvFileName]
+ ixNet commit
+ return [file join $csv_path $csv_name]
+}
+
+proc copyFileResults { sourceFile destFile } {
+ puts "Coping the file $sourceFile to $destFile..."
+ set source [dict get $sourceFile source_file]
+ set dest [dict get $destFile dest_file]
+ if {[catch {ixNet exec copyFile [ixNet readFrom "$source" -ixNetRelative] [ixNet writeTo "$dest" -overwrite]} errMsg]} {
+ error "Error while copying results : '$errMsg'"
+ }
+}
+
+
proc waitForRfc2544Test { } {
# Wait for- and return results of- RFC2544 quicktest.
@@ -6626,7 +6713,14 @@ proc waitForRfc2544Test { } {
puts "Waiting for test to complete..."
set result [ixNet exec waitForTest $rfc2544test]
+ puts "Checking if [ixNet getA $rfc2544test -name] stopped"
+ set count 0
+ while { [ixNet getA $rfc2544test/results -isRunning] eq true } {
+ after 1000
+ if { $count > 60 } { error "QT failed to stop within 1 minute of finishing" }
+ incr count
+ }
puts "Finished Test"
return "$result"
-}
+}
\ No newline at end of file
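Taken together, the reworked procs imply a three-step driver sequence: start the quicktest and obtain the statistics CSV path, wait for completion, then pull the CSV off the IxNetwork host. A minimal sketch, assuming testSpec/trafficSpec dicts prepared by the caller (as VSPERF's ixnet.py does) and an illustrative destination filename:

    # Hypothetical driver sequence for the new result handling.
    set csvOnServer [startRfc2544Test $testSpec $trafficSpec]  ;# path of the "Traffic Item Statistics" CSV
    set result [waitForRfc2544Test]                            ;# blocks until the quicktest stops
    # copy the CSV from the IxNetwork machine to the chosen destination
    copyFileResults [dict create source_file $csvOnServer] \
                    [dict create dest_file "$output_dir\\traffic_item_stats.csv"]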
diff --git a/3rd_party/ixia/ixnetrfc2544_bad_l2_crc.tcl b/3rd_party/ixia/ixnetrfc2544_bad_l2_crc.tcl
new file mode 100644
index 00000000..5c42ea50
--- /dev/null
+++ b/3rd_party/ixia/ixnetrfc2544_bad_l2_crc.tcl
@@ -0,0 +1,6632 @@
+#!/usr/bin/env tclsh
+
+# Copyright (c) 2014, Ixia
+# Copyright (c) 2015-2018, Intel Corporation, Tieto
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# This file is a modified version of a script generated by Ixia
+# IxNetwork.
+
+lappend auto_path [list $lib_path]
+
+###################################################################
+########################## Configuration ##########################
+###################################################################
+
+# verify that the IXIA chassis spec is given
+
+set reqVars [list "machine" "port" "user" "chassis" "card" "port1" "port2" "output_dir" "bidir"]
+set rfc2544test ""
+
+foreach var $reqVars {
+ set var_ns [namespace which -variable "$var"]
+ if { [string compare $var_ns ""] == 0 } {
+ errorMsg "The '$var' variable is undefined. Did you set it?"
+ return -1
+ }
+}
+
+# machine configuration
+
+set ::IxNserver $machine
+set ::IxNport $port
+set ::biDirect $bidir
+
+# change to windows path format and append directory
+set output_dir [string map {"/" "\\"} $output_dir]
+set output_dir "$output_dir\\rfctests"
+puts "Output directory is $output_dir"
+
+proc startRfc2544Test { testSpec trafficSpec } {
+ # Start RFC2544 quicktest.
+
+ # Configure global variables. See documentation on 'global' for more
+ # information on why this is necessary
+ # https://www.tcl.tk/man/tcl8.5/tutorial/Tcl13.html
+ global rfc2544test
+ global sg_rfc2544throughput
+ global sg_rfc2544back2back
+
+ # Suffix for stack names
+ # This variable should be incremented after setting sg_stack like:
+ # set sg_stack $ixNetSG_Stack(2)/stack:"protocolnamehere-$stack_number"
+ # incr stack_number
+ set stack_number 1
+
+ # flow spec
+
+ set rfc2544TestType [dict get $testSpec rfc2544TestType]
+
+ set binary [dict get $testSpec binary]
+
+ set duration [dict get $testSpec duration]
+
+ # check if only one tgen port is requested
+ if {($::port1 == $::port2)} {
+ set twoPorts 0
+ set selfDestined True
+ } else {
+ set twoPorts 1
+ set selfDestined False
+ }
+
+ # RFC2544 to IXIA terminology mapping (it affects Ixia configuration inside this script):
+ # Test => Trial
+ # Trial => Iteration
+ if {$binary} {
+ set numTests [dict get $testSpec tests]
+ set frameRate 100
+ set tolerance [dict get $testSpec lossrate]
+ set loadType binary
+ } else {
+ set numTests 1
+ set frameRate [dict get $testSpec framerate]
+ set tolerance 0.0
+ set loadType custom
+ }
+
+ set learningFrames [dict get $testSpec learningFrames]
+
+ set L2CountValue 1
+ set L2Increment False
+ set L3ValueType singleValue
+ set L3CountValue 1
+ set L4ValueType singleValue
+ set L4CountValue 1
+
+ if {$learningFrames} {
+ set learningFrequency oncePerTest
+ set fastPathEnable True
+ } else {
+ set learningFrequency never
+ set fastPathEnable False
+ }
+
+ set multipleStreams [dict get $testSpec multipleStreams]
+ set streamType [dict get $testSpec streamType]
+
+ if {($multipleStreams < 0)} {
+ set multipleStreams 0
+ }
+
+ if {$multipleStreams} {
+ if {($streamType == "L2")} {
+ set L2CountValue $multipleStreams
+ set L2Increment True
+ } elseif {($streamType == "L3")} {
+ set L3ValueType increment
+ set L3CountValue $multipleStreams
+ } else {
+ set L4ValueType increment
+ set L4CountValue $multipleStreams
+ }
+ }
+
+ set flowControl [dict get $testSpec flowControl]
+ set fastConvergence True
+ set convergenceDuration [expr $duration/10]
+
+ # traffic spec
+
+ # extract nested dictionaries
+ set trafficSpec_l2 [dict get $trafficSpec l2]
+ set trafficSpec_l3 [dict get $trafficSpec l3]
+ set trafficSpec_l4 [dict get $trafficSpec l4]
+ set trafficSpec_vlan [dict get $trafficSpec vlan]
+
+ set frameSize [dict get $trafficSpec_l2 framesize]
+ set srcMac [dict get $trafficSpec_l2 srcmac]
+ set dstMac [dict get $trafficSpec_l2 dstmac]
+
+ set proto [dict get $trafficSpec_l3 proto]
+ set srcIp [dict get $trafficSpec_l3 srcip]
+ set dstIp [dict get $trafficSpec_l3 dstip]
+
+ set srcPort [dict get $trafficSpec_l4 srcport]
+ set dstPort [dict get $trafficSpec_l4 dstport]
+
+ set l3Enabled [dict get $trafficSpec_l3 enabled]
+ set l4Enabled [dict get $trafficSpec_l4 enabled]
+ set vlanEnabled [dict get $trafficSpec_vlan enabled]
+
+ if {$vlanEnabled == 1 } {
+ # these keys won't exist if vlan wasn't enabled
+ set vlanId [dict get $trafficSpec_vlan id]
+ set vlanUserPrio [dict get $trafficSpec_vlan priority]
+ set vlanCfi [dict get $trafficSpec_vlan cfi]
+ } else {
+ set vlanId 0
+ set vlanUserPrio 0
+ set vlanCfi 0
+ }
+
+ if {$frameSize < 68 } {
+ if {$rfc2544TestType == "back2back"} {
+ puts "INFO: Packet size too small, packet size will be \
+ increased to 68 for this test"
+ }
+ }
+ # constants
+
+ set VERSION [package require IxTclNetwork]
+
+ ###################################################################
+ ############################ Operation ############################
+ ###################################################################
+
+ puts "Connecting to IxNetwork machine..."
+
+ ixNet connect $::IxNserver -port $::IxNport -version $VERSION
+
+ puts "Connected to IxNetwork machine"
+
+ puts "Configuring IxNetwork machine..."
+
+ set ::_sg_cc 0
+ proc sg_commit {} {ixNet commit}
+
+ ixNet rollback
+ ixNet setSessionParameter version 6.30.701.16
+ ixNet execute newConfig
+ set ixNetSG_Stack(0) [ixNet getRoot]
+
+ #
+ # setting global options
+ #
+ set sg_top [ixNet getRoot]
+ ixNet setMultiAttrs $sg_top/availableHardware \
+ -offChassisHwM {} \
+ -isOffChassis False
+ ixNet setMultiAttrs $sg_top/globals/preferences \
+ -connectPortsOnLoadConfig True \
+ -rebootPortsOnConnect False
+ ixNet setMultiAttrs $sg_top/globals/interfaces \
+ -arpOnLinkup True \
+ -nsOnLinkup True \
+ -sendSingleArpPerGateway True \
+ -sendSingleNsPerGateway True
+ ixNet setMultiAttrs $sg_top/impairment/defaultProfile/checksums \
+ -dropRxL2FcsErrors False \
+ -correctTxL2FcsErrors False \
+ -alwaysCorrectWhenModifying True \
+ -correctTxChecksumOverIp False \
+ -correctTxIpv4Checksum False
+ ixNet setMultiAttrs $sg_top/impairment/defaultProfile/rxRateLimit \
+ -enabled False \
+ -value 8 \
+ -units {kKilobitsPerSecond}
+ ixNet setMultiAttrs $sg_top/impairment/defaultProfile/drop \
+ -enabled False \
+ -clusterSize 1 \
+ -percentRate 0
+ ixNet setMultiAttrs $sg_top/impairment/defaultProfile/reorder \
+ -enabled False \
+ -clusterSize 1 \
+ -percentRate 0 \
+ -skipCount 1
+ ixNet setMultiAttrs $sg_top/impairment/defaultProfile/duplicate \
+ -enabled False \
+ -clusterSize 1 \
+ -percentRate 0 \
+ -duplicateCount 1
+ ixNet setMultiAttrs $sg_top/impairment/defaultProfile/bitError \
+ -enabled False \
+ -logRate 3 \
+ -skipEndOctets 0 \
+ -skipStartOctets 0
+ ixNet setMultiAttrs $sg_top/impairment/defaultProfile/delay \
+ -enabled False \
+ -value 300 \
+ -units {kMicroseconds}
+ ixNet setMultiAttrs $sg_top/impairment/defaultProfile/delayVariation \
+ -uniformSpread 0 \
+ -enabled False \
+ -units {kMicroseconds} \
+ -distribution {kUniform} \
+ -exponentialMeanArrival 0 \
+ -gaussianStandardDeviation 0
+ ixNet setMultiAttrs $sg_top/impairment/defaultProfile/customDelayVariation \
+ -enabled False \
+ -name {}
+ ixNet setMultiAttrs $sg_top/statistics \
+ -additionalFcoeStat2 fcoeInvalidFrames \
+ -csvLogPollIntervalMultiplier 1 \
+ -pollInterval 2 \
+ -guardrailEnabled True \
+ -enableCsvLogging False \
+ -dataStorePollingIntervalMultiplier 1 \
+ -maxNumberOfStatsPerCustomGraph 16 \
+ -additionalFcoeStat1 fcoeInvalidDelimiter \
+ -timestampPrecision 3 \
+ -enableDataCenterSharedStats False \
+ -timeSynchronization syncTimeToTestStart \
+ -enableAutoDataStore False
+ ixNet setMultiAttrs $sg_top/statistics/measurementMode \
+ -measurementMode mixedMode
+ ixNet setMultiAttrs $sg_top/eventScheduler \
+ -licenseServerLocation {127.0.0.1}
+ ixNet setMultiAttrs $sg_top/traffic \
+ -destMacRetryCount 1 \
+ -maxTrafficGenerationQueries 500 \
+ -enableStaggeredTransmit False \
+ -learningFrameSize $frameSize \
+ -useTxRxSync True \
+ -enableDestMacRetry True \
+ -enableMulticastScalingFactor False \
+ -destMacRetryDelay 5 \
+ -largeErrorThreshhold 2 \
+ -refreshLearnedInfoBeforeApply False \
+ -enableMinFrameSize True \
+ -macChangeOnFly False \
+ -waitTime 1 \
+ -enableInstantaneousStatsSupport False \
+ -learningFramesCount 10 \
+ -globalStreamControl continuous \
+ -displayMplsCurrentLabelValue False \
+ -mplsLabelLearningTimeout 30 \
+ -enableStaggeredStartDelay True \
+ -enableDataIntegrityCheck False \
+ -enableSequenceChecking False \
+ -globalStreamControlIterations 1 \
+ -enableStreamOrdering False \
+ -frameOrderingMode none \
+ -learningFramesRate 100
+ ixNet setMultiAttrs $sg_top/traffic/statistics/latency \
+ -enabled True \
+ -mode storeForward
+ ixNet setMultiAttrs $sg_top/traffic/statistics/interArrivalTimeRate \
+ -enabled False
+ ixNet setMultiAttrs $sg_top/traffic/statistics/delayVariation \
+ -enabled False \
+ -statisticsMode rxDelayVariationErrorsAndRate \
+ -latencyMode storeForward \
+ -largeSequenceNumberErrorThreshold 2
+ ixNet setMultiAttrs $sg_top/traffic/statistics/sequenceChecking \
+ -enabled False \
+ -sequenceMode rxThreshold
+ ixNet setMultiAttrs $sg_top/traffic/statistics/advancedSequenceChecking \
+ -enabled False \
+ -advancedSequenceThreshold 1
+ ixNet setMultiAttrs $sg_top/traffic/statistics/cpdpConvergence \
+ -enabled False \
+ -dataPlaneJitterWindow 10485760 \
+ -dataPlaneThreshold 95 \
+ -enableDataPlaneEventsRateMonitor False \
+ -enableControlPlaneEvents False
+ ixNet setMultiAttrs $sg_top/traffic/statistics/packetLossDuration \
+ -enabled False
+ ixNet setMultiAttrs $sg_top/traffic/statistics/dataIntegrity \
+ -enabled False
+ ixNet setMultiAttrs $sg_top/traffic/statistics/errorStats \
+ -enabled False
+ ixNet setMultiAttrs $sg_top/traffic/statistics/prbs \
+ -enabled False
+ ixNet setMultiAttrs $sg_top/traffic/statistics/iptv \
+ -enabled False
+ ixNet setMultiAttrs $sg_top/traffic/statistics/l1Rates \
+ -enabled False
+ ixNet setMultiAttrs $sg_top/quickTest/globals \
+ -productLabel {Your switch/router name here} \
+ -serialNumber {Your switch/router serial number here} \
+ -version {Your firmware version here} \
+ -comments {} \
+ -titlePageComments {} \
+ -maxLinesToDisplay 100 \
+ -enableCheckLinkState False \
+ -enableAbortIfLinkDown False \
+ -enableSwitchToStats True \
+ -enableCapture False \
+ -enableSwitchToResult True \
+ -enableGenerateReportAfterRun False \
+ -enableRebootCpu False \
+ -saveCaptureBeforeRun False \
+ -linkDownTimeout 5 \
+ -sleepTimeAfterReboot 10 \
+ -useDefaultRootPath False \
+ -outputRootPath $::output_dir
+ sg_commit
+ set sg_top [lindex [ixNet remapIds $sg_top] 0]
+ set ixNetSG_Stack(0) $sg_top
+
+ ###
+ ### /vport area
+ ###
+
+ #
+ # configuring the object that corresponds to /vport:1
+ #
+ set sg_vport [ixNet add $ixNetSG_Stack(0) vport]
+ ixNet setMultiAttrs $sg_vport \
+ -transmitIgnoreLinkStatus False \
+ -txGapControlMode averageMode \
+ -type tenGigLan \
+ -connectedTo ::ixNet::OBJ-null \
+ -txMode interleaved \
+ -isPullOnly False \
+ -rxMode captureAndMeasure \
+ -name {10GE LAN - 001}
+ ixNet setMultiAttrs $sg_vport/l1Config \
+ -currentType tenGigLan
+ ixNet setMultiAttrs $sg_vport/l1Config/tenGigLan \
+ -ppm 0 \
+ -flowControlDirectedAddress "01 80 C2 00 00 01" \
+ -enablePPM False \
+ -autoInstrumentation endOfFrame \
+ -transmitClocking internal \
+ -txIgnoreRxLinkFaults False \
+ -loopback False \
+ -enableLASIMonitoring False \
+ -enabledFlowControl $flowControl
+ ixNet setMultiAttrs $sg_vport/l1Config/tenGigLan/oam \
+ -tlvType {00} \
+ -linkEvents False \
+ -enabled False \
+ -vendorSpecificInformation {00 00 00 00} \
+ -macAddress "00:00:00:00:00:00" \
+ -loopback False \
+ -idleTimer 5 \
+ -tlvValue {00} \
+ -enableTlvOption False \
+ -maxOAMPDUSize 64 \
+ -organizationUniqueIdentifier {000000}
+ ixNet setMultiAttrs $sg_vport/l1Config/tenGigLan/fcoe \
+ -supportDataCenterMode False \
+ -priorityGroupSize priorityGroupSize-8 \
+ -pfcPauseDelay 1 \
+ -pfcPriorityGroups {0 1 2 3 4 5 6 7} \
+ -flowControlType ieee802.1Qbb \
+ -enablePFCPauseDelay False
+ ixNet setMultiAttrs $sg_vport/l1Config/fortyGigLan \
+ -ppm 0 \
+ -flowControlDirectedAddress "01 80 C2 00 00 01" \
+ -enablePPM False \
+ -autoInstrumentation endOfFrame \
+ -transmitClocking internal \
+ -txIgnoreRxLinkFaults False \
+ -loopback False \
+ -enableLASIMonitoring False \
+ -enabledFlowControl $flowControl
+ ixNet setMultiAttrs $sg_vport/l1Config/fortyGigLan/fcoe \
+ -supportDataCenterMode False \
+ -priorityGroupSize priorityGroupSize-8 \
+ -pfcPauseDelay 1 \
+ -pfcPriorityGroups {0 1 2 3 4 5 6 7} \
+ -flowControlType ieee802.1Qbb \
+ -enablePFCPauseDelay False
+ ixNet setMultiAttrs $sg_vport/l1Config/OAM \
+ -tlvType {00} \
+ -linkEvents False \
+ -enabled False \
+ -vendorSpecificInformation {00 00 00 00} \
+ -macAddress "00:00:00:00:00:00" \
+ -loopback False \
+ -idleTimer 5 \
+ -tlvValue {00} \
+ -enableTlvOption False \
+ -maxOAMPDUSize 64 \
+ -organizationUniqueIdentifier {000000}
+ ixNet setMultiAttrs $sg_vport/l1Config/rxFilters/filterPalette \
+ -sourceAddress1Mask {00:00:00:00:00:00} \
+ -destinationAddress1Mask {00:00:00:00:00:00} \
+ -sourceAddress2 {00:00:00:00:00:00} \
+ -pattern2OffsetType fromStartOfFrame \
+ -pattern2Offset 20 \
+ -pattern1Mask {00} \
+ -sourceAddress2Mask {00:00:00:00:00:00} \
+ -destinationAddress2 {00:00:00:00:00:00} \
+ -destinationAddress1 {00:00:00:00:00:00} \
+ -sourceAddress1 {00:00:00:00:00:00} \
+ -pattern1 {00} \
+ -destinationAddress2Mask {00:00:00:00:00:00} \
+ -pattern2Mask {00} \
+ -pattern1Offset 20 \
+ -pattern2 {00} \
+ -pattern1OffsetType fromStartOfFrame
+ ixNet setMultiAttrs $sg_vport/protocols/arp \
+ -enabled False
+ ixNet setMultiAttrs $sg_vport/protocols/bfd \
+ -enabled False \
+ -intervalValue 0 \
+ -packetsPerInterval 0
+ ixNet setMultiAttrs $sg_vport/protocols/bgp \
+ -autoFillUpDutIp False \
+ -disableReceivedUpdateValidation False \
+ -enableAdVplsPrefixLengthInBits False \
+ -enableExternalActiveConnect True \
+ -enableInternalActiveConnect True \
+ -enableVpnLabelExchangeOverLsp True \
+ -enabled False \
+ -externalRetries 0 \
+ -externalRetryDelay 120 \
+ -internalRetries 0 \
+ -internalRetryDelay 120 \
+ -mldpP2mpFecType 6 \
+ -triggerVplsPwInitiation False
+ ixNet setMultiAttrs $sg_vport/protocols/cfm \
+ -enableOptionalLmFunctionality False \
+ -enableOptionalTlvValidation True \
+ -enabled False \
+ -receiveCcm True \
+ -sendCcm True \
+ -suppressErrorsOnAis True
+ ixNet setMultiAttrs $sg_vport/protocols/eigrp \
+ -enabled False
+ ixNet setMultiAttrs $sg_vport/protocols/elmi \
+ -enabled False
+ ixNet setMultiAttrs $sg_vport/protocols/igmp \
+ -enabled False \
+ -numberOfGroups 0 \
+ -numberOfQueries 0 \
+ -queryTimePeriod 0 \
+ -sendLeaveOnStop True \
+ -statsEnabled False \
+ -timePeriod 0
+ ixNet setMultiAttrs $sg_vport/protocols/isis \
+ -allL1RbridgesMac "01:80:c2:00:00:40" \
+ -emulationType isisL3Routing \
+ -enabled False \
+ -helloMulticastMac "01:80:c2:00:00:41" \
+ -lspMgroupPdusPerInterval 0 \
+ -nlpId 192 \
+ -rateControlInterval 0 \
+ -sendP2PHellosToUnicastMac True \
+ -spbAllL1BridgesMac "09:00:2b:00:00:05" \
+ -spbHelloMulticastMac "09:00:2b:00:00:05" \
+ -spbNlpId 192
+ ixNet setMultiAttrs $sg_vport/protocols/lacp \
+ -enablePreservePartnerInfo False \
+ -enabled False
+ ixNet setMultiAttrs $sg_vport/protocols/ldp \
+ -enableDiscardSelfAdvFecs False \
+ -enableHelloJitter True \
+ -enableVpnLabelExchangeOverLsp True \
+ -enabled False \
+ -helloHoldTime 15 \
+ -helloInterval 5 \
+ -keepAliveHoldTime 30 \
+ -keepAliveInterval 10 \
+ -p2mpCapabilityParam 1288 \
+ -p2mpFecType 6 \
+ -targetedHelloInterval 15 \
+ -targetedHoldTime 45 \
+ -useTransportLabelsForMplsOam False
+ ixNet setMultiAttrs $sg_vport/protocols/linkOam \
+ -enabled False
+ ixNet setMultiAttrs $sg_vport/protocols/lisp \
+ -burstIntervalInMs 0 \
+ -enabled False \
+ -ipv4MapRegisterPacketsPerBurst 0 \
+ -ipv4MapRequestPacketsPerBurst 0 \
+ -ipv4SmrPacketsPerBurst 0 \
+ -ipv6MapRegisterPacketsPerBurst 0 \
+ -ipv6MapRequestPacketsPerBurst 0 \
+ -ipv6SmrPacketsPerBurst 0
+ ixNet setMultiAttrs $sg_vport/protocols/mld \
+ -enableDoneOnStop True \
+ -enabled False \
+ -mldv2Report type143 \
+ -numberOfGroups 0 \
+ -numberOfQueries 0 \
+ -queryTimePeriod 0 \
+ -timePeriod 0
+ ixNet setMultiAttrs $sg_vport/protocols/mplsOam \
+ -enabled False
+ ixNet setMultiAttrs $sg_vport/protocols/mplsTp \
+ -apsChannelType {00 02 } \
+ -bfdCcChannelType {00 07 } \
+ -delayManagementChannelType {00 05 } \
+ -enableHighPerformanceMode True \
+ -enabled False \
+ -faultManagementChannelType {00 58 } \
+ -lossMeasurementChannelType {00 04 } \
+ -onDemandCvChannelType {00 09 } \
+ -pwStatusChannelType {00 0B } \
+ -y1731ChannelType {7F FA }
+ ixNet setMultiAttrs $sg_vport/protocols/ospf \
+ -enableDrOrBdr False \
+ -enabled False \
+ -floodLinkStateUpdatesPerInterval 0 \
+ -rateControlInterval 0
+ ixNet setMultiAttrs $sg_vport/protocols/ospfV3 \
+ -enabled False
+ ixNet setMultiAttrs $sg_vport/protocols/pimsm \
+ -bsmFramePerInterval 0 \
+ -crpFramePerInterval 0 \
+ -dataMdtFramePerInterval 0 \
+ -denyGrePimIpPrefix {0.0.0.0/32} \
+ -enableDiscardJoinPruneProcessing False \
+ -enableRateControl False \
+ -enabled False \
+ -helloMsgsPerInterval 0 \
+ -interval 0 \
+ -joinPruneMessagesPerInterval 0 \
+ -registerMessagesPerInterval 0 \
+ -registerStopMessagesPerInterval 0
+ ixNet setMultiAttrs $sg_vport/protocols/ping \
+ -enabled False
+ ixNet setMultiAttrs $sg_vport/protocols/rip \
+ -enabled False
+ ixNet setMultiAttrs $sg_vport/protocols/ripng \
+ -enabled False
+ ixNet setMultiAttrs $sg_vport/protocols/rsvp \
+ -enableControlLspInitiationRate False \
+ -enableShowTimeValue False \
+ -enableVpnLabelExchangeOverLsp True \
+ -enabled False \
+ -maxLspInitiationsPerSec 400 \
+ -useTransportLabelsForMplsOam False
+ ixNet setMultiAttrs $sg_vport/protocols/stp \
+ -enabled False
+ ixNet setMultiAttrs $sg_vport/rateControlParameters \
+ -maxRequestsPerBurst 1 \
+ -maxRequestsPerSec 250 \
+ -minRetryInterval 10 \
+ -retryCount 3 \
+ -sendInBursts False \
+ -sendRequestsAsFastAsPossible False
+ ixNet setMultiAttrs $sg_vport/capture \
+ -controlCaptureTrigger {} \
+ -controlCaptureFilter {} \
+ -hardwareEnabled False \
+ -softwareEnabled False \
+ -displayFiltersDataCapture {} \
+ -displayFiltersControlCapture {} \
+ -controlBufferSize 30 \
+ -controlBufferBehaviour bufferLiveNonCircular
+ ixNet setMultiAttrs $sg_vport/protocolStack/options \
+ -routerSolicitationDelay 1 \
+ -routerSolicitationInterval 4 \
+ -routerSolicitations 3 \
+ -retransTime 1000 \
+ -dadTransmits 1 \
+ -dadEnabled True \
+ -ipv4RetransTime 3000 \
+ -ipv4McastSolicit 4
+ sg_commit
+ set sg_vport [lindex [ixNet remapIds $sg_vport] 0]
+ set ixNetSG_ref(2) $sg_vport
+ set ixNetSG_Stack(1) $sg_vport
+
+ #
+ # configuring the object that corresponds to /vport:1/l1Config/rxFilters/uds:1
+ #
+ set sg_uds $ixNetSG_Stack(1)/l1Config/rxFilters/uds:1
+ ixNet setMultiAttrs $sg_uds \
+ -destinationAddressSelector anyAddr \
+ -customFrameSizeTo 0 \
+ -customFrameSizeFrom 0 \
+ -error errAnyFrame \
+ -patternSelector anyPattern \
+ -sourceAddressSelector anyAddr \
+ -isEnabled True \
+ -frameSizeType any
+ sg_commit
+ set sg_uds [lindex [ixNet remapIds $sg_uds] 0]
+
+ #
+ # configuring the object that corresponds to /vport:1/l1Config/rxFilters/uds:2
+ #
+ set sg_uds $ixNetSG_Stack(1)/l1Config/rxFilters/uds:2
+ ixNet setMultiAttrs $sg_uds \
+ -destinationAddressSelector anyAddr \
+ -customFrameSizeTo 0 \
+ -customFrameSizeFrom 0 \
+ -error errAnyFrame \
+ -patternSelector anyPattern \
+ -sourceAddressSelector anyAddr \
+ -isEnabled True \
+ -frameSizeType any
+ sg_commit
+ set sg_uds [lindex [ixNet remapIds $sg_uds] 0]
+
+ #
+ # configuring the object that corresponds to /vport:1/l1Config/rxFilters/uds:3
+ #
+ set sg_uds $ixNetSG_Stack(1)/l1Config/rxFilters/uds:3
+ ixNet setMultiAttrs $sg_uds \
+ -destinationAddressSelector anyAddr \
+ -customFrameSizeTo 0 \
+ -customFrameSizeFrom 0 \
+ -error errAnyFrame \
+ -patternSelector anyPattern \
+ -sourceAddressSelector anyAddr \
+ -isEnabled True \
+ -frameSizeType any
+ sg_commit
+ set sg_uds [lindex [ixNet remapIds $sg_uds] 0]
+
+ #
+ # configuring the object that corresponds to /vport:1/l1Config/rxFilters/uds:4
+ #
+ set sg_uds $ixNetSG_Stack(1)/l1Config/rxFilters/uds:4
+ ixNet setMultiAttrs $sg_uds \
+ -destinationAddressSelector anyAddr \
+ -customFrameSizeTo 0 \
+ -customFrameSizeFrom 0 \
+ -error errAnyFrame \
+ -patternSelector anyPattern \
+ -sourceAddressSelector anyAddr \
+ -isEnabled True \
+ -frameSizeType any
+ sg_commit
+ set sg_uds [lindex [ixNet remapIds $sg_uds] 0]
+
+ #
+ # configuring the object that corresponds to /vport:1/l1Config/rxFilters/uds:5
+ #
+ set sg_uds $ixNetSG_Stack(1)/l1Config/rxFilters/uds:5
+ ixNet setMultiAttrs $sg_uds \
+ -destinationAddressSelector anyAddr \
+ -customFrameSizeTo 0 \
+ -customFrameSizeFrom 0 \
+ -error errAnyFrame \
+ -patternSelector anyPattern \
+ -sourceAddressSelector anyAddr \
+ -isEnabled True \
+ -frameSizeType any
+ sg_commit
+ set sg_uds [lindex [ixNet remapIds $sg_uds] 0]
+
+ #
+ # configuring the object that corresponds to /vport:1/l1Config/rxFilters/uds:6
+ #
+ set sg_uds $ixNetSG_Stack(1)/l1Config/rxFilters/uds:6
+ ixNet setMultiAttrs $sg_uds \
+ -destinationAddressSelector anyAddr \
+ -customFrameSizeTo 0 \
+ -customFrameSizeFrom 0 \
+ -error errAnyFrame \
+ -patternSelector anyPattern \
+ -sourceAddressSelector anyAddr \
+ -isEnabled True \
+ -frameSizeType any
+ sg_commit
+ set sg_uds [lindex [ixNet remapIds $sg_uds] 0]
+
+ #
+ # configuring the object that corresponds to /vport:1/protocols/static/lan:1
+ #
+ set sg_lan [ixNet add $ixNetSG_Stack(1)/protocols/static lan]
+ ixNet setMultiAttrs $sg_lan \
+ -atmEncapsulation ::ixNet::OBJ-null \
+ -count $L2CountValue \
+ -countPerVc 1 \
+ -enableIncrementMac $L2Increment \
+ -enableIncrementVlan False \
+ -enableSiteId False \
+ -enableVlan False \
+ -enabled True \
+ -frEncapsulation ::ixNet::OBJ-null \
+ -incrementPerVcVlanMode noIncrement \
+ -incrementVlanMode noIncrement \
+ -mac $srcMac \
+ -macRangeMode normal \
+ -numberOfVcs 1 \
+ -siteId 0 \
+ -skipVlanIdZero True \
+ -tpid {0x8100} \
+ -trafficGroupId ::ixNet::OBJ-null \
+ -vlanCount 1 \
+ -vlanId {1} \
+ -vlanPriority {0}
+ sg_commit
+ set sg_lan [lindex [ixNet remapIds $sg_lan] 0]
+
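+ # The second vport is created only for two-port runs; its configuration
+ # mirrors vport:1 above, except that the static LAN range advertises
+ # $dstMac instead of $srcMac.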
+ if {$twoPorts} {
+ #
+ # configuring the object that corresponds to /vport:2
+ #
+ set sg_vport [ixNet add $ixNetSG_Stack(0) vport]
+ ixNet setMultiAttrs $sg_vport \
+ -transmitIgnoreLinkStatus False \
+ -txGapControlMode averageMode \
+ -type tenGigLan \
+ -connectedTo ::ixNet::OBJ-null \
+ -txMode interleaved \
+ -isPullOnly False \
+ -rxMode captureAndMeasure \
+ -name {10GE LAN - 002}
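+ # Both the tenGigLan and fortyGigLan l1Config branches are populated
+ # below even though -currentType is tenGigLan; this appears to be
+ # standard ScriptGen output that keeps either port type ready for use.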
+ ixNet setMultiAttrs $sg_vport/l1Config \
+ -currentType tenGigLan
+ ixNet setMultiAttrs $sg_vport/l1Config/tenGigLan \
+ -ppm 0 \
+ -flowControlDirectedAddress "01 80 C2 00 00 01" \
+ -enablePPM False \
+ -autoInstrumentation endOfFrame \
+ -transmitClocking internal \
+ -txIgnoreRxLinkFaults False \
+ -loopback False \
+ -enableLASIMonitoring False \
+ -enabledFlowControl $flowControl
+ ixNet setMultiAttrs $sg_vport/l1Config/tenGigLan/oam \
+ -tlvType {00} \
+ -linkEvents False \
+ -enabled False \
+ -vendorSpecificInformation {00 00 00 00} \
+ -macAddress "00:00:00:00:00:00" \
+ -loopback False \
+ -idleTimer 5 \
+ -tlvValue {00} \
+ -enableTlvOption False \
+ -maxOAMPDUSize 64 \
+ -organizationUniqueIdentifier {000000}
+ ixNet setMultiAttrs $sg_vport/l1Config/tenGigLan/fcoe \
+ -supportDataCenterMode False \
+ -priorityGroupSize priorityGroupSize-8 \
+ -pfcPauseDelay 1 \
+ -pfcPriorityGroups {0 1 2 3 4 5 6 7} \
+ -flowControlType ieee802.1Qbb \
+ -enablePFCPauseDelay False
+ ixNet setMultiAttrs $sg_vport/l1Config/fortyGigLan \
+ -ppm 0 \
+ -flowControlDirectedAddress "01 80 C2 00 00 01" \
+ -enablePPM False \
+ -autoInstrumentation endOfFrame \
+ -transmitClocking internal \
+ -txIgnoreRxLinkFaults False \
+ -loopback False \
+ -enableLASIMonitoring False \
+ -enabledFlowControl $flowControl
+ ixNet setMultiAttrs $sg_vport/l1Config/fortyGigLan/fcoe \
+ -supportDataCenterMode False \
+ -priorityGroupSize priorityGroupSize-8 \
+ -pfcPauseDelay 1 \
+ -pfcPriorityGroups {0 1 2 3 4 5 6 7} \
+ -flowControlType ieee802.1Qbb \
+ -enablePFCPauseDelay False
+ ixNet setMultiAttrs $sg_vport/l1Config/OAM \
+ -tlvType {00} \
+ -linkEvents False \
+ -enabled False \
+ -vendorSpecificInformation {00 00 00 00} \
+ -macAddress "00:00:00:00:00:00" \
+ -loopback False \
+ -idleTimer 5 \
+ -tlvValue {00} \
+ -enableTlvOption False \
+ -maxOAMPDUSize 64 \
+ -organizationUniqueIdentifier {000000}
+ ixNet setMultiAttrs $sg_vport/l1Config/rxFilters/filterPalette \
+ -sourceAddress1Mask {00:00:00:00:00:00} \
+ -destinationAddress1Mask {00:00:00:00:00:00} \
+ -sourceAddress2 {00:00:00:00:00:00} \
+ -pattern2OffsetType fromStartOfFrame \
+ -pattern2Offset 20 \
+ -pattern1Mask {00} \
+ -sourceAddress2Mask {00:00:00:00:00:00} \
+ -destinationAddress2 {00:00:00:00:00:00} \
+ -destinationAddress1 {00:00:00:00:00:00} \
+ -sourceAddress1 {00:00:00:00:00:00} \
+ -pattern1 {00} \
+ -destinationAddress2Mask {00:00:00:00:00:00} \
+ -pattern2Mask {00} \
+ -pattern1Offset 20 \
+ -pattern2 {00} \
+ -pattern1OffsetType fromStartOfFrame
+ ixNet setMultiAttrs $sg_vport/protocols/arp \
+ -enabled False
+ ixNet setMultiAttrs $sg_vport/protocols/bfd \
+ -enabled False \
+ -intervalValue 0 \
+ -packetsPerInterval 0
+ ixNet setMultiAttrs $sg_vport/protocols/bgp \
+ -autoFillUpDutIp False \
+ -disableReceivedUpdateValidation False \
+ -enableAdVplsPrefixLengthInBits False \
+ -enableExternalActiveConnect True \
+ -enableInternalActiveConnect True \
+ -enableVpnLabelExchangeOverLsp True \
+ -enabled False \
+ -externalRetries 0 \
+ -externalRetryDelay 120 \
+ -internalRetries 0 \
+ -internalRetryDelay 120 \
+ -mldpP2mpFecType 6 \
+ -triggerVplsPwInitiation False
+ ixNet setMultiAttrs $sg_vport/protocols/cfm \
+ -enableOptionalLmFunctionality False \
+ -enableOptionalTlvValidation True \
+ -enabled False \
+ -receiveCcm True \
+ -sendCcm True \
+ -suppressErrorsOnAis True
+ ixNet setMultiAttrs $sg_vport/protocols/eigrp \
+ -enabled False
+ ixNet setMultiAttrs $sg_vport/protocols/elmi \
+ -enabled False
+ ixNet setMultiAttrs $sg_vport/protocols/igmp \
+ -enabled False \
+ -numberOfGroups 0 \
+ -numberOfQueries 0 \
+ -queryTimePeriod 0 \
+ -sendLeaveOnStop True \
+ -statsEnabled False \
+ -timePeriod 0
+ ixNet setMultiAttrs $sg_vport/protocols/isis \
+ -allL1RbridgesMac "01:80:c2:00:00:40" \
+ -emulationType isisL3Routing \
+ -enabled False \
+ -helloMulticastMac "01:80:c2:00:00:41" \
+ -lspMgroupPdusPerInterval 0 \
+ -nlpId 192 \
+ -rateControlInterval 0 \
+ -sendP2PHellosToUnicastMac True \
+ -spbAllL1BridgesMac "09:00:2b:00:00:05" \
+ -spbHelloMulticastMac "09:00:2b:00:00:05" \
+ -spbNlpId 192
+ ixNet setMultiAttrs $sg_vport/protocols/lacp \
+ -enablePreservePartnerInfo False \
+ -enabled False
+ ixNet setMultiAttrs $sg_vport/protocols/ldp \
+ -enableDiscardSelfAdvFecs False \
+ -enableHelloJitter True \
+ -enableVpnLabelExchangeOverLsp True \
+ -enabled False \
+ -helloHoldTime 15 \
+ -helloInterval 5 \
+ -keepAliveHoldTime 30 \
+ -keepAliveInterval 10 \
+ -p2mpCapabilityParam 1288 \
+ -p2mpFecType 6 \
+ -targetedHelloInterval 15 \
+ -targetedHoldTime 45 \
+ -useTransportLabelsForMplsOam False
+ ixNet setMultiAttrs $sg_vport/protocols/linkOam \
+ -enabled False
+ ixNet setMultiAttrs $sg_vport/protocols/lisp \
+ -burstIntervalInMs 0 \
+ -enabled False \
+ -ipv4MapRegisterPacketsPerBurst 0 \
+ -ipv4MapRequestPacketsPerBurst 0 \
+ -ipv4SmrPacketsPerBurst 0 \
+ -ipv6MapRegisterPacketsPerBurst 0 \
+ -ipv6MapRequestPacketsPerBurst 0 \
+ -ipv6SmrPacketsPerBurst 0
+ ixNet setMultiAttrs $sg_vport/protocols/mld \
+ -enableDoneOnStop True \
+ -enabled False \
+ -mldv2Report type143 \
+ -numberOfGroups 0 \
+ -numberOfQueries 0 \
+ -queryTimePeriod 0 \
+ -timePeriod 0
+ ixNet setMultiAttrs $sg_vport/protocols/mplsOam \
+ -enabled False
+ ixNet setMultiAttrs $sg_vport/protocols/mplsTp \
+ -apsChannelType {00 02 } \
+ -bfdCcChannelType {00 07 } \
+ -delayManagementChannelType {00 05 } \
+ -enableHighPerformanceMode True \
+ -enabled False \
+ -faultManagementChannelType {00 58 } \
+ -lossMeasurementChannelType {00 04 } \
+ -onDemandCvChannelType {00 09 } \
+ -pwStatusChannelType {00 0B } \
+ -y1731ChannelType {7F FA }
+ ixNet setMultiAttrs $sg_vport/protocols/ospf \
+ -enableDrOrBdr False \
+ -enabled False \
+ -floodLinkStateUpdatesPerInterval 0 \
+ -rateControlInterval 0
+ ixNet setMultiAttrs $sg_vport/protocols/ospfV3 \
+ -enabled False
+ ixNet setMultiAttrs $sg_vport/protocols/pimsm \
+ -bsmFramePerInterval 0 \
+ -crpFramePerInterval 0 \
+ -dataMdtFramePerInterval 0 \
+ -denyGrePimIpPrefix {0.0.0.0/32} \
+ -enableDiscardJoinPruneProcessing False \
+ -enableRateControl False \
+ -enabled False \
+ -helloMsgsPerInterval 0 \
+ -interval 0 \
+ -joinPruneMessagesPerInterval 0 \
+ -registerMessagesPerInterval 0 \
+ -registerStopMessagesPerInterval 0
+ ixNet setMultiAttrs $sg_vport/protocols/ping \
+ -enabled False
+ ixNet setMultiAttrs $sg_vport/protocols/rip \
+ -enabled False
+ ixNet setMultiAttrs $sg_vport/protocols/ripng \
+ -enabled False
+ ixNet setMultiAttrs $sg_vport/protocols/rsvp \
+ -enableControlLspInitiationRate False \
+ -enableShowTimeValue False \
+ -enableVpnLabelExchangeOverLsp True \
+ -enabled False \
+ -maxLspInitiationsPerSec 400 \
+ -useTransportLabelsForMplsOam False
+ ixNet setMultiAttrs $sg_vport/protocols/stp \
+ -enabled False
+ ixNet setMultiAttrs $sg_vport/rateControlParameters \
+ -maxRequestsPerBurst 1 \
+ -maxRequestsPerSec 250 \
+ -minRetryInterval 10 \
+ -retryCount 3 \
+ -sendInBursts False \
+ -sendRequestsAsFastAsPossible False
+ ixNet setMultiAttrs $sg_vport/capture \
+ -controlCaptureTrigger {} \
+ -controlCaptureFilter {} \
+ -hardwareEnabled False \
+ -softwareEnabled False \
+ -displayFiltersDataCapture {} \
+ -displayFiltersControlCapture {} \
+ -controlBufferSize 30 \
+ -controlBufferBehaviour bufferLiveNonCircular
+ ixNet setMultiAttrs $sg_vport/protocolStack/options \
+ -routerSolicitationDelay 1 \
+ -routerSolicitationInterval 4 \
+ -routerSolicitations 3 \
+ -retransTime 1000 \
+ -dadTransmits 1 \
+ -dadEnabled True \
+ -ipv4RetransTime 3000 \
+ -ipv4McastSolicit 4
+ sg_commit
+ set sg_vport [lindex [ixNet remapIds $sg_vport] 0]
+ set ixNetSG_ref(10) $sg_vport
+ set ixNetSG_Stack(1) $sg_vport
+
+ #
+ # configuring the object that corresponds to /vport:2/l1Config/rxFilters/uds:1
+ #
+ set sg_uds $ixNetSG_Stack(1)/l1Config/rxFilters/uds:1
+ ixNet setMultiAttrs $sg_uds \
+ -destinationAddressSelector anyAddr \
+ -customFrameSizeTo 0 \
+ -customFrameSizeFrom 0 \
+ -error errAnyFrame \
+ -patternSelector anyPattern \
+ -sourceAddressSelector anyAddr \
+ -isEnabled True \
+ -frameSizeType any
+ sg_commit
+ set sg_uds [lindex [ixNet remapIds $sg_uds] 0]
+
+ #
+ # configuring the object that corresponds to /vport:2/l1Config/rxFilters/uds:2
+ #
+ set sg_uds $ixNetSG_Stack(1)/l1Config/rxFilters/uds:2
+ ixNet setMultiAttrs $sg_uds \
+ -destinationAddressSelector anyAddr \
+ -customFrameSizeTo 0 \
+ -customFrameSizeFrom 0 \
+ -error errAnyFrame \
+ -patternSelector anyPattern \
+ -sourceAddressSelector anyAddr \
+ -isEnabled True \
+ -frameSizeType any
+ sg_commit
+ set sg_uds [lindex [ixNet remapIds $sg_uds] 0]
+
+ #
+ # configuring the object that corresponds to /vport:2/l1Config/rxFilters/uds:3
+ #
+ set sg_uds $ixNetSG_Stack(1)/l1Config/rxFilters/uds:3
+ ixNet setMultiAttrs $sg_uds \
+ -destinationAddressSelector anyAddr \
+ -customFrameSizeTo 0 \
+ -customFrameSizeFrom 0 \
+ -error errAnyFrame \
+ -patternSelector anyPattern \
+ -sourceAddressSelector anyAddr \
+ -isEnabled True \
+ -frameSizeType any
+ sg_commit
+ set sg_uds [lindex [ixNet remapIds $sg_uds] 0]
+
+ #
+ # configuring the object that corresponds to /vport:2/l1Config/rxFilters/uds:4
+ #
+ set sg_uds $ixNetSG_Stack(1)/l1Config/rxFilters/uds:4
+ ixNet setMultiAttrs $sg_uds \
+ -destinationAddressSelector anyAddr \
+ -customFrameSizeTo 0 \
+ -customFrameSizeFrom 0 \
+ -error errAnyFrame \
+ -patternSelector anyPattern \
+ -sourceAddressSelector anyAddr \
+ -isEnabled True \
+ -frameSizeType any
+ sg_commit
+ set sg_uds [lindex [ixNet remapIds $sg_uds] 0]
+
+ #
+ # configuring the object that corresponds to /vport:2/l1Config/rxFilters/uds:5
+ #
+ set sg_uds $ixNetSG_Stack(1)/l1Config/rxFilters/uds:5
+ ixNet setMultiAttrs $sg_uds \
+ -destinationAddressSelector anyAddr \
+ -customFrameSizeTo 0 \
+ -customFrameSizeFrom 0 \
+ -error errAnyFrame \
+ -patternSelector anyPattern \
+ -sourceAddressSelector anyAddr \
+ -isEnabled True \
+ -frameSizeType any
+ sg_commit
+ set sg_uds [lindex [ixNet remapIds $sg_uds] 0]
+
+ #
+ # configuring the object that corresponds to /vport:2/l1Config/rxFilters/uds:6
+ #
+ set sg_uds $ixNetSG_Stack(1)/l1Config/rxFilters/uds:6
+ ixNet setMultiAttrs $sg_uds \
+ -destinationAddressSelector anyAddr \
+ -customFrameSizeTo 0 \
+ -customFrameSizeFrom 0 \
+ -error errAnyFrame \
+ -patternSelector anyPattern \
+ -sourceAddressSelector anyAddr \
+ -isEnabled True \
+ -frameSizeType any
+ sg_commit
+ set sg_uds [lindex [ixNet remapIds $sg_uds] 0]
+
+ #
+ # configuring the object that corresponds to /vport:2/protocols/static/lan:1
+ #
+ set sg_lan [ixNet add $ixNetSG_Stack(1)/protocols/static lan]
+ ixNet setMultiAttrs $sg_lan \
+ -atmEncapsulation ::ixNet::OBJ-null \
+ -count $L2CountValue \
+ -countPerVc 1 \
+ -enableIncrementMac $L2Increment \
+ -enableIncrementVlan False \
+ -enableSiteId False \
+ -enableVlan False \
+ -enabled True \
+ -frEncapsulation ::ixNet::OBJ-null \
+ -incrementPerVcVlanMode noIncrement \
+ -incrementVlanMode noIncrement \
+ -mac $dstMac \
+ -macRangeMode normal \
+ -numberOfVcs 1 \
+ -siteId 0 \
+ -skipVlanIdZero True \
+ -tpid {0x8100} \
+ -trafficGroupId ::ixNet::OBJ-null \
+ -vlanCount 1 \
+ -vlanId {1} \
+ -vlanPriority {0}
+ sg_commit
+ set sg_lan [lindex [ixNet remapIds $sg_lan] 0]
+ }
+
+ ###
+ ### /availableHardware area
+ ###
+
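+ # The chassis, card and port assignments are taken from the global
+ # ::chassis, ::card, ::port1 and ::port2 variables supplied by the caller.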
+ #
+ # configuring the object that corresponds to /availableHardware/chassis
+ #
+ set sg_chassis [ixNet add $ixNetSG_Stack(0)/availableHardware chassis]
+ ixNet setMultiAttrs $sg_chassis \
+ -masterChassis {} \
+ -sequenceId 1 \
+ -cableLength 0 \
+ -hostname $::chassis
+ sg_commit
+ set sg_chassis [lindex [ixNet remapIds $sg_chassis] 0]
+ set ixNetSG_Stack(1) $sg_chassis
+
+ #
+ # configuring the object that corresponds to /availableHardware/chassis/card
+ #
+ set sg_card $ixNetSG_Stack(1)/card:$::card
+ ixNet setMultiAttrs $sg_card \
+ -aggregationMode normal
+ sg_commit
+ set sg_card [lindex [ixNet remapIds $sg_card] 0]
+ set ixNetSG_ref(19) $sg_card
+ set ixNetSG_Stack(2) $sg_card
+
+ #
+ # configuring the object that corresponds to /availableHardware/chassis/card/aggregation:1
+ #
+ set sg_aggregation $ixNetSG_Stack(2)/aggregation:1
+ ixNet setMultiAttrs $sg_aggregation \
+ -mode normal
+ sg_commit
+ set sg_aggregation [lindex [ixNet remapIds $sg_aggregation] 0]
+
+ #
+ # configuring the object that corresponds to /availableHardware/chassis/card/aggregation:2
+ #
+ set sg_aggregation $ixNetSG_Stack(2)/aggregation:2
+ ixNet setMultiAttrs $sg_aggregation \
+ -mode normal
+ sg_commit
+ set sg_aggregation [lindex [ixNet remapIds $sg_aggregation] 0]
+
+ #
+ # configuring the object that corresponds to /availableHardware/chassis/card/aggregation:3
+ #
+ set sg_aggregation $ixNetSG_Stack(2)/aggregation:3
+ ixNet setMultiAttrs $sg_aggregation \
+ -mode normal
+ sg_commit
+ set sg_aggregation [lindex [ixNet remapIds $sg_aggregation] 0]
+
+ #
+ # configuring the object that corresponds to /availableHardware/chassis/card/aggregation:4
+ #
+ set sg_aggregation $ixNetSG_Stack(2)/aggregation:4
+ ixNet setMultiAttrs $sg_aggregation \
+ -mode normal
+ sg_commit
+ set sg_aggregation [lindex [ixNet remapIds $sg_aggregation] 0]
+ ixNet setMultiAttrs $ixNetSG_ref(2) \
+ -connectedTo $ixNetSG_ref(19)/port:$::port1
+ sg_commit
+ if {$twoPorts} {
+ ixNet setMultiAttrs $ixNetSG_ref(10) \
+ -connectedTo $ixNetSG_ref(19)/port:$::port2
+ sg_commit
+ }
+ sg_commit
+
+ ###
+ ### /impairment area
+ ###
+
+ #
+ # configuring the object that corresponds to /impairment/profile:3
+ #
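+ # The impairment profile is created disabled (-enabled False); the drop
+ # and delay settings below therefore take effect only if a test enables
+ # the profile at run time.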
+ set sg_profile [ixNet add $ixNetSG_Stack(0)/impairment profile]
+ ixNet setMultiAttrs $sg_profile \
+ -enabled False \
+ -name {Impairment Profile 1} \
+ -links {} \
+ -allLinks True \
+ -priority 1
+ ixNet setMultiAttrs $sg_profile/checksums \
+ -dropRxL2FcsErrors False \
+ -correctTxL2FcsErrors False \
+ -alwaysCorrectWhenModifying True \
+ -correctTxChecksumOverIp False \
+ -correctTxIpv4Checksum False
+ ixNet setMultiAttrs $sg_profile/rxRateLimit \
+ -enabled False \
+ -value 8 \
+ -units {kKilobitsPerSecond}
+ ixNet setMultiAttrs $sg_profile/drop \
+ -enabled True \
+ -clusterSize 1 \
+ -percentRate 0
+ ixNet setMultiAttrs $sg_profile/reorder \
+ -enabled False \
+ -clusterSize 1 \
+ -percentRate 0 \
+ -skipCount 1
+ ixNet setMultiAttrs $sg_profile/duplicate \
+ -enabled False \
+ -clusterSize 1 \
+ -percentRate 0 \
+ -duplicateCount 1
+ ixNet setMultiAttrs $sg_profile/bitError \
+ -enabled False \
+ -logRate 3 \
+ -skipEndOctets 0 \
+ -skipStartOctets 0
+ ixNet setMultiAttrs $sg_profile/delay \
+ -enabled True \
+ -value 300 \
+ -units {kMicroseconds}
+ ixNet setMultiAttrs $sg_profile/delayVariation \
+ -uniformSpread 0 \
+ -enabled False \
+ -units {kMicroseconds} \
+ -distribution {kUniform} \
+ -exponentialMeanArrival 0 \
+ -gaussianStandardDeviation 0
+ ixNet setMultiAttrs $sg_profile/customDelayVariation \
+ -enabled False \
+ -name {}
+ sg_commit
+ set sg_profile [lindex [ixNet remapIds $sg_profile] 0]
+ set ixNetSG_Stack(1) $sg_profile
+
+ #
+ # configuring the object that corresponds to /impairment/profile:3/fixedClassifier:1
+ #
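+ # An empty fixedClassifier (no match fields) is added, presumably so the
+ # impairment profile has a classifier object to reference.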
+ set sg_fixedClassifier [ixNet add $ixNetSG_Stack(1) fixedClassifier]
+ sg_commit
+ set sg_fixedClassifier [lindex [ixNet remapIds $sg_fixedClassifier] 0]
+
+ ###
+ ### /traffic area
+ ###
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1
+ #
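+ # Directionality is driven by script arguments: -biDirectional comes from
+ # $::biDirect and -allowSelfDestined from $selfDestined, the latter
+ # presumably required for single-port loopback runs where source and
+ # destination are the same vport.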
+ set sg_trafficItem [ixNet add $ixNetSG_Stack(0)/traffic trafficItem]
+ ixNet setMultiAttrs $sg_trafficItem \
+ -transportRsvpTePreference one \
+ -trafficItemType l2L3 \
+ -biDirectional $::biDirect \
+ -mergeDestinations True \
+ -hostsPerNetwork 1 \
+ -transmitMode interleaved \
+ -ordinalNo 0 \
+ -trafficType {ethernetVlan} \
+ -interAsLdpPreference two \
+ -allowSelfDestined $selfDestined \
+ -enabled True \
+ -maxNumberOfVpnLabelStack 2 \
+ -interAsBgpPreference one \
+ -suspend False \
+ -transportLdpPreference two \
+ -egressEnabled False \
+ -enableDynamicMplsLabelValues False \
+ -routeMesh oneToOne \
+ -name {Traffic Item 1} \
+ -srcDestMesh oneToOne
+ sg_commit
+ set sg_trafficItem [lindex [ixNet remapIds $sg_trafficItem] 0]
+ set ixNetSG_ref(26) $sg_trafficItem
+ set ixNetSG_Stack(1) $sg_trafficItem
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/endpointSet:1
+ #
+ set sg_endpointSet [ixNet add $ixNetSG_Stack(1) endpointSet]
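+ # Two-port mode sends from vport:1 to vport:2; in single-port mode the
+ # same vport serves as both source and destination (loopback).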
+ if {$twoPorts} {
+ ixNet setMultiAttrs $sg_endpointSet \
+ -destinations [list $ixNetSG_ref(10)/protocols] \
+ -destinationFilter {} \
+ -sourceFilter {} \
+ -trafficGroups {} \
+ -sources [list $ixNetSG_ref(2)/protocols] \
+ -name {EndpointSet-1}
+ } else {
+ ixNet setMultiAttrs $sg_endpointSet \
+ -destinations [list $ixNetSG_ref(2)/protocols] \
+ -destinationFilter {} \
+ -sourceFilter {} \
+ -trafficGroups {} \
+ -sources [list $ixNetSG_ref(2)/protocols] \
+ -name {EndpointSet-1}
+ }
+ sg_commit
+ set sg_endpointSet [lindex [ixNet remapIds $sg_endpointSet] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1
+ #
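+ # NOTE: -crc badCrc is the defining change in this script variant: every
+ # generated frame is transmitted with a deliberately invalid L2 FCS.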
+ set sg_configElement $ixNetSG_Stack(1)/configElement:1
+ ixNet setMultiAttrs $sg_configElement \
+ -crc badCrc \
+ -preambleCustomSize 8 \
+ -enableDisparityError False \
+ -preambleFrameSizeMode auto \
+ -destinationMacMode manual
+ ixNet setMultiAttrs $sg_configElement/frameSize \
+ -weightedPairs {} \
+ -fixedSize 64 \
+ -incrementFrom 64 \
+ -randomMin 64 \
+ -randomMax 1518 \
+ -quadGaussian {} \
+ -type fixed \
+ -presetDistribution cisco \
+ -incrementStep 1 \
+ -incrementTo 1518
+ ixNet setMultiAttrs $sg_configElement/frameRate \
+ -bitRateUnitsType bitsPerSec \
+ -rate 10 \
+ -enforceMinimumInterPacketGap 0 \
+ -type percentLineRate \
+ -interPacketGapUnitsType nanoseconds
+ ixNet setMultiAttrs $sg_configElement/framePayload \
+ -type incrementByte \
+ -customRepeat True \
+ -customPattern {}
+ ixNet setMultiAttrs $sg_configElement/frameRateDistribution \
+ -streamDistribution applyRateToAll \
+ -portDistribution applyRateToAll
+ ixNet setMultiAttrs $sg_configElement/transmissionControl \
+ -frameCount 1 \
+ -minGapBytes 12 \
+ -interStreamGap 0 \
+ -interBurstGap 0 \
+ -interBurstGapUnits nanoseconds \
+ -type continuous \
+ -duration 1 \
+ -repeatBurst 1 \
+ -enableInterStreamGap False \
+ -startDelayUnits bytes \
+ -iterationCount 1 \
+ -burstPacketCount 1 \
+ -enableInterBurstGap False \
+ -startDelay 0
+ sg_commit
+ set sg_configElement [lindex [ixNet remapIds $sg_configElement] 0]
+ set ixNetSG_Stack(2) $sg_configElement
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ethernet-1"
+ #
+ set sg_stack $ixNetSG_Stack(2)/stack:"ethernet-$stack_number"
+ sg_commit
+ set sg_stack [lindex [ixNet remapIds $sg_stack] 0]
+ set ixNetSG_Stack(3) $sg_stack
+ incr stack_number
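+ # stack_number tracks each protocol header's position in the
+ # configElement and is incremented after every header, so the optional
+ # vlan and ipv4 stacks below resolve to the correct field paths.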
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ethernet-1"/field:"ethernet.header.destinationAddress-1"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ethernet.header.destinationAddress-1"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue $dstMac \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{00:00:00:00:00:00}} \
+ -stepValue {00:00:00:00:00:01} \
+ -fixedBits {00:00:00:00:00:00} \
+ -fieldValue $dstMac \
+ -auto False \
+ -randomMask {00:00:00:00:00:00} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue $dstMac \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ethernet-1"/field:"ethernet.header.sourceAddress-2"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ethernet.header.sourceAddress-2"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue $srcMac \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{00:00:00:00:00:00}} \
+ -stepValue {00:00:00:00:00:00} \
+ -fixedBits {00:00:00:00:00:00} \
+ -fieldValue $srcMac \
+ -auto False \
+ -randomMask {00:00:00:00:00:00} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue $srcMac \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ethernet-1"/field:"ethernet.header.etherType-3"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ethernet.header.etherType-3"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {800} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0xFFFF}} \
+ -stepValue {0xFFFF} \
+ -fixedBits {0xFFFF} \
+ -fieldValue {800} \
+ -auto True \
+ -randomMask {0xFFFF} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0xFFFF} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ethernet-1"/field:"ethernet.header.pfcQueue-4"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ethernet.header.pfcQueue-4"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
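+ # Optional 802.1Q VLAN header, inserted only when vlanEnabled is set.
+ # Unlike the generated blocks above, these field writes are not followed
+ # by an immediate sg_commit; they are flushed by the next sg_commit call.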
+ if {$vlanEnabled == 1} {
+ set sg_stack $ixNetSG_Stack(2)/stack:"vlan-$stack_number"
+ sg_commit
+ set sg_stack [lindex [ixNet remapIds $sg_stack] 0]
+ set ixNetSG_Stack(3) $sg_stack
+ incr stack_number
+
+ set sg_field $ixNetSG_Stack(3)/field:"vlan.header.vlanTag.vlanUserPriority-1"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue $vlanUserPrio \
+ -seed 1 \
+ -optionalEnabled true \
+ -fullMesh false \
+ -valueList [list 0] \
+ -stepValue 0 \
+ -fixedBits 0 \
+ -fieldValue $vlanUserPrio \
+ -auto false \
+ -randomMask 0 \
+ -trackingEnabled false \
+ -valueType singleValue \
+ -activeFieldChoice false \
+ -startValue 0 \
+ -countValue 1
+
+ set sg_field $ixNetSG_Stack(3)/field:"vlan.header.vlanTag.cfi-2"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue $vlanCfi \
+ -seed 1 \
+ -optionalEnabled true \
+ -fullMesh false \
+ -valueList [list 0] \
+ -stepValue 0 \
+ -fixedBits 0 \
+ -fieldValue $vlanCfi \
+ -auto false \
+ -randomMask 0 \
+ -trackingEnabled false \
+ -valueType singleValue \
+ -activeFieldChoice false \
+ -startValue 0 \
+ -countValue 1
+
+ set sg_field $ixNetSG_Stack(3)/field:"vlan.header.vlanTag.vlanID-3"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue $vlanId \
+ -seed 1 \
+ -optionalEnabled true \
+ -fullMesh false \
+ -valueList [list 0] \
+ -stepValue 0 \
+ -fixedBits 0 \
+ -fieldValue $vlanId \
+ -auto false \
+ -randomMask 0 \
+ -trackingEnabled false \
+ -valueType singleValue \
+ -activeFieldChoice false \
+ -startValue 0 \
+ -countValue 1
+
+ set sg_field $ixNetSG_Stack(3)/field:"vlan.header.protocolID-4"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue ffff \
+ -seed 1 \
+ -optionalEnabled true \
+ -fullMesh false \
+ -valueList [list 0xffff] \
+ -stepValue 0xffff \
+ -fixedBits 0xffff \
+ -fieldValue ffff \
+ -auto true \
+ -randomMask 0xffff \
+ -trackingEnabled false \
+ -valueType singleValue \
+ -activeFieldChoice false \
+ -startValue 0xffff \
+ -countValue 1
+ }
+
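+ # Optional IPv4 header, inserted only when l3Enabled is set. Addresses
+ # come from $srcIp/$dstIp; the destination can step across $L3CountValue
+ # addresses when $L3ValueType selects an incrementing mode.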
+ if {$l3Enabled == 1} {
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"
+ #
+ set sg_stack $ixNetSG_Stack(2)/stack:"ipv4-$stack_number"
+ sg_commit
+ set sg_stack [lindex [ixNet remapIds $sg_stack] 0]
+ set ixNetSG_Stack(3) $sg_stack
+ incr stack_number
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.version-1"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.version-1"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {4} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{4}} \
+ -stepValue {4} \
+ -fixedBits {4} \
+ -fieldValue {4} \
+ -auto False \
+ -randomMask {4} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {4} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.headerLength-2"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.headerLength-2"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {5} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {5} \
+ -auto True \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.priority.raw-3"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.priority.raw-3"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.priority.tos.precedence-4"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.priority.tos.precedence-4"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {000 Routine} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice True \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.priority.tos.delay-5"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.priority.tos.delay-5"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {Normal} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice True \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.priority.tos.throughput-6"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.priority.tos.throughput-6"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {Normal} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice True \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.priority.tos.reliability-7"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.priority.tos.reliability-7"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {Normal} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice True \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.priority.tos.monetary-8"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.priority.tos.monetary-8"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {Normal} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice True \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.priority.tos.unused-9"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.priority.tos.unused-9"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice True \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.priority.ds.phb.defaultPHB.defaultPHB-10"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.priority.ds.phb.defaultPHB.defaultPHB-10"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.priority.ds.phb.defaultPHB.unused-11"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.priority.ds.phb.defaultPHB.unused-11"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.priority.ds.phb.classSelectorPHB.classSelectorPHB-12"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.priority.ds.phb.classSelectorPHB.classSelectorPHB-12"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {8} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{8}} \
+ -stepValue {8} \
+ -fixedBits {8} \
+ -fieldValue {Precedence 1} \
+ -auto False \
+ -randomMask {8} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {8} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.priority.ds.phb.classSelectorPHB.unused-13"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.priority.ds.phb.classSelectorPHB.unused-13"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.priority.ds.phb.assuredForwardingPHB.assuredForwardingPHB-14"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.priority.ds.phb.assuredForwardingPHB.assuredForwardingPHB-14"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {10} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{10}} \
+ -stepValue {10} \
+ -fixedBits {10} \
+ -fieldValue {Class 1, Low drop precedence} \
+ -auto False \
+ -randomMask {10} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {10} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.priority.ds.phb.assuredForwardingPHB.unused-15"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.priority.ds.phb.assuredForwardingPHB.unused-15"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.priority.ds.phb.expeditedForwardingPHB.expeditedForwardingPHB-16"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.priority.ds.phb.expeditedForwardingPHB.expeditedForwardingPHB-16"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {46} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{46}} \
+ -stepValue {46} \
+ -fixedBits {46} \
+ -fieldValue {46} \
+ -auto False \
+ -randomMask {46} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {46} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.priority.ds.phb.expeditedForwardingPHB.unused-17"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.priority.ds.phb.expeditedForwardingPHB.unused-17"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.totalLength-18"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.totalLength-18"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {46} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{20}} \
+ -stepValue {20} \
+ -fixedBits {20} \
+ -fieldValue {46} \
+ -auto True \
+ -randomMask {20} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {20} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.identification-19"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.identification-19"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.flags.reserved-20"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.flags.reserved-20"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.flags.fragment-21"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.flags.fragment-21"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {May fragment} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.flags.lastFragment-22"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.flags.lastFragment-22"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {Last fragment} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.fragmentOffset-23"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.fragmentOffset-23"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.ttl-24"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.ttl-24"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {64} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{64}} \
+ -stepValue {64} \
+ -fixedBits {64} \
+ -fieldValue {64} \
+ -auto False \
+ -randomMask {64} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {64} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.protocol-25"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.protocol-25"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {17} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{61}} \
+ -stepValue {61} \
+ -fixedBits {61} \
+ -fieldValue {UDP} \
+ -auto True \
+ -randomMask {61} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {61} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.checksum-26"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.checksum-26"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto True \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.srcIp-27"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.srcIp-27"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue $srcIp \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0.0.0.0}} \
+ -stepValue {0.0.0.0} \
+ -fixedBits {0.0.0.0} \
+ -fieldValue $srcIp \
+ -auto False \
+ -randomMask {0.0.0.0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0.0.0.0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.dstIp-28"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.dstIp-28"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue $dstIp \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0.0.0.0}} \
+ -stepValue {0.0.0.1} \
+ -fixedBits {0.0.0.0} \
+ -fieldValue $dstIp \
+ -auto False \
+ -randomMask {0.0.0.0} \
+ -trackingEnabled False \
+ -valueType $L3ValueType \
+ -activeFieldChoice False \
+ -startValue $dstIp \
+ -countValue $L3CountValue
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.nop-29"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.nop-29"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {1} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{1}} \
+ -stepValue {1} \
+ -fixedBits {1} \
+ -fieldValue {1} \
+ -auto False \
+ -randomMask {1} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice True \
+ -startValue {1} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.security.type-30"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.security.type-30"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {130} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{130}} \
+ -stepValue {130} \
+ -fixedBits {130} \
+ -fieldValue {130} \
+ -auto False \
+ -randomMask {130} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {130} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.security.length-31"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.security.length-31"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {11} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{11}} \
+ -stepValue {11} \
+ -fixedBits {11} \
+ -fieldValue {11} \
+ -auto False \
+ -randomMask {11} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {11} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.security.security-32"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.security.security-32"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {Unclassified} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.security.compartments-33"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.security.compartments-33"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.security.handling-34"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.security.handling-34"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.security.tcc-35"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.security.tcc-35"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.lsrr.type-36"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.lsrr.type-36"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {131} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{131}} \
+ -stepValue {131} \
+ -fixedBits {131} \
+ -fieldValue {131} \
+ -auto False \
+ -randomMask {131} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {131} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.lsrr.length-37"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.lsrr.length-37"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {8} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{8}} \
+ -stepValue {8} \
+ -fixedBits {8} \
+ -fieldValue {8} \
+ -auto False \
+ -randomMask {8} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {8} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.pointer-38"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.pointer-38"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {4} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{4}} \
+ -stepValue {4} \
+ -fixedBits {4} \
+ -fieldValue {4} \
+ -auto False \
+ -randomMask {4} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {4} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.routeData-39"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.routeData-39"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.ssrr.type-40"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.ssrr.type-40"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {137} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{137}} \
+ -stepValue {137} \
+ -fixedBits {137} \
+ -fieldValue {137} \
+ -auto False \
+ -randomMask {137} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {137} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.ssrr.length-41"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.ssrr.length-41"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {8} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{8}} \
+ -stepValue {8} \
+ -fixedBits {8} \
+ -fieldValue {8} \
+ -auto False \
+ -randomMask {8} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {8} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.recordRoute.type-42"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.recordRoute.type-42"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {7} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{7}} \
+ -stepValue {7} \
+ -fixedBits {7} \
+ -fieldValue {7} \
+ -auto False \
+ -randomMask {7} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {7} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.recordRoute.length-43"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.recordRoute.length-43"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {8} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{8}} \
+ -stepValue {8} \
+ -fixedBits {8} \
+ -fieldValue {8} \
+ -auto False \
+ -randomMask {8} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {8} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.streamId.type-44"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.streamId.type-44"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {136} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{136}} \
+ -stepValue {136} \
+ -fixedBits {136} \
+ -fieldValue {136} \
+ -auto False \
+ -randomMask {136} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {136} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.streamId.length-45"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.streamId.length-45"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {4} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{4}} \
+ -stepValue {4} \
+ -fixedBits {4} \
+ -fieldValue {4} \
+ -auto False \
+ -randomMask {4} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {4} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.streamId.id-46"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.streamId.id-46"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.timestamp.type-47"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.timestamp.type-47"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {68} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{68}} \
+ -stepValue {68} \
+ -fixedBits {68} \
+ -fieldValue {68} \
+ -auto False \
+ -randomMask {68} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {68} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.timestamp.length-48"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.timestamp.length-48"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {12} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{12}} \
+ -stepValue {12} \
+ -fixedBits {12} \
+ -fieldValue {12} \
+ -auto False \
+ -randomMask {12} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {12} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.timestamp.pointer-49"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.timestamp.pointer-49"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {5} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{5}} \
+ -stepValue {5} \
+ -fixedBits {5} \
+ -fieldValue {5} \
+ -auto False \
+ -randomMask {5} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {5} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.timestamp.overflow-50"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.timestamp.overflow-50"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.timestamp.flags-51"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.timestamp.flags-51"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {Timestamps only, in consecutive 32-bit words} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.timestamp.pair.address-52"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.timestamp.pair.address-52"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.timestamp.pair.timestamp-53"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.timestamp.pair.timestamp-53"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.last-54"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.last-54"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
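+            # Router Alert option (RFC 2113): 0x94 is option type 148; the
+            # bare "94" in -singleValue/-fieldValue appears to be the same
+            # hex value written without the 0x prefix.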
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.routerAlert.type-55"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.routerAlert.type-55"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {94} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{0x94}} \
+ -stepValue {0x94} \
+ -fixedBits {0x94} \
+ -fieldValue {94} \
+ -auto False \
+ -randomMask {0x94} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0x94} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.routerAlert.length-56"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.routerAlert.length-56"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {4} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{0x04}} \
+ -stepValue {0x04} \
+ -fixedBits {0x04} \
+ -fieldValue {4} \
+ -auto False \
+ -randomMask {0x04} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0x04} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.routerAlert.value-57"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.routerAlert.value-57"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {Router shall examine packet} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"/field:"ipv4.header.options.pad-58"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.pad-58"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto True \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+ }
+
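+        # The UDP stack below is appended only when layer 4 is enabled;
+        # stack_number keeps the generated stack names (ethernet-1, ipv4-2,
+        # udp-3, fcs-4) consecutive whether or not this block runs.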
+        if {$l4Enabled == 1} {
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"udp-3"
+ #
+ set sg_stack $ixNetSG_Stack(2)/stack:"udp-$stack_number"
+ sg_commit
+ set sg_stack [lindex [ixNet remapIds $sg_stack] 0]
+ set ixNetSG_Stack(3) $sg_stack
+ incr stack_number
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"udp-3"/field:"udp.header.srcPort-1"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"udp.header.srcPort-1"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue $srcPort \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{63}} \
+ -stepValue {63} \
+ -fixedBits {63} \
+ -fieldValue $srcPort \
+ -auto False \
+ -randomMask {63} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue $srcPort \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
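+            # Unlike the fixed source port, the destination port honours
+            # $L4ValueType and $L4CountValue, so it can stay a single value
+            # or step through a range starting at $dstPort (step 1).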
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"udp-3"/field:"udp.header.dstPort-2"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"udp.header.dstPort-2"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue $dstPort \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{63}} \
+ -stepValue {1} \
+ -fixedBits {63} \
+ -fieldValue $dstPort \
+ -auto False \
+ -randomMask {63} \
+ -trackingEnabled False \
+ -valueType $L4ValueType \
+ -activeFieldChoice False \
+ -startValue $dstPort \
+ -countValue $L4CountValue
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
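+            # Length and checksum are left at -auto True, so IxNetwork
+            # recomputes both whenever the payload or port values change;
+            # the literal values here are only placeholders.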
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"udp-3"/field:"udp.header.length-3"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"udp.header.length-3"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {26} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{8}} \
+ -stepValue {8} \
+ -fixedBits {8} \
+ -fieldValue {26} \
+ -auto True \
+ -randomMask {8} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {8} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"udp-3"/field:"udp.header.checksum-4"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"udp.header.checksum-4"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto True \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+ }
+
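+        # The Ethernet FCS field also stays at -auto True, i.e. the frame
+        # checksum is computed at transmit time rather than taken from the
+        # zero placeholder configured below.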
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"fcs-4"
+ #
+ set sg_stack $ixNetSG_Stack(2)/stack:"fcs-$stack_number"
+ sg_commit
+ set sg_stack [lindex [ixNet remapIds $sg_stack] 0]
+ set ixNetSG_Stack(3) $sg_stack
+ incr stack_number
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/stack:"fcs-4"/field:"ethernet.fcs-1"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ethernet.fcs-1"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto True \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
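+        # Empty -distributions lists on both objects mean the traffic item
+        # is not split into per-field flow distributions; it is sent as a
+        # single flow group.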
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/configElement:1/transmissionDistribution
+ #
+ set sg_transmissionDistribution $ixNetSG_Stack(2)/transmissionDistribution
+ ixNet setMultiAttrs $sg_transmissionDistribution \
+ -distributions {}
+ sg_commit
+ set sg_transmissionDistribution [lindex [ixNet remapIds $sg_transmissionDistribution] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/transmissionDistribution
+ #
+ set sg_transmissionDistribution $ixNetSG_Stack(1)/transmissionDistribution
+ ixNet setMultiAttrs $sg_transmissionDistribution \
+ -distributions {}
+ sg_commit
+ set sg_transmissionDistribution [lindex [ixNet remapIds $sg_transmissionDistribution] 0]
+
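+        # Flow tracking is configured but effectively idle: -trackBy is
+        # empty, egress tracking is disabled and the latency-bin table is
+        # present but inactive (-enabled False).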
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking
+ #
+ set sg_tracking $ixNetSG_Stack(1)/tracking
+ ixNet setMultiAttrs $sg_tracking \
+ -offset 0 \
+ -oneToOneMesh False \
+ -trackBy {} \
+ -values {} \
+ -fieldWidth thirtyTwoBits \
+ -protocolOffset {Root.0}
+ ixNet setMultiAttrs $sg_tracking/egress \
+ -offset {Outer VLAN Priority (3 bits)} \
+ -enabled False \
+ -customOffsetBits 0 \
+ -encapsulation {Ethernet} \
+ -customWidthBits 0
+ ixNet setMultiAttrs $sg_tracking/latencyBin \
+ -enabled False \
+ -binLimits {1 1.42 2 2.82 4 5.66 8 11.32} \
+ -numberOfBins 8
+ sg_commit
+ set sg_tracking [lindex [ixNet remapIds $sg_tracking] 0]
+ set ixNetSG_Stack(2) $sg_tracking
+
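+        # The egress fieldOffset tree below mirrors the packet's header
+        # stacks so a tracking offset could be picked per field; with
+        # egress tracking disabled above, these entries only carry default
+        # values.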
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ethernet-1"
+ #
+ set sg_stack $ixNetSG_Stack(2)/egress/fieldOffset/stack:"ethernet-1"
+ sg_commit
+ set sg_stack [lindex [ixNet remapIds $sg_stack] 0]
+ set ixNetSG_Stack(3) $sg_stack
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ethernet-1"/field:"ethernet.header.destinationAddress-1"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ethernet.header.destinationAddress-1"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {00:00:00:00:00:00} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{00:00:00:00:00:00}} \
+ -stepValue {00:00:00:00:00:00} \
+ -fixedBits {00:00:00:00:00:00} \
+ -fieldValue {00:00:00:00:00:00} \
+ -auto False \
+ -randomMask {00:00:00:00:00:00} \
+ -trackingEnabled True \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {00:00:00:00:00:00} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ethernet-1"/field:"ethernet.header.sourceAddress-2"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ethernet.header.sourceAddress-2"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {00:00:00:00:00:00} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{00:00:00:00:00:00}} \
+ -stepValue {00:00:00:00:00:00} \
+ -fixedBits {00:00:00:00:00:00} \
+ -fieldValue {00:00:00:00:00:00} \
+ -auto False \
+ -randomMask {00:00:00:00:00:00} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {00:00:00:00:00:00} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ethernet-1"/field:"ethernet.header.etherType-3"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ethernet.header.etherType-3"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {800} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0xFFFF}} \
+ -stepValue {0xFFFF} \
+ -fixedBits {0xFFFF} \
+ -fieldValue {800} \
+ -auto True \
+ -randomMask {0xFFFF} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0xFFFF} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ethernet-1"/field:"ethernet.header.pfcQueue-4"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ethernet.header.pfcQueue-4"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"
+ #
+ set sg_stack $ixNetSG_Stack(2)/egress/fieldOffset/stack:"ipv4-2"
+ sg_commit
+ set sg_stack [lindex [ixNet remapIds $sg_stack] 0]
+ set ixNetSG_Stack(3) $sg_stack
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.version-1"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.version-1"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {4} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{4}} \
+ -stepValue {4} \
+ -fixedBits {4} \
+ -fieldValue {4} \
+ -auto False \
+ -randomMask {4} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {4} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.headerLength-2"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.headerLength-2"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {5} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {5} \
+ -auto True \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.priority.raw-3"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.priority.raw-3"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.priority.tos.precedence-4"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.priority.tos.precedence-4"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {000 Routine} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice True \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.priority.tos.delay-5"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.priority.tos.delay-5"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {Normal} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice True \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.priority.tos.throughput-6"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.priority.tos.throughput-6"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {Normal} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice True \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.priority.tos.reliability-7"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.priority.tos.reliability-7"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {Normal} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice True \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.priority.tos.monetary-8"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.priority.tos.monetary-8"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {Normal} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice True \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.priority.tos.unused-9"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.priority.tos.unused-9"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice True \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.priority.ds.phb.defaultPHB.defaultPHB-10"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.priority.ds.phb.defaultPHB.defaultPHB-10"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.priority.ds.phb.defaultPHB.unused-11"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.priority.ds.phb.defaultPHB.unused-11"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.priority.ds.phb.classSelectorPHB.classSelectorPHB-12"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.priority.ds.phb.classSelectorPHB.classSelectorPHB-12"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {8} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{8}} \
+ -stepValue {8} \
+ -fixedBits {8} \
+ -fieldValue {Precedence 1} \
+ -auto False \
+ -randomMask {8} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {8} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.priority.ds.phb.classSelectorPHB.unused-13"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.priority.ds.phb.classSelectorPHB.unused-13"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.priority.ds.phb.assuredForwardingPHB.assuredForwardingPHB-14"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.priority.ds.phb.assuredForwardingPHB.assuredForwardingPHB-14"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {10} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{10}} \
+ -stepValue {10} \
+ -fixedBits {10} \
+ -fieldValue {Class 1, Low drop precedence} \
+ -auto False \
+ -randomMask {10} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {10} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.priority.ds.phb.assuredForwardingPHB.unused-15"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.priority.ds.phb.assuredForwardingPHB.unused-15"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.priority.ds.phb.expeditedForwardingPHB.expeditedForwardingPHB-16"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.priority.ds.phb.expeditedForwardingPHB.expeditedForwardingPHB-16"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {46} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{46}} \
+ -stepValue {46} \
+ -fixedBits {46} \
+ -fieldValue {46} \
+ -auto False \
+ -randomMask {46} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {46} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.priority.ds.phb.expeditedForwardingPHB.unused-17"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.priority.ds.phb.expeditedForwardingPHB.unused-17"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.totalLength-18"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.totalLength-18"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {92} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{20}} \
+ -stepValue {20} \
+ -fixedBits {20} \
+ -fieldValue {92} \
+ -auto True \
+ -randomMask {20} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {20} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.identification-19"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.identification-19"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.flags.reserved-20"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.flags.reserved-20"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.flags.fragment-21"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.flags.fragment-21"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {May fragment} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.flags.lastFragment-22"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.flags.lastFragment-22"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {Last fragment} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.fragmentOffset-23"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.fragmentOffset-23"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.ttl-24"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.ttl-24"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {64} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{64}} \
+ -stepValue {64} \
+ -fixedBits {64} \
+ -fieldValue {64} \
+ -auto False \
+ -randomMask {64} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {64} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.protocol-25"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.protocol-25"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {17} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{61}} \
+ -stepValue {61} \
+ -fixedBits {61} \
+ -fieldValue {UDP} \
+ -auto True \
+ -randomMask {61} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {61} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.checksum-26"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.checksum-26"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto True \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.srcIp-27"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.srcIp-27"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0.0.0.0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0.0.0.0}} \
+ -stepValue {0.0.0.0} \
+ -fixedBits {0.0.0.0} \
+ -fieldValue {0.0.0.0} \
+ -auto False \
+ -randomMask {0.0.0.0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0.0.0.0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.dstIp-28"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.dstIp-28"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0.0.0.0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0.0.0.0}} \
+ -stepValue {0.0.0.0} \
+ -fixedBits {0.0.0.0} \
+ -fieldValue {0.0.0.0} \
+ -auto False \
+ -randomMask {0.0.0.0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0.0.0.0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.nop-29"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.nop-29"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {1} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{1}} \
+ -stepValue {1} \
+ -fixedBits {1} \
+ -fieldValue {1} \
+ -auto False \
+ -randomMask {1} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice True \
+ -startValue {1} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.security.type-30"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.security.type-30"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {130} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{130}} \
+ -stepValue {130} \
+ -fixedBits {130} \
+ -fieldValue {130} \
+ -auto False \
+ -randomMask {130} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {130} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.security.length-31"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.security.length-31"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {11} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{11}} \
+ -stepValue {11} \
+ -fixedBits {11} \
+ -fieldValue {11} \
+ -auto False \
+ -randomMask {11} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {11} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.security.security-32"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.security.security-32"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {Unclassified} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.security.compartments-33"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.security.compartments-33"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.security.handling-34"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.security.handling-34"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.security.tcc-35"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.security.tcc-35"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.lsrr.type-36"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.lsrr.type-36"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {131} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{131}} \
+ -stepValue {131} \
+ -fixedBits {131} \
+ -fieldValue {131} \
+ -auto False \
+ -randomMask {131} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {131} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.lsrr.length-37"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.lsrr.length-37"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {8} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{8}} \
+ -stepValue {8} \
+ -fixedBits {8} \
+ -fieldValue {8} \
+ -auto False \
+ -randomMask {8} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {8} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.pointer-38"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.pointer-38"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {4} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{4}} \
+ -stepValue {4} \
+ -fixedBits {4} \
+ -fieldValue {4} \
+ -auto False \
+ -randomMask {4} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {4} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.routeData-39"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.routeData-39"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.ssrr.type-40"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.ssrr.type-40"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {137} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{137}} \
+ -stepValue {137} \
+ -fixedBits {137} \
+ -fieldValue {137} \
+ -auto False \
+ -randomMask {137} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {137} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.ssrr.length-41"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.ssrr.length-41"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {8} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{8}} \
+ -stepValue {8} \
+ -fixedBits {8} \
+ -fieldValue {8} \
+ -auto False \
+ -randomMask {8} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {8} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.recordRoute.type-42"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.recordRoute.type-42"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {7} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{7}} \
+ -stepValue {7} \
+ -fixedBits {7} \
+ -fieldValue {7} \
+ -auto False \
+ -randomMask {7} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {7} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.recordRoute.length-43"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.recordRoute.length-43"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {8} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{8}} \
+ -stepValue {8} \
+ -fixedBits {8} \
+ -fieldValue {8} \
+ -auto False \
+ -randomMask {8} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {8} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.streamId.type-44"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.streamId.type-44"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {136} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{136}} \
+ -stepValue {136} \
+ -fixedBits {136} \
+ -fieldValue {136} \
+ -auto False \
+ -randomMask {136} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {136} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.streamId.length-45"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.streamId.length-45"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {4} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{4}} \
+ -stepValue {4} \
+ -fixedBits {4} \
+ -fieldValue {4} \
+ -auto False \
+ -randomMask {4} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {4} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.streamId.id-46"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.streamId.id-46"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.timestamp.type-47"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.timestamp.type-47"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {68} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{68}} \
+ -stepValue {68} \
+ -fixedBits {68} \
+ -fieldValue {68} \
+ -auto False \
+ -randomMask {68} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {68} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.timestamp.length-48"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.timestamp.length-48"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {12} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{12}} \
+ -stepValue {12} \
+ -fixedBits {12} \
+ -fieldValue {12} \
+ -auto False \
+ -randomMask {12} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {12} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.timestamp.pointer-49"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.timestamp.pointer-49"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {5} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{5}} \
+ -stepValue {5} \
+ -fixedBits {5} \
+ -fieldValue {5} \
+ -auto False \
+ -randomMask {5} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {5} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.timestamp.overflow-50"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.timestamp.overflow-50"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.timestamp.flags-51"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.timestamp.flags-51"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {Timestamps only, in consecutive 32-bit words} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.timestamp.pair.address-52"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.timestamp.pair.address-52"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.timestamp.pair.timestamp-53"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.timestamp.pair.timestamp-53"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.last-54"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.last-54"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.routerAlert.type-55"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.routerAlert.type-55"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {94} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{0x94}} \
+ -stepValue {0x94} \
+ -fixedBits {0x94} \
+ -fieldValue {94} \
+ -auto False \
+ -randomMask {0x94} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0x94} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.routerAlert.length-56"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.routerAlert.length-56"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {4} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{0x04}} \
+ -stepValue {0x04} \
+ -fixedBits {0x04} \
+ -fieldValue {4} \
+ -auto False \
+ -randomMask {0x04} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0x04} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.routerAlert.value-57"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.routerAlert.value-57"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {Router shall examine packet} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.pad-58"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.pad-58"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto True \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"udp-3"
+ #
+ set sg_stack $ixNetSG_Stack(2)/egress/fieldOffset/stack:"udp-3"
+ sg_commit
+ set sg_stack [lindex [ixNet remapIds $sg_stack] 0]
+ set ixNetSG_Stack(3) $sg_stack
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"udp-3"/field:"udp.header.srcPort-1"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"udp.header.srcPort-1"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {63} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{63}} \
+ -stepValue {63} \
+ -fixedBits {63} \
+ -fieldValue {Default} \
+ -auto True \
+ -randomMask {63} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {63} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"udp-3"/field:"udp.header.dstPort-2"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"udp.header.dstPort-2"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {63} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{63}} \
+ -stepValue {63} \
+ -fixedBits {63} \
+ -fieldValue {Default} \
+ -auto True \
+ -randomMask {63} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {63} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"udp-3"/field:"udp.header.length-3"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"udp.header.length-3"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {72} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{8}} \
+ -stepValue {8} \
+ -fixedBits {8} \
+ -fieldValue {72} \
+ -auto True \
+ -randomMask {8} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {8} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"udp-3"/field:"udp.header.checksum-4"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"udp.header.checksum-4"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto True \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"fcs-4"
+ #
+ set sg_stack $ixNetSG_Stack(2)/egress/fieldOffset/stack:"fcs-4"
+ sg_commit
+ set sg_stack [lindex [ixNet remapIds $sg_stack] 0]
+ set ixNetSG_Stack(3) $sg_stack
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/tracking/egress/fieldOffset/stack:"fcs-4"/field:"ethernet.fcs-1"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ethernet.fcs-1"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto True \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
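+ #
+ # Note: the egressTracking object created below watches the Outer VLAN
+ # Priority bits (3 bits, Ethernet encapsulation) of frames arriving back
+ # from the device under test; the fieldOffset stacks that follow it
+ # re-declare the same ethernet/ipv4/udp/fcs field templates for that
+ # egress view.
+ #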
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1
+ #
+ set sg_egressTracking [ixNet add $ixNetSG_Stack(1) egressTracking]
+ ixNet setMultiAttrs $sg_egressTracking \
+ -offset {Outer VLAN Priority (3 bits)} \
+ -customOffsetBits 0 \
+ -encapsulation {Ethernet} \
+ -customWidthBits 0
+ sg_commit
+ set sg_egressTracking [lindex [ixNet remapIds $sg_egressTracking] 0]
+ set ixNetSG_Stack(2) $sg_egressTracking
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ethernet-1"
+ #
+ set sg_stack $ixNetSG_Stack(2)/fieldOffset/stack:"ethernet-1"
+ sg_commit
+ set sg_stack [lindex [ixNet remapIds $sg_stack] 0]
+ set ixNetSG_Stack(3) $sg_stack
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ethernet-1"/field:"ethernet.header.destinationAddress-1"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ethernet.header.destinationAddress-1"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {00:00:00:00:00:00} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{00:00:00:00:00:00}} \
+ -stepValue {00:00:00:00:00:00} \
+ -fixedBits {00:00:00:00:00:00} \
+ -fieldValue {00:00:00:00:00:00} \
+ -auto False \
+ -randomMask {00:00:00:00:00:00} \
+ -trackingEnabled True \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {00:00:00:00:00:00} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ethernet-1"/field:"ethernet.header.sourceAddress-2"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ethernet.header.sourceAddress-2"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {00:00:00:00:00:00} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{00:00:00:00:00:00}} \
+ -stepValue {00:00:00:00:00:00} \
+ -fixedBits {00:00:00:00:00:00} \
+ -fieldValue {00:00:00:00:00:00} \
+ -auto False \
+ -randomMask {00:00:00:00:00:00} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {00:00:00:00:00:00} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ethernet-1"/field:"ethernet.header.etherType-3"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ethernet.header.etherType-3"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {800} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0xFFFF}} \
+ -stepValue {0xFFFF} \
+ -fixedBits {0xFFFF} \
+ -fieldValue {800} \
+ -auto True \
+ -randomMask {0xFFFF} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0xFFFF} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ethernet-1"/field:"ethernet.header.pfcQueue-4"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ethernet.header.pfcQueue-4"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"
+ #
+ set sg_stack $ixNetSG_Stack(2)/fieldOffset/stack:"ipv4-2"
+ sg_commit
+ set sg_stack [lindex [ixNet remapIds $sg_stack] 0]
+ set ixNetSG_Stack(3) $sg_stack
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.version-1"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.version-1"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {4} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{4}} \
+ -stepValue {4} \
+ -fixedBits {4} \
+ -fieldValue {4} \
+ -auto False \
+ -randomMask {4} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {4} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.headerLength-2"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.headerLength-2"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {5} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {5} \
+ -auto True \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.priority.raw-3"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.priority.raw-3"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.priority.tos.precedence-4"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.priority.tos.precedence-4"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {000 Routine} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice True \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.priority.tos.delay-5"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.priority.tos.delay-5"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {Normal} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice True \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.priority.tos.throughput-6"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.priority.tos.throughput-6"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {Normal} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice True \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.priority.tos.reliability-7"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.priority.tos.reliability-7"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {Normal} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice True \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.priority.tos.monetary-8"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.priority.tos.monetary-8"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {Normal} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice True \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.priority.tos.unused-9"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.priority.tos.unused-9"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice True \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.priority.ds.phb.defaultPHB.defaultPHB-10"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.priority.ds.phb.defaultPHB.defaultPHB-10"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.priority.ds.phb.defaultPHB.unused-11"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.priority.ds.phb.defaultPHB.unused-11"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.priority.ds.phb.classSelectorPHB.classSelectorPHB-12"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.priority.ds.phb.classSelectorPHB.classSelectorPHB-12"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {8} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{8}} \
+ -stepValue {8} \
+ -fixedBits {8} \
+ -fieldValue {Precedence 1} \
+ -auto False \
+ -randomMask {8} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {8} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.priority.ds.phb.classSelectorPHB.unused-13"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.priority.ds.phb.classSelectorPHB.unused-13"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.priority.ds.phb.assuredForwardingPHB.assuredForwardingPHB-14"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.priority.ds.phb.assuredForwardingPHB.assuredForwardingPHB-14"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {10} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{10}} \
+ -stepValue {10} \
+ -fixedBits {10} \
+ -fieldValue {Class 1, Low drop precedence} \
+ -auto False \
+ -randomMask {10} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {10} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.priority.ds.phb.assuredForwardingPHB.unused-15"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.priority.ds.phb.assuredForwardingPHB.unused-15"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.priority.ds.phb.expeditedForwardingPHB.expeditedForwardingPHB-16"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.priority.ds.phb.expeditedForwardingPHB.expeditedForwardingPHB-16"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {46} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{46}} \
+ -stepValue {46} \
+ -fixedBits {46} \
+ -fieldValue {46} \
+ -auto False \
+ -randomMask {46} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {46} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.priority.ds.phb.expeditedForwardingPHB.unused-17"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.priority.ds.phb.expeditedForwardingPHB.unused-17"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.totalLength-18"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.totalLength-18"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {92} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{20}} \
+ -stepValue {20} \
+ -fixedBits {20} \
+ -fieldValue {92} \
+ -auto True \
+ -randomMask {20} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {20} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.identification-19"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.identification-19"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.flags.reserved-20"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.flags.reserved-20"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.flags.fragment-21"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.flags.fragment-21"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {May fragment} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.flags.lastFragment-22"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.flags.lastFragment-22"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {Last fragment} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.fragmentOffset-23"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.fragmentOffset-23"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.ttl-24"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.ttl-24"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {64} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{64}} \
+ -stepValue {64} \
+ -fixedBits {64} \
+ -fieldValue {64} \
+ -auto False \
+ -randomMask {64} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {64} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.protocol-25"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.protocol-25"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {17} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{61}} \
+ -stepValue {61} \
+ -fixedBits {61} \
+ -fieldValue {UDP} \
+ -auto True \
+ -randomMask {61} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {61} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.checksum-26"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.checksum-26"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto True \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.srcIp-27"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.srcIp-27"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0.0.0.0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0.0.0.0}} \
+ -stepValue {0.0.0.0} \
+ -fixedBits {0.0.0.0} \
+ -fieldValue {0.0.0.0} \
+ -auto False \
+ -randomMask {0.0.0.0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0.0.0.0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.dstIp-28"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.dstIp-28"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0.0.0.0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0.0.0.0}} \
+ -stepValue {0.0.0.0} \
+ -fixedBits {0.0.0.0} \
+ -fieldValue {0.0.0.0} \
+ -auto False \
+ -randomMask {0.0.0.0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0.0.0.0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.nop-29"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.nop-29"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {1} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{1}} \
+ -stepValue {1} \
+ -fixedBits {1} \
+ -fieldValue {1} \
+ -auto False \
+ -randomMask {1} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice True \
+ -startValue {1} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.security.type-30"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.security.type-30"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {130} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{130}} \
+ -stepValue {130} \
+ -fixedBits {130} \
+ -fieldValue {130} \
+ -auto False \
+ -randomMask {130} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {130} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.security.length-31"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.security.length-31"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {11} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{11}} \
+ -stepValue {11} \
+ -fixedBits {11} \
+ -fieldValue {11} \
+ -auto False \
+ -randomMask {11} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {11} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.security.security-32"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.security.security-32"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {Unclassified} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.security.compartments-33"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.security.compartments-33"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.security.handling-34"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.security.handling-34"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.security.tcc-35"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.security.tcc-35"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.lsrr.type-36"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.lsrr.type-36"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {131} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{131}} \
+ -stepValue {131} \
+ -fixedBits {131} \
+ -fieldValue {131} \
+ -auto False \
+ -randomMask {131} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {131} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.lsrr.length-37"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.lsrr.length-37"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {8} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{8}} \
+ -stepValue {8} \
+ -fixedBits {8} \
+ -fieldValue {8} \
+ -auto False \
+ -randomMask {8} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {8} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.pointer-38"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.pointer-38"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {4} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{4}} \
+ -stepValue {4} \
+ -fixedBits {4} \
+ -fieldValue {4} \
+ -auto False \
+ -randomMask {4} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {4} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.routeData-39"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.routeData-39"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.ssrr.type-40"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.ssrr.type-40"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {137} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{137}} \
+ -stepValue {137} \
+ -fixedBits {137} \
+ -fieldValue {137} \
+ -auto False \
+ -randomMask {137} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {137} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.ssrr.length-41"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.ssrr.length-41"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {8} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{8}} \
+ -stepValue {8} \
+ -fixedBits {8} \
+ -fieldValue {8} \
+ -auto False \
+ -randomMask {8} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {8} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.recordRoute.type-42"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.recordRoute.type-42"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {7} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{7}} \
+ -stepValue {7} \
+ -fixedBits {7} \
+ -fieldValue {7} \
+ -auto False \
+ -randomMask {7} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {7} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.recordRoute.length-43"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.recordRoute.length-43"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {8} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{8}} \
+ -stepValue {8} \
+ -fixedBits {8} \
+ -fieldValue {8} \
+ -auto False \
+ -randomMask {8} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {8} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.streamId.type-44"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.streamId.type-44"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {136} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{136}} \
+ -stepValue {136} \
+ -fixedBits {136} \
+ -fieldValue {136} \
+ -auto False \
+ -randomMask {136} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {136} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.streamId.length-45"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.streamId.length-45"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {4} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{4}} \
+ -stepValue {4} \
+ -fixedBits {4} \
+ -fieldValue {4} \
+ -auto False \
+ -randomMask {4} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {4} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.streamId.id-46"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.streamId.id-46"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.timestamp.type-47"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.timestamp.type-47"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {68} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{68}} \
+ -stepValue {68} \
+ -fixedBits {68} \
+ -fieldValue {68} \
+ -auto False \
+ -randomMask {68} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {68} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.timestamp.length-48"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.timestamp.length-48"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {12} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{12}} \
+ -stepValue {12} \
+ -fixedBits {12} \
+ -fieldValue {12} \
+ -auto False \
+ -randomMask {12} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {12} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.timestamp.pointer-49"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.timestamp.pointer-49"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {5} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{5}} \
+ -stepValue {5} \
+ -fixedBits {5} \
+ -fieldValue {5} \
+ -auto False \
+ -randomMask {5} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {5} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.timestamp.overflow-50"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.timestamp.overflow-50"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.timestamp.flags-51"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.timestamp.flags-51"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {Timestamps only, in consecutive 32-bit words} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.timestamp.pair.address-52"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.timestamp.pair.address-52"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.timestamp.pair.timestamp-53"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.timestamp.pair.timestamp-53"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.last-54"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.last-54"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.routerAlert.type-55"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.routerAlert.type-55"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {94} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{0x94}} \
+ -stepValue {0x94} \
+ -fixedBits {0x94} \
+ -fieldValue {94} \
+ -auto False \
+ -randomMask {0x94} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0x94} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.routerAlert.length-56"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.routerAlert.length-56"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {4} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{0x04}} \
+ -stepValue {0x04} \
+ -fixedBits {0x04} \
+ -fieldValue {4} \
+ -auto False \
+ -randomMask {0x04} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0x04} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.nextOption.option.routerAlert.value-57"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.nextOption.option.routerAlert.value-57"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {Router shall examine packet} \
+ -auto False \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"ipv4-2"/field:"ipv4.header.options.pad-58"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ipv4.header.options.pad-58"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled False \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto True \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"udp-3"
+ #
+ set sg_stack $ixNetSG_Stack(2)/fieldOffset/stack:"udp-3"
+ sg_commit
+ set sg_stack [lindex [ixNet remapIds $sg_stack] 0]
+ set ixNetSG_Stack(3) $sg_stack
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"udp-3"/field:"udp.header.srcPort-1"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"udp.header.srcPort-1"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {63} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{63}} \
+ -stepValue {63} \
+ -fixedBits {63} \
+ -fieldValue {Default} \
+ -auto True \
+ -randomMask {63} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {63} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"udp-3"/field:"udp.header.dstPort-2"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"udp.header.dstPort-2"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {63} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{63}} \
+ -stepValue {63} \
+ -fixedBits {63} \
+ -fieldValue {Default} \
+ -auto True \
+ -randomMask {63} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {63} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"udp-3"/field:"udp.header.length-3"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"udp.header.length-3"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {72} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{8}} \
+ -stepValue {8} \
+ -fixedBits {8} \
+ -fieldValue {72} \
+ -auto True \
+ -randomMask {8} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {8} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"udp-3"/field:"udp.header.checksum-4"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"udp.header.checksum-4"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto True \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"fcs-4"
+ #
+ set sg_stack $ixNetSG_Stack(2)/fieldOffset/stack:"fcs-4"
+ sg_commit
+ set sg_stack [lindex [ixNet remapIds $sg_stack] 0]
+ set ixNetSG_Stack(3) $sg_stack
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/egressTracking:1/fieldOffset/stack:"fcs-4"/field:"ethernet.fcs-1"
+ #
+ set sg_field $ixNetSG_Stack(3)/field:"ethernet.fcs-1"
+ ixNet setMultiAttrs $sg_field \
+ -singleValue {0} \
+ -seed {1} \
+ -optionalEnabled True \
+ -fullMesh False \
+ -valueList {{0}} \
+ -stepValue {0} \
+ -fixedBits {0} \
+ -fieldValue {0} \
+ -auto True \
+ -randomMask {0} \
+ -trackingEnabled False \
+ -valueType singleValue \
+ -activeFieldChoice False \
+ -startValue {0} \
+ -countValue {1}
+ sg_commit
+ set sg_field [lindex [ixNet remapIds $sg_field] 0]
+
+ #
+ # configuring the object that corresponds to /traffic/trafficItem:1/dynamicUpdate
+ #
+ set sg_dynamicUpdate $ixNetSG_Stack(1)/dynamicUpdate
+ ixNet setMultiAttrs $sg_dynamicUpdate \
+ -enabledSessionAwareTrafficFields {} \
+ -enabledDynamicUpdateFields {}
+ sg_commit
+ set sg_dynamicUpdate [lindex [ixNet remapIds $sg_dynamicUpdate] 0]
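+
+ #
+ # dynamicUpdate is left empty above, so no session-aware or dynamic
+ # field updates are applied to this traffic item at run time.
+ #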
+
+ ###
+ ### /quickTest area
+ ###
+
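+ # Which quick test gets created depends on $rfc2544TestType (set earlier
+ # from the test parameters): "throughput" builds an RFC 2544 throughput
+ # quick test, "back2back" an RFC 2544 back-to-back (burst) quick test.
+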
+ #
+ # configuring the object that corresponds to /quickTest/rfc2544throughput:1
+ #
+ if {$rfc2544TestType == "throughput"} {
+ set sg_rfc2544throughput [ixNet add $ixNetSG_Stack(0)/quickTest rfc2544throughput]
+ ixNet setMultiAttrs $sg_rfc2544throughput \
+ -name {QuickTest1} \
+ -mode existingMode \
+ -inputParameters {{}}
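+ #
+ # testConfig drives the RFC 2544 search: $loadType selects the search
+ # strategy, the binary* attributes bound a binary search between 1% and
+ # 100% of max rate with $tolerance percent loss allowed, and $frameSize,
+ # $duration and $numTests are supplied by the caller.
+ #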
+ ixNet setMultiAttrs $sg_rfc2544throughput/testConfig \
+ -protocolItem {} \
+ -enableMinFrameSize True \
+ -framesize $frameSize \
+ -reportTputRateUnit mbps \
+ -duration $duration \
+ -numtrials $numTests \
+ -trafficType constantLoading \
+ -burstSize 1 \
+ -framesPerBurstGap 1 \
+ -tolerance 0 \
+ -frameLossUnit {0} \
+ -staggeredStart False \
+ -framesizeList $frameSize \
+ -frameSizeMode custom \
+ -rateSelect percentMaxRate \
+ -percentMaxRate 100 \
+ -resolution 0.01 \
+ -forceRegenerate False \
+ -reportSequenceError False \
+ -ipv4rate 50 \
+ -ipv6rate 50 \
+ -loadRateList $frameRate \
+ -fixedLoadUnit percentMaxRate \
+ -loadRateValue 80 \
+ -incrementLoadUnit percentMaxRate \
+ -initialIncrementLoadRate 10 \
+ -stepIncrementLoadRate 10 \
+ -maxIncrementLoadRate 100 \
+ -randomLoadUnit percentMaxRate \
+ -minRandomLoadRate 10 \
+ -maxRandomLoadRate 80 \
+ -countRandomLoadRate 1 \
+ -minFpsRate 1000 \
+ -minKbpsRate 64 \
+ -txDelay 2 \
+ -delayAfterTransmit 2 \
+ -minRandomFrameSize 64 \
+ -maxRandomFrameSize 1518 \
+ -countRandomFrameSize 1 \
+ -minIncrementFrameSize 64 \
+ -stepIncrementFrameSize 64 \
+ -maxIncrementFrameSize 1518 \
+ -calculateLatency True \
+ -latencyType storeForward \
+ -calculateJitter False \
+ -enableDataIntegrity False \
+ -enableBackoffIteration False \
+ -enableSaturationIteration False \
+ -enableStopTestOnHighLoss False \
+ -enableBackoffUseAs% False \
+ -backoffIteration 1 \
+ -saturationIteration 1 \
+ -stopTestOnHighLoss 0 \
+ -loadType $loadType \
+ -stepLoadUnit percentMaxRate \
+ -customLoadUnit percentMaxRate \
+ -comboLoadUnit percentMaxRate \
+ -binaryLoadUnit percentMaxRate \
+ -initialBinaryLoadRate 100 \
+ -minBinaryLoadRate 1 \
+ -maxBinaryLoadRate 100 \
+ -binaryResolution 1 \
+ -binaryBackoff 50 \
+ -binaryTolerance $tolerance \
+ -binaryFrameLossUnit % \
+ -comboFrameLossUnit % \
+ -stepFrameLossUnit % \
+ -initialStepLoadRate 10 \
+ -maxStepLoadRate 100 \
+ -stepStepLoadRate 10 \
+ -stepTolerance 0 \
+ -initialComboLoadRate 10 \
+ -maxComboLoadRate 100 \
+ -minComboLoadRate 10 \
+ -stepComboLoadRate 10 \
+ -comboResolution 1 \
+ -comboBackoff 50 \
+ -comboTolerance 0 \
+ -binarySearchType linear \
+ -unchangedValueList {0} \
+ -enableFastConvergence $fastConvergence \
+ -fastConvergenceDuration $convergenceDuration \
+ -fastConvergenceThreshold 10 \
+ -framesizeFixedValue $frameSize \
+ -gap 3 \
+ -unchangedInitial False \
+ -generateTrackingOptionAggregationFiles False \
+ -enableExtraIterations False \
+ -extraIterationOffsets {10, -10} \
+ -usePercentOffsets False \
+ -imixDistribution weight \
+ -imixAdd {0} \
+ -imixDelete {0} \
+ -imixData {{{{64}{{TOS S:0 S:0 S:0 S:0 S:0} S:0}{1 40}}{{128}{{TOS S:0 S:0 S:0 S:0 S:0} S:0}{1 30}}{{256}{{TOS S:0 S:0 S:0 S:0 S:0} S:0}{1 30}}}} \
+ -imixEnabled False \
+ -imixTemplates none \
+ -framesizeImixList $frameSize \
+ -imixTrafficType {UNCHNAGED} \
+ -mapType {oneToOne} \
+ -supportedTrafficTypes {mac,ipv4,ipv6,ipmix}
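+ #
+ # learnFrames sends learning frames before the trial so the DUT can
+ # populate its forwarding tables; cadence and fast-path behaviour come
+ # from $learningFrequency and $fastPathEnable.
+ #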
+ ixNet setMultiAttrs $sg_rfc2544throughput/learnFrames \
+ -learnFrequency $learningFrequency \
+ -learnNumFrames 10 \
+ -learnRate 100 \
+ -learnWaitTime 1000 \
+ -learnFrameSize $frameSize \
+ -fastPathLearnFrameSize $frameSize \
+ -learnWaitTimeBeforeTransmit 0 \
+ -learnSendMacOnly False \
+ -learnSendRouterSolicitation False \
+ -fastPathEnable $fastPathEnable \
+ -fastPathRate 100 \
+ -fastPathNumFrames 10
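+ #
+ # passCriteria: pass/fail evaluation is disabled here
+ # (-enablePassFail False); the threshold attributes below appear to be
+ # defaults and are not evaluated.
+ #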
+ ixNet setMultiAttrs $sg_rfc2544throughput/passCriteria \
+ -passCriteriaLoadRateMode average \
+ -passCriteriaLoadRateValue 100 \
+ -passCriteriaLoadRateScale mbps \
+ -enablePassFail False \
+ -enableRatePassFail False \
+ -enableLatencyPassFail False \
+ -enableStandardDeviationPassFail False \
+ -latencyThresholdValue 10 \
+ -latencyThresholdScale us \
+ -latencyThresholdMode average \
+ -latencyVariationThresholdValue 0 \
+ -latencyVariationThresholdScale us \
+ -latencyVarThresholdMode average \
+ -enableSequenceErrorsPassFail False \
+ -seqErrorsThresholdValue 0 \
+ -seqErrorsThresholdMode average \
+ -enableDataIntegrityPassFail False \
+ -dataErrorThresholdValue 0 \
+ -dataErrorThresholdMode average
+ sg_commit
+ set sg_rfc2544throughput [lindex [ixNet remapIds $sg_rfc2544throughput] 0]
+ set ixNetSG_Stack(1) $sg_rfc2544throughput
+
+ #
+ # configuring the object that corresponds to /quickTest/rfc2544throughput:1/protocols
+ #
+ set sg_protocols $ixNetSG_Stack(1)/protocols
+ ixNet setMultiAttrs $sg_protocols \
+ -protocolState default \
+ -waitAfterStart 120 \
+ -waitAfterStop 30
+ sg_commit
+ set sg_protocols [lindex [ixNet remapIds $sg_protocols] 0]
+
+ #
+ # configuring the object that corresponds to /quickTest/rfc2544throughput:1/trafficSelection:1
+ #
+ set sg_trafficSelection [ixNet add $ixNetSG_Stack(1) trafficSelection]
+ ixNet setMultiAttrs $sg_trafficSelection \
+ -id $ixNetSG_ref(26) \
+ -includeMode inTest \
+ -itemType trafficItem
+ sg_commit
+ set sg_trafficSelection [lindex [ixNet remapIds $sg_trafficSelection] 0]
+ ixNet commit
+
+ } elseif {$rfc2544TestType == "back2back"} {
+ #
+ # configuring the object that corresponds to /quickTest/rfc2544back2back:2
+ #
+ set sg_rfc2544back2back [ixNet add $ixNetSG_Stack(0)/quickTest rfc2544back2back]
+ ixNet setMultiAttrs $sg_rfc2544back2back \
+ -name {B2B} \
+ -mode existingMode \
+ -inputParameters {{}}
+ ixNet setMultiAttrs $sg_rfc2544back2back/testConfig \
+ -protocolItem {} \
+ -framesize $frameSize \
+ -reportTputRateUnit mbps \
+ -rfc2544ImixDataQoS False \
+ -detailedResultsEnabled True \
+ -rfc2889ordering noOrdering \
+ -floodedFramesEnabled False \
+ -duration $duration \
+ -numtrials $numTests \
+ -trafficType constantLoading \
+ -burstSize 1 \
+ -framesPerBurstGap 1 \
+ -tolerance 0 \
+ -frameLossUnit {0} \
+ -staggeredStart False \
+ -framesizeList $frameSize \
+ -frameSizeMode custom \
+ -rateSelect percentMaxRate \
+ -percentMaxRate 100 \
+ -resolution 0.01 \
+ -forceRegenerate False \
+ -reportSequenceError False \
+ -ipv4rate 50 \
+ -ipv6rate 50 \
+ -loadRateList $frameRate \
+ -minFpsRate 1000 \
+ -minKbpsRate 64 \
+ -txDelay 2 \
+ -delayAfterTransmit 2 \
+ -minRandomFrameSize 64 \
+ -maxRandomFrameSize 1518 \
+ -countRandomFrameSize 1 \
+ -minIncrementFrameSize 64 \
+ -stepIncrementFrameSize 64 \
+ -maxIncrementFrameSize 1518 \
+ -calculateLatency False \
+ -calibrateLatency False \
+ -latencyType cutThrough \
+ -calculateJitter False \
+ -enableDataIntegrity False \
+ -loadType $loadType \
+ -binaryFrameLossUnit % \
+ -loadUnit percentMaxRate \
+ -customLoadUnit percentMaxRate \
+ -randomLoadUnit percentMaxRate \
+ -incrementLoadUnit percentMaxRate \
+ -binaryResolution 1000 \
+ -binaryBackoff 50 \
+ -binaryTolerance $tolerance \
+ -initialIncrementLoadRate 100 \
+ -stepIncrementLoadRate 10 \
+ -maxIncrementLoadRate 100 \
+ -minRandomLoadRate 10 \
+ -maxRandomLoadRate 80 \
+ -countRandomLoadRate 1 \
+ -numFrames {100000} \
+ -loadRate 100 \
+ -enableMinFrameSize True \
+ -gap 3 \
+ -generateTrackingOptionAggregationFiles False \
+ -sendFullyMeshed False \
+ -imixDistribution weight \
+ -imixAdd {0} \
+ -imixDelete {0} \
+ -imixData {{{{64}{{TOS S:0 S:0 S:0 S:0 S:0} S:0}{1 40}}{{128}{{TOS S:0 S:0 S:0 S:0 S:0} S:0}{1 30}}{{256}{{TOS S:0 S:0 S:0 S:0 S:0} S:0}{1 30}}}} \
+ -imixEnabled False \
+ -imixTemplates none \
+ -framesizeImixList $frameSize \
+ -imixTrafficType {UNCHNAGED} \
+ -ipRatioMode fixed \
+ -ipv4RatioList {10,25,50,75,90} \
+ -ipv6RatioList {90,75,50,25,10} \
+ -minIncrementIpv4Ratio {10} \
+ -stepIncrementIpv4Ratio {10} \
+ -maxIncrementIpv4Ratio {90} \
+ -minIncrementIpv6Ratio {90} \
+ -stepIncrementIpv6Ratio {-10} \
+ -maxIncrementIpv6Ratio {10} \
+ -minRandomIpv4Ratio {10} \
+ -maxRandomIpv4Ratio {90} \
+ -minRandomIpv6Ratio {90} \
+ -maxRandomIpv6Ratio {10} \
+ -countRandomIpRatio 1 \
+ -mapType {oneToOne|manyToMany|fullMesh} \
+ -supportedTrafficTypes {mac,ipv4,ipv6,ipmix}
+ ixNet setMultiAttrs $sg_rfc2544back2back/learnFrames \
+ -learnFrequency $learningFrequency \
+ -learnNumFrames 10 \
+ -learnRate 100 \
+ -learnWaitTime 1000 \
+ -learnFrameSize 64 \
+ -fastPathLearnFrameSize 64 \
+ -learnWaitTimeBeforeTransmit 0 \
+ -learnSendMacOnly False \
+ -learnSendRouterSolicitation False \
+ -fastPathEnable $fastPathEnable \
+ -fastPathRate 100 \
+ -fastPathNumFrames 10
+ ixNet setMultiAttrs $sg_rfc2544back2back/passCriteria \
+ -passCriteriaLoadRateMode average \
+ -passCriteriaLoadRateValue 100 \
+ -passCriteriaLoadRateScale mbps \
+ -enablePassFail False \
+ -enableRatePassFail False \
+ -enableLatencyPassFail False \
+ -enableStandardDeviationPassFail False \
+ -latencyThresholdValue 10 \
+ -latencyThresholdScale us \
+ -latencyThresholdMode average \
+ -latencyVariationThresholdValue 0 \
+ -latencyVariationThresholdScale us \
+ -latencyVarThresholdMode average \
+ -enableSequenceErrorsPassFail False \
+ -seqErrorsThresholdValue 0 \
+ -seqErrorsThresholdMode average \
+ -enableDataIntegrityPassFail False \
+ -dataErrorThresholdValue 0 \
+ -dataErrorThresholdMode average \
+ -enableFrameCountPassFail False \
+ -passCriteriaFrameCountValue 100 \
+ -passCriteriaFrameCountMode average
+ sg_commit
+ set sg_rfc2544back2back [lindex [ixNet remapIds $sg_rfc2544back2back] 0]
+ set ixNetSG_Stack(1) $sg_rfc2544back2back
+
+ #
+ # configuring the object that corresponds to /quickTest/rfc2544back2back:2/protocols
+ #
+ set sg_protocols $ixNetSG_Stack(1)/protocols
+ ixNet setMultiAttrs $sg_protocols \
+ -protocolState default \
+ -waitAfterStart 120 \
+ -waitAfterStop 30
+ sg_commit
+ set sg_protocols [lindex [ixNet remapIds $sg_protocols] 0]
+
+ #
+ # configuring the object that corresponds to /quickTest/rfc2544back2back:2/trafficSelection:1
+ #
+ set sg_trafficSelection [ixNet add $ixNetSG_Stack(1) trafficSelection]
+ ixNet setMultiAttrs $sg_trafficSelection \
+ -id $ixNetSG_ref(26) \
+ -includeMode inTest \
+ -itemType trafficItem
+ sg_commit
+ set sg_trafficSelection [lindex [ixNet remapIds $sg_trafficSelection] 0]
+ ixNet commit
+ }
+ #
+ # getting and applying the RFC2544 test
+ #
+ set root [ixNet getRoot]
+ set qt [ixNet getList $root quickTest]
+ if {$rfc2544TestType == "throughput"} {
+ set rfc2544test [ixNet getList $qt rfc2544throughput]
+ } elseif {$rfc2544TestType == "back2back"} {
+ set rfc2544test [ixNet getList $qt rfc2544back2back]
+ }
+ ixNet exec apply $rfc2544test
+ after 5000
+
+ #
+ # starting the RFC2544 Throughput test
+ #
+ puts "Starting test..."
+ ixNet exec start $rfc2544test
+}
+
+proc waitForRfc2544Test { } {
+    # Wait for the RFC2544 quicktest to complete and return its results.
+
+ global rfc2544test
+
+ puts "Waiting for test to complete..."
+ set result [ixNet exec waitForTest $rfc2544test]
+ puts "Finished Test"
+
+ return "$result"
+}
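
The binary-search knobs configured above (-minBinaryLoadRate 1, -maxBinaryLoadRate 100, -binaryResolution 1) bound how many trials the quicktest needs: the load interval is halved each step until it is narrower than the resolution. A quick, purely illustrative sanity check in Python:

    import math

    # halvings needed to shrink the (1, 100) percent interval below 1 percent
    min_rate, max_rate, resolution = 1.0, 100.0, 1.0
    trials = math.ceil(math.log2((max_rate - min_rate) / resolution))
    print(trials)  # 7 -> at most 7 halvings per frame size
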
diff --git a/3rd_party/ixia/ixnetrfc2544v2.tcl b/3rd_party/ixia/ixnetrfc2544v2.tcl
index b5c0fe2a..01e7b482 100755
--- a/3rd_party/ixia/ixnetrfc2544v2.tcl
+++ b/3rd_party/ixia/ixnetrfc2544v2.tcl
@@ -1,7 +1,7 @@
#!/usr/bin/env tclsh
# Copyright (c) 2014, Ixia
-# Copyright (c) 2015-2017, Intel Corporation
+# Copyright (c) 2015-2018, Intel Corporation, Tieto
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -1667,7 +1667,7 @@ proc startRfc2544Test { testSpec trafficSpec } {
-trackingEnabled False \
-valueType $L4ValueType \
-activeFieldChoice False \
- -startValue {0} \
+ -startValue $dstPort \
-countValue $L4CountValue
#
diff --git a/3rd_party/ixia/ixnetrfc2544v2_random_ip_crc.tcl b/3rd_party/ixia/ixnetrfc2544v2_random_ip_crc.tcl
index 7955fd23..29cb6e83 100755
--- a/3rd_party/ixia/ixnetrfc2544v2_random_ip_crc.tcl
+++ b/3rd_party/ixia/ixnetrfc2544v2_random_ip_crc.tcl
@@ -1,7 +1,7 @@
#!/usr/bin/env tclsh
# Copyright (c) 2014, Ixia
-# Copyright (c) 2015-2017, Intel Corporation
+# Copyright (c) 2015-2018, Intel Corporation, Tieto
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -1667,7 +1667,7 @@ proc startRfc2544Test { testSpec trafficSpec } {
-trackingEnabled False \
-valueType $L4ValueType \
-activeFieldChoice False \
- -startValue {0} \
+ -startValue $dstPort \
-countValue $L4CountValue
#
diff --git a/3rd_party/ixia/ixnetrfc2544v2_random_udp_crc.tcl b/3rd_party/ixia/ixnetrfc2544v2_random_udp_crc.tcl
index dc35f78e..7af4b9fc 100755
--- a/3rd_party/ixia/ixnetrfc2544v2_random_udp_crc.tcl
+++ b/3rd_party/ixia/ixnetrfc2544v2_random_udp_crc.tcl
@@ -1,7 +1,7 @@
#!/usr/bin/env tclsh
# Copyright (c) 2014, Ixia
-# Copyright (c) 2015-2017, Intel Corporation
+# Copyright (c) 2015-2018, Intel Corporation, Tieto
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -1667,7 +1667,7 @@ proc startRfc2544Test { testSpec trafficSpec } {
-trackingEnabled False \
-valueType $L4ValueType \
-activeFieldChoice False \
- -startValue {0} \
+ -startValue $dstPort \
-countValue $L4CountValue
#
diff --git a/3rd_party/ixia/pass_fail.tcl b/3rd_party/ixia/pass_fail.tcl
index 0a5a7677..bf1fb556 100755
--- a/3rd_party/ixia/pass_fail.tcl
+++ b/3rd_party/ixia/pass_fail.tcl
@@ -431,7 +431,7 @@ proc sendTraffic { flowSpec trafficSpec } {
if {[udp set $::chassis $::card $::port1]} {
errorMsg "Error setting udp on port $::chassis.$::card.$::port1"
}
- errorMsg "frameSize: $frameSize, packetSize: $packetSize, srcMac: $srcMac, dstMac: $dstMac, srcPort: $srcPort, dstPort: $dstPort"
+ errorMsg "frameSize: $frameSize, packetSize: $packetSize, srcMac: $srcMac, dstMac: $dstMac, srcPort: $srcPort, dstPort: $dstPort, framerate: $frameRate %"
if {[info exists protocolPad]} {
errorMsg "protocolPad: $protocolPad, protocolPadBytes: $protocolPadBytes"
}
@@ -544,8 +544,8 @@ proc sendTraffic { flowSpec trafficSpec } {
} else {
errorMsg "Too many packets for capture."
}
-
- set result [list $framesSent $framesRecv $bytesSent $bytesRecv $payError $seqError]
+ lappend result $payError
+ lappend result $seqError
return $result
} else {
errorMsg "streamtype is not supported: '$streamType'"
@@ -638,6 +638,9 @@ proc stopTraffic {} {
logMsg "Frame Rate Sent: $sendRate"
logMsg "Frame Rate Recv: $recvRate\n"
+ logMsg "Bytes Rate Sent: $sendRateBytes"
+ logMsg "Bytes Rate Recv: $recvRateBytes\n"
+
set result [list $framesSent $framesRecv $bytesSent $bytesRecv $sendRate $recvRate $sendRateBytes $recvRateBytes]
return $result
@@ -728,13 +731,6 @@ proc rfcThroughputTest { testSpec trafficSpec } {
set framesDroppedRate 100
}
- # check if we've already found the rate before 10 iterations, i.e.
- # 'percentRate = idealValue'. This is as accurate as we can get with
- # integer values.
- if {[expr "$max - $min"] <= 0.5 } {
- break
- }
-
# handle 'percentRate <= idealValue' case
if {$framesDroppedRate <= $lossRate} {
logMsg "Frame sendRate of '$sendRate' pps succeeded ('$framesDropped' frames dropped)"
@@ -754,6 +750,18 @@ proc rfcThroughputTest { testSpec trafficSpec } {
set max $percentRate
set percentRate [expr "$percentRate - ([expr "$max - $min"] * 0.5)"]
}
+
+ # check if we've already found the rate before 10 iterations, i.e.
+ # 'percentRate = idealValue'. This is as accurate as we can get with
+ # integer values.
+ if {[expr "$max - $min"] <= 0.5 } {
+ logMsg "End of search condition for framerate is met: $max % - $min % <= 0.5 %"
+ break
+ }
+
+ logMsg "Waiting 2000 ms"
+ # wait to process delayed frames
+ after 2000
}
set bestRate [lindex $result 4]
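
The hunk above moves the end-of-search check after the min/max update, so the midpoint is recomputed before the interval test, and adds a 2 s pause so delayed frames are processed between trials. A minimal Python sketch of the same search loop, with a hypothetical send_at() helper standing in for one traffic trial:

    import time

    def rfc2544_throughput(send_at, loss_rate=0.0, max_iters=10):
        # send_at(rate) runs one trial at 'rate' percent of line rate and
        # returns the measured frame-drop percentage (hypothetical helper)
        min_rate, max_rate = 0.0, 100.0
        percent_rate, best_rate = 100.0, 0.0
        for _ in range(max_iters):
            if send_at(percent_rate) <= loss_rate:
                best_rate = max(best_rate, percent_rate)  # rate succeeded
                min_rate = percent_rate
                percent_rate += (max_rate - min_rate) * 0.5
            else:                                         # rate failed
                max_rate = percent_rate
                percent_rate -= (max_rate - min_rate) * 0.5
            # end-of-search check placed after the update, as in the patch
            if max_rate - min_rate <= 0.5:
                break
            time.sleep(2)  # wait to process delayed frames
        return best_rate
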
diff --git a/INFO.yaml b/INFO.yaml
new file mode 100644
index 00000000..32e095ea
--- /dev/null
+++ b/INFO.yaml
@@ -0,0 +1,67 @@
+---
+project: 'Data-plane performance testing and benchmarking'
+project_creation_date: ''
+project_category: 'Integration & Testing'
+lifecycle_state: 'Mature'
+project_lead: &opnfv_vswitchperf_ptl
+ name: 'Sridhar Rao'
+ email: 'Sridhar.Rao@spirent.com'
+ company: 'spirent.com'
+ id: 'sridharkn'
+ timezone: 'IST'
+primary_contact: *opnfv_vswitchperf_ptl
+issue_tracking:
+ type: 'jira'
+ url: 'https://jira.opnfv.org/projects/VSPERF'
+ key: ''
+mailing_list:
+ type: 'mailman2'
+ url: 'opnfv-tech-discuss@lists.opnfv.org'
+ tag: '[vsperf]'
+realtime_discussion:
+ type: irc
+ server: 'freenode.net'
+ channel: '#opnfv-vswitchperf'
+meetings:
+ - type: 'gotomeeting+irc'
+ agenda: # eg: 'https://wiki.opnfv.org/display/'
+ url: # eg: 'https://global.gotomeeting.com/join/819733085'
+ server: 'freenode.net'
+ channel: '#opnfv-meeting'
+ repeats: 'weekly'
+ time: '15:00 UTC' # eg: '16:00 UTC'
+repositories:
+ - 'vswitchperf'
+committers:
+ - <<: *opnfv_vswitchperf_ptl
+ - name: 'Maryam Tahhan'
+ email: 'maryam.tahhan@intel.com'
+ company: 'intel.com'
+ id: 'maryamtahhan'
+ - name: 'Al Morton'
+ email: 'acmorton@att.com'
+ company: 'att.com'
+ id: 'acm'
+ - name: 'Martin Klozik'
+ email: 'martin.klozik@tieto.com'
+ company: 'tieto.com'
+ id: 'mklozik'
+ - name: 'Bill Michalowski'
+ email: 'bmichalo@redhat.com'
+ company: 'redhat.com'
+ id: 'bmichalo'
+ - name: 'Christian Trautman'
+ email: 'ctrautma@redhat.com'
+ company: 'redhat.com'
+ id: 'ctrautma'
+ - name: 'Trevor Cooper'
+ email: 'trevor.cooper@intel.com'
+ company: 'intel.com'
+ id: 'trev'
+tsc:
+ # yamllint disable rule:line-length
+    approval: 'http://meetbot.opnfv.org/meetings/opnfv-meeting/'
+ changes:
+ - type: 'removal'
+      link: 'http://ircbot.wl.linuxfoundation.org/meetings/opnfv-meeting/2016/opnfv-meeting.2016-05-17-13.59.html'
+ # yamllint enable rule:line-length
diff --git a/check b/check
index 3c8c2927..b1c79ac4 100755
--- a/check
+++ b/check
@@ -159,7 +159,7 @@ if [ -s $FILE_LIST ] ; then
fi
# run pylint and extract final rating
output=`$PYLINT --rcfile $PYLINT_RC $pyfile 2>/dev/null`
- rating=`echo -e $output | tail -n3 | grep rated | sed -e 's/^.*rated at \([0-9.]*\).*$/\1/'`
+ rating=`echo -e $output | tail -n3 | grep rated | sed -e 's/^.*rated at \(-\?[0-9.]*\).*$/\1/'`
# evaluate and display acquired rating
if [ "x$rating" == "x" ] ; then
# rating is not available for files without python statements
@@ -167,7 +167,7 @@ if [ -s $FILE_LIST ] ; then
elif rating_is_ok $rating ; then
printf " %-70s ${GREEN}%-6s${BLACK}\n" $pyfile "OK"
else
- echo -e "$output" | awk '/^*+ Module|^[A-Z]\:/'
+ echo -e "$output" | awk '/^\*+ Module|^[A-Z]\:/'
printf " %-70s ${RED}%-6s${BLACK}\n" $pyfile $rating
fi
done
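
The widened sed expression accepts pylint's negative ratings (badly rated files can score below zero, e.g. "rated at -2.50/10"), and the awk pattern now escapes the leading '*' properly. The rating extraction is equivalent to this small Python illustration:

    import re

    def extract_rating(pylint_output):
        # accept both positive and negative scores, e.g. 'rated at -2.50/10'
        match = re.search(r'rated at (-?[0-9.]+)', pylint_output)
        return float(match.group(1)) if match else None

    assert extract_rating('Your code has been rated at 9.76/10') == 9.76
    assert extract_rating('Your code has been rated at -2.50/10') == -2.5
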
diff --git a/ci/build-vsperf.sh b/ci/build-vsperf.sh
index 00a548ba..75edbfbb 100755
--- a/ci/build-vsperf.sh
+++ b/ci/build-vsperf.sh
@@ -1,6 +1,6 @@
#!/bin/bash
#
-# Copyright 2015-2017 Intel Corporation.
+# Copyright 2015-2018 Intel Corporation, Tieto
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -44,9 +44,9 @@ RESULTS_ARCHIVE="$HOME/ci_results_archive"
# CI job specific configuration
# VERIFY - run basic set of TCs with default settings
-TESTCASES_VERIFY="vswitch_add_del_bridge vswitch_add_del_bridges vswitch_add_del_vport vswitch_add_del_vports vswitch_vports_add_del_flow"
+TESTCASES_VERIFY="vswitch_add_del_bridge vswitch_add_del_bridges vswitch_add_del_vport vswitch_add_del_vports vswitch_vports_add_del_connection"
TESTPARAM_VERIFY="--integration"
-TESTCASES_VERIFY_VPP="vswitch_add_del_bridge vswitch_add_del_bridges vswitch_add_del_vport vswitch_add_del_vports vswitch_vports_add_del_connection_vpp"
+TESTCASES_VERIFY_VPP=$TESTCASES_VERIFY
TESTPARAM_VERIFY_VPP=$TESTPARAM_VERIFY
# MERGE - run selected TCs with default settings
TESTCASES_MERGE=$TESTCASES_VERIFY
@@ -55,7 +55,9 @@ TESTCASES_MERGE_VPP=$TESTCASES_VERIFY_VPP
TESTPARAM_MERGE_VPP=$TESTPARAM_VERIFY_VPP
# DAILY - run selected TCs for defined packet sizes
TESTCASES_DAILY='phy2phy_tput back2back phy2phy_tput_mod_vlan phy2phy_scalability pvp_tput pvp_back2back pvvp_tput pvvp_back2back'
+TESTCASES_DAILY_MIN='phy2phy_tput'
TESTCASES_DAILY_VPP='phy2phy_tput_vpp phy2phy_back2back_vpp pvp_tput_vpp pvp_back2back_vpp pvvp_tput_vpp pvvp_back2back_vpp'
+TESTCASES_DAILY_VPP_MIN='phy2phy_tput_vpp'
TESTPARAM_DAILY='--test-params TRAFFICGEN_PKT_SIZES=(64,128,512,1024,1518)'
TESTPARAM_DAILY_VPP=$TESTPARAM_DAILY
TESTCASES_SRIOV='pvp_tput'
@@ -137,7 +139,7 @@ function print_results() {
printf " %-70s %-6s\n" "result_${i}" "FAILED"
EXIT=$EXIT_TC_FAILED
else
- RES_FILE=`ls -1 $1 | egrep "result_${i}_[0-9a-zA-Z\-]+.csv"`
+ RES_FILE=`ls -1 $1 | egrep "result_[0-9]+_${i}_[0-9a-zA-Z\-]+.csv"`
if [ "x$RES_FILE" != "x" -a -e "${1}/${RES_FILE}" ]; then
if grep ^FAILED "${1}/${RES_FILE}" &> /dev/null ; then
@@ -184,13 +186,13 @@ function execute_vsperf() {
# by default use daily build and upload results to the OPNFV database
if [ "$1" == "VPP" ] ; then
TESTPARAM=$TESTPARAM_DAILY_VPP
- TESTCASES=$TESTCASES_DAILY_VPP
+ TESTCASES=$TESTCASES_DAILY_VPP_MIN
# don't report VPP results into testresults DB until TC name mapping
# for VPP tests is defined
#OPNFVPOD="--opnfvpod=$NODE_NAME"
else
TESTPARAM=$TESTPARAM_DAILY
- TESTCASES=$TESTCASES_DAILY
+ TESTCASES=$TESTCASES_DAILY_MIN
OPNFVPOD="--opnfvpod=$NODE_NAME"
fi
;;
@@ -433,15 +435,6 @@ function dependencies_check() {
sudo apt-get install -y $PACKAGE
fi
done
- # install additional python packages into python environment
- for PACKAGE in "pylint" ; do
- if pip show $PACKAGE &> /dev/null ; then
- printf " %-70s %-6s\n" $PACKAGE "OK"
- else
- printf " %-70s %-6s\n" $PACKAGE "missing"
- pip install $PACKAGE
- fi
- done
echo
fi
}
@@ -535,7 +528,7 @@ case $1 in
echo "VSPERF merge job"
echo "================"
- execute_pylint_check
+ execute_vsperf_pylint_check
terminate_vsperf
execute_vsperf_sanity
terminate_vsperf
@@ -556,21 +549,21 @@ case $1 in
terminate_vsperf
execute_vsperf OVS_with_DPDK_and_vHost_User $1
terminate_vsperf
- execute_vsperf OVS_vanilla $1
- terminate_vsperf
+# execute_vsperf OVS_vanilla $1
+# terminate_vsperf
execute_vsperf VPP $1
terminate_vsperf
- execute_vsperf SRIOV $1
- terminate_vsperf
+# execute_vsperf SRIOV $1
+# terminate_vsperf
generate_report
push_results_to_artifactory
generate_and_push_graphs "$TESTCASES_DAILY" ",OvsDpdkVhost,"
- generate_and_push_graphs "$TESTCASES_DAILY" ",OvsVanilla,"
+# generate_and_push_graphs "$TESTCASES_DAILY" ",OvsVanilla,"
generate_and_push_graphs "$TESTCASES_DAILY_VPP" ",VppDpdkVhost,"
- generate_and_push_graphs "$TESTCASES_SRIOV" ",none,"
+# generate_and_push_graphs "$TESTCASES_SRIOV" ",none,"
cleanup
diff --git a/conf/00_common.conf b/conf/00_common.conf
index 4c25b0b8..c3579014 100644
--- a/conf/00_common.conf
+++ b/conf/00_common.conf
@@ -98,11 +98,14 @@ SHELL_CMD = ['/bin/bash', '-c']
LOG_DIR = '/tmp'
# default log for all "small" executables
-LOG_FILE_DEFAULT = 'overall.log'
+LOG_FILE_DEFAULT = 'vsperf-overall.log'
# log file for all commands executed on host
LOG_FILE_HOST_CMDS = 'host-cmds.log'
+# log file prefix for infrastructure metrics
+LOG_FILE_INFRA_METRICS_PFX = 'collectd_'
+
# ############################
# Test configuration
# ############################
@@ -119,6 +122,18 @@ TEST_PARAMS = {}
# delay enforced after every step to allow system to process changes
TEST_STEP_DELAY = 5
+# parameter used, when running multiple tests, to accumulate _PARAMS_LIST
+# parameters for multiple tests running in a series
+CUMULATIVE_PARAMS = False
+
+# metric used by the performance matrix for comparison and analysis
+# of tests run in a series. Must always refer to a numeric value.
+# For example: 'throughput_rx_mbps', 'throughput_rx_fps', 'avg_latency_ns'
+MATRIX_METRIC = 'throughput_rx_fps'
+
+# OPNFVPOD specification.
+OPNFVPOD = ''
+
# ############################
# Modules
# ############################
@@ -127,4 +142,19 @@ TEST_STEP_DELAY = 5
# it can be used to suppress automatic load of obsoleted or abstract modules
# Example:
# EXCLUDE_MODULES = ['ovs_vanilla', 'qemu_virtio_net', 'pidstat']
-EXCLUDE_MODULES = ["testcenter-rfc2544-throughput"]
+EXCLUDE_MODULES = ["testcenter-rfc2544-throughput", "vsperf_controller", "vsperf_pb2", "vsperf_client", "vsperf_pb2_grpc"]
+
+# ############################
+# Vsperf Internal Options
+# ############################
+# following options should not be changed by the user
+
+# internal list to keep track of PIDs of jobs executed by vsperf
+_EXECUTED_PIDS = []
+
+# dictionary containing the test-specific parameters of all tests being run
+# for the purposes of cumulative parameter assignment using the performance matrix
+_PARAMS_LIST = {}
+
+# index number of the current test, used for naming of result files
+_TEST_INDEX = 0
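
As a rough, hypothetical illustration of how MATRIX_METRIC could be used to compare tests run in a series (the result records and field names below are assumptions, not vsperf code):

    MATRIX_METRIC = 'throughput_rx_fps'

    # one record per test in the series, indexed like _TEST_INDEX
    results = [
        {'index': 0, 'throughput_rx_fps': 7.2e6},
        {'index': 1, 'throughput_rx_fps': 9.1e6},
    ]

    baseline = results[0][MATRIX_METRIC]
    for res in results:
        ratio = res[MATRIX_METRIC] / baseline
        print('test %d: %.3e fps (%.2fx of test 0)'
              % (res['index'], res[MATRIX_METRIC], ratio))
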
diff --git a/conf/01_testcases.conf b/conf/01_testcases.conf
index bd5ba9eb..d766df65 100755
--- a/conf/01_testcases.conf
+++ b/conf/01_testcases.conf
@@ -1,4 +1,4 @@
-# Copyright 2015-2017 Intel Corporation.
+# Copyright 2015-2018 Intel Corporation, Tieto
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -92,82 +92,6 @@
# "Dependency": [Test_Case_Name |None],
#
-# VPP specific macros used in TC defintions
-#
-VPP_P2P = [
- ['vswitch', 'add_switch', 'int_br0'], # STEP 0
- ['vswitch', 'add_phy_port', 'int_br0'], # STEP 1
- ['vswitch', 'add_phy_port', 'int_br0'], # STEP 2
- ['vswitch', 'add_connection', 'int_br0', '#STEP[1][0]', '#STEP[2][0]', True],
- ['vswitch', 'add_connection', 'int_br0', '#STEP[2][0]', '#STEP[1][0]', True],
- ['trafficgen', 'send_traffic', {}],
- ['vswitch', 'dump_connections', 'int_br0'],
- ['vswitch', 'del_connection', 'int_br0', '#STEP[1][0]', '#STEP[2][0]', True],
- ['vswitch', 'del_connection', 'int_br0', '#STEP[2][0]', '#STEP[1][0]', True],
- ['vswitch', 'del_port', 'int_br0', '#STEP[1][0]'],
- ['vswitch', 'del_port', 'int_br0', '#STEP[2][0]'],
- ['vswitch', 'del_switch', 'int_br0'],
- ]
-VPP_PVP = [
- ['vswitch', 'add_switch', 'int_br0'], # STEP 0
- ['vswitch', 'add_phy_port', 'int_br0'], # STEP 1
- ['vswitch', 'add_phy_port', 'int_br0'], # STEP 2
- ['vswitch', 'add_vport', 'int_br0'], # STEP 3
- ['vswitch', 'add_vport', 'int_br0'], # STEP 4
- ['vswitch', 'add_connection', 'int_br0', '#STEP[1][0]', '#STEP[3][0]', True],
- ['vswitch', 'add_connection', 'int_br0', '#STEP[4][0]', '#STEP[2][0]', True],
- ['vswitch', 'add_connection', 'int_br0', '#STEP[2][0]', '#STEP[4][0]', True],
- ['vswitch', 'add_connection', 'int_br0', '#STEP[3][0]', '#STEP[1][0]', True],
- ['vnf', 'start'],
- ['trafficgen', 'send_traffic', {}],
- ['vnf', 'stop'],
- ['vswitch', 'dump_connections', 'int_br0'],
- ['vswitch', 'del_connection', 'int_br0', '#STEP[1][0]', '#STEP[3][0]', True],
- ['vswitch', 'del_connection', 'int_br0', '#STEP[4][0]', '#STEP[2][0]', True],
- ['vswitch', 'del_connection', 'int_br0', '#STEP[2][0]', '#STEP[4][0]', True],
- ['vswitch', 'del_connection', 'int_br0', '#STEP[3][0]', '#STEP[1][0]', True],
- ['vswitch', 'del_port', 'int_br0', '#STEP[1][0]'],
- ['vswitch', 'del_port', 'int_br0', '#STEP[2][0]'],
- ['vswitch', 'del_port', 'int_br0', '#STEP[3][0]'],
- ['vswitch', 'del_port', 'int_br0', '#STEP[4][0]'],
- ['vswitch', 'del_switch', 'int_br0'],
- ]
-VPP_PVVP = [
- ['vswitch', 'add_switch', 'int_br0'], # STEP 0
- ['vswitch', 'add_phy_port', 'int_br0'], # STEP 1
- ['vswitch', 'add_phy_port', 'int_br0'], # STEP 2
- ['vswitch', 'add_vport', 'int_br0'], # STEP 3
- ['vswitch', 'add_vport', 'int_br0'], # STEP 4
- ['vswitch', 'add_vport', 'int_br0'], # STEP 5
- ['vswitch', 'add_vport', 'int_br0'], # STEP 6
- ['vswitch', 'add_connection', 'int_br0', '#STEP[1][0]', '#STEP[3][0]', True],
- ['vswitch', 'add_connection', 'int_br0', '#STEP[4][0]', '#STEP[5][0]', True],
- ['vswitch', 'add_connection', 'int_br0', '#STEP[6][0]', '#STEP[2][0]', True],
- ['vswitch', 'add_connection', 'int_br0', '#STEP[2][0]', '#STEP[6][0]', True],
- ['vswitch', 'add_connection', 'int_br0', '#STEP[5][0]', '#STEP[4][0]', True],
- ['vswitch', 'add_connection', 'int_br0', '#STEP[3][0]', '#STEP[1][0]', True],
- ['vnf1', 'start'],
- ['vnf2', 'start'],
- ['trafficgen', 'send_traffic', {}],
- ['vnf2', 'stop'],
- ['vnf1', 'stop'],
- ['vswitch', 'dump_connections', 'int_br0'],
- ['vswitch', 'del_connection', 'int_br0', '#STEP[1][0]', '#STEP[3][0]', True],
- ['vswitch', 'del_connection', 'int_br0', '#STEP[4][0]', '#STEP[5][0]', True],
- ['vswitch', 'del_connection', 'int_br0', '#STEP[6][0]', '#STEP[2][0]', True],
- ['vswitch', 'del_connection', 'int_br0', '#STEP[2][0]', '#STEP[6][0]', True],
- ['vswitch', 'del_connection', 'int_br0', '#STEP[5][0]', '#STEP[4][0]', True],
- ['vswitch', 'del_connection', 'int_br0', '#STEP[3][0]', '#STEP[1][0]', True],
- ['vswitch', 'del_port', 'int_br0', '#STEP[1][0]'],
- ['vswitch', 'del_port', 'int_br0', '#STEP[2][0]'],
- ['vswitch', 'del_port', 'int_br0', '#STEP[3][0]'],
- ['vswitch', 'del_port', 'int_br0', '#STEP[4][0]'],
- ['vswitch', 'del_port', 'int_br0', '#STEP[5][0]'],
- ['vswitch', 'del_port', 'int_br0', '#STEP[6][0]'],
- ['vswitch', 'del_switch', 'int_br0'],
- ]
-
-#
# Generic performance TC definitions
#
PERFORMANCE_TESTS = [
@@ -232,6 +156,18 @@ PERFORMANCE_TESTS = [
},
},
},
+ {
+ "Name": "phy2phy_tput_mod_vlan_cont",
+ "Deployment": "p2p",
+ "Frame Modification": "vlan",
+ "Description": "Phy2Phy VLAN Continuous Stream",
+ "Parameters" : {
+ "TRAFFIC" : {
+ "traffic_type" : "rfc2544_continuous",
+ "frame_rate" : 100,
+ },
+ },
+ },
{
"Name": "phy2phy_cont",
"Deployment": "p2p",
@@ -244,6 +180,18 @@ PERFORMANCE_TESTS = [
},
},
{
+ "Name": "phy2phy_burst",
+ "Deployment": "p2p",
+ "Description": "Phy2Phy single burst of 1000 frames at 100% frame rate",
+ "Parameters" : {
+ "TRAFFIC" : {
+ "traffic_type" : "burst",
+ "frame_rate" : 100,
+ "burst_size" : 1000,
+ },
+ },
+ },
+ {
"Name": "pvp_cont",
"Deployment": "pvp",
"Description": "PVP Continuous Stream",
@@ -288,6 +236,18 @@ PERFORMANCE_TESTS = [
},
},
{
+ "Name": "phy2phy_scalability_cont",
+ "Deployment": "p2p",
+ "Description": "Phy2Phy Scalability Continuous Stream",
+ "Parameters" : {
+ "TRAFFIC" : {
+ "traffic_type" : "rfc2544_continuous",
+ "frame_rate" : 100,
+ "multistream" : 8000,
+ },
+ },
+ },
+ {
"Name": "pvp_tput",
"Deployment": "pvp",
"Description": "LTD.Throughput.RFC2544.PacketLossRatio",
@@ -361,9 +321,14 @@ PERFORMANCE_TESTS = [
},
},
},
+ #
+ # Backward compatible definition of VPP TCs.
+    # It will be removed once CI reporting is fixed to use
+    # default TCs for VPP reporting.
+ #
{
"Name": "phy2phy_tput_vpp",
- "Deployment": "clean",
+ "Deployment": "p2p",
"Description": "VPP: LTD.Throughput.RFC2544.PacketLossRatio",
"vSwitch" : "VppDpdkVhost",
"Parameters" : {
@@ -371,11 +336,10 @@ PERFORMANCE_TESTS = [
"traffic_type" : "rfc2544_throughput",
},
},
- "TestSteps": VPP_P2P,
},
{
"Name": "phy2phy_cont_vpp",
- "Deployment": "clean",
+ "Deployment": "p2p",
"Description": "VPP: Phy2Phy Continuous Stream",
"vSwitch" : "VppDpdkVhost",
"Parameters" : {
@@ -384,11 +348,10 @@ PERFORMANCE_TESTS = [
"frame_rate" : 100,
},
},
- "TestSteps": VPP_P2P,
},
{
"Name": "phy2phy_back2back_vpp",
- "Deployment": "clean",
+ "Deployment": "p2p",
"Description": "VPP: LTD.Throughput.RFC2544.BackToBackFrames",
"vSwitch" : "VppDpdkVhost",
"Parameters" : {
@@ -396,11 +359,10 @@ PERFORMANCE_TESTS = [
"traffic_type" : "rfc2544_back2back",
},
},
- "TestSteps": VPP_P2P,
},
{
"Name": "pvp_tput_vpp",
- "Deployment": "clean",
+ "Deployment": "pvp",
"Description": "VPP: LTD.Throughput.RFC2544.PacketLossRatio",
"vSwitch" : "VppDpdkVhost",
"Parameters" : {
@@ -408,11 +370,10 @@ PERFORMANCE_TESTS = [
"traffic_type" : "rfc2544_throughput",
},
},
- "TestSteps": VPP_PVP,
},
{
"Name": "pvp_cont_vpp",
- "Deployment": "clean",
+ "Deployment": "pvp",
"Description": "VPP: PVP Continuous Stream",
"vSwitch" : "VppDpdkVhost",
"Parameters" : {
@@ -420,11 +381,10 @@ PERFORMANCE_TESTS = [
"traffic_type" : "rfc2544_continuous",
},
},
- "TestSteps": VPP_PVP,
},
{
"Name": "pvp_back2back_vpp",
- "Deployment": "clean",
+ "Deployment": "pvp",
"Description": "VPP: LTD.Throughput.RFC2544.BackToBackFrames",
"vSwitch" : "VppDpdkVhost",
"Parameters" : {
@@ -432,11 +392,10 @@ PERFORMANCE_TESTS = [
"traffic_type" : "rfc2544_back2back",
},
},
- "TestSteps": VPP_PVP,
},
{
"Name": "pvvp_tput_vpp",
- "Deployment": "clean",
+ "Deployment": "pvvp",
"Description": "VPP: LTD.Throughput.RFC2544.PacketLossRatio",
"vSwitch" : "VppDpdkVhost",
"Parameters" : {
@@ -444,11 +403,10 @@ PERFORMANCE_TESTS = [
"traffic_type" : "rfc2544_throughput",
},
},
- "TestSteps": VPP_PVVP,
},
{
"Name": "pvvp_cont_vpp",
- "Deployment": "clean",
+ "Deployment": "pvvp",
"Description": "VPP: PVP Continuous Stream",
"vSwitch" : "VppDpdkVhost",
"Parameters" : {
@@ -456,11 +414,10 @@ PERFORMANCE_TESTS = [
"traffic_type" : "rfc2544_continuous",
},
},
- "TestSteps": VPP_PVVP,
},
{
"Name": "pvvp_back2back_vpp",
- "Deployment": "clean",
+ "Deployment": "pvvp",
"Description": "VPP: LTD.Throughput.RFC2544.BackToBackFrames",
"vSwitch" : "VppDpdkVhost",
"Parameters" : {
@@ -468,6 +425,5 @@ PERFORMANCE_TESTS = [
"traffic_type" : "rfc2544_back2back",
},
},
- "TestSteps": VPP_PVVP,
},
]
diff --git a/conf/02_vswitch.conf b/conf/02_vswitch.conf
index 6a830a05..4eca1a52 100644
--- a/conf/02_vswitch.conf
+++ b/conf/02_vswitch.conf
@@ -1,4 +1,4 @@
-# Copyright 2015-2016 Intel Corporation.
+# Copyright 2015-2018 Intel Corporation, Tieto and others.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -115,7 +115,7 @@ PATHS['vswitch'] = {
'path': os.path.join(ROOT_DIR, 'src/vpp/vpp/build-root/install-vpp-native/vpp'),
'vpp': 'bin/vpp',
'vppctl': 'bin/vppctl',
- 'vpp_plugin_path' : 'lib64/vpp_plugins',
+ 'vpp_plugin_path' : 'lib/vpp_plugins',
},
'bin': {
'vpp': 'vpp',
@@ -172,7 +172,11 @@ OVS_OLD_STYLE_MQ = False
VSWITCHD_VANILLA_ARGS = []
# Bridge name to be used by VSWITCH
-VSWITCH_BRIDGE_NAME = 'br0'
+VSWITCH_BRIDGE_NAME = 'vsperf-br0'
+
+# A tunnel type used by OP2P and PTUNP deployments
+# Supported values: 'vxlan', 'gre' or 'geneve'
+TUNNEL_TYPE = 'vxlan'
# directory where hugepages will be mounted on system init
HUGEPAGE_DIR = '/dev/hugepages'
@@ -201,12 +205,26 @@ VSWITCH = "OvsDpdkVhost"
VSWITCH_JUMBO_FRAMES_ENABLED = False
VSWITCH_JUMBO_FRAMES_SIZE = 9000
+# default arguments of OVS ctl tools
+OVS_VSCTL_ARGS = []
+OVS_OFCTL_ARGS = ['-O', 'OpenFlow13'] # backward compatible default value
+OVS_APPCTL_ARGS = []
+
+# default flow template to be used by OVS classes
+OVS_FLOW_TEMPLATE = {
+ 'idle_timeout': '0'
+}
+
+# enable or disable configuration of routing tables; see vswitchperf_design.rst
+# for details.
+OVS_ROUTING_TABLES = False
+
#########################
## VPP
#########################
# Set of arguments used for startup of VPP
# NOTE: DPDK socket mem allocation is driven by parameter DPDK_SOCKET_MEM
-VSWITCH_VPP_CLI_SOCK = ''
+VSWITCH_VPP_CLI_SOCK = '/run/vpp/cli.sock'
VSWITCH_VPP_ARGS = {
'unix' : [
'interactive', # required by VSPERF to detect successful VPP startup
@@ -218,6 +236,9 @@ VSWITCH_VPP_ARGS = {
'workers 2',
'corelist-workers 4,5',
],
+ 'socksvr' : [
+ 'socket-name /run/vpp-api.sock',
+ ],
}
# log file for VPP
diff --git a/conf/03_traffic.conf b/conf/03_traffic.conf
index 3c7bd2f5..01747a38 100644
--- a/conf/03_traffic.conf
+++ b/conf/03_traffic.conf
@@ -1,4 +1,4 @@
-# Copyright 2015-2017 Intel Corporation.
+# Copyright 2015-2018 Intel Corporation, Tieto
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -23,8 +23,8 @@ LOG_FILE_TRAFFIC_GEN = 'traffic-gen.log'
# Detailed description of TRAFFIC dictionary items follows:
#
# 'traffic_type' - One of the supported traffic types.
-# E.g. rfc2544_throughput, rfc2544_back2back
-# or rfc2544_continuous
+# E.g. rfc2544_throughput, rfc2544_back2back,
+# rfc2544_continuous or burst
# Data type: str
# Default value: "rfc2544_throughput".
# 'bidir' - Specifies if generated traffic will be full-duplex (True)
@@ -36,6 +36,12 @@ LOG_FILE_TRAFFIC_GEN = 'traffic-gen.log'
# continuous stream tests.
# Data type: int
# Default value: 100.
+# 'burst_size' - Defines the number of frames in a single burst, which is sent
+# by the burst traffic type. The burst size is applied to each direction,
+# i.e. the total number of tx frames will be 2*burst_size in case of
+# bidirectional traffic.
+# Data type: int
+# Default value: 100.
# 'multistream' - Defines number of flows simulated by traffic generator.
# Value 0 disables multistream feature
# Data type: int
@@ -58,7 +64,6 @@ LOG_FILE_TRAFFIC_GEN = 'traffic-gen.log'
# feature. If enabled, it will implicitly insert a flow
# for each stream. If multistream is disabled, then
# pre-installed flows will be ignored.
-# Note: It is supported only for p2p deployment scenario.
# Data type: str
# Supported values:
# "Yes" - flows will be inserted into OVS
@@ -112,7 +117,7 @@ LOG_FILE_TRAFFIC_GEN = 'traffic-gen.log'
# NOTE: It can be modified by vsperf in some scenarios.
# Data type: str
# Default value: "90.90.90.90".
-# 'proto' - Specifies deflaut protocol type.
+# 'proto' - Specifies protocol type.
# Please check particular traffic generator implementation
# for supported protocol types.
# Data type: str
@@ -147,9 +152,81 @@ LOG_FILE_TRAFFIC_GEN = 'traffic-gen.log'
# congestion (DEI header field).
# Data type: int (NOTE: must fit to 1 bit)
# Default value: 0
+# 'capture' - A dictionary with traffic capture configuration.
+# NOTE: It is supported only by T-Rex traffic generator.
+# 'enabled' - Specifies if traffic should be captured
+# Data type: bool
+# Default value: False
+# 'tx_ports' - A list of ports where frames transmitted towards the DUT will
+# be captured. Ports have numbers 0 and 1. TX packet capture
+# is disabled if the list of ports is empty.
+# Data type: list
+# Default value: [0]
+# 'rx_ports' - A list of ports where frames received from the DUT will
+# be captured. Ports have numbers 0 and 1. RX packet capture
+# is disabled if the list of ports is empty.
+# Data type: list
+# Default value: [1]
+# 'count' - The number of frames to be captured. The same count value
+# is applied to both TX and RX captures.
+# Data type: int
+# Default value: 1
+# 'filter' - An expression used to filter TX and RX packets. It uses the same
+# syntax as the pcap library. See the pcap-filter man page for additional
+# details.
+# Data type: str
+# Default value: ''
+# 'scapy' - A dictionary with the definition of frame content for both traffic
+# directions. The frame content is defined in scapy notation.
+# NOTE: It is supported only by the T-Rex traffic generator.
+# The following keywords can be used to refer to the related parts of
+# the TRAFFIC dictionary:
+# Ether_src - refers to TRAFFIC['l2']['srcmac']
+# Ether_dst - refers to TRAFFIC['l2']['dstmac']
+# IP_proto - refers to TRAFFIC['l3']['proto']
+# IP_PROTO - refers to upper case version of TRAFFIC['l3']['proto']
+# IP_src - refers to TRAFFIC['l3']['srcip']
+# IP_dst - refers to TRAFFIC['l3']['dstip']
+# IP_PROTO_sport - refers to TRAFFIC['l4']['srcport']
+# IP_PROTO_dport - refers to TRAFFIC['l4']['dstport']
+# Dot1Q_prio - refers to TRAFFIC['vlan']['priority']
+# Dot1Q_id - refers to TRAFFIC['vlan']['cfi']
+# Dot1Q_vlan - refers to TRAFFIC['vlan']['id']
+# '0' - A string with the frame definition for the 1st direction.
+# Data type: str
+# Default value: 'Ether(src={Ether_src}, dst={Ether_dst})/'
+# 'Dot1Q(prio={Dot1Q_prio}, id={Dot1Q_id}, vlan={Dot1Q_vlan})/'
+# 'IP(proto={IP_proto}, src={IP_src}, dst={IP_dst})/'
+# '{IP_PROTO}(sport={IP_PROTO_sport}, dport={IP_PROTO_dport})'
+# '1' - A string with the frame definition for the 2nd direction.
+# Data type: str
+# Default value: 'Ether(src={Ether_dst}, dst={Ether_src})/'
+# 'Dot1Q(prio={Dot1Q_prio}, id={Dot1Q_id}, vlan={Dot1Q_vlan})/'
+# 'IP(proto={IP_proto}, src={IP_dst}, dst={IP_src})/'
+# '{IP_PROTO}(sport={IP_PROTO_dport}, dport={IP_PROTO_sport})',
+# 'latency_histogram'
+# - A dictionary with the definition of latency histogram provisioning in results.
+# 'enabled' - Specifies if the histogram provisioning is enabled or not.
+# 'type' - Defines how the histogram is provided. Currently only 'Default' is defined.
+# 'Default' - Default histogram as provided by the traffic generator.
+# 'imix' - A dictionary for IMIX Specification.
+# 'enabled' - Specifies if IMIX is enabled or not.
+# 'type' - The specification type - denotes how IMIX is specified.
+# Currently only 'genome' type is defined.
+# Other types (ex: table-of-proportions) can be added in future.
+# 'genome' - The genome encoding of packet sizes and ratios for IMIX.
+# The ratio is inferred from the number of particular genome characters.
+# Genome encoding is described in RFC 6985. This specification is closest
+# to the method described in section 6.2 of RFC 6985.
+# Ex: 'aaaaaaaddddg' denotes ratio of 7:4:1 of packets sizes 64:512:1518.
+# Note: The exact sequence is not maintained; only the ratio of packets
+# is ensured.
+# Data type: str
+# Default Value: 'aaaaaaaddddg'
TRAFFIC = {
'traffic_type' : 'rfc2544_throughput',
'frame_rate' : 100,
+ 'burst_size' : 100,
'bidir' : 'True', # will be passed as string in title format to tgen
'multistream' : 0,
'stream_type' : 'L4',
@@ -179,6 +256,33 @@ TRAFFIC = {
'priority': 0,
'cfi': 0,
},
+ 'capture': {
+ 'enabled': False,
+ 'tx_ports' : [0],
+ 'rx_ports' : [1],
+ 'count': 1,
+ 'filter': '',
+ },
+ 'scapy': {
+ 'enabled': False,
+ '0' : 'Ether(src={Ether_src}, dst={Ether_dst})/'
+ 'Dot1Q(prio={Dot1Q_prio}, id={Dot1Q_id}, vlan={Dot1Q_vlan})/'
+ 'IP(proto={IP_proto}, src={IP_src}, dst={IP_dst})/'
+ '{IP_PROTO}(sport={IP_PROTO_sport}, dport={IP_PROTO_dport})',
+ '1' : 'Ether(src={Ether_dst}, dst={Ether_src})/'
+ 'Dot1Q(prio={Dot1Q_prio}, id={Dot1Q_id}, vlan={Dot1Q_vlan})/'
+ 'IP(proto={IP_proto}, src={IP_dst}, dst={IP_src})/'
+ '{IP_PROTO}(sport={IP_PROTO_dport}, dport={IP_PROTO_sport})',
+ },
+ 'latency_histogram': {
+ 'enabled': False,
+ 'type': 'Default',
+ },
+ 'imix': {
+ 'enabled': False,
+ 'type': 'genome',
+ 'genome': 'aaaaaaaddddg',
+ },
}
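
A minimal sketch of the keyword substitution documented above, filling a scapy frame template from the related parts of the TRAFFIC dictionary; the helper name is an illustration, and the real mapping lives in the T-Rex integration:

    def render_frame(traffic, direction='0'):
        # build the substitution table from the documented TRAFFIC references
        fields = {
            'Ether_src': traffic['l2']['srcmac'],
            'Ether_dst': traffic['l2']['dstmac'],
            'IP_proto': traffic['l3']['proto'],
            'IP_PROTO': traffic['l3']['proto'].upper(),
            'IP_src': traffic['l3']['srcip'],
            'IP_dst': traffic['l3']['dstip'],
            'IP_PROTO_sport': traffic['l4']['srcport'],
            'IP_PROTO_dport': traffic['l4']['dstport'],
            'Dot1Q_prio': traffic['vlan']['priority'],
            'Dot1Q_id': traffic['vlan']['cfi'],
            'Dot1Q_vlan': traffic['vlan']['id'],
        }
        return traffic['scapy'][direction].format(**fields)
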
#path to traffic generators directory.
@@ -368,6 +472,12 @@ TRAFFICGEN_STC_WEST_INTF_GATEWAY_ADDR = ""
# Print additional information to the terminal during the test
TRAFFICGEN_STC_VERBOSE = "True"
+# Live Results Required?
+TRAFFICGEN_STC_LIVE_RESULTS = "True"
+
+# Live results file name
+TRAFFICGEN_STC_LIVERESULTS_FILE = "stc-liveresults.dat"
+
# Spirent TestCenter Configuration -- END
#########################################
@@ -456,11 +566,20 @@ TRAFFICGEN_TREX_LEARNING_MODE = True
TRAFFICGEN_TREX_LEARNING_DURATION = 5
# FOR SR-IOV or multistream layer 2 tests to work with T-Rex enable Promiscuous mode
TRAFFICGEN_TREX_PROMISCUOUS = False
+# Enable the options below to force the T-Rex API to attempt to use the speed
+# specified on the server side when pushing traffic. For 40G use 40000. For 25G use 25000.
+TRAFFICGEN_TREX_FORCE_PORT_SPEED = False
+TRAFFICGEN_TREX_PORT_SPEED = 10000 # 10G
+TRAFFICGEN_TREX_LIVE_RESULTS = True
+TRAFFICGEN_TREX_LC_FILE = "trex-liveresults-counts.dat"
+TRAFFICGEN_TREX_LE_FILE = "trex-liveresults-errors.dat"
+
+
PATHS['trafficgen'] = {
'Trex': {
'type' : 'src',
'src': {
- 'path': os.path.join(ROOT_DIR, 'src/trex/trex/scripts/automation/trex_control_plane/stl')
+ 'path': os.path.join(ROOT_DIR, 'src/trex/trex/scripts/automation/trex_control_plane/interactive')
}
}
}
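
The genome notation used by the 'imix' option decodes into a size/ratio table by counting characters; a short sketch, assuming the full a-g size mapping from RFC 6985:

    from collections import Counter

    # RFC 6985 genome codes (assumed full table; the docs above only use a, d, g)
    GENOME_SIZES = {'a': 64, 'b': 128, 'c': 256, 'd': 512,
                    'e': 1024, 'f': 1280, 'g': 1518}

    def decode_genome(genome):
        return {GENOME_SIZES[ch]: n for ch, n in Counter(genome).items()}

    print(decode_genome('aaaaaaaddddg'))  # {64: 7, 512: 4, 1518: 1}
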
diff --git a/conf/04_vnf.conf b/conf/04_vnf.conf
index 37fbe2b1..1574ca8d 100644
--- a/conf/04_vnf.conf
+++ b/conf/04_vnf.conf
@@ -87,8 +87,9 @@ GUEST_TIMEOUT = [180]
# Guest images may require different drive types such as ide to mount shared
# locations and/or boot correctly. You can modify the types here.
-GUEST_BOOT_DRIVE_TYPE = ['scsi']
-GUEST_SHARED_DRIVE_TYPE = ['scsi']
+# Default is set to ide to support qemu version 3.1.1.
+GUEST_BOOT_DRIVE_TYPE = ['ide']
+GUEST_SHARED_DRIVE_TYPE = ['ide']
# guest loopback application method; supported options are:
# 'testpmd' - testpmd from dpdk will be built and used
@@ -130,10 +131,13 @@ GUEST_PROMPT = ['root.*#']
GUEST_NICS_NR = [2]
# template for guests with 4 NICS, but only GUEST_NICS_NR NICS will be configured at runtime
-GUEST_NICS = [[{'device' : 'eth0', 'mac' : '#MAC(00:00:00:00:00:01,2)', 'pci' : '00:04.0', 'ip' : '#IP(192.168.1.2,4)/24'},
- {'device' : 'eth1', 'mac' : '#MAC(00:00:00:00:00:02,2)', 'pci' : '00:05.0', 'ip' : '#IP(192.168.1.3,4)/24'},
- {'device' : 'eth2', 'mac' : '#MAC(cc:00:00:00:00:01,2)', 'pci' : '00:06.0', 'ip' : '#IP(192.168.1.4,4)/24'},
- {'device' : 'eth3', 'mac' : '#MAC(cc:00:00:00:00:02,2)', 'pci' : '00:07.0', 'ip' : '#IP(192.168.1.5,4)/24'},
+# With qemu version 3.1.1 the PCI assignments start from 00:03.0.
+# TODO: Need a better approach for pci configuration. Currently it's based on what qemu-system-x86_64 assigns.
+# One option is to use the pci configuration as one of the parameters of the qemu-system-x86_64 command.
+GUEST_NICS = [[{'device' : 'eth0', 'mac' : '#MAC(00:00:00:00:00:01,2)', 'pci' : '00:03.0', 'ip' : '#IP(192.168.1.2,4)/24'},
+ {'device' : 'eth1', 'mac' : '#MAC(00:00:00:00:00:02,2)', 'pci' : '00:04.0', 'ip' : '#IP(192.168.1.3,4)/24'},
+ {'device' : 'eth2', 'mac' : '#MAC(cc:00:00:00:00:01,2)', 'pci' : '00:05.0', 'ip' : '#IP(192.168.1.4,4)/24'},
+ {'device' : 'eth3', 'mac' : '#MAC(cc:00:00:00:00:02,2)', 'pci' : '00:06.0', 'ip' : '#IP(192.168.1.5,4)/24'},
]]
# amount of host memory allocated for each guest
@@ -142,6 +146,8 @@ GUEST_MEMORY = ['2048']
GUEST_HUGEPAGES_NR = ['1024']
# test-pmd requires 2 VM cores
+# It is also possible to configure GUEST's CPU topology,
+# e.g. GUEST_SMP = ["sockets=1,cores=2"]
GUEST_SMP = ['2']
# cpu features to the guest, default options provided to pass all available
@@ -206,11 +212,18 @@ GUEST_BRIDGE_IP = ['#IP(1.1.1.5)/16']
# Note: Testpmd must be executed in interactive mode. It means, that
# VSPERF won't work correctly if '-i' will be removed.
GUEST_TESTPMD_PARAMS = ['-c 0x3 -n 4 --socket-mem 512 -- '
- '--burst=64 -i --txqflags=0xf00 '
- '--disable-hw-vlan']
+ '--burst=64 -i ']
# packet forwarding mode supported by testpmd; Please see DPDK documentation
# for comprehensive list of modes supported by your version.
# e.g. io|mac|mac_retry|macswap|flowgen|rxonly|txonly|csum|icmpecho|...
# Note: Option "mac_retry" has been changed to "mac retry" since DPDK v16.07
GUEST_TESTPMD_FWD_MODE = ['csum']
+
+# map queue stats to separate regs to verify MQ functionality
+# setting this from testpmd command line parameters does not work as expected
+# since DPDK 18.11, so we have to set it inside testpmd, e.g. to map rx queue
+# 2 on port 0 to stats register 5 add: "rx 0 2 5"
+# Please see DPDK documentation to get more information how to set stat_qmap
+# (https://doc.dpdk.org/guides/testpmd_app_ug/testpmd_funcs.html)
+GUEST_QUEUE_STATS_MAPPING = []
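
A hedged sketch of the '#MAC(base,step)' macro used in GUEST_NICS above, assuming the base address is advanced by vmindex * step for each guest (the authoritative expansion lives in conf/__init__.py):

    def expand_mac(base, step, vmindex):
        # advance the base MAC by vmindex * step and reformat it
        value = int(base.replace(':', ''), 16) + vmindex * step
        hexstr = '%012x' % value
        return ':'.join(hexstr[i:i + 2] for i in range(0, 12, 2))

    print(expand_mac('00:00:00:00:00:01', 2, 0))  # 00:00:00:00:00:01 (guest 0)
    print(expand_mac('00:00:00:00:00:01', 2, 1))  # 00:00:00:00:00:03 (guest 1)
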
diff --git a/conf/05_collector.conf b/conf/05_collector.conf
index 9fd2558c..882ef414 100644
--- a/conf/05_collector.conf
+++ b/conf/05_collector.conf
@@ -1,4 +1,4 @@
-# Copyright 2015 Intel Corporation.
+# Copyright 2015-2018 Intel Corporation, Spirent Communications
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -31,3 +31,52 @@ PIDSTAT_SAMPLE_INTERVAL = 1
# prefix of pidstat's log file; separate log file is created
# for each testcase in the directory with results
LOG_FILE_PIDSTAT = 'pidstat'
+
+##########################################
+# Collectd Specific configuration
+##########################################
+COLLECTD_IP = "127.0.0.1"
+COLLECTD_PORT = 25826
+COLLECTD_SECURITY_LEVEL = 0
+COLLECTD_AUTH_FILE = ''
+LOG_FILE_COLLECTD = 'collectd'
+
+# Configure filters - Interested (KEYS), Not-Interested (XKEYS)
+COLLECTD_CPU_KEYS = ['system', 'idle']
+COLLECTD_PROCESSES_KEYS = ['user', 'system']
+COLLECTD_INTERFACE_KEYS = ['dropped']
+COLLECTD_OVSSTAT_KEYS = ['dropped', 'broadcast']
+COLLECTD_DPDKSTAT_KEYS = ['dropped']
+COLLECTD_INTELRDT_KEYS = ['llc']
+
+# Interface types to exclude
+COLLECTD_INTERFACE_XKEYS = ['docker', 'lo']
+# Core IDs to exclude from Intel RDT monitoring.
+# Provide individual core-ids or range of core-ids.
+# The range is specified using '-'
+COLLECTD_INTELRDT_XKEYS = [ ]
+
+###############################################
+# Multi Command Collector Configurations
+###############################################
+MC_COLLECTD_CSV = '/tmp/csv/'
+MC_COLLECTD_CMD = '/opt/collectd/sbin/collectd'
+MC_PROX_HOME = '/home/opnfv/irq/'
+MC_PROX_CMD = './runrapid.py'
+MC_PROX_OUT = 'RUNirq.irq.log'
+MC_CRON_OUT = '/tmp/ovs-cores.log'
+MC_BEAT_CFILE = '/etc/filebeat/filebeat.yml'
+
+###############################################
+# Cadvisor Specific configuration
+###############################################
+
+LOG_FILE_CADVISOR = 'cadvisor'
+CADVISOR_STORAGE_DRIVER = 'stdout,influxdb'
+# ip:port of influxdb
+CADVISOR_STORAGE_HOST = '10.10.120.22:8086'
+CADVISOR_DRIVER_DB = '_internal'
+# names of all containers used to calculate results
+#CADVISOR_CONTAINERS = ['container1name','container2name']
+CADVISOR_CONTAINERS = []
+
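
An illustrative sketch (not vsperf code) of the KEYS/XKEYS filtering idea for the interface plugin: a sample is interesting when its metric matches a KEYS entry and its interface is not excluded by XKEYS:

    COLLECTD_INTERFACE_KEYS = ['dropped']
    COLLECTD_INTERFACE_XKEYS = ['docker', 'lo']

    def interesting(iface, metric):
        if any(iface.startswith(xkey) for xkey in COLLECTD_INTERFACE_XKEYS):
            return False  # excluded interface type
        return any(key in metric for key in COLLECTD_INTERFACE_KEYS)

    print(interesting('eth0', 'if_dropped'))     # True
    print(interesting('docker0', 'if_dropped'))  # False
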
diff --git a/conf/07_loadgen.conf b/conf/07_loadgen.conf
index e7349a5d..0b2cc1e6 100644
--- a/conf/07_loadgen.conf
+++ b/conf/07_loadgen.conf
@@ -15,7 +15,23 @@
LOADGEN_DIR = os.path.join(ROOT_DIR, 'tools/load_gen')
######################################################
-# LOADGEN tool: one of DummyLoadGen, Stress, StressNg
+# LOADGEN tool: one of DummyLoadGen, Stress, StressNg,
+# and StressorVM
######################################################
LOADGEN = "DummyLoadGen"
######################################################
+
+
+######################################################
+# StressorVM specific configuration
+######################################################
+NN_COUNT = 1
+NN_MEMORY = ['4096']
+NN_SMP = ['2']
+NN_IMAGE = ['/home/opnfv/stressng-images/stressng-high-TypeE.qemu']
+NN_SHARED_DRIVE_TYPE = ['scsi']
+NN_BOOT_DRIVE_TYPE = ['scsi']
+NN_CORE_BINDING = [('9','10')]
+NN_NICS_NR = ['2']
+NN_BASE_VNC_PORT = 4
+NN_LOG_FILE = 'nnqemu.log'
diff --git a/conf/08_llcmanagement.conf b/conf/08_llcmanagement.conf
new file mode 100644
index 00000000..92e6367c
--- /dev/null
+++ b/conf/08_llcmanagement.conf
@@ -0,0 +1,62 @@
+# Copyright 2017-2018 Spirent Communications.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+##################################
+# LLC Management Configuration #
+##################################
+
+####################################################################
+# Specify how the policy is defined.
+# Select any one of the following: COS, CUSTOM.
+####################################################################
+POLICY_TYPE = 'COS'
+
+####################################################################
+# Policy Definition by COS
+# Choose any one class of service among Gold, Silver and Bronze.
+# The min-cache and max-cache for these 3 services vary.
+# gold - has the maximum, with 'guaranteed' allocation.
+# silver-bf - lower than gold, and best effort.
+# bronze-shared - the least, and shared.
+# This value will be used for the "policy" variable in the REST call.
+####################################################################
+VSWITCH_COS = "silver-bf"
+VNF_COS = "silver-bf"
+PMD_COS = "gold"
+NOISEVM_COS = "bronze-shared"
+
+####################################################################
+# CUSTOM Policy Definition
+# Specify minimum and maximum cache values for each workload
+# [mincache, maxcache]
+####################################################################
+VSWITCH_CA = [10, 18]
+VNF_CA = [8, 10]
+PMD_CA = [10, 16]
+NOISEVM_CA = [1, 1]
+
+####################################################################
+# Intel RMD Server Specific Configuration
+# Port: 8081 (Debug) 8888 (normal)
+# Version: v1
+# IP: only localhost.
+####################################################################
+RMD_PORT = 8081
+RMD_SERVER_IP = '127.0.0.1'
+RMD_API_VERSION = 'v1'
+
+####################################################################
+# LLC Allocation Control.
+####################################################################
+LLC_ALLOCATION = False
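
A hypothetical sketch of the REST call implied by the RMD settings above; the endpoint path and payload schema are assumptions, not the documented RMD API:

    import json
    import urllib.request

    RMD_SERVER_IP, RMD_PORT, RMD_API_VERSION = '127.0.0.1', 8081, 'v1'

    def request_allocation(policy, core_ids):
        # assumed endpoint: POST http://<ip>:<port>/<version>/workloads
        url = 'http://%s:%s/%s/workloads' % (RMD_SERVER_IP, RMD_PORT, RMD_API_VERSION)
        payload = json.dumps({'core_ids': core_ids, 'policy': policy}).encode()
        req = urllib.request.Request(url, data=payload,
                                     headers={'Content-Type': 'application/json'})
        return urllib.request.urlopen(req)

    # e.g. request_allocation(PMD_COS, ['4', '5']) for the PMD worker cores
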
diff --git a/conf/10_custom.conf b/conf/10_custom.conf
index 917d16b4..99600966 100644
--- a/conf/10_custom.conf
+++ b/conf/10_custom.conf
@@ -138,11 +138,18 @@ TRAFFICGEN_TREX_LEARNING_MODE = True
TRAFFICGEN_TREX_LEARNING_DURATION = 5
# FOR SR-IOV or multistream layer 2 tests to work with T-Rex enable Promiscuous mode
TRAFFICGEN_TREX_PROMISCUOUS = False
+# Enable the options below to force the T-Rex API to attempt to use the speed
+# specified on the server side when pushing traffic. For 40G use 40000. For 25G use 25000.
+TRAFFICGEN_TREX_FORCE_PORT_SPEED = False
+TRAFFICGEN_TREX_PORT_SPEED = 10000 # 10G
# TRex validation option for RFC2544
TRAFFICGEN_TREX_VERIFICATION_MODE = False
TRAFFICGEN_TREX_VERIFICATION_DURATION = 60
TRAFFICGEN_TREX_MAXIMUM_VERIFICATION_TRIALS = 10
+TRAFFICGEN_TREX_RFC2544_MAX_REPEAT = 0
+TRAFFICGEN_TREX_RFC2544_BINARY_SEARCH_LOSS_VERIFICATION = False
+
# TREX Configuration and Connection Info-- END
####################################################
diff --git a/conf/11_openstack.conf b/conf/11_openstack.conf
new file mode 100644
index 00000000..6be65228
--- /dev/null
+++ b/conf/11_openstack.conf
@@ -0,0 +1,43 @@
+# Copyright 2020 Spirent Communications
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file describes a list of parameters used for deploying a TGEN
+# on OpenStack.
+
+
+DEFAULT_POLLING_INTERVAL = 10
+SCENARIOS = ['templates/l2_2c_2i.yaml']
+
+SCHEMA = 'templates/scenario.yaml'
+
+OS_AUTH_URL="http://10.10.180.21/identity"
+OS_PROJECT_ID="0440a230a799460facec0d09dde64497"
+OS_PROJECT_NAME="admin"
+OS_USER_DOMAIN_NAME="Default"
+OS_PROJECT_DOMAIN_ID="default"
+OS_USERNAME="admin"
+OS_PASSWORD="admin123"
+OS_REGION_NAME="RegionOne"
+OS_INTERFACE="public"
+OS_IDENTITY_API_VERSION=3
+OS_INSECURE=False
+OS_CA_CERT= 'None'
+
+STACK_NAME = 'testvnf_vsperf'
+CLEANUP_ON_EXIT = True
+
+FLAVOR_NAME = 'm1.large'
+IMAGE_NAME = 'bionic'
+EXTERNAL_NET = 'public'
+DNS_NAMESERVERS = ['8.8.8.8', '8.8.4.4']
diff --git a/conf/12_k8s.conf b/conf/12_k8s.conf
new file mode 100644
index 00000000..5cfac966
--- /dev/null
+++ b/conf/12_k8s.conf
@@ -0,0 +1,41 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Information about the Master Node.
+
+POD_DIR=os.path.join(ROOT_DIR, 'pods/')
+POD='Papi'
+
+MASTER_IP = '10.10.120.22'
+MASTER_LOGIN = 'opnfv'
+MASTER_PASSWD = 'opnfv'
+
+K8S_CONFIG_FILEPATH = '/home/opnfv/sridhar/k8sconfig'
+
+# Information about the Worker Node. Default is Localhost.
+WORKER_IP = '10.10.120.21'
+WORKER_LOGIN = 'opnfv'
+WORKER_PASSWD = 'opnfv'
+
+
+# Plugin to use.
+PLUGIN = 'ovsdpdk'
+
+# Paths. Default location: Master Node.
+NETWORK_ATTACHMENT_FILEPATH = ['/home/opnfv/sridhar/cnb/userspace/ovsdpdk/userspace-ovs-netAttach.yaml']
+POD_MANIFEST_FILEPATH = '/home/opnfv/sridhar/cnb/userspace/ovsdpdk/userspace-ovs-netapp-pod.yaml'
+
+
+# Application pod
+APP_NAME = 'l2fwd'
+
diff --git a/conf/__init__.py b/conf/__init__.py
index d5d26757..7f6c1912 100644
--- a/conf/__init__.py
+++ b/conf/__init__.py
@@ -70,7 +70,7 @@ class Settings(object):
except AttributeError:
pass
return param
- elif isinstance(param, list) or isinstance(param, tuple):
+ elif isinstance(param, (list, tuple)):
tmp_list = []
for item in param:
tmp_list.append(self._eval_param(item))
@@ -108,6 +108,13 @@ class Settings(object):
raise AttributeError("%r object has no attribute %r" %
(self.__class__, attr))
+ def hasValue(self, attr):
+ """Return true if key exists
+ """
+ if attr in self.__dict__:
+ return True
+ return False
+
def __setattr__(self, name, value):
"""Set a value
"""
@@ -229,7 +236,7 @@ class Settings(object):
if key not in self.__dict__ and key not in _EXTRA_TEST_PARAMS:
unknown_keys.append(key)
- if len(unknown_keys):
+ if unknown_keys:
raise RuntimeError('Test parameters contain unknown configuration '
'parameter(s): {}'.format(', '.join(unknown_keys)))
@@ -256,6 +263,9 @@ class Settings(object):
Expand VM option with given key for given number of VMs
"""
tmp_value = self.getValue(key)
+ # skip empty/not set value
+ if not tmp_value:
+ return
if isinstance(tmp_value, str):
scalar = True
master_value = tmp_value
@@ -270,7 +280,7 @@ class Settings(object):
for vmindex in range(vm_number):
value = master_value_str.replace('#VMINDEX', str(vmindex))
for macro, args, param, _, step in re.findall(_PARSE_PATTERN, value):
- multi = int(step) if len(step) and int(step) else 1
+ multi = int(step) if step and int(step) else 1
if macro == '#EVAL':
# pylint: disable=eval-used
tmp_result = str(eval(param))
@@ -325,13 +335,13 @@ class Settings(object):
assert result == self.getValue(attr)
return True
- def validate_setValue(self, dummy_result, name, value):
+ def validate_setValue(self, _dummy_result, name, value):
"""Verifies, that value was correctly set
"""
assert value == self.__dict__[name]
return True
- def validate_resetValue(self, dummy_result, attr):
+ def validate_resetValue(self, _dummy_result, attr):
"""Verifies, that value was correctly reset
"""
return 'TEST_PARAMS' not in self.__dict__ or \
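
Possible usage of the new hasValue() helper: probe for an optional parameter before reading it, instead of relying on AttributeError (a sketch, assuming the usual 'from conf import settings' import):

    from conf import settings

    if settings.hasValue('TRAFFICGEN_TREX_PORT_SPEED'):
        speed = settings.getValue('TRAFFICGEN_TREX_PORT_SPEED')
    else:
        speed = 10000  # fall back to the documented 10G default
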
diff --git a/conf/integration/01_testcases.conf b/conf/integration/01_testcases.conf
index 692f1561..7daff217 100644
--- a/conf/integration/01_testcases.conf
+++ b/conf/integration/01_testcases.conf
@@ -1,4 +1,4 @@
-# Copyright 2015-2017 Intel Corporation.
+# Copyright 2015-2018 Intel Corporation, Tieto and others.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,10 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# The 1st value of SUPPORTED_TUNNELING_PROTO is used as the default
-# tunneling protocol for OP2P tests.
-SUPPORTED_TUNNELING_PROTO = ['vxlan', 'gre', 'geneve']
-
#
# Generic test configuration options are described at conf/01_testcases.conf
#
@@ -39,7 +35,9 @@ SUPPORTED_TUNNELING_PROTO = ['vxlan', 'gre', 'geneve']
# Common TestSteps parts ("macros")
#
+#
# P2P macros
+#
STEP_VSWITCH_P2P_INIT = [
['vswitch', 'add_switch', 'int_br0'], # STEP 0
['vswitch', 'add_phy_port', 'int_br0'], # STEP 1
@@ -52,6 +50,18 @@ STEP_VSWITCH_P2P_FINIT = [
['vswitch', 'del_switch', 'int_br0'],
]
+STEP_VSWITCH_P2P_CONNECTIONS_INIT = STEP_VSWITCH_P2P_INIT + [
+ ['vswitch', 'add_connection', 'int_br0', '#STEP[1][0]', '#STEP[2][0]'],
+ ['vswitch', 'add_connection', 'int_br0', '#STEP[2][0]', '#STEP[1][0]'],
+]
+
+STEP_VSWITCH_P2P_CONNECTIONS_FINIT = [
+ ['vswitch', 'dump_connections', 'int_br0'],
+ ['vswitch', 'del_connection', 'int_br0', '#STEP[1][0]', '#STEP[2][0]'],
+ ['vswitch', 'del_connection', 'int_br0', '#STEP[2][0]', '#STEP[1][0]'],
+] + STEP_VSWITCH_P2P_FINIT
+
+# P2P OVS specific macros
STEP_VSWITCH_P2P_FLOWS_INIT = STEP_VSWITCH_P2P_INIT + [
['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[1][1]', 'actions': ['output:#STEP[2][1]'], 'idle_timeout': '0'}],
['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[2][1]', 'actions': ['output:#STEP[1][1]'], 'idle_timeout': '0'}],
@@ -63,7 +73,9 @@ STEP_VSWITCH_P2P_FLOWS_FINIT = [
['vswitch', 'del_flow', 'int_br0', {'in_port': '#STEP[2][1]'}],
] + STEP_VSWITCH_P2P_FINIT
-# PVP and PVVP macros
+#
+# PVP macros
+#
STEP_VSWITCH_PVP_INIT = STEP_VSWITCH_P2P_INIT + [
['vswitch', 'add_vport', 'int_br0'], # STEP 3 vm1 ports
['vswitch', 'add_vport', 'int_br0'], # STEP 4
@@ -74,6 +86,22 @@ STEP_VSWITCH_PVP_FINIT = [
['vswitch', 'del_port', 'int_br0', '#STEP[4][0]'],
] + STEP_VSWITCH_P2P_FINIT
+STEP_VSWITCH_PVP_CONNECTIONS_INIT = STEP_VSWITCH_PVP_INIT + [
+ ['vswitch', 'add_connection', 'int_br0', '#STEP[1][0]', '#STEP[3][0]'],
+ ['vswitch', 'add_connection', 'int_br0', '#STEP[4][0]', '#STEP[2][0]'],
+ ['vswitch', 'add_connection', 'int_br0', '#STEP[2][0]', '#STEP[4][0]'],
+ ['vswitch', 'add_connection', 'int_br0', '#STEP[3][0]', '#STEP[1][0]'],
+]
+
+STEP_VSWITCH_PVP_CONNECTIONS_FINIT = [
+ ['vswitch', 'dump_connections', 'int_br0'],
+ ['vswitch', 'del_connection', 'int_br0', '#STEP[1][0]', '#STEP[3][0]'],
+ ['vswitch', 'del_connection', 'int_br0', '#STEP[4][0]', '#STEP[2][0]'],
+ ['vswitch', 'del_connection', 'int_br0', '#STEP[2][0]', '#STEP[4][0]'],
+ ['vswitch', 'del_connection', 'int_br0', '#STEP[3][0]', '#STEP[1][0]'],
+] + STEP_VSWITCH_PVP_FINIT
+
+# PVP OVS specific macros
STEP_VSWITCH_PVP_FLOWS_INIT = STEP_VSWITCH_PVP_INIT + [
['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[1][1]', 'actions': ['output:#STEP[3][1]'], 'idle_timeout': '0'}],
['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[4][1]', 'actions': ['output:#STEP[2][1]'], 'idle_timeout': '0'}],
@@ -89,6 +117,9 @@ STEP_VSWITCH_PVP_FLOWS_FINIT = [
['vswitch', 'del_flow', 'int_br0', {'in_port': '#STEP[3][1]'}],
] + STEP_VSWITCH_PVP_FINIT
+#
+# PVVP macros
+#
STEP_VSWITCH_PVVP_INIT = STEP_VSWITCH_PVP_INIT + [
['vswitch', 'add_vport', 'int_br0'], # STEP 5 vm2 ports
['vswitch', 'add_vport', 'int_br0'], # STEP 6
@@ -99,6 +130,26 @@ STEP_VSWITCH_PVVP_FINIT = [
['vswitch', 'del_port', 'int_br0', '#STEP[6][0]'],
] + STEP_VSWITCH_PVP_FINIT
+STEP_VSWITCH_PVVP_CONNECTIONS_INIT = STEP_VSWITCH_PVVP_INIT + [
+ ['vswitch', 'add_connection', 'int_br0', '#STEP[1][0]', '#STEP[3][0]'],
+ ['vswitch', 'add_connection', 'int_br0', '#STEP[4][0]', '#STEP[5][0]'],
+ ['vswitch', 'add_connection', 'int_br0', '#STEP[6][0]', '#STEP[2][0]'],
+ ['vswitch', 'add_connection', 'int_br0', '#STEP[2][0]', '#STEP[6][0]'],
+ ['vswitch', 'add_connection', 'int_br0', '#STEP[5][0]', '#STEP[4][0]'],
+ ['vswitch', 'add_connection', 'int_br0', '#STEP[3][0]', '#STEP[1][0]'],
+]
+
+STEP_VSWITCH_PVVP_CONNECTIONS_FINIT = [
+ ['vswitch', 'dump_connections', 'int_br0'],
+ ['vswitch', 'del_connection', 'int_br0', '#STEP[1][0]', '#STEP[3][0]'],
+ ['vswitch', 'del_connection', 'int_br0', '#STEP[4][0]', '#STEP[5][0]'],
+ ['vswitch', 'del_connection', 'int_br0', '#STEP[6][0]', '#STEP[2][0]'],
+ ['vswitch', 'del_connection', 'int_br0', '#STEP[2][0]', '#STEP[6][0]'],
+ ['vswitch', 'del_connection', 'int_br0', '#STEP[5][0]', '#STEP[4][0]'],
+ ['vswitch', 'del_connection', 'int_br0', '#STEP[3][0]', '#STEP[1][0]'],
+] + STEP_VSWITCH_PVVP_FINIT
+
+# PVVP OVS specific macros
STEP_VSWITCH_PVVP_FLOWS_INIT = STEP_VSWITCH_PVVP_INIT + [
['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[1][1]', 'actions': ['output:#STEP[3][1]'], 'idle_timeout': '0'}],
['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[4][1]', 'actions': ['output:#STEP[5][1]'], 'idle_timeout': '0'}],
@@ -118,192 +169,6 @@ STEP_VSWITCH_PVVP_FLOWS_FINIT = [
['vswitch', 'del_flow', 'int_br0', {'in_port': '#STEP[3][1]'}],
] + STEP_VSWITCH_PVVP_FINIT
-STEP_VSWITCH_P4VP_INIT = STEP_VSWITCH_PVVP_INIT + [
- ['vswitch', 'add_vport', 'int_br0'], # STEP 7 vm3 ports
- ['vswitch', 'add_vport', 'int_br0'], # STEP 8
- ['vswitch', 'add_vport', 'int_br0'], # STEP 9 vm4 ports
- ['vswitch', 'add_vport', 'int_br0'], # STEP 10
-]
-
-STEP_VSWITCH_P4VP_FINIT = [
- ['vswitch', 'del_port', 'int_br0', '#STEP[7][0]'], # vm3 ports
- ['vswitch', 'del_port', 'int_br0', '#STEP[8][0]'],
- ['vswitch', 'del_port', 'int_br0', '#STEP[9][0]'], # vm4 ports
- ['vswitch', 'del_port', 'int_br0', '#STEP[10][0]'],
-] + STEP_VSWITCH_PVVP_FINIT
-
-STEP_VSWITCH_P4VP_FLOWS_INIT = STEP_VSWITCH_P4VP_INIT + [
- ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[1][1]', \
- 'actions': ['output:#STEP[3][1]'], 'idle_timeout': '0'}],
- ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[4][1]', \
- 'actions': ['output:#STEP[5][1]'], 'idle_timeout': '0'}],
- ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[6][1]', \
- 'actions': ['output:#STEP[7][1]'], 'idle_timeout': '0'}],
- ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[8][1]', \
- 'actions': ['output:#STEP[9][1]'], 'idle_timeout': '0'}],
- ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[10][1]', \
- 'actions': ['output:#STEP[2][1]'], 'idle_timeout': '0'}],
- ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[2][1]', \
- 'actions': ['output:#STEP[10][1]'], 'idle_timeout': '0'}],
- ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[9][1]', \
- 'actions': ['output:#STEP[8][1]'], 'idle_timeout': '0'}],
- ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[7][1]', \
- 'actions': ['output:#STEP[6][1]'], 'idle_timeout': '0'}],
- ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[5][1]', \
- 'actions': ['output:#STEP[4][1]'], 'idle_timeout': '0'}],
- ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[3][1]', \
- 'actions': ['output:#STEP[1][1]'], 'idle_timeout': '0'}],
-]
-
-STEP_VSWITCH_P4VP_FLOWS_FINIT = [
- ['vswitch', 'dump_flows', 'int_br0'],
- ['vswitch', 'del_flow', 'int_br0'],
-] + STEP_VSWITCH_P4VP_FINIT
-
-STEP_VSWITCH_2PHY_2VM_INIT = STEP_VSWITCH_PVVP_INIT
-
-STEP_VSWITCH_2PHY_2VM_FINIT = STEP_VSWITCH_PVVP_FINIT
-
-STEP_VSWITCH_2_PARALLEL_VM_FLOWS_INIT = [
- # Setup Flows to reply ICMPv6 and similar packets, so to
- # avoid flooding the internal port with their re-transmissions
- ['vswitch', 'add_flow', 'int_br0', \
- {'priority': '1', 'dl_src': '00:00:00:00:00:01', \
- 'actions': ['output:#STEP[3][1]'], 'idle_timeout': '0'}],
- ['vswitch', 'add_flow', 'int_br0', \
- {'priority': '1', 'dl_src': '00:00:00:00:00:02', \
- 'actions': ['output:#STEP[4][1]'], 'idle_timeout': '0'}],
- ['vswitch', 'add_flow', 'int_br0', \
- {'priority': '1', 'dl_src': '00:00:00:00:00:03', \
- 'actions': ['output:#STEP[5][1]'], 'idle_timeout': '0'}],
- ['vswitch', 'add_flow', 'int_br0', \
- {'priority': '1', 'dl_src': '00:00:00:00:00:04', \
- 'actions': ['output:#STEP[6][1]'], 'idle_timeout': '0'}],
- # Forward UDP packets depending on dest port
- ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[1][1]', \
- 'dl_type': '0x0800', 'nw_proto': '17', 'udp_dst': '0', \
- 'actions': ['output:#STEP[3][1]'], 'idle_timeout': '0'}],
- ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[1][1]', \
- 'dl_type': '0x0800', 'nw_proto': '17', 'udp_dst': '1', \
- 'actions': ['output:#STEP[5][1]'], 'idle_timeout': '0'}],
- # Send VM outputs to phy port #2
- ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[4][1]', \
- 'actions': ['output:#STEP[2][1]'], 'idle_timeout': '0'}],
- ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[6][1]', \
- 'actions': ['output:#STEP[2][1]'], 'idle_timeout': '0'}],
-]
-
-STEP_VSWITCH_2PHY_4VM_INIT = STEP_VSWITCH_2PHY_2VM_INIT + [
- ['vswitch', 'add_vport', 'int_br0'], # STEP 7 vm3 ports
- ['vswitch', 'add_vport', 'int_br0'], # STEP 8
- ['vswitch', 'add_vport', 'int_br0'], # STEP 9 vm4 ports
- ['vswitch', 'add_vport', 'int_br0'], # STEP 10
-]
-
-STEP_VSWITCH_2PHY_4VM_FINIT = [
- ['vswitch', 'del_port', 'int_br0', '#STEP[7][0]'], # vm3 ports
- ['vswitch', 'del_port', 'int_br0', '#STEP[8][0]'],
- ['vswitch', 'del_port', 'int_br0', '#STEP[9][0]'], # vm4 ports
- ['vswitch', 'del_port', 'int_br0', '#STEP[10][0]'],
-] + STEP_VSWITCH_2PHY_2VM_FINIT
-
-STEP_VSWITCH_FLOWS_FINIT = [
- ['vswitch', 'dump_flows', 'int_br0'],
- ['vswitch', 'del_flow', 'int_br0'],
-]
-
-STEP_VSWITCH_4_PARALLEL_VM_FLOWS_INIT = [
- # Setup Flows to reply ICMPv6 and similar packets, so to
- # avoid flooding the internal port with their re-transmissions
- ['vswitch', 'add_flow', 'int_br0', \
- {'priority': '1', 'dl_src': '00:00:00:00:00:01', \
- 'actions': ['output:#STEP[3][1]'], 'idle_timeout': '0'}],
- ['vswitch', 'add_flow', 'int_br0', \
- {'priority': '1', 'dl_src': '00:00:00:00:00:02', \
- 'actions': ['output:#STEP[4][1]'], 'idle_timeout': '0'}],
- ['vswitch', 'add_flow', 'int_br0', \
- {'priority': '1', 'dl_src': '00:00:00:00:00:03', \
- 'actions': ['output:#STEP[5][1]'], 'idle_timeout': '0'}],
- ['vswitch', 'add_flow', 'int_br0', \
- {'priority': '1', 'dl_src': '00:00:00:00:00:04', \
- 'actions': ['output:#STEP[6][1]'], 'idle_timeout': '0'}],
- ['vswitch', 'add_flow', 'int_br0', \
- {'priority': '1', 'dl_src': '00:00:00:00:00:05', \
- 'actions': ['output:#STEP[7][1]'], 'idle_timeout': '0'}],
- ['vswitch', 'add_flow', 'int_br0', \
- {'priority': '1', 'dl_src': '00:00:00:00:00:06', \
- 'actions': ['output:#STEP[8][1]'], 'idle_timeout': '0'}],
- ['vswitch', 'add_flow', 'int_br0', \
- {'priority': '1', 'dl_src': '00:00:00:00:00:07', \
- 'actions': ['output:#STEP[9][1]'], 'idle_timeout': '0'}],
- ['vswitch', 'add_flow', 'int_br0', \
- {'priority': '1', 'dl_src': '00:00:00:00:00:08', \
- 'actions': ['output:#STEP[10][1]'], 'idle_timeout': '0'}],
- # Forward UDP packets depending on dest port
- ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[1][1]', \
- 'dl_type': '0x0800', 'nw_proto': '17', 'udp_dst': '0', \
- 'actions': ['output:#STEP[3][1]'], 'idle_timeout': '0'}],
- ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[1][1]', \
- 'dl_type': '0x0800', 'nw_proto': '17', 'udp_dst': '1', \
- 'actions': ['output:#STEP[5][1]'], 'idle_timeout': '0'}],
- ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[1][1]', \
- 'dl_type': '0x0800', 'nw_proto': '17', 'udp_dst': '2', \
- 'actions': ['output:#STEP[7][1]'], 'idle_timeout': '0'}],
- ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[1][1]', \
- 'dl_type': '0x0800', 'nw_proto': '17', 'udp_dst': '3', \
- 'actions': ['output:#STEP[9][1]'], 'idle_timeout': '0'}],
- # Send VM outputs to phy port #2
- ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[4][1]', \
- 'actions': ['output:#STEP[2][1]'], 'idle_timeout': '0'}],
- ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[6][1]', \
- 'actions': ['output:#STEP[2][1]'], 'idle_timeout': '0'}],
- ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[8][1]', \
- 'actions': ['output:#STEP[2][1]'], 'idle_timeout': '0'}],
- ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[10][1]', \
- 'actions': ['output:#STEP[2][1]'], 'idle_timeout': '0'}],
-]
-
-STEP_VSWITCH_2PHY_6VM_INIT = STEP_VSWITCH_2PHY_4VM_INIT + [
- ['vswitch', 'add_vport', 'int_br0'], # STEP 11 vm5 vhu8
- ['vswitch', 'add_vport', 'int_br0'], # STEP 12 vhu9
- ['vswitch', 'add_vport', 'int_br0'], # STEP 13 vm6 vhu10
- ['vswitch', 'add_vport', 'int_br0'], # STEP 14 vhu11
-]
-
-STEP_VSWITCH_6_PARALLEL_VM_FLOWS_INIT = STEP_VSWITCH_4_PARALLEL_VM_FLOWS_INIT + [
- ['vswitch', 'add_flow', 'int_br0', \
- {'priority': '1', 'dl_src': '00:00:00:00:00:09', \
- 'actions': ['output:#STEP[11][1]'], 'idle_timeout': '0'}],
- ['vswitch', 'add_flow', 'int_br0', \
- {'priority': '1', 'dl_src': '00:00:00:00:00:0a', \
- 'actions': ['output:#STEP[12][1]'], 'idle_timeout': '0'}],
- ['vswitch', 'add_flow', 'int_br0', \
- {'priority': '1', 'dl_src': '00:00:00:00:00:0b', \
- 'actions': ['output:#STEP[13][1]'], 'idle_timeout': '0'}],
- ['vswitch', 'add_flow', 'int_br0', \
- {'priority': '1', 'dl_src': '00:00:00:00:00:0c', \
- 'actions': ['output:#STEP[14][1]'], 'idle_timeout': '0'}],
- # Forward UDP packets depending on dest port
- ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[1][1]', \
- 'dl_type': '0x0800', 'nw_proto': '17', 'udp_dst': '4', \
- 'actions': ['output:#STEP[11][1]'], 'idle_timeout': '0'}],
- ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[1][1]', \
- 'dl_type': '0x0800', 'nw_proto': '17', 'udp_dst': '5', \
- 'actions': ['output:#STEP[13][1]'], 'idle_timeout': '0'}],
- # Send VM outputs to phy port #2
- ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[12][1]', \
- 'actions': ['output:#STEP[2][1]'], 'idle_timeout': '0'}],
- ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[14][1]', \
- 'actions': ['output:#STEP[2][1]'], 'idle_timeout': '0'}],
-]
-
-STEP_VSWITCH_2PHY_6VM_FINIT = [
- ['vswitch', 'del_port', 'int_br0', '#STEP[11][0]'], # vm5 ports
- ['vswitch', 'del_port', 'int_br0', '#STEP[12][0]'],
- ['vswitch', 'del_port', 'int_br0', '#STEP[13][0]'], # vm6 ports
- ['vswitch', 'del_port', 'int_br0', '#STEP[14][0]'],
-] + STEP_VSWITCH_2PHY_4VM_FINIT
-
#
# Definition of integration tests
#
@@ -327,7 +192,6 @@ INTEGRATION_TESTS = [
{
"Name": "overlay_p2p_tput",
"Deployment": "op2p",
- "Tunnel Type": SUPPORTED_TUNNELING_PROTO[0],
"Tunnel Operation": "encapsulation",
"Description": "Overlay Encapsulation Throughput RFC2544 Test",
"Parameters": {
@@ -341,7 +205,6 @@ INTEGRATION_TESTS = [
{
"Name": "overlay_p2p_cont",
"Deployment": "op2p",
- "Tunnel Type": SUPPORTED_TUNNELING_PROTO[0],
"Tunnel Operation": "encapsulation",
"Description": "Overlay Encapsulation RFC2544 Continuous Stream",
"Parameters": {
@@ -355,7 +218,6 @@ INTEGRATION_TESTS = [
{
"Name": "overlay_p2p_decap_tput",
"Deployment": "op2p",
- "Tunnel Type": SUPPORTED_TUNNELING_PROTO[0],
"Tunnel Operation": "decapsulation",
"Description": "Overlay Decapsulation Throughput RFC2544 Test",
"Parameters": {
@@ -369,7 +231,6 @@ INTEGRATION_TESTS = [
{
"Name": "overlay_p2p_decap_cont",
"Deployment": "op2p",
- "Tunnel Type": SUPPORTED_TUNNELING_PROTO[0],
"Tunnel Operation": "decapsulation",
"Description": "Overlay Decapsulation RFC2544 Continuous Stream",
"Parameters": {
@@ -449,9 +310,49 @@ INTEGRATION_TESTS = [
]
},
{
+ "Name": "vswitch_add_del_connection",
+ "Deployment": "clean",
+ "Description": "vSwitch - add and delete connection",
+ "TestSteps": [
+ ['vswitch', 'add_switch', 'int_br0'],
+ ['vswitch', 'add_phy_port', 'int_br0'],
+ ['vswitch', 'add_phy_port', 'int_br0'],
+ ['vswitch', 'add_connection', 'int_br0', '#STEP[1][0]', '#STEP[2][0]'],
+ ['vswitch', 'dump_connections', 'int_br0'],
+ ['vswitch', 'del_connection', 'int_br0', '#STEP[1][0]', '#STEP[2][0]'],
+ ['vswitch', 'del_port', 'int_br0', '#STEP[1][0]'],
+ ['vswitch', 'del_port', 'int_br0', '#STEP[2][0]'],
+ ['vswitch', 'del_switch', 'int_br0'],
+ ]
+ },
+ {
+ "Name": "vswitch_vports_add_del_connection",
+ "Deployment": "clean",
+ "Description": "vSwitch - add and delete connection",
+ "Description": "vSwitch - configure switch with vports, add and delete connection",
+ "TestSteps": [
+ ['vswitch', 'add_switch', 'int_br0'],
+ ['vswitch', 'add_vport', 'int_br0'],
+ ['vswitch', 'add_vport', 'int_br0'],
+ ['vswitch', 'add_connection', 'int_br0', '#STEP[1][0]', '#STEP[2][0]'],
+ ['vswitch', 'dump_connections', 'int_br0'],
+ ['vswitch', 'del_connection', 'int_br0', '#STEP[1][0]', '#STEP[2][0]'],
+ ['vswitch', 'del_port', 'int_br0', '#STEP[1][0]'],
+ ['vswitch', 'del_port', 'int_br0', '#STEP[2][0]'],
+ ['vswitch', 'del_switch', 'int_br0'],
+ ]
+ },
+ {
+ "Name": "vswitch_add_del_connections",
+ "Deployment": "clean",
+ "Description": "vSwitch - add and delete connections",
+ "TestSteps": STEP_VSWITCH_P2P_CONNECTIONS_INIT +
+ STEP_VSWITCH_P2P_CONNECTIONS_FINIT
+ },
+ {
"Name": "vswitch_add_del_flow",
"Deployment": "clean",
- "Description": "vSwitch - add and delete flow",
+ "Description": "OVS: vSwitch - add and delete flow",
"TestSteps": [
['vswitch', 'add_switch', 'int_br0'],
['vswitch', 'add_phy_port', 'int_br0'],
@@ -466,7 +367,7 @@ INTEGRATION_TESTS = [
{
"Name": "vswitch_vports_add_del_flow",
"Deployment": "clean",
- "Description": "vSwitch - configure switch with vports, add and delete flow",
+ "Description": "OVS: vSwitch - configure switch with vports, add and delete flow",
"TestSteps": [
['vswitch', 'add_switch', 'int_br0'],
['vswitch', 'add_vport', 'int_br0'],
@@ -481,7 +382,7 @@ INTEGRATION_TESTS = [
{
"Name": "vswitch_add_del_flows",
"Deployment": "clean",
- "Description": "vSwitch - add and delete flows",
+ "Description": "OVS: vSwitch - add and delete flows",
"TestSteps": STEP_VSWITCH_P2P_FLOWS_INIT +
STEP_VSWITCH_P2P_FLOWS_FINIT
},
@@ -489,31 +390,31 @@ INTEGRATION_TESTS = [
"Name": "vswitch_p2p_tput",
"Deployment": "clean",
"Description": "vSwitch - configure switch and execute RFC2544 throughput test",
- "TestSteps": STEP_VSWITCH_P2P_FLOWS_INIT +
+ "TestSteps": STEP_VSWITCH_P2P_CONNECTIONS_INIT +
[
['trafficgen', 'send_traffic', {'traffic_type' : 'rfc2544_throughput', 'bidir' : 'True'}],
] +
- STEP_VSWITCH_P2P_FLOWS_FINIT
+ STEP_VSWITCH_P2P_CONNECTIONS_FINIT
},
{
"Name": "vswitch_p2p_back2back",
"Deployment": "clean",
"Description": "vSwitch - configure switch and execute RFC2544 back2back test",
- "TestSteps": STEP_VSWITCH_P2P_FLOWS_INIT +
+ "TestSteps": STEP_VSWITCH_P2P_CONNECTIONS_INIT +
[
['trafficgen', 'send_traffic', {'traffic_type' : 'rfc2544_back2back', 'bidir' : 'True'}],
] +
- STEP_VSWITCH_P2P_FLOWS_FINIT
+ STEP_VSWITCH_P2P_CONNECTIONS_FINIT
},
{
"Name": "vswitch_p2p_cont",
"Deployment": "clean",
"Description": "vSwitch - configure switch and execute RFC2544 continuous stream test",
- "TestSteps": STEP_VSWITCH_P2P_FLOWS_INIT +
+ "TestSteps": STEP_VSWITCH_P2P_CONNECTIONS_INIT +
[
['trafficgen', 'send_traffic', {'traffic_type' : 'rfc2544_continuous', 'bidir' : 'True'}],
] +
- STEP_VSWITCH_P2P_FLOWS_FINIT
+ STEP_VSWITCH_P2P_CONNECTIONS_FINIT
},
{
"Name": "vswitch_pvp",
@@ -545,43 +446,43 @@ INTEGRATION_TESTS = [
"Name": "vswitch_pvp_tput",
"Deployment": "clean",
"Description": "vSwitch - configure switch, vnf and execute RFC2544 throughput test",
- "TestSteps": STEP_VSWITCH_PVP_FLOWS_INIT +
+ "TestSteps": STEP_VSWITCH_PVP_CONNECTIONS_INIT +
[
['vnf', 'start'],
['trafficgen', 'send_traffic', {'traffic_type' : 'rfc2544_throughput', 'bidir' : 'True'}],
['vnf', 'stop'],
] +
- STEP_VSWITCH_PVP_FLOWS_FINIT
+ STEP_VSWITCH_PVP_CONNECTIONS_FINIT
},
{
"Name": "vswitch_pvp_back2back",
"Deployment": "clean",
"Description": "vSwitch - configure switch, vnf and execute RFC2544 back2back test",
- "TestSteps": STEP_VSWITCH_PVP_FLOWS_INIT +
+ "TestSteps": STEP_VSWITCH_PVP_CONNECTIONS_INIT +
[
['vnf', 'start'],
['trafficgen', 'send_traffic', {'traffic_type' : 'rfc2544_back2back', 'bidir' : 'True'}],
['vnf', 'stop'],
] +
- STEP_VSWITCH_PVP_FLOWS_FINIT
+ STEP_VSWITCH_PVP_CONNECTIONS_FINIT
},
{
"Name": "vswitch_pvp_cont",
"Deployment": "clean",
"Description": "vSwitch - configure switch, vnf and execute RFC2544 continuous stream test",
- "TestSteps": STEP_VSWITCH_PVP_FLOWS_INIT +
+ "TestSteps": STEP_VSWITCH_PVP_CONNECTIONS_INIT +
[
['vnf', 'start'],
['trafficgen', 'send_traffic', {'traffic_type' : 'rfc2544_continuous', 'bidir' : 'True'}],
['vnf', 'stop'],
] +
- STEP_VSWITCH_PVP_FLOWS_FINIT
+ STEP_VSWITCH_PVP_CONNECTIONS_FINIT
},
{
"Name": "vswitch_pvp_all",
"Deployment": "clean",
"Description": "vSwitch - configure switch, vnf and execute all test types",
- "TestSteps": STEP_VSWITCH_PVP_FLOWS_INIT +
+ "TestSteps": STEP_VSWITCH_PVP_CONNECTIONS_INIT +
[
['vnf', 'start'],
['trafficgen', 'send_traffic', {'traffic_type' : 'rfc2544_throughput', 'bidir' : 'True'}],
@@ -589,7 +490,7 @@ INTEGRATION_TESTS = [
['trafficgen', 'send_traffic', {'traffic_type' : 'rfc2544_continuous', 'bidir' : 'True'}],
['vnf', 'stop'],
] +
- STEP_VSWITCH_PVP_FLOWS_FINIT
+ STEP_VSWITCH_PVP_CONNECTIONS_FINIT
},
{
"Name": "vswitch_pvvp",
@@ -608,7 +509,7 @@ INTEGRATION_TESTS = [
"Name": "vswitch_pvvp_tput",
"Deployment": "clean",
"Description": "vSwitch - configure switch, two chained vnfs and execute RFC2544 throughput test",
- "TestSteps": STEP_VSWITCH_PVVP_FLOWS_INIT +
+ "TestSteps": STEP_VSWITCH_PVVP_CONNECTIONS_INIT +
[
['vnf1', 'start'],
['vnf2', 'start'],
@@ -616,13 +517,13 @@ INTEGRATION_TESTS = [
['vnf1', 'stop'],
['vnf2', 'stop'],
] +
- STEP_VSWITCH_PVVP_FLOWS_FINIT
+ STEP_VSWITCH_PVVP_CONNECTIONS_FINIT
},
{
"Name": "vswitch_pvvp_back2back",
"Deployment": "clean",
"Description": "vSwitch - configure switch, two chained vnfs and execute RFC2544 back2back test",
- "TestSteps": STEP_VSWITCH_PVVP_FLOWS_INIT +
+ "TestSteps": STEP_VSWITCH_PVVP_CONNECTIONS_INIT +
[
['vnf1', 'start'],
['vnf2', 'start'],
@@ -630,13 +531,13 @@ INTEGRATION_TESTS = [
['vnf1', 'stop'],
['vnf2', 'stop'],
] +
- STEP_VSWITCH_PVVP_FLOWS_FINIT
+ STEP_VSWITCH_PVVP_CONNECTIONS_FINIT
},
{
"Name": "vswitch_pvvp_cont",
"Deployment": "clean",
"Description": "vSwitch - configure switch, two chained vnfs and execute RFC2544 continuous stream test",
- "TestSteps": STEP_VSWITCH_PVVP_FLOWS_INIT +
+ "TestSteps": STEP_VSWITCH_PVVP_CONNECTIONS_INIT +
[
['vnf1', 'start'],
['vnf2', 'start'],
@@ -644,13 +545,13 @@ INTEGRATION_TESTS = [
['vnf1', 'stop'],
['vnf2', 'stop'],
] +
- STEP_VSWITCH_PVVP_FLOWS_FINIT
+ STEP_VSWITCH_PVVP_CONNECTIONS_FINIT
},
{
"Name": "vswitch_pvvp_all",
"Deployment": "clean",
"Description": "vSwitch - configure switch, two chained vnfs and execute all test types",
- "TestSteps": STEP_VSWITCH_PVVP_FLOWS_INIT +
+ "TestSteps": STEP_VSWITCH_PVVP_CONNECTIONS_INIT +
[
['vnf1', 'start'],
['vnf2', 'start'],
@@ -660,104 +561,48 @@ INTEGRATION_TESTS = [
['vnf1', 'stop'],
['vnf2', 'stop'],
] +
- STEP_VSWITCH_PVVP_FLOWS_FINIT
- },
- {
- "Name": "vswitch_p4vp",
- "Description": "Just configure 4 chained vnfs",
- "Deployment": "clean",
- "TestSteps": STEP_VSWITCH_P4VP_FLOWS_INIT +
- [
- ['vnf1', 'start'],
- ['vnf2', 'start'],
- ['vnf3', 'start'],
- ['vnf4', 'start'],
- ['vnf1', 'stop'],
- ['vnf2', 'stop'],
- ['vnf3', 'stop'],
- ['vnf4', 'stop'],
- ] +
- STEP_VSWITCH_P4VP_FLOWS_FINIT
+ STEP_VSWITCH_PVVP_CONNECTIONS_FINIT
},
{
"Name": "vswitch_p4vp_tput",
- "Description": "4 chained vnfs, execute RFC2544 throughput test",
- "Deployment": "clean",
- "TestSteps": STEP_VSWITCH_P4VP_FLOWS_INIT +
- [
- ['vnf1', 'start'],
- ['vnf2', 'start'],
- ['vnf3', 'start'],
- ['vnf4', 'start'],
- ['trafficgen', 'send_traffic', {'traffic_type' : 'rfc2544_throughput', \
- 'bidir' : 'True'}],
- ['vnf1', 'stop'],
- ['vnf2', 'stop'],
- ['vnf3', 'stop'],
- ['vnf4', 'stop'],
- ] +
- STEP_VSWITCH_P4VP_FLOWS_FINIT
+ "Description": "4 chained vnfs, execute RFC2544 throughput test, deployment pvvp4",
+ "Deployment": "pvvp4",
+ "Parameters" : {
+ "TRAFFIC" : {
+ "traffic_type" : "rfc2544_throughput",
+ },
+ },
},
{
"Name": "vswitch_p4vp_back2back",
- "Description": "4 chained vnfs, execute RFC2544 back2back test",
- "Deployment": "clean",
- "TestSteps": STEP_VSWITCH_P4VP_FLOWS_INIT +
- [
- ['vnf1', 'start'],
- ['vnf2', 'start'],
- ['vnf3', 'start'],
- ['vnf4', 'start'],
- ['trafficgen', 'send_traffic', {'traffic_type' : 'rfc2544_back2back', \
- 'bidir' : 'True'}],
- ['vnf1', 'stop'],
- ['vnf2', 'stop'],
- ['vnf3', 'stop'],
- ['vnf4', 'stop'],
- ] +
- STEP_VSWITCH_P4VP_FLOWS_FINIT
+ "Description": "4 chained vnfs, execute RFC2544 back2back test, deployment pvvp4",
+ "Deployment": "pvvp4",
+ "Parameters" : {
+ "TRAFFIC" : {
+ "traffic_type" : "rfc2544_back2back",
+ },
+ },
},
{
"Name": "vswitch_p4vp_cont",
- "Description": "4 chained vnfs, execute RFC2544 continuous stream test",
- "Deployment": "clean",
- "TestSteps": STEP_VSWITCH_P4VP_FLOWS_INIT +
- [
- ['vnf1', 'start'],
- ['vnf2', 'start'],
- ['vnf3', 'start'],
- ['vnf4', 'start'],
- ['trafficgen', 'send_traffic', {'traffic_type' : 'rfc2544_continuous', \
- 'bidir' : 'True'}],
- ['vnf1', 'stop'],
- ['vnf2', 'stop'],
- ['vnf3', 'stop'],
- ['vnf4', 'stop'],
- ] +
- STEP_VSWITCH_P4VP_FLOWS_FINIT
+ "Description": "4 chained vnfs, execute RFC2544 continuous stream test, deployment pvvp4",
+ "Deployment": "pvvp4",
+ "Parameters" : {
+ "TRAFFIC" : {
+ "traffic_type" : "rfc2544_continuous",
+ },
+ },
},
{
"Name": "vswitch_p4vp_all",
- "Description": "4 chained vnfs, execute RFC2544 throughput test",
- "Deployment": "clean",
- "TestSteps": STEP_VSWITCH_P4VP_FLOWS_INIT +
+ "Description": "4 chained vnfs, execute RFC2544 throughput tests, deployment pvvp4",
+ "Deployment": "pvvp4",
+ "TestSteps":
[
- ['vnf1', 'start'],
- ['vnf2', 'start'],
- ['vnf3', 'start'],
- ['vnf4', 'start'],
- ['trafficgen', 'send_traffic', {'traffic_type' : 'rfc2544_throughput', \
- 'bidir' : 'True'}],
- ['trafficgen', 'send_traffic', {'traffic_type' : 'rfc2544_back2back', \
- 'bidir' : 'True'}],
- ['trafficgen', 'send_traffic', {'traffic_type' : 'rfc2544_continuous', \
- 'bidir' : 'True'}],
- ['vnf1', 'stop'],
- ['vnf2', 'stop'],
- ['vnf3', 'stop'],
- ['vnf4', 'stop'],
- ] +
- STEP_VSWITCH_P4VP_FLOWS_FINIT
+ ['trafficgen', 'send_traffic', {'traffic_type' : 'rfc2544_throughput'}],
+ ['trafficgen', 'send_traffic', {'traffic_type' : 'rfc2544_back2back'}],
+ ['trafficgen', 'send_traffic', {'traffic_type' : 'rfc2544_continuous'}],
+ ]
},
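+ # NOTE: with the multi-VM deployments ("pvvp4", "pvpv2", ...) vsperf
+ # configures the vswitch and starts/stops the VNFs itself, which is why the
+ # testcases above and below no longer carry explicit vnf/vswitch TestSteps.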
{
# Topology: 2 Parallel PVP connections
@@ -766,25 +611,13 @@ INTEGRATION_TESTS = [
# or add "Parameters" option to the test definition:
# "Parameters" : {'GUEST_LOOPBACK' : ['linux_bridge'],},
"Name": "2pvp_udp_dest_flows",
- "Description": "RFC2544 Continuous TC with 2 Parallel VMs, flows on UDP Dest Port",
- "Deployment": "clean",
+ "Description": "RFC2544 Continuous TC with 2 Parallel VMs, flows on UDP Dest Port, deployment pvpv2",
+ "Deployment": "pvpv2",
"Parameters" : {
"TRAFFIC" : {
- "multistream" : 2,
- "stream_type" : "L4",
+ "traffic_type" : "rfc2544_continuous",
},
},
- "TestSteps": STEP_VSWITCH_2PHY_2VM_INIT +
- STEP_VSWITCH_2_PARALLEL_VM_FLOWS_INIT + [
- # Start 2 VMs
- ['vnf1', 'start'],
- ['vnf2', 'start'],
- ['trafficgen', 'send_traffic', {'traffic_type' : 'rfc2544_continuous', 'bidir' : 'False'}],
- ['vnf1', 'stop'],
- ['vnf2', 'stop'],
- # Clean up
- ] + STEP_VSWITCH_FLOWS_FINIT +
- STEP_VSWITCH_2PHY_2VM_FINIT
},
{
# Topology: 4 Parallel PVP connections
@@ -793,29 +626,13 @@ INTEGRATION_TESTS = [
# or add "Parameters" option to the test definition:
# "Parameters" : {'GUEST_LOOPBACK' : ['linux_bridge'],},
"Name": "4pvp_udp_dest_flows",
- "Description": "RFC2544 Continuous TC with 4 Parallel VMs, flows on UDP Dest Port",
- "Deployment": "clean",
+ "Description": "RFC2544 Continuous TC with 4 Parallel VMs, flows on UDP Dest Port, deployment pvpv4",
+ "Deployment": "pvpv4",
"Parameters" : {
"TRAFFIC" : {
- "multistream" : 4,
- "stream_type" : "L4",
+ "traffic_type" : "rfc2544_continuous",
},
},
- "TestSteps": STEP_VSWITCH_2PHY_4VM_INIT +
- STEP_VSWITCH_4_PARALLEL_VM_FLOWS_INIT + [
- # Start 4 VMs
- ['vnf1', 'start'],
- ['vnf2', 'start'],
- ['vnf3', 'start'],
- ['vnf4', 'start'],
- ['trafficgen', 'send_traffic', {'traffic_type' : 'rfc2544_continuous', 'bidir' : 'False'}],
- ['vnf1', 'stop'],
- ['vnf2', 'stop'],
- ['vnf3', 'stop'],
- ['vnf4', 'stop'],
- # Clean up
- ] + STEP_VSWITCH_FLOWS_FINIT +
- STEP_VSWITCH_2PHY_4VM_FINIT
},
{
# Topology: 6 Parallel PVP connections
@@ -824,32 +641,13 @@ INTEGRATION_TESTS = [
# or add "Parameters" option to the test definition:
# "Parameters" : {'GUEST_LOOPBACK' : ['linux_bridge'],},
"Name": "6pvp_udp_dest_flows",
- "Description": "RFC2544 Continuous TC with 6 Parallel VMs, flows on UDP Dest Port",
- "Deployment": "clean",
+ "Description": "RFC2544 Continuous TC with 6 Parallel VMs, flows on UDP Dest Port, deployment pvpv6",
+ "Deployment": "pvpv6",
"Parameters" : {
"TRAFFIC" : {
- "multistream" : 6,
- "stream_type" : "L4",
+ "traffic_type" : "rfc2544_continuous",
},
},
- "TestSteps": STEP_VSWITCH_2PHY_6VM_INIT +
- STEP_VSWITCH_6_PARALLEL_VM_FLOWS_INIT + [
- # Start VMs
- ['vnf1', 'start'],
- ['vnf2', 'start'],
- ['vnf3', 'start'],
- ['vnf4', 'start'],
- ['vnf5', 'start'],
- ['vnf6', 'start'],
- ['trafficgen', 'send_traffic', {'traffic_type' : 'rfc2544_continuous', 'bidir' : 'False'}],
- ['vnf1', 'stop'],
- ['vnf2', 'stop'],
- ['vnf3', 'stop'],
- ['vnf4', 'stop'],
- ['vnf5', 'stop'],
- ['vnf6', 'stop'],
- ] + STEP_VSWITCH_FLOWS_FINIT +
- STEP_VSWITCH_2PHY_6VM_FINIT
},
{
# Testcase for verification of vHost User NUMA awareness feature
@@ -979,23 +777,6 @@ INTEGRATION_TESTS = [
['tools', 'assert', 'len(#STEP[-1][0])'],
]
},
- {
- "Name": "vswitch_vports_add_del_connection_vpp",
- "Deployment": "clean",
- "Description": "VPP: vSwitch - configure switch with vports, add and delete connection",
- "vSwitch" : "VppDpdkVhost",
- "TestSteps": [
- ['vswitch', 'add_switch', 'int_br0'],
- ['vswitch', 'add_vport', 'int_br0'],
- ['vswitch', 'add_vport', 'int_br0'],
- ['vswitch', 'add_connection', 'int_br0', '#STEP[1][0]', '#STEP[2][0]', True],
- ['vswitch', 'dump_connections', 'int_br0'],
- ['vswitch', 'del_connection', 'int_br0', '#STEP[1][0]', '#STEP[2][0]', True],
- ['vswitch', 'del_port', 'int_br0', '#STEP[1][0]'],
- ['vswitch', 'del_port', 'int_br0', '#STEP[2][0]'],
- ['vswitch', 'del_switch', 'int_br0'],
- ]
- },
#
# END of VPP tests used by VERIFY and MERGE jobs by OPNFV Jenkins
#
@@ -1027,9 +808,9 @@ INTEGRATION_TESTS = [
},
"TestSteps": [
# replace original flows with vlan ID modification
- ['!vswitch', 'add_flow', 'br0', {'in_port': '1', 'actions': ['mod_vlan_vid:4','output:3']}],
- ['!vswitch', 'add_flow', 'br0', {'in_port': '2', 'actions': ['mod_vlan_vid:4','output:4']}],
- ['vswitch', 'dump_flows', 'br0'],
+ ['!vswitch', 'add_flow', '$VSWITCH_BRIDGE_NAME', {'in_port': '1', 'actions': ['mod_vlan_vid:4','output:3']}],
+ ['!vswitch', 'add_flow', '$VSWITCH_BRIDGE_NAME', {'in_port': '2', 'actions': ['mod_vlan_vid:4','output:4']}],
+ ['vswitch', 'dump_flows', '$VSWITCH_BRIDGE_NAME'],
# verify that received frames have modified vlan ID
['VNF0', 'execute_and_wait', 'tcpdump -i eth0 -c 5 -w dump.pcap vlan 4 &'],
['trafficgen', 'send_traffic',{}],
@@ -1055,14 +836,14 @@ _CAPTURE_P2P2P_SETUP = [
# create and configure two bridges to forward traffic through NIC under
# the test and back to the traffic generator
# 1st bridge:
- ['vswitch', 'add_switch', 'br0'],
+ ['vswitch', 'add_switch', '$VSWITCH_BRIDGE_NAME'],
['tools', 'exec_shell', 'sudo ip addr flush dev $NICS[0]["device"]'],
['tools', 'exec_shell', 'sudo ip link set dev $NICS[0]["device"] up'],
- ['tools', 'exec_shell', '$TOOLS["ovs-vsctl"] add-port br0 $NICS[0]["device"]'],
+ ['tools', 'exec_shell', '$TOOLS["ovs-vsctl"] add-port $VSWITCH_BRIDGE_NAME $NICS[0]["device"]'],
['tools', 'exec_shell', 'sudo $TOOLS["bind-tool"] --bind igb_uio $NICS[3]["pci"]'],
- ['tools', 'exec_shell', '$TOOLS["ovs-vsctl"] add-port br0 dpdk0 -- '
+ ['tools', 'exec_shell', '$TOOLS["ovs-vsctl"] add-port $VSWITCH_BRIDGE_NAME dpdk0 -- '
'set Interface dpdk0 type=dpdk options:dpdk-devargs=$NICS[3]["pci"]'],
- ['tools', 'exec_shell', '$TOOLS["ovs-ofctl"] add-flow br0 in_port=1,action='
+ ['tools', 'exec_shell', '$TOOLS["ovs-ofctl"] add-flow $VSWITCH_BRIDGE_NAME in_port=1,action='
'$_CAPTURE_P2P2P_OVS_ACTION,output:2'],
# 2nd bridge:
['vswitch', 'add_switch', 'br1'],
@@ -1074,7 +855,7 @@ _CAPTURE_P2P2P_SETUP = [
['tools', 'exec_shell', '$TOOLS["ovs-vsctl"] add-port br1 $NICS[1]["device"]'],
['vswitch', 'add_flow', 'br1', {'in_port': '1', 'actions': ['output:2']}],
# log flow details
- ['vswitch', 'dump_flows', 'br0'],
+ ['vswitch', 'dump_flows', '$VSWITCH_BRIDGE_NAME'],
['vswitch', 'dump_flows', 'br1'],
]
INTEGRATION_TESTS += [
@@ -1108,7 +889,7 @@ INTEGRATION_TESTS += [
['tools', 'exec_shell_background', 'tcpdump -i $NICS[2]["device"] -c 5 -w capture.pcap '
'ether src $TRAFFIC["l2"]["srcmac"]'],
['trafficgen', 'send_traffic', {}],
- ['vswitch', 'dump_flows', 'br0'],
+ ['vswitch', 'dump_flows', '$VSWITCH_BRIDGE_NAME'],
['vswitch', 'dump_flows', 'br1'],
# there must be 5 captured frames...
['tools', 'exec_shell', 'tcpdump -r capture.pcap | wc -l', '|^(\d+)$'],
@@ -1118,6 +899,45 @@ INTEGRATION_TESTS += [
['tools', 'assert', '#STEP[-1][0] == 0'],
],
},
+ # Capture Example 3 - Traffic capture by traffic generator.
+ # This TestCase uses an OVS flow to add a VLAN tag with a given ID to every
+ # frame sent by the traffic generator. Correct frame modification is verified
+ # by inspection of the packet capture received by T-Rex.
+ {
+ "Name": "capture_p2p_add_vlan_ovs_trex",
+ "Deployment": "clean",
+ "Description": "OVS: Test VLAN tag modification and verify it by traffic capture",
+ "vSwitch" : "OvsDpdkVhost", # works also for Vanilla OVS
+ "Parameters" : {
+ "TRAFFICGEN" : "Trex",
+ "TRAFFICGEN_TREX_LEARNING_MODE" : True,
+ "TRAFFIC" : {
+ "traffic_type" : "burst",
+ "frame_rate" : 100,
+ "burst_size" : 5,
+ # enable capture of five RX frames
+ 'capture': {
+ 'enabled': True,
+ 'tx_ports' : [],
+ 'rx_ports' : [1],
+ 'count' : 5,
+ },
+ },
+ },
+ "TestSteps" : STEP_VSWITCH_P2P_INIT + [
+ # replace standard L2 flows with flows which add a VLAN tag with ID 3
+ ['!vswitch', 'add_flow', 'int_br0', {'in_port': '1', 'actions': ['mod_vlan_vid:3','output:2']}],
+ ['!vswitch', 'add_flow', 'int_br0', {'in_port': '2', 'actions': ['mod_vlan_vid:3','output:1']}],
+ ['vswitch', 'dump_flows', 'int_br0'],
+ ['trafficgen', 'send_traffic', {}],
+ ['trafficgen', 'get_results'],
+ # verify that captured frames have vlan tag with ID 3
+ ['tools', 'exec_shell', 'tcpdump -qer $RESULTS_PATH/#STEP[-1][0]["capture_rx"] vlan 3 '
+ '2>/dev/null | wc -l', '|^(\d+)$'],
+ # number of received frames with expected VLAN id must match the number of captured frames
+ ['tools', 'assert', '#STEP[-1][0] == 5'],
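+ # (burst_size, the capture 'count' and the asserted value above are
+ # deliberately all set to 5 and must be kept in sync)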
+ ] + STEP_VSWITCH_P2P_FINIT,
+ },
#
# End of examples of functional testcases with traffic capture validation
#
@@ -1132,11 +952,11 @@ INTEGRATION_TESTS += [
# "VNF" : "QemuVirtioNet",
# "Trafficgen": "IxNet",
# "Parameters": {"GUEST_LOOPBACK" : ["linux_bridge"],},
-# "TestSteps": STEP_VSWITCH_PVP_FLOWS_INIT +
+# "TestSteps": STEP_VSWITCH_PVP_CONNECTIONS_INIT +
# [
# ['vnf', 'start'],
# ['trafficgen', 'send_traffic', {'traffic_type' : 'rfc2544_continuous', 'bidir' : 'True'}],
# ['vnf', 'stop'],
# ] +
-# STEP_VSWITCH_PVP_FLOWS_FINIT
+# STEP_VSWITCH_PVP_CONNECTIONS_FINIT
# },
diff --git a/conf/integration/01a_testcases_l34_vxlan.conf b/conf/integration/01a_testcases_l34_vxlan.conf
index 17c0d6ff..b42a14d1 100644
--- a/conf/integration/01a_testcases_l34_vxlan.conf
+++ b/conf/integration/01a_testcases_l34_vxlan.conf
@@ -1,4 +1,4 @@
-# Copyright 2017 Intel Corporation.
+# Copyright 2017-2018 Intel Corporation and Tieto.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -63,7 +63,6 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
'cmds.close()'],
['tools', 'exec_shell', "sudo $TOOLS['ovs-ofctl'] -O OpenFlow13 --bundle add-flows int_br0 /tmp/ovsofctl_cmds.txt"],
['trafficgen', 'send_traffic', {}],
- ['vswitch', 'del_flow', 'int_br0'],
] +
STEP_VSWITCH_P2P_FINIT
},
@@ -93,7 +92,6 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
'actions': ['output:#STEP[2][1]'], 'idle_timeout': '0' }],
['trafficgen', 'send_traffic', {}],
['vswitch', 'dump_flows', 'int_br0'],
- ['vswitch', 'del_flow', 'int_br0'],
] +
STEP_VSWITCH_P2P_FINIT
},
@@ -130,7 +128,6 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
['trafficgen', 'send_traffic', {}],
['vswitch', 'dump_flows', 'int_br0'],
['vnf', 'stop'],
- ['vswitch', 'del_flow', 'int_br0'],
] + STEP_VSWITCH_PVP_FINIT
},
{
@@ -172,7 +169,6 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
['vswitch', 'dump_flows', 'int_br0'],
['vnf2', 'stop'],
['vnf1', 'stop'],
- ['vswitch', 'del_flow', 'int_br0'],
] +
STEP_VSWITCH_PVVP_FINIT
},
@@ -213,7 +209,6 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
['tools', 'exec_shell', "sudo $TOOLS['ovs-ofctl'] -O OpenFlow13 --bundle "
"add-flows int_br0 /tmp/ovsofctl_cmds.txt"],
['trafficgen', 'send_traffic', {}],
- ['vswitch', 'del_flow', 'int_br0'],
] +
STEP_VSWITCH_P2P_FINIT
},
@@ -249,7 +244,6 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
'actions': ['output:#STEP[2][1]'], 'idle_timeout': '0'}],
['trafficgen', 'send_traffic', {}],
['vswitch', 'dump_flows', 'int_br0'],
- ['vswitch', 'del_flow', 'int_br0'],
] +
STEP_VSWITCH_P2P_FINIT
},
@@ -293,7 +287,6 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
['trafficgen', 'send_traffic', {}],
['vswitch', 'dump_flows', 'int_br0'],
['vnf', 'stop'],
- ['vswitch', 'del_flow', 'int_br0'],
] +
STEP_VSWITCH_PVP_FINIT
},
@@ -344,7 +337,6 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
['vswitch', 'dump_flows', 'int_br0'],
['vnf2', 'stop'],
['vnf1', 'stop'],
- ['vswitch', 'del_flow', 'int_br0'],
] +
STEP_VSWITCH_PVVP_FINIT
},
@@ -888,13 +880,13 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
},
},
"TestSteps": [
- ['vswitch', 'del_flow', 'br0', {'in_port': '1'}],
- ['vswitch', 'add_flow', 'br0',
+ ['vswitch', 'del_flow', '$TUNNEL_INTEGRATION_BRIDGE', {'in_port': '1'}],
+ ['vswitch', 'add_flow', '$TUNNEL_INTEGRATION_BRIDGE',
{'in_port': '1', 'dl_type': '0x800', 'nw_proto': '17',
'nw_dst': '$TRAFFICGEN_PORT2_IP/8', 'actions': ['output:2'],
'idle_timeout': '0'}],
- ['vswitch', 'dump_flows', 'br0'],
- ['vswitch', 'dump_flows', 'br-ext'],
+ ['vswitch', 'dump_flows', '$TUNNEL_INTEGRATION_BRIDGE'],
+ ['vswitch', 'dump_flows', '$TUNNEL_EXTERNAL_BRIDGE'],
],
},
{
diff --git a/conf/integration/01b_dpdk_regression_tests.conf b/conf/integration/01b_dpdk_regression_tests.conf
index 2e63d677..44343d28 100644
--- a/conf/integration/01b_dpdk_regression_tests.conf
+++ b/conf/integration/01b_dpdk_regression_tests.conf
@@ -1,4 +1,4 @@
-# Copyright 2017 Intel Corporation.
+# Copyright 2017-2018 Intel Corporation and Tieto.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -21,6 +21,10 @@
# Generic configuration used by OVSDPDK testcases
#
############################################################
+
+# import of settings is required to construct the path to the log file
+from conf import settings
+
_OVSDPDK_1st_PMD_CORE = 4
_OVSDPDK_2nd_PMD_CORE = 5
# calculate PMD mask from core IDs configured above
@@ -32,8 +36,11 @@ _OVSDPDK_GUEST_5_CORES = [('7', '8', '9', '10', '11')]
# number of queues configured in OVS and GUEST
_OVSDPDK_MQ = '2'
-# Path to the log file
-_OVSDPDK_VSWITCH_LOG = os.path.join(LOG_DIR, LOG_FILE_VSWITCHD)
+# path to the log file
+_RESULTS_PATH = settings.getValue('RESULTS_PATH')
+name, ext = os.path.splitext(settings.getValue('LOG_FILE_VSWITCHD'))
+log_file = "{name}_{uid}{ext}".format(name=name, uid=settings.getValue('LOG_TIMESTAMP'), ext=ext)
+_OVSDPDK_VSWITCH_LOG = os.path.join(_RESULTS_PATH, log_file)
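+# e.g. assuming LOG_FILE_VSWITCHD is 'ovs-vswitchd.log', the resulting path
+# would be <RESULTS_PATH>/ovs-vswitchd_<LOG_TIMESTAMP>.log (exact names depend
+# on the values configured in the common configuration)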
_OVSDPDK_HEADER_LEN = 18 # length of frame headers in bytes; it's used for calculation
# of payload size, i.e. payload = frame_size - header_len
@@ -170,62 +177,6 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
['tools', 'exec_shell', 'sudo $TOOLS["ovs-vsctl"] show',
'|Error attaching device.*$NICS[0]["pci"]'],
['tools', 'assert', 'not len(#STEP[-1])'],
- # clean up
- ['vswitch', 'del_port', 'int_br0', '#STEP[port2][0]'],
- ['vswitch', 'del_switch', 'int_br0'],
- ]
- },
- {
- # Support of netdev-dpdk/detach has been removed from OVS, so testcase will fail with recent
- # OVS/DPDK versions. There is an ongoing discussion about possible support of netdev-dpdk/detach
- # in the future OVS versions.
- # Test has been tested with:
- # OVS_TAG = 03d6399e618e4136c5da0be2b6f18f0b7d75b2bb
- # DPDK_TAG = v16.11
- "Name": "ovsdpdk_hotplug_detach",
- "Deployment": "clean",
- "Description": "Same as ovsdpdk_hotplug_attach, but delete and detach the device after the hotplug. "
- "Note: Support of netdev-dpdk/detach has been removed from OVS, so testcase will fail "
- "with recent OVS/DPDK versions.",
- "vSwitch" : "OvsDpdkVhost",
- "Parameters" : {
- # suppress DPDK configuration, so physical interfaces are not bound to DPDK driver
- 'WHITELIST_NICS' : [],
- 'NICS' : [],
- },
- "TestSteps": [
- # check if OVS supports netdev-dpdk/detach, fail otherwise
- ['tools', 'exec_shell', 'sudo $TOOLS["ovs-appctl"] list-commands', '|netdev-dpdk\/detach'],
- ['tools', 'assert', 'len(#STEP[-1])'],
- # restore original NICS configuration, so we can use add/del_phy_port
- ['settings', 'setValue', 'TEST_PARAMS', ''],
- # find out which DPDK driver is being used; it should be the last configured
- # DPDK module; optional path and .ko suffix must be removed
- ['tools', 'eval', '\'$TOOLS["dpdk_modules"][-1]\'.split("/")[-1].split(".")[0]'],
- # bind NIC to DPDK driver
- ['tools', 'exec_shell', 'sudo $TOOLS["bind-tool"] --bind #STEP[-1] $NICS[0]["pci"]'],
- # and check that DPDK port can be created without errors
- ['vswitch', 'add_switch', 'int_br0'],
- ['#port', 'vswitch', 'add_phy_port', 'int_br0'],
- ['tools', 'exec_shell', 'sudo $TOOLS["ovs-vsctl"] show',
- '|Error attaching device.*$NICS[0]["pci"]'],
- ['tools', 'assert', 'not len(#STEP[-1])'],
- # try to unbind port - should fail beause it is being used
- ['tools', 'exec_shell', 'sudo $TOOLS["ovs-appctl"] netdev-dpdk/detach $NICS[0]["pci"] 2>&1; exit 0',
- '|Device.*$NICS[0]["pci"].*is being used by interface'],
- ['tools', 'assert', 'len(#STEP[-1])'],
- # delete port and unbind it - should succeed
- ['vswitch', 'del_port', 'int_br0', '#STEP[port][0]'],
- ['tools', 'exec_shell', 'sudo $TOOLS["ovs-appctl"] netdev-dpdk/detach $NICS[0]["pci"]',
- '|Device.*$NICS[0]["pci"].*has been detached'],
- ['tools', 'assert', 'len(#STEP[-1])'],
- # try to add port again
- ['vswitch', 'add_phy_port', 'int_br0'],
- ['tools', 'exec_shell', 'sudo $TOOLS["ovs-vsctl"] show',
- '|Error attaching device.*$NICS[0]["pci"]'],
- # it will work because auto attach was implemented into OVS
- ['tools', 'assert', 'not len(#STEP[-1])'],
- ['vswitch', 'del_switch', 'int_br0'],
]
},
]
@@ -344,14 +295,14 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
# frame loss with small packets should be detected
['tools', 'assert', '#STEP[-1][0]["frame_loss_percent"] > 10'],
# delete phy ports so they can be created with flow control
- ['vswitch', 'del_flow', 'br0', {}],
- ['vswitch', 'del_port', 'br0', 'dpdk0'],
- ['vswitch', 'del_port', 'br0', 'dpdk1'],
+ ['vswitch', 'del_flow', '$VSWITCH_BRIDGE_NAME', {}],
+ ['vswitch', 'del_port', '$VSWITCH_BRIDGE_NAME', 'dpdk0'],
+ ['vswitch', 'del_port', '$VSWITCH_BRIDGE_NAME', 'dpdk1'],
# turn on flow control
- ['tools', 'exec_shell', 'sudo $TOOLS["ovs-vsctl"] add-port br0 dpdk0 -- set Interface dpdk0 type=dpdk options:dpdk-devargs=$NICS[0]["pci"] options:rx-flow-ctrl=true'],
- ['tools', 'exec_shell', 'sudo $TOOLS["ovs-vsctl"] add-port br0 dpdk1 -- set Interface dpdk1 type=dpdk options:dpdk-devargs=$NICS[1]["pci"] options:rx-flow-ctrl=true'],
- ['vswitch', 'add_flow', 'br0', {'in_port': '3', 'actions': ['output:4'], 'idle_timeout': '0'}],
- ['vswitch', 'add_flow', 'br0', {'in_port': '4', 'actions': ['output:3'], 'idle_timeout': '0'}],
+ ['tools', 'exec_shell', 'sudo $TOOLS["ovs-vsctl"] add-port $VSWITCH_BRIDGE_NAME dpdk0 -- set Interface dpdk0 type=dpdk options:dpdk-devargs=$NICS[0]["pci"] options:rx-flow-ctrl=true'],
+ ['tools', 'exec_shell', 'sudo $TOOLS["ovs-vsctl"] add-port $VSWITCH_BRIDGE_NAME dpdk1 -- set Interface dpdk1 type=dpdk options:dpdk-devargs=$NICS[1]["pci"] options:rx-flow-ctrl=true'],
+ ['vswitch', 'add_flow', '$VSWITCH_BRIDGE_NAME', {'in_port': '3', 'actions': ['output:4'], 'idle_timeout': '0'}],
+ ['vswitch', 'add_flow', '$VSWITCH_BRIDGE_NAME', {'in_port': '4', 'actions': ['output:3'], 'idle_timeout': '0'}],
['tools', 'exec_shell', 'sudo $TOOLS["ovs-vsctl"] show'],
['trafficgen', 'send_traffic', {}],
['trafficgen', 'get_results'],
@@ -413,7 +364,7 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
"VSWITCH_DPDK_MULTI_QUEUES" : _OVSDPDK_MQ,
},
"TestSteps": [
- ['tools', 'exec_shell', 'sudo $TOOLS["ovs-appctl"] dpif-netdev/pmd-rxq-show','|dpdk[01]\s+queue-id: \d+'],
+ ['tools', 'exec_shell', 'sudo $TOOLS["ovs-appctl"] dpif-netdev/pmd-rxq-show','|dpdk[01]\s+queue-id:\s+\d+'],
# check that requested nr of queues was created on both NICs
['tools', 'assert', 'len(#STEP[-1])=={}'.format(int(_OVSDPDK_MQ)*2)],
]
@@ -435,8 +386,10 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
},
"TestSteps": [
['tools', 'exec_shell', 'sudo $TOOLS["ovs-vsctl"] -- set Interface dpdk0 other_config:pmd-rxq-affinity="0:{},1:{}"'.format(_OVSDPDK_1st_PMD_CORE, _OVSDPDK_1st_PMD_CORE)],
- ['tools', 'exec_shell', 'sudo $TOOLS["ovs-appctl"] dpif-netdev/pmd-rxq-show','|dpdk0\s+queue-id: 0 1'],
- ['tools', 'assert', 'len(#STEP[-1])==1'],
+ ['tools', 'exec_shell', 'sudo $TOOLS["ovs-appctl"] dpif-netdev/pmd-rxq-show','|dpdk0\s+queue-id:\s+0'],
+ ['tools', 'exec_shell', 'sudo $TOOLS["ovs-appctl"] dpif-netdev/pmd-rxq-show','|dpdk0\s+queue-id:\s+1'],
+ ['tools', 'assert', 'len(#STEP[-2])==1'],
+ ['tools', 'assert', 'len(#STEP[-2])==1'],
]
},
{
@@ -456,8 +409,8 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
},
"TestSteps": [
['tools', 'exec_shell', 'sudo $TOOLS["ovs-vsctl"] -- set Interface dpdk0 other_config:pmd-rxq-affinity="0:{},1:{}"'.format(_OVSDPDK_1st_PMD_CORE, _OVSDPDK_2nd_PMD_CORE)],
- ['tools', 'exec_shell', 'sudo $TOOLS["ovs-appctl"] dpif-netdev/pmd-rxq-show','|dpdk0\s+queue-id: 0$'],
- ['tools', 'exec_shell', 'sudo $TOOLS["ovs-appctl"] dpif-netdev/pmd-rxq-show','|dpdk0\s+queue-id: 1$'],
+ ['tools', 'exec_shell', 'sudo $TOOLS["ovs-appctl"] dpif-netdev/pmd-rxq-show','|dpdk0\s+queue-id:\s+0'],
+ ['tools', 'exec_shell', 'sudo $TOOLS["ovs-appctl"] dpif-netdev/pmd-rxq-show','|dpdk0\s+queue-id:\s+1'],
['tools', 'assert', 'len(#STEP[-2])==1'],
['tools', 'assert', 'len(#STEP[-2])==1'],
]
@@ -478,12 +431,15 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
"TestSteps": STEP_VSWITCH_PVP_INIT +
[
['tools', 'exec_shell', "sudo $TOOLS['ovs-appctl'] dpif-netdev/pmd-rxq-show",
- '|dpdkvhostuserclient0\s+queue-id: \d'],
+ '|dpdkvhostuserclient0\s+queue-id:\s+\d'],
['tools', 'assert', 'len(#STEP[-1])==1'],
['vnf', 'start'],
['tools', 'exec_shell', "sudo $TOOLS['ovs-appctl'] dpif-netdev/pmd-rxq-show",
- '|dpdkvhostuserclient0\s+queue-id: 0 1'],
- ['tools', 'assert', 'len(#STEP[-1])==1'],
+ '|dpdkvhostuserclient0\s+queue-id:\s+0'],
+ ['tools', 'exec_shell', "sudo $TOOLS['ovs-appctl'] dpif-netdev/pmd-rxq-show",
+ '|dpdkvhostuserclient0\s+queue-id:\s+1'],
+ ['tools', 'assert', 'len(#STEP[-2])==1'],
+ ['tools', 'assert', 'len(#STEP[-2])==1'],
['vnf', 'stop'],
] +
STEP_VSWITCH_PVP_FINIT
@@ -503,11 +459,12 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
"TRAFFICGEN_DURATION" : 5,
"TRAFFICGEN" : "IxNet",
"TRAFFIC" : {
- "bidir" : "false",
+ "bidir" : "False",
"traffic_type" : "rfc2544_continuous",
"multistream" : 6,
"stream_type" : "L3",
"frame_rate" : 1,
+ "learning_frames" : False,
'l2': {
'srcmac': "00:00:07:00:0E:00",
'dstmac': "00:00:00:00:00:01"
@@ -517,7 +474,7 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
'proto': 'udp',
'srcip': '6.6.6.6',
'dstip': '1.1.1.1',
- },
+ }
}
},
"TestSteps": STEP_VSWITCH_PVP_INIT + [
@@ -528,6 +485,7 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
# so send_traffic() will end with success
['vswitch', 'add_flow', 'int_br0',
{'in_port': '#STEP[2][1]', 'actions': ['output:#STEP[1][1]'], 'idle_timeout': '0'}],
+ ['vswitch', 'add_flow', 'int_br0', {'priority' : '0', 'actions' : ['NORMAL']}],
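+ # priority 0 acts as a catch-all, so unmatched frames fall back to
+ # standard L2 (NORMAL) switching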
['vnf', 'start'],
# configure two channels, so multiple cores could be used
['vnf', 'execute_and_wait', 'ethtool -L eth0 combined 2'],
@@ -552,7 +510,6 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
['vnf', 'execute_and_wait', 'route add default gw 1.1.1.5 eth0'],
['vnf', 'execute_and_wait', 'arp -s 1.1.1.5 DE:AD:BE:EF:CA:FC'],
['vnf', 'execute_and_wait', 'ip a'],
-
['trafficgen', 'send_traffic',{}],
# check interrupts to verify that traffic was correctly dispatched...
['#result', 'vnf', 'execute_and_wait', 'cat /proc/interrupts',
@@ -583,16 +540,15 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
# there must be separate CPU for each of RX/TX queues
"GUEST_SMP" : ['5'],
"GUEST_TESTPMD_PARAMS" : ['-c 0x1F -n 4 --socket-mem 512 -- '
- '--burst=64 -i --txqflags=0xf00 --nb-cores=4 '
- # map queue stats to separate regs to verify MQ functionality
- '--rx-queue-stats-mapping=\(0,0,0\),\(0,1,1\),\(1,0,2\),\(1,1,3\) '
- '--tx-queue-stats-mapping=\(0,0,4\),\(0,1,5\),\(1,0,6\),\(1,1,7\) '
- '--disable-hw-vlan --rxq=2 --txq=2'],
+ '--burst=64 -i --nb-cores=4 '
+ '--rxq=2 --txq=2'],
"TRAFFICGEN_DURATION" : 5,
"TRAFFIC" : {
"traffic_type" : "rfc2544_continuous",
"multistream" : 3,
"stream_type" : "L3",
+ "frame_rate" : 1,
+ "learning_frames" : False,
'l3': {
'enabled': True,
'proto': 'udp',
@@ -600,12 +556,21 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
'dstip': '1.1.1.1',
},
},
+ "GUEST_QUEUE_STATS_MAPPING" : ["rx 0 0 0",
+ "rx 0 1 1",
+ "rx 1 0 2",
+ "rx 1 1 3",
+ "tx 0 0 4",
+ "tx 0 1 5",
+ "tx 1 0 6",
+ "tx 1 1 7"
+ ]
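+ # (each entry above maps "direction port queue stats-register", mirroring
+ # the former testpmd --rx/tx-queue-stats-mapping=(port,queue,reg) arguments)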
},
"TestSteps": STEP_VSWITCH_PVP_FLOWS_INIT +
[
['vnf', 'start'],
['tools', 'exec_shell', "sudo $TOOLS['ovs-appctl'] dpif-netdev/pmd-rxq-show",
- '|dpdk\w+\s+queue-id: \d'],
+ '|dpdk\w+\s+queue-id:\s+\d'],
# there must be two standalone queue records for every interface (2x4)
['tools', 'assert', 'len(#STEP[-1])==8'],
['trafficgen', 'send_traffic', {}],
@@ -671,15 +636,17 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
"TestSteps": [
# send traffic to verify correct PVP configuration
['trafficgen', 'send_traffic', {}],
- ['vswitch', 'dump_flows', 'br0'],
+ ['vswitch', 'dump_flows', '$VSWITCH_BRIDGE_NAME'],
# restart vswitchd, ovsdb is kept untouched, so ovs configuration
# (except flows) will be restored
['vswitch', 'restart'],
- ['vswitch', 'del_flow', 'br0'],
- ['vswitch', 'add_flow', 'br0', {'in_port': '1', 'actions': ['output:3'], 'idle_timeout': '0'}],
- ['vswitch', 'add_flow', 'br0', {'in_port': '3', 'actions': ['output:1'], 'idle_timeout': '0'}],
- ['vswitch', 'add_flow', 'br0', {'in_port': '2', 'actions': ['output:4'], 'idle_timeout': '0'}],
- ['vswitch', 'add_flow', 'br0', {'in_port': '4', 'actions': ['output:2'], 'idle_timeout': '0'}],
+ ['vswitch', 'del_flow', '$VSWITCH_BRIDGE_NAME', {}],
+ ['vswitch', 'dump_flows', '$VSWITCH_BRIDGE_NAME'],
+ ['vswitch', 'add_flow', '$VSWITCH_BRIDGE_NAME', {'in_port': '1', 'actions': ['output:3'], 'idle_timeout': '0'}],
+ ['vswitch', 'add_flow', '$VSWITCH_BRIDGE_NAME', {'in_port': '3', 'actions': ['output:1'], 'idle_timeout': '0'}],
+ ['vswitch', 'add_flow', '$VSWITCH_BRIDGE_NAME', {'in_port': '2', 'actions': ['output:4'], 'idle_timeout': '0'}],
+ ['vswitch', 'add_flow', '$VSWITCH_BRIDGE_NAME', {'in_port': '4', 'actions': ['output:2'], 'idle_timeout': '0'}],
+ ['vswitch', 'dump_flows', '$VSWITCH_BRIDGE_NAME'],
# send traffic to verify that OVS works correctly after restart
['trafficgen', 'send_traffic', {}],
],
@@ -718,13 +685,13 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
"VSWITCH_VHOSTUSER_SERVER_MODE" : True,
},
"TestSteps": [
- ['vswitch', 'add_switch', 'br0'],
+ ['vswitch', 'add_switch', '$VSWITCH_BRIDGE_NAME'],
['tools', 'exec_shell', 'sudo $TOOLS["ovs-vsctl"] set Open_vSwitch . '
'other_config:vhost-sock-dir=test_dir'],
# enforce vswitchd to read new configuration
['vswitch', 'restart'],
['tools', 'exec_shell', 'sudo mkdir $TOOLS["ovs_var_tmp"]/test_dir'],
- ['vswitch', 'add_vport', 'br0'],
+ ['vswitch', 'add_vport', '$VSWITCH_BRIDGE_NAME'],
['tools', 'exec_shell', 'ls -1 $TOOLS["ovs_var_tmp"]/test_dir',
'|dpdkvhostuser0'],
['tools', 'assert', 'len(#STEP[-1])'],
@@ -740,7 +707,7 @@ _OVSDPDK_VDEV_ADD_NULL = [
['vswitch', 'add_switch', 'int_br0'],
['tools', 'exec_shell', 'sudo $TOOLS["ovs-vsctl"] add-port int_br0 null0 -- '
'set Interface null0 type=dpdk options:dpdk-devargs=eth_null0'],
- ['tools', 'exec_shell', 'sudo $TOOLS["ovs-vsctl"] show', '|dpdk-devargs=\S+eth_null0'],
+ ['tools', 'exec_shell', 'sudo $TOOLS["ovs-vsctl"] show', '|dpdk-devargs=eth_null0'],
['tools', 'assert', 'len(#STEP[-1])==1'],
]
@@ -777,7 +744,7 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
"TestSteps": _OVSDPDK_VDEV_ADD_NULL + [
['tools', 'exec_shell', 'sudo $TOOLS["ovs-vsctl"] del-port null0'],
['tools', 'exec_shell', 'sudo $TOOLS["ovs-vsctl"] show',
- '|dpdk-devargs=\S+eth_null0'],
+ '|dpdk-devargs=eth_null0'],
['tools', 'assert', 'not len(#STEP[-1])'],
]
},
@@ -1018,9 +985,9 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
"TestSteps": [
['vswitch', 'add_switch', 'int_br0'],
['vswitch', 'add_phy_port', 'int_br0'],
- ['tools', 'exec_shell', 'sudo $TOOLS["ovs-vsctl"] set Interface dpdk0 mtu_request=9710'],
+ ['tools', 'exec_shell', 'sudo $TOOLS["ovs-vsctl"] set Interface dpdk0 mtu_request=9702'],
['tools', 'exec_shell', 'sudo $TOOLS["ovs-vsctl"] get Interface dpdk0 mtu'],
- ['tools', 'assert', 'int(#STEP[-1])==9710'],
+ ['tools', 'assert', 'int(#STEP[-1])==9702'],
# get line number of next log file entry
['tools', 'exec_shell', 'echo $((1+`wc -l $_OVSDPDK_VSWITCH_LOG | cut -d" " -f1`))', '(\d+)'],
['tools', 'exec_shell', 'sudo $TOOLS["ovs-vsctl"] set Interface dpdk0 mtu_request=9711'],
@@ -1028,7 +995,7 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
['tools', 'exec_shell', "sed -n '#STEP[-2][0],$ p' $_OVSDPDK_VSWITCH_LOG",
'|unsupported MTU 9711'],
['tools', 'exec_shell', 'sudo $TOOLS["ovs-vsctl"] get Interface dpdk0 mtu'],
- ['tools', 'assert', 'int(#STEP[-1])==9710'],
+ ['tools', 'assert', 'int(#STEP[-1])==9702'],
]
},
{
@@ -1042,9 +1009,9 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
"TestSteps": [
['vswitch', 'add_switch', 'int_br0'],
['vswitch', 'add_vport', 'int_br0'],
- ['tools', 'exec_shell', 'sudo $TOOLS["ovs-vsctl"] set Interface dpdkvhostuserclient0 mtu_request=9710'],
+ ['tools', 'exec_shell', 'sudo $TOOLS["ovs-vsctl"] set Interface dpdkvhostuserclient0 mtu_request=9702'],
['tools', 'exec_shell', 'sudo $TOOLS["ovs-vsctl"] get Interface dpdkvhostuserclient0 mtu'],
- ['tools', 'assert', 'int(#STEP[-1])==9710'],
+ ['tools', 'assert', 'int(#STEP[-1])==9702'],
# get line number of next log file entry
['tools', 'exec_shell', 'echo $((1+`wc -l $_OVSDPDK_VSWITCH_LOG | cut -d" " -f1`))', '(\d+)'],
['tools', 'exec_shell', 'sudo $TOOLS["ovs-vsctl"] set Interface dpdkvhostuserclient0 mtu_request=9711'],
@@ -1053,7 +1020,7 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
'|unsupported MTU 9711'],
['tools', 'assert', 'len(#STEP[-1])'],
['tools', 'exec_shell', 'sudo $TOOLS["ovs-vsctl"] get Interface dpdkvhostuserclient0 mtu'],
- ['tools', 'assert', 'int(#STEP[-1])==9710'],
+ ['tools', 'assert', 'int(#STEP[-1])==9702'],
]
},
{
@@ -1201,8 +1168,8 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
['trafficgen', 'get_results'],
# all traffic should pass through (i.e. 0% frame loss)
['tools', 'assert', 'float(#STEP[-1][0]["frame_loss_percent"])==0'],
- # set packetsize to 9019 and send traffic
- ['settings', 'setValue', 'TRAFFICGEN_PKT_SIZES', (9019,)],
+ # set packetsize to 9702 and send traffic
+ ['settings', 'setValue', 'TRAFFICGEN_PKT_SIZES', (9702,)],
# disable verification of send_traffic "!" prefix, otherwise vsperf
# will fail when 100% packet loss is detected
['!trafficgen', 'send_traffic', {}],
@@ -1245,10 +1212,6 @@ _OVSDPDK_RATE_set_rate_limiter = [
'set Interface $_OVSDPDK_RATE_PORT$_OVSDPDK_RATE_NICID '
'ingress_policing_burst=$_OVSDPDK_RATE_BURST '
'ingress_policing_rate=$_OVSDPDK_RATE_RATE'],
- # check vswitchd log file, that rate limiter was created
- ['tools', 'exec_shell', "sed -n '#STEP[-2][0],$ p' $_OVSDPDK_VSWITCH_LOG",
- '|CIR period'],
- ['tools', 'assert', '("CIR period" in #STEP[-1])==$_OVSDPDK_RATE_LIMITER_CREATED'],
# verify that interface has correct rate limiter configuration
['tools', 'exec_shell', 'sudo $TOOLS["ovs-vsctl"] '
'list interface $_OVSDPDK_RATE_PORT$_OVSDPDK_RATE_NICID',
@@ -1297,7 +1260,7 @@ _OVSDPDK_RATE_confirm_multiple_rate_limit_setup = \
# check that traffic rate is no longer limited
['trafficgen', 'get_results'],
['tools', 'assert', 'int(#STEP[-1][0]["throughput_rx_mbps"])>500'],
- ['vswitch', 'dump_flows', 'br0'],
+ ['vswitch', 'dump_flows', '$VSWITCH_BRIDGE_NAME'],
]
INTEGRATION_TESTS = INTEGRATION_TESTS + [
@@ -1492,11 +1455,6 @@ _OVSDPDK_QOS_set_qos = [
'other-config:cbs=$_OVSDPDK_QOS_CBS','|\w{8}-\w{4}-\w{4}-\w{4}-\w{12}'],
['tools', 'assert', 'len(#STEP[-1])==1'],
- # Check the OVS logs
- ['tools', 'exec_shell', "sed -n '#STEP[-3][0],$ p' $_OVSDPDK_VSWITCH_LOG",
- '|CIR period'],
- ['tools', 'assert', '"CIR period" in #STEP[-1]'],
-
# Check the QoS policy and attributes
['tools', 'exec_shell', 'sudo $TOOLS["ovs-appctl"] -t ovs-vswitchd qos/show '
'$_OVSDPDK_QOS_PORT$_OVSDPDK_QOS_NICID', '.+'],
@@ -1603,8 +1561,8 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
"Parameters" : {},
"TestSteps": [
# Setup switch,port and logs
- ['vswitch', 'add_switch', 'br0'],
- ['vswitch', 'add_vport', 'br0'],
+ ['vswitch', 'add_switch', '$VSWITCH_BRIDGE_NAME'],
+ ['vswitch', 'add_vport', '$VSWITCH_BRIDGE_NAME'],
['#LOG_MARK', 'tools', 'exec_shell',
'echo $((1+`wc -l $_OVSDPDK_VSWITCH_LOG | cut -d" " -f1`))', '(\d+)'],
@@ -1616,7 +1574,7 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
# Check the OVS logs
['tools', 'exec_shell', "sed -n '#STEP[LOG_MARK][0],$ p' $_OVSDPDK_VSWITCH_LOG",
- 'Failed to set QoS type egress-policer on port #STEP[1][0]: No such file or directory'],
+ 'Failed to set QoS type egress-policer on port #STEP[1][0]: Invalid argument'],
['tools', 'assert', 'len(#STEP[-1])==1'],
# Check the attributes for vhost0
@@ -1633,8 +1591,8 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
"Parameters" : {},
"TestSteps": [
# Setup switch,port and logs
- ['vswitch', 'add_switch', 'br0'],
- ['vswitch', 'add_vport', 'br0'],
+ ['vswitch', 'add_switch', '$VSWITCH_BRIDGE_NAME'],
+ ['vswitch', 'add_vport', '$VSWITCH_BRIDGE_NAME'],
['#LOG_MARK', 'tools', 'exec_shell',
'echo $((1+`wc -l $_OVSDPDK_VSWITCH_LOG | cut -d" " -f1`))', '(\d+)'],
@@ -1646,7 +1604,7 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
# Check the OVS logs
['tools', 'exec_shell', "sed -n '#STEP[LOG_MARK][0],$ p' $_OVSDPDK_VSWITCH_LOG",
- 'Failed to set QoS type egress-policer on port #STEP[1][0]: No such file or directory'],
+ 'Failed to set QoS type egress-policer on port #STEP[1][0]: Invalid argument'],
['tools', 'assert', 'len(#STEP[-1])==1'],
# Check the attributes for vhost0
@@ -1686,4 +1644,72 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
},
"TestSteps": _OVSDPDK_QOS_confirm_multiple_qos_setup
},
+ ############################################################
+ #
+ # Custom statistics
+ #
+ ############################################################
+ {
+ "Name": "ovsdpdk_custstat_check",
+ "Deployment": "clean",
+ "Description": "Test if custom statistics are supported.",
+ "vSwitch" : "OvsDpdkVhost",
+ "TestSteps": [
+ # enable custom statistics
+ ['vswitch', 'add_switch', 'int_br0', [
+ 'protocols=OpenFlow10,OpenFlow11,OpenFlow12,'
+ 'OpenFlow13,OpenFlow14,OpenFlow15']],
+ ['#port', 'vswitch', 'add_phy_port', 'int_br0'],
+ # check that custom statistics are available for given interface
+ ['tools', 'exec_shell', 'sudo $TOOLS["ovs-ofctl"] -O OpenFlow14 '
+ 'dump-ports int_br0 #STEP[port][1]',
+ '|CUSTOM Statistics'],
+ ['tools', 'assert', 'len(#STEP[-1])'],
+ ['vswitch', 'del_port', 'int_br0', '#STEP[port][0]'],
+ ['vswitch', 'del_switch', 'int_br0'],
+ ]
+ },
+ {
+ "Name": "ovsdpdk_custstat_rx_error",
+ "Deployment": "clean",
+ "Description": "Test bad ethernet CRC counter 'rx_crc_errors' exposed by custom statistics.",
+ "vSwitch" : "OvsDpdkVhost",
+ "Parameters" : {
+ "OVS_OFCTL_ARGS" : [],
+ "TRAFFICGEN" : "IxNet",
+ "TRAFFIC" : {
+ "traffic_type" : "rfc2544_continuous",
+ "frame_rate" : 10,
+ },
+ "TRAFFICGEN_DURATION" : 10,
+ "TRAFFICGEN_IXNET_TCL_SCRIPT" : "ixnetrfc2544_bad_l2_crc.tcl",
+ },
+ "TestSteps": [
+ # enable custom statistics
+ ['vswitch', 'add_switch', 'int_br0', [
+ 'protocols=OpenFlow10,OpenFlow11,OpenFlow12,'
+ 'OpenFlow13,OpenFlow14,OpenFlow15']],
+ ['#port1', 'vswitch', 'add_phy_port', 'int_br0'],
+ ['#port2', 'vswitch', 'add_phy_port', 'int_br0'],
+ ['vswitch', 'add_flow', 'int_br0', {'in_port': '1', 'actions': ['output:2']}],
+ ['vswitch', 'add_flow', 'int_br0', {'in_port': '2', 'actions': ['output:1']}],
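+            # the two flows above cross-connect the physical ports in both directions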
+ ['#crc_old', 'tools', 'exec_shell', 'sudo $TOOLS["ovs-ofctl"] -O OpenFlow14 '
+ 'dump-ports int_br0 #STEP[port1][1]',
+ '|rx_crc_errors=(\d+)'],
+            # frames will be dropped by the NIC, so we have to suppress send_traffic validation
+ # to avoid test failure
+ ['!trafficgen', 'send_traffic', {}],
+            # check that custom statistics are available for the given interface
+ ['#crc_new', 'tools', 'exec_shell', 'sudo $TOOLS["ovs-ofctl"] -O OpenFlow14 '
+ 'dump-ports int_br0 #STEP[port1][1]',
+ '|rx_crc_errors=(\d+)'],
+ ['tools', 'assert', '#STEP[crc_new] > #STEP[crc_old]'],
+ # tear down the environment
+ ['vswitch', 'dump_flows', 'int_br0'],
+ ['vswitch', 'del_flow', 'int_br0', {}],
+ ['vswitch', 'del_port', 'int_br0', '#STEP[port1][0]'],
+ ['vswitch', 'del_port', 'int_br0', '#STEP[port2][0]'],
+ ['vswitch', 'del_switch', 'int_br0'],
+ ]
+ },
]
diff --git a/conf/integration/01c_trex_vm_tests.conf b/conf/integration/01c_trex_vm_tests.conf
new file mode 100644
index 00000000..1bec4efd
--- /dev/null
+++ b/conf/integration/01c_trex_vm_tests.conf
@@ -0,0 +1,182 @@
+# Copyright 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Testcases in this file use T-Rex running in a VM as a traffic generator.
+#
+
+# A set of options passed to the T-Rex stateless server
+_TREX_OPTIONS="--no-scapy-server --no-watchdog --nc"
+_TREX_SERVER_IP="192.168.35.2"
+_TREX_BRIDGE_IP="192.168.35.1"
+_TREX_IP_MASK="24"
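+# NOTE: _TREX_BRIDGE_IP is assigned to the trex_br bridge on the host, while
+# _TREX_SERVER_IP is configured on eth2 inside the T-Rex VM; this gives the
+# host direct IP (and thus ssh) access to the T-Rex server.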
+
+# Macro for initialization of the T-Rex VM and execution of the T-Rex server
+# NOTE: It is expected that T-Rex will run in the LAST VM!
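+# Hence the [-1] indices below, which refer to the last guest's settings.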
+TREX_VM_INIT = [
+ # configure T-Rex ports, which will be used for traffic generation
+ ['#trex_p1', 'vswitch', 'add_vport', 'int_br0'],
+ ['#trex_p2', 'vswitch', 'add_vport', 'int_br0'],
+ # configure IP access to T-Rex VM
+ ['vswitch', 'add_switch', 'trex_br'],
+ ['vswitch', 'add_flow', 'trex_br', {'actions': ['NORMAL']}], # turn on MAC learning mode
+ ['#trex_admin', 'vswitch', 'add_vport', 'trex_br'],
+    ['#trex_spare', 'vswitch', 'add_vport', 'trex_br'], # spare port to keep an even number of NICs
+ ['tools', 'exec_shell', 'sudo ip addr flush dev trex_br'],
+ ['tools', 'exec_shell', 'sudo ip addr add $_TREX_BRIDGE_IP/$_TREX_IP_MASK dev trex_br'],
+ ['tools', 'exec_shell', 'sudo ip link set dev trex_br up'],
+ ['vnf_trex', 'start'],
+ ['vnf_trex', 'execute_and_wait', 'sudo ip addr flush dev eth2'],
+ ['vnf_trex', 'execute_and_wait', 'sudo ip addr add $_TREX_SERVER_IP/$_TREX_IP_MASK dev eth2'],
+ ['vnf_trex', 'execute_and_wait', 'sudo ip link set dev eth2 up'],
+    # prepare the system for T-Rex execution
+ ['vnf_trex', 'execute_and_wait', 'sysctl vm.nr_hugepages=$GUEST_HUGEPAGES_NR[-1]'],
+ ['vnf_trex', 'execute_and_wait', 'mkdir -p /dev/hugepages'],
+ ['vnf_trex', 'execute_and_wait', 'mount -t hugetlbfs hugetlbfs /dev/hugepages'],
+ ['vnf_trex', 'execute_and_wait', 'grep -i huge /proc/meminfo'],
+ ['vnf_trex', 'execute_and_wait', 'iptables -F'],
+ # configure passwordless ssh access to VM with T-Rex server
+ ['tools', 'exec_shell', 'sshpass -p $GUEST_PASSWORD[-1] ssh-copy-id -o StrictHostKeyChecking=no $GUEST_USERNAME[-1]@$_TREX_SERVER_IP'],
+ # prepare T-Rex configuration
+ ['vnf_trex', 'execute_and_wait', 'echo \'- port_limit: 2\' > /etc/trex_cfg.yaml'],
+ ['vnf_trex', 'execute_and_wait', 'echo \' version: 2\' >> /etc/trex_cfg.yaml'],
+ ['vnf_trex', 'execute_and_wait', "echo \" interfaces: [ '$GUEST_NICS[-1][0]['pci']', '$GUEST_NICS[-1][1]['pci']' ]\" >> /etc/trex_cfg.yaml"],
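+    # the three echo steps above render a minimal /etc/trex_cfg.yaml, e.g.:
+    #   - port_limit: 2
+    #     version: 2
+    #     interfaces: [ '00:05.0', '00:06.0' ]
+    # (PCI addresses are illustrative; real values are taken from $GUEST_NICS)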
+ # execute T-Rex stateless server and wait until it is up and running
+ ['vnf_trex', 'execute_and_wait', 'cd $TRAFFICGEN_TREX_BASE_DIR'],
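+    # run the stateless server in background and poll its RPC port (4501 is
+    # assumed to be the default) until it accepts connections; 300 is the
+    # step timeout in seconds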
+ ['#trex_pid', 'vnf_trex', 'execute_and_wait', 'nohup sudo bash -c "./t-rex-64 -i $_TREX_OPTIONS" &', '|\[1\] (\d+)$'],
+ ['vnf_trex', 'execute_and_wait', 'echo -ne "Starting T-Rex " ; while ! netstat -nl | grep 4501 &> /dev/null ; do echo -ne "."; sleep 1 ; done; echo', 300],
+]
+
+# T-Rex VM teardown macro
+TREX_VM_FINIT = [
+ ['vnf_trex', 'execute_and_wait', 'sudo kill #STEP[trex_pid][0]'],
+ ['vnf_trex', 'execute_and_wait', 'sudo pkill t-rex-64'],
+ ['vnf_trex', 'stop'],
+ ['vswitch', 'del_port', 'trex_br', '#STEP[trex_admin][0]'],
+ ['vswitch', 'del_port', 'trex_br', '#STEP[trex_spare][0]'],
+ ['tools', 'exec_shell', 'sudo ip link set dev trex_br down'],
+ ['tools', 'exec_shell', 'sudo ip addr flush dev trex_br'],
+ ['vswitch', 'del_switch', 'trex_br'],
+]
+
+# Configure T-Rex loopback test, where traffic from T-Rex is forwarded back via OVS flows
+TREX_VM_TEST = [
+ ['vswitch', 'add_switch', 'int_br0'],
+ ] + TREX_VM_INIT + [
+ ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[trex_p1][1]', 'actions': ['output:#STEP[trex_p2][1]'], 'idle_timeout': '0'}],
+ ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[trex_p2][1]', 'actions': ['output:#STEP[trex_p1][1]'], 'idle_timeout': '0'}],
+ ['trafficgen', 'send_traffic', {}],
+ ['trafficgen', 'get_results'],
+ # cleanup
+ ] + TREX_VM_FINIT
+
+# Configure VM2VM test, where traffic from the T-Rex VM is forwarded to a 2nd VM running a loopback application.
+TREX_VM2VM_TEST = [
+ ['vswitch', 'add_switch', 'int_br0'],
+ ['#vm_p1', 'vswitch', 'add_vport', 'int_br0'],
+ ['#vm_p2', 'vswitch', 'add_vport', 'int_br0'],
+ ['vnf', 'start'],
+ ] + TREX_VM_INIT + [
+ ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[trex_p1][1]', 'actions': ['output:#STEP[vm_p1][1]'], 'idle_timeout': '0'}],
+ ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[vm_p1][1]', 'actions': ['output:#STEP[trex_p1][1]'], 'idle_timeout': '0'}],
+ ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[trex_p2][1]', 'actions': ['output:#STEP[vm_p2][1]'], 'idle_timeout': '0'}],
+ ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[vm_p2][1]', 'actions': ['output:#STEP[trex_p2][1]'], 'idle_timeout': '0'}],
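+    # the four flows above wire T-Rex port 1 <-> VM port 1 and T-Rex port 2
+    # <-> VM port 2, so generated traffic traverses the loopback VM in both
+    # directions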
+ ['trafficgen', 'send_traffic', {}],
+ ['trafficgen', 'get_results'],
+ ['vnf', 'stop'],
+ ['vswitch', 'dump_flows', 'int_br0'],
+ # cleanup
+ ] + TREX_VM_FINIT
+
+#
+# A set of tests with T-Rex VM as a traffic generator.
+#
+INTEGRATION_TESTS = INTEGRATION_TESTS + [
+ {
+ "Name": "trex_vm_cont",
+ "Deployment": "clean",
+ "Description": "T-Rex VM - execute RFC2544 Continuous Stream from T-Rex VM and loop it back through Open vSwitch.",
+ "Parameters" : {
+ "TRAFFIC" : {
+ "traffic_type" : "rfc2544_continuous",
+ },
+ 'GUEST_LOOPBACK' : ['clean',],
+ 'GUEST_NICS_NR' : [4],
+ 'GUEST_SMP' : ['sockets=1,cores=3'],
+ 'GUEST_CORE_BINDING' : [['6', '7', '8'],],
+ 'TRAFFICGEN' : 'Trex',
+ 'TRAFFICGEN_TREX_HOST_IP_ADDR' : _TREX_SERVER_IP,
+ 'TRAFFICGEN_TREX_BASE_DIR' : '/root/trex/scripts/',
+ 'TRAFFICGEN_TREX_USER' : 'root',
+ },
+ "TestSteps": TREX_VM_TEST
+ },
+ {
+ "Name": "trex_vm_tput",
+ "Deployment": "clean",
+ "Description": "T-Rex VM - execute RFC2544 Throughput from T-Rex VM and loop it back through Open vSwitch.",
+ "Parameters" : {
+ "TRAFFIC" : {
+ "traffic_type" : "rfc2544_throughput",
+ },
+ 'GUEST_LOOPBACK' : ['clean',],
+ 'GUEST_NICS_NR' : [4],
+ 'GUEST_SMP' : ['sockets=1,cores=3'],
+ 'GUEST_CORE_BINDING' : [['6', '7', '8'],],
+ 'TRAFFICGEN' : 'Trex',
+ 'TRAFFICGEN_TREX_HOST_IP_ADDR' : _TREX_SERVER_IP,
+ 'TRAFFICGEN_TREX_BASE_DIR' : '/root/trex/scripts/',
+ 'TRAFFICGEN_TREX_USER' : 'root',
+ },
+ "TestSteps": TREX_VM_TEST
+ },
+ {
+ "Name": "trex_vm2vm_cont",
+ "Deployment": "clean",
+ "Description": "T-Rex VM2VM - execute RFC2544 Continuous Stream from T-Rex VM and loop it back through 2nd VM.",
+ "Parameters" : {
+ 'GUEST_LOOPBACK' : ['testpmd', 'clean'],
+ 'GUEST_NICS_NR' : [2, 4],
+ 'GUEST_SMP' : ['sockets=1,cores=2', 'sockets=1,cores=3'],
+ 'GUEST_CORE_BINDING' : [['9', '10'],['6', '7', '8'],],
+ 'TRAFFICGEN' : 'Trex',
+ 'TRAFFICGEN_TREX_HOST_IP_ADDR' : _TREX_SERVER_IP,
+ 'TRAFFICGEN_TREX_BASE_DIR' : '/root/trex/scripts/',
+ 'TRAFFICGEN_TREX_USER' : 'root',
+ "TRAFFIC" : {
+ "traffic_type" : "rfc2544_continuous",
+ },
+ },
+ "TestSteps": TREX_VM2VM_TEST,
+ },
+ {
+ "Name": "trex_vm2vm_tput",
+ "Deployment": "clean",
+ "Description": "T-Rex VM2VM - execute RFC2544 Throughput from T-Rex VM and loop it back through 2nd VM.",
+ "Parameters" : {
+ 'GUEST_LOOPBACK' : ['testpmd', 'clean'],
+ 'GUEST_NICS_NR' : [2, 4],
+ 'GUEST_SMP' : ['sockets=1,cores=2', 'sockets=1,cores=3'],
+ 'GUEST_CORE_BINDING' : [['9', '10'],['6', '7', '8'],],
+ 'TRAFFICGEN' : 'Trex',
+ 'TRAFFICGEN_TREX_HOST_IP_ADDR' : _TREX_SERVER_IP,
+ 'TRAFFICGEN_TREX_BASE_DIR' : '/root/trex/scripts/',
+ 'TRAFFICGEN_TREX_USER' : 'root',
+ "TRAFFIC" : {
+ "traffic_type" : "rfc2544_throughput",
+ },
+ },
+ "TestSteps": TREX_VM2VM_TEST,
+ },
+]
diff --git a/conf/integration/02_vswitch.conf b/conf/integration/02_vswitch.conf
index 63ffe1bc..9477a1d0 100644
--- a/conf/integration/02_vswitch.conf
+++ b/conf/integration/02_vswitch.conf
@@ -1,4 +1,4 @@
-# Copyright 2015-2016 Intel Corporation.
+# Copyright 2015-2018 Intel Corporation, Tieto
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,8 +16,8 @@
VTEP_IP1 = '192.168.0.1/24'
VTEP_IP2 = '192.168.240.10'
VTEP_IP2_SUBNET = '192.168.240.0/24'
-TUNNEL_INTEGRATION_BRIDGE = 'br0'
-TUNNEL_EXTERNAL_BRIDGE = 'br-ext'
+TUNNEL_INTEGRATION_BRIDGE = 'vsperf-br0'
+TUNNEL_EXTERNAL_BRIDGE = 'vsperf-br-ext'
TUNNEL_EXTERNAL_BRIDGE_IP = '192.168.240.1/24'
# vxlan|gre|geneve
diff --git a/conf/kubernetes/01_testcases.conf b/conf/kubernetes/01_testcases.conf
new file mode 100644
index 00000000..c5b3135c
--- /dev/null
+++ b/conf/kubernetes/01_testcases.conf
@@ -0,0 +1,12 @@
+K8SPERFORMANCE_TESTS = [
+ {
+ "Name": "pcp_tput",
+ "Deployment": "p2p",
+ "Description": "LTD.Throughput.RFC2544.Throughput",
+ "Parameters" : {
+ "TRAFFIC" : {
+ "traffic_type" : "rfc2544_throughput",
+ },
+ },
+ },
+]
diff --git a/core/component_factory.py b/core/component_factory.py
index bd9a1019..f13bfb5b 100644
--- a/core/component_factory.py
+++ b/core/component_factory.py
@@ -1,4 +1,4 @@
-# Copyright 2015-2017 Intel Corporation.
+# Copyright 2015-2018 Intel Corporation, Tieto
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -24,7 +24,7 @@ from core.vswitch_controller_op2p import VswitchControllerOP2P
from core.vswitch_controller_ptunp import VswitchControllerPtunP
from core.vnf_controller import VnfController
from core.pktfwd_controller import PktFwdController
-
+from core.pod_controller import PodController
def __init__():
"""Finds and loads all the modules required.
@@ -66,23 +66,23 @@ def create_vswitch(deployment_scenario, vswitch_class, traffic,
:return: IVSwitchController for the deployment_scenario
"""
# pylint: disable=too-many-return-statements
- deployment_scenario = deployment_scenario.lower()
- if deployment_scenario.startswith("p2p"):
- return VswitchControllerP2P(vswitch_class, traffic)
- elif deployment_scenario.startswith("pvp"):
- return VswitchControllerPXP(deployment_scenario, vswitch_class, traffic)
- elif deployment_scenario.startswith("pvvp"):
- return VswitchControllerPXP(deployment_scenario, vswitch_class, traffic)
- elif deployment_scenario.startswith("pvpv"):
- return VswitchControllerPXP(deployment_scenario, vswitch_class, traffic)
- elif deployment_scenario.startswith("op2p"):
- return VswitchControllerOP2P(vswitch_class, traffic, tunnel_operation)
- elif deployment_scenario.startswith("ptunp"):
- return VswitchControllerPtunP(vswitch_class, traffic)
- elif deployment_scenario.startswith("clean"):
- return VswitchControllerClean(vswitch_class, traffic)
+ deployment = deployment_scenario.lower()
+ if deployment.startswith("p2p"):
+ return VswitchControllerP2P(deployment, vswitch_class, traffic)
+ elif deployment.startswith("pvp"):
+ return VswitchControllerPXP(deployment, vswitch_class, traffic)
+ elif deployment.startswith("pvvp"):
+ return VswitchControllerPXP(deployment, vswitch_class, traffic)
+ elif deployment.startswith("pvpv"):
+ return VswitchControllerPXP(deployment, vswitch_class, traffic)
+ elif deployment.startswith("op2p"):
+ return VswitchControllerOP2P(deployment, vswitch_class, traffic, tunnel_operation)
+ elif deployment.startswith("ptunp"):
+ return VswitchControllerPtunP(deployment, vswitch_class, traffic)
+ elif deployment.startswith("clean"):
+ return VswitchControllerClean(deployment, vswitch_class, traffic)
else:
- raise RuntimeError("Unknown deployment scenario '{}'.".format(deployment_scenario))
+ raise RuntimeError("Unknown deployment scenario '{}'.".format(deployment))
def create_vnf(deployment_scenario, vnf_class, extra_vnfs):
@@ -102,6 +102,19 @@ def create_vnf(deployment_scenario, vnf_class, extra_vnfs):
"""
return VnfController(deployment_scenario, vnf_class, extra_vnfs)
+def create_pod(deployment_scenario, pod_class):
+ """Return a new PodController for the deployment_scenario.
+
+ The returned controller is configured with the given POD class.
+
+ Deployment scenarios: 'pvp', 'pvvp'
+
+ :param deployment_scenario: The deployment scenario name
+ :param pod_class: Reference to pod class to be used.
+ :return: PodController for the deployment_scenario
+ """
+ return PodController(deployment_scenario, pod_class)
+
def create_collector(collector_class, result_dir, test_name):
"""Return a new Collector of the given class
@@ -121,7 +134,6 @@ def create_loadgen(loadgen_class, loadgen_cfg):
:param loadgen_cfg: Configuration for the loadgen
:return: A new ILoadGenerator class
"""
- # pylint: disable=too-many-function-args
return loadgen_class(loadgen_cfg)
def create_pktfwd(deployment, pktfwd_class):
diff --git a/core/loader/loader.py b/core/loader/loader.py
index dcd77ced..45e0d5ba 100755
--- a/core/loader/loader.py
+++ b/core/loader/loader.py
@@ -23,6 +23,7 @@ from tools.pkt_fwd.pkt_fwd import IPktFwd
from tools.pkt_gen.trafficgen import ITrafficGenerator
from vswitches.vswitch import IVSwitch
from vnfs.vnf.vnf import IVnf
+from pods.pod.pod import IPod
# pylint: disable=too-many-public-methods
class Loader(object):
@@ -71,6 +72,11 @@ class Loader(object):
settings.getValue('PKTFWD'),
IPktFwd)
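+        # the POD loader mirrors the loaders above: it scans POD_DIR for
+        # classes implementing IPod and selects the one named by the POD setting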
+ self._pod_loader = LoaderServant(
+ settings.getValue('POD_DIR'),
+ settings.getValue('POD'),
+ IPod)
+
def get_trafficgen(self):
"""Returns a new instance configured traffic generator.
@@ -220,6 +226,37 @@ class Loader(object):
"""
return self._vnf_loader.get_classes_printable()
+ def get_pod(self):
+ """Returns instance of currently configured pod implementation.
+
+ :return: IPod implementation if available, None otherwise.
+ """
+ return self._pod_loader.get_class()()
+
+ def get_pod_class(self):
+ """Returns type of currently configured pod implementation.
+
+ :return: Type of IPod implementation if available.
+ None otherwise.
+ """
+ return self._pod_loader.get_class()
+
+ def get_pods(self):
+ """Returns dictionary of all available pods.
+
+ :return: Dictionary of pods.
+ - key: name of the class which implements IPod,
+            - value: Type of pod which implements IPod.
+ """
+ return self._pod_loader.get_classes()
+
+ def get_pods_printable(self):
+ """Returns all available pods in printable format.
+
+ :return: String containing printable list of pods.
+ """
+ return self._pod_loader.get_classes_printable()
+
def get_pktfwd(self):
"""Returns instance of currently configured packet forwarder implementation.
diff --git a/core/loader/loader_servant.py b/core/loader/loader_servant.py
index 8bad9ab9..6db8e0f2 100644
--- a/core/loader/loader_servant.py
+++ b/core/loader/loader_servant.py
@@ -120,7 +120,7 @@ class LoaderServant(object):
if class_name in results:
logging.info(
- "Class found: " + class_name + ".")
+ "Class found: %s.", class_name)
return results.get(class_name)
return None
@@ -180,7 +180,7 @@ class LoaderServant(object):
mod = imp.load_module(
modname, *imp.find_module(modname, [root]))
except ImportError:
- logging.error('Could not import file ' + filename)
+ logging.error('Could not import file %s', filename)
raise
mods.append((modname, mod))
diff --git a/core/pktfwd_controller.py b/core/pktfwd_controller.py
index b38aefa5..363302c3 100644
--- a/core/pktfwd_controller.py
+++ b/core/pktfwd_controller.py
@@ -1,4 +1,4 @@
-# Copyright 2016 Intel Corporation.
+# Copyright 2016-2018 Intel Corporation, Tieto
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -35,12 +35,12 @@ class PktFwdController(object):
self._pktfwd_class = pktfwd_class
self._pktfwd = pktfwd_class(guest=True if deployment == "pvp" and
settings.getValue('VNF') != "QemuPciPassthrough" else False)
- self._logger.debug('Creation using ' + str(self._pktfwd_class))
+ self._logger.debug('Creation using %s', str(self._pktfwd_class))
def setup(self):
"""Sets up the packet forwarder for p2p.
"""
- self._logger.debug('Setup using ' + str(self._pktfwd_class))
+ self._logger.debug('Setup using %s', str(self._pktfwd_class))
try:
self._pktfwd.start()
@@ -56,7 +56,7 @@ class PktFwdController(object):
def setup_for_guest(self):
"""Sets up the packet forwarder for pvp.
"""
- self._logger.debug('Setup using ' + str(self._pktfwd_class))
+ self._logger.debug('Setup using %s', str(self._pktfwd_class))
try:
self._pktfwd.start_for_guest()
@@ -67,7 +67,7 @@ class PktFwdController(object):
def stop(self):
"""Tears down the packet forwarder created in setup().
"""
- self._logger.debug('Stop using ' + str(self._pktfwd_class))
+ self._logger.debug('Stop using %s', str(self._pktfwd_class))
self._pktfwd.stop()
def __enter__(self):
@@ -89,9 +89,9 @@ class PktFwdController(object):
"""
return self._pktfwd
- def dump_vswitch_flows(self):
- """ Dumps flows from vswitch
+ def dump_vswitch_connections(self):
+ """ Dumps connections from vswitch
"""
raise NotImplementedError(
"The PktFwdController does not implement the "
- "\"dump_vswitch_flows\" function.")
+ "\"dump_vswitch_connections\" function.")
diff --git a/core/pod_controller.py b/core/pod_controller.py
new file mode 100644
index 00000000..8bc91ec4
--- /dev/null
+++ b/core/pod_controller.py
@@ -0,0 +1,93 @@
+# Copyright 2020 Spirent Communications
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" pod Controller interface
+"""
+
+import logging
+import pexpect
+#from conf import settings
+from pods.pod.pod import IPod
+
+class PodController():
+ """POD controller class
+
+    Used to set up and control PODs for the specified scenario
+
+ Attributes:
+ _pod_class: A class object representing the POD.
+        _deployment: A string describing the scenario to set up in the
+ constructor.
+ _pods: A list of pods controlled by the controller.
+ """
+
+    def __init__(self, deployment, pod_class):
+        """Sets up the POD infrastructure based on the deployment scenario
+
+ :param pod_class: The POD class to be used.
+ """
+ # reset POD ID counter for each testcase
+ IPod.reset_pod_counter()
+ pod_number = 0
+ # setup controller with requested number of pods
+ self._logger = logging.getLogger(__name__)
+ self._pod_class = pod_class
+ self._deployment = deployment.lower()
+ self._pods = []
+ if self._deployment == 'p2p':
+ pod_number = 1
+
+ if pod_number:
+ self._pods = [pod_class() for _ in range(pod_number)]
+
+ self._logger.debug('Initializing the pod')
+
+ def get_pods(self):
+ """Returns a list of pods controlled by this controller.
+ """
+ self._logger.debug('get the pods')
+ return self._pods
+
+    def get_pods_number(self):
+        """Returns the number of pods controlled by this controller.
+ """
+ self._logger.debug('get_pods_number %s pod[s]', str(len(self._pods)))
+ return len(self._pods)
+
+    def start(self):
+        """Boots all pods set up by __init__.
+
+ This is a blocking function.
+ """
+ self._logger.debug('start the pod')
+ try:
+ for pod in self._pods:
+ pod.create()
+ except pexpect.TIMEOUT:
+ self.stop()
+ raise
+
+    def stop(self):
+        """Stops all pods set up by __init__.
+
+ This is a blocking function.
+ """
+ self._logger.debug('stopping the pod')
+ for pod in self._pods:
+ pod.terminate()
+
+ def __enter__(self):
+ self.start()
+
+ def __exit__(self, type_, value, traceback):
+ self.stop()
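+
+# Example usage (a sketch; assumes a concrete IPod implementation has been
+# loaded, e.g. via Loader().get_pod_class()):
+#
+#   pod_class = Loader().get_pod_class()
+#   with PodController('p2p', pod_class):
+#       ...   # pods are created on entry and terminated on exit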
diff --git a/core/results/results_constants.py b/core/results/results_constants.py
index ef2df847..769938a8 100644
--- a/core/results/results_constants.py
+++ b/core/results/results_constants.py
@@ -69,6 +69,15 @@ class ResultsConstants(object):
TEST_START_TIME = "start_time"
TEST_STOP_TIME = "stop_time"
+ # files with traffic capture
+ CAPTURE_TX = "capture_tx"
+ CAPTURE_RX = "capture_rx"
+
+ # IMIX Used
+ IMIX_GENOME = "imix_genome"
+ # IMIX Avg. Frame Size
+ IMIX_AVG_FRAMESIZE = "imix_avg_framesize"
+
@staticmethod
def get_traffic_constants():
"""Method returns all Constants used to store results.
@@ -92,4 +101,6 @@ class ResultsConstants(object):
ResultsConstants.MIN_LATENCY_NS,
ResultsConstants.MAX_LATENCY_NS,
ResultsConstants.AVG_LATENCY_NS,
- ResultsConstants.FRAME_LOSS_PERCENT]
+ ResultsConstants.FRAME_LOSS_PERCENT,
+ ResultsConstants.IMIX_GENOME,
+ ResultsConstants.IMIX_AVG_FRAMESIZE]
diff --git a/core/traffic_controller.py b/core/traffic_controller.py
index de82dddf..1f21e57d 100644
--- a/core/traffic_controller.py
+++ b/core/traffic_controller.py
@@ -125,7 +125,7 @@ class TrafficController(object):
:param traffic: A dictionary describing the traffic to send.
"""
- self._logger.debug('send_traffic with ' +
+ self._logger.debug('send_traffic with %s',
str(self._traffic_gen_class))
self.configure(traffic)
@@ -144,7 +144,8 @@ class TrafficController(object):
If this function requires more than one argument, all should be
should be passed using the args list and appropriately handled.
"""
- self._logger.debug('send_traffic_async with ' +
+ # pylint: disable=unused-argument
+ self._logger.debug('send_traffic_async with %s',
str(self._traffic_gen_class))
self.configure(traffic)
@@ -158,7 +159,7 @@ class TrafficController(object):
"""
counter = 0
for item in self._results:
- logging.info("Record: " + str(counter))
+ logging.info("Record: %s", str(counter))
counter += 1
for(key, value) in list(item.items()):
logging.info(" Key: " + str(key) +
@@ -169,7 +170,7 @@ class TrafficController(object):
"""
return self._results
- def validate_send_traffic(self, dummy_result, dummy_traffic):
+ def validate_send_traffic(self, _dummy_result, _dummy_traffic):
"""Verify that send traffic has succeeded
"""
if self._results:
diff --git a/core/traffic_controller_rfc2544.py b/core/traffic_controller_rfc2544.py
index 488dde6f..2bb30fec 100644
--- a/core/traffic_controller_rfc2544.py
+++ b/core/traffic_controller_rfc2544.py
@@ -62,6 +62,9 @@ class TrafficControllerRFC2544(TrafficController, IResults):
elif traffic['traffic_type'] == 'rfc2544_continuous':
result = self._traffic_gen_class.send_cont_traffic(
traffic, duration=self._duration)
+ elif traffic['traffic_type'] == 'burst':
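+            # a plain burst test; hand the traffic definition straight to the
+            # generator's send_burst_traffic()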
+ result = self._traffic_gen_class.send_burst_traffic(
+ traffic, duration=self._duration)
elif traffic['traffic_type'] == 'rfc2544_throughput':
result = self._traffic_gen_class.send_rfc2544_throughput(
traffic, tests=self._tests, duration=self._duration, lossrate=self._lossrate)
@@ -87,7 +90,7 @@ class TrafficControllerRFC2544(TrafficController, IResults):
tests=self._tests,
duration=self._duration)
self._traffic_started = True
- if len(function['args']) > 0:
+ if function['args']:
function['function'](function['args'])
else:
function['function']()
diff --git a/core/traffic_controller_rfc2889.py b/core/traffic_controller_rfc2889.py
index 64ab0ba6..316202c9 100644
--- a/core/traffic_controller_rfc2889.py
+++ b/core/traffic_controller_rfc2889.py
@@ -84,7 +84,7 @@ class TrafficControllerRFC2889(TrafficController, IResults):
trials=self._trials,
duration=self._duration)
self._traffic_started = True
- if len(function['args']) > 0:
+ if function['args']:
function['function'](function['args'])
else:
function['function']()
diff --git a/core/vnf_controller.py b/core/vnf_controller.py
index 78a29258..cbf59b79 100644
--- a/core/vnf_controller.py
+++ b/core/vnf_controller.py
@@ -93,8 +93,7 @@ class VnfController(object):
def get_vnfs_number(self):
"""Returns a number of vnfs controlled by this controller.
"""
- self._logger.debug('get_vnfs_number ' + str(len(self._vnfs)) +
- ' VNF[s]')
+ self._logger.debug('get_vnfs_number %s VNF[s]', str(len(self._vnfs)))
return len(self._vnfs)
def start(self):
diff --git a/core/vswitch_controller.py b/core/vswitch_controller.py
index 855de8b2..889f14bc 100644
--- a/core/vswitch_controller.py
+++ b/core/vswitch_controller.py
@@ -1,4 +1,4 @@
-# Copyright 2015 Intel Corporation.
+# Copyright 2015-2018 Intel Corporation, Tieto
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,33 +13,57 @@
# limitations under the License.
"""Interface for deployment specific vSwitch controllers
"""
+import logging
class IVswitchController(object):
- """Abstract class which defines a vSwitch controller object
+ """Interface class for a vSwitch controller object
This interface is used to setup and control a vSwitch provider for a
particular deployment scenario.
"""
- def __enter__(self):
+    def __init__(self, deployment, vswitch_class, traffic):
+        """Initializes the generic prerequisites for the deployment scenario.
+
+ :deployment: the deployment scenario to configure
+ :vswitch_class: the vSwitch class to be used.
+ :traffic: dictionary with detailed traffic definition
+ """
+ self._logger = logging.getLogger(__name__)
+ self._vswitch_class = vswitch_class
+ self._vswitch = vswitch_class()
+ self._deployment_scenario = deployment
+ self._logger.debug('Creation using %s', str(self._vswitch_class))
+ self._traffic = traffic.copy()
+ self._bridge = None
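+        # concrete controllers overwrite _bridge with the bridge they manage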
+
+ def setup(self):
"""Sets up the switch for the particular deployment scenario
"""
raise NotImplementedError(
"The VswitchController does not implement the \"setup\" function.")
- def __exit__(self, type_, value, traceback):
+ def stop(self):
"""Tears down the switch created in setup()
"""
raise NotImplementedError(
"The VswitchController does not implement the \"stop\" function.")
+ def __enter__(self):
+ """Sets up the switch for the particular deployment scenario
+ """
+ self.setup()
+
+ def __exit__(self, type_, value, traceback):
+ """Tears down the switch created in setup()
+ """
+ self.stop()
+
def get_vswitch(self):
"""Get the controlled vSwitch
:return: The controlled IVswitch
"""
- raise NotImplementedError(
- "The VswitchController does not implement the \"get_vswitch\" "
- "function.")
+ return self._vswitch
def get_ports_info(self):
"""Returns a dictionary describing all ports on the vSwitch.
@@ -50,9 +74,9 @@ class IVswitchController(object):
"The VswitchController does not implement the \"get_ports_info\" "
"function.")
- def dump_vswitch_flows(self):
- """ Dumps flows from vswitch
+ def dump_vswitch_connections(self):
+ """ Dumps connections from vswitch
"""
raise NotImplementedError(
"The VswitchController does not implement the "
- "\"dump_vswitch_flows\" function.")
+ "\"dump_vswitch_connections\" function.")
diff --git a/core/vswitch_controller_clean.py b/core/vswitch_controller_clean.py
index 61724b9b..7a771226 100644
--- a/core/vswitch_controller_clean.py
+++ b/core/vswitch_controller_clean.py
@@ -1,4 +1,4 @@
-# Copyright 2015-2016 Intel Corporation.
+# Copyright 2015-2018 Intel Corporation, Tieto
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,9 +14,6 @@
"""VSwitch controller for basic initialization of vswitch
"""
-
-import logging
-
from core.vswitch_controller import IVswitchController
class VswitchControllerClean(IVswitchController):
@@ -28,22 +25,10 @@ class VswitchControllerClean(IVswitchController):
_deployment_scenario: A string describing the scenario to set-up in the
constructor.
"""
- def __init__(self, vswitch_class, traffic):
- """Initializes up the prerequisites for the Clean deployment scenario.
-
- :vswitch_class: the vSwitch class to be used.
- """
- self._logger = logging.getLogger(__name__)
- self._vswitch_class = vswitch_class
- self._vswitch = vswitch_class()
- self._deployment_scenario = "Clean"
- self._logger.debug('Creation using ' + str(self._vswitch_class))
- self._traffic = traffic.copy()
-
def setup(self):
"""Sets up the switch for Clean.
"""
- self._logger.debug('Setup using ' + str(self._vswitch_class))
+ self._logger.debug('Setup using %s', str(self._vswitch_class))
try:
self._vswitch.start()
@@ -54,26 +39,15 @@ class VswitchControllerClean(IVswitchController):
def stop(self):
"""Tears down the switch created in setup().
"""
- self._logger.debug('Stop using ' + str(self._vswitch_class))
+ self._logger.debug('Stop using %s', str(self._vswitch_class))
self._vswitch.stop()
- def __enter__(self):
- self.setup()
-
- def __exit__(self, type_, value, traceback):
- self.stop()
-
- def get_vswitch(self):
- """See IVswitchController for description
- """
- return self._vswitch
-
def get_ports_info(self):
"""See IVswitchController for description
"""
pass
- def dump_vswitch_flows(self):
+ def dump_vswitch_connections(self):
"""See IVswitchController for description
"""
pass
diff --git a/core/vswitch_controller_op2p.py b/core/vswitch_controller_op2p.py
index 85bf79bd..072a690a 100644
--- a/core/vswitch_controller_op2p.py
+++ b/core/vswitch_controller_op2p.py
@@ -1,4 +1,4 @@
-# Copyright 2015-2017 Intel Corporation.
+# Copyright 2015-2018 Intel Corporation, Tieto
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,44 +14,24 @@
"""VSwitch controller for Physical to Tunnel Endpoint to Physical deployment
"""
-
-import logging
-
from core.vswitch_controller import IVswitchController
from vswitches.utils import add_ports_to_flow
from conf import settings as S
from tools import tasks
-_FLOW_TEMPLATE = {
- 'idle_timeout': '0'
-}
-
class VswitchControllerOP2P(IVswitchController):
"""VSwitch controller for OP2P deployment scenario.
-
- Attributes:
- _vswitch_class: The vSwitch class to be used.
- _vswitch: The vSwitch object controlled by this controller
- _deployment_scenario: A string describing the scenario to set-up in the
- constructor.
"""
- def __init__(self, vswitch_class, traffic, tunnel_operation=None):
- """Initializes up the prerequisites for the OP2P deployment scenario.
-
- :vswitch_class: the vSwitch class to be used.
+ def __init__(self, deployment, vswitch_class, traffic, tunnel_operation=None):
+ """See IVswitchController for general description
"""
- self._logger = logging.getLogger(__name__)
- self._vswitch_class = vswitch_class
- self._vswitch = vswitch_class()
- self._deployment_scenario = "OP2P"
- self._traffic = traffic.copy()
+ super().__init__(deployment, vswitch_class, traffic)
self._tunnel_operation = tunnel_operation
- self._logger.debug('Creation using ' + str(self._vswitch_class))
def setup(self):
""" Sets up the switch for overlay P2P (tunnel encap or decap)
"""
- self._logger.debug('Setting up ' + str(self._tunnel_operation))
+ self._logger.debug('Setting up %s', str(self._tunnel_operation))
if self._tunnel_operation == "encapsulation":
self._setup_encap()
else:
@@ -66,7 +46,7 @@ class VswitchControllerOP2P(IVswitchController):
Create 2 bridges br0 (integration bridge) and br-ext and a VXLAN port
for encapsulation.
"""
- self._logger.debug('Setup using ' + str(self._vswitch_class))
+ self._logger.debug('Setup using %s', str(self._vswitch_class))
try:
self._vswitch.start()
@@ -118,10 +98,13 @@ class VswitchControllerOP2P(IVswitchController):
# Test is unidirectional for now
self._vswitch.del_flow(bridge)
- flow1 = add_ports_to_flow(_FLOW_TEMPLATE, phy1_number,
+ flow1 = add_ports_to_flow(S.getValue('OVS_FLOW_TEMPLATE'), phy1_number,
phy2_number)
self._vswitch.add_flow(bridge, flow1)
-
+ # enable MAC learning mode at external bridge
+ flow_ext = S.getValue('OVS_FLOW_TEMPLATE').copy()
+ flow_ext.update({'actions': ['NORMAL']})
+ self._vswitch.add_flow(bridge_ext, flow_ext)
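+            # ('NORMAL' makes OVS act as a standard MAC-learning L2 switch)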
except:
self._vswitch.stop()
raise
@@ -129,7 +112,7 @@ class VswitchControllerOP2P(IVswitchController):
def _setup_decap(self):
""" Sets up the switch for overlay P2P decapsulation test
"""
- self._logger.debug('Setup using ' + str(self._vswitch_class))
+ self._logger.debug('Setup using %s', str(self._vswitch_class))
try:
self._vswitch.start()
@@ -178,7 +161,7 @@ class VswitchControllerOP2P(IVswitchController):
bridge)
# Test is unidirectional for now
self._vswitch.del_flow(bridge_ext)
- flow1 = add_ports_to_flow(_FLOW_TEMPLATE, phy3_number,
+ flow1 = add_ports_to_flow(S.getValue('OVS_FLOW_TEMPLATE'), phy3_number,
phy2_number)
self._vswitch.add_flow(bridge_ext, flow1)
@@ -189,7 +172,7 @@ class VswitchControllerOP2P(IVswitchController):
def _setup_decap_vanilla(self):
""" Sets up the switch for overlay P2P decapsulation test
"""
- self._logger.debug('Setup decap vanilla ' + str(self._vswitch_class))
+ self._logger.debug('Setup decap vanilla %s', str(self._vswitch_class))
try:
self._vswitch.start()
@@ -251,7 +234,7 @@ class VswitchControllerOP2P(IVswitchController):
# Test is unidirectional for now
self._vswitch.del_flow(bridge_ext)
- flow1 = add_ports_to_flow(_FLOW_TEMPLATE, phy2_number, 'LOCAL')
+ flow1 = add_ports_to_flow(S.getValue('OVS_FLOW_TEMPLATE'), phy2_number, 'LOCAL')
self._vswitch.add_flow(bridge_ext, flow1)
except:
@@ -261,20 +244,9 @@ class VswitchControllerOP2P(IVswitchController):
def stop(self):
"""Tears down the switch created in setup().
"""
- self._logger.debug('Stop using ' + str(self._vswitch_class))
+ self._logger.debug('Stop using %s', str(self._vswitch_class))
self._vswitch.stop()
- def __enter__(self):
- self.setup()
-
- def __exit__(self, type_, value, traceback):
- self.stop()
-
- def get_vswitch(self):
- """See IVswitchController for description
- """
- return self._vswitch
-
def get_ports_info(self):
"""See IVswitchController for description
"""
@@ -286,8 +258,8 @@ class VswitchControllerOP2P(IVswitchController):
self._vswitch.get_ports(
S.getValue('TUNNEL_EXTERNAL_BRIDGE'))
- def dump_vswitch_flows(self):
+ def dump_vswitch_connections(self):
"""See IVswitchController for description
"""
- self._vswitch.dump_flows(S.getValue('TUNNEL_INTEGRATION_BRIDGE'))
- self._vswitch.dump_flows(S.getValue('TUNNEL_EXTERNAL_BRIDGE'))
+ self._vswitch.dump_connections(S.getValue('TUNNEL_INTEGRATION_BRIDGE'))
+ self._vswitch.dump_connections(S.getValue('TUNNEL_EXTERNAL_BRIDGE'))
diff --git a/core/vswitch_controller_p2p.py b/core/vswitch_controller_p2p.py
index 0d41b145..0037d484 100644
--- a/core/vswitch_controller_p2p.py
+++ b/core/vswitch_controller_p2p.py
@@ -1,4 +1,4 @@
-# Copyright 2015-2017 Intel Corporation.
+# Copyright 2015-2018 Intel Corporation, Tieto
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,20 +14,9 @@
"""VSwitch controller for Physical to Physical deployment
"""
-
-import logging
-import netaddr
-
from core.vswitch_controller import IVswitchController
from conf import settings
-_FLOW_TEMPLATE = {
- 'idle_timeout': '0'
-}
-
-_PROTO_TCP = 6
-_PROTO_UDP = 17
-
class VswitchControllerP2P(IVswitchController):
"""VSwitch controller for P2P deployment scenario.
@@ -37,71 +26,29 @@ class VswitchControllerP2P(IVswitchController):
_deployment_scenario: A string describing the scenario to set-up in the
constructor.
"""
- def __init__(self, vswitch_class, traffic):
- """Initializes up the prerequisites for the P2P deployment scenario.
-
- :vswitch_class: the vSwitch class to be used.
+ def __init__(self, deployment, vswitch_class, traffic):
+ """See IVswitchController for general description
"""
- self._logger = logging.getLogger(__name__)
- self._vswitch_class = vswitch_class
- self._vswitch = vswitch_class()
- self._deployment_scenario = "P2P"
- self._logger.debug('Creation using ' + str(self._vswitch_class))
- self._traffic = traffic.copy()
+ super().__init__(deployment, vswitch_class, traffic)
+ self._bridge = settings.getValue('VSWITCH_BRIDGE_NAME')
def setup(self):
"""Sets up the switch for p2p.
"""
- self._logger.debug('Setup using ' + str(self._vswitch_class))
+ self._logger.debug('Setup using %s', str(self._vswitch_class))
try:
self._vswitch.start()
- bridge = settings.getValue('VSWITCH_BRIDGE_NAME')
- self._vswitch.add_switch(bridge)
-
- (_, _) = self._vswitch.add_phy_port(bridge)
- (_, _) = self._vswitch.add_phy_port(bridge)
-
- self._vswitch.del_flow(bridge)
-
- # table#0 - flows designed to force 5 & 13 tuple matches go here
- flow = {'table':'0', 'priority':'1', 'actions': ['goto_table:1']}
- self._vswitch.add_flow(bridge, flow)
-
- # table#1 - flows to route packets between ports goes here. The
- # chosen port is communicated to subsequent tables by setting the
- # metadata value to the egress port number
+ self._vswitch.add_switch(self._bridge)
- # configure flows according to the TC definition
- flow_template = _FLOW_TEMPLATE.copy()
- if self._traffic['flow_type'] == 'IP':
- flow_template.update({'dl_type':'0x0800', 'nw_src':self._traffic['l3']['srcip'],
- 'nw_dst':self._traffic['l3']['dstip']})
+ (port1, _) = self._vswitch.add_phy_port(self._bridge)
+ (port2, _) = self._vswitch.add_phy_port(self._bridge)
- flow = flow_template.copy()
- flow.update({'table':'1', 'priority':'1', 'in_port':'1',
- 'actions': ['write_actions(output:2)', 'write_metadata:0x2',
- 'goto_table:2']})
- self.process_flow_template(bridge, flow)
- flow = flow_template.copy()
- flow.update({'table':'1', 'priority':'1', 'in_port':'2',
- 'actions': ['write_actions(output:1)', 'write_metadata:0x1',
- 'goto_table:2']})
- self.process_flow_template(bridge, flow)
+ if not settings.getValue('K8S'):
+ self._vswitch.add_connection(self._bridge, port1, port2, self._traffic)
+ self._vswitch.add_connection(self._bridge, port2, port1, self._traffic)
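+                # the two connections above cross-wire the physical ports in
+                # both directions, applying the traffic options from the TC
+                # definition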
- # Frame modification table. Frame modification flow rules are
- # isolated in this table so that they can be turned on or off
- # without affecting the routing or tuple-matching flow rules.
- flow = {'table':'2', 'priority':'1', 'actions': ['goto_table:3']}
- self._vswitch.add_flow(bridge, flow)
-
- # Egress table
- # (TODO) Billy O'Mahony - the drop action here actually required in
- # order to egress the packet. This is the subject of a thread on
- # ovs-discuss 2015-06-30.
- flow = {'table':'3', 'priority':'1', 'actions': ['drop']}
- self._vswitch.add_flow(bridge, flow)
except:
self._vswitch.stop()
raise
@@ -109,68 +56,16 @@ class VswitchControllerP2P(IVswitchController):
def stop(self):
"""Tears down the switch created in setup().
"""
- self._logger.debug('Stop using ' + str(self._vswitch_class))
+ self._logger.debug('Stop using %s', str(self._vswitch_class))
self._vswitch.stop()
- def __enter__(self):
- self.setup()
-
- def __exit__(self, type_, value, traceback):
- self.stop()
-
- def get_vswitch(self):
- """See IVswitchController for description
- """
- return self._vswitch
-
def get_ports_info(self):
"""See IVswitchController for description
"""
- self._logger.debug('get_ports_info using ' + str(self._vswitch_class))
- return self._vswitch.get_ports(settings.getValue('VSWITCH_BRIDGE_NAME'))
+ self._logger.debug('get_ports_info using %s', str(self._vswitch_class))
+ return self._vswitch.get_ports(self._bridge)
- def dump_vswitch_flows(self):
+ def dump_vswitch_connections(self):
"""See IVswitchController for description
"""
- self._vswitch.dump_flows(settings.getValue('VSWITCH_BRIDGE_NAME'))
-
- def process_flow_template(self, bridge, flow_template):
- """Method adds flows into the vswitch based on given flow template
- and configuration of multistream feature.
- """
- if ('pre_installed_flows' in self._traffic and
- self._traffic['pre_installed_flows'].lower() == 'yes' and
- 'multistream' in self._traffic and self._traffic['multistream'] > 0 and
- 'stream_type' in self._traffic):
- # multistream feature is enabled and flows should be inserted into OVS
- # so generate flows based on template and multistream configuration
- if self._traffic['stream_type'] == 'L2':
- # iterate through destimation MAC address
- dst_mac_value = netaddr.EUI(self._traffic['l2']['dstmac']).value
- for i in range(self._traffic['multistream']):
- tmp_mac = netaddr.EUI(dst_mac_value + i)
- tmp_mac.dialect = netaddr.mac_unix_expanded
- flow_template.update({'dl_dst':tmp_mac})
- # optimize flow insertion by usage of cache
- self._vswitch.add_flow(bridge, flow_template, cache='on')
- elif self._traffic['stream_type'] == 'L3':
- # iterate through destimation IP address
- dst_ip_value = netaddr.IPAddress(self._traffic['l3']['dstip']).value
- for i in range(self._traffic['multistream']):
- tmp_ip = netaddr.IPAddress(dst_ip_value + i)
- flow_template.update({'dl_type':'0x0800', 'nw_dst':tmp_ip})
- # optimize flow insertion by usage of cache
- self._vswitch.add_flow(bridge, flow_template, cache='on')
- elif self._traffic['stream_type'] == 'L4':
- # read transport protocol from configuration and iterate through its destination port
- proto = _PROTO_TCP if self._traffic['l3']['proto'].lower() == 'tcp' else _PROTO_UDP
- for i in range(self._traffic['multistream']):
- flow_template.update({'dl_type':'0x0800', 'nw_proto':proto, 'tp_dst':i})
- # optimize flow insertion by usage of cache
- self._vswitch.add_flow(bridge, flow_template, cache='on')
- else:
- self._logger.error('Stream type is set to uknown value %s', self._traffic['stream_type'])
- # insert cached flows into the OVS
- self._vswitch.add_flow(bridge, [], cache='flush')
- else:
- self._vswitch.add_flow(bridge, flow_template)
+ self._vswitch.dump_connections(self._bridge)
diff --git a/core/vswitch_controller_ptunp.py b/core/vswitch_controller_ptunp.py
index 27d26789..b10da2a9 100644
--- a/core/vswitch_controller_ptunp.py
+++ b/core/vswitch_controller_ptunp.py
@@ -1,4 +1,4 @@
-# Copyright 2015-2016 Intel Corporation.
+# Copyright 2015-2018 Intel Corporation, Tieto
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -15,8 +15,6 @@
"""VSwitch controller for Physical to VxLAN Tunnel Endpoint to Physical
deployment with mod operation.
"""
-
-import logging
from netaddr import EUI, IPNetwork, mac_unix
from core.vswitch_controller import IVswitchController
@@ -24,10 +22,6 @@ from vswitches.utils import add_ports_to_flow
from conf import settings
from tools import tasks
-_FLOW_TEMPLATE = {
- 'idle_timeout': '0'
-}
-
class VswitchControllerPtunP(IVswitchController):
"""VSwitch controller for VxLAN ptunp deployment scenario.
The deployment scenario is to test VxLAN tunneling feature without using an
@@ -40,16 +34,10 @@ class VswitchControllerPtunP(IVswitchController):
_deployment_scenario: A string describing the scenario to set-up in the
constructor.
"""
- def __init__(self, vswitch_class, traffic):
- """Initializes up the prerequisites for the ptunp deployment scenario.
-
- :vswitch_class: the vSwitch class to be used.
+ def __init__(self, deployment, vswitch_class, traffic):
+ """See IVswitchController for general description
"""
- self._logger = logging.getLogger(__name__)
- self._vswitch_class = vswitch_class
- self._vswitch = vswitch_class()
- self._deployment_scenario = "ptunp"
- self._traffic = traffic.copy()
+ super().__init__(deployment, vswitch_class, traffic)
self.bridge_phy1 = settings.getValue('TUNNEL_EXTERNAL_BRIDGE1')
self.bridge_phy2 = settings.getValue('TUNNEL_EXTERNAL_BRIDGE2')
self.bridge_mod1 = settings.getValue('TUNNEL_MODIFY_BRIDGE1')
@@ -59,13 +47,12 @@ class VswitchControllerPtunP(IVswitchController):
self.br_mod_ip1 = settings.getValue('TUNNEL_MODIFY_BRIDGE_IP1')
self.br_mod_ip2 = settings.getValue('TUNNEL_MODIFY_BRIDGE_IP2')
self.tunnel_type = settings.getValue('TUNNEL_TYPE')
- self._logger.debug('Creation using ' + str(self._vswitch_class))
def setup(self):
""" Sets up the switch for VxLAN overlay PTUNP (tunnel encap or decap)
"""
self._logger.debug('Setting up phy-tun-phy tunneling scenario')
- if self.tunnel_type is 'vxlan':
+ if self.tunnel_type == 'vxlan':
self._setup_vxlan_encap_decap()
else:
self._logger.error("Only VxLAN is supported for now")
@@ -78,7 +65,7 @@ class VswitchControllerPtunP(IVswitchController):
physical ports. Two more bridges br-mod1 and br-mod2 to mangle
and redirect the packets from one tunnel port to other.
"""
- self._logger.debug('Setup using ' + str(self._vswitch_class))
+ self._logger.debug('Setup using %s', str(self._vswitch_class))
try:
self._vswitch.start()
self._vswitch.add_switch(self.bridge_phy1)
@@ -156,23 +143,23 @@ class VswitchControllerPtunP(IVswitchController):
self._vswitch.del_flow(self.bridge_phy2)
self._vswitch.del_flow(self.bridge_mod1)
self._vswitch.del_flow(self.bridge_mod2)
- flow = add_ports_to_flow(_FLOW_TEMPLATE, phy1_number,
+ flow = add_ports_to_flow(settings.getValue('OVS_FLOW_TEMPLATE'), phy1_number,
phy3_number)
self._vswitch.add_flow(self.bridge_phy1, flow)
- flow = add_ports_to_flow(_FLOW_TEMPLATE, phy3_number,
+ flow = add_ports_to_flow(settings.getValue('OVS_FLOW_TEMPLATE'), phy3_number,
phy1_number)
self._vswitch.add_flow(self.bridge_phy1, flow)
- flow = add_ports_to_flow(_FLOW_TEMPLATE, phy2_number,
+ flow = add_ports_to_flow(settings.getValue('OVS_FLOW_TEMPLATE'), phy2_number,
phy4_number)
self._vswitch.add_flow(self.bridge_phy2, flow)
- flow = add_ports_to_flow(_FLOW_TEMPLATE, phy4_number,
+ flow = add_ports_to_flow(settings.getValue('OVS_FLOW_TEMPLATE'), phy4_number,
phy2_number)
self._vswitch.add_flow(self.bridge_phy2, flow)
- flow = add_ports_to_flow(_FLOW_TEMPLATE, phy5_number,
+ flow = add_ports_to_flow(settings.getValue('OVS_FLOW_TEMPLATE'), phy5_number,
'LOCAL')
self._vswitch.add_flow(self.bridge_mod1, flow)
- mod_flow_template = _FLOW_TEMPLATE.copy()
+ mod_flow_template = settings.getValue('OVS_FLOW_TEMPLATE').copy()
mod_flow_template.update({'ip':'',
'actions':
['mod_dl_src:' + str(vxlan_rem_mac2),
@@ -183,10 +170,10 @@ class VswitchControllerPtunP(IVswitchController):
})
flow = add_ports_to_flow(mod_flow_template, 'LOCAL', phy5_number)
self._vswitch.add_flow(self.bridge_mod1, flow)
- flow = add_ports_to_flow(_FLOW_TEMPLATE, phy6_number,
+ flow = add_ports_to_flow(settings.getValue('OVS_FLOW_TEMPLATE'), phy6_number,
'LOCAL')
self._vswitch.add_flow(self.bridge_mod2, flow)
- mod_flow_template = _FLOW_TEMPLATE.copy()
+ mod_flow_template = settings.getValue('OVS_FLOW_TEMPLATE').copy()
mod_flow_template.update({'ip':'',
'actions':
['mod_dl_src:' + str(vxlan_rem_mac1),
@@ -204,35 +191,24 @@ class VswitchControllerPtunP(IVswitchController):
def stop(self):
"""Tears down the switch created in setup().
"""
- self._logger.debug('Stop using ' + str(self._vswitch_class))
+ self._logger.debug('Stop using %s', str(self._vswitch_class))
self._vswitch.stop()
- def __enter__(self):
- self.setup()
-
- def __exit__(self, type_, value, traceback):
- self.stop()
-
- def get_vswitch(self):
- """See IVswitchController for description
- """
- return self._vswitch
-
def get_ports_info(self):
"""See IVswitchController for description
"""
- self._logger.debug('get_ports_info using ' + str(self._vswitch_class))
+ self._logger.debug('get_ports_info using %s', str(self._vswitch_class))
ports = self._vswitch.get_ports(self.bridge_phy1) +\
self._vswitch.get_ports(self.bridge_mod1) +\
self._vswitch.get_ports(self.bridge_phy2) +\
self._vswitch.get_ports(self.bridge_mod2)
return ports
- def dump_vswitch_flows(self):
+ def dump_vswitch_connections(self):
"""See IVswitchController for description
"""
- self._logger.debug('dump_flows using ' + str(self._vswitch_class))
- self._vswitch.dump_flows(self.bridge_phy1)
- self._vswitch.dump_flows(self.bridge_mod1)
- self._vswitch.dump_flows(self.bridge_phy2)
- self._vswitch.dump_flows(self.bridge_mod2)
+ self._logger.debug('dump_connections using %s', str(self._vswitch_class))
+ self._vswitch.dump_connections(self.bridge_phy1)
+ self._vswitch.dump_connections(self.bridge_mod1)
+ self._vswitch.dump_connections(self.bridge_phy2)
+ self._vswitch.dump_connections(self.bridge_mod2)
diff --git a/core/vswitch_controller_pxp.py b/core/vswitch_controller_pxp.py
index d4d1e764..d36ecdba 100644
--- a/core/vswitch_controller_pxp.py
+++ b/core/vswitch_controller_pxp.py
@@ -1,4 +1,4 @@
-# Copyright 2016 Intel Corporation.
+# Copyright 2016-2018 Intel Corporation, Tieto
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,34 +14,18 @@
"""VSwitch controller for multi VM scenarios with serial or parallel connection
"""
-
-import logging
import netaddr
from core.vswitch_controller import IVswitchController
-from vswitches.utils import add_ports_to_flow
from conf import settings
-_FLOW_TEMPLATE = {
- 'idle_timeout': '0'
-}
-
-_PROTO_TCP = 6
-_PROTO_UDP = 17
-
class VswitchControllerPXP(IVswitchController):
"""VSwitch controller for PXP deployment scenario.
"""
def __init__(self, deployment, vswitch_class, traffic):
- """Initializes up the prerequisites for the PXP deployment scenario.
-
- :vswitch_class: the vSwitch class to be used.
- :deployment: the deployment scenario to configure
- :traffic: dictionary with detailed traffic definition
+ """See IVswitchController for general description
"""
- self._logger = logging.getLogger(__name__)
- self._vswitch_class = vswitch_class
- self._vswitch = vswitch_class()
+ super().__init__(deployment, vswitch_class, traffic)
self._pxp_topology = 'parallel' if deployment.startswith('pvpv') else 'serial'
if deployment == 'pvp':
self._pxp_vm_count = 1
@@ -55,15 +39,13 @@ class VswitchControllerPXP(IVswitchController):
self._deployment_scenario = deployment
- self._traffic = traffic.copy()
self._bidir = True if self._traffic['bidir'] == 'True' else False
- self._logger.debug('Creation using ' + str(self._vswitch_class))
self._bridge = settings.getValue('VSWITCH_BRIDGE_NAME')
def setup(self):
""" Sets up the switch for PXP
"""
- self._logger.debug('Setup using ' + str(self._vswitch_class))
+ self._logger.debug('Setup using %s', str(self._vswitch_class))
try:
self._vswitch.start()
@@ -71,8 +53,8 @@ class VswitchControllerPXP(IVswitchController):
self._vswitch.add_switch(self._bridge)
# create physical ports
- (_, phy1_number) = self._vswitch.add_phy_port(self._bridge)
- (_, phy2_number) = self._vswitch.add_phy_port(self._bridge)
+ (phy1, _) = self._vswitch.add_phy_port(self._bridge)
+ (phy2, _) = self._vswitch.add_phy_port(self._bridge)
# create VM ports
# initialize vport array to requested number of VMs
@@ -86,54 +68,42 @@ class VswitchControllerPXP(IVswitchController):
self._logger.debug('Create %s vports for %s. VM with index %s',
nics_nr, vmindex + 1, vmindex)
for _ in range(nics_nr):
- (_, vport) = self._vswitch.add_vport(self._bridge)
+ (vport, _) = self._vswitch.add_vport(self._bridge)
vm_ports[vmindex].append(vport)
- self._vswitch.del_flow(self._bridge)
-
- # configure flows according to the TC definition
+ # configure connections according to the TC definition
if self._pxp_topology == 'serial':
- flow = _FLOW_TEMPLATE.copy()
- if self._traffic['flow_type'] == 'IP':
- flow.update({'dl_type':'0x0800',
- 'nw_src':self._traffic['l3']['srcip'],
- 'nw_dst':self._traffic['l3']['dstip']})
+ # NOTE: all traffic from VMs is sent to other ports directly
+ # without applying traffic options to avoid issues with MAC swapping
+ # and upper layer mods performed inside guests
- # insert flows for phy ports first
+ # insert connections for phy ports first
# from 1st PHY to 1st vport of 1st VM
- self._add_flow(flow,
- phy1_number,
- vm_ports[0][0],
- self._bidir)
+ self._vswitch.add_connection(self._bridge, phy1, vm_ports[0][0], self._traffic)
+ self._vswitch.add_connection(self._bridge, vm_ports[0][0], phy1)
# from last vport of last VM to 2nd phy
- self._add_flow(flow,
- vm_ports[self._pxp_vm_count-1][-1],
- phy2_number,
- self._bidir)
+ self._vswitch.add_connection(self._bridge, vm_ports[self._pxp_vm_count-1][-1], phy2)
+ self._vswitch.add_connection(self._bridge, phy2, vm_ports[self._pxp_vm_count-1][-1], self._traffic)
# add serial connections among VMs and VM NICs pairs if needed
# in case of multiple NICs pairs per VM, the pairs are chained
- # first, before flow to the next VM is created
+ # first, before connection to the next VM is created
for vmindex in range(self._pxp_vm_count):
# connect VMs NICs pairs in case of 4 and more NICs per VM
connections = [(vm_ports[vmindex][2*(x+1)-1],
vm_ports[vmindex][2*(x+1)])
for x in range(int(len(vm_ports[vmindex])/2)-1)]
for connection in connections:
- self._add_flow(flow,
- connection[0],
- connection[1],
- self._bidir)
+ self._vswitch.add_connection(self._bridge, connection[0], connection[1])
+ self._vswitch.add_connection(self._bridge, connection[1], connection[0])
# connect last NICs to the next VM if there is any
if self._pxp_vm_count > vmindex + 1:
- self._add_flow(flow,
- vm_ports[vmindex][-1],
- vm_ports[vmindex+1][0],
- self._bidir)
+ self._vswitch.add_connection(self._bridge, vm_ports[vmindex][-1], vm_ports[vmindex+1][0])
+ self._vswitch.add_connection(self._bridge, vm_ports[vmindex+1][0], vm_ports[vmindex][-1])
else:
- proto = _PROTO_TCP if self._traffic['l3']['proto'].lower() == 'tcp' else _PROTO_UDP
- dst_mac_value = netaddr.EUI(self._traffic['l2']['dstmac']).value
- dst_ip_value = netaddr.IPAddress(self._traffic['l3']['dstip']).value
+ mac_value = netaddr.EUI(self._traffic['l2']['dstmac']).value
+ ip_value = netaddr.IPAddress(self._traffic['l3']['dstip']).value
+ port_value = self._traffic['l4']['dstport']
# initialize stream index; every NIC pair of every VM uses unique stream
stream = 0
for vmindex in range(self._pxp_vm_count):
@@ -146,31 +116,33 @@ class VswitchControllerPXP(IVswitchController):
port_pairs = [(vm_ports[vmindex][0], vm_ports[vmindex][0])]
for port_pair in port_pairs:
- flow_p = _FLOW_TEMPLATE.copy()
- flow_v = _FLOW_TEMPLATE.copy()
-
- # update flow based on trafficgen settings
+                    # override traffic options to ensure that traffic is
+ # dispatched among VMs connected in parallel
+ options = {'multistream':1,
+ 'stream_type':self._traffic['stream_type'],
+ 'pre_installed_flows':'Yes'}
+ # update connection based on trafficgen settings
if self._traffic['stream_type'] == 'L2':
- tmp_mac = netaddr.EUI(dst_mac_value + stream)
+ tmp_mac = netaddr.EUI(mac_value + stream)
tmp_mac.dialect = netaddr.mac_unix_expanded
- flow_p.update({'dl_dst':tmp_mac})
+ options.update({'l2':{'dstmac':tmp_mac}})
elif self._traffic['stream_type'] == 'L3':
- tmp_ip = netaddr.IPAddress(dst_ip_value + stream)
- flow_p.update({'dl_type':'0x0800', 'nw_dst':tmp_ip})
+ tmp_ip = netaddr.IPAddress(ip_value + stream)
+ options.update({'l3':{'dstip':tmp_ip}})
elif self._traffic['stream_type'] == 'L4':
- flow_p.update({'dl_type':'0x0800', 'nw_proto':proto, 'tp_dst':stream})
+ options.update({'l3':{'proto':self._traffic['l3']['proto']}})
+ options.update({'l4':{'dstport':(port_value + stream) % 65536}})
else:
raise RuntimeError('Unknown stream_type {}'.format(self._traffic['stream_type']))
- # insert flow to dispatch traffic from physical ports
+ # insert connection to dispatch traffic from physical ports
# to VMs based on stream type; all traffic from VMs is
# sent to physical ports to avoid issues with MAC swapping
# and upper layer mods performed inside guests
- self._add_flow(flow_p, phy1_number, port_pair[0])
- self._add_flow(flow_v, port_pair[1], phy2_number)
- if self._bidir:
- self._add_flow(flow_p, phy2_number, port_pair[1])
- self._add_flow(flow_v, port_pair[0], phy1_number)
+ self._vswitch.add_connection(self._bridge, phy1, port_pair[0], options)
+ self._vswitch.add_connection(self._bridge, port_pair[1], phy2)
+ self._vswitch.add_connection(self._bridge, phy2, port_pair[1], options)
+ self._vswitch.add_connection(self._bridge, port_pair[0], phy1)
# every NIC pair needs its own unique traffic stream
stream += 1
@@ -182,40 +154,16 @@ class VswitchControllerPXP(IVswitchController):
def stop(self):
"""Tears down the switch created in setup().
"""
- self._logger.debug('Stop using ' + str(self._vswitch_class))
+ self._logger.debug('Stop using %s', str(self._vswitch_class))
self._vswitch.stop()
- def _add_flow(self, flow, port1, port2, reverse_flow=False):
- """ Helper method to insert flow into the vSwitch
- """
- self._vswitch.add_flow(self._bridge,
- add_ports_to_flow(flow,
- port1,
- port2))
- if reverse_flow:
- self._vswitch.add_flow(self._bridge,
- add_ports_to_flow(flow,
- port2,
- port1))
-
- def __enter__(self):
- self.setup()
-
- def __exit__(self, type_, value, traceback):
- self.stop()
-
- def get_vswitch(self):
- """See IVswitchController for description
- """
- return self._vswitch
-
def get_ports_info(self):
"""See IVswitchController for description
"""
- self._logger.debug('get_ports_info using ' + str(self._vswitch_class))
+ self._logger.debug('get_ports_info using %s', str(self._vswitch_class))
return self._vswitch.get_ports(self._bridge)
- def dump_vswitch_flows(self):
+ def dump_vswitch_connections(self):
"""See IVswitchController for description
"""
- self._vswitch.dump_flows(self._bridge)
+ self._vswitch.dump_connections(self._bridge)
diff --git a/docs/conf.py b/docs/conf.py
new file mode 100644
index 00000000..b281a515
--- /dev/null
+++ b/docs/conf.py
@@ -0,0 +1,6 @@
+""" for docs
+"""
+
+# pylint: disable=import-error
+# flake8: noqa
+from docs_conf.conf import *
diff --git a/docs/conf.yaml b/docs/conf.yaml
new file mode 100644
index 00000000..59448e39
--- /dev/null
+++ b/docs/conf.yaml
@@ -0,0 +1,3 @@
+---
+project_cfg: opnfv
+project: VSWITCHPERF
diff --git a/docs/index.rst b/docs/index.rst
new file mode 100644
index 00000000..c8a400f8
--- /dev/null
+++ b/docs/index.rst
@@ -0,0 +1,24 @@
+.. _vswitchperf:
+
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. SPDX-License-Identifier CC-BY-4.0
+.. (c) Open Platform for NFV Project, Inc. and its contributors
+
+*********************************
+OPNFV Vswitchperf
+*********************************
+
+.. toctree::
+ :numbered:
+ :maxdepth: 3
+
+ release/release-notes/index
+ testing/developer/devguide/index
+ testing/developer/devguide/results/index
+ testing/user/configguide/index
+ lma/index
+ openstack/index
+ k8s/index
+ xtesting/index
+
diff --git a/docs/k8s/index.rst b/docs/k8s/index.rst
new file mode 100644
index 00000000..872a3280
--- /dev/null
+++ b/docs/k8s/index.rst
@@ -0,0 +1,40 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) OPNFV, Spirent, AT&T, Ixia and others.
+
+.. OPNFV VSPERF Documentation master file.
+
+=========================================================
+OPNFV VSPERF Kubernetes Container Networking Benchmarking
+=========================================================
+VSPERF supports testing and benchmarking of Kubernetes container networking solutions, referred to as Kubernetes Container Networking Benchmarking (CNB). The process can be broadly classified into the following four operations.
+
+1. Setting up of Kubernetes Cluster.
+2. Deploying container networking solution.
+3. Deploying pod(s).
+4. Running tests.
+
+The first step is achieved through the tool present in the *tools/k8s/cluster-deployment* folder. Please refer to the documentation in that folder for automated Kubernetes cluster setup. To perform the remaining steps, the user has to run the following command.
+
+.. code-block:: console
+
+ vsperf --k8s --conf-file k8s.conf pcp_tput
+
+************************
+Important Configurations
+************************
+
+VSPERF has introduced new configuration parameters, listed below, for Kubernetes CNB. The file *12_k8s.conf*, present in the conf folder, provides sample values. Users have to modify these parameters to suit their environment before running the above command; an illustrative sketch follows the list.
+
+1. K8S_CONFIG_FILEPATH - location of the kubernetes-cluster access file. This will be used to connect to the cluster.
+2. PLUGIN - The plugin to use. Allowed values are OvsDPDK, VPP, and SRIOV.
+3. NETWORK_ATTACHMENT_FILEPATH - location of the network attachment definition file.
+4. CONFIGMAP_FILEPATH - location of the config-map file. This will be used only for SRIOV plugin.
+5. POD_MANIFEST_FILEPATH - location of the POD definition file.
+6. APP_NAME - Application to run in the pod. Options - l2fwd, testpmd, and l3fwd.
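+
+A minimal sketch of a customised configuration is shown below; the values are
+illustrative assumptions only, the authoritative sample values live in
+*12_k8s.conf*.
+
+.. code-block:: python
+
+   # Hypothetical k8s.conf excerpt -- adjust every value to your cluster.
+   K8S_CONFIG_FILEPATH = '/root/.kube/config'
+   PLUGIN = 'OvsDPDK'                                # or 'VPP', 'SRIOV'
+   NETWORK_ATTACHMENT_FILEPATH = '/opt/vsperf/net-attach-def.yaml'
+   CONFIGMAP_FILEPATH = '/opt/vsperf/configmap.yaml' # used only for SRIOV
+   POD_MANIFEST_FILEPATH = '/opt/vsperf/pod.yaml'
+   APP_NAME = 'l2fwd'                                # or 'testpmd', 'l3fwd'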
+
+
+*********
+Testcases
+*********
+Kubernetes CNB is performed through new testcases. For the Jerma release, only pcp_tput is supported. This testcase is similar to pvp_tput, with the VNF replaced by a pod/container. The pcp_tput testcase still uses phy2phy as its deployment. In future releases, a new deployment model will be added to support more Kubernetes testcases.
diff --git a/docs/lma/index.rst b/docs/lma/index.rst
new file mode 100644
index 00000000..dd6be47b
--- /dev/null
+++ b/docs/lma/index.rst
@@ -0,0 +1,18 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) OPNFV, Intel Corporation, AT&T, Red Hat, Spirent, Ixia and others.
+
+.. OPNFV VSPERF LMA Documentation master file.
+
+***********************
+OPNFV VSPERF LMA Guides
+***********************
+
+.. toctree::
+   :caption: User and Developer Guides for Monitoring Tools
+ :maxdepth: 2
+
+ ./metrics/userguide.rst
+ ./metrics/devguide.rst
+ ./logs/userguide.rst
+ ./logs/devguide.rst
diff --git a/docs/lma/logs/devguide.rst b/docs/lma/logs/devguide.rst
new file mode 100644
index 00000000..7aeaad29
--- /dev/null
+++ b/docs/lma/logs/devguide.rst
@@ -0,0 +1,145 @@
+====================
+Logs Developer Guide
+====================
+
+Ansible Client-side
+-------------------
+
+Ansible File Organisation
+^^^^^^^^^^^^^^^^^^^^^^^^^
+Files Structure::
+
+ ansible-client
+ ├── ansible.cfg
+ ├── hosts
+ ├── playbooks
+ │ └── setup.yaml
+ └── roles
+ ├── clean-td-agent
+ │ └── tasks
+ │ └── main.yml
+ └── td-agent
+ ├── files
+ │ └── td-agent.conf
+ └── tasks
+ └── main.yml
+
+Summary of roles
+^^^^^^^^^^^^^^^^
+====================== ================================================
+Roles                  Description
+====================== ================================================
+``td-agent``           Install Td-agent & change its configuration file
+``clean-td-agent``     Uninstall Td-agent
+====================== ================================================
+
+Configurable Parameters
+^^^^^^^^^^^^^^^^^^^^^^^
+================================ ====================== ======================
+File (ansible-client/roles/)     Parameter              Description
+================================ ====================== ======================
+``td-agent/files/td-agent.conf`` host                   Fluentd-server IP
+``td-agent/files/td-agent.conf`` port                   Fluentd-Server Port
+================================ ====================== ======================
+
+Ansible Server-side
+-------------------
+
+Ansible File Organisation
+^^^^^^^^^^^^^^^^^^^^^^^^^
+Files Structure::
+
+ ansible-server
+ ├── ansible.cfg
+ ├── group_vars
+ │ └── all.yml
+ ├── hosts
+ ├── playbooks
+ │ └── setup.yaml
+ └── roles
+ ├── clean-logging
+ │ └── tasks
+ │ └── main.yml
+ ├── k8s-master
+ │ └── tasks
+ │ └── main.yml
+ ├── k8s-pre
+ │ └── tasks
+ │ └── main.yml
+ ├── k8s-worker
+ │ └── tasks
+ │ └── main.yml
+ ├── logging
+ │ ├── files
+ │ │ ├── elastalert
+ │ │ │ ├── ealert-conf-cm.yaml
+ │ │ │ ├── ealert-key-cm.yaml
+ │ │ │ ├── ealert-rule-cm.yaml
+ │ │ │ └── elastalert.yaml
+ │ │ ├── elasticsearch
+ │ │ │ ├── elasticsearch.yaml
+ │ │ │ └── user-secret.yaml
+ │ │ ├── fluentd
+ │ │ │ ├── fluent-cm.yaml
+ │ │ │ ├── fluent-service.yaml
+ │ │ │ └── fluent.yaml
+ │ │ ├── kibana
+ │ │ │ └── kibana.yaml
+ │ │ ├── namespace.yaml
+ │ │ ├── nginx
+ │ │ │ ├── nginx-conf-cm.yaml
+ │ │ │ ├── nginx-key-cm.yaml
+ │ │ │ ├── nginx-service.yaml
+ │ │ │ └── nginx.yaml
+ │ │ ├── persistentVolume.yaml
+ │ │ └── storageClass.yaml
+ │ └── tasks
+ │ └── main.yml
+ └── nfs
+ └── tasks
+ └── main.yml
+
+Summary of roles
+^^^^^^^^^^^^^^^^
+====================== =================================================================================
+Roles                  Description
+====================== =================================================================================
+``k8s-pre``            Prerequisites for installing K8s, like installing Docker & K8s and disabling swap
+``k8s-master``         Reset K8s & make a master
+``k8s-worker``         Join worker nodes with token
+``logging``            EFK & elastalert setup in K8s
+``clean-logging``      Remove EFK & elastalert setup from K8s
+``nfs``                Start an NFS server to store Elasticsearch data
+====================== =================================================================================
+
+Configurable Parameters
+^^^^^^^^^^^^^^^^^^^^^^^
+========================================================================= ============================================ ======================
+File (ansible-server/roles/) Parameter name Description
+========================================================================= ============================================ ======================
+**Role: logging**
+``logging/files/persistentVolume.yaml`` storage Increase or Decrease Storage size of Persistent Volume size for each VM
+``logging/files/kibana/kibana.yaml`` version To Change the Kibana Version
+``logging/files/kibana/kibana.yaml`` count To increase or decrease the replica
+``logging/files/elasticsearch/elasticsearch.yaml`` version To Change the Elasticsearch Version
+``logging/files/elasticsearch/elasticsearch.yaml`` nodePort To Change Service Port
+``logging/files/elasticsearch/elasticsearch.yaml`` storage Increase or Decrease Storage size of Elasticsearch data for each VM
+``logging/files/elasticsearch/elasticsearch.yaml`` nodeAffinity -> values (hostname) In which VM Elasticsearch master or data pod will run (change the hostname to run the Elasticsearch master or data pod on a specific node)
+``logging/files/elasticsearch/user-secret.yaml`` stringData Add Elasticsearch User & its roles (`Elastic Docs <https://www.elastic.co/guide/en/cloud-on-k8s/master/k8s-users-and-roles.html#k8s_file_realm>`_)
+``logging/files/fluentd/fluent.yaml`` replicas To increase or decrease the replica
+``logging/files/fluentd/fluent-service.yaml`` nodePort To Change Service Port
+``logging/files/fluentd/fluent-cm.yaml`` index_template.json -> number_of_replicas To increase or decrease replica of data in Elasticsearch
+``logging/files/fluentd/fluent-cm.yaml`` fluent.conf Server port & other Fluentd Configuration
+``logging/files/nginx/nginx.yaml`` replicas To increase or decrease the replica
+``logging/files/nginx/nginx-service.yaml`` nodePort To Change Service Port
+``logging/files/nginx/nginx-key-cm.yaml`` kibana-access.key, kibana-access.pem Key file for HTTPs Connection
+``logging/files/nginx/nginx-conf-cm.yaml`` - Nginx Configuration
+``logging/files/elastalert/elastalert.yaml`` replicas To increase or decrease the replica
+``logging/files/elastalert/ealert-key-cm.yaml`` elastalert.key, elastalert.pem Key file for HTTPs Connection
+``logging/files/elastalert/ealert-conf-cm.yaml`` run_every How often ElastAlert will query Elasticsearch
+``logging/files/elastalert/ealert-conf-cm.yaml`` alert_time_limit If an alert fails for some reason, ElastAlert will retry sending the alert until this time period has elapsed
+``logging/files/elastalert/ealert-conf-cm.yaml`` es_host, es_port Elasticsearch Service name & port in K8s
+``logging/files/elastalert/ealert-rule-cm.yaml`` http_post_url Alert Receiver IP (`Elastalert Rule Config <https://elastalert.readthedocs.io/en/latest/ruletypes.html>`_)
+**Role: nfs**
+``nfs/tasks/main.yml`` line Path of NFS storage
+========================================================================= ============================================ ======================
diff --git a/docs/lma/logs/images/elasticsearch.png b/docs/lma/logs/images/elasticsearch.png
new file mode 100644
index 00000000..f0b876f5
--- /dev/null
+++ b/docs/lma/logs/images/elasticsearch.png
Binary files differ
diff --git a/docs/lma/logs/images/fluentd-cs.png b/docs/lma/logs/images/fluentd-cs.png
new file mode 100644
index 00000000..513bb3ef
--- /dev/null
+++ b/docs/lma/logs/images/fluentd-cs.png
Binary files differ
diff --git a/docs/lma/logs/images/fluentd-ss.png b/docs/lma/logs/images/fluentd-ss.png
new file mode 100644
index 00000000..4e9ab112
--- /dev/null
+++ b/docs/lma/logs/images/fluentd-ss.png
Binary files differ
diff --git a/docs/lma/logs/images/nginx.png b/docs/lma/logs/images/nginx.png
new file mode 100644
index 00000000..a0b00514
--- /dev/null
+++ b/docs/lma/logs/images/nginx.png
Binary files differ
diff --git a/docs/lma/logs/images/setup.png b/docs/lma/logs/images/setup.png
new file mode 100644
index 00000000..267685fa
--- /dev/null
+++ b/docs/lma/logs/images/setup.png
Binary files differ
diff --git a/docs/lma/logs/userguide.rst b/docs/lma/logs/userguide.rst
new file mode 100644
index 00000000..9b616fe7
--- /dev/null
+++ b/docs/lma/logs/userguide.rst
@@ -0,0 +1,386 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) OPNFV, Intel Corporation, AT&T, Red Hat, Spirent, Ixia and others.
+
+.. OPNFV VSPERF Documentation master file.
+
+***************
+Logs User Guide
+***************
+
+Prerequisites
+=============
+
+- Requires 3 VMs to set up K8s
+- ``$ sudo yum install ansible``
+- ``$ pip install openshift pyyaml kubernetes`` (required for ansible K8s module)
+- Update IPs in all these files (if changed)
+  ====================================================================== ===================================
+  Path                                                                   Description
+  ====================================================================== ===================================
+  ``ansible-server/group_vars/all.yml``                                  IP of K8s apiserver and VM hostname
+  ``ansible-server/hosts``                                               IP of VMs to install
+  ``ansible-server/roles/logging/files/persistentVolume.yaml``           IP of NFS-Server
+  ``ansible-server/roles/logging/files/elastalert/ealert-rule-cm.yaml``  IP of alert-receiver
+  ====================================================================== ===================================
+
+Architecture
+============
+.. image:: images/setup.png
+
+Installation - Clientside
+=========================
+
+Nodes
+-----
+
+- **Node1** = 10.10.120.21
+- **Node4** = 10.10.120.24
+
+How is installation done?
+-------------------------
+
+- TD-agent installation
+ ``$ curl -L https://toolbelt.treasuredata.com/sh/install-redhat-td-agent3.sh | sh``
+- Copy the TD-agent config file in **Node1**
+ ``$ cp tdagent-client-config/node1.conf /etc/td-agent/td-agent.conf``
+- Copy the TD-agent config file in **Node4**
+ ``$ cp tdagent-client-config/node4.conf /etc/td-agent/td-agent.conf``
+- Restart the service
+ ``$ sudo service td-agent restart``
+
+Installation - Serverside
+=========================
+
+Nodes
+-----
+
+Inside Jumphost - POD12
+ - **VM1** = 10.10.120.211
+ - **VM2** = 10.10.120.203
+ - **VM3** = 10.10.120.204
+
+
+How is installation done?
+-------------------------
+
+**Using Ansible:**
+ - **K8s**
+ - **Elasticsearch:** 1 Master & 1 Data node at each VM
+ - **Kibana:** 1 Replicas
+ - **Nginx:** 2 Replicas
+ - **Fluentd:** 2 Replicas
+    - **Elastalert:** 1 Replica (duplicate alerts are received if the replica count is increased)
+ - **NFS Server:** at each VM to store elasticsearch data at following path
+ - ``/srv/nfs/master``
+ - ``/srv/nfs/data``
+
+How to set up?
+--------------
+
+- **To setup K8s cluster and EFK:** Run the ansible-playbook ``ansible/playbooks/setup.yaml``
+- **To clean everything:** Run the ansible-playbook ``ansible/playbooks/clean.yaml``
+
+Do we have HA?
+--------------
+
+Yes
+
+Configuration
+=============
+
+K8s
+---
+
+Path of all yamls (Serverside)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+``ansible-server/roles/logging/files/``
+
+K8s namespace
+^^^^^^^^^^^^^
+
+``logging``
+
+K8s Service details
+^^^^^^^^^^^^^^^^^^^
+
+``$ kubectl get svc -n logging``
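+
+The same listing can be fetched programmatically with the ``kubernetes``
+Python client (installed as a prerequisite above); a minimal sketch,
+assuming a reachable kubeconfig:
+
+.. code-block:: python
+
+   # Equivalent of `kubectl get svc -n logging`.
+   from kubernetes import client, config
+
+   config.load_kube_config()
+   for svc in client.CoreV1Api().list_namespaced_service('logging').items:
+       ports = ','.join(str(p.port) for p in svc.spec.ports)
+       print(svc.metadata.name, svc.spec.type, ports)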
+
+Elasticsearch Configuration
+---------------------------
+
+Elasticsearch Setup Structure
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. image:: images/elasticsearch.png
+
+Elasticsearch service details
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+| **Service Name:** ``logging-es-http``
+| **Service Port:** ``9200``
+| **Service Type:** ``ClusterIP``
+
+How to get elasticsearch default username & password?
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- User1 (custom user):
+ | **Username:** ``elasticsearch``
+ | **Password:** ``password123``
+- User2 (by default created by Elastic Operator):
+ | **Username:** ``elastic``
+ | To get default password:
+ | ``$ PASSWORD=$(kubectl get secret -n logging logging-es-elastic-user -o go-template='{{.data.elastic | base64decode}}')``
+ | ``$ echo $PASSWORD``
+
+How to increase replica of any index?
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+| $ curl -k -u "elasticsearch:password123" -H 'Content-Type: application/json' -XPUT "https://10.10.120.211:9200/indexname*/_settings" -d '
+| {
+| "index" : {
+| "number_of_replicas" : "2" }
+| }'
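+
+The same setting can be applied with a short Python sketch using the
+``requests`` library (index pattern, credentials and replica count exactly
+as in the curl example above):
+
+.. code-block:: python
+
+   # Equivalent of the curl command above; verify=False mirrors curl's -k.
+   import requests
+
+   requests.put(
+       'https://10.10.120.211:9200/indexname*/_settings',
+       auth=('elasticsearch', 'password123'),
+       json={'index': {'number_of_replicas': '2'}},
+       verify=False,
+   )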
+
+Index Life
+^^^^^^^^^^
+**30 Days**
+
+Kibana Configuration
+--------------------
+
+Kibana Service details
+^^^^^^^^^^^^^^^^^^^^^^
+
+| **Service Name:** ``logging-kb-http``
+| **Service Port:** ``5601``
+| **Service Type:** ``ClusterIP``
+
+Nginx Configuration
+-------------------
+
+IP
+^^
+
+Use the node IP address with HTTPS, e.g. ``https://10.10.120.211:32000``.
+
+Nginx Setup Structure
+^^^^^^^^^^^^^^^^^^^^^
+
+.. image:: images/nginx.png
+
+Nginx Service details
+^^^^^^^^^^^^^^^^^^^^^
+
+| **Service Name:** ``nginx``
+| **Service Port:** ``32000``
+| **Service Type:** ``NodePort``
+
+Why is NGINX used?
+^^^^^^^^^^^^^^^^^^
+
+`Securing ELK using Nginx <https://logz.io/blog/securing-elk-nginx/>`_
+
+Nginx Configuration
+^^^^^^^^^^^^^^^^^^^
+
+**Path:** ``ansible-server/roles/logging/files/nginx/nginx-conf-cm.yaml``
+
+Fluentd Configuration - Clientside (Td-agent)
+---------------------------------------------
+
+Fluentd Setup Structure
+^^^^^^^^^^^^^^^^^^^^^^^
+
+.. image:: images/fluentd-cs.png
+
+Log collection paths
+^^^^^^^^^^^^^^^^^^^^
+
+- ``/tmp/result*/*.log``
+- ``/tmp/result*/*.dat``
+- ``/tmp/result*/*.csv``
+- ``/tmp/result*/stc-liveresults.dat.*``
+- ``/var/log/userspace*.log``
+- ``/var/log/sriovdp/*.log.*``
+- ``/var/log/pods/**/*.log``
+
+Logs sent to
+^^^^^^^^^^^^
+
+Another Fluentd instance in the K8s cluster (K8s Master: 10.10.120.211) at the Jumphost.
+
+Td-agent logs
+^^^^^^^^^^^^^
+
+Path of td-agent logs: ``/var/log/td-agent/td-agent.log``
+
+Td-agent configuration
+^^^^^^^^^^^^^^^^^^^^^^
+
+| Path of conf file: ``/etc/td-agent/td-agent.conf``
+| **If any changes are made in td-agent.conf, restart the td-agent service:** ``$ sudo service td-agent restart``
+
+Config Description
+^^^^^^^^^^^^^^^^^^
+
+- Get the logs from the collection paths
+- | Convert them to this format:
+  | {
+  | msg: "log line"
+  | log_path: "/file/path"
+  | file: "file.name"
+  | host: "pod12-node4"
+  | }
+- Send them to Fluentd
+
+Fluentd Configuration - Serverside
+----------------------------------
+
+Fluentd Setup Structure
+^^^^^^^^^^^^^^^^^^^^^^^
+
+.. image:: images/fluentd-ss.png
+
+Fluentd Service details
+^^^^^^^^^^^^^^^^^^^^^^^
+
+| **Service Name:** ``fluentd``
+| **Service Port:** ``32224``
+| **Service Type:** ``NodePort``
+
+Logs sent to
+^^^^^^^^^^^^
+Elasticsearch service (Example: logging-es-http at port 9200)
+
+Config Description
+^^^^^^^^^^^^^^^^^^
+
+- **Step 1**
+ - Get the logs from Node1 & Node4
+- **Step 2**
+  ======================================== ======================
+  log_path                                 add tag (for routing)
+  ======================================== ======================
+  ``/tmp/result.*/.*errors.dat``           errordat.log
+  ``/tmp/result.*/.*counts.dat``           countdat.log
+  ``/tmp/result.*/stc-liveresults.dat.tx`` stcdattx.log
+  ``/tmp/result.*/stc-liveresults.dat.rx`` stcdatrx.log
+  ``/tmp/result.*/.*Statistics.csv``       ixia.log
+  ``/tmp/result.*/vsperf-overall*``        vsperf.log
+  ``/tmp/result.*/vswitchd*``              vswitchd.log
+  ``/var/log/userspace*``                  userspace.log
+  ``/var/log/sriovdp*``                    sriovdp.log
+  ``/var/log/pods*``                       pods.log
+  ======================================== ======================
+
+- **Step 3**
+ Then parse each type using tags.
+ - error.conf: to find any error
+ - time-series.conf: to parse time series data
+  - time-analysis.conf: to calculate time analysis
+- **Step 4**
+  ================================ ======================
+  host                             add tag (for routing)
+  ================================ ======================
+  ``pod12-node4``                  node4
+  ``worker``                       node1
+  ================================ ======================
+- **Step 5**
+  ================================ ======================
+  Tag                              elasticsearch
+  ================================ ======================
+  ``node4``                        index "node4*"
+  ``node1``                        index "node1*"
+  ================================ ======================
+
+Elastalert
+==========
+
+Send alert if
+-------------
+
+- Blacklist
+ - "Failed to run test"
+ - "Failed to execute in '30' seconds"
+ - "('Result', 'Failed')"
+ - "could not open socket: connection refused"
+ - "Input/output error"
+ - "dpdk|ERR|EAL: Error - exiting with code: 1"
+ - "dpdk|ERR|EAL: Driver cannot attach the device"
+ - "dpdk|EMER|Cannot create lock on"
+ - "dpdk|ERR|VHOST_CONFIG: * device not found"
+- Time
+ - vswitch_duration > 3 sec
+
+How to configure alert?
+-----------------------
+
+- Add your rule in ``ansible/roles/logging/files/elastalert/ealert-rule-cm.yaml`` (`Elastalert Rule Config <https://elastalert.readthedocs.io/en/latest/ruletypes.html>`_)
+  | name: anything
+  | type: <check-above-link> # The RuleType to use
+  | index: node4* # index name
+  | realert:
+  |   minutes: 0 # to get alert for all cases after each interval
+  | alert: post # To send alert as HTTP POST
+  | http_post_url: # Provide URL
+
+- Mount this file to elastalert pod in ``ansible/roles/logging/files/elastalert/elastalert.yaml``.
+
+Alert Format
+------------
+
+{"type": "pattern-match", "label": "failed", "index": "node4-20200815", "log": "error-log-line", "log-path": "/tmp/result/file.log", "reson": "error-message" }
+
+Data Management
+===============
+
+Elasticsearch
+-------------
+
+Q&As
+^^^^
+
+Where is data stored now?
+Data is stored in the NFS server with 1 replica of each index (default). The data paths are the following:
+
+ - ``/srv/nfs/data (VM1)``
+ - ``/srv/nfs/data (VM2)``
+ - ``/srv/nfs/data (VM3)``
+ - ``/srv/nfs/master (VM1)``
+ - ``/srv/nfs/master (VM2)``
+ - ``/srv/nfs/master (VM3)``
+
+Can the user change from NFS to local storage?
+Yes, by configuring the persistent volume (``ansible-server/roles/logging/files/persistentVolume.yaml``).
+
+Do we have a backup of the data?
+Yes, 1 replica of each index.
+
+When K8s restarts, is the data still accessible?
+Yes (if the data has not been deleted from /srv/nfs/data).
+
+Troubleshooting
+===============
+
+If no logs are received in Elasticsearch
+----------------------------------------
+
+- Check IP & port of server-fluentd in client config.
+- Check client-fluentd logs, ``$ sudo tail -f /var/log/td-agent/td-agent.log``
+- Check server-fluentd logs, ``$ sudo kubectl logs -n logging <fluentd-pod-name>``
+
+If no notification is received
+------------------------------
+
+- Search your "log" in Elasticsearch.
+- Check config of elastalert
+- Check IP of alert-receiver
+
+Reference
+=========
+- `Elastic cloud on K8s <https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-quickstart.html>`_
+- `HA Elasticsearch on K8s <https://www.elastic.co/blog/high-availability-elasticsearch-on-kubernetes-with-eck-and-gke>`_
+- `Fluentd Configuration <https://docs.fluentd.org/configuration/config-file>`_
+- `Elastalert Rule Config <https://elastalert.readthedocs.io/en/latest/ruletypes.html>`_
diff --git a/docs/lma/metrics/devguide.rst b/docs/lma/metrics/devguide.rst
new file mode 100644
index 00000000..40162397
--- /dev/null
+++ b/docs/lma/metrics/devguide.rst
@@ -0,0 +1,469 @@
+=======================
+Metrics Developer Guide
+=======================
+
+Ansible File Organization
+=========================
+
+Ansible-Server
+--------------
+
+Please follow this file structure:
+
+.. code-block:: bash
+
+ ansible-server
+ | ansible.cfg
+ | hosts
+ |
+ +---group_vars
+ | all.yml
+ |
+ +---playbooks
+ | clean.yaml
+ | setup.yaml
+ |
+ \---roles
+ +---clean-monitoring
+ | \---tasks
+ | main.yml
+ |
+ +---monitoring
+ +---files
+ | | monitoring-namespace.yaml
+ | |
+ | +---alertmanager
+ | | alertmanager-config.yaml
+ | | alertmanager-deployment.yaml
+ | | alertmanager-service.yaml
+ | | alertmanager1-deployment.yaml
+ | | alertmanager1-service.yaml
+ | |
+ | +---cadvisor
+ | | cadvisor-daemonset.yaml
+ | | cadvisor-service.yaml
+ | |
+ | +---collectd-exporter
+ | | collectd-exporter-deployment.yaml
+ | | collectd-exporter-service.yaml
+ | |
+ | +---grafana
+ | | grafana-datasource-config.yaml
+ | | grafana-deployment.yaml
+ | | grafana-pv.yaml
+ | | grafana-pvc.yaml
+ | | grafana-service.yaml
+ | |
+ | +---kube-state-metrics
+ | | kube-state-metrics-deployment.yaml
+ | | kube-state-metrics-service.yaml
+ | |
+ | +---node-exporter
+ | | nodeexporter-daemonset.yaml
+ | | nodeexporter-service.yaml
+ | |
+ | \---prometheus
+ | main-prometheus-service.yaml
+ | prometheus-config.yaml
+ | prometheus-deployment.yaml
+ | prometheus-pv.yaml
+ | prometheus-pvc.yaml
+ | prometheus-service.yaml
+ | prometheus1-deployment.yaml
+ | prometheus1-service.yaml
+ |
+ \---tasks
+ main.yml
+
+
+Ansible - Client
+----------------
+
+Please follow this file structure:
+
+.. code-block:: bash
+
+    ansible-client
+ | ansible.cfg
+ | hosts
+ |
+ +---group_vars
+ | all.yml
+ |
+ +---playbooks
+ | clean.yaml
+ | setup.yaml
+ |
+ \---roles
+ +---clean-collectd
+ | \---tasks
+ | main.yml
+ |
+ +---collectd
+ +---files
+ | collectd.conf.j2
+ |
+ \---tasks
+ main.yml
+
+
+Summary of Roles
+================
+
+A brief description of the Ansible playbook roles,
+which are used to deploy the monitoring cluster
+
+Ansible Server Roles
+--------------------
+
+This part consists of the roles used to deploy the
+Prometheus-Alertmanager-Grafana (PAG) stack on the server side
+
+Role: Monitoring
+~~~~~~~~~~~~~~~~
+
+Deployment and configuration of PAG stack along with collectd-exporter,
+cadvisor and node-exporter.
+
+Role: Clean-Monitoring
+~~~~~~~~~~~~~~~~~~~~~~
+
+Removes all the components deployed by the Monitoring role.
+
+
+File-Task Mapping and Configurable Parameters
+================================================
+
+Ansible Server
+----------------
+
+Role: Monitoring
+~~~~~~~~~~~~~~~~~~~
+
+Alert Manager
+^^^^^^^^^^^^^^^
+
+File: alertmanager-config.yaml
+'''''''''''''''''''''''''''''''''
+Path : monitoring/files/alertmanager/alertmanager-config.yaml
+
+Task: Configures Receivers for alertmanager
+
+Summary: A configmap; currently configures a webhook for alertmanager and
+can be used to configure any kind of receiver
+
+Configurable Parameters:
+ receiver.url: change to the webhook receiver's URL
+ route: Can be used to add receivers
+
+
+File: alertmanager-deployment.yaml
+''''''''''''''''''''''''''''''''''
+Path : monitoring/files/alertmanager/alertmanager-deployment.yaml
+
+Task: Deploys alertmanager instance
+
+Summary: A Deployment, deploys 1 replica of alertmanager
+
+
+File: alertmanager-service.yaml
+'''''''''''''''''''''''''''''''''
+Path : monitoring/files/alertmanager/alertmanager-service.yaml
+
+Task: Creates a K8s service for alertmanager
+
+Summary: A NodePort type of service, so that the user can create "silences" and
+view the status of alerts from the native Alertmanager dashboard / UI.
+
+Configurable Parameters:
+ spec.type: Options : NodePort, ClusterIP, LoadBalancer
+ spec.ports: Edit / add ports to be handled by the service
+
+**Note: alertmanager1-deployment, alertmanager1-service are the same as
+alertmanager-deployment and alertmanager-service respectively.**
+
+CAdvisor
+^^^^^^^^^^^
+
+File: cadvisor-daemonset.yaml
+'''''''''''''''''''''''''''''''''
+Path : monitoring/files/cadvisor/cadvisor-daemonset.yaml
+
+Task: To create a cadvisor daemonset
+
+Summary: A daemonset, used to scrape data of the Kubernetes cluster itself;
+since it is a daemonset, an instance runs on every node.
+
+Configurable Parameters:
+ spec.template.spec.ports: Port of the container
+
+
+File: cadvisor-service.yaml
+'''''''''''''''''''''''''''''''''
+Path : monitoring/files/cadvisor/cadvisor-service.yaml
+
+Task: To create a cadvisor service
+
+Summary: A ClusterIP service for cadvisor to communicate with prometheus
+
+Configurable Parameters:
+ spec.ports: Add / Edit ports
+
+
+Collectd Exporter
+^^^^^^^^^^^^^^^^^^^^
+
+File: collectd-exporter-deployment.yaml
+''''''''''''''''''''''''''''''''''''''''''
+Path : monitoring/files/collectd-exporter/collectd-exporter-deployment.yaml
+
+Task: To create a collectd-exporter replica
+
+Summary: A deployment; acts as the receiver for collectd data sent by client machines.
+Prometheus pulls data from this exporter.
+
+Configurable Parameters:
+ spec.template.spec.ports: Port of the container
+
+
+File: collectd-exporter-service.yaml
+''''''''''''''''''''''''''''''''''''
+Path : monitoring/files/collectd-exporter/collectd-exporter-service.yaml
+
+Task: To create a collectd-exporter service
+
+Summary: A NodePort service for collectd-exporter to hold data for prometheus
+to scrape
+
+Configurable Parameters:
+ spec.ports: Add / Edit ports
+
+
+Grafana
+^^^^^^^^^
+
+File: grafana-datasource-config.yaml
+''''''''''''''''''''''''''''''''''''''''''
+Path : monitoring/files/grafana/grafana-datasource-config.yaml
+
+Task: To create config file for grafana
+
+Summary: A configmap, adds prometheus datasource in grafana
+
+
+File: grafana-deployment.yaml
+'''''''''''''''''''''''''''''''''
+Path : monitoring/files/grafana/grafana-deployment.yaml
+
+Task: To create a grafana deployment
+
+Summary: The grafana deployment creates a single replica of grafana,
+with preconfigured prometheus datasource.
+
+Configurable Parameters:
+ spec.template.spec.ports: Edit ports
+ spec.template.spec.env: Add / Edit environment variables
+
+
+File: grafana-pv.yaml
+'''''''''''''''''''''''''''''''''
+Path : monitoring/files/grafana/grafana-pv.yaml
+
+Task: To create a persistent volume for grafana
+
+Summary: A persistent volume for grafana.
+
+Configurable Parameters:
+ spec.capacity.storage: Increase / decrease size
+ spec.accessModes: To change the way PV is accessed.
+ spec.nfs.server: To change the ip address of NFS server
+ spec.nfs.path: To change the path of the server
+
+
+File: grafana-pvc.yaml
+'''''''''''''''''''''''''''''''''
+Path : monitoring/files/grafana/grafana-pvc.yaml
+
+Task: To create a persistent volume claim for grafana
+
+Summary: A persistent volume claim for grafana.
+
+Configurable Parameters:
+ spec.resources.requests.storage: Increase / decrease size
+
+
+File: grafana-service.yaml
+'''''''''''''''''''''''''''''''''
+Path : monitoring/files/grafana/grafana-service.yaml
+
+Task: To create a service for grafana
+
+Summary: A NodePort type of service that users actually connect to, to view
+the dashboard / UI.
+
+Configurable Parameters:
+ spec.type: Options : NodePort, ClusterIP, LoadBalancer
+ spec.ports: Edit / add ports to be handled by the service
+
+
+Kube State Metrics
+^^^^^^^^^^^^^^^^^^^^
+
+File: kube-state-metrics-deployment.yaml
+''''''''''''''''''''''''''''''''''''''''
+Path : monitoring/files/kube-state-metrics/kube-state-metrics-deployment.yaml
+
+Task: To create a kube-state-metrics instance
+
+Summary: A deployment, used to collect metrics of the kubernetes cluster itself
+
+Configurable Parameters:
+ spec.template.spec.containers.ports: Port of the container
+
+
+File: kube-state-metrics-service.yaml
+'''''''''''''''''''''''''''''''''''''
+Path : monitoring/files/kube-state-metrics/kube-state-metrics-service.yaml
+
+Task: To create a kube-state-metrics service
+
+Summary: A NodePort service exposing kube-state-metrics data for prometheus
+to scrape
+
+Configurable Parameters:
+ spec.ports: Add / Edit ports
+
+
+Node Exporter
+^^^^^^^^^^^^^^^
+
+File: node-exporter-daemonset.yaml
+''''''''''''''''''''''''''''''''''
+Path : monitoring/files/node-exporter/node-exporter-daemonset.yaml
+
+Task: To create a node exporter daemonset
+
+Summary: A daemonset, used to scrape data of the host machines / nodes;
+since it is a daemonset, an instance runs on every node.
+
+Configurable Parameters:
+ spec.template.spec.ports: Port of the container
+
+
+File: node-exporter-service.yaml
+'''''''''''''''''''''''''''''''''
+Path : monitoring/files/node-exporter/node-exporter-service.yaml
+
+Task: To create a node exporter service
+
+Summary: A ClusterIP service for node exporter to communicate with Prometheus
+
+Configurable Parameters:
+ spec.ports: Add / Edit ports
+
+
+Prometheus
+^^^^^^^^^^^^^
+
+File: prometheus-config.yaml
+''''''''''''''''''''''''''''''''''''''''''
+Path : monitoring/files/prometheus/prometheus-config.yaml
+
+Task: To create a config file for Prometheus
+
+Summary: A configmap, adds alert rules.
+
+Configurable Parameters:
+ data.alert.rules: Add / Edit alert rules
+
+
+File: prometheus-deployment.yaml
+'''''''''''''''''''''''''''''''''
+Path : monitoring/files/prometheus/prometheus-deployment.yaml
+
+Task: To create a Prometheus deployment
+
+Summary: The Prometheus deployment creates a single replica of Prometheus,
+with preconfigured Prometheus datasource.
+
+Configurable Parameters:
+ spec.template.spec.affinity: To change the node affinity,
+ make sure only 1 instance of prometheus is
+ running on 1 node.
+
+ spec.template.spec.ports: Add / Edit container port
+
+
+File: prometheus-pv.yaml
+'''''''''''''''''''''''''''''''''
+Path : monitoring/files/prometheus/prometheus-pv.yaml
+
+Task: To create a persistent volume for Prometheus
+
+Summary: A persistent volume for Prometheus.
+
+Configurable Parameters:
+ spec.capacity.storage: Increase / decrease size
+ spec.accessModes: To change the way PV is accessed.
+ spec.hostpath.path: To change the path of the volume
+
+
+File: prometheus-pvc.yaml
+'''''''''''''''''''''''''''''''''
+Path : monitoring/files/prometheus/prometheus-pvc.yaml
+
+Task: To create a persistent volume claim for Prometheus
+
+Summary: A persistent volume claim for Prometheus.
+
+Configurable Parameters:
+ spec.resources.requests.storage: Increase / decrease size
+
+
+File: prometheus-service.yaml
+'''''''''''''''''''''''''''''''''
+Path : monitoring/files/prometheus/prometheus-service.yaml
+
+Task: To create a service for prometheus
+
+Summary: A Nodeport type of service, prometheus native dashboard
+available here.
+
+Configurable Parameters:
+ spec.type: Options : NodePort, ClusterIP, LoadBalancer
+ spec.ports: Edit / add ports to be handled by the service
+
+
+File: main-prometheus-service.yaml
+''''''''''''''''''''''''''''''''''
+Path: monitoring/files/prometheus/main-prometheus-service.yaml
+
+Task: A service that connects both prometheus instances.
+
+Summary: A NodePort service for other services to connect to the Prometheus cluster,
+as HA Prometheus needs two independent instances of Prometheus scraping the same
+inputs with the same configuration.
+
+**Note: prometheus1-deployment, prometheus1-service are the same as
+prometheus-deployment and prometheus-service respectively.**
+
+
+Ansible Client Roles
+----------------------
+
+Role: Collectd
+~~~~~~~~~~~~~~~~~~
+
+File: main.yml
+^^^^^^^^^^^^^^^^
+Path: collectd/tasks/main.yml
+
+Task: Install collectd along with prerequisites
+
+Associated template file:
+
+collectd.conf.j2
+Path: collectd/files/collectd.conf.j2
+
+Summary: Edit this file to change the default configuration to
+be installed on the client's machine
diff --git a/docs/lma/metrics/images/dataflow.png b/docs/lma/metrics/images/dataflow.png
new file mode 100644
index 00000000..ca1ec908
--- /dev/null
+++ b/docs/lma/metrics/images/dataflow.png
Binary files differ
diff --git a/docs/lma/metrics/images/setup.png b/docs/lma/metrics/images/setup.png
new file mode 100644
index 00000000..ce6a1274
--- /dev/null
+++ b/docs/lma/metrics/images/setup.png
Binary files differ
diff --git a/docs/lma/metrics/userguide.rst b/docs/lma/metrics/userguide.rst
new file mode 100644
index 00000000..eae336d7
--- /dev/null
+++ b/docs/lma/metrics/userguide.rst
@@ -0,0 +1,226 @@
+==================
+Metrics User Guide
+==================
+
+Setup
+=======
+
+Prerequisites
+-------------------------
+- Requires 3 VMs to set up K8s
+- ``$ sudo yum install ansible``
+- ``$ pip install openshift pyyaml kubernetes`` (required for ansible K8s module)
+- Update IPs in all these files (if changed)
+ - ``ansible-server/group_vars/all.yml`` (IP of apiserver and hostname)
+ - ``ansible-server/hosts`` (IP of VMs to install)
+ - ``ansible-server/roles/monitoring/files/grafana/grafana-pv.yaml`` (IP of NFS-Server)
+ - ``ansible-server/roles/monitoring/files/alertmanager/alertmanager-config.yaml`` (IP of alert-receiver)
+
+Setup Structure
+---------------
+.. image:: images/setup.png
+
+Installation - Client Side
+----------------------------
+
+Nodes
+`````
+- **Node1** = 10.10.120.21
+- **Node4** = 10.10.120.24
+
+How is installation done?
+`````````````````````````
+Ansible playbook available in ``tools/lma/ansible-client`` folder
+
+- ``cd tools/lma/ansible-client``
+- ``ansible-playbook setup.yaml``
+
+This deploys collectd and configures it to send data to the collectd-exporter
+at 10.10.120.211 (the IP address of the current collectd-exporter instance).
+Please make appropriate changes in the config file present in ``tools/lma/ansible-client/roles/collectd/files/``
+
+Installation - Server Side
+----------------------------
+
+Nodes
+``````
+
+Inside Jumphost - POD12
+ - **VM1** = 10.10.120.211
+ - **VM2** = 10.10.120.203
+ - **VM3** = 10.10.120.204
+
+
+How is installation done?
+`````````````````````````
+**Using Ansible:**
+ - **K8s**
+ - **Prometheus:** 2 independent deployments
+ - **Alertmanager:** 2 independent deployments (cluster peers)
+ - **Grafana:** 1 Replica deployment
+    - **cAdvisor:** 1 daemonset, i.e. 3 replicas, one on each node
+ - **collectd-exporter:** 1 Replica
+ - **node-exporter:** 1 statefulset with 3 replicas
+ - **kube-state-metrics:** 1 deployment
+ - **NFS Server:** at each VM to store grafana data at following path
+ - ``/usr/share/monitoring_data/grafana``
+
+How to set up?
+``````````````
+- **To setup K8s cluster, EFK and PAG:** Run the ansible-playbook ``ansible/playbooks/setup.yaml``
+- **To clean everything:** Run the ansible-playbook ``ansible/playbooks/clean.yaml``
+
+Do we have HA?
+````````````````
+Yes
+
+Configuration
+=============
+
+K8s
+---
+Path to all yamls (Server Side)
+````````````````````````````````
+``tools/lma/ansible-server/roles/monitoring/files/``
+
+K8s namespace
+`````````````
+``monitoring``
+
+Configuration
+---------------------------
+
+Services and Ports
+``````````````````````````
+
+Services and their ports are listed below;
+you can go to the IP of any node on the given port,
+and the service will correctly redirect you
+
+
+ ====================== =======
+ Service                Port
+ ====================== =======
+ Prometheus             30900
+ Prometheus1            30901
+ Main-Prometheus        30902
+ Alertmanager           30930
+ Alertmanager1          30931
+ Grafana                30000
+ Collectd-exporter      30130
+ ====================== =======
+
+How to change Configuration?
+------------------------------
+- Ports, names of the containers, pretty much every configuration can be modified by changing the required values in the respective yaml files (``/tools/lma/ansible-server/roles/monitoring/``)
+- For metrics, on the client's machine, edit the collectd's configuration (jinja2 template) file, and add required plugins (``/tools/lma/ansible-client/roles/collectd/files/collectd.conf.j2``).
+  For more details refer to `this <https://collectd.org/wiki/index.php/First_steps>`_
+
+Where to send metrics?
+------------------------
+
+Metrics are sent to collectd exporter.
+UDP packets are sent to port 38026
+(can be configured and checked at
+``tools/lma/ansible-server/roles/monitoring/files/collectd-exporter/collectd-exporter-deployment.yaml``)
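+
+A quick reachability check from a client node is to send a throwaway UDP
+datagram to that port and watch it arrive with tcpdump on the receiving VM;
+a minimal sketch (the payload is deliberately dummy data, not a valid
+collectd packet):
+
+.. code-block:: python
+
+   # Reachability probe only -- collectd-exporter will ignore this payload.
+   import socket
+
+   sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+   sock.sendto(b'vsperf-probe', ('10.10.120.211', 38026))
+   sock.close()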
+
+Data Management
+================================
+
+DataFlow:
+--------------
+.. image:: images/dataflow.png
+
+Where is the data stored now?
+----------------------------------
+    - Grafana data (including dashboards) ==> On master, at ``/usr/share/monitoring_data/grafana`` (it is accessed by a Persistent Volume via NFS)
+ - Prometheus Data ==> On VM2 and VM3, at /usr/share/monitoring_data/prometheus
+
+   **Note: The Prometheus instances' data are also independent of each other; a shared data solution gave errors**
+
+Do we have backup of data?
+-------------------------------
+    The Prometheus instances, even though independent, scrape the same targets and
+    have the same alert rules, therefore they generate very similar data.
+
+    Grafana's NFS part of the data has no backup.
+    Dashboards' JSON files are available in the ``/tools/lma/metrics/dashboards`` directory.
+
+When containers are restarted, the data is still accessible?
+-----------------------------------------------------------------
+ Yes, unless the data directories are deleted ``(/usr/share/monitoring_data/*)`` from each node
+
+Alert Management
+==================
+
+Configure Alert receiver
+--------------------------
+- Go to file ``/tools/lma/ansible-server/roles/monitoring/files/alertmanager/alertmanager-config.yaml``
+- Under the config.yml section, under receivers, add, update, or delete receivers
+- Currently the IP of the unified alert receiver is used.
+- Alertmanager supports multiple types of receivers, you can get a `list here <https://prometheus.io/docs/alerting/latest/configuration/>`_
+
+Add new alerts
+--------------------------------------
+- Go to file ``/tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus-config.yaml``
+- Under the data section, the alert.rules file is mounted on the config-map.
+- In this file alerts are divided in 4 groups, namely:
+ - targets
+ - host and hardware
+ - container
+ - kubernetes
+- Add alerts under an existing group or add a new group, following the
+  structure of the file
+- To add a new alert, use the following structure:
+
+  | alert: alertname
+  | expr: alert rule (generally a promql conditional query)
+  | for: time-range (e.g. 5m, 10s, etc. -- the amount of time the condition needs to be true for the alert to be triggered)
+  | labels:
+  |   severity: critical (other severity options and other labels can be added here)
+  |   type: hardware
+  | annotations:
+  |   summary: <summary of the alert>
+  |   description: <describe the alert here>
+
+- For an exhaustive list of alerts, you can have a look `here <https://awesome-prometheus-alerts.grep.to/>`_
+
+Troubleshooting
+===============
+No metrics received in grafana plot
+---------------------------------------------
+- Check if all configurations are correctly done.
+- Go to Main-Prometheus's port on any one VM's IP, and check if Prometheus is getting the metrics
+- If prometheus is getting them, read grafana's logs (``kubectl -n monitoring logs <name_of_grafana_pod>``)
+- Else, have a look at collectd exporter's metrics endpoint (e.g. 10.10.120.211:30130/metrics)
+- If collectd is getting them, check Prometheus's config file to verify that collectd's IP is correct there.
+- Else, ssh to the master and check on which node collectd-exporter is scheduled (let's say VM2)
+- Now ssh to vm2
+- Use ``tcpdump -i ens3 > testdump`` (``ens3`` being the interface used to connect to the internet)
+- Grep your client node's IP and check if packets are reaching the monitoring cluster (``cat testdump | grep <ip of client>``)
+- Ideally you should see packets reaching the node; if so, check whether the collectd-exporter is running correctly and read its logs.
+- If no packets are received, error is on the client side, check collectd's config file and make sure correct collectd-exporter ip is used in the ``<network>`` section.
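+
+To check programmatically whether Prometheus sees its targets, a minimal
+sketch against the standard Prometheus HTTP API can be used (node IP and
+Main-Prometheus NodePort taken from this guide's setup):
+
+.. code-block:: python
+
+   # Query the built-in `up` metric: value "1" means the target is
+   # being scraped successfully.
+   import requests
+
+   resp = requests.get('http://10.10.120.211:30902/api/v1/query',
+                       params={'query': 'up'})
+   for result in resp.json()['data']['result']:
+       print(result['metric'].get('instance'), result['value'][1])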
+
+If no notification is received
+------------------------------
+- Go to Main-Prometheus's port on any one VM's IP (e.g. 10.10.120.211:30902) and check if Prometheus is getting the metrics
+- If no, read the "No metrics received in grafana plot" section, else read ahead.
+- Check the IP of the alert-receiver; you can see this by going to alertmanager-ip:port and checking if Alertmanager is configured correctly.
+- If yes, paste the alert rule in the Prometheus query-box and see if any metric satisfies the condition.
+- You may need to change alert rules in the alert.rules section of prometheus-config.yaml if there was a bug in the alert's rule (please read the "Add new alerts" section for detailed instructions).
+
+Reference
+=========
+- `Prometheus K8S deployment <https://www.metricfire.com/blog/how-to-deploy-prometheus-on-kubernetes/>`_
+- `HA Prometheus <https://prometheus.io/docs/introduction/faq/#can-prometheus-be-made-highly-available>`_
+- `Data Flow Diagram <https://drive.google.com/file/d/1D--LXFqU_H-fqpD57H3lJFOqcqWHoF0U/view?usp=sharing>`_
+- `Collectd Configuration <https://docs.opnfv.org/en/stable-fraser/submodules/barometer/docs/release/userguide/docker.userguide.html#build-the-collectd-docker-image>`_
+- `Alertmanager Rule Config <https://awesome-prometheus-alerts.grep.to/>`_
diff --git a/docs/openstack/index.rst b/docs/openstack/index.rst
new file mode 100644
index 00000000..6009e669
--- /dev/null
+++ b/docs/openstack/index.rst
@@ -0,0 +1,39 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) OPNFV, Spirent Communications, AT&T, Ixia and others.
+
+.. OPNFV VSPERF With Openstack master file.
+
+***************************
+OPNFV VSPERF with OPENSTACK
+***************************
+
+Introduction
+------------
+VSPERF performs the following when run with Openstack:
+
+1. Connect to Openstack (using the credentials)
+2. Deploy Traffic-Generators in a required way (defined by scenarios)
+3. Update the VSPERF configuration based on the deployment.
+4. Use the updated configuration to run test in "Trafficgen" Mode.
+5. Publish and store results.
+
+
+What to Configure?
+^^^^^^^^^^^^^^^^^^
+The configurable parameters are provided in *conf/11_openstack.conf*. They are:
+
+1. Access to Openstack Environment: Auth-URL, Username, Password, Project and Domain IDs/Name.
+2. VM Details - Name, Flavor, External-Network.
+3. Scenario - How many compute nodes to use, and how many instances of trafficgenerator to deploy.
+
+Users can customize these parameters. Assume the customized values are placed in an openstack.conf file; this file will then be used to run the test. A sketch of such a file follows.
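+
+A hypothetical excerpt of such a file is sketched below; the key names and
+values are illustrative assumptions only, consult *conf/11_openstack.conf*
+for the authoritative parameter names.
+
+.. code-block:: python
+
+   # openstack.conf -- illustrative values only
+   OS_AUTH_URL = 'http://controller:5000/v3'
+   OS_USERNAME = 'admin'
+   OS_PASSWORD = 'secret'
+   OS_PROJECT_NAME = 'vsperf'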
+
+How to run?
+^^^^^^^^^^^
+Add the --openstack flag as shown below
+
+.. code-block:: console
+
+ vsperf --openstack --conf-file openstack.conf phy2phy_tput
+
diff --git a/docs/release/release-notes/release-notes.rst b/docs/release/release-notes/release-notes.rst
index 860cca77..486beaf0 100644
--- a/docs/release/release-notes/release-notes.rst
+++ b/docs/release/release-notes/release-notes.rst
@@ -1,6 +1,193 @@
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. http://creativecommons.org/licenses/by/4.0
-.. (c) OPNFV, Intel Corporation, AT&T and others.
+.. (c) OPNFV, Intel Corporation, Spirent Communications, AT&T and others.
+
+OPNFV Jerma Release
+===================
+
+* Supported Versions - DPDK:18.11, OVS:2.12.0, VPP:19.08.1, QEMU:3.1.1, Trex:2.86
+
+* Supported Release-Requirements.
+
+ * RELREQ-6 - Openstack dataplane performance benchmarking.
+ * RELREQ-9 - Kubernetes container-networking benchmarking.
+
+* Additional Features
+
+ * OPNFV Xtesting integration - Baremetal and Openstack.
+ * Analytics of metrics and logs using Jupyter notebooks.
+ * Custom Alarms from both metrics and logs.
+ * Container metrics collection.
+
+* Traffic Generators.
+
+ * Ixia - Support for using multiple instances of Traffic-generator.
+ * Ixia - Live results support (real-time collection and storage)
+ * TRex - ETSI-NFV GS-TST009 binary search with loss-verification support.
+
+* New Tools
+
+ * Kubernetes cluster deployment.
+ * TestVNF deployment in Openstack.
+ * Server-side telemetry collection from the test-environment.
+ * Version-1 of multi-dimensional TestVNF.
+
+* Multiple bugfixes and minor improvements
+
+ * matplotlib version and log-dump.
+ * VPP socket paths.
+ * Newer version of some python packages.
+
+
+OPNFV Iruya Release
+====================
+
+* Supported Versions - DPDK:18.11, OVS:2.12.0, VPP:19.08.1, QEMU:3.1.1
+* Few bugfixes and minor improvements
+
+* New Feature: Containers to manage VSPERF.
+
+ * VSPERF Containers for both deployment and test runs
+
+* Improvement
+
+ * Results Analysis to include all 5 types of data.
+
+ * Infrastructure data
+ * End-Of-Test Results
+ * Live-Results
+ * Events from VSPERF Logs
+ * Test Environment
+
+* Usability
+
+ * Configuration Wizard tool.
+
+
+OPNFV Hunter Release
+====================
+
+* Supported Versions - DPDK:17.08, OVS:2.8.1, VPP:17.07, QEMU:2.9.1
+* Few bugfixes and minor improvements
+
+* Traffic Generators
+
+ * Spirent - Live Results Support.
+ * T-Rex - Live Results Support.
+
+* Improvement
+
+ * Results container to receive logs from Logstash/Fluentd.
+
+* CI
+
+ * Bug Fixes.
+
+
+OPNFV Gambia Release
+====================
+
+* Supported Versions - DPDK:17.08, OVS:2.8.1, VPP:17.07, QEMU:2.9.1
+* Several bugfixes and minor improvements
+
+* Documentation
+
+ * Spirent Latency histogram documentation
+
+* Virtual-Switches
+
+ * OVS-Enhancement: default bridge name and offload support.
+ * OVS-Enhancement: proper deletion of flows and bridges after stop.
+ * VSPERF-vSwitch Architecture Improvement
+
+* Tools
+
+ * Pidstat improvements
+
+* Traffic Generators
+
+ * Xena Enhancements - multi-flow and stability.
+ * T-Rex Additions - burst traffic, scapy frame, customized scapy version.
+ * Ixia: Script enhancements.
+ * Spirent: Latency-histogram support included
+
+* Tests
+
+ * Continuous stream testcase
+ * Tunnelling protocol support
+ * Custom statistics
+ * Refactoring integration testcases
+
+* CI
+
+  * Reduced daily testcases
+
+OPNFV Fraser Release
+====================
+
+* Supported Versions - DPDK:17.08, OVS:2.8.1, VPP:17.07, QEMU:2.9.1
+* Pylint 1.8.2 code conformity
+* Python virtualenv moved to python-3.
+* LTD: Requirements specification for Soak/Long Duration Tests
+* Performance Matrix functionality support
+* Several bugfixes and minor improvements
+
+* Documentation
+
+ * Configuration and installation of additional tools.
+ * Xena install document update.
+ * Installation prerequisites update
+ * Traffic Capture methods explained
+
+* Virtual-Switches
+
+ * OVS: Configurable arguments for ovs-\*ctl
+ * OVS: Fix vswitch shutdown process
+ * VPP: Define vppctl socket name
+ * VPP: Multiqueue support for VPP
+ * OVS and VPP: Improve add_phy_port error messages
+ * OVS and VPP: Updated to recent version
+
+* Tools
+
+ * Support for Stressor-VMs as a Loadgen
+ * Support for collectd as one of the collectors
+ * Support for LLC management with Intel RMD
+
+* Traffic Generators
+
+ * All Traffic-Gens: Postponed call of connect operation.
+ * Ixia: Added support of LISTs in TRAFFIC
+ * T-Rex: Version v2.38 support added.
+ * T-Rex: Support for T-Rex Traffic generator in a VM.
+ * T-Rex: Add logic for dealing with high speed cards.
+ * T-Rex: Improve error handling.
+ * T-Rex: Added support for traffic capture.
+ * T-Rex: RFC2544 verification functionality included.
+ * T-Rex: Added learning packet option.
+ * T-Rex: Added packet counts for reporting
+ * T-Rex: Added multistream support
+ * T-Rex: Added promiscuous option for SRIOV tests
+ * T-Rex: RFC2544 Throughput bugfixing
+
+* Tests
+
+ * Tests with T-Rex in VM
+ * Improvements of step driven Testcases
+ * OVS/DPDK regression tests
+ * Traffic Capture testcases added.
+
+* Installation Scripts
+
+ * Support for SLES15 and openSuse Tumbleweed
+ * Fedora installation script update
+ * rhel_path_fix: Fix pathing issue introduce by other commit
+ * Updated build scripts for Centos and RHEL to python34
+
+* CI
+
+ * Update hugepages configuration
+ * Support disabling VPP tests, if required
OPNFV Euphrates Release
=======================
diff --git a/docs/requirements.txt b/docs/requirements.txt
new file mode 100644
index 00000000..9fde2df2
--- /dev/null
+++ b/docs/requirements.txt
@@ -0,0 +1,2 @@
+lfdocs-conf
+sphinx_opnfv_theme
diff --git a/docs/testing/developer/devguide/design/trafficgen_integration_guide.rst b/docs/testing/developer/devguide/design/trafficgen_integration_guide.rst
index c88b80ed..671c7fd8 100644
--- a/docs/testing/developer/devguide/design/trafficgen_integration_guide.rst
+++ b/docs/testing/developer/devguide/design/trafficgen_integration_guide.rst
@@ -199,13 +199,20 @@ functions:
Note: There are parameters specific to testing of tunnelling protocols,
which are discussed in detail at :ref:`integration-tests` userguide.
+ Note: A detailed description of the ``TRAFFIC`` dictionary can be found at
+ :ref:`configuration-of-traffic-dictionary`.
+
* param **traffic_type**: One of the supported traffic types,
- e.g. **rfc2544_throughput**, **rfc2544_continuous**
- or **rfc2544_back2back**.
- * param **frame_rate**: Defines desired percentage of frame
- rate used during continuous stream tests.
+ e.g. **rfc2544_throughput**, **rfc2544_continuous**,
+ **rfc2544_back2back** or **burst**.
* param **bidir**: Specifies if generated traffic will be full-duplex
(true) or half-duplex (false).
+ * param **frame_rate**: Defines desired percentage of frame
+ rate used during continuous stream tests.
+ * param **burst_size**: Defines a number of frames in the single burst,
+ which is sent by burst traffic type. Burst size is applied for each
+ direction, i.e. the total number of tx frames will be 2*burst_size
+ in case of bidirectional traffic.
* param **multistream**: Defines number of flows simulated by traffic
generator. Value 0 disables MultiStream feature.
* param **stream_type**: Stream Type defines ISO OSI network layer
@@ -224,6 +231,8 @@ functions:
**dstport** and l4 on/off switch **enabled**.
* param **vlan**: A dictionary with vlan specific parameters,
e.g. **priority**, **cfi**, **id** and vlan on/off switch **enabled**.
+ * param **scapy**: A dictionary with definition of the frame content for both traffic
+ directions. The frame content is defined by a SCAPY notation.
* param **tests**: Number of times the test is executed.
* param **duration**: Duration of continuous test or per iteration duration
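+
+  As a minimal illustration of these parameters, a burst test could be
+  configured with a ``TRAFFIC`` override like the following sketch (the
+  values shown are illustrative assumptions, not defaults):
+
+  .. code-block:: python
+
+     TRAFFIC = {
+         'traffic_type': 'burst',
+         'bidir': 'True',     # 2*burst_size tx frames in total
+         'burst_size': 64,
+         'multistream': 0,    # MultiStream feature disabled
+     }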
diff --git a/docs/testing/developer/devguide/design/vswitchperf_design.rst b/docs/testing/developer/devguide/design/vswitchperf_design.rst
index 33051493..5fa892e0 100644
--- a/docs/testing/developer/devguide/design/vswitchperf_design.rst
+++ b/docs/testing/developer/devguide/design/vswitchperf_design.rst
@@ -1,6 +1,6 @@
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. http://creativecommons.org/licenses/by/4.0
-.. (c) OPNFV, Intel Corporation, AT&T and others.
+.. (c) OPNFV, Intel Corporation, AT&T, Tieto and others.
.. _vsperf-design:
@@ -23,7 +23,7 @@ Example Connectivity to DUT
Establish connectivity to the VSPERF DUT Linux host. If this is in an OPNFV lab
following the steps provided by `Pharos <https://www.opnfv.org/community/projects/pharos>`_
-to `access the POD <https://wiki.opnfv.org/display/pharos/Pharos+Lab+Support>`_
+to `access the POD <https://wiki.opnfv.org/display/INF/INFRA+Lab+Support>`_
The following steps establish the VSPERF environment.
@@ -291,8 +291,8 @@ Detailed description of ``TRAFFIC`` dictionary items follows:
.. code-block:: console
'traffic_type' - One of the supported traffic types.
- E.g. rfc2544_throughput, rfc2544_back2back
- or rfc2544_continuous
+ E.g. rfc2544_throughput, rfc2544_back2back,
+ rfc2544_continuous or burst
Data type: str
Default value: "rfc2544_throughput".
'bidir' - Specifies if generated traffic will be full-duplex (True)
@@ -304,6 +304,12 @@ Detailed description of ``TRAFFIC`` dictionary items follows:
continuous stream tests.
Data type: int
Default value: 100.
+ 'burst_size' - Defines the number of frames in a single burst, which is sent
+ by the burst traffic type. The burst size is applied per direction,
+ i.e. the total number of tx frames will be 2*burst_size in case of
+ bidirectional traffic.
+ Data type: int
+ Default value: 100.
'multistream' - Defines number of flows simulated by traffic generator.
Value 0 disables multistream feature
Data type: int
@@ -326,7 +332,6 @@ Detailed description of ``TRAFFIC`` dictionary items follows:
feature. If enabled, it will implicitly insert a flow
for each stream. If multistream is disabled, then
pre-installed flows will be ignored.
- Note: It is supported only for p2p deployment scenario.
Data type: str
Supported values:
"Yes" - flows will be inserted into OVS
@@ -415,6 +420,77 @@ Detailed description of ``TRAFFIC`` dictionary items follows:
congestion (DEI header field).
Data type: int (NOTE: must fit to 1 bit)
Default value: 0
+ 'capture' - A dictionary with traffic capture configuration.
+ NOTE: It is supported only by T-Rex traffic generator.
+ 'enabled' - Specifies if traffic should be captured
+ Data type: bool
+ Default value: False
+ 'tx_ports' - A list of ports, where frames transmitted towards DUT will
+ be captured. Ports have numbers 0 and 1. TX packet capture
+ is disabled if list of ports is empty.
+ Data type: list
+ Default value: [0]
+ 'rx_ports' - A list of ports, where frames received from DUT will
+ be captured. Ports have numbers 0 and 1. RX packet capture
+ is disabled if list of ports is empty.
+ Data type: list
+ Default value: [1]
+ 'count' - A number of frames to be captured. The same count value
+ is applied to both TX and RX captures.
+ Data type: int
+ Default value: 1
+ 'filter' - An expression used to filter TX and RX packets. It uses the same
+ syntax as pcap library. See pcap-filter man page for additional
+ details.
+ Data type: str
+ Default value: ''
+ 'scapy' - A dictionary with the definition of the frame content for both traffic
+ directions. The frame content is defined in SCAPY notation.
+ NOTE: It is supported only by the T-Rex traffic generator.
+ The following keywords can be used to refer to the related parts of
+ the TRAFFIC dictionary:
+ Ether_src - refers to TRAFFIC['l2']['srcmac']
+ Ether_dst - refers to TRAFFIC['l2']['dstmac']
+ IP_proto - refers to TRAFFIC['l3']['proto']
+ IP_PROTO - refers to upper case version of TRAFFIC['l3']['proto']
+ IP_src - refers to TRAFFIC['l3']['srcip']
+ IP_dst - refers to TRAFFIC['l3']['dstip']
+ IP_PROTO_sport - refers to TRAFFIC['l4']['srcport']
+ IP_PROTO_dport - refers to TRAFFIC['l4']['dstport']
+ Dot1Q_prio - refers to TRAFFIC['vlan']['priority']
+ Dot1Q_id - refers to TRAFFIC['vlan']['cfi']
+ Dot1Q_vlan - refers to TRAFFIC['vlan']['id']
+ '0' - A string with the frame definition for the 1st direction.
+ Data type: str
+ Default value: 'Ether(src={Ether_src}, dst={Ether_dst})/'
+ 'Dot1Q(prio={Dot1Q_prio}, id={Dot1Q_id}, vlan={Dot1Q_vlan})/'
+ 'IP(proto={IP_proto}, src={IP_src}, dst={IP_dst})/'
+ '{IP_PROTO}(sport={IP_PROTO_sport}, dport={IP_PROTO_dport})'
+ '1' - A string with the frame definition for the 2nd direction.
+ Data type: str
+ Default value: 'Ether(src={Ether_dst}, dst={Ether_src})/'
+ 'Dot1Q(prio={Dot1Q_prio}, id={Dot1Q_id}, vlan={Dot1Q_vlan})/'
+ 'IP(proto={IP_proto}, src={IP_dst}, dst={IP_src})/'
+ '{IP_PROTO}(sport={IP_PROTO_dport}, dport={IP_PROTO_sport})',
+ 'latency_histogram'
+ - A dictionary with the definition of latency histogram provisioning in results.
+ 'enabled' - Specifies if the histogram provisioning is enabled or not.
+ 'type' - Defines how the histogram is provided. Currently only 'Default' is defined.
+ 'Default' - Default histogram as provided by the traffic generator.
+ 'imix' - A dictionary with the IMIX specification.
+ 'enabled' - Specifies if IMIX is enabled or not.
+ 'type' - The specification type - denotes how IMIX is specified.
+ Currently only the 'genome' type is defined.
+ Other types (e.g. table-of-proportions) can be added in the future.
+ 'genome' - The genome encoding of packet sizes and their ratio for IMIX.
+ The ratio is inferred from the number of occurrences of particular
+ genome characters. Genome encoding is described in RFC 6985; this
+ specification is closest to the method described in section 6.2
+ of RFC 6985.
+ Ex: 'aaaaaaaddddg' denotes a 7:4:1 ratio of packet sizes 64:512:1518.
+ Note: The exact sequence is not maintained, only the ratio of packets
+ is ensured.
+ Data type: str
+ Default Value: 'aaaaaaaddddg'
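+
+As a worked example of the genome encoding (an illustrative sketch, not VSPERF
+code; it uses the size mapping from the example above, i.e. 'a' = 64,
+'d' = 512, 'g' = 1518):
+
+.. code-block:: python
+
+    from collections import Counter
+
+    genome = 'aaaaaaaddddg'
+    sizes = {'a': 64, 'd': 512, 'g': 1518}
+
+    counts = Counter(genome)                         # {'a': 7, 'd': 4, 'g': 1}
+    ratio = {sizes[c]: n for c, n in counts.items()}
+    avg = sum(s * n for s, n in ratio.items()) / len(genome)
+    print(ratio)   # {64: 7, 512: 4, 1518: 1} -> ratio 7:4:1
+    print(avg)     # 334.5 -> average frame size in bytes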
.. _configuration-of-guest-options:
@@ -719,6 +795,13 @@ As it is able to forward traffic between multiple VM NIC pairs.
Note: In case of ``linux_bridge``, all NICs are connected to the same
bridge inside the VM.
+Note: If the multistream feature is configured and ``pre_installed_flows``
+is set to ``Yes``, then stream specific flows will be inserted only for connections
+originating at physical ports. The rest of the flows will be based on port
+numbers only. The same logic applies if the ``flow_type`` TRAFFIC option
+is set to ``ip``. This configuration avoids a testcase malfunction if frame headers
+are modified inside the VM (e.g. MAC swap or IP change).
+
VM, vSwitch, Traffic Generator Independence
===========================================
@@ -762,7 +845,7 @@ ITrafficGenerator
connect()
disconnect()
- send_burst_traffic(traffic, numpkts, time, framerate)
+ send_burst_traffic(traffic, time)
send_cont_traffic(traffic, time, framerate)
start_cont_traffic(traffic, time, framerate)
@@ -854,6 +937,10 @@ Vsperf uses a standard set of routing tables in order to allow tests to easily
mix and match Deployment Scenarios (PVP, P2P topology), Tuple Matching and
Frame Modification requirements.
+The usage of routing tables is driven by the configuration parameter ``OVS_ROUTING_TABLES``.
+Routing tables are disabled by default (i.e. the parameter is set to ``False``) for better
+comparison of results among supported vSwitches (e.g. OVS vs. VPP).
+
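+For example, routing tables can be enabled for a single run via a parameter
+override on the command line (``phy2phy_tput`` is just an illustrative
+testcase name):
+
+.. code-block:: console
+
+    $ ./vsperf --test-params "OVS_ROUTING_TABLES=True" phy2phy_tput
+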
.. code-block:: console
+--------------+
diff --git a/docs/testing/developer/devguide/index.rst b/docs/testing/developer/devguide/index.rst
index 49659792..64a4758c 100644
--- a/docs/testing/developer/devguide/index.rst
+++ b/docs/testing/developer/devguide/index.rst
@@ -31,7 +31,7 @@ new techniques together. A new IETF benchmarking specification (RFC8204) is base
2015. VSPERF is also contributing to development of ETSI NFV test specifications through the Test and Open Source
Working Group.
-* Wiki: https://wiki.opnfv.org/characterize_vswitch_performance_for_telco_nfv_use_cases
+* Wiki: https://wiki.opnfv.org/display/vsperf
* Repository: https://git.opnfv.org/vswitchperf
* Artifacts: https://artifacts.opnfv.org/vswitchperf.html
* Continuous Integration: https://build.opnfv.org/ci/view/vswitchperf/
@@ -43,7 +43,6 @@ Design Guides
.. toctree::
:caption: Traffic Gen Integration, VSPERF Design, Test Design, Test Plan
:maxdepth: 2
- :numbered:
./design/trafficgen_integration_guide.rst
./design/vswitchperf_design.rst
@@ -75,6 +74,3 @@ VSPERF CI Test Cases
:numbered:
CI Test cases run daily on the VSPERF Pharos POD for master and stable branches.
-
- ./results/scenario.rst
- ./results/results.rst
diff --git a/docs/testing/developer/devguide/requirements/ietf_draft/rfc8204-vsperf-bmwg-vswitch-opnfv.rst b/docs/testing/developer/devguide/requirements/ietf_draft/rfc8204-vsperf-bmwg-vswitch-opnfv.rst
index ee7f98b5..10b07d54 100644
--- a/docs/testing/developer/devguide/requirements/ietf_draft/rfc8204-vsperf-bmwg-vswitch-opnfv.rst
+++ b/docs/testing/developer/devguide/requirements/ietf_draft/rfc8204-vsperf-bmwg-vswitch-opnfv.rst
@@ -13,7 +13,7 @@ informational RFC published by the IETF available here https://tools.ietf.org/ht
For more information about VSPERF refer to:
-* Wiki: https://wiki.opnfv.org/characterize_vswitch_performance_for_telco_nfv_use_cases
+* Wiki: https://wiki.opnfv.org/display/vsperf
* Repository: https://git.opnfv.org/vswitchperf
* Artifacts: https://artifacts.opnfv.org/vswitchperf.html
* Continuous Integration: https://build.opnfv.org/ci/view/vswitchperf/
diff --git a/docs/testing/developer/devguide/requirements/vswitchperf_ltd.rst b/docs/testing/developer/devguide/requirements/vswitchperf_ltd.rst
index e1372520..1ea99f7e 100644
--- a/docs/testing/developer/devguide/requirements/vswitchperf_ltd.rst
+++ b/docs/testing/developer/devguide/requirements/vswitchperf_ltd.rst
@@ -62,21 +62,21 @@ References
==========
* `RFC 1242 Benchmarking Terminology for Network Interconnection
- Devices <http://www.ietf.org/rfc/rfc1242.txt>`__
+ Devices <https://www.ietf.org/rfc/rfc1242.txt>`__
* `RFC 2544 Benchmarking Methodology for Network Interconnect
- Devices <http://www.ietf.org/rfc/rfc2544.txt>`__
+ Devices <https://www.ietf.org/rfc/rfc2544.txt>`__
* `RFC 2285 Benchmarking Terminology for LAN Switching
- Devices <http://www.ietf.org/rfc/rfc2285.txt>`__
+ Devices <https://www.ietf.org/rfc/rfc2285.txt>`__
* `RFC 2889 Benchmarking Methodology for LAN Switching
- Devices <http://www.ietf.org/rfc/rfc2889.txt>`__
+ Devices <https://www.ietf.org/rfc/rfc2889.txt>`__
* `RFC 3918 Methodology for IP Multicast
- Benchmarking <http://www.ietf.org/rfc/rfc3918.txt>`__
+ Benchmarking <https://www.ietf.org/rfc/rfc3918.txt>`__
* `RFC 4737 Packet Reordering
- Metrics <http://www.ietf.org/rfc/rfc4737.txt>`__
+ Metrics <https://www.ietf.org/rfc/rfc4737.txt>`__
* `RFC 5481 Packet Delay Variation Applicability
- Statement <http://www.ietf.org/rfc/rfc5481.txt>`__
+ Statement <https://www.ietf.org/rfc/rfc5481.txt>`__
* `RFC 6201 Device Reset
- Characterization <http://tools.ietf.org/html/rfc6201>`__
+ Characterization <https://tools.ietf.org/html/rfc6201>`__
.. 3.2
@@ -413,7 +413,21 @@ Test ID: LTD.Throughput.RFC2889.MaxForwardingRateSoak
**Title**: RFC 2889 X% packet loss Max Forwarding Rate Soak Test
- **Prerequisite Test** LTD.Throughput.RFC2544.PacketLossRatio
+ **Prerequisite Tests**:
+
+ LTD.Throughput.RFC2544.PacketLossRatio will determine the offered load and
+ frame size for which the maximum theoretical throughput of the interface
+ has not been achieved. As described in RFC 2544 section 24, the final
+ determination of the benchmark SHOULD be conducted using a full length
+ trial, and for this purpose the duration is 5 minutes with zero loss ratio.
+
+ It is also essential to verify that the Traffic Generator has sufficient
+ stability to conduct Soak tests. Therefore, a prerequisite is to perform
+ this test with the DUT removed and replaced with a cross-over cable (or
+ other equivalent very low overhead method such as a loopback in a HW switch),
+ so that the traffic generator (and any other network involved) can be tested
+ over the Soak period. Note that this test may be challenging for software-
+ based traffic generators.
**Priority**:
@@ -422,12 +436,19 @@ Test ID: LTD.Throughput.RFC2889.MaxForwardingRateSoak
The aim of this test is to understand the Max Forwarding Rate stability
over an extended test duration in order to uncover any outliers. To allow
for an extended test duration, the test should ideally run for 24 hours
- or, if this is not possible, for at least 6 hours. For this test, each frame
- size must be sent at the highest Throughput rate with X% packet loss, as
- determined in the prerequisite test. The default loss percentages to be
- tested are: - X = 0% - X = 10^-7%
+ or if this is not possible, for at least 6 hours.
- Note: Other values can be tested if required by the user.
+ For this test, one frame size must be sent at the highest frame rate with
+ X% packet loss ratio, as determined in the prerequisite test (a short trial).
+ The loss ratio shall be measured and recorded every 5 minutes during the test
+ (it may be sufficient to collect lost frame counts and divide by the number
+ of frames sent in 5 minutes to see if a threshold has been crossed,
+ and accept some small inaccuracy in the threshold evaluation, not the result).
+ The default loss ratio is X = 0% and loss ratio > 10^-7% is the default
+ threshold to terminate the test early (or inform the test operator of
+ the failure status).
+
+ Note: Other values of X and loss threshold can be tested if required by the user.
**Expected Result**:
@@ -441,13 +462,13 @@ Test ID: LTD.Throughput.RFC2889.MaxForwardingRateSoak
and reporting any time intervals with packet loss. The
`RFC2889 <https://www.rfc-editor.org/rfc/rfc2889.txt>`__
Forwarding Rate shall be measured in each interval.
- An interval of 60s is suggested.
+ An interval of 300s is suggested.
- CPU and memory utilization may also be collected as part of this
test, to determine the vSwitch's performance footprint on the system.
- The `RFC5481 <https://www.rfc-editor.org/rfc/rfc5481.txt>`__
PDV form of delay variation on the traffic flow,
- using the 99th percentile.
+ using the 99th percentile, may also be collected.
.. 3.2.2.1.7
@@ -457,7 +478,22 @@ Test ID: LTD.Throughput.RFC2889.MaxForwardingRateSoakFrameModification
**Title**: RFC 2889 Max Forwarding Rate Soak Test with Frame Modification
**Prerequisite Test**:
+
LTD.Throughput.RFC2544.PacketLossRatioFrameModification (0% Packet Loss)
+ will determine the offered load and
+ frame size for which the maximum theoretical throughput of the interface
+ has not been achieved. As described in RFC 2544 section 24, the final
+ determination of the benchmark SHOULD be conducted using a full length
+ trial, and for this purpose the duration is 5 minutes with zero loss ratio.
+
+ It is also essential to verify that the Traffic Generator has sufficient
+ stability to conduct Soak tests. Therefore, a prerequisite is to perform
+ this test with the DUT removed and replaced with a cross-over cable (or
+ other equivalent very low overhead method such as a loopback in a HW switch),
+ so that the traffic generator (and any other network involved) can be tested
+ over the Soak period. Note that this test may be challenging for software-
+ based traffic generators.
+
**Priority**:
@@ -466,9 +502,19 @@ Test ID: LTD.Throughput.RFC2889.MaxForwardingRateSoakFrameModification
The aim of this test is to understand the Max Forwarding Rate stability over an
extended test duration in order to uncover any outliers. To allow for an
extended test duration, the test should ideally run for 24 hours or, if
- this is not possible, for at least 6 hour. For this test, each frame
- size must be sent at the highest Throughput rate with 0% packet loss, as
- determined in the prerequisite test.
+ this is not possible, for at least 6 hours.
+
+ For this test, one frame size must be sent at the highest frame rate with
+ X% packet loss ratio, as determined in the prerequisite test (a short trial).
+ The loss ratio shall be measured and recorded every 5 minutes during the test
+ (it may be sufficient to collect lost frame counts and divide by the number
+ of frames sent in 5 minutes to see if a threshold has been crossed,
+ and accept some small inaccuracy in the threshold evaluation, not the result).
+ The default loss ratio is X = 0% and loss ratio > 10^-7% is the default
+ threshold to terminate the test early (or inform the test operator of
+ the failure status).
+
+ Note: Other values of X and loss threshold can be tested if required by the user.
During this test, the DUT must perform the following operations on the
traffic flow:
@@ -498,13 +544,13 @@ Test ID: LTD.Throughput.RFC2889.MaxForwardingRateSoakFrameModification
and reporting any time intervals with packet loss. The
`RFC2889 <https://www.rfc-editor.org/rfc/rfc2889.txt>`__
Forwarding Rate shall be measured in each interval.
- An interval of 60s is suggested.
+ An interval of 300s is suggested.
- CPU and memory utilization may also be collected as part of this
test, to determine the vSwitch's performance footprint on the system.
- The `RFC5481 <https://www.rfc-editor.org/rfc/rfc5481.txt>`__
PDV form of delay variation on the traffic flow, using the 99th
- percentile.
+ percentile, may also be collected.
.. 3.2.2.1.8
@@ -1150,7 +1196,22 @@ Test ID: LTD.PacketDelayVariation.RFC3393.Soak
**Title**: Packet Delay Variation Soak Test
- **Prerequisite Tests**: LTD.Throughput.RFC2544.PacketLossRatio (0% Packet Loss)
+ **Prerequisite Tests**:
+
+ LTD.Throughput.RFC2544.PacketLossRatio will determine the offered load and
+ frame size for which the maximum theoretical throughput of the interface
+ has not been achieved. As described in RFC 2544 section 24, the final
+ determination of the benchmark SHOULD be conducted using a full length
+ trial, and for this purpose the duration is 5 minutes with zero loss ratio.
+
+ It is also essential to verify that the Traffic Generator has sufficient
+ stability to conduct Soak tests. Therefore, a prerequisite is to perform
+ this test with the DUT removed and replaced with a cross-over cable (or
+ other equivalent very low overhead method such as a loopback in a HW switch),
+ so that the traffic generator (and any other network involved) can be tested
+ over the Soak period. Note that this test may be challenging for software-
+ based traffic generators.
+
**Priority**:
@@ -1160,9 +1221,20 @@ Test ID: LTD.PacketDelayVariation.RFC3393.Soak
variation for different frame sizes over an extended test duration and
to determine if there are any outliers. To allow for an extended test
duration, the test should ideally run for 24 hours or, if this is not
- possible, for at least 6 hour. For this test, each frame size must be
- sent at the highest possible throughput with 0% packet loss, as
- determined in the prerequisite test.
+ possible, for at least 6 hours.
+
+ For this test, one frame size must be sent at the highest frame rate with
+ X% packet loss ratio, as determined in the prerequisite test (a short trial).
+ The loss ratio shall be measured and recorded every 5 minutes during the test
+ (it may be sufficient to collect lost frame counts and divide by the number
+ of frames sent in 5 minutes to see if a threshold has been crossed,
+ and accept some small inaccuracy in the threshold evaluation, not the result).
+ The default loss ratio is X = 0% and loss ratio > 10^-7% is the default
+ threshold to terminate the test early (or inform the test operator of
+ the failure status).
+
+ Note: Other values of X and loss threshold can be tested if required by the user.
+
**Expected Result**:
@@ -1173,7 +1245,7 @@ Test ID: LTD.PacketDelayVariation.RFC3393.Soak
- The packet delay variation value for traffic passing through the DUT.
- The `RFC5481 <https://www.rfc-editor.org/rfc/rfc5481.txt>`__
PDV form of delay variation on the traffic flow,
- using the 99th percentile, for each 60s interval during the test.
+ using the 99th percentile, for each 300s interval during the test.
- CPU and memory utilization may also be collected as part of this
test, to determine the vSwitch's performance footprint on the system.
diff --git a/docs/testing/developer/devguide/requirements/vswitchperf_ltp.rst b/docs/testing/developer/devguide/requirements/vswitchperf_ltp.rst
index e5147bea..c0b63859 100644
--- a/docs/testing/developer/devguide/requirements/vswitchperf_ltp.rst
+++ b/docs/testing/developer/devguide/requirements/vswitchperf_ltp.rst
@@ -63,21 +63,21 @@ References
===============
* `RFC 1242 Benchmarking Terminology for Network Interconnection
- Devices <http://www.ietf.org/rfc/rfc1242.txt>`__
+ Devices <https://www.ietf.org/rfc/rfc1242.txt>`__
* `RFC 2544 Benchmarking Methodology for Network Interconnect
- Devices <http://www.ietf.org/rfc/rfc2544.txt>`__
+ Devices <https://www.ietf.org/rfc/rfc2544.txt>`__
* `RFC 2285 Benchmarking Terminology for LAN Switching
- Devices <http://www.ietf.org/rfc/rfc2285.txt>`__
+ Devices <https://www.ietf.org/rfc/rfc2285.txt>`__
* `RFC 2889 Benchmarking Methodology for LAN Switching
- Devices <http://www.ietf.org/rfc/rfc2889.txt>`__
+ Devices <https://www.ietf.org/rfc/rfc2889.txt>`__
* `RFC 3918 Methodology for IP Multicast
- Benchmarking <http://www.ietf.org/rfc/rfc3918.txt>`__
+ Benchmarking <https://www.ietf.org/rfc/rfc3918.txt>`__
* `RFC 4737 Packet Reordering
- Metrics <http://www.ietf.org/rfc/rfc4737.txt>`__
+ Metrics <https://www.ietf.org/rfc/rfc4737.txt>`__
* `RFC 5481 Packet Delay Variation Applicability
- Statement <http://www.ietf.org/rfc/rfc5481.txt>`__
+ Statement <https://www.ietf.org/rfc/rfc5481.txt>`__
* `RFC 6201 Device Reset
- Characterization <http://tools.ietf.org/html/rfc6201>`__
+ Characterization <https://tools.ietf.org/html/rfc6201>`__
.. 3.1.4
@@ -633,7 +633,7 @@ General Methodology:
--------------------------
To establish the baseline performance of the virtual switch, tests would
initially be run with a simple workload in the VNF (the recommended
-simple workload VNF would be `DPDK <http://www.dpdk.org/>`__'s testpmd
+simple workload VNF would be `DPDK <https://www.dpdk.org/>`__'s testpmd
application forwarding packets in a VM or vloop\_vnf a simple kernel
module that forwards traffic between two network interfaces inside the
virtualized environment while bypassing the networking stack).
@@ -656,7 +656,7 @@ tests:
- Reference application: Simple forwarding or Open Source VNF.
- Frame size (bytes): 64, 128, 256, 512, 1024, 1280, 1518, 2K, 4k OR
Packet size based on use-case (e.g. RTP 64B, 256B) OR Mix of packet sizes as
- maintained by the Functest project <https://wiki.opnfv.org/traffic_profile_management>.
+ maintained by the Functest project <https://wiki.opnfv.org/display/functest/Traffic+Profile+Management>.
- Reordering check: Tests should confirm that packets within a flow are
not reordered.
- Duplex: Unidirectional / Bidirectional. Default: Full duplex with
diff --git a/docs/testing/developer/devguide/results/scenario.rst b/docs/testing/developer/devguide/results/scenario.rst
index dbdc7877..f7eadd33 100644
--- a/docs/testing/developer/devguide/results/scenario.rst
+++ b/docs/testing/developer/devguide/results/scenario.rst
@@ -34,7 +34,7 @@ Deployment topologies:
Loopback applications in the Guest:
-* `DPDK testpmd <http://dpdk.org/doc/guides/testpmd_app_ug/index.html>`_.
+* `DPDK testpmd <http://doc.dpdk.org/guides/testpmd_app_ug/index.html>`_.
* Linux Bridge.
* :ref:`l2fwd-module`
diff --git a/docs/testing/user/configguide/index.rst b/docs/testing/user/configguide/index.rst
index 83908a97..87c32d11 100644
--- a/docs/testing/user/configguide/index.rst
+++ b/docs/testing/user/configguide/index.rst
@@ -31,7 +31,7 @@ new techniques together. A new IETF benchmarking specification (RFC8204) is base
2015. VSPERF is also contributing to development of ETSI NFV test specifications through the Test and Open Source
Working Group.
-* Wiki: https://wiki.opnfv.org/characterize_vswitch_performance_for_telco_nfv_use_cases
+* Wiki: https://wiki.opnfv.org/display/vsperf
* Repository: https://git.opnfv.org/vswitchperf
* Artifacts: https://artifacts.opnfv.org/vswitchperf.html
* Continuous Integration: https://build.opnfv.org/ci/view/vswitchperf/
@@ -48,6 +48,7 @@ VSPERF Install and Configuration
./installation.rst
./upgrade.rst
./trafficgen.rst
+ ./tools.rst
=================
VSPERF Test Guide
@@ -56,10 +57,10 @@ VSPERF Test Guide
.. toctree::
:caption: VSPERF Test Execution
:maxdepth: 2
- :numbered:
../userguide/testusage.rst
../userguide/teststeps.rst
../userguide/integration.rst
+ ../userguide/trafficcapture.rst
../userguide/yardstick.rst
../userguide/testlist.rst
diff --git a/docs/testing/user/configguide/installation.rst b/docs/testing/user/configguide/installation.rst
index 7f4d640b..b950442e 100644
--- a/docs/testing/user/configguide/installation.rst
+++ b/docs/testing/user/configguide/installation.rst
@@ -53,6 +53,7 @@ Supported Operating Systems
* SLES 15
* RedHat 7.2 Enterprise Linux
* RedHat 7.3 Enterprise Linux
+* RedHat 7.5 Enterprise Linux
* Ubuntu 14.04
* Ubuntu 16.04
* Ubuntu 16.10 (kernel 4.8 requires DPDK 16.11 and newer)
@@ -166,8 +167,12 @@ repository provided by Software Collections (`a link`_). The installation script
will also use `virtualenv`_ to create a vsperf virtual environment, which is
isolated from the default Python environment, using the Python3 package located
in **/usr/bin/python3**. This environment will reside in a directory called
-**vsperfenv** in $HOME. It will ensure, that system wide Python installation
- is not modified or broken by VSPERF installation. The complete list of Python
+**vsperfenv** in $HOME.
+
+This ensures that the system-wide Python installation is not modified or
+broken by the VSPERF installation.
+
+The complete list of Python
packages installed inside virtualenv can be found in the file
``requirements.txt``, which is located at the vswitchperf repository.
@@ -176,6 +181,11 @@ built from upstream source due to kernel incompatibilities. Please see the
instructions in the vswitchperf_design document for details on configuring
OVS Vanilla for binary package usage.
+**NOTE:** For RHEL 7.5 Enterprise, DPDK and Open vSwitch are not built from
+upstream sources due to kernel incompatibilities. Please use subscription
+channels to obtain binary equivalents of the openvswitch and dpdk packages, or
+build binaries using the instructions from openvswitch.org and dpdk.org.
+
.. _vpp-installation:
VPP installation
@@ -202,7 +212,7 @@ new shell session. Its activation is specific to your OS:
.. code:: bash
- $ scl enable python33 bash
+ $ scl enable rh-python34 bash
$ source $HOME/vsperfenv/bin/activate
* Fedora and Ubuntu
@@ -260,8 +270,8 @@ running any of the above. For example:
export http_proxy=proxy.mycompany.com:123
export https_proxy=proxy.mycompany.com:123
-.. _a link: http://www.softwarecollections.org/en/scls/rhscl/python33/
-.. _virtualenv: https://virtualenv.readthedocs.org/en/latest/
+.. _a link: https://www.softwarecollections.org/en/scls/rhscl/python33/
+.. _virtualenv: https://virtualenv.pypa.io/en/latest/
.. _vloop-vnf-ubuntu-14.04_20160823: http://artifacts.opnfv.org/vswitchperf/vnf/vloop-vnf-ubuntu-14.04_20160823.qcow2
.. _vloop-vnf-ubuntu-14.04_20160804: http://artifacts.opnfv.org/vswitchperf/vnf/vloop-vnf-ubuntu-14.04_20160804.qcow2
.. _vloop-vnf-ubuntu-14.04_20160303: http://artifacts.opnfv.org/vswitchperf/vnf/vloop-vnf-ubuntu-14.04_20160303.qcow2
@@ -320,7 +330,7 @@ to your OS documentation to set hugepages correctly. It is recommended to set
the required amount of hugepages to be allocated by default on reboots.
Information on hugepage requirements for dpdk can be found at
-http://dpdk.org/doc/guides/linux_gsg/sys_reqs.html
+http://doc.dpdk.org/guides/linux_gsg/sys_reqs.html
You can review your hugepage amounts by executing the following command
@@ -350,7 +360,7 @@ default on the Linux DUT
VSPerf recommends the latest tuned-adm package, which can be downloaded from the
following location:
-http://www.tuned-project.org/2017/04/27/tuned-2-8-0-released/
+https://github.com/redhat-performance/tuned/releases
Follow the instructions to install the latest tuned-adm onto your system. For
current RHEL customers you should already have the most current version. You
diff --git a/docs/testing/user/configguide/tools.rst b/docs/testing/user/configguide/tools.rst
new file mode 100644
index 00000000..72e515fa
--- /dev/null
+++ b/docs/testing/user/configguide/tools.rst
@@ -0,0 +1,227 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) OPNFV, Intel Corporation, Spirent, AT&T and others.
+
+.. _additional-tools-configuration:
+
+=============================================
+'vsperf' Additional Tools Configuration Guide
+=============================================
+
+Overview
+--------
+
+VSPERF supports the following categories of additional tools:
+
+ * `Infrastructure Metrics Collectors`_
+ * `Load Generators`_
+ * `L3 Cache Management`_
+
+Under each category, there are one or more tools supported by VSPERF.
+This guide provides the details of how to install (if required)
+and configure the above mentioned tools.
+
+.. _`Infrastructure Metrics Collectors`:
+
+Infrastructure Metrics Collection
+---------------------------------
+
+VSPERF supports the following two tools for collecting and reporting the metrics:
+
+* pidstat
+* collectd
+
+*pidstat* is a command available on Linux systems, used for monitoring individual
+tasks currently managed by the Linux kernel. In VSPERF this command is used to
+monitor the *ovs-vswitchd*, *ovsdb-server* and *kvm* processes.
+
+*collectd* is a Linux application that collects, stores and transfers various system
+metrics. For every category of metrics, there is a separate plugin in collectd. For
+example, the CPU plugin and the Interface plugin provide all the CPU metrics and interface
+metrics, respectively. CPU metrics may include user-time, system-time, etc., whereas
+interface metrics may include received-packets, dropped-packets, etc.
+
+Installation
+^^^^^^^^^^^^
+
+No installation is required for *pidstat*, whereas collectd has to be installed
+separately. For the installation of collectd, we recommend following the process
+described in the most recent release of the *OPNFV-Barometer* project, which can be
+found at `Barometer <https://opnfv-barometer.readthedocs.io/en/latest/release/userguide>`_
+
+VSPERF assumes that collectd is installed and configured to send metrics over localhost.
+The metrics sent should be for the following categories: CPU, Processes, Interface,
+OVS, DPDK, Intel-RDT.
+
+For multicmd, apart from collectd, installation of PROX is also necessary.
+Installation steps for PROX can be found at `DPPD-PROX <https://github.com/opnfv/samplevnf/tree/master/VNFs/DPPD-PROX>`_
+
+Configuration
+^^^^^^^^^^^^^
+
+The configuration file for the collectors can be found in **conf/05_collector.conf**.
+*pidstat* specific configuration includes:
+
+* ``PIDSTAT_MONITOR`` - processes to be monitored by pidstat
+* ``PIDSTAT_OPTIONS`` - options which will be passed to pidstat command
+* ``PIDSTAT_SAMPLE_INTERVAL`` - sampling interval used by pidstat to collect statistics
+* ``LOG_FILE_PIDSTAT`` - prefix of pidstat's log file
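+
+A minimal sketch of the pidstat related settings (the values shown are
+illustrative; see conf/05_collector.conf for the authoritative defaults):
+
+.. code-block:: python
+
+    # processes monitored by pidstat
+    PIDSTAT_MONITOR = ['ovs-vswitchd', 'ovsdb-server', 'kvm']
+    PIDSTAT_OPTIONS = '-dur'          # pidstat flags: disk, CPU and memory stats
+    PIDSTAT_SAMPLE_INTERVAL = 1       # seconds between samples
+    LOG_FILE_PIDSTAT = 'pidstat'      # prefix of the log file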
+
+The *collectd* configuration option includes:
+
+* ``COLLECTD_IP`` - IP address where collectd is running
+* ``COLLECTD_PORT`` - Port number over which collectd is sending the metrics
+* ``COLLECTD_SECURITY_LEVEL`` - Security level for receiving metrics
+* ``COLLECTD_AUTH_FILE`` - Authentication file for receiving metrics
+* ``LOG_FILE_COLLECTD`` - Prefix for collectd's log file.
+* ``COLLECTD_CPU_KEYS`` - Interesting metrics from CPU
+* ``COLLECTD_PROCESSES_KEYS`` - Interesting metrics from processes
+* ``COLLECTD_INTERFACE_KEYS`` - Interesting metrics from interface
+* ``COLLECTD_OVSSTAT_KEYS`` - Interesting metrics from OVS
+* ``COLLECTD_DPDKSTAT_KEYS`` - Interesting metrics from DPDK.
+* ``COLLECTD_INTELRDT_KEYS`` - Interesting metrics from Intel-RDT
+* ``COLLECTD_INTERFACE_XKEYS`` - Metrics to exclude from Interface
+* ``COLLECTD_INTELRDT_XKEYS`` - Metrics to exclude from Intel-RDT
+* ``MC_COLLECTD_CSV`` - Path where collectd writes its metrics as CSV.
+* ``MC_COLLECTD_CMD`` - Path where Collectd is installed
+* ``MC_PROX_HOME`` - Path where PROX-IRQ is installed.
+* ``MC_PROX_CMD`` - Command to run PROX-IRQ
+* ``MC_PROX_OUT`` - Output file generated by PROX-IRQ stats collector.
+* ``MC_CRON_OUT`` - Output file path of the command run through CROND
+* ``MC_BEAT_CFILE`` - Filebeat configuration file path.
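+
+A minimal sketch of the collectd related settings (the values shown are
+illustrative; 25826 is collectd's default network port and 'None' disables
+security - see conf/05_collector.conf for the authoritative defaults):
+
+.. code-block:: python
+
+    COLLECTD_IP = '127.0.0.1'         # metrics are received over localhost
+    COLLECTD_PORT = 25826
+    COLLECTD_SECURITY_LEVEL = 'None'
+    COLLECTD_AUTH_FILE = ''
+    COLLECTD_CPU_KEYS = ['user', 'system']   # metrics of interest from CPU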
+
+
+.. _`Load Generators`:
+
+
+Load Generation
+---------------
+
+In VSPERF, load generation refers to creating background CPU and memory loads to
+study the impact of these loads on the system under test. There are two options to
+create loads in VSPERF, used for different use-cases. The options are:
+
+* stress or stress-ng
+* Stressor-VMs
+
+*stress* and *stress-ng* are Linux tools that stress the system in various ways.
+They can stress different subsystems such as CPU and memory. *stress-ng* is the
+improved version of *stress*. StressorVMs are custom-built virtual machines
+for the noisy-neighbor use-cases.
+
+Installation
+^^^^^^^^^^^^
+
+stress and stress-ng can be installed through the standard Linux installation
+process. Information about stress-ng, including installation steps, can be found
+here: `stress-ng <https://github.com/ColinIanKing/stress-ng>`_
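+
+For a quick illustration of the tool itself (not a VSPERF setting; the values
+are arbitrary), stress-ng can generate a combined CPU and memory load directly
+from the command line:
+
+.. code-block:: console
+
+    # 4 CPU workers and 2 memory workers of 1G each, for 60 seconds
+    $ stress-ng --cpu 4 --vm 2 --vm-bytes 1G --timeout 60s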
+
+There are two options for StressorVMs - one is a VM based on stress-ng and the
+second is a VM based on Spirent's cloudstress. VMs based on stress-ng can be found at this
+`link <https://github.com/opensource-tnbt/stressng-images>`_ . Spirent's cloudstress
+based VM can be downloaded from this `site <https://github.com/spirent/cloudstress>`_
+
+These StressorVMs are OSv-based VMs, which are very small in size. Download
+these VMs, place them in an appropriate location, and use this location in
+the configuration - as mentioned below.
+
+Configuration
+^^^^^^^^^^^^^
+
+The configuration file for loadgens can be found in **conf/07_loadgen.conf**.
+There are no specific configurations for stress and stress-ng commands based
+load-generation. However, for StressorVMs, following configurations apply:
+
+* ``NN_COUNT`` - Number of stressor VMs required.
+* ``NN_MEMORY`` - Comma separated memory configuration for each VM
+* ``NN_SMP`` - Comma separated configuration for each VM
+* ``NN_IMAGE`` - Comma separated list of Paths for each VM image
+* ``NN_SHARED_DRIVE_TYPE`` - Comma separated list of shared drive type for each VM
+* ``NN_BOOT_DRIVE_TYPE`` - Comma separated list of boot drive type for each VM
+* ``NN_CORE_BINDING`` - Comma separated lists of list specifying the cores associated with each VM.
+* ``NN_NICS_NR`` - Comma separated list of the number of NICs for each VM
+* ``NN_BASE_VNC_PORT`` - Base VNC port Index.
+* ``NN_LOG_FILE`` - Name of the log file
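+
+A minimal sketch for two stress-ng based stressor VMs (all values, paths and
+data types are illustrative; see conf/07_loadgen.conf for the authoritative
+defaults):
+
+.. code-block:: python
+
+    NN_COUNT = 2
+    NN_MEMORY = ['2048', '2048']              # memory per VM
+    NN_SMP = ['2', '2']                       # vCPUs per VM
+    NN_IMAGE = ['/opt/vms/stressng.qcow2',    # hypothetical image path
+                '/opt/vms/stressng.qcow2']
+    NN_CORE_BINDING = [[28, 29], [30, 31]]    # cores associated with each VM
+    NN_NICS_NR = ['2', '2']                   # NICs per VM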
+
+.. _`L3 Cache Management`:
+
+Last Level Cache Management
+---------------------------
+
+VSPERF supports last-level cache management using Intel's RDT tool(s) - the
+relevant ones are `Intel CAT-CMT <https://github.com/intel/intel-cmt-cat>`_ and
+`Intel RMD <https://github.com/intel/rmd>`_. RMD is a Linux daemon that runs on
+individual hosts and provides a REST API for the control/orchestration layer to
+request LLC for VMs/containers/applications. RMD receives the resource policy
+from the orchestration layer - in this case, from VSPERF - and enforces it on the host.
+It achieves this enforcement via kernel interfaces such as resctrlfs and libpqos.
+The resource here refers to the last-level cache. Users can configure policies to
+define how much cache a CPU can get. The policy configuration is described below.
+
+Installation
+^^^^^^^^^^^^
+
+To install the RMD tool, please install CAT-CMT first and then install RMD.
+Installation details can be found here: `Intel CAT-CMT <https://github.com/intel/intel-cmt-cat>`_
+and `Intel RMD <https://github.com/intel/rmd>`_
+
+Configuration
+^^^^^^^^^^^^^
+
+The configuration file for cache management can be found in **conf/08_llcmanagement.conf**.
+
+VSPERF provides the following configuration options for the user to define and enforce policies via RMD.
+
+* ``LLC_ALLOCATION`` - Enable or Disable LLC management.
+* ``RMD_PORT`` - RMD port (port number on which API server is listening)
+* ``RMD_SERVER_IP`` - IP address where RMD is running. Currently only localhost.
+* ``RMD_API_VERSION`` - RMD version. Currently it is 'v1'
+* ``POLICY_TYPE`` - Specify how the policy is defined - either COS or CUSTOM
+* ``VSWITCH_COS`` - Class of service (CoS) for the vSwitch. CoS can be gold, silver-bf or bronze-shared.
+* ``VNF_COS`` - Class of service for VNF
+* ``PMD_COS`` - Class of service for PMD
+* ``NOISEVM_COS`` - Class of service of Noisy VM.
+* ``VSWITCH_CA`` - [min-cache-value, max-cache-value] for vswitch
+* ``VNF_CA`` - [min-cache-value, max-cache-value] for VNF
+* ``PMD_CA`` - [min-cache-value, max-cache-value] for PMD
+* ``NOISEVM_CA`` - [min-cache-value, max-cache-value] for Noisy VM
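+
+A minimal sketch of a CoS based policy (the values follow the CoS names listed
+above but are otherwise illustrative; see conf/08_llcmanagement.conf for the
+authoritative defaults):
+
+.. code-block:: python
+
+    LLC_ALLOCATION = True
+    RMD_SERVER_IP = '127.0.0.1'       # RMD currently runs on localhost only
+    RMD_API_VERSION = 'v1'
+    POLICY_TYPE = 'COS'
+    VSWITCH_COS = 'gold'
+    VNF_COS = 'silver-bf'
+    PMD_COS = 'gold'
+    NOISEVM_COS = 'bronze-shared'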
+
+VSPERF Containers
+-----------------
+
+VSPERF containers are found in the tools/docker folder.
+
+RESULTS CONTAINER
+^^^^^^^^^^^^^^^^^
+
+The results container includes multiple services - ELK Stack, Barometer-Grafana, OPNFV-TestAPI & Jupyter.
+
+Pre-Deployment Configuration
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+1. Set the limit on mmap counts to 262144 or more.
+   You can do this with the command ``sysctl -w vm.max_map_count=262144``.
+   Or, to set it permanently, update the ``vm.max_map_count`` field in ``/etc/sysctl.conf``.
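+
+   A minimal sketch of the persistent entry in ``/etc/sysctl.conf`` (standard
+   sysctl.conf syntax):
+
+   .. code-block:: console
+
+      vm.max_map_count = 262144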
+
+2. You may want to modify the IP address from 0.0.0.0 to the appropriate host IP in ``docker-compose.yml``
+
+3. Please add the dashboards folder from OPNFV-Barometer-Grafana into the grafana folder. It can be found at `Barometer Grafana <https://github.com/opnfv/barometer/tree/master/docker/barometer-grafana>`_
+
+Build
+~~~~~
+
+Run ``docker-compose build`` command to build the container.
+
+Run
+~~~
+
+Run the container with ``docker-compose up`` command.
+
+Post-Deployment Configuration
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The directory ``resultsdb`` contains the source from the Dovetail/Dovetail-webportal project.
+Once the results container is deployed, please run the python script as follows, to ensure that results can be
+pushed and queried correctly - ``python init_db.py host_ip_address testapi_port``.
+For example, if the host on which the container is running is 10.10.120.22, and the container exposes port 8000,
+the command should be: ``python init_db.py 10.10.120.22 8000``
diff --git a/docs/testing/user/configguide/trafficgen.rst b/docs/testing/user/configguide/trafficgen.rst
index 91c4084e..3bb09d52 100644
--- a/docs/testing/user/configguide/trafficgen.rst
+++ b/docs/testing/user/configguide/trafficgen.rst
@@ -39,12 +39,14 @@ and is configured as follows:
TRAFFIC = {
'traffic_type' : 'rfc2544_throughput',
'frame_rate' : 100,
+ 'burst_size' : 100,
'bidir' : 'True', # will be passed as string in title format to tgen
'multistream' : 0,
'stream_type' : 'L4',
'pre_installed_flows' : 'No', # used by vswitch implementation
'flow_type' : 'port', # used by vswitch implementation
-
+ 'flow_control' : False, # supported only by IxNet
+ 'learning_frames' : True, # supported only by IxNet
'l2': {
'framesize': 64,
'srcmac': '00:00:00:00:00:00',
@@ -67,8 +69,38 @@ and is configured as follows:
'priority': 0,
'cfi': 0,
},
+ 'capture': {
+ 'enabled': False,
+ 'tx_ports' : [0],
+ 'rx_ports' : [1],
+ 'count': 1,
+ 'filter': '',
+ },
+ 'scapy': {
+ 'enabled': False,
+ '0' : 'Ether(src={Ether_src}, dst={Ether_dst})/'
+ 'Dot1Q(prio={Dot1Q_prio}, id={Dot1Q_id}, vlan={Dot1Q_vlan})/'
+ 'IP(proto={IP_proto}, src={IP_src}, dst={IP_dst})/'
+ '{IP_PROTO}(sport={IP_PROTO_sport}, dport={IP_PROTO_dport})',
+ '1' : 'Ether(src={Ether_dst}, dst={Ether_src})/'
+ 'Dot1Q(prio={Dot1Q_prio}, id={Dot1Q_id}, vlan={Dot1Q_vlan})/'
+ 'IP(proto={IP_proto}, src={IP_dst}, dst={IP_src})/'
+ '{IP_PROTO}(sport={IP_PROTO_dport}, dport={IP_PROTO_sport})',
+ },
+ 'latency_histogram': {
+ 'enabled': False,
+ 'type': 'Default',
+ },
+ 'imix': {
+ 'enabled': True,
+ 'type': 'genome',
+ 'genome': 'aaaaaaaddddg',
+ },
}
+A detailed description of the ``TRAFFIC`` dictionary can be found at
+:ref:`configuration-of-traffic-dictionary`.
+
The framesize parameter can be overridden from the configuration
files by adding the following to your custom configuration file
``10_custom.conf``:
@@ -92,6 +124,13 @@ commandline above to:
$ ./vsperf --test-params "TRAFFICGEN_PKT_SIZES=(x,y);TRAFFICGEN_DURATION=10;" \
"TRAFFICGEN_RFC2544_TESTS=1" $TESTNAME
+If you use IMIX, set ``TRAFFICGEN_PKT_SIZES`` to 0:
+
+.. code-block:: console
+
+ TRAFFICGEN_PKT_SIZES = (0,)
+
+
.. _trafficgen-dummy:
Dummy
@@ -368,7 +407,7 @@ Spirent Setup
Spirent installation files and instructions are available on the
Spirent support website at:
-http://support.spirent.com
+https://support.spirent.com
Select a version of Spirent TestCenter software to utilize. This example
will use Spirent TestCenter v4.57 as an example. Substitute the appropriate
@@ -420,7 +459,7 @@ STC ReST API. Basic ReST functionality is provided by the resthttp module,
and may be used for writing ReST clients independent of STC.
- Project page: <https://github.com/Spirent/py-stcrestclient>
-- Package download: <http://pypi.python.org/pypi/stcrestclient>
+- Package download: <https://pypi.python.org/project/stcrestclient>
To use REST interface, follow the instructions in the Project page to
install the package. Once installed, the scripts named with 'rest' keyword
@@ -543,6 +582,22 @@ Note that 'FORWARDING_RATE_FPS', 'CACHING_CAPACITY_ADDRS',
'ADDR_LEARNED_PERCENT' and 'OPTIMAL_LEARNING_RATE_FPS' are the new
result-constants added to support RFC2889 tests.
+4. Latency Histogram. To enable the latency histogram in results,
+enable latency_histogram in conf/03_traffic.conf.
+
+.. code-block:: python
+
+ 'latency_histogram':
+ {
+     'enabled': True,
+     'type': 'Default',
+ }
+
+Once enabled, a 'Histogram.csv' file will be generated in the results folder.
+Histogram.csv will include the latency histogram in the following order:
+(a) packet size, (b) ranges in 10ns, (c) packet counts. This set of 3 lines
+will be repeated for every packet size.
+
.. _`Xena Networks`:
Xena Networks
@@ -563,13 +618,13 @@ support contract.
To execute the Xena2544.exe file under Linux distributions the mono-complete
package must be installed. To install this package follow the instructions
below. Further information can be obtained from
-http://www.mono-project.com/docs/getting-started/install/linux/
+https://www.mono-project.com/docs/getting-started/install/linux/
.. code-block:: console
rpm --import "http://keyserver.ubuntu.com/pks/lookup?op=get&search=0x3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF"
yum-config-manager --add-repo http://download.mono-project.com/repo/centos/
- yum -y install mono-complete
+ yum -y install mono-complete-5.8.0.127-0.xamarin.3.epel7.x86_64
To prevent gpg errors on future yum installation of packages the mono-project
repo should be disabled once installed.
@@ -659,6 +714,14 @@ or modify the length of the learning by modifying the following settings.
TRAFFICGEN_XENA_CONT_PORT_LEARNING_ENABLED = False
TRAFFICGEN_XENA_CONT_PORT_LEARNING_DURATION = 3
+Multistream Modifier
+~~~~~~~~~~~~~~~~~~~~
+
+Xena has a maximum modifier value of 64k. For this reason, when specifying
+multistream values greater than 64k for Layer 2 or Layer 3, two modifiers are
+used, and the requested value may be adjusted to a value whose square root can
+serve as both modifiers. You will see a log notification with the new value that was calculated.
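+
+For example (illustrative): a multistream value of 262144 can be represented
+exactly as two modifiers of 512 each, since 512 * 512 = 262144; a requested
+value without an integer square root is adjusted to a nearby value that has
+one, and the calculated value is reported in the log.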
+
MoonGen
-------
@@ -691,7 +754,7 @@ trafficgen.lua
Follow MoonGen set up and execution instructions here:
-https://github.com/atheurer/lua-trafficgen/blob/master/README.md
+https://github.com/atheurer/trafficgen/blob/master/README.md
Note one will need to set up ssh login to not use passwords between the server
running MoonGen and the device under test (running the VSPERF test
@@ -737,11 +800,14 @@ You can directly download from GitHub:
git clone https://github.com/cisco-system-traffic-generator/trex-core
-and use the master branch:
+and use the same Trex version for both server and client API.
+
+**NOTE:** The Trex API version used by VSPERF is defined by variable ``TREX_TAG``
+in file ``src/package-list.mk``.
.. code-block:: console
- git checkout master
+ git checkout v2.38
or you can download the latest Trex release from here:
@@ -783,6 +849,17 @@ It is neccesary for proper connection between Trex server and VSPERF.
cd trex-core/scripts/
./t-rex-64 -i
+**NOTE:** Please check your firewall settings on both the DUT and the T-Rex server.
+The firewall must allow a connection from the DUT (VSPERF) to the T-Rex server running
+at TCP port 4501.
+
+**NOTE:** For high speed cards it may be advantageous to start T-Rex with more transmit queues/cores.
+
+.. code-block:: console
+
+ cd trex-core/scripts/
+ ./t-rex-64 -i -c 10
+
For additional information about Trex stateless mode see Trex stateless documentation:
https://trex-tgn.cisco.com/trex/doc/trex_stateless.html
@@ -835,6 +912,21 @@ place. This can be adjusted with the following configurations:
TRAFFICGEN_TREX_LEARNING_MODE=True
TRAFFICGEN_TREX_LEARNING_DURATION=5
+Latency measurements have an impact on T-Rex performance. Thus vswitchperf uses a separate
+latency stream with limited speed for each direction. This workaround is used for the RFC2544
+**Throughput** and **Continuous** traffic types. In case of the **Burst** traffic type,
+the latency statistics are measured for all frames in the burst. Collection of latency
+statistics is driven by the configuration option ``TRAFFICGEN_TREX_LATENCY_PPS`` as follows:
+
+ * value ``0`` - disables latency measurements
+ * non-zero integer value - enables latency measurements; in case of the Throughput
+ and Continuous traffic types, it specifies the speed of the latency specific stream
+ in PPS. In case of the Burst traffic type, it enables latency measurements for all frames.
+
+.. code-block:: console
+
+ TRAFFICGEN_TREX_LATENCY_PPS = 1000
+
SR-IOV and Multistream layer 2
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
T-Rex by default only accepts packets on the receive side if the destination mac matches the
@@ -850,6 +942,22 @@ modified. Enable Promiscuous mode when doing multistream at layer 2 testing with
TRAFFICGEN_TREX_PROMISCUOUS=True
+Card Bandwidth Options
+~~~~~~~~~~~~~~~~~~~~~~
+
+T-Rex API will attempt to retrieve the highest possible speed from the card using internal
+calls to port information. If you are using two separate cards, it will take the lower
+of the two cards as the max speed. If necessary, you can try to force the API to use a
+specific maximum speed per port. The configurations below can be adjusted to enable this.
+
+.. code-block:: console
+
+ TRAFFICGEN_TREX_FORCE_PORT_SPEED = True
+ TRAFFICGEN_TREX_PORT_SPEED = 40000 # 40 gig
+
+**Note:** Setting higher-than-possible speeds will result in unpredictable behavior when running
+tests, such as duration inaccuracy and/or complete test failure.
+
RFC2544 Validation
~~~~~~~~~~~~~~~~~~
@@ -869,3 +977,68 @@ The duration and maximum number of attempted verification trials can be set to c
behavior of this step. If the verification step fails, it will resume the binary search
with new values where the maximum output will be the last attempted frame rate minus the
currently set threshold.
+
+Scapy frame definition
+~~~~~~~~~~~~~~~~~~~~~~
+
+It is possible to use a SCAPY frame definition to generate various network protocols
+with the **T-Rex** traffic generator. If a particular network protocol layer
+is disabled in the TRAFFIC dictionary (e.g. TRAFFIC['vlan']['enabled'] = False),
+then the disabled layer will be removed from the scapy frame definition by VSPERF.
+
+The scapy frame definition can refer to values defined by the TRAFFIC dictionary
+via the following keywords. These keywords are used in the examples below.
+
+* ``Ether_src`` - refers to ``TRAFFIC['l2']['srcmac']``
+* ``Ether_dst`` - refers to ``TRAFFIC['l2']['dstmac']``
+* ``IP_proto`` - refers to ``TRAFFIC['l3']['proto']``
+* ``IP_PROTO`` - refers to upper case version of ``TRAFFIC['l3']['proto']``
+* ``IP_src`` - refers to ``TRAFFIC['l3']['srcip']``
+* ``IP_dst`` - refers to ``TRAFFIC['l3']['dstip']``
+* ``IP_PROTO_sport`` - refers to ``TRAFFIC['l4']['srcport']``
+* ``IP_PROTO_dport`` - refers to ``TRAFFIC['l4']['dstport']``
+* ``Dot1Q_prio`` - refers to ``TRAFFIC['vlan']['priority']``
+* ``Dot1Q_id`` - refers to ``TRAFFIC['vlan']['cfi']``
+* ``Dot1Q_vlan`` - refers to ``TRAFFIC['vlan']['id']``
+
+In the following examples of SCAPY frame definitions, only the relevant parts of the TRAFFIC
+dictionary are shown. The rest of the TRAFFIC dictionary is set to the default values
+defined in ``conf/03_traffic.conf``.
+
+Please check the official documentation of the SCAPY project for details about SCAPY frame
+definitions and supported network layers at: https://scapy.net
+
+#. Generate ICMP frames:
+
+ .. code-block:: console
+
+ 'scapy': {
+ 'enabled': True,
+ '0' : 'Ether(src={Ether_src}, dst={Ether_dst})/IP(proto="icmp", src={IP_src}, dst={IP_dst})/ICMP()',
+ '1' : 'Ether(src={Ether_dst}, dst={Ether_src})/IP(proto="icmp", src={IP_dst}, dst={IP_src})/ICMP()',
+ }
+
+#. Generate IPv6 ICMP Echo Request
+
+ .. code-block:: console
+
+ 'l3' : {
+ 'srcip': 'feed::01',
+ 'dstip': 'feed::02',
+ },
+ 'scapy': {
+ 'enabled': True,
+ '0' : 'Ether(src={Ether_src}, dst={Ether_dst})/IPv6(src={IP_src}, dst={IP_dst})/ICMPv6EchoRequest()',
+ '1' : 'Ether(src={Ether_dst}, dst={Ether_src})/IPv6(src={IP_dst}, dst={IP_src})/ICMPv6EchoRequest()',
+ }
+
+#. Generate TCP frames:
+
+ Example uses default SCAPY frame definition, which can reflect ``TRAFFIC['l3']['proto']`` settings.
+
+ .. code-block:: console
+
+ 'l3' : {
+ 'proto' : 'tcp',
+ },
+
diff --git a/docs/testing/user/userguide/index.rst b/docs/testing/user/userguide/index.rst
index 64d91657..2c7a78ff 100644
--- a/docs/testing/user/userguide/index.rst
+++ b/docs/testing/user/userguide/index.rst
@@ -11,10 +11,10 @@ VSPERF Test Guide
.. toctree::
:caption: VSPERF Test Execution
:maxdepth: 2
- :numbered:
./testusage.rst
./teststeps.rst
./integration.rst
+ ./trafficcapture.rst
./yardstick.rst
./testlist.rst
diff --git a/docs/testing/user/userguide/integration.rst b/docs/testing/user/userguide/integration.rst
index 66808400..9d847fd8 100644
--- a/docs/testing/user/userguide/integration.rst
+++ b/docs/testing/user/userguide/integration.rst
@@ -1,6 +1,6 @@
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. http://creativecommons.org/licenses/by/4.0
-.. (c) OPNFV, Intel Corporation, AT&T and others.
+.. (c) OPNFV, Intel Corporation, AT&T, Tieto and others.
.. _integration-tests:
@@ -22,6 +22,12 @@ P2P (Physical to Physical scenarios).
NOTE: The configuration for overlay tests provided in this guide is for
unidirectional traffic only.
+NOTE: The overlay tests require an IxNet traffic generator. The tunneled traffic
+is configured by the ``ixnetrfc2544v2.tcl`` script. This script can be used
+with all supported deployment scenarios to generate frames with the VXLAN, GRE
+or GENEVE protocols. In that case, the options "Tunnel Operation" and
+"TRAFFICGEN_IXNET_TCL_SCRIPT" must be properly configured in the testcase definition.
+
Executing Integration Tests
---------------------------
@@ -63,8 +69,8 @@ the following variables in you user_settings.py file:
VTEP_IP2_SUBNET = '192.168.240.0/24'
# Bridge names
- TUNNEL_INTEGRATION_BRIDGE = 'br0'
- TUNNEL_EXTERNAL_BRIDGE = 'br-ext'
+ TUNNEL_INTEGRATION_BRIDGE = 'vsperf-br0'
+ TUNNEL_EXTERNAL_BRIDGE = 'vsperf-br-ext'
# IP of br-ext
TUNNEL_EXTERNAL_BRIDGE_IP = '192.168.240.1/24'
diff --git a/docs/testing/user/userguide/testlist.rst b/docs/testing/user/userguide/testlist.rst
index 2b0e9d7f..fe8c840a 100644
--- a/docs/testing/user/userguide/testlist.rst
+++ b/docs/testing/user/userguide/testlist.rst
@@ -68,14 +68,13 @@ vswitch_pvvp_tput vSwitch - configure switch, two chained v
vswitch_pvvp_back2back vSwitch - configure switch, two chained vnfs and execute RFC2544 back2back test
vswitch_pvvp_cont vSwitch - configure switch, two chained vnfs and execute RFC2544 continuous stream test
vswitch_pvvp_all vSwitch - configure switch, two chained vnfs and execute all test types
-vswitch_p4vp Just configure 4 chained vnfs
-vswitch_p4vp_tput 4 chained vnfs, execute RFC2544 throughput test
-vswitch_p4vp_back2back 4 chained vnfs, execute RFC2544 back2back test
-vswitch_p4vp_cont 4 chained vnfs, execute RFC2544 continuous stream test
-vswitch_p4vp_all 4 chained vnfs, execute RFC2544 throughput test
-2pvp_udp_dest_flows RFC2544 Continuous TC with 2 Parallel VMs, flows on UDP Dest Port
-4pvp_udp_dest_flows RFC2544 Continuous TC with 4 Parallel VMs, flows on UDP Dest Port
-6pvp_udp_dest_flows RFC2544 Continuous TC with 6 Parallel VMs, flows on UDP Dest Port
+vswitch_p4vp_tput 4 chained vnfs, execute RFC2544 throughput test, deployment pvvp4
+vswitch_p4vp_back2back 4 chained vnfs, execute RFC2544 back2back test, deployment pvvp4
+vswitch_p4vp_cont 4 chained vnfs, execute RFC2544 continuous stream test, deployment pvvp4
+vswitch_p4vp_all 4 chained vnfs, execute RFC2544 throughput tests, deployment pvvp4
+2pvp_udp_dest_flows RFC2544 Continuous TC with 2 Parallel VMs, flows on UDP Dest Port, deployment pvpv2
+4pvp_udp_dest_flows RFC2544 Continuous TC with 4 Parallel VMs, flows on UDP Dest Port, deployment pvpv4
+6pvp_udp_dest_flows RFC2544 Continuous TC with 6 Parallel VMs, flows on UDP Dest Port, deployment pvpv6
vhost_numa_awareness vSwitch DPDK - verify that PMD threads are served by the same NUMA slot as QEMU instances
ixnet_pvp_tput_1nic PVP Scenario with 1 port towards IXIA
vswitch_vports_add_del_connection_vpp VPP: vSwitch - configure switch with vports, add and delete connection
@@ -388,3 +387,46 @@ ovsdpdk_qos_p2p In a p2p setup, ensure when a QoS egres
ovsdpdk_qos_pvp In a pvp setup, ensure when a QoS egress policer is created that the
traffic is limited to the specified rate.
======================================== ======================================================================================
+
+Custom Statistics
++++++++++++++++++
+
+A set of functional testcases for validation of Custom Statistics support by OVS.
+This feature allows Custom Statistics to be accessed by VSPERF.
+
+These testcases require DPDK v17.11, the latest Open vSwitch (v2.9.90)
+and the IxNet traffic-generator.
+
+======================================== ======================================================================================
+ovsdpdk_custstat_check Test if custom statistics are supported.
+ovsdpdk_custstat_rx_error Test bad ethernet CRC counter 'rx_crc_errors' exposed by custom
+ statistics.
+
+======================================== ======================================================================================
+
+T-Rex in VM TestCases
+^^^^^^^^^^^^^^^^^^^^^
+
+A set of functional testcases which use T-Rex running in a VM as a traffic generator.
+These testcases require a VM image with the T-Rex server installed. An example of such
+an image is a vloop-vnf image with T-Rex, available for download at:
+
+http://artifacts.opnfv.org/vswitchperf/vnf/vloop-vnf-ubuntu-16.04_trex_20180209.qcow2
+
+This image can be used for both T-Rex VM and loopback VM in ``vm2vm`` testcases.
+
+**NOTE:** The performance of T-Rex running inside a VM is lower compared to T-Rex
+execution on bare metal. The user should calibrate the VM's maximum FPS
+capability to ensure this limitation is understood.
+
+======================================== ======================================================================================
+trex_vm_cont T-Rex VM - execute RFC2544 Continuous Stream from T-Rex VM and loop
+ it back through Open vSwitch.
+trex_vm_tput T-Rex VM - execute RFC2544 Throughput from T-Rex VM and loop it back
+ through Open vSwitch.
+trex_vm2vm_cont T-Rex VM2VM - execute RFC2544 Continuous Stream from T-Rex VM and
+ loop it back through 2nd VM.
+trex_vm2vm_tput T-Rex VM2VM - execute RFC2544 Throughput from T-Rex VM and loop it back
+ through 2nd VM.
+
+======================================== ======================================================================================
diff --git a/docs/testing/user/userguide/teststeps.rst b/docs/testing/user/userguide/teststeps.rst
index 08c95311..cb627bc5 100644
--- a/docs/testing/user/userguide/teststeps.rst
+++ b/docs/testing/user/userguide/teststeps.rst
@@ -23,6 +23,13 @@ the step number by one which is indicated in the log.
(testcases.integration) - Step 0 'vswitch add_vport ['br0']' start
+Test steps are defined as a list of steps within the ``TestSteps`` item of a test
+case definition. Each step is a list with the following structure:
+
+.. code-block:: python
+
+ '[' [ optional-alias ',' ] test-object ',' test-function [ ',' optional-function-params ] '],'
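+
+For example, a step calling the ``add_vport`` function of the ``vswitch``
+test object with bridge name ``br0`` as its parameter (the optional alias
+``port1`` is illustrative; it lets subsequent steps refer to this step's
+result):
+
+.. code-block:: python
+
+    ['port1', 'vswitch', 'add_vport', 'br0'],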
+
Step driven tests can be used for both performance and integration testing.
In case of integration test, each step in the test case is validated. If a step
does not pass validation the test will fail and terminate. The test will continue
@@ -57,8 +64,14 @@ Step driven testcases can be used in two different ways:
Test objects and their functions
--------------------------------
-Every test step can call a function of one of the supported test objects. The list
-of supported objects and their most common functions follows:
+Every test step can call a function of one of the supported test objects. In general,
+any existing function of a supported test object can be called by a test step. If
+step validation is required (the case for integration test steps, which are not
+suppressed), then an appropriate ``validate_`` method must be implemented.
+
+The list of supported objects and their most common functions is given below. Please
+check the implementation of the test objects for the full list of implemented functions
+and their parameters.
* ``vswitch`` - provides functions for vSwitch configuration
@@ -176,6 +189,8 @@ of supported objects and their most common functions follows:
* ``getValue param`` - returns value of given ``param``
* ``setValue param value`` - sets value of ``param`` to given ``value``
+  * ``resetValue param`` - if ``param`` was overridden by ``TEST_PARAMS`` (e.g. by the "Parameters"
+    section of the test case definition), then it will be reset to its original value.
Examples:
@@ -185,6 +200,8 @@ of supported objects and their most common functions follows:
['settings', 'setValue', 'GUEST_USERNAME', ['root']]
+ ['settings', 'resetValue', 'WHITELIST_NICS'],
+
It is possible and more convenient to access any VSPERF configuration option directly
via ``$NAME`` notation. Option evaluation is done during runtime and vsperf will
automatically translate it to the appropriate call of ``settings.getValue``.
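+
+For example, the following two snippets are equivalent; the longer form retrieves
+the value by an explicit step and the next step references its result (a sketch
+based on the step examples above):
+
+.. code-block:: python
+
+   # longer form: explicit call of the settings object
+   ['settings', 'getValue', 'VSWITCH_BRIDGE_NAME'],
+   ['vswitch', 'add_vport', '#STEP[-1]'],
+
+   # shorter form: direct access via $NAME notation
+   ['vswitch', 'add_vport', '$VSWITCH_BRIDGE_NAME'],
+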
@@ -747,6 +764,8 @@ destination UDP port.
]
},
+The same test can be written in a shorter form using ``"Deployment" : "pvpv"``.
+
To run the test:
.. code-block:: console
@@ -779,20 +798,20 @@ and available in both csv and rst report files.
},
},
"TestSteps": [
- ['vswitch', 'add_vport', 'br0'],
- ['vswitch', 'add_vport', 'br0'],
+ ['vswitch', 'add_vport', '$VSWITCH_BRIDGE_NAME'],
+ ['vswitch', 'add_vport', '$VSWITCH_BRIDGE_NAME'],
# priority must be higher than default 32768, otherwise flows won't match
- ['vswitch', 'add_flow', 'br0',
+ ['vswitch', 'add_flow', '$VSWITCH_BRIDGE_NAME',
{'in_port': '1', 'actions': ['output:#STEP[-2][1]'], 'idle_timeout': '0', 'dl_type':'0x0800',
'nw_proto':'17', 'tp_dst':'0', 'priority': '33000'}],
- ['vswitch', 'add_flow', 'br0',
+ ['vswitch', 'add_flow', '$VSWITCH_BRIDGE_NAME',
{'in_port': '2', 'actions': ['output:#STEP[-2][1]'], 'idle_timeout': '0', 'dl_type':'0x0800',
'nw_proto':'17', 'tp_dst':'0', 'priority': '33000'}],
- ['vswitch', 'add_flow', 'br0', {'in_port': '#STEP[-4][1]', 'actions': ['output:1'],
+ ['vswitch', 'add_flow', '$VSWITCH_BRIDGE_NAME', {'in_port': '#STEP[-4][1]', 'actions': ['output:1'],
'idle_timeout': '0'}],
- ['vswitch', 'add_flow', 'br0', {'in_port': '#STEP[-4][1]', 'actions': ['output:2'],
+ ['vswitch', 'add_flow', '$VSWITCH_BRIDGE_NAME', {'in_port': '#STEP[-4][1]', 'actions': ['output:2'],
'idle_timeout': '0'}],
- ['vswitch', 'dump_flows', 'br0'],
+ ['vswitch', 'dump_flows', '$VSWITCH_BRIDGE_NAME'],
['vnf1', 'start'],
]
},
diff --git a/docs/testing/user/userguide/testusage.rst b/docs/testing/user/userguide/testusage.rst
index 20c30a40..3dd41846 100644
--- a/docs/testing/user/userguide/testusage.rst
+++ b/docs/testing/user/userguide/testusage.rst
@@ -1,6 +1,6 @@
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. http://creativecommons.org/licenses/by/4.0
-.. (c) OPNFV, Intel Corporation, AT&T and others.
+.. (c) OPNFV, Intel Corporation, Spirent, AT&T and others.
vSwitchPerf test suites userguide
---------------------------------
@@ -91,55 +91,41 @@ Using a custom settings file
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
If your ``10_custom.conf`` doesn't reside in the ``./conf`` directory
-of if you want to use an alternative configuration file, the file can
+or if you want to use an alternative configuration file, the file can
be passed to ``vsperf`` via the ``--conf-file`` argument.
.. code-block:: console
$ ./vsperf --conf-file <path_to_custom_conf> ...
-Note that configuration passed in via the environment (``--load-env``)
-or via another command line argument will override both the default and
-your custom configuration files. This "priority hierarchy" can be
-described like so (1 = max priority):
-
-1. Testcase definition section ``Parameters``
-2. Command line arguments
-3. Environment variables
-4. Configuration file(s)
-
-Further details about configuration files evaluation and special behaviour
+Evaluation of configuration parameters
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The value of a configuration parameter can be specified in various places,
+e.g. in the test case definition, inside configuration files, by a command
+line argument, etc. Thus it is important to understand the order of configuration
+parameter evaluation. This "priority hierarchy" can be described like so
+(1 = max priority):
+
+1. Testcase definition keywords ``vSwitch``, ``Trafficgen``, ``VNF`` and ``Tunnel Type``
+2. Parameters inside testcase definition section ``Parameters``
+3. Command line arguments (e.g. ``--test-params``, ``--vswitch``, ``--trafficgen``, etc.)
+4. Environment variables (see ``--load-env`` argument)
+5. Custom configuration file specified via ``--conf-file`` argument
+6. Standard configuration files, where higher prefix number means higher
+ priority.
+
+For example, if the same configuration parameter is defined in a custom configuration
+file (specified via the ``--conf-file`` argument), via the ``--test-params`` argument
+and also inside the ``Parameters`` section of the testcase definition, then the
+parameter value from the ``Parameters`` section will be used.
+
+Further details about the order of configuration file evaluation and special behaviour
of options with ``GUEST_`` prefix could be found at :ref:`design document
<design-configuration>`.
.. _overriding-parameters-documentation:
-Referencing parameter values
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-It is possible to use a special macro ``#PARAM()`` to refer to the value of
-another configuration parameter. This reference is evaluated during
-access of the parameter value (by ``settings.getValue()`` call), so it
-can refer to parameters created during VSPERF runtime, e.g. NICS dictionary.
-It can be used to reflect DUT HW details in the testcase definition.
-
-Example:
-
-.. code:: python
-
- {
- ...
- "Name": "testcase",
- "Parameters" : {
- "TRAFFIC" : {
- 'l2': {
- # set destination MAC to the MAC of the first
- # interface from WHITELIST_NICS list
- 'dstmac' : '#PARAM(NICS[0]["mac"])',
- },
- },
- ...
-
Overriding values defined in configuration files
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -155,6 +141,17 @@ Example:
$ ./vsperf --test-params "TRAFFICGEN_DURATION=10;TRAFFICGEN_PKT_SIZES=(128,);" \
"GUEST_LOOPBACK=['testpmd','l2fwd']" pvvp_tput
+The ``--test-params`` command line argument can also be used to override default
+configuration values for multiple tests. Providing a list of parameters will apply each
+element of the list to the test with the same index. If more tests are run than
+parameters are provided, the last element of the list is repeated.
+
+.. code:: console
+
+ $ ./vsperf --test-params "['TRAFFICGEN_DURATION=10;TRAFFICGEN_PKT_SIZES=(128,)',"
+ "'TRAFFICGEN_DURATION=10;TRAFFICGEN_PKT_SIZES=(64,)']" \
+ pvvp_tput pvvp_tput
+
The second option is to override configuration items by ``Parameters`` section
of the test case definition. The configuration items can be added into ``Parameters``
dictionary with their new values. These values will override values defined in
@@ -186,6 +183,36 @@ parameter name is passed via ``--test-params`` CLI argument or defined in ``Para
section of test case definition. It is also forbidden to redefine a value of
``TEST_PARAMS`` configuration item via CLI or ``Parameters`` section.
+**NOTE:** The new definition of the dictionary parameter, specified via ``--test-params``
+or inside ``Parameters`` section, will not override original dictionary values. Instead
+the original dictionary will be updated with values from the new dictionary definition.
+
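+For illustration (a sketch; the keys shown are taken from the ``TRAFFIC`` examples
+in this guide), assume the following original dictionary and a new definition
+passed via ``--test-params``:
+
+.. code:: python
+
+   # original (default) value
+   TRAFFIC = {'traffic_type': 'rfc2544_throughput', 'bidir': 'True'}
+
+   # new definition: TRAFFIC={'traffic_type': 'rfc2544_continuous'}
+
+   # resulting value: the dictionary is updated, keys not mentioned in the
+   # new definition keep their original values
+   TRAFFIC = {'traffic_type': 'rfc2544_continuous', 'bidir': 'True'}
+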
+Referencing parameter values
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+It is possible to use a special macro ``#PARAM()`` to refer to the value of
+another configuration parameter. This reference is evaluated during
+access of the parameter value (by ``settings.getValue()`` call), so it
+can refer to parameters created during VSPERF runtime, e.g. NICS dictionary.
+It can be used to reflect DUT HW details in the testcase definition.
+
+Example:
+
+.. code:: python
+
+ {
+ ...
+ "Name": "testcase",
+ "Parameters" : {
+ "TRAFFIC" : {
+ 'l2': {
+ # set destination MAC to the MAC of the first
+ # interface from WHITELIST_NICS list
+ 'dstmac' : '#PARAM(NICS[0]["mac"])',
+ },
+ },
+ ...
+
vloop_vnf
^^^^^^^^^
@@ -205,6 +232,12 @@ A Kernel Module that provides OSI Layer 2 Ipv4 termination or forwarding with
support for Destination Network Address Translation (DNAT) for both the MAC and
IP addresses. l2fwd can be found in <vswitchperf_dir>/src/l2fwd
+Additional Tools Setup
+^^^^^^^^^^^^^^^^^^^^^^
+
+Follow the :ref:`Additional tools instructions <additional-tools-configuration>` to
+install and configure additional tools such as collectors and loadgens.
+
Executing tests
^^^^^^^^^^^^^^^
@@ -234,6 +267,12 @@ To run a single test:
Where $TESTNAME is the name of the vsperf test you would like to run.
+To run a test multiple times, repeat it:
+
+.. code-block:: console
+
+ $ ./vsperf $TESTNAME $TESTNAME $TESTNAME
+
To run a group of tests, for example all tests with a name containing
'RFC2544':
@@ -256,6 +295,30 @@ Some tests allow for configurable parameters, including test duration
--tests RFC2544Tput \
--test-params "TRAFFICGEN_DURATION=10;TRAFFICGEN_PKT_SIZES=(128,)"
+To specify configurable parameters for multiple tests, use a list of
+parameters with one element for each test.
+
+.. code:: console
+
+ $ ./vsperf --conf-file user_settings.py \
+ --test-params "['TRAFFICGEN_DURATION=10;TRAFFICGEN_PKT_SIZES=(128,)',"\
+ "'TRAFFICGEN_DURATION=10;TRAFFICGEN_PKT_SIZES=(64,)']" \
+ phy2phy_cont phy2phy_cont
+
+If the ``CUMULATIVE_PARAMS`` setting is set to True and different parameters are
+provided for each test using ``--test-params``, each test will take the parameters of
+the previous test before applying its own.
+With ``CUMULATIVE_PARAMS`` set to True, the following command is equivalent to the
+previous example:
+
+.. code:: console
+
+ $ ./vsperf --conf-file user_settings.py \
+ --test-params "['TRAFFICGEN_DURATION=10;TRAFFICGEN_PKT_SIZES=(128,)',"\
+ "'TRAFFICGEN_PKT_SIZES=(64,)']" \
+ phy2phy_cont phy2phy_cont
+
For all available options, check out the help dialog:
.. code-block:: console
@@ -425,10 +488,6 @@ set ``PATHS['dpdk']['bin']['modules']`` instead.
**NOTE:** Please ensure your boot/grub parameters include
the following:
-**NOTE:** In case of VPP, it is required to explicitly define, that vfio-pci
-DPDK driver should be used. It means to update dpdk part of VSWITCH_VPP_ARGS
-dictionary with uio-driver section, e.g. VSWITCH_VPP_ARGS['dpdk'] = 'uio-driver vfio-pci'
-
.. code-block:: console
iommu=pt intel_iommu=on
@@ -448,6 +507,10 @@ To check that IOMMU is enabled on your platform:
[ 3.335746] IOMMU: dmar1 using Queued invalidation
....
+**NOTE:** In case of VPP, it is required to explicitly define that the vfio-pci
+DPDK driver should be used. This means updating the dpdk part of the VSWITCH_VPP_ARGS
+dictionary with a uio-driver section, e.g. VSWITCH_VPP_ARGS['dpdk'] = 'uio-driver vfio-pci'
+
.. _SRIOV-support:
Using SRIOV support
@@ -584,7 +647,7 @@ The supported dpdk guest bind drivers are:
.. code-block:: console
- 'uio_pci_generic' - Use uio_pci_generic driver
+ 'uio_pci_generic' - Use uio_pci_generic driver
'igb_uio_from_src' - Build and use the igb_uio driver from the dpdk src
files
'vfio_no_iommu' - Use vfio with no iommu option. This requires custom
@@ -599,7 +662,7 @@ modified to use igb_uio_from_src instead.
Note: vfio_no_iommu requires kernels equal to or greater than 4.5 and dpdk
16.04 or greater. Using this option will also taint the kernel.
-Please refer to the dpdk documents at http://dpdk.org/doc/guides for more
+Please refer to the dpdk documents at https://doc.dpdk.org/guides for more
information on these drivers.
Guest Core and Thread Binding
@@ -915,6 +978,39 @@ Example of execution of VSPERF in "trafficgen" mode:
$ ./vsperf -m trafficgen --trafficgen IxNet --conf-file vsperf.conf \
--test-params "TRAFFIC={'traffic_type':'rfc2544_continuous','bidir':'False','framerate':60}"
+Performance Matrix
+^^^^^^^^^^^^^^^^^^
+
+The ``--matrix`` command line argument analyses and displays the performance of
+all the tests run. Using the metric specified by ``MATRIX_METRIC`` in the conf-file,
+the first test is set as the baseline and all the other tests are compared to it.
+The ``MATRIX_METRIC`` must always refer to a numeric value to enable comparison.
+A table with the test ID, metric value, change of the metric in %, test name
+and the test parameters used for each test is printed out as well as saved into the
+results directory.
+
+Example of 2 tests being compared using Performance Matrix:
+
+.. code-block:: console
+
+ $ ./vsperf --conf-file user_settings.py \
+ --test-params "['TRAFFICGEN_PKT_SIZES=(64,)',"\
+ "'TRAFFICGEN_PKT_SIZES=(128,)']" \
+ phy2phy_cont phy2phy_cont --matrix
+
+Example output:
+
+.. code-block:: console
+
+ +------+--------------+---------------------+----------+---------------------------------------+
+ | ID | Name | throughput_rx_fps | Change | Parameters, CUMULATIVE_PARAMS = False |
+ +======+==============+=====================+==========+=======================================+
+ | 0 | phy2phy_cont | 23749000.000 | 0 | 'TRAFFICGEN_PKT_SIZES': [64] |
+ +------+--------------+---------------------+----------+---------------------------------------+
+ | 1 | phy2phy_cont | 16850500.000 | -29.048 | 'TRAFFICGEN_PKT_SIZES': [128] |
+ +------+--------------+---------------------+----------+---------------------------------------+
+
+
Code change verification by pylint
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/docs/testing/user/userguide/trafficcapture.rst b/docs/testing/user/userguide/trafficcapture.rst
new file mode 100644
index 00000000..8a224dcb
--- /dev/null
+++ b/docs/testing/user/userguide/trafficcapture.rst
@@ -0,0 +1,297 @@
+Traffic Capture
+---------------
+
+The ability to capture traffic at multiple points of the system is crucial to
+many of the functional tests. It allows the verification of functionality for
+both the vSwitch and the NICs using hardware acceleration for packet
+manipulation and modification.
+
+There are three different methods of traffic capture supported by VSPERF.
+Detailed descriptions of these methods as well as their pros and cons can be
+found in the following chapters.
+
+Traffic Capture inside of a VM
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This method uses the standard PVP scenario, in which the vSwitch first processes
+and modifies the packet before forwarding it to the VM. Inside the VM we
+capture the traffic using **tcpdump** or a similar technique. The captured
+information is then used to verify the expected modifications to the packet done
+by the vSwitch.
+
+.. code-block:: console
+
+ _
+ +--------------------------------------------------+ |
+ | | |
+ | +------------------------------------------+ | |
+ | | Traffic capture and Packet Forwarding | | |
+ | +------------------------------------------+ | |
+ | ^ : | |
+ | | | | | Guest
+ | : v | |
+ | +---------------+ +---------------+ | |
+ | | logical port 0| | logical port 1| | |
+ +---+---------------+----------+---------------+---+ _|
+ ^ :
+ | |
+ : v _
+ +---+---------------+----------+---------------+---+ |
+ | | logical port 0| | logical port 1| | |
+ | +---------------+ +---------------+ | |
+ | ^ : | |
+ | | | | | Host
+ | : v | |
+ | +--------------+ +--------------+ | |
+ | | phy port | vSwitch | phy port | | |
+ +---+--------------+------------+--------------+---+ _|
+ ^ :
+ | |
+ : v
+ +--------------------------------------------------+
+ | |
+ | traffic generator |
+ | |
+ +--------------------------------------------------+
+
+PROS:
+
+- supports testing with all traffic generators
+- easy to use and implement into test
+- allows testing hardware offloading on the ingress side
+
+CONS:
+
+- does not allow testing hardware offloading on the egress side
+
+An example of a Traffic Capture in VM test:
+
+.. code-block:: python
+
+    # Capture Example 1 - Traffic capture inside VM (PVP scenario)
+    # This TestCase will modify the VLAN ID set by the traffic generator to a new value.
+    # Correct VLAN ID setting is verified by inspection of captured frames.
+ {
+ Name: capture_pvp_modify_vid,
+ Deployment: pvp,
+ Description: Test and verify VLAN ID modification by Open vSwitch,
+ Parameters : {
+ VSWITCH : OvsDpdkVhost, # works also for Vanilla OVS
+ TRAFFICGEN_DURATION : 5,
+ TRAFFIC : {
+ traffic_type : rfc2544_continuous,
+ frame_rate : 100,
+ 'vlan': {
+ 'enabled': True,
+ 'id': 8,
+ 'priority': 1,
+ 'cfi': 0,
+ },
+ },
+ GUEST_LOOPBACK : ['linux_bridge'],
+ },
+ TestSteps: [
+ # replace original flows with vlan ID modification
+ ['!vswitch', 'add_flow', '$VSWITCH_BRIDGE_NAME', {'in_port': '1', 'actions': ['mod_vlan_vid:4','output:3']}],
+ ['!vswitch', 'add_flow', '$VSWITCH_BRIDGE_NAME', {'in_port': '2', 'actions': ['mod_vlan_vid:4','output:4']}],
+ ['vswitch', 'dump_flows', '$VSWITCH_BRIDGE_NAME'],
+ # verify that received frames have modified vlan ID
+ ['VNF0', 'execute_and_wait', 'tcpdump -i eth0 -c 5 -w dump.pcap vlan 4 &'],
+ ['trafficgen', 'send_traffic',{}],
+ ['!VNF0', 'execute_and_wait', 'tcpdump -qer dump.pcap vlan 4 2>/dev/null | wc -l','|^(\d+)$'],
+ ['tools', 'assert', '#STEP[-1][0] == 5'],
+ ],
+ },
+
+Traffic Capture for testing NICs with HW offloading/acceleration
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The NIC with hardware acceleration/offloading is inserted as an additional card
+into the server. Two ports on this card are then connected together using
+a patch cable as shown in the diagram. Only a single port of the tested NIC is
+set up with DPDK acceleration, while the other is handled by the Linux IP stack,
+allowing for traffic capture. The two NICs are then connected by the vSwitch so the
+original card can forward the processed packets to the traffic generator. The
+ports handled by the Linux IP stack allow for capturing packets, which are then
+analyzed for changes done by both the vSwitch and the NIC with hardware
+acceleration.
+
+.. code-block:: console
+
+ _
+ +------------------------------------------------+ |
+ | | |
+ | +----------------------------------------+ | |
+ | | vSwitch | | |
+ | | +----------------------------------+ | | |
+ | | | | | | |
+ | | | +------------------+ | | | |
+ | | | | | v | | |
+ | +----------------------------------------+ | | Device under Test
+ | ^ | ^ | | |
+ | | | | | | |
+ | | v | v | |
+ | +--------------+ +--------------+ | |
+ | | | | NIC w HW acc | | |
+ | | phy ports | | phy ports | | |
+ +---+--------------+----------+--------------+---+ _|
+ ^ : ^ :
+ | | | |
+ | | +-------+
+ : v Patch Cable
+ +------------------------------------------------+
+ | |
+ | traffic generator |
+ | |
+ +------------------------------------------------+
+
+PROS:
+
+- allows testing hardware offloading on both the ingress and egress side
+- supports testing with all traffic generators
+- relatively easy to use and implement into tests
+
+CONS:
+
+- a more complex setup with two cards
+- if the tested card only has one port, an additional card is needed
+
+An example of a Traffic Capture test for NICs with HW offloading:
+
+.. code-block:: python
+
+    # Capture Example 2 - Setup with 2 NICs, where traffic is captured after it is
+    # processed by the NIC under test (2nd NIC). See documentation for further details.
+    # This TestCase will strip VLAN headers from traffic sent by the traffic generator.
+    # The removal of VLAN headers is verified by inspection of captured frames.
+    #
+    # NOTE: This setup expects a DUT with two NICs with two ports each. The first NIC is
+    # connected to the traffic generator (standard VSPERF setup). Ports of the second NIC
+    # are interconnected by a patch cable. PCI addresses of all four ports have to be
+    # properly configured in the WHITELIST_NICS parameter.
+ {
+ Name: capture_p2p2p_strip_vlan_ovs,
+ Deployment: clean,
+ Description: P2P Continuous Stream,
+ Parameters : {
+ _CAPTURE_P2P2P_OVS_ACTION : 'strip_vlan',
+ TRAFFIC : {
+ bidir : False,
+ traffic_type : rfc2544_continuous,
+ frame_rate : 100,
+ 'l2': {
+ 'srcmac': ca:fe:00:00:00:00,
+ 'dstmac': 00:00:00:00:00:01
+ },
+ 'vlan': {
+ 'enabled': True,
+ 'id': 8,
+ 'priority': 1,
+ 'cfi': 0,
+ },
+ },
+ # suppress DPDK configuration, so physical interfaces are not bound to DPDK driver
+ 'WHITELIST_NICS' : [],
+ 'NICS' : [],
+ },
+ TestSteps: _CAPTURE_P2P2P_SETUP + [
+        # capture traffic after processing by the NIC under test (after possible egress HW offloading)
+ ['tools', 'exec_shell_background', 'tcpdump -i [2][device] -c 5 -w capture.pcap '
+ 'ether src [l2][srcmac]'],
+ ['trafficgen', 'send_traffic', {}],
+ ['vswitch', 'dump_flows', '$VSWITCH_BRIDGE_NAME'],
+ ['vswitch', 'dump_flows', 'br1'],
+ # there must be 5 captured frames...
+ ['tools', 'exec_shell', 'tcpdump -r capture.pcap | wc -l', '|^(\d+)$'],
+ ['tools', 'assert', '#STEP[-1][0] == 5'],
+ # ...but no vlan headers
+ ['tools', 'exec_shell', 'tcpdump -r capture.pcap vlan | wc -l', '|^(\d+)$'],
+ ['tools', 'assert', '#STEP[-1][0] == 0'],
+ ],
+ },
+
+
+Traffic Capture on the Traffic Generator
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Using the functionality of the traffic generator makes it possible to configure
+Traffic Capture on both of its ports. With Traffic Capture enabled, VSPERF
+instructs the traffic generator to automatically export captured data into
+a pcap file. The captured packets are then sent to VSPERF for analysis and
+verification, monitoring any changes done by both the vSwitch and the NICs.
+
+VSPERF currently only supports this functionality with the **T-Rex** generator.
+
+.. code-block:: console
+
+ _
+ +--------------------------------------------------+ |
+ | | |
+ | +--------------------------+ | |
+ | | | | |
+ | | v | | Host
+ | +--------------+ +--------------+ | |
+ | | phy port | vSwitch | phy port | | |
+ +---+--------------+------------+--------------+---+ _|
+ ^ :
+ | |
+ : v
+ +--------------------------------------------------+
+ | |
+ | traffic generator |
+ | |
+ +--------------------------------------------------+
+
+PROS:
+
+- allows testing hardware offloading on both the ingress and egress side
+- does not require an additional NIC
+
+CONS:
+
+- currently only supported by **T-Rex** traffic generator
+
+An example of a Traffic Capture on the Traffic Generator test:
+
+.. code-block:: python
+
+
+    # Capture Example 3 - Traffic capture by the traffic generator.
+    # This TestCase uses an OVS flow to add a VLAN tag with the given ID to every
+    # frame sent by the traffic generator. Correct frame modification is verified by
+    # inspection of the packet capture received by T-Rex.
+ {
+ Name: capture_p2p_add_vlan_ovs_trex,
+ Deployment: clean,
+ Description: OVS: Test VLAN tag modification and verify it by traffic capture,
+ vSwitch : OvsDpdkVhost, # works also for Vanilla OVS
+ Parameters : {
+ TRAFFICGEN : Trex,
+ TRAFFICGEN_DURATION : 5,
+ TRAFFIC : {
+ traffic_type : rfc2544_continuous,
+ frame_rate : 100,
+ # enable capture of five RX frames
+ 'capture': {
+ 'enabled': True,
+ 'tx_ports' : [],
+ 'rx_ports' : [1],
+ 'count' : 5,
+ },
+ },
+ },
+ TestSteps : STEP_VSWITCH_P2P_INIT + [
+ # replace standard L2 flows by flows, which will add VLAN tag with ID 3
+ ['!vswitch', 'add_flow', 'int_br0', {'in_port': '1', 'actions': ['mod_vlan_vid:3','output:2']}],
+ ['!vswitch', 'add_flow', 'int_br0', {'in_port': '2', 'actions': ['mod_vlan_vid:3','output:1']}],
+ ['vswitch', 'dump_flows', 'int_br0'],
+ ['trafficgen', 'send_traffic', {}],
+ ['trafficgen', 'get_results'],
+ # verify that captured frames have vlan tag with ID 3
+ ['tools', 'exec_shell', 'tcpdump -qer /#STEP[-1][0][capture_rx] vlan 3 '
+ '2>/dev/null | wc -l', '|^(\d+)$'],
+ # number of received frames with expected VLAN id must match the number of captured frames
+ ['tools', 'assert', '#STEP[-1][0] == 5'],
+ ] + STEP_VSWITCH_P2P_FINIT,
+ },
+
diff --git a/docs/xtesting/index.rst b/docs/xtesting/index.rst
new file mode 100644
index 00000000..9259a12a
--- /dev/null
+++ b/docs/xtesting/index.rst
@@ -0,0 +1,85 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) OPNFV, Spirent, AT&T, Ixia and others.
+
+.. OPNFV VSPERF Documentation master file.
+
+********************************
+OPNFV VSPERF with OPNFV Xtesting
+********************************
+
+============
+Introduction
+============
+Users can use VSPERF with Xtesting for two different usecases:
+
+1. Baremetal Dataplane Testing/Benchmarking.
+2. Openstack Dataplane Testing/Benchmarking.
+
+The Baremetal usecase is the legacy usecase of OPNFV VSPERF.
+
+The figure below summarizes both usecases.
+
+.. image:: ./vsperf-xtesting.png
+ :width: 400
+
+===========
+How to Use?
+===========
+
+Step-1: Build the container
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Go to the xtesting/baremetal or xtesting/openstack directory and run the following command.
+
+.. code-block:: console
+
+ docker build -t 127.0.0.1:5000/vsperfbm .
+
+
+Step-2: Install and run Xtesting Playbook
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+These commands are described in the OPNFV Xtesting documentation. Please refer to the OPNFV Xtesting wiki for a description of these commands.
+
+.. code-block:: console
+
+ virtualenv xtesting
+ . xtesting/bin/activate
+ ansible-galaxy install collivier.xtesting
+ ansible-playbook site.yml
+
+======================
+Accessing the Results?
+======================
+
+VSPERF automatically publishes the results to any OPNFV Testapi deployment.
+The user has to configure the following two parameters in VSPERF:
+
+1. OPNFVPOD - The name of the pod.
+2. OPNFV_URL - The endpoint serving testapi.
+
+As Xtesting runs its own testapi, the user should point to it (the testapi endpoint
+of Xtesting) using the above two configurations.
+
+The above two configurations should be set wherever VSPERF is running (refer to the
+figure above).
+
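+For example, these two parameters could be set in the custom configuration file
+(a sketch; the pod name and endpoint URL below are illustrative only):
+
+.. code-block:: console
+
+ OPNFVPOD = 'pod-name'
+ OPNFV_URL = 'http://<xtesting-host>:8000/api/v1'
+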
+NOTE: Before running the test, it helps if the user prepares the testapi of Xtesting
+(if needed). The preparation includes setting up the following:
+
+1. Projects
+2. Testcases
+3. Pods
+
+Please refer to the documentation of testapi for more details.
+
+=======================================
+Accessing other components of Xtesting?
+=======================================
+
+Please refer to the documentation of Xtesting in OPNFV Wiki.
+
+===========
+Limitations
+===========
+For the Jerma release, the following limitations apply:
+
+1. For both baremetal and openstack, only the phy2phy_tput testcase is supported.
+2. For openstack, only Spirent's STCv and Keysight's Ixnet-Virtual are supported.
diff --git a/docs/xtesting/vsperf-xtesting.png b/docs/xtesting/vsperf-xtesting.png
new file mode 100755
index 00000000..64cad722
--- /dev/null
+++ b/docs/xtesting/vsperf-xtesting.png
Binary files differ
diff --git a/pods/__init__.py b/pods/__init__.py
new file mode 100644
index 00000000..e3ce18d9
--- /dev/null
+++ b/pods/__init__.py
@@ -0,0 +1,19 @@
+# Copyright 2020 Spirent Communications
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Package for POD wrappers for use with VSPERF.
+
+This package contains an interface the VSPERF core uses for controlling
+PODs and POD-specific implementation modules of this interface.
+"""
diff --git a/pods/papi/__init__.py b/pods/papi/__init__.py
new file mode 100644
index 00000000..16760b86
--- /dev/null
+++ b/pods/papi/__init__.py
@@ -0,0 +1,19 @@
+# Copyright 2020 Spirent Communications
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Package for POD wrappers for use with VSPERF.
+
+This package contains an implementation of the interface the VSPERF core
+uses for controlling PODs using Kubernetes Python-API (PAPI)
+"""
diff --git a/pods/papi/papi.py b/pods/papi/papi.py
new file mode 100644
index 00000000..5a21f1d6
--- /dev/null
+++ b/pods/papi/papi.py
@@ -0,0 +1,143 @@
+# Copyright 2020 University Of Delhi.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""
+Automation of Pod Deployment with Kubernetes Python API
+"""
+
+import logging
+import json
+import time
+import yaml
+from kubernetes import client, config
+from kubernetes.client.rest import ApiException
+
+from conf import settings as S
+from pods.pod.pod import IPod
+
+class Papi(IPod):
+ """
+ Class for controlling the pod through PAPI
+ """
+
+ def __init__(self):
+ """
+ Initialisation function.
+ """
+ super().__init__()
+
+ self._logger = logging.getLogger(__name__)
+ self._sriov_config = None
+ self._sriov_config_ns = None
+ config.load_kube_config(S.getValue('K8S_CONFIG_FILEPATH'))
+
+ def create(self):
+ """
+ Creation Process
+ """
+        # POD namespace; creation of a dedicated 'vswitchperf' namespace is
+        # currently disabled and the 'default' namespace is used instead
+        api = client.CoreV1Api()
+        namespace = 'default'
+        #namespace = 'vswitchperf'
+        # replace_namespace(api, namespace)
+
+ # sriov configmap
+ if S.getValue('PLUGIN') == 'sriov':
+ configmap = load_manifest(S.getValue('CONFIGMAP_FILEPATH'))
+ self._sriov_config = configmap['metadata']['name']
+ self._sriov_config_ns = configmap['metadata']['namespace']
+ api.create_namespaced_config_map(self._sriov_config_ns, configmap)
+
+
+        # create NADs (network attachment definitions)
+ group = 'k8s.cni.cncf.io'
+ version = 'v1'
+ kind_plural = 'network-attachment-definitions'
+ api = client.CustomObjectsApi()
+
+ for nad_filepath in S.getValue('NETWORK_ATTACHMENT_FILEPATH'):
+ nad_manifest = load_manifest(nad_filepath)
+
+ try:
+ response = api.create_namespaced_custom_object(group, version, namespace,
+ kind_plural, nad_manifest)
+ self._logger.info(str(response))
+ self._logger.info("Created Network Attachment Definition: %s", nad_filepath)
+ except ApiException as err:
+ raise Exception from err
+
+ #create pod workloads
+ pod_manifest = load_manifest(S.getValue('POD_MANIFEST_FILEPATH'))
+ api = client.CoreV1Api()
+
+ try:
+ response = api.create_namespaced_pod(namespace, pod_manifest)
+ self._logger.info(str(response))
+ self._logger.info("Created POD %d ...", self._number)
+ except ApiException as err:
+ raise Exception from err
+
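+        # give the pod some time to reach the Running state before the test continues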
+ time.sleep(12)
+
+ def terminate(self):
+ """
+ Cleanup Process
+ """
+ #self._logger.info(self._log_prefix + "Cleaning vswitchperf namespace")
+ self._logger.info("Terminating Pod")
+ api = client.CoreV1Api()
+ # api.delete_namespace(name="vswitchperf", body=client.V1DeleteOptions())
+
+ if S.getValue('PLUGIN') == 'sriov':
+ api.delete_namespaced_config_map(self._sriov_config, self._sriov_config_ns)
+
+
+def load_manifest(filepath):
+    """
+    Reads a k8s manifest file (JSON or YAML) and returns the parsed
+    resource definition
+
+    :param str filepath: filename of the k8s manifest file to read
+
+    :return: k8s resource definition as a dictionary
+    """
+ with open(filepath) as handle:
+ data = handle.read()
+
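+    # try to parse the manifest as JSON first and fall back to YAML
+    # if the content is not valid JSON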
+ try:
+ manifest = json.loads(data)
+ except ValueError:
+ try:
+ manifest = yaml.safe_load(data)
+ except yaml.parser.ParserError as err:
+ raise Exception from err
+
+ return manifest
+
+def replace_namespace(api, namespace):
+    """
+    Re-create the given namespace: delete it if it already exists,
+    then create it
+    """
+ namespaces = api.list_namespace()
+ for nsi in namespaces.items:
+ if namespace == nsi.metadata.name:
+ api.delete_namespace(name=namespace,
+ body=client.V1DeleteOptions())
+ break
+
+ time.sleep(0.5)
+ api.create_namespace(client.V1Namespace(
+ metadata=client.V1ObjectMeta(name=namespace)))
diff --git a/pods/pod/__init__.py b/pods/pod/__init__.py
new file mode 100644
index 00000000..b91706e2
--- /dev/null
+++ b/pods/pod/__init__.py
@@ -0,0 +1,18 @@
+# Copyright 2020 Spirent Communications
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""POD interface and helpers.
+"""
+
+import pods
diff --git a/pods/pod/pod.py b/pods/pod/pod.py
new file mode 100644
index 00000000..c25744d2
--- /dev/null
+++ b/pods/pod/pod.py
@@ -0,0 +1,63 @@
+# Copyright 2020 Spirent Communications, University Of Delhi.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Interface for POD
+"""
+
+import logging
+
+from tools import tasks
+
+class IPod(tasks.Process):
+ """
+ Interface for POD
+
+ Inheriting from Process helps in managing system process.
+ execute a command, wait, kill, etc.
+ """
+ _number_pods = 0
+
+    def __init__(self):
+        """
+        Initialization Method
+        """
+        self._logger = logging.getLogger(__name__)
+        self._number = IPod._number_pods
+        self._logger.debug('Initializing pod %d (index %d)',
+                           self._number + 1, self._number)
+        IPod._number_pods = IPod._number_pods + 1
+        self._log_prefix = 'pod_%d_cmd : ' % self._number
+
+ def create(self):
+ """
+ Start the Pod
+ """
+ raise NotImplementedError()
+
+
+ def terminate(self):
+ """
+ Stop the Pod
+ """
+ raise NotImplementedError()
+
+ @staticmethod
+ def reset_pod_counter():
+ """
+ Reset internal POD counter
+
+ This method is static
+ """
+ IPod._number_pods = 0
diff --git a/pylintrc b/pylintrc
index d35114e1..3e7e9645 100644
--- a/pylintrc
+++ b/pylintrc
@@ -76,7 +76,7 @@ confidence=
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use"--disable=all --enable=classes
# --disable=W"
-disable=E1602,E1603,E1601,E1606,E1607,E1604,E1605,E1608,W0401,W1604,W1605,W1606,W1607,W1601,W1602,W1603,W1622,W1623,W1620,W1621,W1608,W1609,W1624,W1625,W1618,W1626,W1627,I0021,I0020,W0704,R0903,W1613,W1638,W1611,W1610,W1617,W1616,W1615,W1614,W1630,W1619,W1632,W1635,W1634,W1637,W1636,W1639,W1612,W1628,W1633,W1629,I0011,W1640
+disable=E1602,E1603,E1601,E1606,E1607,E1604,E1605,E1608,W0401,W1604,W1605,W1606,W1607,W1601,W1602,W1603,W1622,W1623,W1620,W1621,W1608,W1609,W1624,W1625,W1618,W1626,W1627,I0021,I0020,W0704,R0903,W1613,W1638,W1611,W1610,W1617,W1616,W1615,W1614,W1630,W1619,W1632,W1635,W1634,W1637,W1636,W1639,W1612,W1628,W1633,W1629,I0011,W1640,R1705
[REPORTS]
diff --git a/requirements.txt b/requirements.txt
index 33bee1bf..a50569dd 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,4 @@
-# Copyright (c) 2015-2017 Intel corporation.
+# Copyright (c) 2015-2018 Intel corporation, Spirent Communications
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
@@ -9,8 +9,34 @@ pexpect==3.3
tox==1.8.1
jinja2==2.7.3
xmlrunner==1.7.7
-requests==2.8.1
+requests>=2.14.2
netaddr==0.7.18
scapy-python3==0.18
-pyzmq==14.5.0
+pylint==1.8.2
distro
+stcrestclient
+matplotlib
+numpy
+pycrypto
+tabulate
+pypsi
+paramiko
+keystoneauth1>=2.18.0
+os-client-config>=1.22.0
+oslo.concurrency>=3.8.0
+oslo.config>=3.14.0
+oslo.log>=3.11.0
+oslo.serialization>=1.10.0
+oslo.utils>=3.18.0
+pygal
+pykwalify
+python-glanceclient>=2.5.0
+python-neutronclient>=5.1.0
+python-novaclient>=7.1.0
+python-heatclient>=1.6.1
+python-subunit>=0.0.18
+PyYAML>=3.10.0
+pyzmq>=16.0
+six>=1.9.0
+timeout-decorator>=0.4.0
+kubernetes
diff --git a/src/__init__.py b/src/__init__.py
index 9293b4f8..c784ea3c 100644
--- a/src/__init__.py
+++ b/src/__init__.py
@@ -18,4 +18,3 @@ No functionality is expected for this package and its purpose is just to
keep Python package structure intact without extra requirements for
PYTHONPATH.
"""
-
diff --git a/src/dpdk/Makefile b/src/dpdk/Makefile
index 4b4330d7..1a1521db 100755
--- a/src/dpdk/Makefile
+++ b/src/dpdk/Makefile
@@ -82,13 +82,13 @@ endif
# CentOS 7.3 specific config changes to compile
ifeq ($(ID),"centos")
ifeq ($(VERSION_ID),"7")
- $(AT)sed -i.bak s@'SRCS-y += ethtool/igb/igb_main.c'@'#SRCS-y += ethtool/igb/igb_main.c'@g $(WORK_DIR)/lib/librte_eal/linuxapp/kni/Makefile
+ $(AT)sed -i.bak s@'SRCS-y += ethtool/igb/igb_main.c'@'#SRCS-y += ethtool/igb/igb_main.c'@g $(WORK_DIR)/kernel/linux/kni/Makefile
endif
endif
# RHEL 7.3 specific config changes to compile
ifeq ($(ID),"rhel")
ifeq ($(VERSION_ID),"7.3")
- $(AT)sed -i.bak s@'SRCS-y += ethtool/igb/igb_main.c'@'#SRCS-y += ethtool/igb/igb_main.c'@g $(WORK_DIR)/lib/librte_eal/linuxapp/kni/Makefile
+ $(AT)sed -i.bak s@'SRCS-y += ethtool/igb/igb_main.c'@'#SRCS-y += ethtool/igb/igb_main.c'@g $(WORK_DIR)/kernel/linux/kni/Makefile
endif
endif
$(AT)sed -i -e 's/CONFIG_RTE_LIBRTE_VHOST=./CONFIG_RTE_LIBRTE_VHOST=y/g' $(CONFIG_FILE_LINUXAPP)
diff --git a/src/dpdk/dpdk.py b/src/dpdk/dpdk.py
index 2f120129..c2e656ef 100644
--- a/src/dpdk/dpdk.py
+++ b/src/dpdk/dpdk.py
@@ -140,7 +140,7 @@ def _vhost_user_cleanup():
def _bind_nics():
"""Bind NICs using the bind tool specified in the configuration.
"""
- if not len(_NICS_PCI):
+ if not _NICS_PCI:
_LOGGER.info('NICs are not configured - nothing to bind')
return
try:
@@ -171,7 +171,7 @@ def _bind_nics():
def _unbind_nics():
"""Unbind NICs using the bind tool specified in the configuration.
"""
- if not len(_NICS_PCI):
+ if not _NICS_PCI:
_LOGGER.info('NICs are not configured - nothing to unbind')
return
try:
diff --git a/src/dpdk/testpmd_proc.py b/src/dpdk/testpmd_proc.py
index a8fa8eee..b89bcec2 100644
--- a/src/dpdk/testpmd_proc.py
+++ b/src/dpdk/testpmd_proc.py
@@ -27,8 +27,12 @@ from tools import tasks
_TESTPMD_PROMPT = 'Done'
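+# the log file name is suffixed with the LOG_TIMESTAMP value, so logs from
+# different test runs do not overwrite each other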
+_NAME, _EXT = os.path.splitext(settings.getValue('LOG_FILE_VSWITCHD'))
_LOG_FILE_VSWITCHD = os.path.join(
- settings.getValue('LOG_DIR'), settings.getValue('LOG_FILE_VSWITCHD'))
+ settings.getValue('LOG_DIR'),
+ ("{name}_{uid}{ex}".format(name=_NAME, uid=settings.getValue(
+ 'LOG_TIMESTAMP'), ex=_EXT)))
+
class TestPMDProcess(tasks.Process):
"""Class wrapper for controlling a TestPMD instance.
diff --git a/src/ovs/dpctl.py b/src/ovs/dpctl.py
index 015fb38c..5030223e 100644
--- a/src/ovs/dpctl.py
+++ b/src/ovs/dpctl.py
@@ -60,5 +60,5 @@ class DPCtl(object):
:return: None
"""
- self.logger.debug('delete datapath ' + dp_name)
+ self.logger.debug('delete datapath %s', dp_name)
self.run_dpctl(['del-dp', dp_name])
diff --git a/src/ovs/ofctl.py b/src/ovs/ofctl.py
index 64d54466..21da850a 100644
--- a/src/ovs/ofctl.py
+++ b/src/ovs/ofctl.py
@@ -25,10 +25,10 @@ import re
import netaddr
from tools import tasks
-from conf import settings
+from conf import settings as S
-_OVS_BRIDGE_NAME = settings.getValue('VSWITCH_BRIDGE_NAME')
-_OVS_CMD_TIMEOUT = settings.getValue('OVS_CMD_TIMEOUT')
+_OVS_BRIDGE_NAME = S.getValue('VSWITCH_BRIDGE_NAME')
+_OVS_CMD_TIMEOUT = S.getValue('OVS_CMD_TIMEOUT')
_CACHE_FILE_NAME = '/tmp/vsperf_flows_cache'
@@ -62,9 +62,11 @@ class OFBase(object):
:return: None
"""
if self.timeout == -1:
- cmd = ['sudo', settings.getValue('TOOLS')['ovs-vsctl'], '--no-wait'] + args
+ cmd = ['sudo', S.getValue('TOOLS')['ovs-vsctl'], '--no-wait'] + \
+ S.getValue('OVS_VSCTL_ARGS') + args
else:
- cmd = ['sudo', settings.getValue('TOOLS')['ovs-vsctl'], '--timeout', str(self.timeout)] + args
+ cmd = ['sudo', S.getValue('TOOLS')['ovs-vsctl'], '--timeout',
+ str(self.timeout)] + S.getValue('OVS_VSCTL_ARGS') + args
return tasks.run_task(
cmd, self.logger, 'Running ovs-vsctl...', check_error)
@@ -77,9 +79,9 @@ class OFBase(object):
:return: None
"""
- cmd = ['sudo', settings.getValue('TOOLS')['ovs-appctl'],
+ cmd = ['sudo', S.getValue('TOOLS')['ovs-appctl'],
'--timeout',
- str(self.timeout)] + args
+ str(self.timeout)] + S.getValue('OVS_APPCTL_ARGS') + args
return tasks.run_task(
cmd, self.logger, 'Running ovs-appctl...', check_error)
@@ -154,21 +156,6 @@ class OFBridge(OFBase):
self._ports = {}
self._cache_file = None
- # context manager
-
- def __enter__(self):
- """Create datapath
-
- :returns: self
- """
- return self
-
- def __exit__(self, type_, value, traceback):
- """Remove datapath.
- """
- if not traceback:
- self.destroy()
-
# helpers
def run_ofctl(self, args, check_error=False, timeout=None):
@@ -180,8 +167,8 @@ class OFBridge(OFBase):
:return: None
"""
tmp_timeout = self.timeout if timeout is None else timeout
- cmd = ['sudo', settings.getValue('TOOLS')['ovs-ofctl'], '-O',
- 'OpenFlow13', '--timeout', str(tmp_timeout)] + args
+ cmd = ['sudo', S.getValue('TOOLS')['ovs-ofctl'], '--timeout',
+ str(tmp_timeout)] + S.getValue('OVS_OFCTL_ARGS') + args
return tasks.run_task(
cmd, self.logger, 'Running ovs-ofctl...', check_error)
@@ -467,4 +454,4 @@ def flow_match(flow_dump, flow_src):
for rule in flow_src_list:
if rule in flow_dump_list:
flow_src_ctrl.remove(rule)
- return True if not len(flow_src_ctrl) else False
+ return True if not flow_src_ctrl else False
diff --git a/src/package-list.mk b/src/package-list.mk
index 7b82ee6f..1e40a60d 100644
--- a/src/package-list.mk
+++ b/src/package-list.mk
@@ -13,20 +13,20 @@
# dpdk section
# DPDK_URL ?= git://dpdk.org/dpdk
DPDK_URL ?= http://dpdk.org/git/dpdk
-DPDK_TAG ?= v17.08
+DPDK_TAG ?= v18.11-rc2
# OVS section
OVS_URL ?= https://github.com/openvswitch/ovs
-OVS_TAG ?= v2.8.1
+OVS_TAG ?= v2.12.0
# VPP section
VPP_URL ?= https://git.fd.io/vpp
-VPP_TAG ?= v17.07
+VPP_TAG ?= v19.08.1
# QEMU section
QEMU_URL ?= https://github.com/qemu/qemu.git
-QEMU_TAG ?= v2.9.1
+QEMU_TAG ?= v3.1.1
# TREX section
TREX_URL ?= https://github.com/cisco-system-traffic-generator/trex-core.git
-TREX_TAG ?= 8bf9c16556843e55c232b64d9a5061bf588fad42
+TREX_TAG ?= v2.86
diff --git a/src/trex/Makefile b/src/trex/Makefile
index 41eb52ab..fd5c47bb 100644
--- a/src/trex/Makefile
+++ b/src/trex/Makefile
@@ -29,6 +29,8 @@ all: force_pull
force_pull: $(TAG_DONE_FLAG)
$(AT)cd $(WORK_DIR) && git pull $(TREX_URL) $(TREX_TAG)
@echo "git pull done"
+# $(AT)wget https://raw.githubusercontent.com/phaethon/scapy/v0.18/scapy/layers/all.py -O $(WORK_DIR)/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/all.py
+# @echo "original SCAPY 2.3.1 layers/all.py was restored"
$(WORK_DIR):
$(AT)git clone $(TREX_URL) $(WORK_DIR)
diff --git a/systems/README.md b/systems/README.md
index d72aae65..ca6557ea 100644
--- a/systems/README.md
+++ b/systems/README.md
@@ -12,3 +12,7 @@ On a freshly built system, run the following with a super user privilege
or with password less sudo access.
./build_base_machine.sh
+
+If you want to use vsperf in trafficgen-mode ONLY, then add the parameter `trafficgen`:
+
+./build_base_machine.sh trafficgen
diff --git a/systems/build_base_machine.sh b/systems/build_base_machine.sh
index 59712b96..37b74ffe 100755
--- a/systems/build_base_machine.sh
+++ b/systems/build_base_machine.sh
@@ -68,15 +68,30 @@ else
die "$distro_dir is not yet supported"
fi
-if [ ! -d /lib/modules/`uname -r`/build ] ; then
- die "Kernel devel is not available for active kernel. It can be caused by recent kernel update. Please reboot and run $0 again."
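+# when a parameter is passed (e.g. trafficgen), the kernel devel check is skipped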
+if [ $# -eq 0 ]; then
+ echo "No parameters provided - continuing with Lib checking"
+ if [ ! -d /lib/modules/`uname -r`/build ] ; then
+ die "Kernel devel is not available for active kernel. It can be caused by recent kernel update. Please reboot and run $0 again."
+ fi
fi
-# download and compile DPDK, OVS and QEMU
-if [ -f ../src/Makefile ] ; then
- cd ../src
- make || die "Make failed"
- cd -
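+# without a parameter, download and compile DPDK, OVS and QEMU;
+# with a parameter, build only T-Rex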
+if [ $# -eq 0 ]; then
+ echo "No parameters provided - continuing with SRC Download and Compile"
+ # download and compile DPDK, OVS and QEMU
+ if [ -f ../src/Makefile ] ; then
+ cd ../src
+ make || die "Make failed"
+ cd -
+ else
+ die "Make failed; No Makefile"
+ fi
else
- die "Make failed; No Makefile"
+ echo "Downloading and compiling only T-Rex"
+ if [ -f ../src/trex/Makefile ]; then
+ cd ../src/trex/
+ make || die "Make failed"
+ cd -
+ else
+ die "Make failed; No Makefile"
+ fi
fi
diff --git a/systems/centos/build_base_machine.sh b/systems/centos/build_base_machine.sh
index f2efb541..0e1ed830 100755
--- a/systems/centos/build_base_machine.sh
+++ b/systems/centos/build_base_machine.sh
@@ -46,6 +46,7 @@ automake
pciutils
cifs-utils
sysstat
+sshpass
# libs
libpcap-devel
@@ -60,6 +61,8 @@ pixman-devel
socat
numactl
numactl-devel
+libpng-devel
+freetype-devel
# install gvim
vim-X11
@@ -68,13 +71,13 @@ vim-X11
epel-release
" | grep -v ^#)
-# install SCL for python33
-sudo yum -y install centos-release-scl
+# install SCL for python34
+sudo yum -y install centos-release-scl-rh
-# install python33 packages and git-review tool
+# install python34 packages and git-review tool
yum -y install $(echo "
-python33
-python33-python-tkinter
+rh-python36
+rh-python36-python-tkinter
git-review
" | grep -v ^#)
# prevent ovs vanilla from building from source due to kernel incompatibilities
diff --git a/systems/centos/prepare_python_env.sh b/systems/centos/prepare_python_env.sh
index 8bce53cc..4f5c0065 100755
--- a/systems/centos/prepare_python_env.sh
+++ b/systems/centos/prepare_python_env.sh
@@ -21,9 +21,8 @@ if [ -d "$VSPERFENV_DIR" ] ; then
exit
fi
-scl enable python33 "
-virtualenv "$VSPERFENV_DIR" --python /usr/bin/python3
+scl enable rh-python36 "
+virtualenv "$VSPERFENV_DIR" --python /opt/rh/rh-python36/root/usr/bin/python3
source "$VSPERFENV_DIR"/bin/activate
pip install -r ../requirements.txt
-pip install pylint
"
diff --git a/systems/debian/build_base_machine.sh b/systems/debian/build_base_machine.sh
new file mode 100755
index 00000000..cc3f1eb8
--- /dev/null
+++ b/systems/debian/build_base_machine.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+#
+# Build a base machine for Debian style distro
+#
+# Copyright 2020 OPNFV
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Contributors:
+# Sridhar K. N. Rao Spirent Communications
+
+# This is meant to be used only for Containerized VSPERF.
+
+# Synchronize package index files
+apt-get -y update
+apt-get -y install curl
+apt-get -y install git
+apt-get -y install wget
+apt-get -y install python3-venv
+
+# Make and Compilers
+apt-get -y install make
+apt-get -y install automake
+apt-get -y install gcc
+apt-get -y install g++
+apt-get -y install libssl1.1
+apt-get -y install libxml2
+apt-get -y install zlib1g-dev
+apt-get -y install scapy
diff --git a/systems/debian/prepare_python_env.sh b/systems/debian/prepare_python_env.sh
new file mode 100755
index 00000000..7c3b530b
--- /dev/null
+++ b/systems/debian/prepare_python_env.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+#
+# Prepare Python environment for vsperf execution on Debian systems
+#
+# Copyright 2020 OPNFV, Spirent Communications
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [ -d "$VSPERFENV_DIR" ] ; then
+ echo "Directory $VSPERFENV_DIR already exists. Skipping python virtualenv creation."
+ exit
+fi
+
+# enable virtual environment in a subshell
+
+(python3 -m venv "$VSPERFENV_DIR"
+source "$VSPERFENV_DIR"/bin/activate
+pip install -r ../requirements.txt)
diff --git a/systems/fedora/24/build_base_machine.sh b/systems/fedora/24/build_base_machine.sh
index 2aceb34f..bbde9b77 100644
--- a/systems/fedora/24/build_base_machine.sh
+++ b/systems/fedora/24/build_base_machine.sh
@@ -48,6 +48,7 @@ pciutils
cifs-utils
socat
sysstat
+sshpass
# install python packages
python3
diff --git a/systems/fedora/24/prepare_python_env.sh b/systems/fedora/24/prepare_python_env.sh
index 920604c2..bc93f703 100644
--- a/systems/fedora/24/prepare_python_env.sh
+++ b/systems/fedora/24/prepare_python_env.sh
@@ -25,5 +25,4 @@ fi
(virtualenv-3.5 "$VSPERFENV_DIR" --python /usr/bin/python3
source "$VSPERFENV_DIR"/bin/activate
-pip install -r ../requirements.txt
-pip install pylint)
+pip install -r ../requirements.txt)
diff --git a/systems/fedora/25/build_base_machine.sh b/systems/fedora/25/build_base_machine.sh
index 241b79cb..df8ae620 100644
--- a/systems/fedora/25/build_base_machine.sh
+++ b/systems/fedora/25/build_base_machine.sh
@@ -48,6 +48,7 @@ pciutils
cifs-utils
socat
sysstat
+sshpass
# install python packages
python3
diff --git a/systems/fedora/25/prepare_python_env.sh b/systems/fedora/25/prepare_python_env.sh
index c4613ca4..b5df11ee 100644
--- a/systems/fedora/25/prepare_python_env.sh
+++ b/systems/fedora/25/prepare_python_env.sh
@@ -26,5 +26,4 @@ fi
(virtualenv-3.5 "$VSPERFENV_DIR" --python /usr/bin/python3
source "$VSPERFENV_DIR"/bin/activate
pip install -r ../requirements.txt
-pip install pylint
pip install six)
diff --git a/systems/fedora/26/build_base_machine.sh b/systems/fedora/26/build_base_machine.sh
index b8b65965..84c0695b 100644
--- a/systems/fedora/26/build_base_machine.sh
+++ b/systems/fedora/26/build_base_machine.sh
@@ -48,6 +48,7 @@ pciutils
cifs-utils
socat
sysstat
+sshpass
# install python packages
python3
diff --git a/systems/fedora/26/prepare_python_env.sh b/systems/fedora/26/prepare_python_env.sh
index 05eedfd9..4db2eaed 100644
--- a/systems/fedora/26/prepare_python_env.sh
+++ b/systems/fedora/26/prepare_python_env.sh
@@ -26,5 +26,4 @@ fi
(virtualenv-3.6 "$VSPERFENV_DIR" --python /usr/bin/python3
source "$VSPERFENV_DIR"/bin/activate
pip install -r ../requirements.txt
-pip install pylint
pip install six)
diff --git a/systems/opensuse/42.2/build_base_machine.sh b/systems/opensuse/42.2/build_base_machine.sh
index 44d6f02b..9915b634 100755
--- a/systems/opensuse/42.2/build_base_machine.sh
+++ b/systems/opensuse/42.2/build_base_machine.sh
@@ -49,6 +49,7 @@ socat
sysstat
java-1_8_0-openjdk
git-review
+sshpass
# python
python3
diff --git a/systems/opensuse/42.2/prepare_python_env.sh b/systems/opensuse/42.2/prepare_python_env.sh
index ab668ca4..a563740a 100755
--- a/systems/opensuse/42.2/prepare_python_env.sh
+++ b/systems/opensuse/42.2/prepare_python_env.sh
@@ -24,5 +24,4 @@ fi
virtualenv "$VSPERFENV_DIR" --python /usr/bin/python3
source "$VSPERFENV_DIR"/bin/activate
pip install -r ../requirements.txt
-pip install pylint
diff --git a/systems/opensuse/42.3/build_base_machine.sh b/systems/opensuse/42.3/build_base_machine.sh
index cc9f24ef..2124e6cb 100755
--- a/systems/opensuse/42.3/build_base_machine.sh
+++ b/systems/opensuse/42.3/build_base_machine.sh
@@ -50,6 +50,7 @@ sysstat
java-1_8_0-openjdk
git-review
mlocate
+sshpass
# python
python3
diff --git a/systems/opensuse/42.3/prepare_python_env.sh b/systems/opensuse/42.3/prepare_python_env.sh
index ab668ca4..a563740a 100755
--- a/systems/opensuse/42.3/prepare_python_env.sh
+++ b/systems/opensuse/42.3/prepare_python_env.sh
@@ -24,5 +24,4 @@ fi
virtualenv "$VSPERFENV_DIR" --python /usr/bin/python3
source "$VSPERFENV_DIR"/bin/activate
pip install -r ../requirements.txt
-pip install pylint
diff --git a/systems/opensuse/prepare_python_env.sh b/systems/opensuse/prepare_python_env.sh
index 69871670..6ac196f0 100755
--- a/systems/opensuse/prepare_python_env.sh
+++ b/systems/opensuse/prepare_python_env.sh
@@ -24,5 +24,4 @@ fi
virtualenv "$VSPERFENV_DIR" --python /usr/bin/python3
source "$VSPERFENV_DIR"/bin/activate
pip install -r ../requirements.txt
-pip install pylint
diff --git a/systems/rhel/7.2/build_base_machine.sh b/systems/rhel/7.2/build_base_machine.sh
index 9eb8bbd2..c0f367ab 100755
--- a/systems/rhel/7.2/build_base_machine.sh
+++ b/systems/rhel/7.2/build_base_machine.sh
@@ -1,6 +1,6 @@
#!/bin/bash
#
-# Build a base machine for RHEL 7.2
+# Build a base machine for RHEL 7.3
#
# Copyright 2016 OPNFV, Intel Corporation & Red Hat Inc.
#
@@ -52,6 +52,8 @@ pkglist=(
wget\
numactl\
numactl-devel\
+ libpng-devel\
+ sshpass\
)
# python tools for proper QEMU, DPDK, and OVS make
@@ -60,6 +62,9 @@ pkglist=(
python-six\
)
+# install RHEL compatible epel for sshpass
+yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
+
# Iterate installing each package. If packages fail to install, record those
# packages and exit with an error message on completion. Customer may need to
# add repo locations and subscription levels.
@@ -78,28 +83,24 @@ if [ "${#failedinstall[*]}" -gt 0 ]; then
exit 1
fi
-# install SCL for python33 by adding a repo to find its location to install it
-cat <<'EOT' >> /etc/yum.repos.d/python33.repo
-[rhscl-python33-el7]
-name=Copr repo for python33-el7 owned by rhscl
-baseurl=https://copr-be.cloud.fedoraproject.org/results/rhscl/python33-el7/epel-7-$basearch/
-type=rpm-md
-skip_if_unavailable=True
-gpgcheck=1
-gpgkey=https://copr-be.cloud.fedoraproject.org/results/rhscl/python33-el7/pubkey.gpg
-repo_gpgcheck=0
+# install SCL for python34 by adding a repo to find its location to install it
+cat <<'EOT' >> /etc/yum.repos.d/python34.repo
+[centos-sclo-rh]
+name=CentOS-7 - SCLo rh
+baseurl=http://mirror.centos.org/centos/7/sclo/$basearch/rh/
+gpgcheck=0
enabled=1
-enabled_metadata=1
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-SCLo
EOT
-# install python33 packages and git-review tool
+# install python36 packages and git-review tool
yum -y install $(echo "
-python33
-python33-python-tkinter
+rh-python36
+rh-python36-python-tkinter
" | grep -v ^#)
-# cleanup python 33 repo file
-rm -f /etc/yum.repos.d/python33.repo
+# cleanup python 36 repo file
+rm -f /etc/yum.repos.d/python36.repo
# Create hugepage dirs
mkdir -p /dev/hugepages
diff --git a/systems/rhel/7.2/prepare_python_env.sh b/systems/rhel/7.2/prepare_python_env.sh
index fb5882f1..b7506568 100755
--- a/systems/rhel/7.2/prepare_python_env.sh
+++ b/systems/rhel/7.2/prepare_python_env.sh
@@ -1,6 +1,6 @@
#!/bin/bash
#
-# Prepare Python environment for vsperf execution on Red Hat 7.2 systems.
+# Prepare Python environment for vsperf execution on RHEL 7.3 systems.
#
# Copyright 2016-2017 OPNFV, Intel Corporation, Red Hat Inc.
#
@@ -21,9 +21,8 @@ if [ -d "$VSPERFENV_DIR" ] ; then
exit
fi
-scl enable python33 "
-virtualenv "$VSPERFENV_DIR" --python /opt/rh/python33/root/usr/bin/python3
+scl enable rh-python36 "
+virtualenv "$VSPERFENV_DIR" --python /opt/rh/rh-python36/root/usr/bin/python3
source "$VSPERFENV_DIR"/bin/activate
pip install -r ../requirements.txt
-pip install pylint
-" \ No newline at end of file
+"
diff --git a/systems/rhel/7.3/build_base_machine.sh b/systems/rhel/7.3/build_base_machine.sh
index 5a9b4b2e..42c36e4c 100755
--- a/systems/rhel/7.3/build_base_machine.sh
+++ b/systems/rhel/7.3/build_base_machine.sh
@@ -52,6 +52,8 @@ pkglist=(
wget\
numactl\
numactl-devel\
+ libpng-devel\
+ sshpass\
)
# python tools for proper QEMU, DPDK, and OVS make
@@ -60,6 +62,9 @@ pkglist=(
python-six\
)
+# install the RHEL-compatible EPEL repo (provides sshpass)
+yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
+
# Iterate installing each package. If packages fail to install, record those
# packages and exit with an error message on completion. Customer may need to
# add repo locations and subscription levels.
@@ -78,31 +83,27 @@ if [ "${#failedinstall[*]}" -gt 0 ]; then
exit 1
fi
-# install SCL for python33 by adding a repo to find its location to install it
-cat <<'EOT' >> /etc/yum.repos.d/python33.repo
-[rhscl-python33-el7]
-name=Copr repo for python33-el7 owned by rhscl
-baseurl=https://copr-be.cloud.fedoraproject.org/results/rhscl/python33-el7/epel-7-$basearch/
-type=rpm-md
-skip_if_unavailable=True
-gpgcheck=1
-gpgkey=https://copr-be.cloud.fedoraproject.org/results/rhscl/python33-el7/pubkey.gpg
-repo_gpgcheck=0
+# install the SCL repo so that rh-python36 can be installed
+cat <<'EOT' >> /etc/yum.repos.d/python36.repo
+[centos-sclo-rh]
+name=CentOS-7 - SCLo rh
+baseurl=http://mirror.centos.org/centos/7/sclo/$basearch/rh/
+gpgcheck=0
enabled=1
-enabled_metadata=1
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-SCLo
EOT
-# install python33 packages and git-review tool
+# install python36 packages and git-review tool
yum -y install $(echo "
-python33
-python33-python-tkinter
+rh-python36
+rh-python36-python-tkinter
" | grep -v ^#)
-# cleanup python 33 repo file
-rm -f /etc/yum.repos.d/python33.repo
+# cleanup python 36 repo file
+rm -f /etc/yum.repos.d/python36.repo
# Create hugepage dirs
mkdir -p /dev/hugepages
# prevent ovs vanilla from building from source due to kernel incompatibilities
-sed -i s/'SUBBUILDS = src_vanilla'/'#SUBBUILDS = src_vanilla'/ ../src/Makefile \ No newline at end of file
+sed -i s/'SUBBUILDS = src_vanilla'/'#SUBBUILDS = src_vanilla'/ ../src/Makefile
diff --git a/systems/rhel/7.3/prepare_python_env.sh b/systems/rhel/7.3/prepare_python_env.sh
index b573bb9f..b7506568 100755
--- a/systems/rhel/7.3/prepare_python_env.sh
+++ b/systems/rhel/7.3/prepare_python_env.sh
@@ -21,9 +21,8 @@ if [ -d "$VSPERFENV_DIR" ] ; then
exit
fi
-scl enable python33 "
-virtualenv "$VSPERFENV_DIR" --python /opt/rh/python33/root/usr/bin/python3
+scl enable rh-python36 "
+virtualenv "$VSPERFENV_DIR" --python /opt/rh/rh-python36/root/usr/bin/python3
source "$VSPERFENV_DIR"/bin/activate
pip install -r ../requirements.txt
-pip install pylint
-" \ No newline at end of file
+"
diff --git a/systems/rhel/7.5/build_base_machine.sh b/systems/rhel/7.5/build_base_machine.sh
new file mode 100755
index 00000000..deb4e8a2
--- /dev/null
+++ b/systems/rhel/7.5/build_base_machine.sh
@@ -0,0 +1,111 @@
+#!/bin/bash
+#
+# Build a base machine for RHEL 7.5
+#
+# Copyright 2016 OPNFV, Intel Corporation & Red Hat Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Contributors:
+# Aihua Li, Huawei Technologies.
+# Martin Klozik, Intel Corporation.
+# Abdul Halim, Intel Corporation.
+# Christian Trautman, Red Hat Inc.
+
+# Make and Compilers
+pkglist=(\
+ automake\
+ fuse-devel\
+ gcc\
+ gcc-c++\
+ glib2-devel\
+ glibc\
+ kernel-devel\
+ openssl-devel\
+ pixman-devel\
+ sysstat\
+)
+
+# Tools
+pkglist=(
+ "${pkglist[@]}"\
+ git\
+ libtool\
+ libpcap-devel\
+ libnet\
+ net-tools\
+ openssl\
+ openssl-devel\
+ pciutils\
+ socat\
+ tk-devel\
+ wget\
+ numactl\
+ numactl-devel\
+ libpng-devel\
+ sshpass\
+)
+
+# python tools for proper QEMU, DPDK, and OVS make
+pkglist=(
+ "${pkglist[@]}"\
+ python-six\
+)
+
+# install the RHEL-compatible EPEL repo (provides sshpass)
+yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
+
+# Iterate installing each package. If packages fail to install, record those
+# packages and exit with an error message on completion. Customer may need to
+# add repo locations and subscription levels.
+failedinstall=()
+for pkg in ${pkglist[*]}; do
+ echo "Installing ${pkg}"
+ yum -y install ${pkg} || failedinstall=("${failedinstall[*]}" "$pkg")
+done
+
+if [ "${#failedinstall[*]}" -gt 0 ]; then
+ echo "The following packages failed to install. Please add appropriate repo\
+ locations and/or subscription levels. Then run the build script again."
+ for fail in ${failedinstall[*]}; do
+ echo $fail
+ done
+ exit 1
+fi
+
+# install the SCL repo so that rh-python36 can be installed
+cat <<'EOT' >> /etc/yum.repos.d/python36.repo
+[centos-sclo-rh]
+name=CentOS-7 - SCLo rh
+baseurl=http://mirror.centos.org/centos/7/sclo/$basearch/rh/
+gpgcheck=0
+enabled=1
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-SCLo
+EOT
+
+# install python36 packages and git-review tool
+yum -y install $(echo "
+rh-python36
+rh-python36-python-tkinter
+" | grep -v ^#)
+
+# cleanup python 36 repo file
+rm -f /etc/yum.repos.d/python36.repo
+
+# Create hugepage dirs
+mkdir -p /dev/hugepages
+
+# prevent upstream from building from source due to kernel incompatibilities
+sed -i s/'SUBBUILDS = src_vanilla'/'#SUBBUILDS = src_vanilla'/ ../src/Makefile
+sed -i s/'SUBDIRS += dpdk'/'#SUBDIRS += dpdk'/ ../src/Makefile
+sed -i s/'SUBDIRS += ovs'/'#SUBDIRS += ovs'/ ../src/Makefile
diff --git a/systems/rhel/7.5/prepare_python_env.sh b/systems/rhel/7.5/prepare_python_env.sh
new file mode 100755
index 00000000..b7506568
--- /dev/null
+++ b/systems/rhel/7.5/prepare_python_env.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+#
+# Prepare Python environment for vsperf execution on RHEL 7.5 systems.
+#
+# Copyright 2016-2017 OPNFV, Intel Corporation, Red Hat Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [ -d "$VSPERFENV_DIR" ] ; then
+ echo "Directory $VSPERFENV_DIR already exists. Skipping python virtualenv creation."
+ exit
+fi
+
+scl enable rh-python36 "
+virtualenv "$VSPERFENV_DIR" --python /opt/rh/rh-python36/root/usr/bin/python3
+source "$VSPERFENV_DIR"/bin/activate
+pip install -r ../requirements.txt
+"
diff --git a/systems/sles/15/build_base_machine.sh b/systems/sles/15/build_base_machine.sh
index 9c161dd7..166fe649 100755
--- a/systems/sles/15/build_base_machine.sh
+++ b/systems/sles/15/build_base_machine.sh
@@ -20,6 +20,7 @@
# Jose Lausuch, SUSE LINUX GmbH
zypper -q -n dup
+zypper ar -G http://download.opensuse.org/repositories/openSUSE:/Backports:/SLE-15/standard/ backports
zypper -q -n in -y $(echo "
# compiler, tools and dependencies
make
@@ -32,7 +33,6 @@ fuse
fuse-devel
glib2-devel
zlib-devel
-ncurses-devel
kernel-default
kernel-default-devel
pkg-config
@@ -47,7 +47,7 @@ pciutils
cifs-utils
socat
sysstat
-java-9-openjdk
+java-10-openjdk
mlocate
# python
@@ -65,14 +65,16 @@ libpixman-1-0-devel
libtool
libpcap-devel
libnet9
-libncurses5
+libncurses6
libcurl4
libcurl-devel
libxml2
libfuse2
-libopenssl1_1_0
+libopenssl1_1
libopenssl-devel
libpython3_6m1_0
+libzmq5
+sshpass
" | grep -v ^#)
diff --git a/systems/sles/15/prepare_python_env.sh b/systems/sles/15/prepare_python_env.sh
index 12ada3cc..18b0e545 100755
--- a/systems/sles/15/prepare_python_env.sh
+++ b/systems/sles/15/prepare_python_env.sh
@@ -24,5 +24,4 @@ fi
virtualenv "$VSPERFENV_DIR" --python /usr/bin/python3
source "$VSPERFENV_DIR"/bin/activate
pip install -r ../requirements.txt
-pip install pylint
diff --git a/systems/ubuntu/14.04/build_base_machine.sh b/systems/ubuntu/14.04/build_base_machine.sh
index 04f4a7e1..5501cab2 100755
--- a/systems/ubuntu/14.04/build_base_machine.sh
+++ b/systems/ubuntu/14.04/build_base_machine.sh
@@ -62,6 +62,7 @@ socat
libpixman-1-0
libpixman-1-dev
sysstat
+sshpass
# Java runtime environment: Required for Ixia TclClient
default-jre
@@ -74,7 +75,7 @@ python3-setuptools
python3-dbus
python3-dev
python3-tk
-libpython3.4
+libpython3.6
python3-reportlab
# libs
diff --git a/systems/ubuntu/14.04/prepare_python_env.sh b/systems/ubuntu/14.04/prepare_python_env.sh
index 4c98dc42..f3d92dce 100755
--- a/systems/ubuntu/14.04/prepare_python_env.sh
+++ b/systems/ubuntu/14.04/prepare_python_env.sh
@@ -25,5 +25,4 @@ fi
(virtualenv "$VSPERFENV_DIR" --python /usr/bin/python3
source "$VSPERFENV_DIR"/bin/activate
-pip install -r ../requirements.txt
-pip install pylint)
+pip install -r ../requirements.txt)
diff --git a/systems/ubuntu/build_base_machine.sh b/systems/ubuntu/build_base_machine.sh
index 1b42a790..2f3e7b25 100755
--- a/systems/ubuntu/build_base_machine.sh
+++ b/systems/ubuntu/build_base_machine.sh
@@ -46,6 +46,7 @@ apt-get -y install libglib2.0
apt-get -y install libfuse-dev
apt-get -y install libnuma1
apt-get -y install libnuma-dev
+apt-get -y install sshpass
# Some useful tools you may optionally install
#apt-get -y install ctags
diff --git a/testcases/__init__.py b/testcases/__init__.py
index 0b6b77e4..736be883 100644
--- a/testcases/__init__.py
+++ b/testcases/__init__.py
@@ -17,3 +17,4 @@
from testcases.testcase import (TestCase)
from testcases.performance import (PerformanceTestCase)
from testcases.integration import (IntegrationTestCase)
+from testcases.k8s_performance import (K8sPerformanceTestCase)
diff --git a/testcases/integration.py b/testcases/integration.py
index f87a8ee2..8cfe5af5 100644
--- a/testcases/integration.py
+++ b/testcases/integration.py
@@ -17,7 +17,7 @@
import logging
from collections import OrderedDict
-from testcases import TestCase
+from testcases.testcase import TestCase
class IntegrationTestCase(TestCase):
"""IntegrationTestCase class
diff --git a/testcases/k8s_performance.py b/testcases/k8s_performance.py
new file mode 100644
index 00000000..3c31430c
--- /dev/null
+++ b/testcases/k8s_performance.py
@@ -0,0 +1,39 @@
+# Copyright 2015-2017 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PerformanceTestCase class
+"""
+
+import logging
+
+from testcases.testcase import TestCase
+#from tools.report import report
+
+class K8sPerformanceTestCase(TestCase):
+ """K8sPerformanceTestCase class
+
+ In this basic form it runs an RFC2544 throughput test.
+ """
+ def __init__(self, cfg):
+ """ Testcase initialization
+ """
+ self._type = 'k8s_performance'
+ super().__init__(cfg)
+ self._logger = logging.getLogger(__name__)
+ self._k8s = True
+
+ def run_report(self):
+ pass
+ #super().run_report()
+ #if self._tc_results:
+ # report.generate(self)
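
A K8s performance test is instantiated like any other TestCase subclass, from a
testcase definition dictionary. A minimal, hypothetical sketch (only keys shown
being read in testcases/testcase.py are used; a real definition carries more,
such as a test name, and the values here are illustrative, not a shipped test):

    from testcases import K8sPerformanceTestCase

    cfg = {
        'Deployment': 'p2p',               # hypothetical deployment label
        'vSwitch': 'OvsDpdkVhost',
        'Trafficgen': 'Trex',
        'Parameters': {'TRAFFIC': {'traffic_type': 'rfc2544_throughput'}},
    }
    tc = K8sPerformanceTestCase(cfg)
    tc.run()   # with self._k8s True, run() adds connections instead of flows
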
diff --git a/testcases/performance.py b/testcases/performance.py
index a82b5d1c..1b67911e 100644
--- a/testcases/performance.py
+++ b/testcases/performance.py
@@ -16,7 +16,7 @@
import logging
-from testcases import TestCase
+from testcases.testcase import TestCase
from tools.report import report
class PerformanceTestCase(TestCase):
diff --git a/testcases/testcase.py b/testcases/testcase.py
index 991c2890..51d212b4 100644
--- a/testcases/testcase.py
+++ b/testcases/testcase.py
@@ -1,4 +1,4 @@
-# Copyright 2015-2017 Intel Corporation.
+# Copyright 2015-2018 Intel Corporation, Tieto and others.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -26,7 +26,7 @@ import subprocess
from datetime import datetime as dt
from conf import settings as S
-from conf import get_test_param, merge_spec
+from conf import merge_spec
import core.component_factory as component_factory
from core.loader import Loader
from core.results.results_constants import ResultsConstants
@@ -36,9 +36,19 @@ from tools import functions
from tools import namespace
from tools import veth
from tools.teststepstools import TestStepsTools
+from tools.llc_management import rmd
+# Validation methods required for integration TCs will have the following prefix before the
+# name of the original method.
CHECK_PREFIX = 'validate_'
+# Several parameters can be defined both by TC definition keywords and by configuration
+# parameters. The following mapping table is used to evaluate testcase configuration
+# priority correctly: TC definition keywords (i.e. mapping table keys) have higher priority
+# than the corresponding TC parameters (i.e. mapping table values). TC parameters can be
+# defined within the "Parameters" section, via CLI parameters or within configuration files.
+MAPPING_TC_CFG2CONF = {'vSwitch':'VSWITCH', 'VNF':'VNF', 'Trafficgen':'TRAFFICGEN', 'Tunnel Type':'TUNNEL_TYPE'}
+
# pylint: disable=too-many-instance-attributes
class TestCase(object):
"""TestCase base class
@@ -63,6 +73,8 @@ class TestCase(object):
self._hugepages_mounted = False
self._traffic_ctl = None
self._vnf_ctl = None
+ self._pod_ctl = None
+ self._pod_list = None
self._vswitch_ctl = None
self._collector = None
self._loadgen = None
@@ -71,6 +83,7 @@ class TestCase(object):
self._settings_paths_modified = False
self._testcast_run_time = None
self._versions = []
+ self._k8s = False
# initialization of step driven specific members
self._step_check = False # by default don't check result for step driven testcases
self._step_vnf_list = {}
@@ -84,9 +97,16 @@ class TestCase(object):
S.setValue('VSWITCH', cfg.get('vSwitch', S.getValue('VSWITCH')))
S.setValue('VNF', cfg.get('VNF', S.getValue('VNF')))
S.setValue('TRAFFICGEN', cfg.get('Trafficgen', S.getValue('TRAFFICGEN')))
+ S.setValue('TUNNEL_TYPE', cfg.get('Tunnel Type', S.getValue('TUNNEL_TYPE')))
test_params = copy.deepcopy(S.getValue('TEST_PARAMS'))
tc_test_params = cfg.get('Parameters', S.getValue('TEST_PARAMS'))
test_params = merge_spec(test_params, tc_test_params)
+
+ # ensure that parameters from TC definition have the highest priority, see MAPPING_TC_CFG2CONF
+ for (cfg_param, param) in MAPPING_TC_CFG2CONF.items():
+ if cfg_param in cfg and param in test_params:
+ del test_params[param]
+
S.setValue('TEST_PARAMS', test_params)
S.check_test_params()
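
The override above is easiest to see with concrete values. A minimal sketch of
the same priority rule, using a hypothetical clash between a TC definition
keyword and a TEST_PARAMS entry:

    MAPPING_TC_CFG2CONF = {'vSwitch': 'VSWITCH', 'VNF': 'VNF',
                           'Trafficgen': 'TRAFFICGEN', 'Tunnel Type': 'TUNNEL_TYPE'}

    cfg = {'vSwitch': 'OvsDpdkVhost'}          # TC definition keyword
    test_params = {'VSWITCH': 'OvsVanilla', 'TRAFFICGEN_DURATION': 30}

    # TC definition keywords win, so the conflicting TEST_PARAMS entry is dropped
    for cfg_param, param in MAPPING_TC_CFG2CONF.items():
        if cfg_param in cfg and param in test_params:
            del test_params[param]

    print(test_params)   # {'TRAFFICGEN_DURATION': 30}
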
@@ -124,16 +144,7 @@ class TestCase(object):
self.deployment = cfg['Deployment']
self._frame_mod = cfg.get('Frame Modification', None)
- self._tunnel_type = None
- self._tunnel_operation = None
-
- if self.deployment == 'op2p':
- self._tunnel_operation = cfg['Tunnel Operation']
-
- if 'Tunnel Type' in cfg:
- self._tunnel_type = cfg['Tunnel Type']
- self._tunnel_type = get_test_param('TUNNEL_TYPE',
- self._tunnel_type)
+ self._tunnel_operation = cfg.get('Tunnel Operation', None)
# check if test requires background load and which generator it uses
self._load_cfg = cfg.get('Load', None)
@@ -144,16 +155,14 @@ class TestCase(object):
# set traffic details, so they can be passed to vswitch and traffic ctls
self._traffic = copy.deepcopy(S.getValue('TRAFFIC'))
- self._traffic.update({'bidir': bidirectional,
- 'tunnel_type': self._tunnel_type,})
-
- self._traffic = functions.check_traffic(self._traffic)
+ self._traffic.update({'bidir': bidirectional})
# Packet Forwarding mode
self._vswitch_none = str(S.getValue('VSWITCH')).strip().lower() == 'none'
# trafficgen configuration required for tests of tunneling protocols
- if self.deployment == "op2p":
+ if self._tunnel_operation:
+ self._traffic.update({'tunnel_type': S.getValue('TUNNEL_TYPE')})
self._traffic['l2'].update({'srcmac':
S.getValue('TRAFFICGEN_PORT1_MAC'),
'dstmac':
@@ -165,9 +174,9 @@ class TestCase(object):
S.getValue('TRAFFICGEN_PORT2_IP')})
if self._tunnel_operation == "decapsulation":
- self._traffic['l2'] = S.getValue(self._tunnel_type.upper() + '_FRAME_L2')
- self._traffic['l3'] = S.getValue(self._tunnel_type.upper() + '_FRAME_L3')
- self._traffic['l4'] = S.getValue(self._tunnel_type.upper() + '_FRAME_L4')
+ self._traffic['l2'].update(S.getValue(S.getValue('TUNNEL_TYPE').upper() + '_FRAME_L2'))
+ self._traffic['l3'].update(S.getValue(S.getValue('TUNNEL_TYPE').upper() + '_FRAME_L3'))
+ self._traffic['l4'].update(S.getValue(S.getValue('TUNNEL_TYPE').upper() + '_FRAME_L4'))
self._traffic['l2']['dstmac'] = S.getValue('NICS')[1]['mac']
elif len(S.getValue('NICS')) >= 2 and \
(S.getValue('NICS')[0]['type'] == 'vf' or
@@ -179,12 +188,18 @@ class TestCase(object):
else:
self._logger.debug("MAC addresses can not be read")
+ self._traffic = functions.check_traffic(self._traffic)
+
# count how many VNFs are involved in TestSteps
if self.test:
for step in self.test:
if step[0].startswith('vnf'):
self._step_vnf_list[step[0]] = None
+ # if llc allocation is required, initialize it.
+ if S.getValue('LLC_ALLOCATION'):
+ self._rmd = rmd.CacheAllocator()
+
def run_initialize(self):
""" Prepare test execution environment
"""
@@ -204,6 +219,12 @@ class TestCase(object):
self._vnf_list = self._vnf_ctl.get_vnfs()
+ self._pod_ctl = component_factory.create_pod(
+ self.deployment,
+ loader.get_pod_class())
+
+ self._pod_list = self._pod_ctl.get_pods()
+
# verify enough hugepages are free to run the testcase
if not self._check_for_enough_hugepages():
raise RuntimeError('Not enough hugepages free to run test.')
@@ -252,11 +273,15 @@ class TestCase(object):
loader.get_loadgen_class(),
self._load_cfg)
- self._output_file = os.path.join(self._results_dir, "result_" + self.name +
- "_" + self.deployment + ".csv")
+ self._output_file = os.path.join(self._results_dir, "result_{}_{}_{}.csv".format(
+ str(S.getValue('_TEST_INDEX')), self.name, self.deployment))
self._step_status = {'status' : True, 'details' : ''}
+ # Perform LLC-allocations
+ if S.getValue('LLC_ALLOCATION'):
+ self._rmd.setup_llc_allocation()
+
self._logger.debug("Setup:")
def run_finalize(self):
@@ -265,6 +290,14 @@ class TestCase(object):
# Stop all VNFs started by TestSteps in case that something went wrong
self.step_stop_vnfs()
+ if self._k8s:
+ self._pod_ctl.stop()
+
+
+ # Cleanup any LLC-allocations
+ if S.getValue('LLC_ALLOCATION'):
+ self._rmd.cleanup_llc_allocation()
+
# Stop all processes executed by testcase
tasks.terminate_all_tasks(self._logger)
@@ -274,7 +307,7 @@ class TestCase(object):
# cleanup any namespaces created
if os.path.isdir('/tmp/namespaces'):
namespace_list = os.listdir('/tmp/namespaces')
- if len(namespace_list):
+ if namespace_list:
self._logger.info('Cleaning up namespaces')
for name in namespace_list:
namespace.delete_namespace(name)
@@ -282,7 +315,7 @@ class TestCase(object):
# cleanup any veth ports created
if os.path.isdir('/tmp/veth'):
veth_list = os.listdir('/tmp/veth')
- if len(veth_list):
+ if veth_list:
self._logger.info('Cleaning up veth ports')
for eth in veth_list:
port1, port2 = eth.split('-')
@@ -309,8 +342,8 @@ class TestCase(object):
if len(self._tc_results) < len(results):
if len(self._tc_results) > 1:
raise RuntimeError('Testcase results do not match:'
- 'results: {}\n'
- 'trafficgen results: {}\n',
+ 'results: %s\n'
+ 'trafficgen results: %s\n' %
self._tc_results,
results)
else:
@@ -330,15 +363,18 @@ class TestCase(object):
"""Run the test
All setup and teardown through controllers is included.
+
"""
# prepare test execution environment
self.run_initialize()
try:
- with self._vswitch_ctl, self._loadgen:
- with self._vnf_ctl, self._collector:
- if not self._vswitch_none:
+ with self._vswitch_ctl:
+ with self._vnf_ctl, self._pod_ctl, self._collector, self._loadgen:
+ if not self._vswitch_none and not self._k8s:
self._add_flows()
+ if self._k8s:
+ self._add_connections()
self._versions += self._vswitch_ctl.get_vswitch().get_version()
@@ -353,7 +389,7 @@ class TestCase(object):
# dump vswitch flows before they are affected by VNF termination
if not self._vswitch_none:
- self._vswitch_ctl.dump_vswitch_flows()
+ self._vswitch_ctl.dump_vswitch_connections()
# garbage collection for case that TestSteps modify existing deployment
self.step_stop_vnfs()
@@ -366,7 +402,7 @@ class TestCase(object):
self._testcase_run_time = time.strftime("%H:%M:%S",
time.gmtime(self._testcase_stop_time -
self._testcase_start_time))
- logging.info("Testcase execution time: " + self._testcase_run_time)
+ logging.info("Testcase execution time: %s", self._testcase_run_time)
# report test results
self.run_report()
@@ -396,8 +432,8 @@ class TestCase(object):
item[ResultsConstants.SCAL_PRE_INSTALLED_FLOWS] = self._traffic['pre_installed_flows']
if self._vnf_ctl.get_vnfs_number():
item[ResultsConstants.GUEST_LOOPBACK] = ' '.join(S.getValue('GUEST_LOOPBACK'))
- if self._tunnel_type:
- item[ResultsConstants.TUNNEL_TYPE] = self._tunnel_type
+ if self._tunnel_operation:
+ item[ResultsConstants.TUNNEL_TYPE] = S.getValue('TUNNEL_TYPE')
return results
def _copy_fwd_tools_for_all_guests(self, vm_count):
@@ -549,7 +585,7 @@ class TestCase(object):
"""
with open(output, 'a') as csvfile:
- logging.info("Write results to file: " + output)
+ logging.info("Write results to file: %s", output)
fieldnames = TestCase._get_unique_keys(results)
writer = csv.DictWriter(csvfile, fieldnames)
@@ -575,6 +611,43 @@ class TestCase(object):
return list(result.keys())
+ def _add_connections(self):
+ """
+ Add connections for Kubernetes use cases
+ """
+ logging.info("Kubernetes: Adding Connections")
+ vswitch = self._vswitch_ctl.get_vswitch()
+ bridge = S.getValue('VSWITCH_BRIDGE_NAME')
+ if S.getValue('K8S') and 'sriov' not in S.getValue('PLUGIN'):
+ if 'Ovs' in S.getValue('VSWITCH'):
+ # Add OVS Flows
+ logging.info("Kubernetes: Adding OVS Connections")
+ flow = {'table':'0', 'in_port':'1',
+ 'idle_timeout':'0', 'actions': ['output:3']}
+ vswitch.add_flow(bridge, flow)
+ flow = {'table':'0', 'in_port':'3',
+ 'idle_timeout':'0', 'actions': ['output:1']}
+ vswitch.add_flow(bridge, flow)
+ flow = {'table':'0', 'in_port':'2',
+ 'idle_timeout':'0', 'actions': ['output:4']}
+ vswitch.add_flow(bridge, flow)
+ flow = {'table':'0', 'in_port':'4',
+ 'idle_timeout':'0', 'actions': ['output:2']}
+ vswitch.add_flow(bridge, flow)
+ elif 'vpp' in S.getValue('VSWITCH'):
+ phy_ports = vswitch.get_ports()
+ virt_port0 = 'memif1/0'
+ virt_port1 = 'memif2/0'
+ vswitch.add_connection(bridge, phy_ports[0],
+ virt_port0, None)
+ vswitch.add_connection(bridge, virt_port0,
+ phy_ports[0], None)
+ vswitch.add_connection(bridge, phy_ports[1],
+ virt_port1, None)
+ vswitch.add_connection(bridge, virt_port1,
+ phy_ports[1], None)
+
+
def _add_flows(self):
"""Add flows to the vswitch
"""
@@ -707,7 +780,7 @@ class TestCase(object):
self._logger.debug("Skipping %s as it isn't a configuration "
"parameter.", '${}'.format(macro[0]))
return param
- elif isinstance(param, list) or isinstance(param, tuple):
+ elif isinstance(param, (list, tuple)):
tmp_list = []
for item in param:
tmp_list.append(self.step_eval_param(item, step_result))
@@ -745,9 +818,6 @@ class TestCase(object):
# initialize list with results
self._step_result = [None] * len(self.test)
- # We have to suppress pylint report, because test_object has to be set according
- # to the test step definition
- # pylint: disable=redefined-variable-type
# run test step by step...
for i, step in enumerate(self.test):
step_ok = not self._step_check
diff --git a/tools/collectors/cadvisor/__init__.py b/tools/collectors/cadvisor/__init__.py
new file mode 100755
index 00000000..235ab875
--- /dev/null
+++ b/tools/collectors/cadvisor/__init__.py
@@ -0,0 +1,17 @@
+# Copyright 2020 University Of Delhi.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Wrapper for cAdvisor as a collector
+"""
diff --git a/tools/collectors/cadvisor/cadvisor.py b/tools/collectors/cadvisor/cadvisor.py
new file mode 100644
index 00000000..de48cecd
--- /dev/null
+++ b/tools/collectors/cadvisor/cadvisor.py
@@ -0,0 +1,218 @@
+# Copyright 2020 University Of Delhi.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Collects container metrics from cAdvisor.
+Sends metrics to influxDB and also stores results locally.
+"""
+
+import subprocess
+import logging
+import os
+from collections import OrderedDict
+
+from tools.collectors.collector import collector
+from tools import tasks
+from conf import settings
+
+
+
+# inherit from collector.ICollector.
+class Cadvisor(collector.ICollector):
+ """A collector of container metrics based on cAdvisor
+
+ It starts cadvisor and collects metrics.
+ """
+
+ def __init__(self, results_dir, test_name):
+ """
+ Initialize collection of statistics
+ """
+ self._logger = logging.getLogger(__name__)
+ self.resultsdir = results_dir
+ self.testname = test_name
+ self._pid = 0
+ self._results = OrderedDict()
+ self._log = os.path.join(results_dir,
+ settings.getValue('LOG_FILE_CADVISOR') +
+ '_' + test_name + '.log')
+ self._logfile = 0
+
+
+ def start(self):
+ """
+ Starts collection of statistics by cAdvisor and stores them
+ into:
+ 1. a file in the directory with test results
+ 2. the InfluxDB result container
+ """
+
+ # CMD options for cAdvisor
+ cmd = ['sudo', '/opt/cadvisor/cadvisor',
+ '-storage_driver='+settings.getValue('CADVISOR_STORAGE_DRIVER'),
+ '-storage_driver_host='+settings.getValue('CADVISOR_STORAGE_HOST'),
+ '-storage_driver_db='+settings.getValue('CADVISOR_DRIVER_DB'),
+ '-housekeeping_interval=0.5s',
+ '-storage_driver_buffer_duration=1s'
+ ]
+
+ self._logfile = open(self._log, 'a')
+
+ self._pid = subprocess.Popen(map(os.path.expanduser, cmd), stdout=self._logfile, bufsize=0)
+ self._logger.info('Starting cAdvisor')
+
+
+
+ def stop(self):
+ """
+ Stops collection of metrics by cAdvisor and stores statistic
+ summary for each monitored container into self._results dictionary
+ """
+ try:
+ subprocess.check_output(["pidof", "cadvisor"])
+ tasks.run_task(['sudo', 'pkill', '--signal', '2', 'cadvisor'],
+ self._logger, 'Stopping cAdvisor', True)
+ except subprocess.CalledProcessError:
+ self._logger.error('Failed to stop cAdvisor, maybe process does not exist')
+
+
+ self._logfile.close()
+ self._logger.info('cAdvisor log available at %s', self._log)
+
+ containers = settings.getValue('CADVISOR_CONTAINERS')
+ self._results = cadvisor_log_result(self._log, containers)
+
+
+ def get_results(self):
+ """Returns collected statistics.
+ """
+ return self._results
+
+ def print_results(self):
+ """Logs collected statistics.
+ """
+ for cnt in self._results:
+ logging.info("Container: %s", cnt)
+ for (key, value) in self._results[cnt].items():
+
+ postfix = ''
+
+ if key == 'cpu_cumulative_usage':
+ key = 'CPU_usage'
+ value = round(float(value) / 1000000000, 4)
+ postfix = '%'
+
+ if key in ['memory_usage', 'memory_working_set']:
+ value = round(float(value) / 1024 / 1024, 4)
+ postfix = 'MB'
+
+ if key in ['rx_bytes', 'tx_bytes']:
+ value = round(float(value) / 1024 / 1024, 4)
+ postfix = 'mBps'
+
+ logging.info(" Statistic: %s Value: %s %s",
+ str(key), str(value), postfix)
+
+
+def cadvisor_log_result(filename, containers):
+ """
+ Processes cAdvisor logfile and returns average results
+
+ :param filename: Name of cadvisor logfile
+ :param containers: List of container names
+
+ :returns: Result as average stats of Containers
+ """
+ result = OrderedDict()
+ previous = OrderedDict()
+ logfile = open(filename, 'r')
+ with logfile:
+ # for every line
+ for _, line in enumerate(logfile):
+ # skip lines having root '/' metrics
+ if line[0:7] == 'cName=/':
+ continue
+
+ # parse line into OrderedDict
+ tmp_res = parse_line(line)
+
+ cnt = tmp_res['cName']
+
+ # skip if cnt is not in container list
+ if cnt not in containers:
+ continue
+
+ # add metrics to result
+ if cnt not in result:
+ result[cnt] = tmp_res
+ previous[cnt] = tmp_res
+ result[cnt]['count'] = 1
+ else:
+ for field in tmp_res:
+
+ if field in ['rx_errors', 'tx_errors', 'memory_usage', 'memory_working_set']:
+ val = float(tmp_res[field])
+ elif field in ['cpu_cumulative_usage', 'rx_bytes', 'tx_bytes']:
+ val = float(tmp_res[field]) - float(previous[cnt][field])
+ else:
+ # discard remaining fields
+ try:
+ result[cnt].pop(field)
+ except KeyError:
+ continue
+ continue
+
+ result[cnt][field] = float(result[cnt][field]) + val
+
+ result[cnt]['count'] += 1
+ previous[cnt] = tmp_res
+
+ # calculate average results for containers
+ result = calculate_average(result)
+ return result
+
+
+def calculate_average(results):
+ """
+ Calculates average for container stats
+ """
+ for cnt in results:
+ for field in results[cnt]:
+ if field != 'count':
+ val = float(results[cnt][field])/results[cnt]['count']
+ results[cnt][field] = '{0:.2f}'.format(val)
+
+ results[cnt].pop('count')
+ # sort results
+ results[cnt] = OrderedDict(sorted(results[cnt].items()))
+
+ return results
+
+
+def parse_line(line):
+ """
+ Reads single line from cAdvisor logfile
+
+ :param line: single line as str
+
+ :returns: OrderedDict of line read
+ """
+ tmp_res = OrderedDict()
+ # split line into array of "key=value" metrics
+ metrics = line.split()
+ for metric in metrics:
+ key, value = metric.split('=')
+ tmp_res[key] = value
+
+ return tmp_res
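
parse_line() assumes each cAdvisor log line is a stream of space-separated
key=value pairs, one container sample per line. A usage sketch with a
hypothetical log line (the field names match those consumed by
cadvisor_log_result; the values are made up):

    line = 'cName=vsperf-pod cpu_cumulative_usage=120000 memory_usage=1048576 rx_bytes=2048'
    sample = parse_line(line)
    # OrderedDict([('cName', 'vsperf-pod'), ('cpu_cumulative_usage', '120000'), ...])
    assert sample['cName'] == 'vsperf-pod'
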
diff --git a/tools/collectors/collectd/__init__.py b/tools/collectors/collectd/__init__.py
new file mode 100755
index 00000000..25e2c3c2
--- /dev/null
+++ b/tools/collectors/collectd/__init__.py
@@ -0,0 +1,17 @@
+# Copyright 2017 Spirent Communications.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Wrapper for Collectd as a collector
+"""
diff --git a/tools/collectors/collectd/collectd.py b/tools/collectors/collectd/collectd.py
new file mode 100644
index 00000000..5e996d3a
--- /dev/null
+++ b/tools/collectors/collectd/collectd.py
@@ -0,0 +1,294 @@
+# Copyright 2017-2018 Spirent Communications.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Collects samples from collectd through collectd_bucky.
+Depending on the policy, it decides to keep or discard each sample.
+Plots the values of the stored samples once the test is completed.
+"""
+
+import copy
+import csv
+import glob
+import logging
+import multiprocessing
+import os
+from collections import OrderedDict
+import queue
+
+import matplotlib.pyplot as plt
+import numpy as np
+import tools.collectors.collectd.collectd_bucky as cb
+from tools.collectors.collector import collector
+from tools import tasks
+from conf import settings
+
+# The y-labels. Keys in this dictionary are used as y-axis labels.
+YLABELS = {'No/Of Packets': ['dropped', 'packets', 'if_octets', 'errors',
+ 'if_rx_octets', 'if_tx_octets'],
+ 'Jiffies': ['cputime'],
+ 'Bandwidth b/s': ['memory_bandwidth'],
+ 'Bytes': ['bytes.llc']}
+
+
+def get_label(sample):
+ """
+ Returns the y-label for the plot.
+ """
+ for label in YLABELS:
+ if any(r in sample for r in YLABELS[label]):
+ return label
+ return None
+
+
+def plot_graphs(dict_of_arrays):
+ """
+ Plot the values.
+ Store the data used for plotting.
+ """
+ i = 1
+ results_dir = settings.getValue('RESULTS_PATH')
+ for key in dict_of_arrays:
+ tup_list = dict_of_arrays[key]
+ two_lists = list(map(list, zip(*tup_list)))
+ y_axis_list = two_lists[0]
+ x_axis_list = two_lists[1]
+ if np.count_nonzero(y_axis_list) > 0:
+ with open(os.path.join(results_dir,
+ str(key) + '.data'), "w") as pfile:
+ writer = csv.writer(pfile, delimiter='\t')
+ writer.writerows(zip(x_axis_list, y_axis_list))
+ plt.figure(i)
+ plt.plot(x_axis_list, y_axis_list)
+ plt.xlabel("Time (Ticks)")
+ plt.ylabel(get_label(key))
+ plt.savefig(os.path.join(results_dir, str(key) + '.png'))
+ plt.cla()
+ plt.clf()
+ plt.close()
+ i = i + 1
+
+
+def get_results_to_print(dict_of_arrays):
+ """
+ Return a results dictionary for the report tool to
+ print the process statistics.
+ """
+ presults = OrderedDict()
+ results = OrderedDict()
+ for key in dict_of_arrays:
+ if ('processes' in key and
+ any(proc in key for proc in ['ovs', 'vpp', 'qemu'])):
+ reskey = '.'.join(key.split('.')[2:])
+ preskey = key.split('.')[1] + '_collectd'
+ tup_list = dict_of_arrays[key]
+ two_lists = list(map(list, zip(*tup_list)))
+ y_axis_list = two_lists[0]
+ mean = 0.0
+ if np.count_nonzero(y_axis_list) > 0:
+ mean = np.mean(y_axis_list)
+ results[reskey] = mean
+ presults[preskey] = results
+ return presults
+
+
+class Receiver(multiprocessing.Process):
+ """
+ Wrapper Receiver (of samples) class
+ """
+ def __init__(self, pd_dict, control):
+ """
+ Initialize.
+ A queue will be shared with collectd_bucky
+ """
+ super(Receiver, self).__init__()
+ self.daemon = False
+ self.q_of_samples = multiprocessing.Queue()
+ self.server = cb.get_collectd_server(self.q_of_samples)
+ self.control = control
+ self.pd_dict = pd_dict
+ self.collectd_cpu_keys = settings.getValue('COLLECTD_CPU_KEYS')
+ self.collectd_processes_keys = settings.getValue(
+ 'COLLECTD_PROCESSES_KEYS')
+ self.collectd_iface_keys = settings.getValue(
+ 'COLLECTD_INTERFACE_KEYS')
+ self.collectd_iface_xkeys = settings.getValue(
+ 'COLLECTD_INTERFACE_XKEYS')
+ self.collectd_intelrdt_keys = settings.getValue(
+ 'COLLECTD_INTELRDT_KEYS')
+ self.collectd_ovsstats_keys = settings.getValue(
+ 'COLLECTD_OVSSTAT_KEYS')
+ self.collectd_dpdkstats_keys = settings.getValue(
+ 'COLLECTD_DPDKSTAT_KEYS')
+ self.collectd_intelrdt_xkeys = settings.getValue(
+ 'COLLECTD_INTELRDT_XKEYS')
+ self.exclude_coreids = []
+ # Expand the ranges in the intelrdt-xkeys
+ for xkey in self.collectd_intelrdt_xkeys:
+ if '-' not in xkey:
+ self.exclude_coreids.append(int(xkey))
+ else:
+ left, right = map(int, xkey.split('-'))
+ self.exclude_coreids += range(left, right + 1)
+
+ def run(self):
+ """
+ Start receiving the samples.
+ """
+ while not self.control.value:
+ try:
+ sample = self.q_of_samples.get(True, 1)
+ if not sample:
+ break
+ self.handle(sample)
+ except queue.Empty:
+ pass
+ except IOError:
+ continue
+ except (ValueError, IndexError, KeyError, MemoryError):
+ self.stop()
+ break
+
+ # pylint: disable=too-many-boolean-expressions
+ def handle(self, sample):
+ ''' Store values and names if the name matches one of the following:
+ 1. cpu + keys
+ 2. processes + keys
+ 3. interface + keys + !xkeys
+ 4. ovs_stats + keys
+ 5. dpdkstat + keys
+ 6. intel_rdt + keys + !xkeys
+ sample[1] is the name of the sample, which is . separated strings.
+ The first field in sample[1] is the type - cpu, processes, etc.
+ For intel_rdt, the second field contains the core-id, which is
+ used to make the decision on 'exclusions'
+ sample[0]: Contains the host information - which is not considered.
+ sample[2]: Contains the Value.
+ sample[3]: Contains the Time (in ticks)
+ '''
+ if (('cpu' in sample[1] and
+ any(c in sample[1] for c in self.collectd_cpu_keys)) or
+ ('processes' in sample[1] and
+ any(p in sample[1] for p in self.collectd_processes_keys)) or
+ ('interface' in sample[1] and
+ (any(i in sample[1] for i in self.collectd_iface_keys) and
+ any(x not in sample[1]
+ for x in self.collectd_iface_xkeys))) or
+ ('ovs_stats' in sample[1] and
+ any(o in sample[1] for o in self.collectd_ovsstats_keys)) or
+ ('dpdkstat' in sample[1] and
+ any(d in sample[1] for d in self.collectd_dpdkstats_keys)) or
+ ('intel_rdt' in sample[1] and
+ any(r in sample[1] for r in self.collectd_intelrdt_keys) and
+ (int(sample[1].split('.')[1]) not in self.exclude_coreids))):
+ if sample[1] not in self.pd_dict:
+ self.pd_dict[sample[1]] = list()
+ val = self.pd_dict[sample[1]]
+ val.append((sample[2], sample[3]))
+ self.pd_dict[sample[1]] = val
+ logging.debug("COLLECTD %s", ' '.join(str(p) for p in sample))
+
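
For orientation, a sample as delivered by collectd_bucky is the 4-tuple
described in the docstring above; an illustrative value (hypothetical host and
metric name):

    sample = ('node1', 'intel_rdt.5.bytes.llc', 4096.0, 1528191820)
    # With 'bytes.llc' listed in COLLECTD_INTELRDT_KEYS and core id 5 not in
    # the expanded exclude_coreids, handle() appends (4096.0, 1528191820) to
    # pd_dict['intel_rdt.5.bytes.llc'].
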
+ def stop(self):
+ """
+ Stop receiving the samples.
+ """
+ self.server.close()
+ self.q_of_samples.put(None)
+ self.control.value = True
+
+
+# inherit from collector.ICollector.
+class Collectd(collector.ICollector):
+ """A collector of system statistics based on collectd
+
+ It starts a UDP server, receives metrics from collectd
+ and plots the results.
+ """
+
+ def __init__(self, results_dir, test_name):
+ """
+ Initialize collection of statistics
+ """
+ self.logger = logging.getLogger(__name__)
+ self.resultsdir = results_dir
+ self.testname = test_name
+ self.results = {}
+ self.sample_dict = multiprocessing.Manager().dict()
+ self.control = multiprocessing.Value('b', False)
+ self.receiver = Receiver(self.sample_dict, self.control)
+ self.cleanup_metrics()
+ # Assumption: collectd is installed at /opt/collectd
+ # and collectd is configured to write CSV files to /tmp/csv
+ self.pid = tasks.run_background_task(
+ ['sudo', '/opt/collectd/sbin/collectd'],
+ self.logger, 'Starting Collectd')
+
+ def cleanup_metrics(self):
+ """
+ Clean up the old or archived metrics
+ """
+ for name in glob.glob(os.path.join('/tmp/csv/', '*')):
+ tasks.run_task(['sudo', 'rm', '-rf', name], self.logger,
+ 'Cleaning up Metrics', True)
+
+ def start(self):
+ """
+ Start receiving samples
+ """
+ self.receiver.server.start()
+ self.receiver.start()
+
+ def stop(self):
+ """
+ Stop receiving samples
+ """
+ tasks.terminate_task_subtree(self.pid, logger=self.logger)
+ # At times collectd fails to fully terminate.
+ # Killing the process by name helps as well.
+ tasks.run_task(['sudo', 'pkill', '--signal', '2', 'collectd'],
+ self.logger, 'Stopping Collectd', True)
+ self.control.value = True
+ self.receiver.stop()
+ self.receiver.server.join(5)
+ self.receiver.join(5)
+ if self.receiver.server.is_alive():
+ self.receiver.server.terminate()
+ if self.receiver.is_alive():
+ self.receiver.terminate()
+ self.results = copy.deepcopy(self.sample_dict)
+ # Backup the collectd-metrics for this test into a zipfile
+ filename = ('/tmp/collectd-' + settings.getValue('LOG_TIMESTAMP') +
+ '.tar.gz')
+ tasks.run_task(['sudo', 'tar', '-czvf', filename, '/tmp/csv/'],
+ self.logger, 'Zipping File', True)
+ self.cleanup_metrics()
+
+ def get_results(self):
+ """
+ Return the results.
+ """
+ return get_results_to_print(self.results)
+
+ def print_results(self):
+ """
+ Print - Plot and save raw-data.
+ log the collected statistics
+ """
+ plot_graphs(self.results)
+ proc_stats = get_results_to_print(self.results)
+ for process in proc_stats:
+ logging.info("Process: %s", '_'.join(process.split('_')[:-1]))
+ for (key, value) in proc_stats[process].items():
+ logging.info(" Statistic: %s, Value: %s",
+ str(key), str(value))
diff --git a/tools/collectors/collectd/collectd_bucky.py b/tools/collectors/collectd/collectd_bucky.py
new file mode 100644
index 00000000..f6061c55
--- /dev/null
+++ b/tools/collectors/collectd/collectd_bucky.py
@@ -0,0 +1,770 @@
+# Copyright 2014-2018 TRBS, Spirent Communications
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+# This file is a modified version of scripts present in bucky software
+# details of bucky can be found at https://github.com/trbs/bucky
+
+"""
+This module receives the samples from collectd, processes them and
+enqueues them in a format suitable for easy processing.
+It also handles secure communication with collectd.
+"""
+import copy
+import hmac
+import logging
+import multiprocessing
+import os
+import socket
+import struct
+import sys
+from hashlib import sha1, sha256
+
+from Crypto.Cipher import AES
+from conf import settings
+
+logging.basicConfig()
+LOG = logging.getLogger(__name__)
+
+
+class CollectdError(Exception):
+ """
+ Custom error class.
+ """
+ def __init__(self, mesg):
+ super(CollectdError, self).__init__(mesg)
+ self.mesg = mesg
+
+ def __str__(self):
+ return self.mesg
+
+
+class ConnectError(CollectdError):
+ """
+ Custom connect error
+ """
+ pass
+
+
+class ConfigError(CollectdError):
+ """
+ Custom config error
+ """
+ pass
+
+
+class ProtocolError(CollectdError):
+ """
+ Custom protocol error
+ """
+ pass
+
+
+class UDPServer(multiprocessing.Process):
+ """
+ Actual UDP server receiving collectd samples over network
+ """
+ def __init__(self, ip, port):
+ super(UDPServer, self).__init__()
+ self.daemon = True
+ addrinfo = socket.getaddrinfo(ip, port,
+ socket.AF_UNSPEC, socket.SOCK_DGRAM)
+ afamily, _, _, _, addr = addrinfo[0]
+ ip, port = addr[:2]
+ self.ip_addr = ip
+ self.port = port
+ self.sock = socket.socket(afamily, socket.SOCK_DGRAM)
+ self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ try:
+ self.sock.bind((ip, port))
+ LOG.info("Bound socket socket %s:%s", ip, port)
+ except socket.error:
+ LOG.exception("Error binding socket %s:%s.", ip, port)
+ sys.exit(1)
+
+ self.sock_recvfrom = self.sock.recvfrom
+
+ def run(self):
+ """
+ Start receiving messages
+ """
+ recvfrom = self.sock_recvfrom
+ while True:
+ try:
+ data, addr = recvfrom(65535)
+ except (IOError, KeyboardInterrupt):
+ continue
+ addr = addr[:2] # for compatibility with longer ipv6 tuples
+ if data == b'EXIT':
+ break
+ if not self.handle(data, addr):
+ break
+ try:
+ self.pre_shutdown()
+ except SystemExit:
+ LOG.exception("Failed pre_shutdown method for %s",
+ self.__class__.__name__)
+
+ def handle(self, data, addr):
+ """
+ Handle the message.
+ """
+ raise NotImplementedError()
+
+ def pre_shutdown(self):
+ """ Pre shutdown hook """
+ pass
+
+ def close(self):
+ """
+ Close the communication
+ """
+ self.send('EXIT')
+
+ def send(self, data):
+ """
+ Send over the network
+ """
+ sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ if not isinstance(data, bytes):
+ data = data.encode()
+ sock.sendto(data, 0, (self.ip_addr, self.port))
+
+
+class CPUConverter(object):
+ """
+ Converter for CPU samples from collectd.
+ """
+ PRIORITY = -1
+
+ def __call__(self, sample):
+ return ["cpu", sample["plugin_instance"], sample["type_instance"]]
+
+
+class InterfaceConverter(object):
+ """
+ Converter for Interface samples from collectd
+ """
+ PRIORITY = -1
+
+ def __call__(self, sample):
+ parts = []
+ parts.append("interface")
+ if sample.get("plugin_instance", ""):
+ parts.append(sample["plugin_instance"].strip())
+ stypei = sample.get("type_instance", "").strip()
+ if stypei:
+ parts.append(stypei)
+ stype = sample.get("type").strip()
+ if stype:
+ parts.append(stype)
+ vname = sample.get("value_name").strip()
+ if vname:
+ parts.append(vname)
+ return parts
+
+
+class MemoryConverter(object):
+ """
+ Converter for Memory samples from collectd
+ """
+ PRIORITY = -1
+
+ def __call__(self, sample):
+ return ["memory", sample["type_instance"]]
+
+
+class DefaultConverter(object):
+ """
+ Default converter for samples from collectd
+ """
+ PRIORITY = -1
+
+ def __call__(self, sample):
+ parts = []
+ parts.append(sample["plugin"].strip())
+ if sample.get("plugin_instance"):
+ parts.append(sample["plugin_instance"].strip())
+ stype = sample.get("type", "").strip()
+ if stype and stype != "value":
+ parts.append(stype)
+ stypei = sample.get("type_instance", "").strip()
+ if stypei:
+ parts.append(stypei)
+ vname = sample.get("value_name").strip()
+ if vname and vname != "value":
+ parts.append(vname)
+ return parts
+
+
+DEFAULT_CONVERTERS = {
+ "cpu": CPUConverter(),
+ "interface": InterfaceConverter(),
+ "memory": MemoryConverter(),
+ "_default": DefaultConverter(),
+}
+
+
+class CollectDTypes(object):
+ """
+ Class to handle the sample types. The types.db file that
+ ships with collectd defines the various types.
+ """
+ def __init__(self, types_dbs=None):
+ if types_dbs is None:
+ types_dbs = []
+ dirs = ["/opt/collectd/share/collectd/types.db",
+ "/usr/local/share/collectd/types.db"]
+ self.types = {}
+ self.type_ranges = {}
+ if not types_dbs:
+ types_dbs = [tdb for tdb in dirs if os.path.exists(tdb)]
+ if not types_dbs:
+ raise ConfigError("Unable to locate types.db")
+ self.types_dbs = types_dbs
+ self._load_types()
+
+ def get(self, name):
+ """
+ Get the name of the type
+ """
+ t_name = self.types.get(name)
+ if t_name is None:
+ raise ProtocolError("Invalid type name: %s" % name)
+ return t_name
+
+ def _load_types(self):
+ """
+ Load all the types from types_db
+ """
+ for types_db in self.types_dbs:
+ with open(types_db) as handle:
+ for line in handle:
+ if line.lstrip()[:1] == "#":
+ continue
+ if not line.strip():
+ continue
+ self._add_type_line(line)
+ LOG.info("Loaded collectd types from %s", types_db)
+
+ def _add_type_line(self, line):
+ """
+ Add types information
+ """
+ types = {
+ "COUNTER": 0,
+ "GAUGE": 1,
+ "DERIVE": 2,
+ "ABSOLUTE": 3
+ }
+ name, spec = line.split(None, 1)
+ self.types[name] = []
+ self.type_ranges[name] = {}
+ vals = spec.split(", ")
+ for val in vals:
+ vname, vtype, minv, maxv = val.strip().split(":")
+ vtype = types.get(vtype)
+ if vtype is None:
+ raise ValueError("Invalid value type: %s" % vtype)
+ minv = None if minv == "U" else float(minv)
+ maxv = None if maxv == "U" else float(maxv)
+ self.types[name].append((vname, vtype))
+ self.type_ranges[name][vname] = (minv, maxv)
+
+
+class CollectDParser(object):
+ """
+ Parser class: Implements the sample parsing operations.
+ The types definition defines the parsing process.
+ """
+ def __init__(self, types_dbs=None, counter_eq_derive=False):
+ if types_dbs is None:
+ types_dbs = []
+ self.types = CollectDTypes(types_dbs=types_dbs)
+ self.counter_eq_derive = counter_eq_derive
+
+ def parse(self, data):
+ """
+ Parse individual samples
+ """
+ for sample in self.parse_samples(data):
+ yield sample
+
+ def parse_samples(self, data):
+ """
+ Extract all the samples from the message.
+ """
+ types = {
+ 0x0000: self._parse_string("host"),
+ 0x0001: self._parse_time("time"),
+ 0x0008: self._parse_time_hires("time"),
+ 0x0002: self._parse_string("plugin"),
+ 0x0003: self._parse_string("plugin_instance"),
+ 0x0004: self._parse_string("type"),
+ 0x0005: self._parse_string("type_instance"),
+ 0x0006: None, # handle specially
+ 0x0007: self._parse_time("interval"),
+ 0x0009: self._parse_time_hires("interval")
+ }
+ sample = {}
+ for (ptype, pdata) in self.parse_data(data):
+ if ptype not in types:
+ LOG.debug("Ignoring part type: 0x%02x", ptype)
+ continue
+ if ptype != 0x0006:
+ types[ptype](sample, pdata)
+ continue
+ for vname, vtype, val in self.parse_values(sample["type"], pdata):
+ sample["value_name"] = vname
+ sample["value_type"] = vtype
+ sample["value"] = val
+ yield copy.deepcopy(sample)
+
+ @staticmethod
+ def parse_data(data):
+ """
+ Parse the message
+ """
+ types = set([
+ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004,
+ 0x0005, 0x0006, 0x0007, 0x0008, 0x0009,
+ 0x0100, 0x0101, 0x0200, 0x0210
+ ])
+ while data:
+ if len(data) < 4:
+ raise ProtocolError("Truncated header.")
+ (part_type, part_len) = struct.unpack("!HH", data[:4])
+ data = data[4:]
+ if part_type not in types:
+ raise ProtocolError("Invalid part type: 0x%02x" % part_type)
+ part_len -= 4 # includes four header bytes we just parsed
+ if len(data) < part_len:
+ raise ProtocolError("Truncated value.")
+ part_data, data = data[:part_len], data[part_len:]
+ yield (part_type, part_data)
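
parse_data() walks the collectd binary framing: each part carries a 4-byte
header ('!HH': part type, total length including the header) followed by its
payload. A worked sketch that builds one 'host' part (type 0x0000) and parses
it back:

    import struct

    payload = b'node1\x00'    # strings are NUL-terminated
    part = struct.pack('!HH', 0x0000, 4 + len(payload)) + payload
    print(list(CollectDParser.parse_data(part)))    # [(0, b'node1\x00')]
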
+
+ def parse_values(self, stype, data):
+ """
+ Parse the value of a particular type
+ """
+ types = {0: "!Q", 1: "<d", 2: "!q", 3: "!Q"}
+ (nvals,) = struct.unpack("!H", data[:2])
+ data = data[2:]
+ if len(data) != 9 * nvals:
+ raise ProtocolError("Invalid value structure length.")
+ vtypes = self.types.get(stype)
+ if nvals != len(vtypes):
+ raise ProtocolError("Values different than types.db info.")
+ for i in range(nvals):
+ vtype = data[i]
+ if vtype != vtypes[i][1]:
+ if self.counter_eq_derive and \
+ (vtype, vtypes[i][1]) in ((0, 2), (2, 0)):
+ # if counter vs derive don't break, assume server is right
+ LOG.debug("Type mismatch (counter/derive) for %s/%s",
+ stype, vtypes[i][0])
+ else:
+ raise ProtocolError("Type mismatch with types.db")
+ data = data[nvals:]
+ for i in range(nvals):
+ vdata, data = data[:8], data[8:]
+ (val,) = struct.unpack(types[vtypes[i][1]], vdata)
+ yield vtypes[i][0], vtypes[i][1], val
+
+ @staticmethod
+ def _parse_string(name):
+ """
+ Parse string value
+ """
+ def _parser(sample, data):
+ """
+ Actual string parser
+ """
+ data = data.decode()
+ if data[-1] != '\0':
+ raise ProtocolError("Invalid string detected.")
+ sample[name] = data[:-1]
+ return _parser
+
+ @staticmethod
+ def _parse_time(name):
+ """
+ Parse time value
+ """
+ def _parser(sample, data):
+ """
+ Actual time parser
+ """
+ if len(data) != 8:
+ raise ProtocolError("Invalid time data length.")
+ (val,) = struct.unpack("!Q", data)
+ sample[name] = float(val)
+ return _parser
+
+ @staticmethod
+ def _parse_time_hires(name):
+ """
+ Parse time hires value
+ """
+ def _parser(sample, data):
+ """
+ Actual time hires parser
+ """
+ if len(data) != 8:
+ raise ProtocolError("Invalid hires time data length.")
+ (val,) = struct.unpack("!Q", data)
+ sample[name] = val * (2 ** -30)
+ return _parser
+
+
+class CollectDCrypto(object):
+ """
+ Handle the secured communications with the collectd daemon
+ """
+ def __init__(self):
+ sec_level = settings.getValue('COLLECTD_SECURITY_LEVEL')
+ if sec_level in ("sign", "SIGN", "Sign", 1):
+ self.sec_level = 1
+ elif sec_level in ("encrypt", "ENCRYPT", "Encrypt", 2):
+ self.sec_level = 2
+ else:
+ self.sec_level = 0
+ if self.sec_level:
+ self.auth_file = settings.getValue('COLLECTD_AUTH_FILE')
+ self.auth_db = {}
+ if self.auth_file:
+ self.load_auth_file()
+ if not self.auth_file:
+ raise ConfigError("Collectd security level configured but no "
+ "auth file specified in configuration")
+ if not self.auth_db:
+ LOG.warning("Collectd security level configured but no "
+ "user/passwd entries loaded from auth file")
+
+ def load_auth_file(self):
+ """
+ Loading the authentication file.
+ """
+ try:
+ fil = open(self.auth_file)
+ except IOError as exc:
+ raise ConfigError("Unable to load collectd's auth file: %r" % exc)
+ self.auth_db.clear()
+ for line in fil:
+ line = line.strip()
+ if not line or line[0] == "#":
+ continue
+ user, passwd = line.split(":", 1)
+ user = user.strip()
+ passwd = passwd.strip()
+ if not user or not passwd:
+ LOG.warning("Found line with missing user or password")
+ continue
+ if user in self.auth_db:
+ LOG.warning("Found multiple entries for single user")
+ self.auth_db[user] = passwd
+ fil.close()
+ LOG.info("Loaded collectd's auth file from %s", self.auth_file)
+
+ def parse(self, data):
+ """
+ Parse the non-encrypted message
+ """
+ if len(data) < 4:
+ raise ProtocolError("Truncated header.")
+ part_type, part_len = struct.unpack("!HH", data[:4])
+ sec_level = {0x0200: 1, 0x0210: 2}.get(part_type, 0)
+ if sec_level < self.sec_level:
+ raise ProtocolError("Packet has lower security level than allowed")
+ if not sec_level:
+ return data
+ if sec_level == 1 and not self.sec_level:
+ return data[part_len:]
+ data = data[4:]
+ part_len -= 4
+ if len(data) < part_len:
+ raise ProtocolError("Truncated part payload.")
+ if sec_level == 1:
+ return self.parse_signed(part_len, data)
+ if sec_level == 2:
+ return self.parse_encrypted(part_len, data)
+ return None
+
+ def parse_signed(self, part_len, data):
+ """
+ Parse the signed message
+ """
+
+ if part_len <= 32:
+ raise ProtocolError("Truncated signed part.")
+ sig, data = data[:32], data[32:]
+ uname_len = part_len - 32
+ uname = data[:uname_len].decode()
+ if uname not in self.auth_db:
+ raise ProtocolError("Signed packet, unknown user '%s'" % uname)
+ password = self.auth_db[uname].encode()
+ sig2 = hmac.new(password, msg=data, digestmod=sha256).digest()
+ if not self._hashes_match(sig, sig2):
+ raise ProtocolError("Bad signature from user '%s'" % uname)
+ data = data[uname_len:]
+ return data
+
+ def parse_encrypted(self, part_len, data):
+ """
+ Parse the encrypted message
+ """
+ if part_len != len(data):
+ raise ProtocolError("Enc pkt size disaggrees with header.")
+ if len(data) <= 38:
+ raise ProtocolError("Truncated encrypted part.")
+ uname_len, data = struct.unpack("!H", data[:2])[0], data[2:]
+ if len(data) <= uname_len + 36:
+ raise ProtocolError("Truncated encrypted part.")
+ uname, data = data[:uname_len].decode(), data[uname_len:]
+ if uname not in self.auth_db:
+ raise ProtocolError("Couldn't decrypt, unknown user '%s'" % uname)
+ ival, data = data[:16], data[16:]
+ password = self.auth_db[uname].encode()
+ key = sha256(password).digest()
+ pad_bytes = 16 - (len(data) % 16)
+ data += b'\0' * pad_bytes
+ data = AES.new(key, IV=ival, mode=AES.MODE_OFB).decrypt(data)
+ data = data[:-pad_bytes]
+ tag, data = data[:20], data[20:]
+ tag2 = sha1(data).digest()
+ if not self._hashes_match(tag, tag2):
+ raise ProtocolError("Bad checksum on enc pkt for '%s'" % uname)
+ return data
+
+ @staticmethod
+ def _hashes_match(val_a, val_b):
+ """Constant time comparison of bytes """
+ if len(val_a) != len(val_b):
+ return False
+ diff = 0
+ for val_x, val_y in zip(val_a, val_b):
+ diff |= val_x ^ val_y
+ return not diff
+
+
+class CollectDConverter(object):
+ """
+ Handle all conversions.
+ Conversion: convert the sample received from collectd to an
+ appropriate format for easy processing.
+ """
+ def __init__(self):
+ self.converters = dict(DEFAULT_CONVERTERS)
+
+ def convert(self, sample):
+ """
+ Main conversion handling.
+ """
+ default = self.converters["_default"]
+ handler = self.converters.get(sample["plugin"], default)
+ try:
+ name_parts = handler(sample)
+ if name_parts is None:
+ return None # treat None as "ignore sample"
+ name = '.'.join(name_parts)
+ except (AttributeError, IndexError, MemoryError, RuntimeError):
+ LOG.exception("Exception in sample handler %s (%s):",
+ sample["plugin"], handler)
+ return None
+ host = sample.get("host", "")
+ return (
+ host,
+ name,
+ sample["value_type"],
+ sample["value"],
+ int(sample["time"])
+ )
+
+ def _add_converter(self, name, inst, source="unknown"):
+ """
+ Add new converter types
+ """
+ if name not in self.converters:
+ LOG.info("Converter: %s from %s", name, source)
+ self.converters[name] = inst
+ return
+ kpriority = getattr(inst, "PRIORITY", 0)
+ ipriority = getattr(self.converters[name], "PRIORITY", 0)
+ if kpriority > ipriority:
+ LOG.info("Replacing: %s", name)
+ LOG.info("Converter: %s from %s", name, source)
+ self.converters[name] = inst
+ return
+ LOG.info("Ignoring: %s (%s) from %s (priority: %s vs %s)",
+ name, inst, source, kpriority, ipriority)
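+ # Registration sketch (hypothetical converter; any callable returning
+ # metric name parts qualifies, and a higher PRIORITY attribute wins
+ # over an existing converter registered under the same name):
+ #
+ #   class MyPluginConverter(object):
+ #       PRIORITY = 1
+ #       def __call__(self, sample):
+ #           return [sample["plugin"], sample["type"]]
+ #
+ #   converter._add_converter("my_plugin", MyPluginConverter(), "example")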
+
+
+class CollectDHandler(object):
+ """Wraps all CollectD parsing functionality in a class"""
+
+ def __init__(self):
+ self.crypto = CollectDCrypto()
+ collectd_types = []
+ collectd_counter_eq_derive = False
+ self.parser = CollectDParser(collectd_types,
+ collectd_counter_eq_derive)
+ self.converter = CollectDConverter()
+ self.prev_samples = {}
+ self.last_sample = None
+
+ def parse(self, data):
+ """
+ Parse the samples from collectd
+ """
+ try:
+ data = self.crypto.parse(data)
+ except ProtocolError as error:
+ LOG.error("Protocol error in CollectDCrypto: %s", error)
+ return
+ try:
+ for sample in self.parser.parse(data):
+ self.last_sample = sample
+ stype = sample["type"]
+ vname = sample["value_name"]
+ sample = self.converter.convert(sample)
+ if sample is None:
+ continue
+ host, name, vtype, val, time = sample
+ if not name.strip():
+ continue
+ val = self.calculate(host, name, vtype, val, time)
+ val = self.check_range(stype, vname, val)
+ if val is not None:
+ yield host, name, val, time
+ except ProtocolError as error:
+ LOG.error("Protocol error: %s", error)
+ if self.last_sample is not None:
+ LOG.info("Last sample: %s", self.last_sample)
+
+ def check_range(self, stype, vname, val):
+ """
+ Check the value range
+ """
+ if val is None:
+ return None
+ try:
+ vmin, vmax = self.parser.types.type_ranges[stype][vname]
+ except KeyError:
+ LOG.error("Couldn't find vmin, vmax in CollectDTypes")
+ return val
+ if vmin is not None and val < vmin:
+ LOG.debug("Invalid value %s (<%s) for %s", val, vmin, vname)
+ LOG.debug("Last sample: %s", self.last_sample)
+ return None
+ if vmax is not None and val > vmax:
+ LOG.debug("Invalid value %s (>%s) for %s", val, vmax, vname)
+ LOG.debug("Last sample: %s", self.last_sample)
+ return None
+ return val
+
+ def calculate(self, host, name, vtype, val, time):
+ """
+ Perform calculations for handlers
+ """
+ handlers = {
+ 0: self._calc_counter, # counter
+ 1: lambda _host, _name, v, _time: v, # gauge
+ 2: self._calc_derive, # derive
+ 3: self._calc_absolute # absolute
+ }
+ if vtype not in handlers:
+ LOG.error("Invalid value type %s for %s", vtype, name)
+ LOG.info("Last sample: %s", self.last_sample)
+ return None
+ return handlers[vtype](host, name, val, time)
+
+ def _calc_counter(self, host, name, val, time):
+ """
+ Calculating counter values
+ """
+ key = (host, name)
+ if key not in self.prev_samples:
+ self.prev_samples[key] = (val, time)
+ return None
+ pval, ptime = self.prev_samples[key]
+ self.prev_samples[key] = (val, time)
+ if time <= ptime:
+ LOG.error("Invalid COUNTER update for: %s:%s", key[0], key[1])
+ LOG.info("Last sample: %s", self.last_sample)
+ return None
+ if val < pval:
+ # this is supposed to handle counter wrap around
+ # see https://collectd.org/wiki/index.php/Data_source
+ LOG.debug("COUNTER wrap-around for: %s:%s (%s -> %s)",
+ host, name, pval, val)
+ if pval < 0x100000000:
+ val += 0x100000000 # 2**32
+ else:
+ val += 0x10000000000000000 # 2**64
+ return float(val - pval) / (time - ptime)
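+ # Worked example of the wrap-around branch above (hypothetical values):
+ # for a 32-bit counter with pval=4294967290 at ptime=100 and val=6 at
+ # time=110, val becomes 6 + 2**32 = 4294967302, so the rate is
+ # (4294967302 - 4294967290) / (110 - 100) = 1.2 per second.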
+
+ def _calc_derive(self, host, name, val, time):
+ """
+ Calculating derived values
+ """
+ key = (host, name)
+ if key not in self.prev_samples:
+ self.prev_samples[key] = (val, time)
+ return None
+ pval, ptime = self.prev_samples[key]
+ self.prev_samples[key] = (val, time)
+ if time <= ptime:
+ LOG.debug("Invalid DERIVE update for: %s:%s", key[0], key[1])
+ LOG.debug("Last sample: %s", self.last_sample)
+ return None
+ return float(abs(val - pval)) / (time - ptime)
+
+ def _calc_absolute(self, host, name, val, time):
+ """
+ Calculating absolute values
+ """
+ key = (host, name)
+ if key not in self.prev_samples:
+ self.prev_samples[key] = (val, time)
+ return None
+ _, ptime = self.prev_samples[key]
+ self.prev_samples[key] = (val, time)
+ if time <= ptime:
+ LOG.error("Invalid ABSOLUTE update for: %s:%s", key[0], key[1])
+ LOG.info("Last sample: %s", self.last_sample)
+ return None
+ return float(val) / (time - ptime)
+
+
+class CollectDServer(UDPServer):
+ """Single processes CollectDServer"""
+
+ def __init__(self, queue):
+ super(CollectDServer, self).__init__(settings.getValue('COLLECTD_IP'),
+ settings.getValue('COLLECTD_PORT'))
+ self.handler = CollectDHandler()
+ self.queue = queue
+
+ def handle(self, data, addr):
+ for sample in self.handler.parse(data):
+ self.queue.put(sample)
+ return True
+
+ def pre_shutdown(self):
+ LOG.info("Sutting down CollectDServer")
+
+
+def get_collectd_server(queue):
+ """Get the collectd server """
+ server = CollectDServer
+ return server(queue)
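+# Usage sketch (assumes COLLECTD_IP and COLLECTD_PORT are present in the
+# settings; each handled datagram puts (host, name, value, time) tuples
+# on the queue):
+#
+#   from multiprocessing import Queue
+#   queue = Queue()
+#   server = get_collectd_server(queue)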
diff --git a/tools/collectors/multicmd/__init__.py b/tools/collectors/multicmd/__init__.py
new file mode 100755
index 00000000..2ae2340f
--- /dev/null
+++ b/tools/collectors/multicmd/__init__.py
@@ -0,0 +1,17 @@
+# Copyright 2019 Spirent Communications.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Wrapper for multi-commands as a collector
+"""
diff --git a/tools/collectors/multicmd/multicmd.py b/tools/collectors/multicmd/multicmd.py
new file mode 100644
index 00000000..275a0693
--- /dev/null
+++ b/tools/collectors/multicmd/multicmd.py
@@ -0,0 +1,138 @@
+# Copyright 2019 Spirent Communications.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Collects information using various command line tools.
+"""
+
+import glob
+import logging
+import os
+from collections import OrderedDict
+from tools import tasks
+from tools.collectors.collector import collector
+from conf import settings
+
+class MultiCmd(collector.ICollector):
+ """ Multiple command-line controllers
+ collectd, prox, crond, filebeat
+ """
+ def __init__(self, results_dir, test_name):
+ """
+ Initialize the collectors.
+ """
+ self.prox_home = settings.getValue('MC_PROX_HOME')
+ self.collectd_cmd = settings.getValue('MC_COLLECTD_CMD')
+ self.collectd_csv = settings.getValue('MC_COLLECTD_CSV')
+ self.prox_out = settings.getValue('MC_PROX_OUT')
+ self.prox_cmd = settings.getValue('MC_PROX_CMD')
+ self.cron_out = settings.getValue('MC_CRON_OUT')
+ self.logger = logging.getLogger(__name__)
+ self.results_dir = results_dir
+ self.collectd_pid = 0
+ self.prox_pid = 0
+ self.cleanup_collectd_metrics()
+ self.logger.debug('Multicmd data for %s', str(test_name))
+ # There should not be a file named 'stop' in the prox_home folder,
+ # else PROX will start and stop immediately. This is a hack to
+ # control prox-runrapid, which by default runs for the specified duration.
+ filename = os.path.join(self.prox_home, 'stop')
+ if os.path.exists(filename):
+ tasks.run_task(['sudo', 'rm', filename],
+ self.logger, 'deleting stop')
+ self.results = OrderedDict()
+
+ def cleanup_collectd_metrics(self):
+ """
+ Clean up the old or archived metrics.
+ """
+ for name in glob.glob(os.path.join(self.collectd_csv, '*')):
+ tasks.run_task(['sudo', 'rm', '-rf', name], self.logger,
+ 'Cleaning up Metrics', True)
+
+ def start(self):
+ # Command-1: Start Collectd
+ self.collectd_pid = tasks.run_background_task(
+ ['sudo', self.collectd_cmd],
+ self.logger, 'Starting Collectd')
+
+ # Command-2: Start PROX
+ working_dir = os.getcwd()
+ if os.path.exists(self.prox_home):
+ os.chdir(self.prox_home)
+ self.prox_pid = tasks.run_background_task(['sudo', self.prox_cmd,
+ '--test', 'irq',
+ '--env', 'irq'],
+ self.logger,
+ 'Start PROX')
+ os.chdir(working_dir)
+ # Command-3: Start CROND
+ tasks.run_task(['sudo', 'systemctl', 'start', 'crond'],
+ self.logger, 'Starting CROND', True)
+
+ # command-4: BEATS
+ tasks.run_task(['sudo', 'systemctl', 'start', 'filebeat'],
+ self.logger, 'Starting BEATS', True)
+
+ def stop(self):
+ """
+ Stop All commands
+ """
+ # Command-1: COLLECTD
+ tasks.terminate_task_subtree(self.collectd_pid, logger=self.logger)
+ tasks.run_task(['sudo', 'pkill', '--signal', '2', 'collectd'],
+ self.logger, 'Stopping Collectd', True)
+
+ # Backup the collectd-metrics for this test into a results folder
+ tasks.run_task(['sudo', 'cp', '-r', self.collectd_csv,
+ self.results_dir], self.logger,
+ 'Copying Collectd Results File', True)
+ self.cleanup_collectd_metrics()
+
+ # Command-2: PROX
+ filename = os.path.join(self.prox_home, 'stop')
+ if os.path.exists(self.prox_home):
+ tasks.run_task(['sudo', 'touch', filename],
+ self.logger, 'Stopping PROX', True)
+
+ outfile = os.path.join(self.prox_home, self.prox_out)
+ if os.path.exists(outfile):
+ tasks.run_task(['sudo', 'mv', outfile, self.results_dir],
+ self.logger, 'Moving PROX-OUT file', True)
+
+ # Command-3: CROND
+ tasks.run_task(['sudo', 'systemctl', 'stop', 'crond'],
+ self.logger, 'Stopping CROND', True)
+ if os.path.exists(self.cron_out):
+ tasks.run_task(['sudo', 'mv', self.cron_out, self.results_dir],
+ self.logger, 'Move Cron Logs', True)
+
+ # Command-4: BEATS
+ tasks.run_task(['sudo', 'systemctl', 'stop', 'filebeat'],
+ self.logger, 'Stopping BEATS', True)
+
+ def get_results(self):
+ """
+ Return results
+ """
+ return self.results
+
+ def print_results(self):
+ """
+ Print results
+ """
+ logging.info("Multicmd Output is not collected by VSPERF")
+ logging.info("Please refer to corresponding command's output")
diff --git a/tools/collectors/sysmetrics/pidstat.py b/tools/collectors/sysmetrics/pidstat.py
index 99341ccf..277fdb11 100644
--- a/tools/collectors/sysmetrics/pidstat.py
+++ b/tools/collectors/sysmetrics/pidstat.py
@@ -70,13 +70,13 @@ class Pidstat(collector.ICollector):
into the file in directory with test results
"""
monitor = settings.getValue('PIDSTAT_MONITOR')
- self._logger.info('Statistics are requested for: ' + ', '.join(monitor))
+ self._logger.info('Statistics are requested for: %s', ', '.join(monitor))
pids = systeminfo.get_pids(monitor)
if pids:
with open(self._log, 'w') as logfile:
cmd = ['sudo', 'LC_ALL=' + settings.getValue('DEFAULT_CMD_LOCALE'),
'pidstat', settings.getValue('PIDSTAT_OPTIONS'),
- '-p', ','.join(pids),
+ '-t', '-p', ','.join(pids),
str(settings.getValue('PIDSTAT_SAMPLE_INTERVAL'))]
self._logger.debug('%s', ' '.join(cmd))
self._pid = subprocess.Popen(cmd, stdout=logfile, bufsize=0).pid
@@ -116,16 +116,48 @@ class Pidstat(collector.ICollector):
# combine stored header fields with actual values
tmp_res = OrderedDict(zip(tmp_header,
line[8:].split()))
- # use process's name and its pid as unique key
- key = tmp_res.pop('Command') + '_' + tmp_res['PID']
- # store values for given command into results dict
- if key in self._results:
- self._results[key].update(tmp_res)
- else:
- self._results[key] = tmp_res
+ cmd = tmp_res.pop('Command')
+ # remove unused fields (given by option '-t')
+ tmp_res.pop('UID')
+ tmp_res.pop('TID')
+ if '|_' not in cmd: # main process
+ # use process's name and its pid as unique key
+ tmp_pid = tmp_res.pop('TGID')
+ tmp_key = "%s_%s" % (cmd, tmp_pid)
+ # do not trust cpu usage of pid
+ # see VSPERF-569 for more details
+ if 'CPU' not in tmp_header:
+ self.update_results(tmp_key, tmp_res, False)
+ else: # thread
+ # accumulate cpu usage of all threads
+ if 'CPU' in tmp_header:
+ tmp_res.pop('TGID')
+ self.update_results(tmp_key, tmp_res, True)
line = logfile.readline()
+
+ def update_results(self, key, result, accumulate=False):
+ """
+ Update final results dictionary. If ``accumulate`` param is set to
+ ``True``, try to accumulate existing values.
+ """
+ # store values for given command into results dict
+ if key not in self._results:
+ self._results[key] = result
+ elif accumulate:
+ for field in result:
+ if field not in self._results[key]:
+ self._results[key][field] = result[field]
+ else:
+ try:
+ val = float(self._results[key][field]) + float(result[field])
+ self._results[key][field] = '{0:.2f}'.format(val)
+ except ValueError:
+ # cannot cast to float, overwrite with the newly received value
+ self._results[key][field] = result[field]
+ else:
+ self._results[key].update(result)
+
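+ # Accumulation example (hypothetical thread rows): pidstat prints the
+ # main process row before its thread rows, so the key is already set
+ # when threads are accumulated; two threads reporting %CPU values
+ # '1.50' and '2.25' are summed and stored as '3.75' under their shared
+ # "<command>_<tgid>" key.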
def get_results(self):
"""Returns collected statistics.
"""
@@ -135,7 +167,7 @@ class Pidstat(collector.ICollector):
"""Logs collected statistics.
"""
for process in self._results:
- logging.info("Process: " + '_'.join(process.split('_')[:-1]))
+ logging.info("Process: %s", '_'.join(process.split('_')[:-1]))
for(key, value) in self._results[process].items():
logging.info(" Statistic: " + str(key) +
", Value: " + str(value))
diff --git a/tools/confgenwizard/__init__.py b/tools/confgenwizard/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tools/confgenwizard/__init__.py
diff --git a/tools/confgenwizard/nicinfo.py b/tools/confgenwizard/nicinfo.py
new file mode 100644
index 00000000..631b92c5
--- /dev/null
+++ b/tools/confgenwizard/nicinfo.py
@@ -0,0 +1,236 @@
+# Copyright 2019-2020 Spirent Communications.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Retrieve information from remote host.
+In this file, we retrieve only the NIC PCI details.
+"""
+
+from __future__ import print_function
+import sys
+import subprocess
+import os
+from os.path import exists
+from stat import S_ISDIR
+import paramiko
+
+# The PCI device class for ETHERNET devices
+ETHERNET_CLASS = "0200"
+LSPCI_PATH = '/usr/bin/lspci'
+RECV_BYTES = 4096
+ADVANCED = True
+
+
+#pylint: disable=too-many-instance-attributes
+class RemoteInfo(object):
+ """
+ Class to extract information from a remote system
+ """
+
+ def __init__(self, host, username, password):
+ """
+ Perform Initialization
+ """
+ # Dict of ethernet devices present. Dictionary indexed by PCI address.
+ # Each device within this is itself a dictionary of device properties
+ self.nic_devices = {}
+ if host == 'local':
+ self.local = True
+ else:
+ self.local = False
+ # Assuming the SSH port is 22.
+ self.port = 22
+ self.hostname = host
+ self.password = password
+ self.username = username
+ self.client = paramiko.Transport((self.hostname, self.port))
+ self.client.connect(username=self.username,
+ password=self.password)
+ self.session = self.client.open_channel(kind='session')
+ self.session.get_pty()
+ self.sftp = paramiko.SFTPClient.from_transport(self.client)
+
+ def sftp_exists(self, path):
+ """
+ Check if a remote file exists.
+ """
+ try:
+ self.sftp.stat(path)
+ return True
+ except IOError:
+ return False
+
+ def sftp_listdir(self, path):
+ """
+ List regular files in a directory on the remote host.
+ """
+ files = []
+ for fil in self.sftp.listdir_attr(path):
+ if not S_ISDIR(fil.st_mode):
+ files.append(fil.filename)
+ return files
+
+ def is_connected(self):
+ """
+ Check if session is connected.
+ """
+ return self.client.is_active()
+
+ def new_channel(self):
+ """
+ For every command, a new session is set up.
+ """
+ if not self.is_connected():
+ self.client = paramiko.Transport((self.hostname, self.port))
+ self.client.connect(username=self.username,
+ password=self.password)
+ self.session = self.client.open_channel(kind='session')
+
+ # This is roughly compatible with the check_output function in the
+ # subprocess module, which is only available since Python 2.7.
+ def check_output(self, args, stderr=None):
+ '''
+ Run a command and capture its output
+ '''
+ stdout_data = []
+ stderr_data = []
+ if self.local:
+ return subprocess.Popen(args, stdout=subprocess.PIPE,
+ stderr=stderr,
+ universal_newlines=True).communicate()[0]
+ else:
+ self.new_channel()
+ separator = ' '
+ command = separator.join(args)
+ self.session.exec_command(command)
+ while True:
+ if self.session.recv_ready():
+ stdout_data.append(self.session.recv(RECV_BYTES))
+ if self.session.recv_stderr_ready():
+ stderr_data.append(self.session.recv_stderr(RECV_BYTES))
+ if self.session.exit_status_ready():
+ break
+ if stdout_data:
+ return b"".join(stdout_data)
+ return b"".join(stderr_data)
+
+ def get_pci_details(self, dev_id):
+ '''
+ This function gets additional details for a PCI device
+ '''
+ device = {}
+
+ extra_info = self.check_output([LSPCI_PATH,
+ "-vmmks", dev_id]).splitlines()
+
+ # parse lspci details
+ for line in extra_info:
+ if not line:
+ continue
+ if self.local:
+ name, value = line.split("\t", 1)
+ else:
+ name, value = line.decode().split("\t", 1)
+ name = name.strip(":") + "_str"
+ device[name] = value
+ # check for a unix interface name
+ sys_path = "/sys/bus/pci/devices/%s/net/" % dev_id
+ device["Interface"] = ""
+ if self.local:
+ if exists(sys_path):
+ device["Interface"] = ",".join(os.listdir(sys_path))
+ else:
+ if self.sftp_exists(sys_path):
+ device["Interface"] = ",".join(self.sft_listdir(sys_path))
+
+ # check if a port is used for ssh connection
+ device["Ssh_if"] = False
+ device["Active"] = ""
+
+ return device
+
+ def get_nic_details(self):
+ '''
+ This function populates the "devices" dictionary. The keys used are
+ the pci addresses (domain:bus:slot.func). The values are themselves
+ dictionaries - one for each NIC.
+ '''
+ devinfos = []
+ # first loop through and read details for all devices
+ # request machine readable format, with numeric IDs
+ dev = {}
+ dev_lines = self.check_output([LSPCI_PATH, "-Dvmmn"]).splitlines()
+ for dev_line in dev_lines:
+ if not dev_line:
+ if dev["Class"] == ETHERNET_CLASS:
+ # convert device and vendor ids to numbers, then add to
+ # global
+ dev["Vendor"] = int(dev["Vendor"], 16)
+ dev["Device"] = int(dev["Device"], 16)
+ self.nic_devices[dev["Slot"]] = dict(
+ dev) # use dict to make copy of dev
+ else:
+ if self.local:
+ name, value = dev_line.split('\t', 1)
+ else:
+ name, value = dev_line.decode().split("\t", 1)
+ dev[name.rstrip(":")] = value
+
+ # based on the basic info, get extended text details
+ for dev in self.nic_devices:
+ # get additional info and add it to existing data
+ if ADVANCED:
+ self.nic_devices[dev].update(self.get_pci_details(dev).items())
+ devinfos.append(self.nic_devices[dev])
+ return devinfos
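+ # Parsing sketch: "lspci -Dvmmn" emits blank-line separated blocks of
+ # tab-delimited "Tag:<TAB>value" pairs, e.g. (hypothetical device):
+ #
+ #   Slot:   0000:02:00.0
+ #   Class:  0200
+ #   Vendor: 8086
+ #   Device: 10fb
+ #
+ # Only Ethernet devices (class 0200) are kept, keyed by their Slot.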
+
+ def dev_id_from_dev_name(self, dev_name):
+ '''
+ Take a device "name" - a string passed in by user to identify a NIC
+ device, and determine the device id - i.e. the domain:bus:slot.func-for
+ it, which can then be used to index into the devices array
+ '''
+ # check if it's already a suitable index
+ if dev_name in self.nic_devices:
+ return dev_name
+ # check if it's an index just missing the domain part
+ elif "0000:" + dev_name in self.nic_devices:
+ return "0000:" + dev_name
+ else:
+ # check if it's an interface name, e.g. eth1
+ for dev in self.nic_devices:
+ if dev_name in self.nic_devices[dev]["Interface"].split(","):
+ return self.nic_devices[dev]["Slot"]
+ # if nothing else matches - error
+ print("Unknown device: %s. "
+ "Please specify device in \"bus:slot.func\" format" % dev_name)
+ sys.exit(1)
+
+
+def main():
+ '''program main function'''
+ host = input("Enter Host IP: ")
+ username = input("Enter User Name: ")
+ pwd = input("Enter Password: ")
+ rhi = RemoteInfo(host, username, pwd)
+ dev_list = rhi.get_nic_details()
+ for dev in dev_list:
+ print(dev["Slot"])
+
+
+if __name__ == "__main__":
+ main()
diff --git a/tools/confgenwizard/vsperfwiz.py b/tools/confgenwizard/vsperfwiz.py
new file mode 100644
index 00000000..48a2d504
--- /dev/null
+++ b/tools/confgenwizard/vsperfwiz.py
@@ -0,0 +1,736 @@
+# Copyright 2019-2020 Spirent Communications.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Tool to create configuration file for VSPERF
+"""
+
+from __future__ import print_function
+import signal
+import sys
+from pypsi import wizard as wiz
+from pypsi.shell import Shell
+import nicinfo
+
+
+#pylint: disable=too-many-instance-attributes
+class VsperfWizard(object):
+ """
+ Class to create wizards
+ """
+
+ def __init__(self):
+ """
+ Perform Initialization.
+ """
+ self.shell = Shell()
+ self.vpp_values = {}
+ self.dut_values = {}
+ self.main_values = {}
+ self.guest_values = {}
+ self.ovs_values = {}
+ self.ixnet_values = {}
+ self.stc_values = {}
+ self.trex_values = {}
+ self.traffic_values = {}
+ self.wiz_dut = None
+ self.wiz_ixnet = None
+ self.wiz_stc = None
+ self.wiz_ovs = None
+ self.wiz_traffic = None
+ self.wiz_main = None
+ self.wiz_guest = None
+ self.wiz_trex = None
+ self.wiz_vpp = None
+ self.rhi = None
+ self.devices = ''
+ self.devs = {}
+
+
+######## Support Functions ############################
+ def get_nicpcis(self):
+ """
+ Get NIC information from Remote Host
+ """
+ self.rhi = nicinfo.RemoteInfo(self.dut_values['dutip'],
+ self.dut_values['dutuname'],
+ self.dut_values['dutpwd'])
+ dev_list = self.rhi.get_nic_details()
+ index = 0
+ for dev in dev_list:
+ self.devices += str("(" + str(index) + ")" + " "
+ + str(dev["Slot"]) + ', ')
+ self.devs[str(index)] = str(dev["Slot"])
+ index = index + 1
+
+ def get_nics_string(self):
+ """
+ Create a NIC whitelist string in the format expected by the config file.
+ """
+ indexes = self.main_values['nics'].split(',')
+ wlns = ''
+ for index in indexes:
+ wlns += "'" + self.devs[index] + "' ,"
+ print(wlns)
+ return wlns.rstrip(',')
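+ # Worked example (hypothetical PCI addresses): with
+ # devs = {'0': '0000:02:00.0', '1': '0000:02:00.1'} and nics = '0,1',
+ # this returns "'0000:02:00.0' ,'0000:02:00.1' ", which is pasted
+ # directly into the WHITELIST_NICS line of the generated config.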
+
+
+############# All the Wizards ##################################
+
+ def dut_wizard(self):
+ """
+ Wizard to collect DUT information
+ """
+ self.wiz_dut = wiz.PromptWizard(
+ name="VSPERF DUT Info Collection",
+ description="This collects DUT info",
+ steps=(
+ # The list of input prompts to ask the user.
+ wiz.WizardStep(
+ # ID where the value will be stored
+ id="dutip",
+ # Display name
+ name="Enter the IP address of the DUT [local]",
+ # Help message
+ help="IP address of the DUT host",
+ # List of validators to run on the input
+ validators=(wiz.required_validator)
+ ),
+ wiz.WizardStep(
+ # ID where the value will be stored
+ id="dutuname",
+ # Display name
+ name="Enter the username to connect to DUT",
+ # Help message
+ help="Username for DUT host",
+ # List of validators to run on the input
+ validators=(wiz.required_validator)
+ ),
+ wiz.WizardStep(
+ # ID where the value will be stored
+ id="dutpwd",
+ # Display name
+ name="Enter the Password to connect to DUT",
+ # Help message
+ help="Password for the DUT host",
+ # List of validators to run on the input
+ validators=(wiz.required_validator)
+ ),
+ )
+ )
+
+ def main_wizard(self):
+ """
+ The Main Wizard
+ """
+ # First get the nics.
+ self.get_nicpcis()
+ self.wiz_main = wiz.PromptWizard(
+ name="VSPERF Common Configuration",
+ description="This configuration covers Basic inputs",
+ steps=(
+ # The list of input prompts to ask the user.
+ wiz.WizardStep(
+ # ID where the value will be stored
+ id="vswitch",
+ # Display name
+ name="VSwitch to use? - OVS or VPP?",
+ # Help message
+ help=" Enter the vswitch to use - either OVS or VPP",
+ # List of validators to run on the input
+ default='OVS'
+ ),
+ wiz.WizardStep(
+ id='nics',
+ name="NICs to Whitelist: " + self.devices,
+ help="Enter the list (separated by comma) of PCI-IDs",
+ validators=(wiz.required_validator),
+ ),
+ wiz.WizardStep(
+ id='tgen',
+ name=("What trafficgen to use: [TestCenter" +
+ " IxNet, Moongen, Trex]?"),
+ help=("Enter the trafficgen to use -" +
+ " TestCenter, IxNet, Moongen, Trex"),
+ validators=(wiz.required_validator),
+ default="Trex"
+ ),
+ wiz.WizardStep(
+ id='guest',
+ name=("Is Scenario either PVP or PVVP?"),
+ help=("This is ti capture guest Configuration"),
+ validators=(wiz.required_validator),
+ default="YES"
+ )
+ )
+ )
+
+ def traffic_wizard(self):
+ """
+ Wizard to collect Traffic Info.
+ """
+ self.wiz_traffic = wiz.PromptWizard(
+ name="Traffic Configuration",
+ description="This configuration covers Traffic specifc inputs",
+ steps=(
+ wiz.WizardStep(
+ id='pktsizes',
+ name='Enter the Packet Sizes - comma separated',
+ help="Allowed values: (64,128,256,512,1024,1280,1518)",
+ validators=(wiz.required_validator)
+ ),
+ wiz.WizardStep(
+ id='duration',
+ name='Enter the Duration (in secs) for the traffic',
+ help="Enter for how long each iteration should be",
+ default='60',
+ ),
+ # wiz.WizardStep(
+ # id='multistream',
+ # name='Multistream preferred?',
+ # help="Multistream preference - Yes or No",
+ # default='No',
+ # validators=(wiz.required_validator)
+ #),
+ wiz.WizardStep(
+ id='count',
+ name='Number of flows?',
+ help="Enter the number of flows - 2 - 1,000,000",
+ default='2',
+ # validators=(wiz.required_validator)
+ ),
+ )
+ )
+
+ def ovs_wizard(self):
+ """
+ Wizard to collect OVS Information
+ """
+ self.wiz_ovs = wiz.PromptWizard(
+ name="Vswitch Configuration",
+ description="Specific configurations of the virtual-Switch",
+ steps=(
+ wiz.WizardStep(
+ id='type',
+ name='OVS Type? [Vanilla or DPDK]',
+ help='Enter either Vanilla or DPDK',
+ default='Vanilla',
+ ),
+ wiz.WizardStep(
+ id='mask',
+ name='Enter the CPU Mask for OVS to use',
+ help='Mask for OVS PMDs',
+ default='30',
+ ),
+ )
+ )
+
+ def vpp_wizard(self):
+ """
+ Wizard to collect VPP configuration
+ """
+ self.wiz_vpp = wiz.PromptWizard(
+ name="Vswitch Configuration",
+ description="Specific configurations of the virtual-Switch",
+ steps=(
+ wiz.WizardStep(
+ id='mode',
+ name='L2 Connection mode xconnect|bridge|l2patch to use?',
+ help='Select the l2 connection mode',
+ default='xconnect',
+ ),
+ )
+ )
+
+ def trex_wizard(self):
+ """
+ Wizard to collect Trex configuration
+ """
+ self.wiz_trex = wiz.PromptWizard(
+ name="Trex Traffic Generator Configuration",
+ description="Specific configurations of Trex TGen",
+ steps=(
+ wiz.WizardStep(
+ id='hostip',
+ name='What is IP address of the T-Rex Host?',
+ help='Enter the IP address of host where Trex is running',
+ validators=(wiz.required_validator)
+ ),
+ wiz.WizardStep(
+ id='user',
+ name='What is the Username of the T-Rex Host?',
+ help='Enter the Username of host where Trex is running',
+ default='root',
+ ),
+ wiz.WizardStep(
+ id='bdir',
+ name='What is the Dir where the T-Rex Binary resides?',
+ help='Enter the Location where Trex Binary is',
+ default='/root/trex_2.37/scripts/',
+ ),
+ wiz.WizardStep(
+ id='pci1',
+ name='What is PCI address of the port-1?',
+ help='Enter the PCI address of Data port 1',
+ validators=(wiz.required_validator)
+ ),
+ wiz.WizardStep(
+ id='pci2',
+ name='What is PCI address of the port-2?',
+ help='Enter the PCI address of Data port 2',
+ validators=(wiz.required_validator)
+ ),
+ wiz.WizardStep(
+ id='rate',
+ name='What is Line rate (in Gbps) of the ports?',
+ help='Enter the linerate of the ports',
+ default='10',
+ ),
+ wiz.WizardStep(
+ id='prom',
+ name='T-Rex Promiscuous enabled?',
+ help='Do you want to enable the Promiscuous mode?',
+ default='False',
+ ),
+ wiz.WizardStep(
+ id='lat',
+ name='What is the Trex Latency PPS?',
+ help='Enter the Latency value in PPS',
+ default='1000',
+ ),
+ wiz.WizardStep(
+ id='bslv',
+ name='Do you want Binary Loss Verification Enabled?',
+ help='Enter True if you want it to be enabled.',
+ default='True',
+ ),
+ wiz.WizardStep(
+ id='maxrep',
+ name='If Loss Verification is enabled, what is the max rep?',
+ help='If BSLV is enabled, what is the max repetition value?',
+ default='2',
+ ),
+ )
+ )
+
+ def stc_wizard(self):
+ """
+ Wizard to collect STC configuration
+ """
+ self.wiz_stc = wiz.PromptWizard(
+ name="Spirent STC Traffic Generator Configuration",
+ description="Specific configurations of Spirent-STC TGen",
+ steps=(
+ wiz.WizardStep(
+ id='lab',
+ name='Lab Server IP?',
+ help='Enter the IP of Lab Server',
+ default='10.10.120.244',
+ ),
+ wiz.WizardStep(
+ id='lisc',
+ name='License Server IP?',
+ help='Enter the IP of the License Server',
+ default='10.10.120.246',
+ ),
+ wiz.WizardStep(
+ id='eaddr',
+ name='East Port Chassis Address?',
+ help='IP address of the East-Port',
+ default='10.10.120.245',
+ ),
+ wiz.WizardStep(
+ id='eslot',
+ name='East Port Slot Number',
+ help='Slot Number of the East Port',
+ default='1',
+ ),
+ wiz.WizardStep(
+ id='eport',
+ name='Port Number of the East-Port',
+ help='Port Number for the East Port',
+ default='1',
+ ),
+ wiz.WizardStep(
+ id='eint',
+ name='East port Interface Address',
+ help='IP to use for East Port?',
+ default='192.85.1.3',
+ ),
+ wiz.WizardStep(
+ id='egw',
+ name='Gateway Address for East Port',
+ help='IP of the East-Port Gateway',
+ default='192.85.1.103',
+ ),
+ wiz.WizardStep(
+ id='waddr',
+ name='West Port Chassis Address?',
+ help='IP address of the West-Port',
+ default='10.10.120.245',
+ ),
+ wiz.WizardStep(
+ id='wslot',
+ name='West Port Slot Number',
+ help='Slot Number of the West Port',
+ default='1',
+ ),
+ wiz.WizardStep(
+ id='wport',
+ name='Port Number of the West-Port',
+ help='Port Number for the West Port',
+ default='2',
+ ),
+ wiz.WizardStep(
+ id='wint',
+ name='West port Interface Address',
+ help='IP to use for West Port?',
+ default='192.85.1.103',
+ ),
+ wiz.WizardStep(
+ id='wgw',
+ name='Gateway Address for West Port',
+ help='IP of the West-Port Gateway',
+ default='192.85.1.3',
+ ),
+ wiz.WizardStep(
+ id='script',
+ name='Name of the Script to use for RFC2544 Tests?',
+ help='Script Name to use for RFC 2544 Tests.',
+ default='testcenter-rfc2544-rest.py',
+ ),
+ )
+ )
+
+ def ixnet_wizard(self):
+ """
+ Wizard to collect ixnet configuration
+ """
+ self.wiz_ixnet = wiz.PromptWizard(
+ name="Ixia IxNet Traffic Generator Configuration",
+ description="Specific configurations of Ixia-Ixnet TGen",
+ steps=(
+ wiz.WizardStep(
+ id='card',
+ name='Card Number?',
+ help='Chassis Card Number',
+ default='1',
+ ),
+ wiz.WizardStep(
+ id='port1',
+ name='Port-1 Number?',
+ help='Chassis Port-1 Number',
+ default='5',
+ ),
+ wiz.WizardStep(
+ id='port2',
+ name='Port-2 Number?',
+ help='Chassis Port-2 Number',
+ default='6',
+ ),
+ wiz.WizardStep(
+ id='libp1',
+ name='IXIA Library path?',
+ help='Library path of Ixia',
+ default='/opt/ixnet/ixos-api/8.01.0.2/lib/ixTcl1.0',
+ ),
+ wiz.WizardStep(
+ id='libp2',
+ name='IXNET Library Path',
+ help='Library Path for the IXNET',
+ default='/opt/ixnet/ixnetwork/8.01.1029.6/lib/IxTclNetwork',
+ ),
+ wiz.WizardStep(
+ id='host',
+ name='IP of the Chassis?',
+ help='Chassis IP',
+ default='10.10.50.6',
+ ),
+ wiz.WizardStep(
+ id='machine',
+ name='IP of the API Server?',
+ help='API Server IP ',
+ default='10.10.120.6',
+ ),
+ wiz.WizardStep(
+ id='port',
+ name='Port of the API Server?',
+ help='API Server Port',
+ default='9127',
+ ),
+ wiz.WizardStep(
+ id='user',
+ name='Username for the API server?',
+ help='Username to use to connect to API Server',
+ default='vsperf_sandbox',
+ ),
+ wiz.WizardStep(
+ id='tdir',
+ name='Path for Results Directory on API Server',
+ help='Results Path on API Server',
+ default='c:/ixia_results/vsperf_sandbox',
+ ),
+ wiz.WizardStep(
+ id='rdir',
+ name='Path for Results directory on DUT',
+ help='DUT Results Path',
+ default='/mnt/ixia_results/vsperf_sandbox',
+ ),
+ )
+ )
+
+ def guest_wizard(self):
+ """
+ Wizard to collect guest configuration
+ """
+ self.wiz_guest = wiz.PromptWizard(
+ name="Guest Configuration for PVP and PVVP Scenarios",
+ description="Guest configurations",
+ steps=(
+ wiz.WizardStep(
+ id='image',
+ name='Enter the Path for the image',
+ help='Complete path where image resides',
+ default='/home/opnfv/vloop-vnf-ubuntu-14.04_20160823.qcow2',
+ ),
+ wiz.WizardStep(
+ id='mode',
+ name='Enter the forwarding mode to use',
+ help='one of io|mac|mac_retry|macswap|flowgen|rxonly|....',
+ default='io',
+ ),
+ wiz.WizardStep(
+ id='smp',
+ name='Number of SMP to use?',
+ help='While Spawning the guest, how many SMPs to use?',
+ default='2',
+ ),
+ wiz.WizardStep(
+ id='cores',
+ name="Guest Core binding. For 2 cores a & b: ['a', 'b']",
+ help='Enter the cores to use in the specified format',
+ default="['8', '9']",
+ ),
+ )
+ )
+
+############### All the Run Operations ######################
+
+ def run_dutwiz(self):
+ """
+ Run the DUT wizard
+ """
+ self.dut_wizard()
+ self.dut_values = self.wiz_dut.run(self.shell)
+
+ def run_mainwiz(self):
+ """
+ Run the Main wizard
+ """
+ self.main_wizard()
+ self.main_values = self.wiz_main.run(self.shell)
+ print(self.main_values['nics'])
+
+ def run_vswitchwiz(self):
+ """
+ Run the vSwitch wizard
+ """
+ if self.main_values['vswitch'] == "OVS":
+ self.ovs_wizard()
+ self.ovs_values = self.wiz_ovs.run(self.shell)
+ elif self.main_values['vswitch'] == 'VPP':
+ self.vpp_wizard()
+ self.vpp_values = self.wiz_vpp.run(self.shell)
+
+ def run_trafficwiz(self):
+ """
+ Run the Traffic wizard
+ """
+ self.traffic_wizard()
+ self.traffic_values = self.wiz_traffic.run(self.shell)
+
+ def run_tgenwiz(self):
+ """
+ Run the Tgen wizard
+ """
+ if self.main_values['tgen'] == "Trex":
+ self.trex_wizard()
+ self.trex_values = self.wiz_trex.run(self.shell)
+ elif self.main_values['tgen'] == "TestCenter":
+ self.stc_wizard()
+ self.stc_values = self.wiz_stc.run(self.shell)
+ elif self.main_values['tgen'] == 'IxNet':
+ self.ixnet_wizard()
+ self.ixnet_values = self.wiz_ixnet.run(self.shell)
+
+ def run_guestwiz(self):
+ """
+ Run the Guest wizard
+ """
+ if self.main_values['guest'] == 'YES':
+ self.guest_wizard()
+ self.guest_values = self.wiz_guest.run(self.shell)
+
+################ Prepare Configuration File ##################
+ #pylint: disable=too-many-statements
+ def prepare_conffile(self):
+ """
+ Create the Configuration file that can be used with VSPERF
+ """
+ with open("./vsperf.conf", 'w+') as ofile:
+ ofile.write("#### This file is Automatically Created ####\n\n")
+ if self.main_values['vswitch'] == "OVS":
+ if self.ovs_values['type'] == "Vanilla":
+ ofile.write("VSWITCH = 'OvsVanilla'\n")
+ else:
+ ofile.write("VSWITCH = 'OvsDpdkVhost'\n")
+ ofile.write("VSWITCH_PMD_CPU_MASK = '" +
+ self.ovs_values['mask'] + "'\n")
+ else:
+ ofile.write("VSWITCH = 'VppDpdkVhost'\n")
+ ofile.write("VSWITCH_VPP_L2_CONNECT_MODE = '" +
+ self.vpp_values['mode'] + "'\n")
+ nics = self.get_nics_string()
+ wln = "WHITELIST_NICS = [" + nics + "]" + "\n"
+ ofile.write(wln)
+ ofile.write("RTE_TARGET = 'x86_64-native-linuxapp-gcc'")
+ ofile.write("\n")
+ ofile.write("TRAFFICGEN = " + "'" + self.main_values['tgen'] + "'")
+ ofile.write("\n")
+ ofile.write("VSWITCH_BRIDGE_NAME = 'vsperf-br0'")
+ ofile.write("\n")
+ ofile.write("TRAFFICGEN_DURATION = " +
+ self.traffic_values['duration'] + "\n")
+ ofile.write("TRAFFICGEN_LOSSRATE = 0" + "\n")
+ ofile.write("TRAFFICGEN_PKT_SIZES = (" +
+ self.traffic_values['pktsizes'] +
+ ")" + "\n")
+ if self.main_values['tgen'] == "Trex":
+ ofile.write("TRAFFICGEN_TREX_HOST_IP_ADDR = '" +
+ self.trex_values['hostip'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_TREX_USER = '" +
+ self.trex_values['user'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_TREX_BASE_DIR = '" +
+ self.trex_values['bdir'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_TREX_LINE_SPEED_GBPS = '" +
+ self.trex_values['rate'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_TREX_PORT1 = '" +
+ self.trex_values['pci1'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_TREX_PORT2 = '" +
+ self.trex_values['pci2'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_TREX_PROMISCUOUS = " +
+ self.trex_values['prom'] + "\n")
+ ofile.write("TRAFFICGEN_TREX_LATENCY_PPS = " +
+ self.trex_values['lat'] + "\n")
+ ofile.write("TRAFFICGEN_TREX_RFC2544_BINARY_SEARCH_LOSS_VERIFICATION = " +
+ self.trex_values['bslv'] + "\n")
+ ofile.write("TRAFFICGEN_TREX_MAX_REPEAT = " +
+ self.trex_values['maxrep'] + "\n")
+ elif self.main_values['tgen'] == "TestCenter":
+ ofile.write("TRAFFICGEN_STC_LAB_SERVER_ADDR = '" +
+ self.stc_values['lab'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_STC_LICENSE_SERVER_ADDR = '" +
+ self.stc_values['lisc'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_STC_EAST_CHASSIS_ADDR = '" +
+ self.stc_values['eaddr'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_STC_EAST_SLOT_NUM = '" +
+ self.stc_values['eslot'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_STC_EAST_PORT_NUM = '" +
+ self.stc_values['eport'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_STC_EAST_INTF_ADDR = '" +
+ self.stc_values['eint'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_STC_EAST_INTF_GATEWAY_ADDR = '" +
+ self.stc_values['egw'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_STC_WEST_CHASSIS_ADDR = '" +
+ self.stc_values['waddr'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_STC_WEST_SLOT_NUM = '" +
+ self.stc_values['wslot'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_STC_WEST_PORT_NUM = '" +
+ self.stc_values['wport'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_STC_WEST_INTF_ADDR = '" +
+ self.stc_values['wint'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_STC_WEST_INTF_GATEWAY_ADDR = '" +
+ self.stc_values['wgw'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_STC_RFC2544_TPUT_TEST_FILE_NAME = '" +
+ self.stc_values['script'] + "'" + "\n")
+ elif self.main_values['tgen'] == 'IxNet':
+ print("IXIA Trafficgen")
+ # Ixia/IxNet configuration
+ ofile.write("TRAFFICGEN_IXIA_CARD = '" +
+ self.ixnet_values['card'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_IXIA_PORT1 = '" +
+ self.ixnet_values['port1'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_IXIA_PORT2 = '" +
+ self.ixnet_values['port2'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_IXIA_LIB_PATH = '" +
+ self.ixnet_values['libp1'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_IXNET_LIB_PATH = '" +
+ self.ixnet_values['libp2'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_IXIA_HOST = '" +
+ self.ixnet_values['host'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_IXNET_MACHINE = '" +
+ self.ixnet_values['machine'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_IXNET_PORT = '" +
+ self.ixnet_values['port'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_IXNET_USER = '" +
+ self.ixnet_values['user'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_IXNET_TESTER_RESULT_DIR = '" +
+ self.ixnet_values['tdir'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_IXNET_DUT_RESULT_DIR = '" +
+ self.ixnet_values['rdir'] + "'" + "\n")
+ if self.main_values['guest'] == 'YES':
+ ofile.write("GUEST_IMAGE = ['" +
+ self.guest_values['image'] + "']" + "\n")
+ ofile.write("GUEST_TESTPMD_FWD_MODE = ['" +
+ self.guest_values['mode'] + "']" + "\n")
+ ofile.write("GUEST_SMP = ['" +
+ self.guest_values['smp'] + "']" + "\n")
+ ofile.write("GUEST_CORE_BINDING = [" +
+ self.guest_values['cores'] + ",]" + "\n")
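+
+ # Sketch of the generated vsperf.conf for hypothetical answers
+ # (OVS-DPDK, PMD mask 30, one whitelisted NIC, Trex lines omitted):
+ #
+ #   VSWITCH = 'OvsDpdkVhost'
+ #   VSWITCH_PMD_CPU_MASK = '30'
+ #   WHITELIST_NICS = ['0000:02:00.0' ]
+ #   RTE_TARGET = 'x86_64-native-linuxapp-gcc'
+ #   TRAFFICGEN = 'Trex'
+ #   VSWITCH_BRIDGE_NAME = 'vsperf-br0'
+ #   TRAFFICGEN_DURATION = 60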
+
+
+def signal_handler(signum, frame):
+ """
+ Signal Handler
+ """
+ print("\n You interrupted, No File will be generated!")
+ print(signum, frame)
+ sys.exit(0)
+
+
+def main():
+ """
+ The Main Function
+ """
+ try:
+ vwiz = VsperfWizard()
+ vwiz.run_dutwiz()
+ vwiz.run_mainwiz()
+ vwiz.run_vswitchwiz()
+ vwiz.run_trafficwiz()
+ vwiz.run_tgenwiz()
+ vwiz.run_guestwiz()
+ vwiz.prepare_conffile()
+ except (KeyboardInterrupt, MemoryError):
+ print("Some Error Occured, No file will be generated!")
+
+ print("Thanks for using the VSPERF-WIZARD, Please look for vsperf.conf " +
+ "file in the current folder")
+
+
+if __name__ == "__main__":
+ signal.signal(signal.SIGINT, signal_handler)
+ main()
diff --git a/tools/docker/client/__init__.py b/tools/docker/client/__init__.py
new file mode 100644
index 00000000..ad0ebec3
--- /dev/null
+++ b/tools/docker/client/__init__.py
@@ -0,0 +1 @@
+#### Empty
diff --git a/tools/docker/client/vsperf_client.py b/tools/docker/client/vsperf_client.py
new file mode 100644
index 00000000..2a3e509f
--- /dev/null
+++ b/tools/docker/client/vsperf_client.py
@@ -0,0 +1,771 @@
+"""Deploy : vsperf_deploy_client"""
+#pylint: disable=import-error
+
+import configparser
+import sys
+from pathlib import Path
+
+
+import grpc
+from proto import vsperf_pb2
+from proto import vsperf_pb2_grpc
+
+CHUNK_SIZE = 1024 * 1024 # 1MB
+
+
+HEADER = r"""
+ _ _ ___ ____ ____ ____ ____ ___ __ ____ ____ _ _ ____
+( \/ )/ __)( _ \( ___)( _ \( ___) / __)( ) (_ _)( ___)( \( )(_ _)
+ \ / \__ \ )___/ )__) ) / )__) ( (__ )(__ _)(_ )__) ) ( )(
+ \/ (___/(__) (____)(_)\_)(__) \___)(____)(____)(____)(_)\_) (__)
+"""
+
+COLORS = {
+ 'blue': '\033[94m',
+ 'pink': '\033[95m',
+ 'green': '\033[92m',
+}
+
+DUT_CHECK = 0
+TGEN_CHECK = 0
+
+def colorize(string, color):
+ """Colorized HEADER"""
+ if color not in COLORS:
+ return string
+ return COLORS[color] + string + '\033[0m'
+
+
+class VsperfClient():
+ """
+ This class represents the VSPERF client.
+ It talks to vsperf-docker to perform installation, configuration and
+ test-execution
+ """
+ # pylint: disable=R0904,no-else-break
+ # pylint: disable=W0603,invalid-name
+ # pylint: disable=R1710
+ def __init__(self):
+ """read vsperfclient.conf"""
+ self.cfp = 'vsperfclient.conf'
+ self.config = configparser.RawConfigParser()
+ self.config.read(self.cfp)
+ self.stub = None
+ self.dut_check = 0
+ self.tgen_check = 0
+
+ def get_mode(self):
+ """read the mode for the client"""
+ return self.config.get('Mode', 'mode')
+
+ def get_deploy_channel_info(self):
+ """get the channel data"""
+ return (self.config.get('DeployServer', 'ip'),
+ self.config.get('DeployServer', 'port'))
+
+ def get_test_channel_info(self):
+ """get the channel for tgen"""
+ return (self.config.get('TestServer', 'ip'),
+ self.config.get('TestServer', 'port'))
+
+ def create_stub(self, channel):
+ """create stub to talk to controller"""
+ self.stub = vsperf_pb2_grpc.ControllerStub(channel)
+
+ def host_connect(self):
+ """provice dut-host credential to controller"""
+ global DUT_CHECK
+ hostinfo = vsperf_pb2.HostInfo(ip=self.config.get('Host', 'ip'),
+ uname=self.config.get('Host', 'uname'),
+ pwd=self.config.get('Host', 'pwd'))
+ connect_reply = self.stub.HostConnect(hostinfo)
+ DUT_CHECK = 1
+ print(connect_reply.message)
+
+ def tgen_connect(self):
+ """provide tgen-host credential to controller"""
+ global TGEN_CHECK
+ tgeninfo = vsperf_pb2.HostInfo(ip=self.config.get('TGen', 'ip'),
+ uname=self.config.get('TGen', 'uname'),
+ pwd=self.config.get('TGen', 'pwd'))
+ connect_reply = self.stub.TGenHostConnect(tgeninfo)
+ TGEN_CHECK = 1
+ print(connect_reply.message)
+
+ def host_connect_both(self):
+ """provice dut-host credential to controller"""
+ global DUT_CHECK
+ hostinfo = vsperf_pb2.HostInfo(ip=self.config.get('Host', 'ip'),
+ uname=self.config.get('Host', 'uname'),
+ pwd=self.config.get('Host', 'pwd'))
+ connect_reply = self.stub.HostConnect(hostinfo)
+ client = VsperfClient()
+ client.automatically_test_dut_connect()
+ DUT_CHECK = 1
+ print(connect_reply.message)
+
+ def tgen_connect_both(self):
+ """provide tgen-host credential to controller"""
+ global TGEN_CHECK
+ tgeninfo = vsperf_pb2.HostInfo(ip=self.config.get('TGen', 'ip'),
+ uname=self.config.get('TGen', 'uname'),
+ pwd=self.config.get('TGen', 'pwd'))
+ connect_reply = self.stub.TGenHostConnect(tgeninfo)
+ TGEN_CHECK = 1
+ client = VsperfClient()
+ client.automatically_test_tgen_connect()
+ print(connect_reply.message)
+
+ @classmethod
+ def automatically_test_dut_connect(cls):
+ """handle automatic connection with tgen"""
+ client = VsperfClient()
+ ip_add, port = client.get_test_channel_info()
+ channel = grpc.insecure_channel(ip_add + ':' + port)
+ client.create_stub(channel)
+ client.host_testcontrol_connect()
+
+ @classmethod
+ def automatically_test_tgen_connect(cls):
+ """handle automatic connection with tgen"""
+ client = VsperfClient()
+ ip_add, port = client.get_test_channel_info()
+ channel = grpc.insecure_channel(ip_add + ':' + port)
+ client.create_stub(channel)
+ client.tgen_testcontrol_connect()
+
+ def exit_section(self):
+ """exit"""
+ @classmethod
+ def section_execute(cls, menuitems, client, ip_add, port):
+ """it will use to enter into sub-option"""
+ channel = grpc.insecure_channel(ip_add + ':' + port)
+
+ while True:
+ client.create_stub(channel)
+ while True:
+ # os.system('clear')
+ print(colorize(HEADER, 'blue'))
+ print(colorize('version 0.1\n', 'pink'))
+ for item in menuitems:
+ print(colorize("[" +
+ str(menuitems.index(item)) + "]", 'green') +
+ list(item.keys())[0])
+ choice = input(">> ")
+ try:
+ if int(choice) < 0:
+ raise ValueError
+ if (int(choice) >= 0) and (int(choice) < (len(menuitems) - 1)):
+ list(menuitems[int(choice)].values())[0]()
+ else:
+ break
+ except (ValueError, IndexError):
+ pass
+ break
+
+ @classmethod
+ def get_user_trex_conf_location(cls):
+ """Ask user for t-rex configuration location"""
+ while True:
+ filename_1 = str(input("Provide correct location for your t-rex configuration " \
+ "file where trex_cfg.yaml exist\n" \
+ "***************** Make Sure You Choose Correct" \
+ " File for Upload*******************\n" \
+ "Provide location: \n"))
+ user_file = Path("{}".format(filename_1.strip()))
+ if user_file.is_file():
+ break
+ else:
+ print("**************File Does Not Exist*****************\n")
+ continue
+ return filename_1
+
+ def upload_tgen_config(self):
+ """t-rex config file as a chunk to controller"""
+ if TGEN_CHECK == 0:
+ return print("TGen-Host is not Connected [!]" \
+ "\nMake sure to establish connection with TGen-Host.")
+ default_location = self.config.get('ConfFile', 'tgenpath')
+ if not default_location:
+ filename = self.get_user_trex_conf_location()
+ else:
+ user_preference = str(input("Use location specified in vsperfclient.conf?[Y/N] :"))
+ while True:
+ if 'y' in user_preference.lower().strip():
+ filename = self.config.get('ConfFile', 'tgenpath')
+ user_file = Path("{}".format(filename.strip()))
+ if user_file.is_file():
+ break
+ else:
+ print("**************File Does Not Exist*****************\n")
+ user_preference = 'n'
+ continue
+ elif 'n' in user_preference.lower().strip():
+ filename = self.get_user_trex_conf_location()
+ break
+ else:
+ print("Invalid Input")
+ user_preference = str(input("Use location specified in vsperfclient.conf?" \
+ "[Y/N] : "))
+ continue
+ filename = filename.strip()
+ chunks = self.get_file_chunks_1(filename)
+ upload_status = self.stub.TGenUploadConfigFile(chunks)
+ print(upload_status.Message)
+
+ def vsperf_install(self):
+ """vsperf install on dut-host"""
+ hostinfo = vsperf_pb2.HostInfo(ip=self.config.get('Host', 'ip'),
+ uname=self.config.get('Host', 'uname'),
+ pwd=self.config.get('Host', 'pwd'))
+ install_reply = self.stub.VsperfInstall(hostinfo)
+ print(install_reply.message)
+
+ def collectd_install(self):
+ """collectd install on dut-host"""
+ hostinfo = vsperf_pb2.HostInfo(ip=self.config.get('Host', 'ip'),
+ uname=self.config.get('Host', 'uname'),
+ pwd=self.config.get('Host', 'pwd'))
+ install_reply = self.stub.CollectdInstall(hostinfo)
+ print(install_reply.message)
+
+ def tgen_install(self):
+ """install t-rex on Tgen host"""
+ tgeninfo = vsperf_pb2.HostInfo(ip=self.config.get('TGen', 'ip'),
+ uname=self.config.get('TGen', 'uname'),
+ pwd=self.config.get('TGen', 'pwd'))
+ install_reply = self.stub.TGenInstall(tgeninfo)
+ print(install_reply.message)
+
+ @classmethod
+ def get_user_conf_location(cls):
+ """get user input for test configuration file"""
+ while True:
+ filename_1 = str(input("Provide correct location for your test configuration " \
+ "file where it exist\n" \
+ "***************** Make Sure You Choose Correct" \
+ " Test File for Upload*******************\n" \
+ "Provide location: \n"))
+ user_file = Path("{}".format(filename_1.strip()))
+ if user_file.is_file():
+ break
+ else:
+ print("**************File Does Not Exist*****************\n")
+ continue
+ return filename_1
+
+ def upload_config(self):
+ """transfer config file as a chunk to controller"""
+ if DUT_CHECK == 0:
+ return print("DUT-Host is not Connected [!]" \
+ "\nMake sure to establish connection with DUT-Host.")
+ default_location = self.config.get('ConfFile', 'path')
+ if not default_location:
+ filename = self.get_user_conf_location()
+ else:
+ user_preference = str(input("Use location specified in vsperfclient.conf?[Y/N] :"))
+ while True:
+ if 'y' in user_preference.lower().strip():
+ filename = self.config.get('ConfFile', 'path')
+ user_file = Path("{}".format(filename.strip()))
+ if user_file.is_file():
+ break
+ else:
+ print("**************File Does Not Exist*****************\n")
+ user_preference = 'n'
+ continue
+ elif 'n' in user_preference.lower().strip():
+ filename = self.get_user_conf_location()
+ break
+ else:
+ print("Invalid Input")
+ user_preference = str(input("Use location specified in vsperfclient.conf?" \
+ "[Y/N] : "))
+ continue
+ filename = filename.strip()
+ upload_param = self.get_file_chunks(filename)
+ upload_status = self.stub.UploadConfigFile(upload_param)
+ print(upload_status.Message)
+
+ def start_test(self):
+ """start test parameter, test config file and test name"""
+ test_control = vsperf_pb2.ControlVsperf(testtype=self.config.get('Testcase', 'test'), \
+ conffile=self.config.get('Testcase', 'conffile'))
+ control_reply = self.stub.StartTest(test_control)
+ print(control_reply.message)
+
+ def start_tgen(self):
+ """start t-rex traffic generetor on tgen-host"""
+ tgen_control = vsperf_pb2.ControlTGen(params=self.config.get('TGen', 'params'))
+ control_reply = self.stub.StartTGen(tgen_control)
+ print(control_reply.message)
+
+ @classmethod
+ def get_file_chunks(cls, filename):
+ """wrap the first chunk of the file into a single message with its filename"""
+ # only the first CHUNK_SIZE bytes are read; the test config file is
+ # expected to fit into a single chunk
+ test_filename = filename.split("/")[-1]
+ with open(filename, 'rb') as f_1:
+ piece = f_1.read(CHUNK_SIZE)
+ if not piece:
+ return None
+ return vsperf_pb2.ConfFileTest(Content=piece, Filename=test_filename)
+
+ @classmethod
+ def get_file_chunks_1(cls, filename):
+ """Convert file into chunks"""
+ with open(filename, 'rb') as f:
+ while True:
+ piece = f.read(CHUNK_SIZE)
+ if len(piece) == 0:
+ return
+ yield vsperf_pb2.ConfFile(Content=piece)
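+ # Because get_file_chunks_1 is a generator, the gRPC stub can stream
+ # the file as a sequence of ConfFile messages, e.g.:
+ #
+ #   chunks = self.get_file_chunks_1('/path/to/trex_cfg.yaml')
+ #   upload_status = self.stub.TGenUploadConfigFile(chunks)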
+
+
+ def test_status(self):
+ """check the test_status"""
+ test_check = vsperf_pb2.StatusQuery(
+ testtype=self.config.get('Testcase', 'test'))
+ check_result_reply = self.stub.TestStatus(test_check)
+ print(check_result_reply.message)
+
+ def vsperf_terminate(self):
+ """after running test terminate vsperf on dut host"""
+ hostinfo = vsperf_pb2.HostInfo(ip=self.config.get('Host', 'ip'),
+ uname=self.config.get('Host', 'uname'),
+ pwd=self.config.get('Host', 'pwd'))
+ termination_reply = self.stub.TerminateVsperf(hostinfo)
+ print(termination_reply.message)
+
+ def start_beats(self):
+ """start beats on dut-host before running test"""
+ hostinfo = vsperf_pb2.HostInfo(ip=self.config.get('Host', 'ip'),
+ uname=self.config.get('Host', 'uname'),
+ pwd=self.config.get('Host', 'pwd'))
+ status_reply = self.stub.StartBeats(hostinfo)
+ print(status_reply.message)
+
+ def remove_vsperf(self):
+ """remove vsperf from dut-host"""
+ hostinfo = vsperf_pb2.HostInfo(ip=self.config.get('Host', 'ip'),
+ uname=self.config.get('Host', 'uname'),
+ pwd=self.config.get('Host', 'pwd'))
+ status_reply = self.stub.RemoveVsperf(hostinfo)
+ print(status_reply.message)
+
+ def remove_result_folder(self):
+ """remove resutl folder from dut-host"""
+ hostinfo = vsperf_pb2.HostInfo(ip=self.config.get('Host', 'ip'),
+ uname=self.config.get('Host', 'uname'),
+ pwd=self.config.get('Host', 'pwd'))
+ status_reply = self.stub.RemoveResultFolder(hostinfo)
+ print(status_reply.message)
+
+ def remove_config_files(self):
+ """remove all config files"""
+ hostinfo = vsperf_pb2.HostInfo(ip=self.config.get('Host', 'ip'),
+ uname=self.config.get('Host', 'uname'),
+ pwd=self.config.get('Host', 'pwd'))
+ status_reply = self.stub.RemoveUploadedConfig(hostinfo)
+ print(status_reply.message)
+
+ def remove_collectd(self):
+ """remove collectd from dut-host"""
+ hostinfo = vsperf_pb2.HostInfo(ip=self.config.get('Host', 'ip'),
+ uname=self.config.get('Host', 'uname'),
+ pwd=self.config.get('Host', 'pwd'))
+ status_reply = self.stub.RemoveCollectd(hostinfo)
+ print(status_reply.message)
+
+ def remove_everything(self):
+ """remove everything from dut host"""
+ hostinfo = vsperf_pb2.HostInfo(ip=self.config.get('Host', 'ip'),
+ uname=self.config.get('Host', 'uname'),
+ pwd=self.config.get('Host', 'pwd'))
+ status_reply = self.stub.RemoveEverything(hostinfo)
+ print(status_reply.message)
+
+ def sanity_nic_check(self):
+ """nic is available on tgen host check"""
+ tgeninfo = vsperf_pb2.HostInfo(ip=self.config.get('TGen', 'ip'),
+ uname=self.config.get('TGen', 'uname'),
+ pwd=self.config.get('TGen', 'pwd'))
+ status_reply = self.stub.SanityNICCheck(tgeninfo)
+ print(status_reply.message)
+
+ def sanity_collectd_check(self):
+ """check collecd properly running"""
+ hostinfo = vsperf_pb2.HostInfo(ip=self.config.get('Host', 'ip'),
+ uname=self.config.get('Host', 'uname'),
+ pwd=self.config.get('Host', 'pwd'))
+ status_reply = self.stub.SanityCollectdCheck(hostinfo)
+ print(status_reply.message)
+
+ def cpu_allocation_check(self):
+ """check cpu allocation"""
+ hostinfo = vsperf_pb2.HostInfo(ip=self.config.get('Host', 'ip'),
+ uname=self.config.get('Host', 'uname'),
+ pwd=self.config.get('Host', 'pwd'))
+ status_reply = self.stub.SanityCPUAllocationCheck(hostinfo)
+ print(status_reply.message)
+
+ def sanity_vnf_path(self):
+ """vnf path available on dut host"""
+ hostinfo = vsperf_pb2.HostInfo(ip=self.config.get('Host', 'ip'),
+ uname=self.config.get('Host', 'uname'),
+ pwd=self.config.get('Host', 'pwd'))
+ status_reply = self.stub.SanityVNFpath(hostinfo)
+ print(status_reply.message)
+
+ def sanity_vsperf_check(self):
+ """check vsperf correctly installed"""
+ hostinfo = vsperf_pb2.HostInfo(ip=self.config.get('Host', 'ip'),
+ uname=self.config.get('Host', 'uname'),
+ pwd=self.config.get('Host', 'pwd'))
+ status_reply = self.stub.SanityVSPERFCheck(hostinfo)
+ print(status_reply.message)
+
+ def sanity_dut_tgen_conn_check(self):
+ """check the connection between dut-host and tgen-host"""
+ hostinfo = vsperf_pb2.HostInfo(ip=self.config.get('Host', 'ip'),
+ uname=self.config.get('Host', 'uname'),
+ pwd=self.config.get('Host', 'pwd'))
+ status_reply = self.stub.SanityTgenConnDUTCheck(hostinfo)
+ print(status_reply.message)
+
+ def dut_test_availability(self):
+ """dut-host is free for test check"""
+ hostinfo = vsperf_pb2.HostInfo(ip=self.config.get('Host', 'ip'),
+ uname=self.config.get('Host', 'uname'),
+ pwd=self.config.get('Host', 'pwd'))
+ status_reply = self.stub.DUTvsperfTestAvailability(hostinfo)
+ print(status_reply.message)
+
+ def get_test_conf_from_dut(self):
+ """get the vsperf test config file from dut host for user to check"""
+ hostinfo = vsperf_pb2.HostInfo(ip=self.config.get('Host', 'ip'),
+ uname=self.config.get('Host', 'uname'),
+ pwd=self.config.get('Host', 'pwd'))
+ status_reply = self.stub.GetVSPERFConffromDUT(hostinfo)
+ print(status_reply.message)
+
+ def dut_hugepage_config(self):
+ """setup hugepages on dut-host"""
+ configparam = vsperf_pb2.HugepConf(hpmax=self.config.get('HugepageConfig', 'HpMax'), \
+ hprequested=self.config.get('HugepageConfig',\
+ 'HpRequested'))
+ config_status_reply = self.stub.DutHugepageConfig(configparam)
+ print(config_status_reply.message)
+
+ @classmethod
+ def get_user_collectd_conf_location(cls):
+ """get collectd configuration file location from user"""
+ while True:
+ filename_1 = str(input("Provide correct location for your collectd configuration " \
+ "file where collectd.conf exist\n" \
+ "***************** Make Sure You Choose Correct" \
+ " File for Upload*******************\n" \
+ "Provide location: \n"))
+ user_file = Path("{}".format(filename_1.strip()))
+ if user_file.is_file():
+ break
+ else:
+ print("**************File Does Not Exist*****************\n")
+ continue
+ return filename_1
+ def host_testcontrol_connect(self):
+ """provice dut-host credential to test controller"""
+ global DUT_CHECK
+ hostinfo = vsperf_pb2.HostInfo(ip=self.config.get('Host', 'ip'),
+ uname=self.config.get('Host', 'uname'),
+ pwd=self.config.get('Host', 'pwd'))
+ self.stub.HostConnect(hostinfo)
+
+ def tgen_testcontrol_connect(self):
+ """provide tgen-host credential to test controller"""
+ global TGEN_CHECK
+ tgeninfo = vsperf_pb2.HostInfo(ip=self.config.get('TGen', 'ip'),
+ uname=self.config.get('TGen', 'uname'),
+ pwd=self.config.get('TGen', 'pwd'))
+ self.stub.TGenHostConnect(tgeninfo)
+
+ def upload_collectd_config(self):
+ """collectd config file chunks forwarded to controller"""
+ if DUT_CHECK == 0:
+ return print("DUT-Host is not Connected [!]" \
+ "\nMake sure to establish connection with DUT-Host.")
+ default_location = self.config.get('ConfFile', 'collectdpath')
+ if not default_location:
+ filename = self.get_user_collectd_conf_location()
+ else:
+ user_preference = str(input("Use location specified in vsperfclient.conf?[Y/N] :"))
+ while True:
+ if 'y' in user_preference.lower().strip():
+ filename = self.config.get('ConfFile', 'collectdpath')
+ user_file = Path("{}".format(filename.strip()))
+ if user_file.is_file():
+ break
+ else:
+ print("**************File Does Not Exist*****************\n")
+ user_preference = 'n'
+ continue
+ elif 'n' in user_preference.lower().strip():
+ filename = self.get_user_collectd_conf_location()
+ break
+ else:
+ print("Invalid Input")
+ user_preference = str(input("Use location specified in vsperfclient.conf?" \
+ "[Y/N] : "))
+ continue
+ filename = filename.strip()
+ chunks = self.get_file_chunks_1(filename)
+ upload_status = self.stub.CollectdUploadConfig(chunks)
+ print(upload_status.Message)
+
+ def dut_check_dependecies(self):
+ """check_dependecies on dut-host"""
+ hostinfo = vsperf_pb2.HostInfo(ip=self.config.get('Host', 'ip'),
+ uname=self.config.get('Host', 'uname'),
+ pwd=self.config.get('Host', 'pwd'))
+ check_reply = self.stub.CheckDependecies(hostinfo)
+ print(check_reply.message)
+
+ @classmethod
+ def establish_connection_both(cls):
+ """
+ This function establishes connections for VSPERF to both the deploy \
+ and testcontrol servers
+ """
+ client = VsperfClient()
+ ip_add, port = client.get_deploy_channel_info()
+ print("Establish connection for vsperf")
+ menuitems_connection = [
+ {"Connect to DUT Host": client.host_connect_both},
+ {"Connect to TGen Host": client.tgen_connect_both},
+ {"Return to Previous Menu": client.exit_section}
+ ]
+ client.section_execute(menuitems_connection, client, ip_add, port)
+ @classmethod
+ def establish_connection_deploy(cls):
+ """
+ This function establishes connections for VSPERF using the deploy server
+ """
+ client = VsperfClient()
+ ip_add, port = client.get_deploy_channel_info()
+ print("Establish connection for vsperf")
+ menuitems_connection = [
+ {"Connect to DUT Host": client.host_connect},
+ {"Connect to TGen Host": client.tgen_connect},
+ {"Return to Previous Menu": client.exit_section}
+ ]
+ client.section_execute(menuitems_connection, client, ip_add, port)
+ @classmethod
+ def establish_connection_test(cls):
+ """
+ This function establishes connections for VSPERF using the testcontrol server
+ """
+ client = VsperfClient()
+ ip_add, port = client.get_test_channel_info()
+ print("Establish connection for vsperf")
+ menuitems_connection = [
+ {"Connect to DUT Host": client.host_connect},
+ {"Connect to TGen Host": client.tgen_connect},
+ {"Return to Previous Menu": client.exit_section}
+ ]
+ client.section_execute(menuitems_connection, client, ip_add, port)
+ @classmethod
+ def vsperf_setup(cls):
+ """setup sub-options"""
+ client = VsperfClient()
+ ip_add, port = client.get_deploy_channel_info()
+ print("Prerequisites Installation for VSPERF")
+ menuitems_setup = [
+ {"Install VSPERF": client.vsperf_install},
+ {"Install TGen ": client.tgen_install},
+ {"Install Collectd": client.collectd_install},
+ {"Return to Previous Menu": client.exit_section}
+ ]
+ client.section_execute(menuitems_setup, client, ip_add, port)
+ @classmethod
+ def upload_config_files(cls):
+ """all the upload sub-options"""
+ client = VsperfClient()
+ ip_add, port = client.get_deploy_channel_info()
+ menuitems_setup = [
+ {"Upload TGen Configuration File": client.upload_tgen_config},
+ {"Upload Collectd Configuration File": client.upload_collectd_config},
+ {"Return to Previous Menu": client.exit_section}
+ ]
+ client.section_execute(menuitems_setup, client, ip_add, port)
+ @classmethod
+ def manage_sysparam_config(cls):
+ """manage system parameter on dut host before run test"""
+ client = VsperfClient()
+ ip_add, port = client.get_deploy_channel_info()
+ menuitems_setup = [
+ {"DUT-Host hugepages configuration": client.dut_hugepage_config},
+ {"Check VSPERF Dependencies on DUT-Host": client.dut_check_dependecies},
+ {"Return to Previous Menu": client.exit_section}
+ ]
+ client.section_execute(menuitems_setup, client, ip_add, port)
+
+ @classmethod
+ def test_status_check(cls):
+ """after running test , test status related sub-options"""
+ client = VsperfClient()
+ ip_add, port = client.get_test_channel_info()
+ menuitems_setup = [
+ {"Test status": client.test_status},
+ {"Get Test Configuration file from DUT-host": client.get_test_conf_from_dut},
+ {"Return to Previous Menu": client.exit_section}
+ ]
+ client.section_execute(menuitems_setup, client, ip_add, port)
+
+ @classmethod
+ def sanity_check_options(cls):
+ """all sanity check sub-options"""
+ client = VsperfClient()
+ ip_add, port = client.get_test_channel_info()
+ menuitems_setup = [
+ {"Check installed VSPERF": client.sanity_vsperf_check},
+ {"Check Test Config's VNF path is available on DUT-Host": client.sanity_vnf_path},
+ {"Check NIC PCIs is available on Traffic Generator": client.sanity_nic_check},
+ {"Check CPU allocation on DUT-Host": client.cpu_allocation_check},
+ {"Check installed Collectd": client.sanity_collectd_check},
+ {"Check Connection between DUT-Host and Traffic Generator Host":
+ client.sanity_dut_tgen_conn_check},
+ {"Return to Previous Menu": client.exit_section}
+ ]
+ client.section_execute(menuitems_setup, client, ip_add, port)
+
+ @classmethod
+ def run_test(cls):
+ """run test sub-options"""
+ print("**Before user Run Tests we highly recommend user to perform Sanity Checks.......")
+ client = VsperfClient()
+ ip_add, port = client.get_test_channel_info()
+ menuitems_setup = [
+ {"Upload Test Configuration File": client.upload_config},
+ {"Perform Sanity Checks before running tests": client.sanity_check_options},
+ {"Check if DUT-HOST is available": client.dut_test_availability},
+ {"Start TGen ": client.start_tgen},
+ {"Start Beats": client.start_beats},
+ {"Start Test": client.start_test},
+ {"Return to Previous Menu": client.exit_section}
+ ]
+ client.section_execute(menuitems_setup, client, ip_add, port)
+
+ @classmethod
+ def clean_up(cls):
+ """clean-up sub-options"""
+ print(
+ "*******************************************************************\n\n\
+ If you are performing a Test on IntelPOD 12 - Node 4, be careful during removal\n\n\
+ *******************************************************************")
+ client = VsperfClient()
+ ip_add, port = client.get_test_channel_info()
+ menuitems_setup = [
+ {"Remove VSPERF": client.remove_vsperf},
+ {"Terminate VSPERF": client.vsperf_terminate},
+ {"Remove Results from DUT-Host": client.remove_result_folder},
+ {"Remove Uploaded Configuration File": client.remove_config_files},
+ {"Remove Collectd": client.remove_collectd},
+ {"Remove Everything": client.remove_everything},
+ {"Return to Previous Menu": client.exit_section}
+
+ ]
+ client.section_execute(menuitems_setup, client, ip_add, port)
+
+def run():
+ """It will run the actul primary options"""
+ client = VsperfClient()
+ client_mode = client.get_mode()
+ print(client_mode)
+ if "deploy" in client_mode.lower().strip():
+ menuitems = [
+ {"Establish Connections": client.establish_connection_deploy},
+ {"Installation": client.vsperf_setup},
+ {"Upload Configuration Files": client.upload_config_files},
+ {"Manage DUT-System Configuration": client.manage_sysparam_config},
+ {"Exit": sys.exit}
+ ]
+ #ip_add, port = client.get_channel_info()
+ #channel = grpc.insecure_channel(ip_add + ':' + port)
+ while True:
+ # os.system('clear')
+ print(colorize(HEADER, 'blue'))
+ print(colorize('version 0.1\n', 'pink'))
+ for item in menuitems:
+ print(colorize("[" +
+ str(menuitems.index(item)) + "]", 'green') +
+ list(item.keys())[0])
+ choice = input(">> ")
+ try:
+ if int(choice) < 0:
+ raise ValueError
+ list(menuitems[int(choice)].values())[0]()
+ except (ValueError, IndexError):
+ pass
+
+ elif "test" in client_mode.lower().strip():
+ menuitems = [
+ {"Establish Connections": client.establish_connection_test},
+ {"Run Test": client.run_test},
+ {"Test Status": client.test_status_check},
+ {"Clean-Up": client.clean_up},
+ {"Exit": sys.exit}
+ ]
+ #ip_add, port = client.get_channel_info()
+ #channel = grpc.insecure_channel(ip_add + ':' + port)
+ while True:
+ # os.system('clear')
+ print(colorize(HEADER, 'blue'))
+ print(colorize('version 0.1\n', 'pink'))
+ for item in menuitems:
+ print(colorize("[" +
+ str(menuitems.index(item)) + "]", 'green') +
+ list(item.keys())[0])
+ choice = input(">> ")
+ try:
+ if int(choice) < 0:
+ raise ValueError
+ list(menuitems[int(choice)].values())[0]()
+ except (ValueError, IndexError):
+ pass
+
+ elif "together" in client_mode.lower().strip():
+ menuitems = [
+ {"Establish Connections": client.establish_connection_both},
+ {"Installation": client.vsperf_setup},
+ {"Upload Configuration Files": client.upload_config_files},
+ {"Manage DUT-System Configuration": client.manage_sysparam_config},
+ {"Run Test": client.run_test},
+ {"Test Status": client.test_status_check},
+ {"Clean-Up": client.clean_up},
+ {"Exit": sys.exit}
+ ]
+ #ip_add, port = client.get_channel_info()
+ #channel = grpc.insecure_channel(ip_add + ':' + port)
+ while True:
+ # os.system('clear')
+ print(colorize(HEADER, 'blue'))
+ print(colorize('version 0.1\n', 'pink'))
+ for item in menuitems:
+ print(colorize("[" +
+ str(menuitems.index(item)) + "]", 'green') +
+ list(item.keys())[0])
+ choice = input(">> ")
+ try:
+ if int(choice) < 0:
+ raise ValueError
+ list(menuitems[int(choice)].values())[0]()
+ except (ValueError, IndexError):
+ pass
+
+ else:
+ print("You have not defined client mode in vsperfclient.conf [!]")
+
+
+if __name__ == '__main__':
+ run()
diff --git a/tools/docker/client/vsperfclient.conf b/tools/docker/client/vsperfclient.conf
new file mode 100644
index 00000000..12a657d7
--- /dev/null
+++ b/tools/docker/client/vsperfclient.conf
@@ -0,0 +1,39 @@
+[DeployServer]
+ip = 127.0.0.1
+port = 50051
+
+[TestServer]
+ip = 127.0.0.1
+port = 50052
+
+[Mode]
+#Deploy: To perform only the vsperf set-up
+#Test: To perform only the test
+#Together: To perform both the set-up and the test
+#Assign any value from the above options according to your requirement
+mode = Together
+
+[Host]
+ip = 10.10.120.24
+uname = opnfv
+pwd = opnfv
+
+[TGen]
+ip = 10.10.120.25
+uname = root
+pwd = P@ssw0rd
+params = -i --no-scapy-server --nc --no-watchdog
+
+[HugepageConfig]
+HpMax = 8192
+HpRequested = 1024
+
+#provide appropriate location for configuration files
+[ConfFile]
+path =
+tgenpath =
+collectdpath =
+
+[Testcase]
+test = phy2phy_tput
+conffile = vsperf.conf
diff --git a/tools/docker/deployment/auto/controller/Dockerfile b/tools/docker/deployment/auto/controller/Dockerfile
new file mode 100644
index 00000000..e849d8f2
--- /dev/null
+++ b/tools/docker/deployment/auto/controller/Dockerfile
@@ -0,0 +1,23 @@
+FROM python:3.6
+LABEL maintainer="sridhar.rao@spirent.com"
+
+ENV GRPC_PYTHON_VERSION 1.4.0
+RUN apt-get update && apt-get -y install python3-pip && apt-get -y install openssh-server
+RUN pip3 install grpcio==${GRPC_PYTHON_VERSION} grpcio-tools==${GRPC_PYTHON_VERSION}
+RUN pip3 install paramiko
+RUN pip3 install chainmap
+RUN pip3 install oslo.utils
+RUN pip3 install scp
+
+WORKDIR /usr/src/app
+
+COPY ./vsperf ./vsperf
+
+VOLUME ["/usr/src/app/vsperf"]
+
+EXPOSE 50051
+
+CMD ["python3", "./vsperf/vsperf_controller.py"]
+
+#CMD tail -f /dev/null
+
diff --git a/tools/docker/deployment/auto/controller/list.env b/tools/docker/deployment/auto/controller/list.env
new file mode 100644
index 00000000..ab4404b7
--- /dev/null
+++ b/tools/docker/deployment/auto/controller/list.env
@@ -0,0 +1,14 @@
+DUT_IP_ADDRESS=10.10.120.24
+DUT_USERNAME=opnfv
+DUT_PASSWORD=opnfv
+
+TGEN_IP_ADDRESS=10.10.120.25
+TGEN_USERNAME=root
+TGEN_PASSWORD=P@ssw0rd
+TGEN_PARAMS= -i --no-scapy-server --nc --no-watchdog
+
+HUGEPAGE_MAX=8192
+HUGEPAGE_REQUESTED=1024
+
+SANITY_CHECK=NO
+
diff --git a/tools/docker/deployment/auto/controller/vsperf/__init__.py b/tools/docker/deployment/auto/controller/vsperf/__init__.py
new file mode 100644
index 00000000..ad0ebec3
--- /dev/null
+++ b/tools/docker/deployment/auto/controller/vsperf/__init__.py
@@ -0,0 +1 @@
+#### Empty
diff --git a/tools/docker/deployment/auto/controller/vsperf/collectd.conf b/tools/docker/deployment/auto/controller/vsperf/collectd.conf
new file mode 100644
index 00000000..9cefc8c5
--- /dev/null
+++ b/tools/docker/deployment/auto/controller/vsperf/collectd.conf
@@ -0,0 +1,49 @@
+Hostname "pod12-node4"
+Interval 1
+LoadPlugin intel_rdt
+LoadPlugin processes
+LoadPlugin interface
+LoadPlugin network
+LoadPlugin ovs_stats
+LoadPlugin cpu
+LoadPlugin memory
+LoadPlugin csv
+#LoadPlugin dpdkstat
+##############################################################################
+# Plugin configuration #
+##############################################################################
+<Plugin processes>
+ ProcessMatch "ovs-vswitchd" "ovs-vswitchd"
+ ProcessMatch "ovsdb-server" "ovsdb-server"
+ ProcessMatch "collectd" "collectd"
+</Plugin>
+<Plugin network>
+ Server "10.10.120.22" "25826"
+</Plugin>
+
+<Plugin ovs_stats>
+ Port "6640"
+ Address "127.0.0.1"
+ Socket "/usr/local/var/run/openvswitch/db.sock"
+ Bridges "vsperf-br0"
+</Plugin>
+
+<Plugin "intel_rdt">
+ Cores "2" "4-5" "6-7" "8" "9" "22" "23" "24" "25" "26" "27"
+</Plugin>
+
+<Plugin csv>
+ DataDir "/tmp/csv"
+ StoreRates false
+</Plugin>
+
+#<Plugin dpdkstat>
+# <EAL>
+# Coremask "0x1"
+# MemoryChannels "4"
+# FilePrefix "rte"
+# </EAL>
+# SharedMemObj "dpdk_collectd_stats_0"
+# EnabledPortMask 0xffff
+#</Plugin>
+
diff --git a/tools/docker/deployment/auto/controller/vsperf/trex_cfg.yaml b/tools/docker/deployment/auto/controller/vsperf/trex_cfg.yaml
new file mode 100644
index 00000000..8bb8e341
--- /dev/null
+++ b/tools/docker/deployment/auto/controller/vsperf/trex_cfg.yaml
@@ -0,0 +1,20 @@
+- port_limit : 2
+ version : 2
+ interfaces : ["81:00.0", "81:00.1"] # list of the interfaces to bind # node 4
+ port_bandwidth_gb : 10 #10G nics
+ port_info : # set eth mac addr
+ - dest_mac : "3c:fd:fe:b4:41:09" # port 0
+ src_mac : "3c:fd:fe:b4:41:08"
+ - dest_mac : "3c:fd:fe:b4:41:08" # port 1
+ src_mac : "3c:fd:fe:b4:41:09"
+ platform :
+ master_thread_id : 17
+ latency_thread_id : 16
+ dual_if :
+ - socket : 1
+ threads : [22,23,24,25,26,27]
+ - socket : 0
+ threads : [10,11,12,13,14,15]
diff --git a/tools/docker/deployment/auto/controller/vsperf/vsperf_controller.py b/tools/docker/deployment/auto/controller/vsperf/vsperf_controller.py
new file mode 100644
index 00000000..b6865272
--- /dev/null
+++ b/tools/docker/deployment/auto/controller/vsperf/vsperf_controller.py
@@ -0,0 +1,392 @@
+# Copyright 2018-19 Spirent Communications.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+VSPERF_deploy_auto
+"""
+# pylint: disable=W0603
+
+import os
+import sys
+from utils import ssh
+
+_ONE_DAY_IN_SECONDS = 60 * 60 * 24
+
+DUT_IP = os.getenv('DUT_IP_ADDRESS')
+DUT_USER = os.getenv('DUT_USERNAME')
+DUT_PWD = os.getenv('DUT_PASSWORD')
+
+TGEN_IP = os.getenv('TGEN_IP_ADDRESS')
+TGEN_USER = os.getenv('TGEN_USERNAME')
+TGEN_PWD = os.getenv('TGEN_PASSWORD')
+TGEN_PARAM = os.getenv('TGEN_PARAMS')
+
+HPMAX = int(os.getenv('HUGEPAGE_MAX'))
+HPREQUESTED = int(os.getenv('HUGEPAGE_REQUESTED'))
+
+SANITY = str(os.getenv('SANITY_CHECK'))
+
+DUT_CLIENT = None
+TGEN_CLIENT = None
+
+
+def host_connect():
+ """
+ Handle host connectivity to DUT
+ """
+ global DUT_CLIENT
+ DUT_CLIENT = ssh.SSH(host=DUT_IP, user=DUT_USER, password=DUT_PWD)
+ print("DUT-Host Successfully Connected .........................................[OK] \n ")
+
+def tgen_connect():
+ """
+ Handle TGen connection to T-Rex
+ """
+ global TGEN_CLIENT
+ TGEN_CLIENT = ssh.SSH(host=TGEN_IP, user=TGEN_USER, password=TGEN_PWD)
+ print("Traffic Generator Successfully Connected ...............................[OK] \n ")
+
+
+def vsperf_install():
+ """
+ Perform actual installation
+ """
+ vsperf_check_command = "source ~/vsperfenv/bin/activate ; "
+ vsperf_check_command += "cd vswitchperf && ./vsperf --help"
+ vsperf_check_cmd_result = str(DUT_CLIENT.execute(vsperf_check_command)[1])
+ vsperf_verify_list = [
+ 'usage',
+ 'positional arguments',
+ 'optional arguments',
+ 'test selection options',
+ 'test behavior options']
+ for idx, i in enumerate(vsperf_verify_list, start=1):
+ if str(i) in vsperf_check_cmd_result:
+ if idx < 5:
+ continue
+ elif idx == 5:
+ print(
+ "VSPERF is Already Installed on DUT-Host..........................."\
+ ".......[OK]\n")
+ else:
+ download_cmd = "git clone https://gerrit.opnfv.org/gerrit/vswitchperf"
+ DUT_CLIENT.run(download_cmd)
+ install_cmd = "cd vswitchperf/systems ; "
+ install_cmd += "echo '{}' | sudo -S ./build_base_machine.sh ".\
+ format(DUT_PWD)
+ DUT_CLIENT.run(install_cmd)
+ print(
+ "Vsperf Installed on DUT-Host ....................................[OK]\n")
+ break
+
+
+def tgen_install():
+ """
+ Install T-rex traffic gen on TGen
+ """
+ kill_cmd = "pkill -f ./t-rex"
+ TGEN_CLIENT.send_command(kill_cmd)
+ tgen_start_check = "cd trex/scripts && ./t-rex-64 -f cap2/dns.yaml -d 100 -m 1 --nc"
+ tgen_start_cmd_result = int(TGEN_CLIENT.execute(tgen_start_check)[0])
+ if tgen_start_cmd_result == 0:
+ print(
+ "The Host has T-rex Installed....................................[OK]\n")
+ else:
+ download_cmd = "git clone https://github.com/cisco-system-traffic-generator/trex-core trex"
+ TGEN_CLIENT.run(download_cmd)
+ install_cmd = "cd trex-core/linux_dpdk ; ./b configure ; ./b build"
+ TGEN_CLIENT.run(install_cmd)
+ print(
+ "The Host has now T-rex Installed...........................[OK]\n")
+
+def upload_tgen_config_file():
+ """
+ Upload Tgen Config File on T-rex
+ """
+ localpath = '/usr/src/app/vsperf/trex_cfg.yaml'
+ if not os.path.exists(localpath):
+ print("TGEN config File does not exist................[Failed]")
+ return
+ remotepath = '~/trex_cfg.yaml'
+ check_trex_config_cmd = "echo {} | sudo -S find /etc -maxdepth 1 -name '{}'".format(
+ TGEN_PWD, remotepath[2:])
+ check_test_result = str(TGEN_CLIENT.execute(check_trex_config_cmd)[1])
+ if remotepath[2:] in check_test_result:
+ DUT_CLIENT.run("rm -f /etc/{}".format(remotepath[2:]))
+ TGEN_CLIENT.put_file(localpath, remotepath)
+ TGEN_CLIENT.run(
+ "echo {} | sudo -S mv ~/{} /etc/".format(TGEN_PWD, remotepath[2:]), pty=True)
+ print(
+ "T-rex Configuration File Uploaded on TGen-Host...........................[OK]\n")
+
+
+def install_collectd():
+ """
+ Installation of collectd on the DUT host
+ """
+ check_collectd_config_cmd = "find /opt -maxdepth 1 -name 'collectd'"
+ check_test_result = str(DUT_CLIENT.execute(check_collectd_config_cmd)[1])
+ if "collectd" in check_test_result:
+ print(
+ 'Collectd Installed Successfully on DUT-Host..............................[OK]\n')
+ else:
+ download_cmd = "git clone https://github.com/collectd/collectd.git"
+ DUT_CLIENT.run(download_cmd)
+ build_cmd = "cd collectd ; "
+ build_cmd += "./build.sh"
+ DUT_CLIENT.run(build_cmd)
+ config_cmd = "cd collectd ; ./configure --enable-syslog --enable-logfile "
+ config_cmd += "--enable-hugepages --enable-debug ; "
+ DUT_CLIENT.run(config_cmd)
+ install_cmd = "cd collectd ; make ; "
+ install_cmd += "echo '{}' | sudo -S make install".format(DUT_PWD)
+ DUT_CLIENT.run(install_cmd, pty=True)
+ print(
+ 'Collectd Installed Successfully on DUT-Host.............................[OK]\n ')
+
+
+def collectd_upload_config():
+ """
+ Upload Configuration file of Collectd on DUT
+ """
+ localpath = '/usr/src/app/vsperf/collectd.conf'
+ if not os.path.exists(localpath):
+ print("Collectd config File does not exist.......................[Failed]")
+ return
+ remotepath = '~/collectd.conf'
+ collectd_config_cmd = "echo {} | sudo -S find /opt/collectd/etc -maxdepth 1 -name '{}'".\
+ format(DUT_PWD, remotepath[2:])
+ check_test_result = str(DUT_CLIENT.execute(collectd_config_cmd)[1])
+ if remotepath[2:] in check_test_result:
+ DUT_CLIENT.run(
+ "echo {} | sudo -S rm -f /opt/collectd/etc/{}".format(DUT_PWD, remotepath[2:]))
+ DUT_CLIENT.put_file(localpath, remotepath)
+ DUT_CLIENT.run("echo {} | sudo -S mv ~/{} /opt/collectd/etc/".\
+ format(DUT_PWD, remotepath[2:]), pty=True)
+ print(
+ "Collectd Configuration File Uploaded on DUT-Host.........................[OK]\n ")
+
+def start_tgen():
+ """
+ Start the traffic generator
+ """
+ kill_cmd = "pkill -f ./t-rex"
+ TGEN_CLIENT.send_command(kill_cmd)
+ run_cmd = "cd trex_2.37/scripts && "
+ run_cmd += "screen ./t-rex-64 "
+ run_cmd += TGEN_PARAM
+ TGEN_CLIENT.send_command(run_cmd)
+ print(
+ "T-Rex Successfully running...............................................[OK]\n")
+
+
+def dut_hugepage_config():
+ """
+ Configure the DUT system hugepage parameter from client
+ """
+ if not HPMAX or not HPREQUESTED:
+ print("HPMAX and HPREQUESTED not defined ...................[Failed]")
+ return
+ hugepage_cmd = "echo '{}' | sudo -S mkdir -p /mnt/huge ; ".format(
+ DUT_PWD)
+ hugepage_cmd += "echo '{}' | sudo -S mount -t hugetlbfs nodev /mnt/huge".format(
+ DUT_PWD)
+ DUT_CLIENT.run(hugepage_cmd, pty=True)
+ hp_nr_cmd = "cat /sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages"
+ hp_free_cmd = "cat /sys/devices/system/node/node0/hugepages/hugepages-2048kB/free_hugepages"
+ hp_nr = int(DUT_CLIENT.execute(hp_nr_cmd)[1])
+ hp_free = int(DUT_CLIENT.execute(hp_free_cmd)[1])
+ # Keep the current allocation unless free pages run short of the request.
+ hp_nr_new = hp_nr
+ if hp_free <= HPREQUESTED:
+ hp_nr_new = hp_nr + (HPREQUESTED - hp_free)
+ if hp_nr_new > HPMAX:
+ hp_nr_new = HPMAX
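+ # Worked example (hypothetical starting point): with hp_nr=512, hp_free=0,
+ # HPREQUESTED=1024 and HPMAX=8192, hp_nr_new = 512 + (1024 - 0) = 1536,
+ # which stays below the HPMAX cap of 8192.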
+
+ nr_hugepage_cmd = "echo '{}' | sudo -S bash -c \"echo 'vm.nr_hugepages={}' >> ".\
+ format(DUT_PWD, hp_nr_new)
+ nr_hugepage_cmd += "/etc/sysctl.conf\""
+ DUT_CLIENT.run(nr_hugepage_cmd, pty=True)
+
+ dict_cmd = "cat /sys/devices/system/node/node1/hugepages/hugepages-2048kB/nr_hugepages"
+ dict_check = int(DUT_CLIENT.execute(dict_cmd)[0])
+ if dict_check == 0:
+ node1_hugepage_cmd = "echo '{}' | sudo -s bash -c \"echo 0 > ".format(DUT_PWD)
+ node1_hugepage_cmd += "/sys/devices/system/node/node1/hugepages"
+ node1_hugepage_cmd += "/hugepages-2048kB/nr_hugepages\""
+ DUT_CLIENT.run(node1_hugepage_cmd, pty=True)
+ print("DUT-Host system configured with {} No of Hugepages.....................[OK] \n ".\
+ format(hp_nr_new))
+
+
+def sanity_nic_check():
+ """
+ Check whether the NIC PCI IDs are correctly configured
+ """
+ trex_conf_path = "cat /etc/trex_cfg.yaml | grep interfaces"
+ trex_conf_read = TGEN_CLIENT.execute(trex_conf_path)[1]
+ nic_pid_ids_list = [trex_conf_read.split("\"")[1], trex_conf_read.split("\"")[3]]
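+ # Example: for a /etc/trex_cfg.yaml line such as
+ #   interfaces : ["81:00.0", "81:00.1"]
+ # splitting on '"' yields nic_pid_ids_list == ['81:00.0', '81:00.1'].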
+ trex_nic_pic_id_cmd = "lspci | egrep -i --color 'network|ethernet'"
+ trex_nic_pic_id = str(TGEN_CLIENT.execute(trex_nic_pic_id_cmd)[1]).split('\n')
+ acheck = 0
+ for k in trex_nic_pic_id:
+ for j in nic_pid_ids_list:
+ if j in k:
+ acheck += 1
+ else:
+ pass
+ if acheck == 2:
+ print("Both the NIC PCI Ids are Correctly"\
+ " configured on TGen-Host...............[OK]\n")
+ else:
+ print("You configured NIC PCI Ids Wrong in "\
+ "TGen-Host............................[OK]\n")
+
+
+def sanity_collectd_check():
+ """
+ Check and verify that collectd starts and runs properly
+ """
+ check_collectd_cmd = "find /opt -maxdepth 1 -name 'collectd'"
+ check_test_result = str(DUT_CLIENT.execute(check_collectd_cmd)[1])
+ if "collectd" in check_test_result:
+ check_collectd_run_cmd = "echo {} | sudo -S service collectd start".format(
+ DUT_PWD)
+ DUT_CLIENT.run(check_collectd_run_cmd, pty=True)
+ check_collectd_status_cmd = "ps aux | grep collectd"
+ check_collectd_status = str(
+ DUT_CLIENT.execute(check_collectd_status_cmd)[1])
+ if "/sbin/collectd" in check_collectd_status:
+ print(
+ "Collectd is working Fine ................................................[OK] \n ")
+ else:
+ print(
+ "Collectd Fail to Start, Install correctly before running Test....[Failed]\n ")
+ else:
+ print(
+ "Collectd is not installed yet........................................[Failed]\n")
+
+def sanity_vsperf_check():
+ """
+ Verify that VSPERF is installed correctly
+ """
+ if not DUT_CLIENT:
+ print("The Client is disconnected................................[Failed]")
+ return
+ vsperf_check_cmd = "source ~/vsperfenv/bin/activate ; cd vswitchperf && ./vsperf --help"
+ vsperf_check_cmd_result = str(DUT_CLIENT.execute(vsperf_check_cmd)[1])
+ vsperf_verify_list = [
+ 'usage',
+ 'positional arguments',
+ 'optional arguments',
+ 'test selection options',
+ 'test behavior options']
+ for idx, i in enumerate(vsperf_verify_list, start=1):
+ if str(i) in vsperf_check_cmd_result:
+ if idx < 5:
+ continue
+ elif idx == 5:
+ print(
+ "VSPERF Installed Correctly and Working fine.........................."\
+ "....[OK]\n")
+ else:
+ print(
+ "VSPERF Does Not Installed Correctly , INSTALL IT AGAIN........[Critical]\n")
+ else:
+ print(
+ "VSPERF Does Not Installed Correctly , INSTALL IT AGAIN............[Critical]\n")
+ break
+
+
+def sanity_tgen_conn_dut_check():
+ """
+ Confirm that the DUT host can reach the traffic generator host
+ """
+ if not DUT_CLIENT or not TGEN_CLIENT:
+ print("The Client is disconnected................................[Failed]")
+ return
+ tgen_connectivity_check_cmd = "ping {} -c 1".format(TGEN_IP)
+ tgen_connectivity_check_result = int(
+ DUT_CLIENT.execute(tgen_connectivity_check_cmd)[0])
+ if tgen_connectivity_check_result == 0:
+ print(
+ "DUT-Host is successfully reachable to Traffic Generator Host.............[OK]\n")
+ else:
+ print(
+ "DUT-host is unsuccessful to reach the Traffic Generator Host..............[Failed]")
+ print(
+ "Make sure to establish connection before running Test...............[Critical]\n")
+
+
+def sanity_tgen_check():
+ """
+ Check whether T-Rex is running properly
+ """
+ if not TGEN_CLIENT:
+ print("The Client is disconnected................................[Failed]")
+ return
+ tgen_start_cmd_check = "cd trex/scripts &&"
+ tgen_start_cmd_check += " ./t-rex-64 -f cap2/dns.yaml -d 100 -m 1 --nc"
+ tgen_start_cmd_result = int(TGEN_CLIENT.execute(tgen_start_cmd_check)[0])
+ if tgen_start_cmd_result == 0:
+ print(
+ "TGen-Host successfully running........................................[OK]\n")
+ else:
+ print("TGen-Host is unable to start t-rex ..................[Failed]")
+ print("Make sure you install t-rex correctly ...............[Critical]\n")
+
+
+def dut_vsperf_test_availability():
+ """
+ Before running a test, make sure no other test is running
+ """
+ vsperf_ava_cmd = "ps -ef | grep -v grep | grep ./vsperf | awk '{print $2}'"
+ vsperf_ava_result = len(
+ (DUT_CLIENT.execute(vsperf_ava_cmd)[1]).split("\n"))
+ if vsperf_ava_result == 1:
+ print("DUT-Host is available for performing VSPERF Test\n\
+ You can perform Test!")
+ else:
+ print("DUT-Host is busy right now, Wait for some time\n\
+ Always Check availability before Running Test!\n")
+
+if DUT_IP:
+ host_connect()
+if not DUT_CLIENT:
+ print('Failed to connect to DUT ...............[Critical]')
+ sys.exit()
+else:
+ vsperf_install()
+ install_collectd()
+ collectd_upload_config()
+ dut_hugepage_config()
+ dut_vsperf_test_availability()
+if TGEN_IP:
+ tgen_connect()
+if not TGEN_CLIENT:
+ print('Failed to connect to TGEN_HOST.............[Critical]')
+ sys.exit()
+else:
+ tgen_install()
+ upload_tgen_config_file()
+ sanity_nic_check()
+ start_tgen()
+
+print("\n\nIF you are getting any Failed or Critical message!!!\n" \
+ "Please follow this steps:\n"
+ "1. Make necessory changes before running VSPERF TEST\n"\
+ "2. Re-Run the auto deployment container")
+
+if SANITY and 'yes' in SANITY.lower():
+ sanity_collectd_check()
+ sanity_vsperf_check()
+ sanity_tgen_check()
+ sanity_tgen_conn_dut_check()
diff --git a/tools/docker/deployment/auto/docker-compose.yml b/tools/docker/deployment/auto/docker-compose.yml
new file mode 100644
index 00000000..b5b808d2
--- /dev/null
+++ b/tools/docker/deployment/auto/docker-compose.yml
@@ -0,0 +1,22 @@
+version: '2'
+
+services:
+ deploy:
+ build:
+ context: ./controller
+ volumes:
+ - ./controller/vsperf:/vsperf
+ env_file:
+ - ./controller/list.env
+ ports:
+ - 50051
diff --git a/tools/docker/deployment/interactive/controller/Dockerfile b/tools/docker/deployment/interactive/controller/Dockerfile
new file mode 100644
index 00000000..3d9fca42
--- /dev/null
+++ b/tools/docker/deployment/interactive/controller/Dockerfile
@@ -0,0 +1,21 @@
+FROM python:3.6
+LABEL maintainer="sridhar.rao@spirent.com"
+
+ENV GRPC_PYTHON_VERSION 1.4.0
+RUN apt-get update && apt-get -y install python3-pip
+RUN pip3 install grpcio==${GRPC_PYTHON_VERSION} grpcio-tools==${GRPC_PYTHON_VERSION}
+RUN pip3 install paramiko
+RUN pip3 install chainmap
+RUN pip3 install oslo.utils
+RUN pip3 install scp
+
+WORKDIR /usr/src/app
+
+COPY ./vsperf ./vsperf
+
+VOLUME ["/usr/src/app/vsperf"]
+
+CMD ["python3", "./vsperf/vsperf_controller.py"]
diff --git a/tools/docker/deployment/interactive/controller/vsperf/__init__.py b/tools/docker/deployment/interactive/controller/vsperf/__init__.py
new file mode 100644
index 00000000..ad0ebec3
--- /dev/null
+++ b/tools/docker/deployment/interactive/controller/vsperf/__init__.py
@@ -0,0 +1 @@
+#### Empty
diff --git a/tools/docker/deployment/interactive/controller/vsperf/vsperf_controller.py b/tools/docker/deployment/interactive/controller/vsperf/vsperf_controller.py
new file mode 100644
index 00000000..b192c493
--- /dev/null
+++ b/tools/docker/deployment/interactive/controller/vsperf/vsperf_controller.py
@@ -0,0 +1,360 @@
+# Copyright 2018-19 Spirent Communications.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# pylint: disable=R0902
+# Sixteen instance attributes is reasonable here
+# pylint: disable=W0221
+"""
+VSPERF docker-controller.
+"""
+
+import io
+import time
+from concurrent import futures
+import grpc
+
+from proto import vsperf_pb2
+from proto import vsperf_pb2_grpc
+from utils import ssh
+
+_ONE_DAY_IN_SECONDS = 60 * 60 * 24
+
+
+# pylint: disable=too-few-public-methods,no-self-use
+class PseudoFile(io.RawIOBase):
+ """
+ Handle ssh command output.
+ """
+
+ def write(self, chunk):
+ """
+ Write to file
+ """
+ if "error" in chunk:
+ return
+ with open("./output.txt", "a") as fref:
+ fref.write(chunk)
+
+
+class VsperfController(vsperf_pb2_grpc.ControllerServicer):
+ """
+ Main Controller Class
+ """
+
+ def __init__(self):
+ """
+ Initialization
+ """
+ self.client = None
+ self.dut = None
+ self.dut_check = None
+ self.tgen_check = None
+ self.user = None
+ self.pwd = None
+ self.tgen_client = None
+ self.tgen = None
+ self.tgen_user = None
+ self.tgenpwd = None
+ self.tgen_conf = None
+ self.scenario = None
+ self.hpmax = None
+ self.hprequested = None
+ self.tgen_ip_address = None
+ self.trex_conf = None
+ # Default TGen is T-Rex
+ self.trex_conffile = "trex_cfg.yml"
+ self.collectd_conffile = "collectd.conf"
+
+ def setup(self):
+ """
+ Performs Setup of the client.
+ """
+ # Just connect to VM.
+ self.client = ssh.SSH(host=self.dut, user=self.user,
+ password=self.pwd)
+ self.client.wait()
+
+ def install_vsperf(self):
+ """
+ Perform actual installation
+ """
+ download_cmd = "git clone https://gerrit.opnfv.org/gerrit/vswitchperf"
+ self.client.run(download_cmd)
+ install_cmd = "cd vswitchperf/systems ; "
+ install_cmd += "echo '{}' | sudo -S ./build_base_machine.sh ".format(
+ self.pwd)
+ #install_cmd += "./build_base_machine.sh"
+ self.client.run(install_cmd)
+
+ def VsperfInstall(self, request, context):
+ """
+ Handle VSPERF install command from client
+ """
+ # print("Installing VSPERF")
+ if self.dut_check != 0:
+ return vsperf_pb2.StatusReply(message="DUT-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " DUT-Host.")
+ vsperf_check_cmd = "source ~/vsperfenv/bin/activate ; cd vswitchperf* && ./vsperf --help"
+ vsperf_check_cmd_result = str(self.client.execute(vsperf_check_cmd)[1])
+ vsperf_verify_list = [
+ 'usage',
+ 'positional arguments',
+ 'optional arguments',
+ 'test selection options',
+ 'test behavior options']
+ for idx, i in enumerate(vsperf_verify_list, start=1):
+ if str(i) in vsperf_check_cmd_result:
+ if idx < 5:
+ continue
+ elif idx == 5:
+ return vsperf_pb2.StatusReply(
+ message="VSPERF is Already Installed on DUT-Host")
+ self.install_vsperf()
+ return vsperf_pb2.StatusReply(message="VSPERF Successfully Installed DUT-Host")
+
+ def HostConnect(self, request, context):
+ """
+ Handle host connectivity command from client
+ """
+ self.dut = request.ip
+ self.user = request.uname
+ self.pwd = request.pwd
+ self.setup()
+ check_cmd = "ls -l"
+ self.dut_check = int(self.client.execute(check_cmd)[0])
+ return vsperf_pb2.StatusReply(message="Successfully Connected")
+
+ def save_chunks_to_file(self, chunks, filename):
+ """
+ Write the output to file
+ """
+ with open(filename, 'wb') as fref:
+ for chunk in chunks:
+ fref.write(chunk.Content)
+
+###### Traffic Generator Related functions ####
+ def TGenHostConnect(self, request, context):
+ """
+ Connect to TGen-Node
+ """
+ self.tgen = request.ip
+ self.tgen_user = request.uname
+ self.tgenpwd = request.pwd
+ self.tgen_setup()
+ check_tgen_cmd = "ls"
+ self.tgen_check = int(self.tgen_client.execute(check_tgen_cmd)[0])
+ return vsperf_pb2.StatusReply(message="Successfully Connected")
+
+ def tgen_setup(self):
+ """
+ Setup the T-Gen Client
+ """
+ # Just connect to VM.
+ self.tgen_client = ssh.SSH(host=self.tgen, user=self.tgen_user,
+ password=self.tgenpwd)
+ self.tgen_client.wait()
+
+ def TGenInstall(self, request, context):
+ """
+ Install Traffic generator on the node.
+ """
+ if self.tgen_check != 0:
+ return vsperf_pb2.StatusReply(message="TGen-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " TGen-Host.")
+ kill_cmd = "pkill -f t-rex"
+ self.tgen_client.send_command(kill_cmd)
+ tgen_start_cmd = "cd trex_2.37/scripts && ./t-rex-64 -f cap2/dns.yaml -d 100 -m 1 --nc"
+ tgen_start_cmd_result = int(self.tgen_client.execute(tgen_start_cmd)[0])
+ kill_cmd = "pkill -f t-rex"
+ self.tgen_client.send_command(kill_cmd)
+ if tgen_start_cmd_result == 0:
+ return vsperf_pb2.StatusReply(
+ message="Traffic Generetor has T-rex Installed")
+ download_cmd = "git clone https://github.com/cisco-system-traffic-generator/trex-core"
+ self.tgen_client.run(download_cmd)
+ install_cmd = "cd trex-core/linux_dpdk ; ./b configure ; ./b build"
+ self.tgen_client.run(install_cmd)
+ # before you setup your trex_cfg.yml make sure to do sanity check
+ # NIC PICs and establish route between your DUT and Test Device.
+ return vsperf_pb2.StatusReply(message="Traffic Generetor has now T-rex Installed")
+
+ def TGenUploadConfigFile(self, request, context):
+ """
+ Handle upload config-file command from client
+ """
+ if self.tgen_check != 0:
+ return vsperf_pb2.StatusReply(message="TGen-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " TGen-Host.")
+ filename = self.trex_conffile
+ self.save_chunks_to_file(request, filename)
+ check_trex_config_cmd = "echo {} | sudo -S find /etc -maxdepth 1 -name trex_cfg.yaml".\
+ format(self.tgenpwd)
+ check_test_result = str(
+ self.tgen_client.execute(check_trex_config_cmd)[1])
+ if "trex_cfg.yaml" in check_test_result:
+ self.tgen_client.run("rm -f /etc/trex_cfg.yaml")
+ self.upload_tgen_config()
+ self.tgen_client.run(
+ "echo {} | sudo -S mv ~/trex_cfg.yaml /etc/".format(self.tgenpwd), pty=True)
+ return vsperf_pb2.UploadStatus(Message="Successfully Uploaded",
+ Code=1)
+
+ def upload_tgen_config(self):
+ """
+ Perform file upload.
+ """
+ self.tgen_client.put_file(self.trex_conffile, '/root/trex_cfg.yaml')
+
+#### Tool-Chain related Functions ####
+
+ def install_collectd(self):
+ """
+ Installation of collectd on the DUT host
+ """
+ check_collectd_config_cmd = "find /opt -maxdepth 1 -name 'collectd'"
+ check_test_result = str(
+ self.client.execute(check_collectd_config_cmd)[1])
+ if "collectd" in check_test_result:
+ pass
+ else:
+ download_cmd = "git clone https://github.com/collectd/collectd.git"
+ self.client.run(download_cmd)
+ build_cmd = "cd collectd ; "
+ build_cmd += "./build.sh"
+ self.client.run(build_cmd)
+ config_cmd = "cd collectd ; ./configure --enable-syslog "
+ config_cmd += "--enable-logfile --enable-hugepages --enable-debug ; "
+ self.client.run(config_cmd)
+ install_cmd = "cd collectd ; make ; "
+ install_cmd += "echo '{}' | sudo -S make install".format(self.pwd)
+ self.client.run(install_cmd, pty=True)
+
+ def CollectdInstall(self, request, context):
+ """
+ Install Collectd on DUT
+ """
+ if self.dut_check != 0:
+ return vsperf_pb2.StatusReply(message="DUT-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " DUT-Host.")
+ self.install_collectd()
+ return vsperf_pb2.StatusReply(
+ message="Collectd Successfully Installed on DUT-Host")
+
+ def upload_collectd_config(self):
+ """
+ Perform file upload.
+ """
+ self.client.put_file(self.collectd_conffile, '~/collectd.conf')
+ move_cmd = "echo '{}' | sudo -S mv ~/collectd.conf /opt/collectd/etc".format(
+ self.pwd)
+ self.client.run(move_cmd, pty=True)
+
+ def CollectdUploadConfig(self, request, context):
+ """
+ Upload collectd config-file on DUT
+ """
+ if self.dut_check != 0:
+ return vsperf_pb2.StatusReply(message="DUT-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " DUT-Host.")
+ filename = self.collectd_conffile
+ self.save_chunks_to_file(request, filename)
+ self.upload_collectd_config()
+ return vsperf_pb2.UploadStatus(
+ Message="Successfully Collectd Configuration Uploaded", Code=1)
+
+###System Configuration related functions###
+
+ def DutHugepageConfig(self, request, context):
+ """
+ Configure the DUT system hugepage parameter from client
+ """
+ if self.dut_check != 0:
+ return vsperf_pb2.StatusReply(message="DUT-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " DUT-Host.")
+ self.hpmax = int(request.hpmax)
+ self.hprequested = int(request.hprequested)
+ hugepage_cmd = "echo '{}' | sudo -S mkdir -p /mnt/huge ; ".format(
+ self.pwd)
+ hugepage_cmd += "echo '{}' | sudo -S mount -t hugetlbfs nodev /mnt/huge".format(
+ self.pwd)
+ self.client.run(hugepage_cmd, pty=True)
+ hp_nr_cmd = "cat /sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages"
+ hp_free_cmd = "cat /sys/devices/system/node/node0/hugepages/hugepages-2048kB/free_hugepages"
+ hp_nr = int(self.client.execute(hp_nr_cmd)[1])
+ hp_free = int(self.client.execute(hp_free_cmd)[1])
+ hp_nr_new = hp_nr
+ if hp_free <= self.hprequested:
+ hp_nr_new = hp_nr + (self.hprequested - hp_free)
+ if hp_nr_new > self.hpmax:
+ hp_nr_new = self.hpmax
+
+ nr_hugepage_cmd = "echo '{}' | sudo -S bash -c \"echo 'vm.nr_hugepages={}' >>".\
+ format(self.pwd, hp_nr_new)
+ nr_hugepage_cmd += " /etc/sysctl.conf\""
+ self.client.run(nr_hugepage_cmd, pty=True)
+
+ dict_cmd = "cat /sys/devices/system/node/node1/hugepages/hugepages-2048kB/nr_hugepages"
+ dict_check = int(self.client.execute(dict_cmd)[0])
+ if dict_check == 0:
+ node1_hugepage_cmd = "echo '{}' | sudo -s bash -c \"echo 0 >".format(self.pwd)
+ node1_hugepage_cmd += " /sys/devices/system/node/node1/"
+ node1_hugepage_cmd += "hugepages/hugepages-2048kB/nr_hugepages\""
+ return vsperf_pb2.StatusReply(
+ message="DUT-Host system configured with {} No of Hugepages".format(hp_nr_new))
+
+ def CheckDependecies(self, request, context):
+ """
+ Check and Install required packages on DUT
+ """
+ if self.dut_check != 0:
+ return vsperf_pb2.StatusReply(message="DUT-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " DUT-Host.")
+ packages = ['python34-tkinter', 'sysstat', 'bc']
+ for pkg in packages:
+ # pkg_check_cmd = "dpkg -s {}".format(pkg) for ubuntu
+ pkg_check_cmd = "rpm -q {}".format(pkg)
+ pkg_cmd_response = self.client.execute(pkg_check_cmd)[0]
+ if pkg_cmd_response == 1:
+ install_pkg_cmd = "echo '{}' | sudo -S yum install -y {}".format(
+ self.pwd, pkg)
+ #install_pkg_cmd = "echo '{}' | sudo -S apt-get install -y {}".format(self.pwd,pkg)
+ self.client.run(install_pkg_cmd, pty=True)
+
+ return vsperf_pb2.StatusReply(message="Python34-tkinter, sysstat and bc Packages"\
+ "are now Installed")
+
+def serve():
+ """
+ Start servicing the client
+ """
+ server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
+ vsperf_pb2_grpc.add_ControllerServicer_to_server(
+ VsperfController(), server)
+ server.add_insecure_port('[::]:50051')
+ server.start()
+ try:
+ while True:
+ time.sleep(_ONE_DAY_IN_SECONDS)
+ except (SystemExit, KeyboardInterrupt, MemoryError, RuntimeError):
+ server.stop(0)
+
+
+if __name__ == "__main__":
+ serve()
diff --git a/tools/docker/deployment/interactive/docker-compose.yml b/tools/docker/deployment/interactive/docker-compose.yml
new file mode 100644
index 00000000..cbf894c5
--- /dev/null
+++ b/tools/docker/deployment/interactive/docker-compose.yml
@@ -0,0 +1,21 @@
+version: '2'
+
+services:
+ deploy:
+ build:
+ context: ./controller
+ volumes:
+ - ./controller/vsperf:/vsperf
+ ports:
+ - 50051:50051
diff --git a/tools/docker/docs/architecture.txt b/tools/docker/docs/architecture.txt
new file mode 100644
index 00000000..a0b29e05
--- /dev/null
+++ b/tools/docker/docs/architecture.txt
@@ -0,0 +1,70 @@
+Architecture diagrams of the VSPERF-Containers.
+
+Figure-1: Deploy-Auto
+ ++++++++++++ +++++++++++++++++++++++++++
+ +Container + + +
+ + + + +
+ + + |--- + DUT - HOST +
+ + + | + +
+ + + | + +
+ + Deploy + | +++++++++++++++++++++++++++
+ + [AUTO] + --|
+ + + | +++++++++++++++++++++++++++
+ + + | + +
+ + + | + +
+ + + |--- + TGEN (HOST) +
+ + + + +
+ + + + +
+ ++++++++++++ +++++++++++++++++++++++++++
+
+Figure-2: Deploy-Interactive
+ ++++++++++++ ++++++++++++ +++++++++++++++++++++++++++
+ + + + Container+ + +
+ + + + + + +
+ + + + + |----+ DUT - HOST +
+ + + + + | + +
+ + + + + | + +
+ + CLIENT + + Deploy + | +++++++++++++++++++++++++++
+ + +<------>+ [INTER +---|
+ + + + ACTIVE]+ | +++++++++++++++++++++++++++
+ + + + + | + +
+ + + + + | + +
+ + + + + |----+ TGEN (HOST) +
+ + + + + + +
+ + + + + + +
+ ++++++++++++ ++++++++++++ +++++++++++++++++++++++++++
+
+Figure-3: TestControl Auto
+ ++++++++++++ +++++++++++++++++++++++++++ ++++++++++++
+ + Container+ + + +Container +
+ + + + + + +
+ + + |--- + DUT - HOST +----| + +
+ + + | + + | + +
+ + + | + + | + +
+ + Test + | +++++++++++++++++++++++++++ | + Results +
+ + Control +---| |---+ +
+ + [AUTO] + | +++++++++++++++++++++++++++ | + +
+ + + | + + | + +
+ + + | + + | + +
+ + + |--- + TGEN (HOST) +----| + +
+ + + + + + +
+ + + + + + +
+ ++++++++++++ +++++++++++++++++++++++++++ ++++++++++++
+
+Figure-4: TestControl Interactive
+ ++++++++++++ ++++++++++++ +++++++++++++++++++++++++++ ++++++++++++
+ + + +Container + + + + Container+
+ + + + + + + + +
+ + + + + |----+ DUT - HOST +----| + +
+ + + + + | + + | + +
+ + + + + | + + | + +
+ + CLIENT + + Test + | + + | + Results +
+ + + + Control + | +++++++++++++++++++++++++++ | + +
+ + +<------>+ [INTER +---| |---+ +
+ + + + ACTIVE]+ | +++++++++++++++++++++++++++ | + +
+ + + + + | + + | + +
+ + + + + | + + | + +
+ + + + + |----+ TGEN (HOST) +----| + +
+ + + + + + + + +
+ + + + + + + + +
+ ++++++++++++ ++++++++++++ +++++++++++++++++++++++++++ ++++++++++++
diff --git a/tools/docker/docs/client.rst b/tools/docker/docs/client.rst
new file mode 100644
index 00000000..1483ff40
--- /dev/null
+++ b/tools/docker/docs/client.rst
@@ -0,0 +1,99 @@
+VSPERF Client
+--------------
+VSPERF client is a simple Python application, which can be used to work with the interactive deploy and testcontrol containers.
+
+============
+Description
+============
+
+The VSPERF client is used both to set up the DUT-Host and TGen-Host and to run multiple tests. The user can perform different operations by selecting the available options and their sub-options.
+
+The VSPERF client provides the following options to the user.
+
+* Establish Connections
+This option allows the user to initialize the connections.
+
+[0]Connect to DUT Host: It will establish a connection with the DUT-HOST. DUT-HOST refers to the system where the DUT - vswitch and VNFs - run. The vsperf application also runs on the DUT-HOST.
+[1]Connect to Tgen Host: This option will establish a connection with the TGEN-HOST. TGEN-HOST refers to the system where the traffic generator runs. As of now, only T-Rex is supported for installation and configuration. Both options read the host credentials from ``vsperfclient.conf``; see the example below.
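+
+A sample of the relevant ``vsperfclient.conf`` sections::
+
+ [Host]
+ ip = 10.10.120.24
+ uname = opnfv
+ pwd = opnfv
+
+ [TGen]
+ ip = 10.10.120.25
+ uname = root
+ pwd = P@ssw0rd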
+
+* Installation
+After establishing the connections, user can perform installations to set up the test environment. Under this, we have 3 options:
+
+[0]Install VSPERF : This option will first check whether VSPERF is installed on the DUT-Host. If it is not, it will perform the VSPERF installation process on the DUT-Host.
+
+[1]Install TGen: This option will check whether T-Rex is installed on the TGen-Host. If T-Rex is already installed, it will also check whether it is working fine. If T-Rex is not installed, the configured version of T-Rex will be installed.
+
+[2]Install Collectd: This option will install collectd on the DUT-Host.
+
+* Upload Configuration Files
+Once the installation process is completed, the user can upload configuration files. Two uploads are supported:
+
+[0]Upload TGen Configuration File: It will upload the trex_cfg.yaml configuration file to the TGen-Host. [The user can either specify the path to trex_cfg.yaml in vsperfclient.conf or provide it at runtime.] This file will be used to run the T-Rex traffic generator.
+
+[1]Upload Collectd Configuration File: This option is used to upload the collectd configuration file.
+
+* Manage DUT-System Configuration
+Following the upload of configuration files, the user can perform some basic configuration of the DUT-Host. The available options are:
+
+[0]DUT-Host hugepages configuration: This option allows the user to manage hugepages on the DUT-Host. [The user needs to provide values for HpMax and HpRequested in vsperfclient.conf; see the sample below.]
+
+[1]Check VSPERF dependencies: Using this option, the user can check the library dependencies on the DUT-Host.
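+
+A sample ``[HugepageConfig]`` section of ``vsperfclient.conf``::
+
+ [HugepageConfig]
+ HpMax = 8192
+ HpRequested = 1024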
+
+* Run Test
+Once the above steps are completed, the user can perform sanity checks and run the tests. The available options are:
+
+[0]Upload Test Configuration File : This option will upload the vsperf test configuration file.
+
+[1]Perform Sanity Checks before running tests : This option has certain sub-options; the user must perform all sanity checks before running a test and may not be able to start the VSPERF test until all of them have passed. The sanity check option contains the following sub-options: (a) check VSPERF is installed correctly, (b) check if the VNF path is available on the DUT-Host, (c) check if the configured NIC-PCIs are available on the TGen and DUT hosts, (d) check if Collectd is installed correctly, (e) check if the connection between DUT-Host and TGen-Host is OK, (f) check that the CPU allocation on the DUT-Host is done correctly.
+
+[2]Check if DUT-HOST is available : The user can check whether the DUT-Host is available for a test. If the DUT-Host is available, the user can go ahead and start the test.
+
+[3]Start TGen : This option will start t-rex traffic generator for test.
+
+[4]Start Beats : This option will start Beats on the DUT-Host.
+
+[5]Start Test : If all the sanity checks have passed and the traffic generator is running, this option will start the vsperf test. Whatever test is defined in vsperfclient.conf will be performed; see the sample ``[Testcase]`` section below. Note: the user can also perform multiple tests.
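+
+A sample ``[Testcase]`` section of ``vsperfclient.conf``::
+
+ [Testcase]
+ test = phy2phy_tput
+ conffile = vsperf.conf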
+
+* Test Status
+Once the user has started a test, they can check its status. The following sub-options are available.
+
+[0]Test status : Check whether the test has completed successfully or failed. If the user is running multiple tests, they can identify the failed test name using this option.
+
+[1]Get Test Configuration file from DUT-host: The user can also read back the content of the uploaded test configuration file.
+
+* Clean-Up
+When all tests are done, the user can perform a cleanup of the systems, using the following sub-options:
+
+[0]Remove VSPERF: This option will completely remove the vsperfenv on the DUT-Host.
+
+[1]Terminate VSPERF: This option will keep vsperfenv on the DUT-Host. If any vsperf-related processes are still running, this option will terminate them, e.g. ovs-vswitchd, ovsdb-server, vppctl, stress, qemu-system-x86_64.
+
+[2]Remove Results from DUT-Host : This option will remove all the test results located in the /tmp folder.
+
+[3]Remove Uploaded Configuration Files: This option will remove all uploaded test configuration files.
+
+[4]Remove Collectd: This option will uninstall collectd from the DUT-Host.
+
+[5]Remove Everything: This option will execute all the options listed above.
+
+=============================
+How To Use
+=============================
+
+Prerequisites before running vsperf client
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+1. The user must install grpcio, grpcio-tools and configparser for the python3 environment.
+
+2. The user has to prepare the client configuration file by providing appropriate values.
+
+3. The user has to prepare the configuration files that will be uploaded to the DUT-Host or TGen-Host systems.
+
+4. The T-Rex and collectd configuration files should be named trex_cfg.yaml and collectd.conf, respectively.
+
+5. Start the deployment-interactive container and the testcontrol-interactive container, which will run the servers on ports 50051 and 50052, respectively.
+
+Run vsperf client
+^^^^^^^^^^^^^^^^^^^^^
+Locate and run vsperf_client.py with python3, as shown below.
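+
+For example, assuming the client sources live under ``tools/docker/client``::
+
+ pip3 install grpcio grpcio-tools configparser
+ cd tools/docker/client
+ python3 vsperf_client.py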
+
diff --git a/tools/docker/docs/test.rst b/tools/docker/docs/test.rst
new file mode 100644
index 00000000..d002ddbe
--- /dev/null
+++ b/tools/docker/docs/test.rst
@@ -0,0 +1,86 @@
+Before using the VSPERF client and VSPERF containers, the user must run the prepare.sh script, which prepares the local environment.
+
+Locate ``vsperf-docker/prepare.sh`` and run::
+
+ bash prepare.sh
+
+VSPERF Containers
+------------------
+
+============
+deployment
+============
+Users have two choices for deployment: auto and interactive.
+
+1. auto
+^^^^^^^^^^^^^^^^^^^^^
+This auto deployment container will do everything related to VSPERF set-up automatically. This includes installation of VSPERF, T-Rex and collectd, uploading the collectd configuration file to the DUT-Host, uploading the T-Rex configuration files and starting the T-Rex traffic generator. Before installing vsperf and t-rex, the container performs a verification process, which includes basic sanity checks such as checking for old installations, huge-page checks, necessary folders and software, etc. The user should modify the T-Rex (trex_cfg.yaml) and collectd (collectd.conf) configuration files depending on their needs before running the containers.
+
+
+Pre-Deployment Configuration
+****************************
+The user has to provide the following in the list.env file (see the sketch below):
+
+1. DUT-Host and TGen-Host related credentials and IP addresses
+2. Values for HUGEPAGE_MAX and HUGEPAGE_REQUESTED
+3. Option for sanity check - YES or NO
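+
+A minimal ``list.env`` sketch (values are illustrative)::
+
+ DUT_IP_ADDRESS=10.10.120.24
+ DUT_USERNAME=opnfv
+ DUT_PASSWORD=opnfv
+ TGEN_IP_ADDRESS=10.10.120.25
+ TGEN_USERNAME=root
+ TGEN_PASSWORD=P@ssw0rd
+ HUGEPAGE_MAX=8192
+ HUGEPAGE_REQUESTED=1024
+ SANITY_CHECK=NO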
+
+Build
+******************
+Use **docker-compose build** command to build the container.
+
+Run
+******************
+Run the container's service with **docker-compose run deploy** command.
+
+
+2. interactive
+^^^^^^^^^^^^^^^^^^^^^
+The interactive container must run before using the vsperf client. It will start the server on port 50051 for the vsperf client to send commands. The deployment interactive container handles all vsperf set-up related commands from the vsperf client and performs the corresponding operations.
+
+
+Build
+******************
+Run **docker-compose build** command to build the container.
+
+Run
+******************
+Run the container with **docker-compose up deploy** command.
+Once the server is running, the user has to run the testcontrol interactive container; then the vsperf client can be run.
+
+
+===============
+testcontrol
+===============
+For testcontrol too, the user has two choices: auto and interactive.
+
+1. auto
+^^^^^^^^^^^^^^^^^^^^^
+This auto testcontrol container will perform the test automatically on the DUT-Host. The container also performs sanity checks automatically, and the user is able to get the test status for all tests. If any sanity check fails, the test will not run and the container stops gracefully. The user can modify the vsperf.conf file, which will be uploaded to the DUT-Host automatically by the container and used for performing the vsperf test.
+
+Pre-Deployment Configuration
+****************************
+1. The user has to provide all the DUT-Host credentials and the IP address of the TGen-Host in list.env.
+2. Provide names for VSPERF_TESTS and VSPERF_CONFFILE in list.env.
+3. Provide options for VSPERF_TRAFFICGEN_MODE and CLEAN_UP [YES or NO] in the list.env file; a sketch is shown below.
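+
+A sketch of the test-related ``list.env`` entries (variable names as listed above; values are illustrative)::
+
+ DUT_IP_ADDRESS=10.10.120.24
+ DUT_USERNAME=opnfv
+ DUT_PASSWORD=opnfv
+ TGEN_IP_ADDRESS=10.10.120.25
+ VSPERF_TESTS=phy2phy_tput
+ VSPERF_CONFFILE=vsperf.conf
+ VSPERF_TRAFFICGEN_MODE=YES
+ CLEAN_UP=NO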
+
+Build
+******************
+Run **docker-compose build** command to build the container.
+
+Run
+******************
+Run the container's service with **docker-compose run testcontrol** command.
+The user can observe the results and perform another test by just changing the VSPERF_TESTS environment variable in the list.env file.
+
+
+2. interactive
+^^^^^^^^^^^^^^^^^^^^^
+This interactive testcontrol container must run before using the vsperf client. It will start the server on localhost port 50052 for the vsperf client. This testcontrol interactive container handles all the test-related commands from the vsperf client and performs the operations.
+
+Build
+******************
+Run **docker-compose build** command to build the container.
+
+Run
+******************
+Run the container with **docker-compose up testcontrol** command.
+After running this container, the user can use the vsperf client.
diff --git a/tools/docker/libs/proto/__init__.py b/tools/docker/libs/proto/__init__.py
new file mode 100644
index 00000000..ad0ebec3
--- /dev/null
+++ b/tools/docker/libs/proto/__init__.py
@@ -0,0 +1 @@
+#### Empty
diff --git a/tools/docker/libs/proto/vsperf.proto b/tools/docker/libs/proto/vsperf.proto
new file mode 100755
index 00000000..0fc45df3
--- /dev/null
+++ b/tools/docker/libs/proto/vsperf.proto
@@ -0,0 +1,109 @@
+// Copyright 2018-2019 .
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+
+syntax = "proto3";
+package vsperf;
+
+service Controller {
+ rpc HostConnect (HostInfo) returns (StatusReply) {}
+ rpc VsperfInstall (HostInfo) returns (StatusReply) {}
+ rpc TGenHostConnect (HostInfo) returns (StatusReply) {}
+ rpc TGenInstall (HostVerInfo) returns (StatusReply) {}
+ rpc TGenUploadConfigFile (stream ConfFile) returns (UploadStatus) {}
+ rpc CollectdInstall (HostInfo) returns (StatusReply) {}
+ rpc CollectdUploadConfig (stream ConfFile) returns (UploadStatus) {}
+ rpc DutHugepageConfig (HugepConf) returns (StatusReply) {}
+ rpc CheckDependecies (HostInfo) returns (StatusReply) {}
+ rpc UploadConfigFile (ConfFileTest) returns (UploadStatus) {}
+ rpc StartTest (ControlVsperf) returns (StatusReply) {}
+ rpc TestStatus (StatusQuery) returns (StatusReply) {}
+ rpc StartTGen (ControlTGen) returns (StatusReply) {}
+ rpc StartBeats (HostInfo) returns (StatusReply) {}
+ rpc RemoveVsperf (HostInfo) returns (StatusReply) {}
+ rpc RemoveResultFolder (HostInfo) returns (StatusReply) {}
+ rpc RemoveUploadedConfig (HostInfo) returns (StatusReply) {}
+ rpc RemoveCollectd (HostInfo) returns (StatusReply) {}
+ rpc RemoveEverything (HostInfo) returns (StatusReply) {}
+ rpc TerminateVsperf (HostInfo) returns (StatusReply) {}
+ rpc SanityNICCheck (HostInfo) returns (StatusReply) {}
+ rpc SanityCollectdCheck (HostInfo) returns (StatusReply) {}
+ rpc SanityVNFpath (HostInfo) returns (StatusReply) {}
+ rpc SanityVSPERFCheck (HostInfo) returns (StatusReply) {}
+ rpc SanityTgenConnDUTCheck (HostInfo) returns (StatusReply) {}
+ rpc SanityCPUAllocationCheck (HostInfo) returns (StatusReply) {}
+ rpc DUTvsperfTestAvailability (HostInfo) returns (StatusReply) {}
+ rpc GetVSPERFConffromDUT (HostInfo) returns (StatusReply) {}
+}
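+
+// Example client usage (a sketch, not part of the API contract): the Python
+// stub generated from this file by grpcio-tools is named ControllerStub, and
+// the interactive testcontrol server listens on port 50052. The host address
+// and credentials below are placeholder values.
+//
+//   channel = grpc.insecure_channel('localhost:50052')
+//   stub = vsperf_pb2_grpc.ControllerStub(channel)
+//   reply = stub.HostConnect(vsperf_pb2.HostInfo(
+//       ip='10.10.120.25', uname='opnfv', pwd='opnfv'))
+//   print(reply.message)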
+
+message ControlVsperf {
+ string testtype = 1;
+ string conffile = 2;
+}
+
+message ControlTGen {
+ string params = 1;
+ string conffile = 2;
+}
+
+message LogDir {
+ string directory = 1;
+}
+
+message ConfFile {
+ bytes Content = 1;
+}
+
+message ConfFileTest {
+ string Content = 1;
+ string Filename = 2;
+}
+
+message HostInfo {
+ string ip = 1;
+ string uname = 2;
+ string pwd = 3;
+}
+
+message HugepConf {
+ string hpmax = 1;
+ string hprequested = 2;
+}
+
+message HostVerInfo {
+ string ip = 1;
+ string uname = 2;
+ string pwd = 3;
+ string version = 4;
+}
+
+message StatusQuery {
+ string testtype = 1;
+}
+
+message StatusReply {
+ string message = 1;
+}
+
+enum UploadStatusCode {
+ Unknown = 0;
+ Ok = 1;
+ Failed = 2;
+}
+
+message UploadStatus {
+ string Message = 1;
+ UploadStatusCode Code = 2;
+}
+
diff --git a/tools/docker/libs/utils/__init__.py b/tools/docker/libs/utils/__init__.py
new file mode 100644
index 00000000..ad0ebec3
--- /dev/null
+++ b/tools/docker/libs/utils/__init__.py
@@ -0,0 +1 @@
+#### Empty
diff --git a/tools/docker/libs/utils/exceptions.py b/tools/docker/libs/utils/exceptions.py
new file mode 100644
index 00000000..c4e0e097
--- /dev/null
+++ b/tools/docker/libs/utils/exceptions.py
@@ -0,0 +1,65 @@
+"""
+# Copyright (c) 2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+#pylint: disable=import-error
+from oslo_utils import excutils
+
+
+class VsperfCException(Exception):
+ """Base VSPERF-C Exception.
+
+ To correctly use this class, inherit from it and define
+ a 'message' property. That message will get printf'd
+ with the keyword arguments provided to the constructor.
+
+ Based on NeutronException class.
+ """
+ message = "An unknown exception occurred."
+
+ def __init__(self, **kwargs):
+ try:
+ super(VsperfCException, self).__init__(self.message % kwargs)
+ self.msg = self.message % kwargs
+ except Exception: # pylint: disable=broad-except
+ with excutils.save_and_reraise_exception() as ctxt:
+ if not self.use_fatal_exceptions():
+ ctxt.reraise = False
+ # at least get the core message out if something happened
+ super(VsperfCException, self).__init__(self.message)
+
+ def __str__(self):
+ return self.msg
+
+ def use_fatal_exceptions(self):
+ """Is the instance using fatal exceptions.
+
+ :returns: Always returns False.
+ """ #pylint: disable=no-self-use
+ return False
+
+
+class InvalidType(VsperfCException):
+ """Invalid type"""
+ message = 'Type "%(type_to_convert)s" is not valid'
+
+
+class SSHError(VsperfCException):
+ """ssh error"""
+ message = '%(error_msg)s'
+
+
+class SSHTimeout(SSHError):
+ """ssh timeout""" #pylint: disable=unnecessary-pass
+ pass
diff --git a/tools/docker/libs/utils/ssh.py b/tools/docker/libs/utils/ssh.py
new file mode 100644
index 00000000..a4df13b0
--- /dev/null
+++ b/tools/docker/libs/utils/ssh.py
@@ -0,0 +1,546 @@
+# Copyright 2013: Mirantis Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#pylint: disable=I,C,R,locally-disabled
+#pylint: disable=import-error,arguments-differ
+
+# this is a modified copy of rally/rally/common/sshutils.py
+
+"""High level ssh library.
+
+Usage examples:
+
+Execute command and get output:
+
+ ssh = sshclient.SSH("root", "example.com", port=33)
+ status, stdout, stderr = ssh.execute("ps ax")
+ if status:
+ raise Exception("Command failed with non-zero status.")
+ print(stdout.splitlines())
+
+Execute command with huge output:
+
+ class PseudoFile(io.RawIOBase):
+ def write(chunk):
+ if "error" in chunk:
+ email_admin(chunk)
+
+ ssh = SSH("root", "example.com")
+ with PseudoFile() as p:
+ ssh.run("tail -f /var/log/syslog", stdout=p, timeout=False)
+
+Execute local script on remote side:
+
+ ssh = sshclient.SSH("user", "example.com")
+
+ with open("~/myscript.sh", "r") as stdin_file:
+ status, out, err = ssh.execute('/bin/sh -s "arg1" "arg2"',
+ stdin=stdin_file)
+
+Upload file:
+
+ ssh = SSH("user", "example.com")
+ # use rb for binary files
+ with open("/store/file.gz", "rb") as stdin_file:
+ ssh.run("cat > ~/upload/file.gz", stdin=stdin_file)
+
+Eventlet:
+
+ eventlet.monkey_patch(select=True, time=True)
+ or
+ eventlet.monkey_patch()
+ or
+ sshclient = eventlet.import_patched("vsperf.ssh")
+
+"""
+from __future__ import print_function
+import io
+import logging
+import os
+import re
+import select
+import socket
+import time
+
+import paramiko
+from chainmap import ChainMap
+from oslo_utils import encodeutils
+from scp import SCPClient
+import six
+
+# When building the container, use this import:
+import utils.exceptions as exceptions
+# otherwise keep it as:
+#import exceptions
+# When building the container, use this import:
+from utils.utils import try_int, NON_NONE_DEFAULT, make_dict_from_map
+# otherwise keep it as:
+#from utils import try_int, NON_NONE_DEFAULT, make_dict_from_map
+
+
+def convert_key_to_str(key):
+ if not isinstance(key, (paramiko.RSAKey, paramiko.DSSKey)):
+ return key
+ k = io.StringIO()
+ key.write_private_key(k)
+ return k.getvalue()
+
+
+# class SSHError(Exception):
+# pass
+#
+#
+# class SSHTimeout(SSHError):
+# pass
+
+
+class SSH(object):
+ """Represent ssh connection."""
+ #pylint: disable=no-member
+
+ SSH_PORT = paramiko.config.SSH_PORT
+ DEFAULT_WAIT_TIMEOUT = 120
+
+ @staticmethod
+ def gen_keys(key_filename, bit_count=2048):
+ rsa_key = paramiko.RSAKey.generate(bits=bit_count, progress_func=None)
+ rsa_key.write_private_key_file(key_filename)
+ print("Writing %s ..." % key_filename)
+ with open('.'.join([key_filename, "pub"]), "w") as pubkey_file:
+ pubkey_file.write(rsa_key.get_name())
+ pubkey_file.write(' ')
+ pubkey_file.write(rsa_key.get_base64())
+ pubkey_file.write('\n')
+
+ @staticmethod
+ def get_class():
+ # must return static class name, anything else
+ # refers to the calling class
+ # i.e. the subclass, not the superclass
+ return SSH
+
+ @classmethod
+ def get_arg_key_map(cls):
+ return {
+ 'user': ('user', NON_NONE_DEFAULT),
+ 'host': ('ip', NON_NONE_DEFAULT),
+ 'port': ('ssh_port', cls.SSH_PORT),
+ 'pkey': ('pkey', None),
+ 'key_filename': ('key_filename', None),
+ 'password': ('password', None),
+ 'name': ('name', None),
+ }
+
+ def __init__(self, user, host, port=None, pkey=None,
+ key_filename=None, password=None, name=None):
+ """Initialize SSH client.
+
+ :param user: ssh username
+ :param host: hostname or ip address of remote ssh server
+ :param port: remote ssh port
+ :param pkey: RSA or DSS private key string or file object
+ :param key_filename: private key filename
+ :param password: password
+ """
+ self.name = name
+ if name:
+ self.log = logging.getLogger(__name__ + '.' + self.name)
+ else:
+ self.log = logging.getLogger(__name__)
+
+ self.wait_timeout = self.DEFAULT_WAIT_TIMEOUT
+ self.user = user
+ self.host = host
+ # everybody wants to debug this in the caller, do it here instead
+ self.log.debug("user:%s host:%s", user, host)
+
+ # we may get text port from YAML, convert to int
+ self.port = try_int(port, self.SSH_PORT)
+ self.pkey = self._get_pkey(pkey) if pkey else None
+ self.password = password
+ self.key_filename = key_filename
+ self._client = False
+        # paramiko loglevel debug will output ssh protocol debug;
+ # we don't ever really want that unless we are debugging paramiko
+ # ssh issues
+ if os.environ.get("PARAMIKO_DEBUG", "").lower() == "true":
+ logging.getLogger("paramiko").setLevel(logging.DEBUG)
+ else:
+ logging.getLogger("paramiko").setLevel(logging.WARN)
+
+ @classmethod
+ def args_from_node(cls, node, overrides=None, defaults=None):
+ if overrides is None:
+ overrides = {}
+ if defaults is None:
+ defaults = {}
+
+ params = ChainMap(overrides, node, defaults)
+ return make_dict_from_map(params, cls.get_arg_key_map())
+
+ @classmethod
+ def from_node(cls, node, overrides=None, defaults=None):
+ return cls(**cls.args_from_node(node, overrides, defaults))
+
+ def _get_pkey(self, key):
+ if isinstance(key, six.string_types):
+ key = six.moves.StringIO(key)
+ errors = []
+ for key_class in (paramiko.rsakey.RSAKey, paramiko.dsskey.DSSKey):
+ try:
+ return key_class.from_private_key(key)
+ except paramiko.SSHException as e:
+ errors.append(e)
+ raise exceptions.SSHError(error_msg='Invalid pkey: %s' % errors)
+
+ @property
+ def is_connected(self):
+ return bool(self._client)
+
+ def _get_client(self):
+ if self.is_connected:
+ return self._client
+ try:
+ self._client = paramiko.SSHClient()
+ self._client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ self._client.connect(self.host, username=self.user,
+ port=self.port, pkey=self.pkey,
+ key_filename=self.key_filename,
+ password=self.password,
+ allow_agent=False, look_for_keys=False,
+ timeout=1)
+ return self._client
+ except Exception as e:
+ message = ("Exception %(exception_type)s was raised "
+ "during connect. Exception value is: %(exception)r" %
+ {"exception": e, "exception_type": type(e)})
+ self._client = False
+ raise exceptions.SSHError(error_msg=message)
+
+ def _make_dict(self):
+ return {
+ 'user': self.user,
+ 'host': self.host,
+ 'port': self.port,
+ 'pkey': self.pkey,
+ 'key_filename': self.key_filename,
+ 'password': self.password,
+ 'name': self.name,
+ }
+
+ def copy(self):
+ return self.get_class()(**self._make_dict())
+
+ def close(self):
+ if self._client:
+ self._client.close()
+ self._client = False
+
+ def run(self, cmd, stdin=None, stdout=None, stderr=None,
+ raise_on_error=True, timeout=3600,
+ keep_stdin_open=False, pty=False):
+ """Execute specified command on the server.
+
+ :param cmd: Command to be executed.
+ :type cmd: str
+ :param stdin: Open file or string to pass to stdin.
+ :param stdout: Open file to connect to stdout.
+ :param stderr: Open file to connect to stderr.
+        :param raise_on_error: If False, the exit code is returned. If True,
+                               an exception is raised on a non-zero exit code.
+ :param timeout: Timeout in seconds for command execution.
+ Default 1 hour. No timeout if set to 0.
+ :param keep_stdin_open: don't close stdin on empty reads
+ :type keep_stdin_open: bool
+ :param pty: Request a pseudo terminal for this connection.
+ This allows passing control characters.
+ Default False.
+ :type pty: bool
+ """
+
+ client = self._get_client()
+
+ if isinstance(stdin, six.string_types):
+ stdin = six.moves.StringIO(stdin)
+
+ return self._run(client, cmd, stdin=stdin, stdout=stdout,
+ stderr=stderr, raise_on_error=raise_on_error,
+ timeout=timeout,
+ keep_stdin_open=keep_stdin_open, pty=pty)
+
+ def _run(self, client, cmd, stdin=None, stdout=None, stderr=None,
+ raise_on_error=True, timeout=3600,
+ keep_stdin_open=False, pty=False):
+
+ transport = client.get_transport()
+ session = transport.open_session()
+ if pty:
+ session.get_pty()
+ session.exec_command(cmd)
+ start_time = time.time()
+
+ # encode on transmit, decode on receive
+ data_to_send = encodeutils.safe_encode("", incoming='utf-8')
+ stderr_data = None
+
+ # If we have data to be sent to stdin then `select' should also
+ # check for stdin availability.
+ if stdin and not stdin.closed:
+ writes = [session]
+ else:
+ writes = []
+
+ while True:
+ # Block until data can be read/write.
+ e = select.select([session], writes, [session], 1)[2]
+
+ if session.recv_ready():
+ data = encodeutils.safe_decode(session.recv(4096), 'utf-8')
+ self.log.debug("stdout: %r", data)
+ if stdout is not None:
+ stdout.write(data)
+ continue
+
+ if session.recv_stderr_ready():
+ stderr_data = encodeutils.safe_decode(
+ session.recv_stderr(4096), 'utf-8')
+ self.log.debug("stderr: %r", stderr_data)
+ if stderr is not None:
+ stderr.write(stderr_data)
+ continue
+
+ if session.send_ready():
+ if stdin is not None and not stdin.closed:
+ if not data_to_send:
+ stdin_txt = stdin.read(4096)
+ if stdin_txt is None:
+ stdin_txt = ''
+ data_to_send = encodeutils.safe_encode(
+ stdin_txt, incoming='utf-8')
+ if not data_to_send:
+ # we may need to keep stdin open
+ if not keep_stdin_open:
+ stdin.close()
+ session.shutdown_write()
+ writes = []
+ if data_to_send:
+ sent_bytes = session.send(data_to_send)
+ # LOG.debug("sent: %s" % data_to_send[:sent_bytes])
+ data_to_send = data_to_send[sent_bytes:]
+
+ if session.exit_status_ready():
+ break
+
+ if timeout and (time.time() - timeout) > start_time:
+ message = ('Timeout executing command %(cmd)s on host %(host)s'
+ % {"cmd": cmd, "host": self.host})
+ raise exceptions.SSHTimeout(error_msg=message)
+ if e:
+ raise exceptions.SSHError(error_msg='Socket error')
+
+ exit_status = session.recv_exit_status()
+ if exit_status != 0 and raise_on_error:
+ fmt = "Command '%(cmd)s' failed with exit_status %(status)d."
+ details = fmt % {"cmd": cmd, "status": exit_status}
+ if stderr_data:
+ details += " Last stderr data: '%s'." % stderr_data
+ raise exceptions.SSHError(error_msg=details)
+ return exit_status
+
+ def execute(self, cmd, stdin=None, timeout=3600, raise_on_error=False):
+ """Execute the specified command on the server.
+
+ :param cmd: (str) Command to be executed.
+ :param stdin: (StringIO) Open file to be sent on process stdin.
+ :param timeout: (int) Timeout for execution of the command.
+ :param raise_on_error: (bool) If True, then an SSHError will be raised
+ when non-zero exit code.
+
+ :returns: tuple (exit_status, stdout, stderr)
+ """
+ stdout = six.moves.StringIO()
+ stderr = six.moves.StringIO()
+
+ exit_status = self.run(cmd, stderr=stderr,
+ stdout=stdout, stdin=stdin,
+ timeout=timeout, raise_on_error=raise_on_error)
+ stdout.seek(0)
+ stderr.seek(0)
+ return exit_status, stdout.read(), stderr.read()
+
+ def wait(self, timeout=None, interval=1):
+ """Wait for the host will be available via ssh."""
+ if timeout is None:
+ timeout = self.wait_timeout
+
+ end_time = time.time() + timeout
+ while True:
+ try:
+ return self.execute("uname")
+ except (socket.error, exceptions.SSHError) as e:
+ self.log.debug("Ssh is still unavailable: %r", e)
+ time.sleep(interval)
+ if time.time() > end_time:
+ raise exceptions.SSHTimeout(
+ error_msg='Timeout waiting for "%s"' % self.host)
+
+ def put(self, files, remote_path=b'.', recursive=False):
+ client = self._get_client()
+
+ with SCPClient(client.get_transport()) as scp:
+ scp.put(files, remote_path, recursive)
+
+ def get(self, remote_path, local_path='/tmp/', recursive=True):
+ client = self._get_client()
+
+ with SCPClient(client.get_transport()) as scp:
+ scp.get(remote_path, local_path, recursive)
+
+ # keep shell running in the background, e.g. screen
+ def send_command(self, command):
+ client = self._get_client()
+ client.exec_command(command, get_pty=True)
+
+ def _put_file_sftp(self, localpath, remotepath, mode=None):
+ client = self._get_client()
+
+ with client.open_sftp() as sftp:
+ sftp.put(localpath, remotepath)
+ if mode is None:
+ mode = 0o777 & os.stat(localpath).st_mode
+ sftp.chmod(remotepath, mode)
+
+ TILDE_EXPANSIONS_RE = re.compile("(^~[^/]*/)?(.*)")
+
+ def _put_file_shell(self, localpath, remotepath, mode=None):
+        # quote to stop word splitting
+ tilde, remotepath = self.TILDE_EXPANSIONS_RE.match(remotepath).groups()
+ if not tilde:
+ tilde = ''
+ cmd = ['cat > %s"%s"' % (tilde, remotepath)]
+ if mode is not None:
+ # use -- so no options
+ cmd.append('chmod -- 0%o %s"%s"' % (mode, tilde, remotepath))
+
+ with open(localpath, "rb") as localfile:
+ # only chmod on successful cat
+ self.run("&& ".join(cmd), stdin=localfile)
+
+ def put_file(self, localpath, remotepath, mode=None):
+ """Copy specified local file to the server.
+
+ :param localpath: Local filename.
+ :param remotepath: Remote filename.
+ :param mode: Permissions to set after upload
+ """
+ try:
+ self._put_file_sftp(localpath, remotepath, mode=mode)
+ except (paramiko.SSHException, socket.error):
+ self._put_file_shell(localpath, remotepath, mode=mode)
+
+ def put_file_obj(self, file_obj, remotepath, mode=None):
+ client = self._get_client()
+
+ with client.open_sftp() as sftp:
+ sftp.putfo(file_obj, remotepath)
+ if mode is not None:
+ sftp.chmod(remotepath, mode)
+
+ def get_file_obj(self, remotepath, file_obj):
+ client = self._get_client()
+
+ with client.open_sftp() as sftp:
+ sftp.getfo(remotepath, file_obj)
+
+
+class AutoConnectSSH(SSH):
+
+ @classmethod
+ def get_arg_key_map(cls):
+ arg_key_map = super(AutoConnectSSH, cls).get_arg_key_map()
+ arg_key_map['wait'] = ('wait', True)
+ return arg_key_map
+
+ # always wait or we will get OpenStack SSH errors
+ def __init__(self, user, host, port=None, pkey=None,
+ key_filename=None, password=None, name=None, wait=True):
+ super(AutoConnectSSH, self).__init__(user, host, port, pkey,
+ key_filename, password, name)
+ if wait and wait is not True:
+ self.wait_timeout = int(wait)
+
+ def _make_dict(self):
+ data = super(AutoConnectSSH, self)._make_dict()
+ data.update({
+ 'wait': self.wait_timeout
+ })
+ return data
+
+ def _connect(self):
+ if not self.is_connected:
+ interval = 1
+ timeout = self.wait_timeout
+
+ end_time = time.time() + timeout
+ while True:
+ try:
+ return self._get_client()
+ except (socket.error, exceptions.SSHError) as e:
+ self.log.debug("Ssh is still unavailable: %r", e)
+ time.sleep(interval)
+ if time.time() > end_time:
+ raise exceptions.SSHTimeout(
+ error_msg='Timeout waiting for "%s"' % self.host)
+
+ def drop_connection(self):
+ """ Don't close anything, just force creation of a new client """
+ self._client = False
+
+ def execute(self, cmd, stdin=None, timeout=3600, raise_on_error=False):
+ self._connect()
+ return super(AutoConnectSSH, self).execute(cmd, stdin, timeout,
+ raise_on_error)
+
+ def run(self, cmd, stdin=None, stdout=None, stderr=None,
+ raise_on_error=True, timeout=3600,
+ keep_stdin_open=False, pty=False):
+ self._connect()
+ return super(AutoConnectSSH, self).run(cmd, stdin, stdout,
+ stderr, raise_on_error,
+ timeout, keep_stdin_open, pty)
+
+ def put(self, files, remote_path=b'.', recursive=False):
+ self._connect()
+ return super(AutoConnectSSH, self).put(files, remote_path, recursive)
+
+ def put_file(self, local_path, remote_path, mode=None):
+ self._connect()
+ return super(AutoConnectSSH, self).put_file(local_path,
+ remote_path, mode)
+
+ def put_file_obj(self, file_obj, remote_path, mode=None):
+ self._connect()
+ return super(AutoConnectSSH, self).put_file_obj(file_obj,
+ remote_path, mode)
+
+ def get_file_obj(self, remote_path, file_obj):
+ self._connect()
+ return super(AutoConnectSSH, self).get_file_obj(remote_path, file_obj)
+
+ @staticmethod
+ def get_class():
+ # must return static class name,
+ # anything else refers to the calling class
+ # i.e. the subclass, not the superclass
+ return AutoConnectSSH
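+
+
+# Example usage (a sketch; the host and credentials are placeholder values):
+# AutoConnectSSH retries the connection until wait_timeout expires, so no
+# explicit wait() call is needed before the first command:
+#
+#   ssh = AutoConnectSSH("opnfv", "10.10.120.25", password="opnfv", wait=60)
+#   status, out, err = ssh.execute("uname -a")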
diff --git a/tools/docker/libs/utils/utils.py b/tools/docker/libs/utils/utils.py
new file mode 100644
index 00000000..d945381e
--- /dev/null
+++ b/tools/docker/libs/utils/utils.py
@@ -0,0 +1,41 @@
+"""
+# Copyright 2013: Mirantis Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+
+
+NON_NONE_DEFAULT = object()
+
+
+def get_key_with_default(data, key, default=NON_NONE_DEFAULT):
+ """get default key"""
+ value = data.get(key, default)
+ if value is NON_NONE_DEFAULT:
+ raise KeyError(key)
+ return value
+
+
+def make_dict_from_map(data, key_map):
+ """mapping dict"""
+ return {dest_key: get_key_with_default(data, src_key, default)
+ for dest_key, (src_key, default) in key_map.items()}
+
+def try_int(s, *args):
+ """Convert to integer if possible."""
+ #pylint: disable=invalid-name
+ try:
+ return int(s)
+ except (TypeError, ValueError):
+ return args[0] if args else s
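+
+# Example behaviour (a sketch):
+#   try_int("22")      -> 22
+#   try_int("ssh", 22) -> 22     (fallback returned on conversion failure)
+#   try_int("ssh")     -> "ssh"  (no fallback given, value returned as-is)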
diff --git a/tools/docker/prepare.sh b/tools/docker/prepare.sh
new file mode 100755
index 00000000..7afdbd6b
--- /dev/null
+++ b/tools/docker/prepare.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+#This script is used to prepare the local host to use the vsperf client and containers.
+
+#First, make the prepare.sh file executable
+chmod a+x prepare.sh
+
+#Install python3 for local host
+sudo apt-get install python3
+
+#Install python3-pip
+sudo apt-get install python3-pip
+
+#Install grpcio, grpcio-tools and configparser
+pip3 install grpcio==1.4.0 grpcio-tools==1.4.0 configparser
+
+# Build .proto to create python library
+cd libs/proto && python3 -m grpc_tools.protoc -I./ --python_out=. --grpc_python_out=. vsperf.proto
+sed -i 's/import vsperf_pb2 as vsperf__pb2/from . import vsperf_pb2 as vsperf__pb2/g' vsperf_pb2_grpc.py
+cd ../..
+
+#Copy libs/proto and libs/utils into the deployment and testcontrol interactive containers at the appropriate locations.
+cp -r libs/proto deployment/interactive/controller/vsperf/proto
+cp -r libs/utils deployment/interactive/controller/vsperf/utils
+cp -r libs/proto testcontrol/interactive/controller/vsperf/proto
+cp -r libs/utils testcontrol/interactive/controller/vsperf/utils
+
+#Copy libs/utils into the deployment and testcontrol auto containers at the appropriate locations.
+cp -r libs/utils deployment/auto/controller/vsperf/utils
+cp -r libs/utils testcontrol/auto/controller/vsperf/utils
+
+#copy libs/proto into client
+cp -r libs/proto client/proto
diff --git a/tools/docker/results/README.md b/tools/docker/results/README.md
new file mode 100644
index 00000000..15d28b15
--- /dev/null
+++ b/tools/docker/results/README.md
@@ -0,0 +1,48 @@
+## Please set the limit on mmap counts to 262144 or more.
+
+There are two options. Run this command:
+```sh
+
+sysctl -w vm.max_map_count=262144
+
+```
+or, to set it permanently, update the `vm.max_map_count` setting in
+`/etc/sysctl.conf`.
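+
+For instance (a sketch; run as root):
+```sh
+echo "vm.max_map_count=262144" >> /etc/sysctl.conf
+sysctl -p
+```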
+
+### Update the IP address.
+You may want to change the IP address from 0.0.0.0 to the appropriate host IP in
+```sh
+docker-compose.yml
+
+```
+
+### Changes made to sebp/elk
+The vsperf/elk image is the same as sebp/elk with one minor change: the inclusion of the collectd codec in logstash.
+In the Dockerfile of sebp/elk, under the logstash configuration, the following lines are added:
+```sh
+ WORKDIR ${LOGSTASH_HOME}
+ RUN gosu logstash bin/logstash-plugin install logstash-codec-collectd
+ WORKDIR /
+
+```
+
+The resultsdb directory contains the source from the Dovetail/Dovetail-webportal project.
+Once the results container is deployed, run the python script as follows to ensure that results can be pushed and queried correctly.
+```sh
+python init_db.py host_ip_address testapi_port
+```
+For example, if the host on which the container is running is 10.10.120.22, and the container exposes port 8000, the command should be:
+```sh
+python init_db.py 10.10.120.22 8000
+```
diff --git a/tools/docker/results/docker-compose.yml b/tools/docker/results/docker-compose.yml
new file mode 100644
index 00000000..87ba7fc0
--- /dev/null
+++ b/tools/docker/results/docker-compose.yml
@@ -0,0 +1,80 @@
+version: '3'
+volumes:
+ elk-data:
+ influx-data:
+ grafana-data:
+ mongo-data:
+ jupyter-data:
+ testapi-logs:
+services:
+ influxdb:
+ image: influxdb:latest
+ ports:
+ - "25826:25826/udp"
+ - "25826:25826"
+ - "8083:8083"
+ - "8086:8086"
+ expose:
+ - "25826"
+ - "8086"
+ - "8083"
+ volumes:
+ - influx-data:/var/lib/influxdb
+ grafana:
+ image: opnfv/barometer-grafana
+ volumes:
+ - grafana-data:/var/lib/grafana
+ - ./grafana/dashboards:/opt/grafana/dashboards
+ ports:
+ - "3000:3000"
+ elk:
+ image: vsperf/elk
+ ports:
+ - "5601:5601"
+ - "9200:9200"
+ - "5044:5044"
+ volumes:
+ - elk-data:/var/lib/elasticsearch
+ - ./logstash/pipeline/30-output.conf:/etc/logstash/conf.d/30-output.conf
+ - ./logstash/pipeline/02-beats-input.conf:/etc/logstash/conf.d/02-beats-input.conf
+ - ./logstash/pipeline/20-collectd-input.conf:/etc/logstash/conf.d/20-collectd-input.conf
+ environment:
+ - discovery.type=single-node
+ mongo:
+ image: mongo:3.2.1
+ ports:
+ - "27017:27017"
+ volumes:
+ - mongo-data:/data/db
+ container_name: opnfv-mongo
+ testapi:
+ image: opnfv/testapi:latest
+ container_name: opnfv-testapi
+ volumes:
+ - testapi-logs:/home/testapi/logs
+ environment:
+ - mongodb_url=mongodb://opnfv-mongo:27017/
+ - base_url=http://0.0.0.0:8000
+ ports:
+ - "8000:8000"
+ - "8001:8001"
+ links:
+ - mongo
+ jupyter:
+ build:
+ context: ./jupyter
+ ports:
+ - "8888:8888"
+ links:
+ - postgres
+ volumes:
+ - ./notebooks:/notebooks
+ - ./notebooks/testresult-analysis.ipynb:/notebooks/testresult-analysis.ipynb
+ - jupyter-data:/data
+ postgres:
+ image: postgres
+ restart: always
+ environment:
+ POSTGRES_USER: data
+ POSTGRES_PASSWORD: data
+ POSTGRES_DB: data
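+
+# Example usage (a sketch): from this directory, start the whole results
+# stack in the background and check its state with:
+#   docker-compose up -d
+#   docker-compose ps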
diff --git a/tools/docker/results/grafana/dashboards/container_metrics_dashboard.json b/tools/docker/results/grafana/dashboards/container_metrics_dashboard.json
new file mode 100644
index 00000000..ef0b32a1
--- /dev/null
+++ b/tools/docker/results/grafana/dashboards/container_metrics_dashboard.json
@@ -0,0 +1,1291 @@
+{
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": "-- Grafana --",
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "type": "dashboard"
+ }
+ ]
+ },
+ "editable": true,
+ "gnetId": null,
+ "graphTooltip": 0,
+ "hideControls": false,
+ "id": 3,
+ "links": [],
+ "refresh": "5s",
+ "rows": [
+ {
+ "collapse": false,
+ "height": 234,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "influxdb",
+ "description": "Total CPU usage of container",
+ "fill": 0,
+ "height": "",
+ "hideTimeOverride": false,
+ "id": 1,
+ "interval": "",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "hideEmpty": false,
+ "hideZero": false,
+ "max": true,
+ "min": true,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "minSpan": null,
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "repeat": null,
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "Total",
+ "dsType": "influxdb",
+ "groupBy": [],
+ "hide": false,
+ "measurement": "cpu_usage_total",
+ "orderByTime": "ASC",
+ "policy": "monitor",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "difference"
+ },
+ {
+ "params": [
+ " / 1000000000"
+ ],
+ "type": "math"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "container_name",
+ "operator": "=~",
+ "value": "/^$container$/"
+ }
+ ]
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": "1m",
+ "timeShift": null,
+ "title": "Total Usage",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "transparent": false,
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": null,
+ "format": "short",
+ "label": "Cores",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "decimals": null,
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ },
+ {
+ "aliasColors": {
+ "Core 0": "#0a50a1",
+ "Core 1": "#890f02",
+ "Core 2": "#f9934e",
+ "Core 3": "#3f6833"
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "influxdb",
+ "description": "CPU usage per core",
+ "fill": 0,
+ "height": "",
+ "hideTimeOverride": false,
+ "id": 2,
+ "interval": "",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "hideEmpty": false,
+ "hideZero": false,
+ "max": true,
+ "min": true,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "minSpan": null,
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "repeat": null,
+ "scopedVars": {
+ "core": {
+ "selected": false,
+ "text": "0",
+ "value": "0"
+ }
+ },
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "Core $tag_instance",
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "instance"
+ ],
+ "type": "tag"
+ }
+ ],
+ "measurement": "cpu_usage_per_cpu",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT difference(\"value\") / 1000000000 FROM \"monitor\".\"cpu_usage_per_cpu\" WHERE (\"container_name\" =~ /^$container$/) AND $timeFilter GROUP BY \"instance\"",
+ "rawQuery": false,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "difference"
+ },
+ {
+ "params": [
+ " / 1000000000"
+ ],
+ "type": "math"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "container_name",
+ "operator": "=~",
+ "value": "/^$container$/"
+ }
+ ]
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": "1m",
+ "timeShift": null,
+ "title": "Usage per Core",
+ "tooltip": {
+ "shared": false,
+ "sort": 1,
+ "value_type": "individual"
+ },
+ "transparent": false,
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": null,
+ "format": "short",
+ "label": "Cores",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "decimals": null,
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ },
+ {
+ "aliasColors": {
+ "Kernel": "#890f02",
+ "User": "#0a50a1"
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "influxdb",
+ "description": "CPU usage per User/Kernel",
+ "fill": 0,
+ "height": "",
+ "hideTimeOverride": false,
+ "id": 3,
+ "interval": "",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "hideEmpty": false,
+ "hideZero": false,
+ "max": true,
+ "min": true,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "minSpan": null,
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "User",
+ "dsType": "influxdb",
+ "groupBy": [],
+ "hide": false,
+ "measurement": "cpu_usage_user",
+ "orderByTime": "ASC",
+ "policy": "monitor",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "difference"
+ },
+ {
+ "params": [
+ " / 1000000000"
+ ],
+ "type": "math"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "container_name",
+ "operator": "=~",
+ "value": "/^$container$/"
+ }
+ ]
+ },
+ {
+ "alias": "Kernel",
+ "dsType": "influxdb",
+ "groupBy": [],
+ "measurement": "cpu_usage_system",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "difference"
+ },
+ {
+ "params": [
+ " / 1000000000"
+ ],
+ "type": "math"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "container_name",
+ "operator": "=~",
+ "value": "/^$container$/"
+ }
+ ]
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": "1m",
+ "timeShift": null,
+ "title": "Usage Breakdown",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "transparent": false,
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": null,
+ "format": "short",
+ "label": "Cores",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "decimals": null,
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "CPU",
+ "titleSize": "h3"
+ },
+ {
+ "collapse": false,
+ "height": 250,
+ "panels": [
+ {
+ "aliasColors": {
+ "Hot": "#890f02",
+ "Total": "#0a50a1"
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "description": "Memory Usage",
+ "fill": 1,
+ "height": "",
+ "id": 4,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "hideEmpty": false,
+ "hideZero": false,
+ "max": true,
+ "min": true,
+ "rightSide": true,
+ "show": true,
+ "sideWidth": 250,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "Total",
+ "dsType": "influxdb",
+ "groupBy": [],
+ "measurement": "memory_usage",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [
+ " / 1024/1024"
+ ],
+ "type": "math"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "container_name",
+ "operator": "=~",
+ "value": "/^$container$/"
+ }
+ ]
+ },
+ {
+ "alias": "Hot",
+ "dsType": "influxdb",
+ "groupBy": [],
+ "measurement": "memory_working_set",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [
+ " / 1024/1024"
+ ],
+ "type": "math"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "container_name",
+ "operator": "=~",
+ "value": "/^$container$/"
+ }
+ ]
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": "1m",
+ "timeShift": null,
+ "title": "Memory",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "decmbytes",
+ "label": "Megabytes",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Memory",
+ "titleSize": "h3"
+ },
+ {
+ "collapse": false,
+ "height": 250,
+ "panels": [
+ {
+ "aliasColors": {
+ "rx_bytes": "#890f02",
+ "tx_bytes": "#0a50a1"
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "description": "Rx/Tx",
+ "fill": 0,
+ "height": "",
+ "id": 5,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "rx_bytes",
+ "dsType": "influxdb",
+ "groupBy": [],
+ "measurement": "rx_bytes",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "difference"
+ },
+ {
+ "params": [
+ " / 1024"
+ ],
+ "type": "math"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "container_name",
+ "operator": "=~",
+ "value": "/^$container$/"
+ }
+ ]
+ },
+ {
+ "alias": "tx_bytes",
+ "dsType": "influxdb",
+ "groupBy": [],
+ "measurement": "tx_bytes",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "difference"
+ },
+ {
+ "params": [
+ " / 1024"
+ ],
+ "type": "math"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "container_name",
+ "operator": "=~",
+ "value": "/^$container$/"
+ }
+ ]
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": "1m",
+ "timeShift": null,
+ "title": "Throughput",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "KBs",
+ "label": "Kilobytes per second",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ },
+ {
+ "aliasColors": {
+ "rx_errors": "#890f02",
+ "tx_bytes": "#0a50a1",
+ "tx_errors": "#0a50a1"
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "fill": 0,
+ "height": "",
+ "id": 6,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "rx_errors",
+ "dsType": "influxdb",
+ "groupBy": [],
+ "measurement": "rx_errors",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "difference"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "container_name",
+ "operator": "=~",
+ "value": "/^$container$/"
+ }
+ ]
+ },
+ {
+ "alias": "tx_errors",
+ "dsType": "influxdb",
+ "groupBy": [],
+ "measurement": "tx_errors",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "difference"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "container_name",
+ "operator": "=~",
+ "value": "/^$container$/"
+ }
+ ]
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": "1m",
+ "timeShift": null,
+ "title": "Errors",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": "Errors per second",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Network",
+ "titleSize": "h3"
+ },
+ {
+ "collapse": false,
+ "height": 30,
+ "panels": [
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "#299c46",
+ "rgba(237, 129, 40, 0.89)",
+ "#d44a3a"
+ ],
+ "datasource": null,
+ "decimals": 2,
+ "format": "decbytes",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "200px",
+ "hideTimeOverride": false,
+ "id": 7,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 6,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": true
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "dsType": "influxdb",
+ "groupBy": [],
+ "measurement": "fs_usage",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "container_name",
+ "operator": "=~",
+ "value": "/^$container$/"
+ }
+ ]
+ }
+ ],
+ "thresholds": "",
+ "timeFrom": "1m",
+ "title": "Storage usage",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "avg"
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Storage",
+ "titleSize": "h3"
+ },
+ {
+ "collapse": false,
+ "height": 250,
+ "panels": [
+ {
+ "columns": [],
+ "datasource": "influxdb",
+ "description": "Runtime table",
+ "fontSize": "80%",
+ "height": "",
+ "id": 8,
+ "links": [],
+ "pageSize": 5,
+ "scroll": false,
+ "showHeader": true,
+ "sort": {
+ "col": 0,
+ "desc": true
+ },
+ "span": 12,
+ "styles": [
+ {
+ "alias": "Time",
+ "dateFormat": "MM/DD/YY h:mm:ss a",
+ "pattern": "Time",
+ "type": "date"
+ },
+ {
+ "alias": "",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "decimals": 2,
+ "pattern": "/.*/",
+ "thresholds": [],
+ "type": "number",
+ "unit": "short"
+ }
+ ],
+ "targets": [
+ {
+ "alias": "$col",
+ "dsType": "influxdb",
+ "groupBy": [],
+ "limit": "5",
+ "measurement": "runtime",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "Alloc"
+ ],
+ "type": "field"
+ }
+ ],
+ [
+ {
+ "params": [
+ "Frees"
+ ],
+ "type": "field"
+ }
+ ],
+ [
+ {
+ "params": [
+ "HeapAlloc"
+ ],
+ "type": "field"
+ }
+ ],
+ [
+ {
+ "params": [
+ "HeapIdle"
+ ],
+ "type": "field"
+ }
+ ],
+ [
+ {
+ "params": [
+ "HeapObjects"
+ ],
+ "type": "field"
+ }
+ ],
+ [
+ {
+ "params": [
+ "HeapReleased"
+ ],
+ "type": "field"
+ }
+ ],
+ [
+ {
+ "params": [
+ "HeapSys"
+ ],
+ "type": "field"
+ }
+ ],
+ [
+ {
+ "params": [
+ "Lookups"
+ ],
+ "type": "field"
+ }
+ ],
+ [
+ {
+ "params": [
+ "Mallocs"
+ ],
+ "type": "field"
+ }
+ ],
+ [
+ {
+ "params": [
+ "NumGC"
+ ],
+ "type": "field"
+ }
+ ],
+ [
+ {
+ "params": [
+ "NumGoroutine"
+ ],
+ "type": "field"
+ }
+ ],
+ [
+ {
+ "params": [
+ "PauseTotalNs"
+ ],
+ "type": "field"
+ }
+ ],
+ [
+ {
+ "params": [
+ "Sys"
+ ],
+ "type": "field"
+ }
+ ],
+ [
+ {
+ "params": [
+ "TotalAlloc"
+ ],
+ "type": "field"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "timeFrom": "1m",
+ "title": "Runtime Metrics",
+ "transform": "timeseries_to_columns",
+ "type": "table"
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Dashboard Row",
+ "titleSize": "h6"
+ }
+ ],
+ "schemaVersion": 14,
+ "style": "dark",
+ "tags": [
+ "vsperf",
+ "container"
+ ],
+ "templating": {
+ "list": [
+ {
+ "allValue": null,
+ "current": {
+ "selected": false,
+ "tags": [],
+ "text": "nginx",
+ "value": "nginx"
+ },
+ "datasource": "influxdb",
+ "hide": 0,
+ "includeAll": false,
+ "label": null,
+ "multi": false,
+ "name": "container",
+ "options": [],
+ "query": "show tag values with key = container_name",
+ "refresh": 1,
+ "regex": "[a-zA-Z0-9_/]*",
+ "sort": 0,
+ "tagValuesQuery": "",
+ "tags": [],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ },
+ {
+ "allValue": null,
+ "current": {
+ "tags": [],
+ "text": "All",
+ "value": "$__all"
+ },
+ "datasource": "influxdb",
+ "hide": 0,
+ "includeAll": true,
+ "label": null,
+ "multi": false,
+ "name": "core",
+ "options": [],
+ "query": "show tag values with key = instance",
+ "refresh": 1,
+ "regex": "",
+ "sort": 3,
+ "tagValuesQuery": "",
+ "tags": [],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ }
+ ]
+ },
+ "time": {
+ "from": "now-5m",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "5s",
+ "10s",
+ "30s",
+ "1m",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "",
+ "title": "Container Metrics",
+ "version": 12
+} \ No newline at end of file
diff --git a/tools/docker/results/jupyter/Dockerfile b/tools/docker/results/jupyter/Dockerfile
new file mode 100644
index 00000000..94f9bd36
--- /dev/null
+++ b/tools/docker/results/jupyter/Dockerfile
@@ -0,0 +1,16 @@
+FROM jupyter/scipy-notebook
+
+RUN python --version
+
+RUN conda install --quiet --yes -c \
+ conda-forge osmnx dask
+
+RUN pip install -U graphviz paramiko
+
+RUN echo "c.NotebookApp.token=''" >> $HOME/.jupyter/jupyter_notebook_config.py
+
+VOLUME /notebooks
+VOLUME /data
+
+RUN mkdir /data/results
+WORKDIR /notebooks
diff --git a/tools/docker/results/logstash/pipeline/02-beats-input.conf b/tools/docker/results/logstash/pipeline/02-beats-input.conf
new file mode 100644
index 00000000..a00d3f5b
--- /dev/null
+++ b/tools/docker/results/logstash/pipeline/02-beats-input.conf
@@ -0,0 +1,6 @@
+input {
+ beats {
+ port => 5044
+ ssl => false
+ }
+}
diff --git a/tools/docker/results/logstash/pipeline/20-collectd-input.conf b/tools/docker/results/logstash/pipeline/20-collectd-input.conf
new file mode 100644
index 00000000..990903f9
--- /dev/null
+++ b/tools/docker/results/logstash/pipeline/20-collectd-input.conf
@@ -0,0 +1,14 @@
+input {
+ udp {
+ port => 25826
+ buffer_size => 1452
+ type => collectd
+ codec => collectd { }
+ }
+}
+
+filter {
+ mutate {
+ remove_field => [ "host" ]
+ }
+}
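+
+# A matching collectd client configuration (a sketch) points the network
+# plugin at this listener, e.g. in collectd.conf on the monitored host:
+#   LoadPlugin network
+#   <Plugin network>
+#     Server "<logstash-host>" "25826"
+#   </Plugin>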
diff --git a/tools/docker/results/logstash/pipeline/30-output.conf b/tools/docker/results/logstash/pipeline/30-output.conf
new file mode 100644
index 00000000..0e3161a8
--- /dev/null
+++ b/tools/docker/results/logstash/pipeline/30-output.conf
@@ -0,0 +1,7 @@
+output {
+ elasticsearch {
+ hosts => "http://localhost:9200"
+ manage_template => false
+ codec => collectd { }
+ }
+}
diff --git a/tools/docker/results/notebooks/testresult-analysis.ipynb b/tools/docker/results/notebooks/testresult-analysis.ipynb
new file mode 100644
index 00000000..6ce58dd8
--- /dev/null
+++ b/tools/docker/results/notebooks/testresult-analysis.ipynb
@@ -0,0 +1,783 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "hide_input": true
+ },
+ "source": [
+ "# OPNFV VSPERF\n",
+ "# Beyond Performance Metrics: Towards Causation Analysis"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### sridhar.rao@spirent.com and acm@research.att.com"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Import packages\n",
+ "import numpy as np\n",
+ "import pandas as pd\n",
+ "import matplotlib.pyplot as plt\n",
+ "import seaborn as sns\n",
+ "from graphviz import Digraph\n",
+ "import collections\n",
+ "import glob\n",
+ "import os"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Get the results to analyze: \n",
+ "Getting Latest one, if ``directory_to_download`` is empty"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "hide_input": true
+ },
+ "outputs": [],
+ "source": [
+ "import paramiko\n",
+ "import tarfile\n",
+ "import os\n",
+ "from stat import S_ISDIR\n",
+ "RECV_BYTES = 4096\n",
+ "hostname = '10.10.120.24'\n",
+ "port = 22\n",
+ "uname='opnfv'\n",
+ "pwd='opnfv' \n",
+ "stdout_data = []\n",
+ "stderr_data = []\n",
+ "client = paramiko.Transport((hostname, port))\n",
+ "client.connect(username=uname, password=pwd)\n",
+ "session = client.open_channel(kind='session')\n",
+ "directory_to_download = ''\n",
+ "\n",
+ "session.exec_command('ls /tmp | grep results')\n",
+ "if not directory_to_download:\n",
+ " while True:\n",
+ " if session.recv_ready():\n",
+ " stdout_data.append(session.recv(RECV_BYTES))\n",
+ " if session.recv_stderr_ready():\n",
+ " stderr_data.append(session.recv_stderr(RECV_BYTES))\n",
+ " if session.exit_status_ready():\n",
+ " break\n",
+ " if stdout_data:\n",
+ " line = stdout_data[0]\n",
+ " filenames = line.decode(\"utf-8\").rstrip('\\n').split('\\n')\n",
+ " filenames = sorted(filenames)\n",
+ " latest = filenames[-1]\n",
+ " directory_to_download = os.path.join('/tmp', latest).replace(\"\\\\\",\"/\")\n",
+ " print(directory_to_download)\n",
+ "stdout_data = []\n",
+ "stderr_data = []\n",
+ "if directory_to_download:\n",
+ " # zip the collectd results to make the download faster\n",
+ " zip_command = 'sudo -S tar -czvf '+ directory_to_download + '/collectd.tar.gz -C ' + '/tmp/csv .'\n",
+ " session = client.open_channel(kind='session')\n",
+ " session.get_pty()\n",
+ " session.exec_command(zip_command)\n",
+ " while True:\n",
+ " if session.recv_ready():\n",
+ " stdout_data.append(session.recv(RECV_BYTES))\n",
+ " if session.recv_stderr_ready():\n",
+ " stderr_data.append(session.recv_stderr(RECV_BYTES))\n",
+ " if session.exit_status_ready():\n",
+ " break\n",
+ " if stderr_data:\n",
+ " print(stderr_data[0])\n",
+ " if stdout_data:\n",
+ " print(stdout_data[0])\n",
+ "\n",
+ " # Begin the actual downlaod\n",
+ " sftp = paramiko.SFTPClient.from_transport(client)\n",
+ " def sftp_walk(remotepath):\n",
+ " path=remotepath\n",
+ " files=[]\n",
+ " folders=[]\n",
+ " for f in sftp.listdir_attr(remotepath):\n",
+ " if S_ISDIR(f.st_mode):\n",
+ " folders.append(f.filename)\n",
+ " else:\n",
+ " files.append(f.filename)\n",
+ " if files:\n",
+ " yield path, files\n",
+ " # Filewise download happens here\n",
+ " for path,files in sftp_walk(directory_to_download):\n",
+ " for file in files:\n",
+ " remote = os.path.join(path,file).replace(\"\\\\\",\"/\")\n",
+ " local = os.path.join('/data/results', file).replace(\"\\/\",\"/\")\n",
+ " sftp.get(remote, local)\n",
+ "# Untar the collectd results if we got it.\n",
+ "path = os.path.join('/data/results', 'collectd.tar.gz')\n",
+ "if os.path.exists(path):\n",
+ " tar = tarfile.open(path)\n",
+ " tar.extractall()\n",
+ " tar.close()\n",
+ "# Ready to work with downloaded data, close the session and client.\n",
+ "session.close()\n",
+ "client.close()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "hide_input": true
+ },
+ "outputs": [],
+ "source": [
+ "strings = ('* OS:', '* Kernel Version:', '* Board:', '* CPU:', '* CPU cores:',\n",
+ " '* Memory:', '* Virtual Switch Set-up:',\n",
+ " '* Traffic Generator:','* vSwitch:', '* DPDK Version:', '* VNF:')\n",
+ "filename = os.path.basename(glob.glob('/data/results/result*.rst')[0])\n",
+ "info_dict = {}\n",
+ "with open(os.path.join('/data/results', filename), 'r') as file:\n",
+ " for line in file:\n",
+ " if any(s in line for s in strings):\n",
+ " info_dict[line.split(':', 1)[0]] = line.split(':', 1)[1].rstrip()\n",
+ "df = pd.DataFrame.from_dict(info_dict, orient='index', columns=['Value'])\n",
+ "df"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Understand the configuration used for the test."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "filename = os.path.basename(glob.glob('/data/results/vsperf*.conf')[0])\n",
+ "file = os.path.join('/data/results', filename)\n",
+ "with open(file, 'r') as f:\n",
+ " for line in f:\n",
+ " if line.startswith('TRAFFICGEN_DURATION'):\n",
+ " value = line.split('=')[1]\n",
+ " value = value.rstrip()\n",
+ " value = value.lstrip()\n",
+ " traffic_duration = int(value)\n",
+ " print(traffic_duration)\n",
+ " elif line.startswith('VSWITCH_PMD_CPU_MASK'):\n",
+ " value = line.split('=')[1]\n",
+ " value = value.rstrip()\n",
+ " pmd_cores_mask = value.lstrip()\n",
+ " print(pmd_cores_mask)\n",
+ " elif line.startswith('GUEST_CORE_BINDING'):\n",
+ " value = line.split('=')[1]\n",
+ " value = value.rstrip()\n",
+ " value = value.lstrip()\n",
+ " guest_cores = value[1:-2]\n",
+ " print(guest_cores)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## OVS-Ports and Cores"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "hide_input": true
+ },
+ "outputs": [],
+ "source": [
+ "import collections\n",
+ "portcores = collections.OrderedDict()\n",
+ "chunks = []\n",
+ "current_chunk = []\n",
+ "file = os.path.join('/data/results', 'ovs-cores.log')\n",
+ "with open(file, 'r') as f:\n",
+ " for line in f:\n",
+ " if line.startswith('pmd') and current_chunk:\n",
+ " # if line starts with token and the current chunk is not empty\n",
+ " chunks.append(current_chunk[:]) # add not empty chunk to chunks\n",
+ " current_chunk = [] # make current chunk blank\n",
+ " # just append a line to the current chunk on each iteration\n",
+ " if \"port:\" in line or 'pmd' in line:\n",
+ " current_chunk.append(line)\n",
+ " chunks.append(current_chunk) # append the last chunk outside the loop\n",
+ "\n",
+ "core_ids = []\n",
+ "for ch in chunks:\n",
+ " port_id = ''\n",
+ " core_id = ''\n",
+ " for line in ch:\n",
+ " if 'pmd' in line:\n",
+ " core_id = line.split()[-1][:-1]\n",
+ " if core_id not in core_ids:\n",
+ " core_ids.append(core_id)\n",
+ " elif 'port:' in line:\n",
+ " port_id = line.split()[1]\n",
+ " if port_id and core_id:\n",
+ " if port_id not in portcores:\n",
+ " portcores[port_id] = core_id\n",
+ "\n",
+ "# import graphviz\n",
+ "from graphviz import Digraph\n",
+ "ps = Digraph(name='ovs-ports-cores', node_attr={'shape': 'box'}, edge_attr={'arrowhead':\"none\"})\n",
+ "with ps.subgraph(name=\"cluster_0\") as c:\n",
+ " c.node_attr.update(style='filled', color='green')\n",
+ " c.node('t0', 'TGen-Port-0')\n",
+ " c.node('t1', 'TGen-Port-1')\n",
+ " c.attr(label='TGEN')\n",
+ " c.attr(color='blue')\n",
+ "with ps.subgraph(name=\"cluster_1\") as c:\n",
+ " c.node_attr.update(style='filled', color='yellow')\n",
+ " c.node('v0', 'VNF-Port-0')\n",
+ " c.node('v1', 'VNF-Port-1')\n",
+ " c.attr(label='VNF')\n",
+ " c.attr(color='blue')\n",
+ " \n",
+ "with ps.subgraph(name='cluster_2') as c: \n",
+ " c.attr(label='OVS-DPDK')\n",
+ " c.attr(color='blue')\n",
+ " count = 0\n",
+ " for port, core in portcores.items():\n",
+ " id = 'o'+str(count)\n",
+ " c.node(id, port+'\\nCore-ID:'+ core)\n",
+ " count += 1\n",
+ " num = port[-1]\n",
+ " if 'dpdkvhost' in port:\n",
+ " ps.edge(id, 'v'+num)\n",
+ " else:\n",
+ " ps.edge(id, 't'+num)\n",
+ "\n",
+ "ps"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Dropped Packets"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "hide_input": true
+ },
+ "outputs": [],
+ "source": [
+ "portcores = collections.OrderedDict()\n",
+ "chunks = []\n",
+ "current_chunk = []\n",
+ "file = os.path.join('/data/results', 'ovs-cores.log')\n",
+ "with open(file, 'r') as f:\n",
+ " for line in f:\n",
+ " if line.startswith('pmd') and current_chunk:\n",
+ " # if line starts with token and the current chunk is not empty\n",
+ " chunks.append(current_chunk[:]) # add not empty chunk to chunks\n",
+ " current_chunk = [] # make current chunk blank\n",
+ " # just append a line to the current chunk on each iteration\n",
+ " if \"port:\" in line or 'pmd' in line:\n",
+ " current_chunk.append(line)\n",
+ " chunks.append(current_chunk) # append the last chunk outside the loop\n",
+ "\n",
+ "core_ids = []\n",
+ "for ch in chunks:\n",
+ " port_id = ''\n",
+ " core_id = ''\n",
+ " for line in ch:\n",
+ " if 'pmd' in line:\n",
+ " core_id = line.split()[-1][:-1]\n",
+ " if core_id not in core_ids:\n",
+ " core_ids.append(core_id)\n",
+ " elif 'port:' in line:\n",
+ " port_id = line.split()[1]\n",
+ " if port_id and core_id:\n",
+ " if port_id not in portcores:\n",
+ " portcores[port_id] = core_id\n",
+ "\n",
+ "ps = Digraph(name='ovs-dropped', node_attr={'shape': 'box'}, edge_attr={'arrowhead':\"none\"})\n",
+ "\n",
+ "def get_dropped(port_id):\n",
+ " # port_id = 'dpdk0'\n",
+ " if glob.glob('./pod12-node4/*'+port_id):\n",
+ " dirname = os.path.basename(glob.glob('./pod12-node4/*'+port_id)[0])\n",
+ " if dirname:\n",
+ " if glob.glob('./pod12-node4/'+dirname+ '/*dropped*'):\n",
+ " filename = os.path.basename(glob.glob('./pod12-node4/'+dirname+ '/*dropped*')[0])\n",
+ " if filename:\n",
+ " with open(os.path.join('./pod12-node4', dirname, filename), 'r') as f:\n",
+ " line = f.readlines()[-1]\n",
+ " fields = line.split(',')\n",
+ " return fields[1], fields[2]\n",
+ " return 'NA','NA'\n",
+ "\n",
+ "with ps.subgraph(name=\"cluster_0\") as c:\n",
+ " c.node_attr.update(style='filled', color='pink')\n",
+ " c.attr(label='OVS-DPDK')\n",
+ " c.attr(color='blue')\n",
+ " count = 0\n",
+ " for port, core in portcores.items():\n",
+ " id = 'o'+str(count)\n",
+ " rx,tx = get_dropped(port)\n",
+ " c.node(id, port+'\\nRX-Dropped:'+ rx + '\\nTX-Dropped:' + tx)\n",
+ " count += 1\n",
+ " num = port[-1]\n",
+ "ps"
+ ]
+ },
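get_dropped() walks the collectd result tree and reads only the last sample of each dropped-counters CSV. An equivalent pandas sketch, assuming the collectd CSV layout used here (epoch,rx,tx columns; base path illustrative):

# Sketch: last RX/TX dropped sample via pandas (epoch,rx,tx layout assumed).
import glob
import os
import pandas as pd

def last_dropped(port_id, base='./pod12-node4'):
    matches = glob.glob(os.path.join(base, '*' + port_id, '*dropped*'))
    if not matches:
        return 'NA', 'NA'
    last = pd.read_csv(matches[0]).iloc[-1]
    return last['rx'], last['tx']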
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Plotting Live Results - T-Rex"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "hide_input": true
+ },
+ "outputs": [],
+ "source": [
+ "lines_seen = set() # holds lines already seen\n",
+ "outfile = open('./counts.dat', \"w\")\n",
+ "file = os.path.join('/data/results', 'trex-liveresults-counts.dat')\n",
+ "for line in open(file, \"r\"):\n",
+ " if line not in lines_seen: # not a duplicate\n",
+ " outfile.write(line)\n",
+ " lines_seen.add(line)\n",
+ "outfile.close()\n",
+ "tdf = pd.read_csv('./counts.dat')\n",
+ "print(tdf.columns)\n",
+ "ax = tdf.loc[(tdf.rx_port == 1)].plot(y='rx_pkts')\n",
+ "def highlight(indices,ax):\n",
+ " i=0\n",
+ " while i<len(indices):\n",
+ " ax.axvspan(indices[i][0], indices[i][1], facecolor='RED', edgecolor='BLUE', alpha=.2)\n",
+ " i+=1\n",
+ "\n",
+ "ind = 0\n",
+ "indv = tdf.ts[0]\n",
+ "ax.set_xlabel(\"Index\")\n",
+ "ax.set_ylabel('Count')\n",
+ "for i in range(len(tdf.ts)):\n",
+ " if tdf.ts[i] - indv > int(traffic_duration):\n",
+ " highlight([(ind, i)], ax)\n",
+ " ind = i\n",
+ " indv = tdf.ts[i]\n",
+ "highlight([(ind,i)], ax)"
+ ]
+ },
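The duplicate-line filter above writes a temporary counts.dat before plotting. pandas can deduplicate directly; a sketch with the same input file:

# Sketch: deduplicate the T-Rex live counts without a temporary file.
import os
import pandas as pd

src = os.path.join('/data/results', 'trex-liveresults-counts.dat')
tdf = pd.read_csv(src).drop_duplicates()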
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## IRQ Latency Histogram"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "hide_input": true
+ },
+ "outputs": [],
+ "source": [
+ "file = os.path.join('/data/results', 'RUNirq.irq.log')\n",
+ "tdf = pd.read_csv(file)\n",
+ "tdf.columns\n",
+ "exclude = [' <1', ' < 5', ' < 10',' < 50', ' < 100', ' < 500', ' < 1000']\n",
+ "ax = tdf.loc[:, tdf.columns.difference(exclude)].plot(x=' number', xticks=tdf[' number'], figsize=(20,10))\n",
+ "ax.set_xlabel('Core #')\n",
+ "ax.set_ylabel('Count')\n",
+ "#tdf.plot(x='number')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Sample Collectd Metric Display - L3 Cache Occupancy in Bytes"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import math\n",
+ "def cpumask2coreids(mask):\n",
+ " intmask = int(mask, 16)\n",
+ " i = 1\n",
+ " coreids = []\n",
+ " while (i < intmask):\n",
+ " if (i & intmask):\n",
+ " coreids.append(str(math.frexp(i)[-1]-1))\n",
+ " i = i << 1\n",
+ " return (coreids)\n",
+ "\n",
+ "vswitch_cpus = \"['2']\"\n",
+ "ps = Digraph(name='cpu-map', node_attr={'shape': 'box'}, edge_attr={'arrowhead':\"none\"})\n",
+ "with ps.subgraph(name=\"cluster_0\") as c:\n",
+ " c.node_attr.update(style='filled', color='pink')\n",
+ " c.attr(label='CPU-MAPPINGS')\n",
+ " c.attr(color='blue')\n",
+ " c.node('vscpus', 'vSwitch: \\n' + vswitch_cpus)\n",
+ " # vnf_cpus = cpumask2coreids(guest_cores)\n",
+ " c.node('vncpus', 'VNF: \\n' + guest_cores)\n",
+ " pmd_cpus = cpumask2coreids(pmd_cores_mask[1:-1])\n",
+ " c.node('pmcpus', 'PMDs: \\n' + str(pmd_cpus))\n",
+ "\n",
+ "ps"
+ ]
+ },
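cpumask2coreids() walks the set bits of a hex CPU mask and converts each bit position to a core id. A quick check of the expected behavior with the corrected loop bound (mask values illustrative):

# Sketch: expected cpumask2coreids() results on sample masks.
assert cpumask2coreids('6') == ['1', '2']  # bits 1 and 2 set
assert cpumask2coreids('4') == ['2']       # single bit set; needs the '<=' bound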
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "hide_input": true
+ },
+ "outputs": [],
+ "source": [
+ "# Path where collectd results are stored.\n",
+ "mypath = \"./pod12-node4\"\n",
+ "file_count = 0\n",
+ "cpu_names = []\n",
+ "for level1 in os.listdir(mypath):\n",
+ " if \"intel_rdt\" in level1:\n",
+ " l2path = os.path.join(mypath, level1)\n",
+ " for level2 in os.listdir(l2path):\n",
+ " if \"bytes\" in level2:\n",
+ " l3path = os.path.join(l2path, level2)\n",
+ " if file_count == 0:\n",
+ " file_count += 1\n",
+ " df = pd.read_csv(l3path)\n",
+ " nn = 'cpu-'+ level1[len('intel_rdt-'):]\n",
+ " # nn = 'cpu-'+ level1.split('-')[1]\n",
+ " cpu_names.append(nn)\n",
+ " # print(nn)\n",
+ " df.rename(columns={'value': nn}, inplace=True)\n",
+ " else:\n",
+ " file_count += 1\n",
+ " tdf = pd.read_csv(l3path)\n",
+ " nn = 'cpu-'+ level1[len('intel_rdt-'):]\n",
+ " cpu_names.append(nn)\n",
+ " tdf.rename(columns={'value': nn}, inplace=True)\n",
+ " df[nn] = tdf[nn] \n",
+ "\n",
+ "ax = df.plot(x='epoch', y=cpu_names)\n",
+ "ax.set_ylabel(\"MBytes\")\n",
+ "ax.set_xlabel('Time')\n",
+ "\n",
+ "\n",
+ " \n",
+ "# df = pd.read_csv()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Events "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "hide_input": true
+ },
+ "outputs": [],
+ "source": [
+ "from datetime import datetime\n",
+ "filename = os.path.basename(glob.glob('/data/results/vsperf-overall*.log')[0])\n",
+ "logfile = os.path.join('/data/results', filename)\n",
+ "linecnt = 0\n",
+ "times = {}\n",
+ "with open(logfile) as f:\n",
+ " for line in f:\n",
+ " line = line.strip('\\n')\n",
+ " if linecnt == 0:\n",
+ " times['Start-Test'] = line.split(\" : \")[0]\n",
+ " linecnt += 1\n",
+ " if 'Binding NICs' in line:\n",
+ " times['Binding-NICs'] = line.split(\" : \")[0]\n",
+ " if 'Starting traffic at' in line:\n",
+ " sline = line.split(\" : \")[1]\n",
+ " time = line.split(\" : \")[0]\n",
+ " speed = sline.split('at',1)[1]\n",
+ " times[speed] = time \n",
+ " elif 'Starting vswitchd' in line:\n",
+ " times['vSwitch-Start'] = line.split(\" : \")[0]\n",
+ " elif 'Starting ovs-vswitchd' in line:\n",
+ " times['ovsvswitch-start'] = line.split(\" : \")[0]\n",
+ " elif 'Adding Ports' in line:\n",
+ " times['Ports-Added'] = line.split(\" : \")[0]\n",
+ " elif 'Flows Added' in line:\n",
+ " times['Flows-Added'] = line.split(\" : \")[0]\n",
+ " elif 'send_traffic with' in line:\n",
+ " times['Traffic Start'] = line.split(\" : \")[0]\n",
+ " elif 'l2 framesize 1280' in line:\n",
+ " times['Traffic-Start-1280'] = line.split(\" : \")[0]\n",
+ " elif 'Starting qemu' in line:\n",
+ " times['VNF-Start'] = line.split(\" : \")[0]\n",
+ " elif 'l2 framesize 64' in line:\n",
+ " times['Traffic-Start-64'] = line.split(\" : \")[0]\n",
+ " elif 'l2 framesize 128' in line:\n",
+ " times['Traffic-Start-128'] = line.split(\" : \")[0]\n",
+ " elif 'l2 framesize 256' in line:\n",
+ " times['Traffic-Start-256'] = line.split(\" : \")[0]\n",
+ " elif 'l2 framesize 512' in line:\n",
+ " times['Traffic-Start-512'] = line.split(\" : \")[0]\n",
+ " elif 'l2 framesize 1024' in line:\n",
+ " times['Traffic-Start-1024'] = line.split(\" : \")[0]\n",
+ " elif 'l2 framesize 1518' in line:\n",
+ " times['Traffic-Start-1518'] = line.split(\" : \")[0]\n",
+ " elif 'dump flows' in line:\n",
+ " times['Traffic-End'] = line.split(\" : \")[0]\n",
+ " elif 'Wait for QEMU' in line:\n",
+ " times['VNF-Stop'] = line.split(\" : \")[0]\n",
+ " elif 'delete flow' in line:\n",
+ " times['flow-removed'] = line.split(\" : \")[0]\n",
+ " elif 'delete port' in line:\n",
+ " times['port-removed'] = line.split(\" : \")[0]\n",
+ " elif 'Killing ovs-vswitchd' in line:\n",
+ " times['vSwitch-Stop'] = line.split(\" : \")[0]\n",
+ "\n",
+ "times['Test-Stop'] = line.split(\" : \")[0]\n",
+ "#print(times)\n",
+ "ddf = pd.DataFrame.from_dict(times, orient='index', columns=['timestamp'])\n",
+ "names = ddf.index.values\n",
+ "dates = ddf['timestamp'].tolist()\n",
+ "datefmt=\"%Y-%m-%d %H:%M:%S,%f\"\n",
+ "dates = [datetime.strptime(ii, datefmt) for ii in dates]\n",
+ "# print(names)\n",
+ "# print(dates)"
+ ]
+ },
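The elif chain above maps fixed log markers to event labels one by one. The same extraction can be table-driven, which makes adding markers (e.g. more frame sizes) a one-line change; a sketch using a subset of the markers from above:

# Sketch: table-driven event extraction (subset of the markers above).
MARKERS = {
    'Binding NICs': 'Binding-NICs',
    'Starting vswitchd': 'vSwitch-Start',
    'Adding Ports': 'Ports-Added',
    'Flows Added': 'Flows-Added',
    'Starting qemu': 'VNF-Start',
    'dump flows': 'Traffic-End',
    'Killing ovs-vswitchd': 'vSwitch-Stop',
}

def extract_events(lines):
    times = {}
    for line in lines:
        for marker, label in MARKERS.items():
            if marker in line:
                times[label] = line.split(' : ')[0]
    return times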
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "hide_input": true
+ },
+ "outputs": [],
+ "source": [
+ "import matplotlib.dates as mdates\n",
+ "from matplotlib import ticker\n",
+ "\n",
+ "levels = np.array([-5, 5, -3, 3, -1, 1])\n",
+ "fig, ax = plt.subplots(figsize=(40, 5))\n",
+ "\n",
+ "# Create the base line\n",
+ "start = min(dates)\n",
+ "stop = max(dates)\n",
+ "ax.plot((start, stop), (0, 0), 'k', alpha=.5)\n",
+ "\n",
+ "pos_list = np.arange(len(dates))\n",
+ "\n",
+ "# Iterate through releases annotating each one\n",
+ "for ii, (iname, idate) in enumerate(zip(names, dates)):\n",
+ " level = levels[ii % 6]\n",
+ " vert = 'top' if level < 0 else 'bottom'\n",
+ " ax.scatter(idate, 0, s=100, facecolor='w', edgecolor='k', zorder=9999)\n",
+ " # Plot a line up to the text\n",
+ " ax.plot((idate, idate), (0, level), c='r', alpha=.7)\n",
+ " # Give the text a faint background and align it properly\n",
+ " ax.text(idate, level, iname,\n",
+ " horizontalalignment='right', verticalalignment=vert, fontsize=14,\n",
+ " backgroundcolor=(1., 1., 1., .3))\n",
+ "ax.set(title=\"VSPERF Main Events\")\n",
+ "# Set the xticks formatting\n",
+ "ax.get_xaxis().set_major_locator(mdates.SecondLocator(interval=30))\n",
+ "ax.get_xaxis().set_major_formatter(mdates.DateFormatter(\"%M %S\"))\n",
+ "fig.autofmt_xdate()\n",
+ "plt.setp((ax.get_yticklabels() + ax.get_yticklines() +\n",
+ " list(ax.spines.values())), visible=False)\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Current and old."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Current Result"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "hide_input": true
+ },
+ "outputs": [],
+ "source": [
+ "import glob\n",
+ "filename = os.path.basename(glob.glob('/data/results/result*.csv')[0])\n",
+ "filename\n",
+ "tdf = pd.read_csv(os.path.join('/data/results', filename))\n",
+ "pkts = ['tx_frames', 'rx_frames']\n",
+ "fps = ['tx_rate_fps', 'throughput_rx_fps']\n",
+ "mbps = ['tx_rate_mbps', 'throughput_rx_mbps']\n",
+ "pcents = ['tx_rate_percent', 'throughput_rx_percent', 'frame_loss_percent']\n",
+ "fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(14, 12))\n",
+ "tdf.plot.bar(y= pkts,ax=axes[0,0])\n",
+ "tdf.plot.bar(y= fps,ax=axes[0,1])\n",
+ "tdf.plot.bar(y= mbps,ax=axes[1,0])\n",
+ "tdf.plot.bar(y= pcents,ax=axes[1,1])\n",
+ "current_pkt_size = str(tdf['packet_size'].iloc[-1])\n",
+ "current_rx_fps = str(tdf['throughput_rx_fps'].iloc[-1])\n",
+ "print(current_rx_fps)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## How Current Result compares to Previous ones?"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "hide_input": true
+ },
+ "outputs": [],
+ "source": [
+ "import urllib\n",
+ "import json\n",
+ "import requests\n",
+ "#json_data = requests.get('http://testresults.opnfv.org/test/api/v1/results?project=vsperf').json()\n",
+ "json_data = requests.get('http://10.10.120.22:8000/api/v1/results?project=vsperf').json()\n",
+ "res = json_data['results']\n",
+ "df1 = pd.DataFrame(res)\n",
+ "sort_by_date = df1.sort_values('start_date')\n",
+ "details = df1['details'].apply(pd.Series)\n",
+ "details[current_pkt_size] = pd.to_numeric(pd.Series(details[current_pkt_size]))\n",
+ "# details.plot.bar(y = current_pkt_size)\n",
+ "details_cur_pkt = details[[current_pkt_size]].copy()\n",
+ "details_cur_pkt.loc[-1]= float(current_rx_fps)\n",
+ "details_cur_pkt.index = details_cur_pkt.index + 1 # shifting index\n",
+ "details_cur_pkt.sort_index(inplace=True) \n",
+ "ax = details_cur_pkt.plot.bar()\n",
+ "ax.set_ylabel(\"Frames per sec\")\n",
+ "ax.set_xlabel(\"Run Number\")\n",
+ "def highlight(indices,ax):\n",
+ " i=0\n",
+ " while i<len(indices):\n",
+ " ax.axvspan(indices[i]-0.5, indices[i]+0.5, facecolor='RED', edgecolor='none', alpha=.2)\n",
+ " i+=1\n",
+ "highlight([0], ax)"
+ ]
+ },
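The comparison cell fetches historical runs from the results API and prepends the current run at index 0. A condensed sketch of the same reshaping (endpoint and the details-keyed-by-packet-size response shape as used above; current_pkt_size and current_rx_fps come from the earlier cell):

# Sketch: prepend the current run to the historical fps series.
import pandas as pd
import requests

res = requests.get('http://10.10.120.22:8000/api/v1/results?project=vsperf').json()['results']
history = pd.to_numeric(pd.DataFrame(res)['details'].apply(pd.Series)[current_pkt_size])
series = pd.concat([pd.Series([float(current_rx_fps)]), history], ignore_index=True)
series.plot.bar()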
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Heatmaps"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "hide_input": true
+ },
+ "outputs": [],
+ "source": [
+ "array_of_dfs = []\n",
+ "for dirs in glob.glob('./pod12-node4/ovs_stats-vsperf*'):\n",
+ " dirname = os.path.basename(dirs)\n",
+ " if dirname:\n",
+ " port = dirname.split('.')[1]\n",
+ " if glob.glob('./pod12-node4/'+dirname+ '/*dropped*'):\n",
+ " full_path = glob.glob('./pod12-node4/'+dirname+ '/*dropped*')[0]\n",
+ " filename = os.path.basename(full_path)\n",
+ " if filename:\n",
+ " df = pd.read_csv(full_path)\n",
+ " df.rename(index=str, columns={\"rx\": port+\"-rx\" , \"tx\": port+\"-tx\"}, inplace=True)\n",
+ " df = df.drop(columns=['epoch'])\n",
+ " array_of_dfs.append(df)\n",
+ "master_df = pd.concat(array_of_dfs, axis=1, sort=True)\n",
+ "master_df.columns\n",
+ "\n",
+ "# get the correlation coefficient between the different columns\n",
+ "corr = master_df.iloc[:, 0:].corr()\n",
+ "arr_corr = corr.values\n",
+ "# mask out the top triangle\n",
+ "arr_corr[np.triu_indices_from(arr_corr)] = np.nan\n",
+ "fig, ax = plt.subplots(figsize=(18, 12))\n",
+ "sns.set(font_scale=3.0)\n",
+ "hm = sns.heatmap(arr_corr, cbar=True, vmin=-0.5, vmax=0.5,\n",
+ " fmt='.2f', annot_kws={'size': 20}, annot=True, \n",
+ " square=True, cmap=plt.cm.Reds)\n",
+ "ticks = np.arange(corr.shape[0]) + 0.5\n",
+ "ax.set_xticks(ticks)\n",
+ "ax.set_xticklabels(corr.columns, rotation=90, fontsize=20)\n",
+ "ax.set_yticks(ticks)\n",
+ "ax.set_yticklabels(corr.index, rotation=360, fontsize=20)\n",
+ "\n",
+ "ax.set_title('Heatmap')\n",
+ "plt.tight_layout()\n",
+ "plt.show()"
+ ]
+ }
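Masking the upper triangle with NaN hides the mirror half of the correlation matrix before plotting. seaborn can apply the same mask explicitly, which leaves the corr DataFrame untouched; a sketch:

# Sketch: same upper-triangle masking via seaborn's mask argument.
import numpy as np
import seaborn as sns

mask = np.triu(np.ones_like(corr, dtype=bool))
sns.heatmap(corr, mask=mask, vmin=-0.5, vmax=0.5, annot=True, fmt='.2f',
            square=True, cmap='Reds')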
+ ],
+ "metadata": {
+ "author": {
+ "@type": "Person",
+ "name": "Sridhar K. N. Rao",
+ "worksFor": {
+ "@type": "Organization",
+ "name": "Spirent Communications"
+ }
+ },
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.7.1"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/tools/docker/results/resultsdb/cases.json b/tools/docker/results/resultsdb/cases.json
new file mode 100644
index 00000000..e7576dbf
--- /dev/null
+++ b/tools/docker/results/resultsdb/cases.json
@@ -0,0 +1 @@
+{"testcases": [{"project_name": "vsperf", "run": null, "description": "LTD.Throughput.RFC2544.PacketLossRatio for DPDK Ovs", "ci_loop": null, "tags": null, "url": "http://tput_ovsdpdk", "_id": "565feb6b514bc5087f3cfe2e", "catalog_description": "Packet Loss Ratio for DPDK OVS (RFC2544)", "creation_date": "2015-12-03 07:12:43.925943", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "tput_ovsdpdk"}, {"project_name": "vsperf", "run": null, "description": "LTD.Throughput.RFC2544.PacketLossRatio for Vanilla Ovs", "ci_loop": null, "tags": null, "url": "http://tput_ovs", "_id": "566005d9514bc5087f3cfe30", "catalog_description": "Packet Loss Ratio for Vanilla Ovs (RFC2544)", "creation_date": "2015-12-03 09:05:29.686136", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "tput_ovs"}, {"project_name": "vsperf", "run": null, "description": "LTD.Throughput.RFC2544.PacketLossRatio for Vanilla Ovs", "ci_loop": null, "tags": null, "url": "http://tput_ovs", "_id": "566005ed514bc5087f3cfe31", "catalog_description": "Packet Loss Ratio for Vanilla Ovs (RFC2544)", "creation_date": "2015-12-03 09:05:49.363961", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "tput_ovs"}, {"project_name": "vsperf", "run": null, "description": "LTD.Throughput.RFC2544.BackToBackFrames for DPDK Ovs", "ci_loop": null, "tags": null, "url": "http://b2b_ovsdpdk", "_id": "566006c8514bc5087f3cfe32", "catalog_description": "Back To Back Frames for DPDK Ovs (RFC2544)", "creation_date": "2015-12-03 09:09:28.927130", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "b2b_ovsdpdk"}, {"project_name": "vsperf", "run": null, "description": "LTD.Throughput.RFC2544.BackToBackFrames for Vanilla Ovs", "ci_loop": null, "tags": null, "url": "http://b2b_ovs", "_id": "5660071e514bc5087f3cfe33", "catalog_description": "Back To Back Frames for Vanilla Ovs (RFC2544)", "creation_date": "2015-12-03 09:10:54.473180", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "b2b_ovs"}, {"project_name": "vsperf", "run": null, "description": "LTD.Throughput.RFC2544.PacketLossRatioFrameModification for DPDK Ovs", "ci_loop": null, "tags": null, "url": "http://tput_mod_vlan_ovsdpdk", "_id": "566007a9514bc5087f3cfe34", "catalog_description": "Packet Loss Ratio Frame Modification for DPDK Ovs (RFC2544)", "creation_date": "2015-12-03 09:13:13.600168", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "tput_mod_vlan_ovsdpdk"}, {"project_name": "vsperf", "run": null, "description": "LTD.Throughput.RFC2544.PacketLossRatioFrameModification for Vanilla Ovs", "ci_loop": null, "tags": null, "url": "http://tput_mod_vlan_ovs", "_id": "566007ec514bc5087f3cfe35", "catalog_description": "Packet Loss Ratio Frame Modification for Vanilla Ovs (RFC2544)", "creation_date": "2015-12-03 09:14:20.594501", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "tput_mod_vlan_ovs"}, {"project_name": "vsperf", "run": null, "description": "LTD.Scalability.RFC2544.0PacketLoss for DPDK Ovs", "ci_loop": null, "tags": null, "url": "http://scalability_ovsdpdk", 
"_id": "56600870514bc5087f3cfe36", "catalog_description": ".Scalability Packet Loss for DPDK Ovs", "creation_date": "2015-12-03 09:16:32.491960", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "scalability_ovsdpdk"}, {"project_name": "vsperf", "run": null, "description": "LTD.Scalability.RFC2544.0PacketLoss for Vanilla Ovs", "ci_loop": null, "tags": null, "url": "http://scalability_ovs", "_id": "566008b3514bc5087f3cfe37", "catalog_description": "Scalability Packet Loss for Vanilla Ovs (RFC2544)", "creation_date": "2015-12-03 09:17:39.501079", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "scalability_ovs"}, {"project_name": "vsperf", "run": null, "description": "PVP LTD.Throughput.RFC2544.PacketLossRatio for DPDK User Ovs", "ci_loop": null, "tags": null, "url": "http://pvp_tput_ovsdpdkuser", "_id": "5660095a514bc5087f3cfe38", "catalog_description": "PVP Packet Loss Ratio for DPDK User Ovs", "creation_date": "2015-12-03 09:20:26.244843", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "pvp_tput_ovsdpdkuser"}, {"project_name": "vsperf", "run": null, "description": "PVP LTD.Throughput.RFC2544.PacketLossRatio for Vanilla Ovs", "ci_loop": null, "tags": null, "url": "http://pvp_tput_ovsvirtio", "_id": "566009ae514bc5087f3cfe39", "catalog_description": "PVP Packet Loss Ratio for Vanilla Ovs", "creation_date": "2015-12-03 09:21:50.251212", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "pvp_tput_ovsvirtio"}, {"project_name": "vsperf", "run": null, "description": "PVP LTD.Throughput.RFC2544.BackToBackFrames for DPDK User Ovs", "ci_loop": null, "tags": null, "url": "http://pvp_b2b_ovsdpdkuser", "_id": "56600a1a514bc5087f3cfe3a", "catalog_description": "PVP Back To Back Frames for DPDK User Ovs", "creation_date": "2015-12-03 09:23:38.269821", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "pvp_b2b_ovsdpdkuser"}, {"project_name": "vsperf", "run": null, "description": "PVP LTD.Throughput.RFC2544.BackToBackFrames for Vanilla Ovs", "ci_loop": null, "tags": null, "url": "http://pvp_b2b_ovsvirtio", "_id": "56600a5f514bc5087f3cfe3b", "catalog_description": "PVP Back To Back Frames for Vanilla Ovs", "creation_date": "2015-12-03 09:24:47.990062", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "pvp_b2b_ovsvirtio"}, {"project_name": "vsperf", "run": null, "description": "PVVP LTD.Throughput.RFC2544.PacketLossRatio for DPDK User Ovs", "ci_loop": null, "tags": null, "url": "http://pvvp_tput_ovsdpdkuser", "_id": "56600ab3514bc5087f3cfe3c", "catalog_description": "PVVP Packet Loss Ratio for DPDK User Ovs", "creation_date": "2015-12-03 09:26:11.657515", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "pvvp_tput_ovsdpdkuser"}, {"project_name": "vsperf", "run": null, "description": "PVVP LTD.Throughput.RFC2544.PacketLossRatio for Vanilla Ovs", "ci_loop": null, "tags": null, "url": "http://pvvp_tput_ovsvirtio", "_id": "56600ae9514bc5087f3cfe3d", "catalog_description": "PVVP Packet Loss Ratio for Vanilla Ovs", "creation_date": "2015-12-03 09:27:05.466374", 
"domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "pvvp_tput_ovsvirtio"}, {"project_name": "vsperf", "run": null, "description": "PVVP LTD.Throughput.RFC2544.BackToBackFrames for DPDK User Ovs", "ci_loop": null, "tags": null, "url": "http://pvvp_b2b_ovsdpdkuser", "_id": "56600b2a514bc5087f3cfe3e", "catalog_description": "PVVP Back To Back Frames for DPDK User Ovs", "creation_date": "2015-12-03 09:28:10.150217", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "pvvp_b2b_ovsdpdkuser"}, {"project_name": "vsperf", "run": null, "description": "PVVP LTD.Throughput.RFC2544.BackToBackFrames for Vanilla Ovs", "ci_loop": null, "tags": null, "url": "http://pvvp_b2b_ovsvirtio", "_id": "56600b4f514bc5087f3cfe3f", "catalog_description": "PVVP Back To Back Frames for Vanilla Ovs", "creation_date": "2015-12-03 09:28:47.108529", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "pvvp_b2b_ovsvirtio"}, {"project_name": "vsperf", "run": "", "description": "", "ci_loop": "", "tags": "ovs,dpdk", "url": "", "_id": "591e8a8f41b755000a68c831", "catalog_description": "Phy2Phy Continuous Stream DPDK", "creation_date": "2017-05-19 06:02:55.177254", "domains": "compute", "dependencies": "", "version": ">euphrates", "criteria": "", "tier": "performance", "trust": null, "blocking": "", "name": "cont_ovsdpdk"}, {"project_name": "vsperf", "run": "", "description": "", "ci_loop": "", "tags": "ovs", "url": "", "_id": "5980d1b073ce050010c339ca", "catalog_description": "Phy2Phy Continuous Stream", "creation_date": "2017-08-01 19:08:32.518983", "domains": "compute", "dependencies": "", "version": "euphrates", "criteria": "", "tier": null, "trust": null, "blocking": "", "name": "cont_ovs"}, {"project_name": "vsperf", "run": null, "description": "LTD.Throughput.RFC2544.PacketLossRatio for DPDK Ovs", "ci_loop": null, "tags": null, "url": "http://tput_ovsdpdk", "_id": "565feb6b514bc5087f3cfe2e", "catalog_description": "Packet Loss Ratio for DPDK OVS (RFC2544)", "creation_date": "2015-12-03 07:12:43.925943", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": "Silver", "blocking": null, "name": "phy2phy_tput_ovsdpdkvhost"}, {"project_name": "vsperf", "run": null, "description": "LTD.Throughput.RFC2544.PacketLossRatio for Vanilla Ovs", "ci_loop": null, "tags": null, "url": "http://tput_ovs", "_id": "566005d9514bc5087f3cfe30", "catalog_description": "Packet Loss Ratio for Vanilla Ovs (RFC2544)", "creation_date": "2015-12-03 09:05:29.686136", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": "Silver", "blocking": null, "name": "phy2phy_tput_ovsvanilla"}, {"project_name": "vsperf", "run": null, "description": "LTD.Throughput.RFC2544.BackToBackFrames for DPDK Ovs", "ci_loop": null, "tags": null, "url": "http://b2b_ovsdpdk", "_id": "566006c8514bc5087f3cfe32", "catalog_description": "Back To Back Frames for DPDK Ovs (RFC2544)", "creation_date": "2015-12-03 09:09:28.927130", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": "Silver", "blocking": null, "name": "back2back_ovsdpdkvhost"}, {"project_name": "vsperf", "run": null, "description": "LTD.Throughput.RFC2544.BackToBackFrames for Vanilla Ovs", "ci_loop": null, "tags": null, "url": "http://b2b_ovs", "_id": 
"5660071e514bc5087f3cfe33", "catalog_description": "Back To Back Frames for Vanilla Ovs (RFC2544)", "creation_date": "2015-12-03 09:10:54.473180", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": "Silver", "blocking": null, "name": "back2back_ovsvanilla"}, {"project_name": "vsperf", "run": null, "description": "LTD.Throughput.RFC2544.PacketLossRatioFrameModification for DPDK Ovs", "ci_loop": null, "tags": null, "url": "http://tput_mod_vlan_ovsdpdk", "_id": "566007a9514bc5087f3cfe34", "catalog_description": "Packet Loss Ratio Frame Modification for DPDK Ovs (RFC2544)", "creation_date": "2015-12-03 09:13:13.600168", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": "Silver", "blocking": null, "name": "phy2phy_tput_mod_vlan_ovsdpdkvhost"}, {"project_name": "vsperf", "run": null, "description": "LTD.Throughput.RFC2544.PacketLossRatioFrameModification for Vanilla Ovs", "ci_loop": null, "tags": null, "url": "http://tput_mod_vlan_ovs", "_id": "566007ec514bc5087f3cfe35", "catalog_description": "Packet Loss Ratio Frame Modification for Vanilla Ovs (RFC2544)", "creation_date": "2015-12-03 09:14:20.594501", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": "Silver", "blocking": null, "name": "phy2phy_tput_mod_vlan_ovsvanilla"}, {"project_name": "vsperf", "run": null, "description": "LTD.Scalability.RFC2544.0PacketLoss for DPDK Ovs", "ci_loop": null, "tags": null, "url": "http://scalability_ovsdpdk", "_id": "56600870514bc5087f3cfe36", "catalog_description": ".Scalability Packet Loss for DPDK Ovs", "creation_date": "2015-12-03 09:16:32.491960", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": "Silver", "blocking": null, "name": "phy2phy_scalability_ovsdpdkvhost"}, {"project_name": "vsperf", "run": null, "description": "LTD.Scalability.RFC2544.0PacketLoss for Vanilla Ovs", "ci_loop": null, "tags": null, "url": "http://scalability_ovs", "_id": "566008b3514bc5087f3cfe37", "catalog_description": "Scalability Packet Loss for Vanilla Ovs (RFC2544)", "creation_date": "2015-12-03 09:17:39.501079", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": "Silver", "blocking": null, "name": "phy2phy_scalability_ovsvanilla"}, {"project_name": "vsperf", "run": null, "description": "PVP LTD.Throughput.RFC2544.PacketLossRatio for DPDK User Ovs", "ci_loop": null, "tags": null, "url": "http://pvp_tput_ovsdpdkuser", "_id": "5660095a514bc5087f3cfe38", "catalog_description": "PVP Packet Loss Ratio for DPDK User Ovs", "creation_date": "2015-12-03 09:20:26.244843", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": "Silver", "blocking": null, "name": "pvp_tput_ovsdpdkvhost"}, {"project_name": "vsperf", "run": null, "description": "PVP LTD.Throughput.RFC2544.PacketLossRatio for Vanilla Ovs", "ci_loop": null, "tags": null, "url": "http://pvp_tput_ovsvirtio", "_id": "566009ae514bc5087f3cfe39", "catalog_description": "PVP Packet Loss Ratio for Vanilla Ovs", "creation_date": "2015-12-03 09:21:50.251212", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": "Silver", "blocking": null, "name": "pvp_tput_ovsvanilla"}, {"project_name": "vsperf", "run": null, "description": "PVP LTD.Throughput.RFC2544.BackToBackFrames for DPDK User Ovs", "ci_loop": null, "tags": null, "url": "http://pvp_b2b_ovsdpdkuser", "_id": 
"56600a1a514bc5087f3cfe3a", "catalog_description": "PVP Back To Back Frames for DPDK User Ovs", "creation_date": "2015-12-03 09:23:38.269821", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": "Silver", "blocking": null, "name": "pvp_back2back_ovsdpdkvhost"}, {"project_name": "vsperf", "run": null, "description": "PVP LTD.Throughput.RFC2544.BackToBackFrames for Vanilla Ovs", "ci_loop": null, "tags": null, "url": "http://pvp_b2b_ovsvirtio", "_id": "56600a5f514bc5087f3cfe3b", "catalog_description": "PVP Back To Back Frames for Vanilla Ovs", "creation_date": "2015-12-03 09:24:47.990062", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": "Silver", "blocking": null, "name": "pvp_back2back_ovsvanilla"}, {"project_name": "vsperf", "run": null, "description": "PVVP LTD.Throughput.RFC2544.PacketLossRatio for DPDK User Ovs", "ci_loop": null, "tags": null, "url": "http://pvvp_tput_ovsdpdkuser", "_id": "56600ab3514bc5087f3cfe3c", "catalog_description": "PVVP Packet Loss Ratio for DPDK User Ovs", "creation_date": "2015-12-03 09:26:11.657515", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": "Silver", "blocking": null, "name": "pvvp_tput_ovsdpdkvhost"}, {"project_name": "vsperf", "run": null, "description": "PVVP LTD.Throughput.RFC2544.PacketLossRatio for Vanilla Ovs", "ci_loop": null, "tags": null, "url": "http://pvvp_tput_ovsvirtio", "_id": "56600ae9514bc5087f3cfe3d", "catalog_description": "PVVP Packet Loss Ratio for Vanilla Ovs", "creation_date": "2015-12-03 09:27:05.466374", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": "Silver", "blocking": null, "name": "pvvp_tput_ovsvanilla"}, {"project_name": "vsperf", "run": null, "description": "PVVP LTD.Throughput.RFC2544.BackToBackFrames for DPDK User Ovs", "ci_loop": null, "tags": null, "url": "http://pvvp_b2b_ovsdpdkuser", "_id": "56600b2a514bc5087f3cfe3e", "catalog_description": "PVVP Back To Back Frames for DPDK User Ovs", "creation_date": "2015-12-03 09:28:10.150217", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": "Silver", "blocking": null, "name": "pvvp_back2back_ovsdpdkvhost"}, {"project_name": "vsperf", "run": null, "description": "PVVP LTD.Throughput.RFC2544.BackToBackFrames for Vanilla Ovs", "ci_loop": null, "tags": null, "url": "http://pvvp_b2b_ovsvirtio", "_id": "56600b4f514bc5087f3cfe3f", "catalog_description": "PVVP Back To Back Frames for Vanilla Ovs", "creation_date": "2015-12-03 09:28:47.108529", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": "Silver", "blocking": null, "name": "pvvp_back2back_ovsvanilla"}, {"project_name": "vsperf", "run": "", "description": "", "ci_loop": "", "tags": "ovs,dpdk", "url": "", "_id": "591e8a8f41b755000a68c831", "catalog_description": "Phy2Phy Continuous Stream DPDK", "creation_date": "2017-05-19 06:02:55.177254", "domains": "compute", "dependencies": "", "version": ">euphrates", "criteria": "", "tier": "performance", "trust": null, "blocking": "", "name": "phy2phy_cont_ovsdpdkvhost"}, {"project_name": "vsperf", "run": "", "description": "", "ci_loop": "", "tags": "ovs", "url": "", "_id": "5980d1b073ce050010c339ca", "catalog_description": "Phy2Phy Continuous Stream", "creation_date": "2017-08-01 19:08:32.518983", "domains": "compute", "dependencies": "", "version": "euphrates", "criteria": "", "tier": null, "trust": null, 
"blocking": "", "name": "phy2phy_cont_ovsvanilla"}, {"project_name": "vsperf", "run": "", "description": "", "ci_loop": "", "tags": "", "url": "", "_id": "59a48f18dc5815000e54a624", "catalog_description": "LTD.Throughput.RFC2544.PacketLossRatio VPP DPDK", "creation_date": "2017-08-28 21:46:00.448859", "domains": "compute", "dependencies": "", "version": "euphrates", "criteria": "", "tier": null, "trust": null, "blocking": "", "name": "phy2phy_tput_vpp_vppdpdkvhost"}, {"project_name": "vsperf", "run": "", "description": "", "ci_loop": "", "tags": "", "url": "", "_id": "59a493e7dc5815000e54a62e", "catalog_description": "LTD.Throughput.RFC2544.BackToBackFrames VPP DPDK", "creation_date": "2017-08-28 22:06:31.415776", "domains": "compute", "dependencies": "", "version": "euphrates", "criteria": "", "tier": null, "trust": null, "blocking": "", "name": "phy2phy_back2back_vpp_vppdpdkvhost"}, {"project_name": "vsperf", "run": "", "description": "", "ci_loop": "", "tags": "", "url": "", "_id": "59a4946ddc5815000e54a630", "catalog_description": "LTD.Throughput.RFC2544.PacketLossRatio VPP DPDK", "creation_date": "2017-08-28 22:08:45.830223", "domains": "compute", "dependencies": "", "version": "euphrates", "criteria": "", "tier": null, "trust": null, "blocking": "", "name": "pvp_tput_vpp_vppdpdkvhost"}, {"project_name": "vsperf", "run": "", "description": "", "ci_loop": "", "tags": "", "url": "", "_id": "59a494cbdc5815000e54a632", "catalog_description": "LTD.Throughput.RFC2544.BackToBackFrames VPP DPDK", "creation_date": "2017-08-28 22:10:19.882545", "domains": "compute", "dependencies": "", "version": "euphrates", "criteria": "", "tier": null, "trust": null, "blocking": "", "name": "pvp_back2back_vpp_vppdpdkvhost"}, {"project_name": "vsperf", "run": "", "description": "", "ci_loop": "", "tags": "", "url": "", "_id": "59a495cfdc5815000e54a635", "catalog_description": "LTD.Throughput.RFC2544.PacketLossRatio VPP DPDK", "creation_date": "2017-08-28 22:14:39.603143", "domains": "compute", "dependencies": "", "version": "euphrates", "criteria": "", "tier": null, "trust": null, "blocking": "", "name": "pvvp_tput_vpp_vppdpdkvhost"}, {"project_name": "vsperf", "run": "", "description": "", "ci_loop": "", "tags": "", "url": "", "_id": "59a4964edc5815000e54a637", "catalog_description": "LTD.Throughput.RFC2544.BackToBackFrames VPP DPDK", "creation_date": "2017-08-28 22:16:46.066477", "domains": "compute", "dependencies": "", "version": "euphrates", "criteria": "", "tier": null, "trust": null, "blocking": "", "name": "pvvp_back2back_vpp_vppdpdkvhost"}]}
diff --git a/tools/docker/results/resultsdb/init_db.py b/tools/docker/results/resultsdb/init_db.py
new file mode 100644
index 00000000..40bb4ee2
--- /dev/null
+++ b/tools/docker/results/resultsdb/init_db.py
@@ -0,0 +1,110 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+"""
+Preload the results database with testcases.
+"""
+
+from __future__ import print_function
+import json
+import sys
+import requests
+
+DB_HOST_IP = sys.argv[1]
+TESTAPI_PORT = sys.argv[2]
+
+TARGET_URL = 'http://{}:{}/api/v1'.format(DB_HOST_IP, TESTAPI_PORT)
+
+
+def get(url):
+ """
+ Return the JSON-decoded body of an HTTP GET.
+ """
+ return requests.get(url).json()
+
+
+def post(url, data):
+ """
+ POST JSON data to a URL and print the response.
+ """
+ headers = {'Content-Type': 'application/json'}
+ res = requests.post(url, data=json.dumps(data), headers=headers)
+ print(res.text)
+
+
+def pod():
+ """
+ Register the PODs listed in pods.json, plus two default PODs.
+ """
+ target = '{}/pods'.format(TARGET_URL)
+
+ with open('pods.json', 'r') as podref:
+ pods = json.load(podref)
+ for apod in pods:
+ post(target, apod)
+
+ add_pod('master', 'metal')
+ add_pod('virtual_136_2', 'virtual')
+
+
+def project():
+ """
+ Register the projects listed in projects.json.
+ """
+ target = '{}/projects'.format(TARGET_URL)
+ with open('projects.json', 'r') as projref:
+ projects = json.load(projref)
+ for proj in projects:
+ post(target, proj)
+
+
+def cases():
+ """
+ Register the testcases listed in cases.json.
+ """
+ with open('cases.json', 'r') as caseref:
+ for line in caseref:
+ subcases = json.loads(line)
+ for cas in subcases["testcases"]:
+ target = '{}/projects/{}/cases'.format(TARGET_URL,
+ cas['project_name'])
+ post(target, cas)
+ add_case("functest", "tempest_custom")
+
+
+def add_pod(name, mode):
+ """
+ Add a single POD with the given name and mode.
+ """
+ data = {
+ "role": "",
+ "name": name,
+ "details": '',
+ "mode": mode,
+ "creation_date": "2017-2-23 11:23:03.765581"
+ }
+ pod_url = '{}/pods'.format(TARGET_URL)
+ post(pod_url, data)
+
+
+def add_case(projectname, casename):
+ """
+ Add a single testcase to the given project.
+ """
+ data = {
+ "project_name": projectname,
+ "name": casename,
+ }
+ case_url = '{}/projects/{}/cases'.format(TARGET_URL, projectname)
+ post(case_url, data)
+
+
+if __name__ == '__main__':
+ pod()
+ project()
+ cases()
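init_db.py takes the testapi host and port as positional arguments, e.g. python init_db.py 127.0.0.1 8000. A minimal post-seeding check, assuming the testapi also serves GET on the same cases path (host and port illustrative):

# Sketch: verify the seeded vsperf cases are visible (host/port illustrative).
import requests

resp = requests.get('http://127.0.0.1:8000/api/v1/projects/vsperf/cases')
resp.raise_for_status()
print(len(resp.json().get('testcases', [])), 'cases registered')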
diff --git a/tools/docker/results/resultsdb/pods.json b/tools/docker/results/resultsdb/pods.json
new file mode 100644
index 00000000..3cd1dadb
--- /dev/null
+++ b/tools/docker/results/resultsdb/pods.json
@@ -0,0 +1,382 @@
+[
+ {
+ "name": "lf-pod2",
+ "creation_date": "2015-01-01 08:00:00.476549",
+ "role": "production-ci",
+ "mode": "metal",
+ "_id": "5617f98e514bc5355b51f6b5",
+ "details": ""
+ },
+ {
+ "name": "lf-pod1",
+ "creation_date": "2015-01-01 08:00:00.476549",
+ "role": "production-ci",
+ "mode": "metal",
+ "_id": "5617fa5a514bc5355b51f6b6",
+ "details": ""
+ },
+ {
+ "name": "orange-pod2",
+ "creation_date": "2015-10-27 15:27:30.312012",
+ "role": "",
+ "mode": "metal",
+ "_id": "562f97e2514bc5174d053d38",
+ "details": "https://wiki.opnfv.org/opnfv-orange"
+ },
+ {
+ "name": "unknown-pod",
+ "creation_date": "2015-11-30 08:55:02.550465",
+ "role": "",
+ "mode": "undefined",
+ "_id": "565c0ee6514bc5087f2ddcf7",
+ "details": null
+ },
+ {
+ "name": "huawei-pod1",
+ "creation_date": "",
+ "role": "",
+ "mode": "metal",
+ "_id": "566fea58514bc5068a345d4b",
+ "details": ""
+ },
+ {
+ "name": "intel-pod5",
+ "creation_date": "2015-12-15 10:24:53.476549",
+ "role": "",
+ "mode": "metal",
+ "_id": "566fea75514bc5068a345d4c",
+ "details": null
+ },
+ {
+ "name": "intel-pod3",
+ "creation_date": "2015-12-21 17:38:31.435593",
+ "role": "",
+ "mode": "metal",
+ "_id": "56783917514bc5068a345d97",
+ "details": null
+ },
+ {
+ "name": "ericsson-pod1",
+ "creation_date": "2015-12-22 07:21:03.765581",
+ "role": "",
+ "mode": "metal",
+ "_id": "5678f9df514bc5068a345d98",
+ "details": null
+ },
+ {
+ "name": "ericsson-pod2",
+ "creation_date": "2015-12-22 07:21:18.173966",
+ "role": "",
+ "mode": "metal",
+ "_id": "5678f9ee514bc5068a345d99",
+ "details": null
+ },
+ {
+ "name": "dell-us-testing-bm-1",
+ "creation_date": "2016-01-08 12:41:54.097114",
+ "role": "",
+ "mode": "metal",
+ "_id": "568fae92514bc5068a60e7d2",
+ "details": null
+ },
+ {
+ "name": "dell-us-deploying-bm-3",
+ "creation_date": "2016-01-08 14:13:16.740415",
+ "role": "",
+ "mode": null,
+ "_id": "568fc3fc514bc5068a60e7d4",
+ "details": null
+ },
+ {
+ "name": "dell-us-deploying-bm-2",
+ "creation_date": "2016-01-08 14:15:54.037500",
+ "role": "",
+ "mode": null,
+ "_id": "568fc49a514bc5068a60e7d5",
+ "details": null
+ },
+ {
+ "name": "dell-us-deploying-bm3",
+ "creation_date": "2016-01-15 12:14:20.956198",
+ "role": "",
+ "mode": "metal",
+ "_id": "5698e29c514bc56e65a47bc8",
+ "details": null
+ },
+ {
+ "name": "intel-pod6",
+ "creation_date": "2016-01-22 13:32:18.767326",
+ "role": "",
+ "mode": "metal",
+ "_id": "56a22f62514bc541f885b2c0",
+ "details": null
+ },
+ {
+ "name": "huawei-virtual2",
+ "creation_date": "",
+ "role": "",
+ "mode": "virtual",
+ "_id": "56a9d7ac851d7e6a0f74930d",
+ "details": ""
+ },
+ {
+ "name": "huawei-virtual1",
+ "creation_date": "",
+ "role": "",
+ "mode": "virtual",
+ "_id": "56a9f411851d7e6a0f749313",
+ "details": ""
+ },
+ {
+ "name": "huawei-virtual3",
+ "creation_date": "",
+ "role": "",
+ "mode": "virtual",
+ "_id": "56e67ba6851d7e4b188676bc",
+ "details": ""
+ },
+ {
+ "name": "huawei-virtual4",
+ "creation_date": "",
+ "role": "",
+ "mode": "virtual",
+ "_id": "56e67bb6851d7e4b188676bd",
+ "details": ""
+ },
+ {
+ "name": "intel-pod8",
+ "creation_date": "2016-03-14 08:52:47.576623",
+ "role": "",
+ "mode": "metal",
+ "_id": "56e67bdf851d7e4b188676be",
+ "details": null
+ },
+ {
+ "name": "intel-pod7",
+ "creation_date": "2016-03-14 08:53:00.757525",
+ "role": "",
+ "mode": "metal",
+ "_id": "56e67bec851d7e4b188676c0",
+ "details": null
+ },
+ {
+ "name": "huawei-pod2",
+ "creation_date": "",
+ "role": "",
+ "mode": "metal",
+ "_id": "56e67c35851d7e4b188676c1",
+ "details": ""
+ },
+ {
+ "name": "ericsson-virtual1",
+ "creation_date": "2016-03-14 08:58:06.432105",
+ "role": "",
+ "mode": "virtual",
+ "_id": "56e67d1e851d7e4b188676c2",
+ "details": null
+ },
+ {
+ "name": "arm-pod1",
+ "creation_date": "2016-05-05 09:18:54.879497",
+ "role": "",
+ "mode": "metal",
+ "_id": "572b0ffe9377c51472b7878f",
+ "details": null
+ },
+ {
+ "name": "zte-pod1",
+ "creation_date": "2016-05-12 03:36:56.091397",
+ "role": "",
+ "mode": "metal",
+ "_id": "5733fa589377c548e8df3834",
+ "details": null
+ },
+ {
+ "name": "intel-virtual1",
+ "creation_date": "2016-08-23 17:22:30.901081",
+ "role": null,
+ "mode": "virtual",
+ "_id": "57bc86561d2c6e000ab19d93",
+ "details": null
+ },
+ {
+ "name": "intel-virtual2",
+ "creation_date": "2016-08-23 17:24:23.143681",
+ "role": null,
+ "mode": "virtual",
+ "_id": "57bc86c71d2c6e000ab19d94",
+ "details": null
+ },
+ {
+ "name": "zte-pod2",
+ "creation_date": "2016-09-06 09:49:20.228736",
+ "role": "",
+ "mode": "metal",
+ "_id": "57ce91201d2c6e000ab1c261",
+ "details": ""
+ },
+ {
+ "name": "zte-pod3",
+ "creation_date": "2016-09-06 09:49:26.019816",
+ "role": "",
+ "mode": "metal",
+ "_id": "57ce91261d2c6e000ab1c263",
+ "details": ""
+ },
+ {
+ "name": "arm-pod3",
+ "creation_date": "2016-09-12 09:47:50.791351",
+ "role": "",
+ "mode": "metal",
+ "_id": "57d679c61d2c6e000ab1d6bd",
+ "details": "ARM POD3"
+ },
+ {
+ "name": "cisco-pod1",
+ "creation_date": "2016-09-13 13:01:21.906958",
+ "role": "Community lab",
+ "mode": "metal",
+ "_id": "57d7f8a11d2c6e000ab1db88",
+ "details": "not yet declared in CI but needed to validate vpp scenarios for Colorado"
+ },
+ {
+ "name": "ool-virtual1",
+ "creation_date": "2016-09-19 12:43:50.313032",
+ "role": "",
+ "mode": "virtual",
+ "_id": "57dfdd861d2c6e000ab1f37b",
+ "details": "Okinawa lab"
+ },
+ {
+ "name": "ericsson-pod3",
+ "creation_date": "2016-09-26 09:45:40.565795",
+ "role": "",
+ "mode": "metal",
+ "_id": "57e8ee441d2c6e000ab20fa9",
+ "details": ""
+ },
+ {
+ "name": "ericsson-pod4",
+ "creation_date": "2016-09-26 09:45:48.980198",
+ "role": "",
+ "mode": "metal",
+ "_id": "57e8ee4c1d2c6e000ab20faa",
+ "details": ""
+ },
+ {
+ "name": "ericsson-virtual2",
+ "creation_date": "2016-09-26 09:46:05.508776",
+ "role": "",
+ "mode": "virtual",
+ "_id": "57e8ee5d1d2c6e000ab20fac",
+ "details": ""
+ },
+ {
+ "name": "ericsson-virtual3",
+ "creation_date": "2016-09-26 09:46:10.244443",
+ "role": "",
+ "mode": "virtual",
+ "_id": "57e8ee621d2c6e000ab20fad",
+ "details": ""
+ },
+ {
+ "name": "ericsson-virtual4",
+ "creation_date": "2016-09-26 09:46:14.734383",
+ "role": "",
+ "mode": "virtual",
+ "_id": "57e8ee661d2c6e000ab20fae",
+ "details": ""
+ },
+ {
+ "name": "ericsson-virtual5",
+ "creation_date": "2016-09-26 09:46:19.477110",
+ "role": "",
+ "mode": "virtual",
+ "_id": "57e8ee6b1d2c6e000ab20faf",
+ "details": ""
+ },
+ {
+ "name": "intel-pod9",
+ "creation_date": "2016-11-23 14:07:35.963037",
+ "role": "",
+ "mode": "metal",
+ "_id": "5835a2a71d2c6e000ab2bb4b",
+ "details": "https://wiki.opnfv.org/display/pharos/Intel+Pod9"
+ },
+ {
+ "name": "huawei-pod3",
+ "creation_date": "2017-01-17 13:36:03.908341",
+ "role": "production-ci",
+ "mode": "metal",
+ "_id": "587e1dc38cf551000c780eda",
+ "details": ""
+ },
+ {
+ "name": "huawei-pod4",
+ "creation_date": "2017-01-17 13:36:10.759860",
+ "role": "production-ci",
+ "mode": "metal",
+ "_id": "587e1dca8cf551000c780edb",
+ "details": ""
+ },
+ {
+ "name": "huawei-pod5",
+ "creation_date": "2017-01-17 13:36:15.447849",
+ "role": "production-ci",
+ "mode": "metal",
+ "_id": "587e1dcf8cf551000c780edc",
+ "details": ""
+ },
+ {
+ "name": "huawei-pod6",
+ "creation_date": "2017-01-18 10:53:10.586724",
+ "role": "production-ci",
+ "mode": "metal",
+ "_id": "587f49168cf551000c780f5e",
+ "details": ""
+ },
+ {
+ "name": "huawei-pod7",
+ "creation_date": "2017-01-18 10:53:15.373953",
+ "role": "production-ci",
+ "mode": "metal",
+ "_id": "587f491b8cf551000c780f5f",
+ "details": ""
+ },
+ {
+ "name": "huawei-pod12",
+ "creation_date": "2017-02-09 07:22:46.425836",
+ "role": "production-ci",
+ "mode": "metal",
+ "_id": "589c18c68cf551000c7820e8",
+ "details": ""
+ },
+ {
+ "name": "intel-pod12",
+ "creation_date": "2017-05-17 14:11:18.852731",
+ "role": "production-ci",
+ "details": "performance",
+ "query": "<function query at 0x7f574c29c500>",
+ "mode": "metal",
+ "_id": "591c5a06ee2e3f000a50f0b4",
+ "miss_fields": [
+ "name"
+ ]
+ },
+ {
+ "name": "cisco-vina-pod10",
+ "creation_date": "2017-05-29 09:13:20.818497",
+ "role": "production-ci",
+ "mode": "metal",
+ "_id": "592be63078a2ad000ae6aad7",
+ "details": ""
+ },
+ {
+ "name": "zte-virtual1",
+ "creation_date": "2017-05-30 14:11:04.264967",
+ "role": "",
+ "mode": "baremetal",
+ "_id": "592d7d7878a2ad000ae6ac49",
+ "details": ""
+ }
+] \ No newline at end of file
diff --git a/tools/docker/results/resultsdb/projects.json b/tools/docker/results/resultsdb/projects.json
new file mode 100644
index 00000000..81c3d77f
--- /dev/null
+++ b/tools/docker/results/resultsdb/projects.json
@@ -0,0 +1,8 @@
+[
+ {
+ "_id": "5641e12d514bc5174df3d77e",
+ "description": "OPNFV vsperf project",
+ "name": "vsperf",
+ "creation_date": "2015-11-10 12:21:01.464979"
+ }
+]
diff --git a/tools/docker/testcontrol/auto/controller/Dockerfile b/tools/docker/testcontrol/auto/controller/Dockerfile
new file mode 100644
index 00000000..4fbf7294
--- /dev/null
+++ b/tools/docker/testcontrol/auto/controller/Dockerfile
@@ -0,0 +1,23 @@
+FROM python:3.6
+LABEL maintainer="sridhar.rao@spirent.com"
+
+ENV GRPC_PYTHON_VERSION 1.4.0
+RUN apt-get update && apt-get -y install python3-pip && apt-get -y install openssh-server
+RUN pip3 install grpcio==${GRPC_PYTHON_VERSION} grpcio-tools==${GRPC_PYTHON_VERSION}
+RUN pip3 install paramiko
+RUN pip3 install chainmap
+RUN pip3 install oslo.utils
+RUN pip3 install scp
+
+WORKDIR /usr/src/app
+
+COPY ./vsperf ./vsperf
+
+VOLUME ["/usr/src/app/vsperf"]
+
+EXPOSE 50052
+
+CMD ["python3", "./vsperf/vsperf_controller.py"]
+
+#CMD tail -f /dev/null
+
diff --git a/tools/docker/testcontrol/auto/controller/list.env b/tools/docker/testcontrol/auto/controller/list.env
new file mode 100644
index 00000000..2883021b
--- /dev/null
+++ b/tools/docker/testcontrol/auto/controller/list.env
@@ -0,0 +1,13 @@
+DUT_IP_ADDRESS=10.10.120.24
+DUT_USERNAME=opnfv
+DUT_PASSWORD=opnfv
+
+TGEN_IP_ADDRESS=10.10.120.25
+
+VSPERF_TESTS=phy2phy_tput,pvp_tput
+VSPERF_CONFFILE=vsperf.conf
+
+VSPERF_TRAFFICGEN_MODE=NO
+
+CLEAN_UP=NO
+
diff --git a/tools/docker/testcontrol/auto/controller/vsperf/__init__.py b/tools/docker/testcontrol/auto/controller/vsperf/__init__.py
new file mode 100644
index 00000000..ad0ebec3
--- /dev/null
+++ b/tools/docker/testcontrol/auto/controller/vsperf/__init__.py
@@ -0,0 +1 @@
+#### Empty
diff --git a/tools/docker/testcontrol/auto/controller/vsperf/vsperf.conf b/tools/docker/testcontrol/auto/controller/vsperf/vsperf.conf
new file mode 100644
index 00000000..50d40f49
--- /dev/null
+++ b/tools/docker/testcontrol/auto/controller/vsperf/vsperf.conf
@@ -0,0 +1,21 @@
+VSWITCH_BRIDGE_NAME = 'vsperf-br0'
+WHITELIST_NICS = ['02:00.0', '02:00.1']
+TRAFFICGEN = 'Trex'
+TRAFFICGEN_TREX_HOST_IP_ADDR = '10.10.120.25'
+TRAFFICGEN_TREX_USER = 'root'
+TRAFFICGEN_TREX_BASE_DIR = '/root/trex_2.37/scripts/'
+TRAFFICGEN_TREX_LINE_SPEED_GBPS = '10'
+TRAFFICGEN_TREX_PORT1 = '0000:81:00.0'
+TRAFFICGEN_TREX_PORT2 = '0000:81:00.1'
+TRAFFICGEN_TREX_PROMISCUOUS = False
+TRAFFICGEN_DURATION=1
+TRAFFICGEN_LOSSRATE=0
+TRAFFICGEN_RFC2544_TESTS=10
+#TRAFFICGEN_PKT_SIZES=(64,128,256,512,1024,1280,1518)
+TRAFFICGEN_PKT_SIZES=(64,)
+GUEST_TESTPMD_FWD_MODE = ['io']
+GUEST_IMAGE = ['/home/opnfv/vnfs/vloop-vnf-ubuntu-18.04_20180920.qcow2']
+TRAFFICGEN_TREX_LATENCY_PPS = 1000
+TRAFFICGEN_TREX_RFC2544_BINARY_SEARCH_LOSS_VERIFICATION = True
+TRAFFICGEN_TREX_RFC2544_MAX_REPEAT = 2
+
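vsperf.conf files are plain Python assignments. A quick local sanity check that the file parses as Python (a sketch; exec-ing config this way is for trusted, local files only):

# Sketch: sanity-check that vsperf.conf parses as Python assignments.
settings = {}
with open('vsperf.conf') as conf:
    exec(conf.read(), {}, settings)
print(sorted(settings))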
diff --git a/tools/docker/testcontrol/auto/controller/vsperf/vsperf_controller.py b/tools/docker/testcontrol/auto/controller/vsperf/vsperf_controller.py
new file mode 100644
index 00000000..1b088fea
--- /dev/null
+++ b/tools/docker/testcontrol/auto/controller/vsperf/vsperf_controller.py
@@ -0,0 +1,469 @@
+# Copyright 2018-19 Spirent Communications.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+VSPERF-controller
+"""
+
+# Fetch the environment variables for the controller. You can edit the
+# list.env file to configure these variables.
+
+#pylint: disable=global-statement,no-else-continue
+#pylint: disable=too-many-branches
+
+import os
+import time
+import math
+import ast
+from utils import ssh
+
+_ONE_DAY_IN_SECONDS = 60 * 60 * 24
+TIMER = float()
+
+
+DUT_IP = os.getenv('DUT_IP_ADDRESS')
+DUT_USER = os.getenv('DUT_USERNAME')
+DUT_PWD = os.getenv('DUT_PASSWORD')
+
+TGEN_IP = os.getenv('TGEN_IP_ADDRESS')
+
+VSPERF_TEST = os.getenv('VSPERF_TESTS')
+VSPERF_CONF = os.getenv('VSPERF_CONFFILE')
+VSPERF_TRAFFICGEN_MODE = str(os.getenv('VSPERF_TRAFFICGEN_MODE'))
+
+START_COLLECTD = os.getenv('START_COLLECTD')
+START_BEATS = os.getenv('START_BEATS')
+CLEAN_UP = os.getenv('CLEAN_UP')
+
+DUT_CLIENT = None
+TGEN_CLIENT = None
+SANITY_CHECK_DONE_LIST = list()
+
+
+def host_connect():
+ """
+ Handle host connectivity to DUT
+ """
+ global DUT_CLIENT
+ DUT_CLIENT = ssh.SSH(host=DUT_IP, user=DUT_USER, password=DUT_PWD)
+ print("DUT Successfully Connected ..............................................[OK] \n ")
+
+def upload_test_config_file():
+ """
+ Upload the test configuration file to the DUT.
+ """
+ localpath = '/usr/src/app/vsperf/vsperf.conf'
+ if not os.path.exists(localpath):
+ print("VSPERF Test config File does not exists.......................[Failed]")
+ return
+ remotepath = '~/vsperf.conf'
+ check_test_config_cmd = "find ~/ -maxdepth 1 -name '{}'".format(
+ remotepath[2:])
+ check_test_result = str(DUT_CLIENT.execute(check_test_config_cmd)[1])
+ if remotepath[2:] in check_test_result:
+ DUT_CLIENT.run("rm -f {}".format(remotepath[2:]))
+ DUT_CLIENT.put_file(localpath, remotepath)
+ check_test_result_1 = str(DUT_CLIENT.execute(check_test_config_cmd)[1])
+ if remotepath[2:] in check_test_result_1:
+ print(
+ "Test Configuration File Uploaded on DUT-Host.............................[OK] \n ")
+ else:
+ print("VSPERF Test config file upload failed.....................................[Critical]")
+
+def start_beats():
+ """
+ Start fileBeats on DUT
+ """
+ run_cmd = "echo '{}' | sudo -S service filebeat start".format(DUT_PWD)
+ DUT_CLIENT.run(run_cmd, pty=True)
+ print(
+ "Beats are started on DUT-Host............................................[OK] \n")
+
+def start_collectd():
+ """
+ start the collectd
+ """
+ run_cmd = "echo '{}' | sudo -S service collectd start".format(DUT_PWD)
+ DUT_CLIENT.run(run_cmd, pty=True)
+ print(
+ "Collectd is started on DUT-Host............................................[OK] \n")
+
+def run_vsperf_test():
+ """
+ Run the requested VSPERF tests on the DUT.
+ """
+ global TIMER
+ rmv_cmd = "cd /mnt/huge && echo {} | sudo -S rm -rf *".format(DUT_PWD)
+ DUT_CLIENT.run(rmv_cmd, pty=True)
+ cmd = "source ~/vsperfenv/bin/activate ; "
+ #cmd = "scl enable python33 bash ; "
+ cmd += "cd vswitchperf && "
+ cmd += "./vsperf "
+ if VSPERF_CONF:
+ cmd += "--conf-file ~/vsperf.conf "
+ if "yes" in VSPERF_TRAFFICGEN_MODE.lower():
+ cmd += "--mode trafficgen"
+ vsperf_test_list = VSPERF_TEST.split(",")
+ print(vsperf_test_list)
+ for test in vsperf_test_list:
+ atest = cmd
+ atest += test
+ DUT_CLIENT.run(atest, pty=True)
+ print(
+ "Test Successfully running................................................[OK]\n ")
+
+
+def test_status():
+ """
+ Check the test status after the test run.
+ """
+ testtype_list = VSPERF_TEST.split(",")
+ num_test = len(testtype_list)
+ test_success = []
+ test_failed = []
+ testtype_list_len = len(testtype_list)
+ for test in testtype_list:
+ passed_minutes = 5
+ latest_result_cmd = "find /tmp -mindepth 1 -type d -cmin -{} -printf '%f'".format(
+ passed_minutes)
+ test_result_dir = str(
+ (DUT_CLIENT.execute(latest_result_cmd)[1]).split('find')[0])
+ test_date_cmd = "date +%F"
+ test_date = str(DUT_CLIENT.execute(test_date_cmd)[1]).replace("\n", "")
+ if test_date in test_result_dir:
+ testcase_check_cmd = "cd /tmp && cd `ls -t | grep results | head"
+ testcase_check_cmd += " -{} | tail -1` && find . -maxdepth 1 -name '*{}*'".\
+ format(testtype_list_len, test)
+ testcase_check_output = str(
+ DUT_CLIENT.execute(testcase_check_cmd)[1]).split('\n', 2)
+ check = 0
+ for i in testcase_check_output:
+ if (".csv" in i) or (".md" in i) or (".rst" in i):
+ check += 1
+ if check == 3:
+ test_success.append(test)
+ else:
+ test_failed.append(test)
+ testtype_list_len -= 1
+ if num_test == len(test_success):
+ print("All Tests Completed Successfully on DUT-Host. Results... [OK]")
+ elif not test_success:
+ print("All Tests Failed on DUT-Host. \nResults... [Failed]")
+ else:
+ print(
+ "The {} test(s) failed. Results... [Failed]\n"\
+ "All other tests completed successfully on DUT-Host. Results... [OK] ".\
+ format(test_failed))
+
+
+def vsperf_remove():
+ """
+ Remove VSPERF from the DUT.
+ """
+ vsperf_rm_cmd = "echo '{}' | sudo -S rm -r ~/vswitchperf".format(DUT_PWD)
+ DUT_CLIENT.run(vsperf_rm_cmd)
+ vsperfenv_rm_cmd = "echo '{}' | sudo -S rm -r -f ~/vsperfenv".\
+ format(DUT_PWD)
+ DUT_CLIENT.run(vsperfenv_rm_cmd)
+
+
+def remove_uploaded_config():
+ """
+ Remove all the uploaded configuration files
+ """
+ vconfig_rm_cmd = "rm ~/vsperf.conf"
+ DUT_CLIENT.run(vconfig_rm_cmd)
+ cdconfig_rm_cmd = "echo '{}' | sudo -S rm /opt/collectd/etc/collectd.conf".\
+ format(DUT_PWD)
+ DUT_CLIENT.run(cdconfig_rm_cmd)
+
+
+def result_folders_remove():
+ """
+ Remove result folder on DUT
+ """
+ remove_cmd = "rm -r /tmp/*results*"
+ DUT_CLIENT.run(remove_cmd)
+
+
+def collectd_remove():
+ """
+ Remove collectd from DUT
+ """
+ collectd_dwn_rm_cmd = "echo '{}' | sudo -S rm -r -f ~/collectd".format(
+ DUT_PWD)
+ DUT_CLIENT.run(collectd_dwn_rm_cmd)
+ collectd_rm_cmd = "echo '{}' | sudo -S rm -r -f /opt/collectd".format(
+ DUT_PWD)
+ DUT_CLIENT.run(collectd_rm_cmd)
+
+
+def terminate_vsperf():
+ """
+ Terminate the VSPERF and kill processes
+ """
+ stress_kill_cmd = "echo '{}' | sudo -S pkill stress &> /dev/null".format(
+ DUT_PWD)
+ python3_kill_cmd = "echo '{}' | sudo -S pkill python3 &> /dev/null".format(
+ DUT_PWD)
+ qemu_kill_cmd = "echo '{}' | sudo -S killall -9 qemu-system-x86_64 &> /dev/null".format(
+ DUT_PWD)
+ DUT_CLIENT.run(stress_kill_cmd)
+ DUT_CLIENT.run(python3_kill_cmd)
+ DUT_CLIENT.run(qemu_kill_cmd)
+
+ # sometimes qemu resists to terminate, so wait a bit and kill it again
+ qemu_check_cmd = "pgrep qemu-system-x86_64"
+ qemu_cmd_response = DUT_CLIENT.execute(qemu_check_cmd)[1]
+
+ if qemu_cmd_response != '':
+ time.sleep(5)
+ DUT_CLIENT.run(qemu_kill_cmd)
+ time.sleep(5)
+
+ ovs_kill_cmd = "echo '{}' | sudo pkill ovs-vswitchd &> /dev/null".format(
+ DUT_PWD)
+ ovsdb_kill_cmd = "echo '{}' | sudo pkill ovsdb-server &> /dev/null".format(
+ DUT_PWD)
+ vppctl_kill_cmd = "echo '{}' | sudo pkill vppctl &> /dev/null".format(
+ DUT_PWD)
+ vpp_kill_cmd = "echo '{}' | sudo pkill vpp &> /dev/null".format(DUT_PWD)
+ vpp_cmd = "echo '{}' | sudo pkill -9 vpp &> /dev/null".format(DUT_PWD)
+
+ DUT_CLIENT.run(ovs_kill_cmd)
+ time.sleep(1)
+ DUT_CLIENT.run(ovsdb_kill_cmd)
+ time.sleep(1)
+ DUT_CLIENT.run(vppctl_kill_cmd)
+ time.sleep(1)
+ DUT_CLIENT.run(vpp_kill_cmd)
+ time.sleep(1)
+ DUT_CLIENT.run(vpp_cmd)
+ time.sleep(1)
+
+ print(
+ "All the VSPERF related process terminated successfully..............[OK]")
+
+
+def sanity_collectd_check():
+ """
+ Check and verify collectd is able to run and start properly
+ """
+ global SANITY_CHECK_DONE_LIST
+ check_collectd_cmd = "find /opt -maxdepth 1 -name 'collectd'"
+ check_test_result = str(DUT_CLIENT.execute(check_collectd_cmd)[1])
+ if "collectd" in check_test_result:
+ check_collectd_run_cmd = "echo {} | sudo -S service collectd start".format(
+ DUT_PWD)
+ DUT_CLIENT.run(check_collectd_run_cmd, pty=True)
+ check_collectd_status_cmd = "ps aux | grep collectd"
+ check_collectd_status = str(
+ DUT_CLIENT.execute(check_collectd_status_cmd)[1])
+ if "/sbin/collectd" in check_collectd_status:
+ SANITY_CHECK_DONE_LIST.append(int(1))
+ print(
+ "Collectd is working Fine ................................................[OK] \n ")
+ else:
+ print(
+ "Collectd Fail to Start, Install correctly before running Test....[Failed]\n ")
+ else:
+ print(
+ "Collectd is not installed yet........................................[Failed]\n")
+
+
+def sanity_vnf_path():
+ """
+ Check if VNF image is available on the configured path in Test Config File
+ """
+ # fetch the VNF path we placed in vsperf.conf file
+ global SANITY_CHECK_DONE_LIST
+ vsperf_conf_path = open('/usr/src/app/vsperf/vsperf.conf')
+ vsperf_conf_read = vsperf_conf_path.readlines()
+ for i in vsperf_conf_read:
+ if 'GUEST_IMAGE' in i:
+ vnf_image_path = i.split("'")[1]
+ vnf_path_check_cmd = "find {}".format(vnf_image_path)
+ vnf_path_check_result = str(
+ DUT_CLIENT.execute(vnf_path_check_cmd)[1])
+ if vnf_image_path in vnf_path_check_result:
+ SANITY_CHECK_DONE_LIST.append(int(2))
+ print(
+ "Test Configratuion file has Correct VNF path information on DUT-Host.." \
+ "...[OK]\n ")
+ else:
+ print(
+ "Test Configuration file has incorrect VNF path information......" \
+ "....[FAILED]\n")
+
+def sanity_vsperf_check():
+ """
+ We have to make sure that VSPERF is installed correctly
+ """
+ global SANITY_CHECK_DONE_LIST
+ vsperf_check_command = "source ~/vsperfenv/bin/activate ; cd vswitchperf* && ./vsperf --help"
+ vsperf_check_cmd_result = str(DUT_CLIENT.execute(vsperf_check_command)[1])
+ vsperf_verify_list = [
+ 'usage',
+ 'positional arguments',
+ 'optional arguments',
+ 'test selection options',
+ 'test behavior options']
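+ # all five sections must appear in the --help output; the loop breaks on the first miss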
+ for idx, i in enumerate(vsperf_verify_list, start=1):
+ if str(i) in vsperf_check_cmd_result:
+ if idx < 5:
+ continue
+ elif idx == 5:
+ SANITY_CHECK_DONE_LIST.append(int(3))
+ print("VSPERF Installed Correctly and Working fine......................." \
+ ".......[OK]\n")
+ else:
+ print(
+ "VSPERF DID Not Installed Correctly , INSTALL IT AGAIN...........[Critical]\n")
+ else:
+ print(
+ "VSPERF DID Not Installed Correctly , INSTALL IT AGAIN................[Critical]\n")
+ break
+
+def variable_from_test_config(aparameter):
+ """This function can be use to read any configuration paramter from vsperf.conf"""
+ read_cmd = 'cat ~/vsperf.conf | grep "{}"'.format(aparameter)
+ read_cmd_output = str(DUT_CLIENT.execute(read_cmd)[1])
+ print(read_cmd_output)
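+ # a parameter that is absent or commented out is treated as unset (returns 0)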
+ if not read_cmd_output or '#' in read_cmd_output:
+ return 0
+ return read_cmd_output.split("=")[1].strip()
+
+def cpumask2coreids(mask):
+ """conver mask to coreids"""
+ intmask = int(mask, 16)
+ i = 1
+ coreids = []
+ while i <= intmask:  # '<=' so the highest set bit is not skipped
+ if i & intmask:
+ coreids.append(str(math.frexp(i)[1]-1))
+ i = i << 1
+ return coreids
+
+def sanity_cpu_allocation_check():
+ """It will check the cpu allocation before run test"""
+ global SANITY_CHECK_DONE_LIST
+ read_setting_cmd = "source vsperfenv/bin/activate ; cd vswitchperf* && "
+ read_setting_cmd += './vsperf --list-settings'
+ default_vsperf_settings = ast.literal_eval(str(DUT_CLIENT.execute(read_setting_cmd)[1]))
+ default_cpu_map = default_vsperf_settings["VSWITCH_VHOST_CPU_MAP"]
+ default_vswitch_pmd_cpu_mask = str(default_vsperf_settings["VSWITCH_PMD_CPU_MASK"])
+ default_vswitch_vhost_cpu_map = [str(x) for x in default_cpu_map]
+ vswitch_pmd_cpu_mask = variable_from_test_config("VSWITCH_PMD_CPU_MASK")
+ vswitch_cpu_map = (variable_from_test_config("VSWITCH_VHOST_CPU_MAP"))
+ vswitch_vhost_cpu_map = 0
+ if vswitch_cpu_map != 0:
+ vswitch_vhost_cpu_map = [str(x) for x in ast.literal_eval(vswitch_cpu_map)]
+
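+ # the core ids derived from VSWITCH_PMD_CPU_MASK must be a subset of the vhost CPU map;
+ # whichever value is missing from the test config falls back to the vsperf default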
+ if vswitch_pmd_cpu_mask == 0 and vswitch_vhost_cpu_map == 0:
+ print("CPU allocation Check Done,"\
+ "\nNo vswitch_pmd_cpu_mask or vswitch_vhost_cpu_map assign in test config file\n" \
+ "Using Default Settings ..................................................[OK]\n")
+ elif vswitch_pmd_cpu_mask != 0 and vswitch_vhost_cpu_map == 0:
+ core_id = cpumask2coreids(vswitch_pmd_cpu_mask)
+ print(core_id)
+ if len(default_vswitch_vhost_cpu_map) >= len(core_id):
+ if all(elem in default_vswitch_vhost_cpu_map for elem in core_id):
+ print("CPU allocation properly done on DUT-Host.................[OK]\n")
+ else:
+ print("CPU allocation not done properly on DUT-Host............[Failed]\n")
+ else:
+ print("CPU allocation not done properly on DUT-Host............[Failed]\n")
+ elif vswitch_pmd_cpu_mask == 0 and vswitch_vhost_cpu_map != 0:
+ core_id_1 = cpumask2coreids(default_vswitch_pmd_cpu_mask)
+ print(core_id_1)
+ if len(vswitch_vhost_cpu_map) >= len(core_id_1):
+ if all(elem in vswitch_vhost_cpu_map for elem in core_id_1):
+ print("CPU allocation properly done on DUT-Host.................[OK]\n")
+ else:
+ print("CPU allocation not done properly on DUT-Host............[Failed]\n")
+ else:
+ print("CPU allocation not done properly on DUT-Host............[Failed]\n")
+ else:
+ core_id_2 = cpumask2coreids(vswitch_pmd_cpu_mask)
+ print(core_id_2)
+ if len(vswitch_vhost_cpu_map) >= len(core_id_2):
+ if all(elem in vswitch_vhost_cpu_map for elem in core_id_2):
+ print("CPU allocation properly done on DUT-Host.................[OK]\n")
+ else:
+ print("CPU allocation not done properly on DUT-Host............[Failed]\n")
+ else:
+ print("CPU allocation not done properly on DUT-Host............[Failed]\n")
+
+
+
+def sanity_dut_conn_tgen_check():
+ """
+ Confirm that the DUT-Host can reach the Traffic Generator Host
+ """
+ global SANITY_CHECK_DONE_LIST
+ tgen_connectivity_check_cmd = "ping {} -c 1".format(TGEN_IP)
+ tgen_connectivity_check_result = int(DUT_CLIENT.execute(tgen_connectivity_check_cmd)[0])
+ if tgen_connectivity_check_result == 0:
+ SANITY_CHECK_DONE_LIST.append(int(5))
+ print(
+ "DUT-Host is successfully reachable to Traffic Generator Host.............[OK]\n")
+ else:
+ print(
+ "DUT-host is unsuccessful to reach the Traffic Generator Host.............[Failed]")
+ print(
+ "Make sure to establish connection before running Test...............[Critical]\n")
+
+if DUT_IP:
+ host_connect()
+if not DUT_CLIENT:
+ print('Failed to connect to DUT ...............[Critical]')
+ sys.exit()
+else:
+ upload_test_config_file()
+ sanity_vnf_path()
+ sanity_cpu_allocation_check()
+ sanity_collectd_check()
+ sanity_vsperf_check()
+ sanity_dut_conn_tgen_check()
+ if "yes" in START_COLLECTD.lower():
+ start_collectd()
+ if "yes" in START_BEATS.lower():
+ start_beats()
+
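+# 'v' in the test name marks VNF-based tests (e.g. pvp, pvvp), which require one
+# more sanity check (the VNF image path) than plain phy2phy tests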
+if 'v' in VSPERF_TEST:
+ if len(SANITY_CHECK_DONE_LIST) != 4:
+ print("Certain Sanity Checks Failed\n" \
+ "You can make changes based on the outputs and run" \
+ "the testcontrol auto container again")
+ else:
+ run_vsperf_test()
+ test_status()
+else:
+ if len(SANITY_CHECK_DONE_LIST) != 3:
+ print("Certain Sanity Checks Failed\n" \
+ "You can make changes based on the outputs and run" \
+ "the testcontrol auto container again")
+ else:
+ run_vsperf_test()
+ test_status()
+
+
+if "yes" in CLEAN_UP.lower():
+ vsperf_remove()
+ remove_uploaded_config()
+ result_folders_remove()
+ collectd_remove()
+ terminate_vsperf()
diff --git a/tools/docker/testcontrol/auto/docker-compose.yml b/tools/docker/testcontrol/auto/docker-compose.yml
new file mode 100644
index 00000000..50c528a6
--- /dev/null
+++ b/tools/docker/testcontrol/auto/docker-compose.yml
@@ -0,0 +1,22 @@
+version: '2'
+
+services:
+ testcontrol:
+ build:
+ context: ./controller
+ volumes:
+ - ./controller/vsperf:/vsperf
+ env_file:
+ - ./controller/list.env
+ ports:
+ - 50052
+
diff --git a/tools/docker/testcontrol/interactive/controller/Dockerfile b/tools/docker/testcontrol/interactive/controller/Dockerfile
new file mode 100644
index 00000000..16cf59fd
--- /dev/null
+++ b/tools/docker/testcontrol/interactive/controller/Dockerfile
@@ -0,0 +1,22 @@
+FROM python:3.6
+LABEL maintainer="sridhar.rao@spirent.com"
+
+ENV GRPC_PYTHON_VERSION 1.4.0
+RUN apt-get update && apt-get -y install python3-pip
+RUN pip3 install grpcio==${GRPC_PYTHON_VERSION} grpcio-tools==${GRPC_PYTHON_VERSION}
+RUN pip3 install paramiko
+RUN pip3 install chainmap
+RUN pip3 install oslo.utils
+RUN pip3 install scp
+
+WORKDIR /usr/src/app
+
+COPY ./vsperf ./vsperf
+
+VOLUME ["/usr/src/app/vsperf"]
+
+EXPOSE 50052
+
+CMD ["python3", "./vsperf/vsperf_controller.py"]
+
+#CMD tail -f /dev/null
diff --git a/tools/docker/testcontrol/interactive/controller/vsperf/__init__.py b/tools/docker/testcontrol/interactive/controller/vsperf/__init__.py
new file mode 100644
index 00000000..ad0ebec3
--- /dev/null
+++ b/tools/docker/testcontrol/interactive/controller/vsperf/__init__.py
@@ -0,0 +1 @@
+#### Empty
diff --git a/tools/docker/testcontrol/interactive/controller/vsperf/output.txt b/tools/docker/testcontrol/interactive/controller/vsperf/output.txt
new file mode 100644
index 00000000..912c877b
--- /dev/null
+++ b/tools/docker/testcontrol/interactive/controller/vsperf/output.txt
@@ -0,0 +1 @@
+[INFO ] 2019-08-27 18:09:46,085 : (root) - Overall test report written to "/tmp/results_2019-08-27_18-08-53/OvsDpdkVhost_test_report.rst"
diff --git a/tools/docker/testcontrol/interactive/controller/vsperf/vsperf_controller.py b/tools/docker/testcontrol/interactive/controller/vsperf/vsperf_controller.py
new file mode 100644
index 00000000..d1c3838d
--- /dev/null
+++ b/tools/docker/testcontrol/interactive/controller/vsperf/vsperf_controller.py
@@ -0,0 +1,706 @@
+# Copyright 2018-19 Spirent Communications.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# pylint: disable=R0904
+# pylint: disable=R0902
+# twenty-two public methods are reasonable in this script
+
+"""
+VSPERF docker-controller.
+"""
+
+import io
+import time
+import ast
+import math
+
+from concurrent import futures
+
+import grpc
+from proto import vsperf_pb2
+from proto import vsperf_pb2_grpc
+from utils import ssh
+
+_ONE_DAY_IN_SECONDS = 60 * 60 * 24
+
+
+# pylint: disable=too-few-public-methods,no-self-use
+class PseudoFile(io.RawIOBase):
+ """
+ Handle ssh command output.
+ """
+
+ def write(self, chunk):
+ """
+ Write to file
+ """
+ if "error" in chunk:
+ return
+ with open("./output.txt", "w") as fref:
+ fref.write(chunk)
+
+
+class VsperfController(vsperf_pb2_grpc.ControllerServicer):
+ """
+ Main Controller Class
+ """
+
+ def __init__(self):
+ """
+ Initialization
+ """
+ self.client = None
+ self.dut_check = None
+ self.dut = None
+ self.user = None
+ self.pwd = None
+ self.vsperf_conf = None
+ self.tgen_client = None
+ self.tgen_check = None
+ self.tgen = None
+ self.tgen_user = None
+ self.tgenpwd = None
+ self.tgen_conf = None
+ self.scenario = None
+ self.testcase = None
+ self.tgen_ip_address = None
+ self.testtype = None
+ self.trex_conf = None
+ self.trex_params = None
+ self.conffile = None
+ self.tests_run_check = None
+ self.tgen_start_check = None
+ # Default TGen is T-Rex
+ self.trex_conffile = "trex_cfg.yml"
+ self.collectd_conffile = "collectd.conf"
+ self.test_upload_check = 0
+ self.sanity_check_done_list = list()
+
+ def setup(self):
+ """
+ Performs Setup of the client.
+ """
+ # Just connect to VM.
+ self.client = ssh.SSH(host=self.dut, user=self.user,
+ password=self.pwd)
+ self.client.wait()
+
+ def upload_config(self):
+ """
+ Perform file upload.
+ """
+ # self.client._put_file_shell(self.conffile, '~/vsperf.conf')
+ self.client.put_file(self.conffile, '~/{}'.format(self.conffile))
+ print("No")
+
+ def run_test(self):
+ """
+ Run test
+ """
+ # Hugepages are sometimes left in /mnt/huge; removing these stored
+ # hugepages is necessary to free them up
+ rmv_cmd = "cd /mnt/huge && echo {} | sudo -S rm -rf *".format(self.pwd)
+ self.client.run(rmv_cmd, pty=True)
+ cmd = "source ~/vsperfenv/bin/activate ; "
+ #cmd = "scl enable python33 bash ; "
+ cmd += "cd vswitchperf* && "
+ cmd += "./vsperf "
+ if self.vsperf_conf:
+ cmd += "--conf-file ~/{} ".format(self.conffile)
+ # cmd += self.conffile
+ cmd += self.scenario
+ with PseudoFile() as pref:
+ self.client.run(cmd, stdout=pref, pty=True, timeout=0)
+
+ def TestStatus(self, request, context):
+ """
+ Check the test status after the tests have run
+ """
+ if self.dut_check != 0:
+ return vsperf_pb2.StatusReply(message="DUT-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " DUT-Host.")
+ if self.tests_run_check != 1:
+ return vsperf_pb2.StatusReply(message="No test have ran yet. [!]")
+ testtype_list = request.testtype.split(",")
+ test_success = []
+ test_failed = []
+ testtype_list_len = len(testtype_list)
+ for test in testtype_list:
+ #latest_result_cmd = "find /tmp -mindepth 1 -type d -cmin -5 -printf '%f'"
+ test_result_dir = str((self.client.\
+ execute("find /tmp -mindepth 1 -type d -cmin -5 -printf '%f'")[1]).\
+ split('find')[0])
+ #test_date_cmd = "date +%F"
+ test_date = str(self.client.execute("date +%F")[1]).replace("\n", "")
+ if test_date in test_result_dir:
+ testcase_check_cmd = "cd /tmp && cd `ls -t | grep results | head -{} | tail -1`".\
+ format(testtype_list_len)
+ testcase_check_cmd += " && find . -maxdepth 1 -name '*{}*'".\
+ format(test)
+ testcase_check_output = str(self.client.execute(testcase_check_cmd)[1]).\
+ split('\n', 2)
+ check = 0
+ for i in testcase_check_output:
+ if (".csv" in i) or (".md" in i) or (".rst" in i):
+ check += 1
+ if check == 3:
+ test_success.append(test)
+ else:
+ test_failed.append(test)
+ testtype_list_len -= 1
+ if len(testtype_list) == len(test_success):
+ return vsperf_pb2.StatusReply(message="All Tests Successfully Completed on DUT-Host" \
+ "\nResults... [OK]")
+ if not test_success:
+ return vsperf_pb2.StatusReply(
+ message="All Tests Failed on DUT-Host \nResults... [Failed]")
+ return vsperf_pb2.StatusReply(message="Tests {} failed. Results ... [Failed]\n"\
+ "All other Tests Successfully Completed on DUT-Host. Results... [OK] ".\
+ format(test_failed))
+
+ def HostConnect(self, request, context):
+ """
+ Handle host connectivity command from client
+ """
+ self.dut = request.ip
+ self.user = request.uname
+ self.pwd = request.pwd
+ self.setup()
+ check_cmd = "ls -l"
+ self.dut_check = int(self.client.execute(check_cmd)[0])
+ return vsperf_pb2.StatusReply(message="Successfully Connected")
+
+ def save_chunks_to_file(self, chunks, filename):
+ """
+ Write the output to file
+ """
+ with open(filename, 'w+') as fref:
+ fref.write(chunks)
+
+ def UploadConfigFile(self, request, context):
+ """
+ Handle upload config-file command from client
+ """
+ if self.dut_check != 0:
+ return vsperf_pb2.StatusReply(message="DUT-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " DUT-Host.")
+ chunks = request.Content
+ filename = request.Filename
+ self.conffile = filename
+ self.save_chunks_to_file(chunks, filename)
+ # Check whether the config file already exists; if so, remove it
+ # first and then upload the new one.
+ check_test_config_cmd = "find ~/ -maxdepth 1 -name {}".format(filename)
+ check_test_result = str(self.client.execute(check_test_config_cmd)[1])
+ if "{}".format(filename) in check_test_result:
+ self.client.run("rm -f {}".format(filename))
+ self.upload_config()
+ self.test_upload_check = 1
+ print("Hello")
+ return vsperf_pb2.UploadStatus(Message="Successfully Uploaded", Code=1)
+
+ def StartTest(self, request, context):
+ """
+ Handle start-test command from client
+ """
+ if self.dut_check != 0:
+ return vsperf_pb2.StatusReply(message="DUT-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " DUT-Host.")
+ sanity_dict = {1:"Check installed VSPERF",
+ 2:"Check Test Config's VNF path is available on DUT-Host",
+ 3:"Check NIC PCIs is available on Traffic Generator",
+ 4:"Check CPU allocation on DUT-Host",
+ 5:"Check installed Collectd",
+ 6:"Check Connection between DUT-Host and Traffic Generator Host"}
+ sanity_dict_option_list = list(sanity_dict.keys())
+ remaining_sanity = [item for item in sanity_dict_option_list if item not in \
+ self.sanity_check_done_list]
+ if remaining_sanity:
+ sanity_return_msg = ""
+ for i_sanity in remaining_sanity:
+ sanity_return_msg += sanity_dict[i_sanity] + "\n"
+ return vsperf_pb2.StatusReply(message="The following sanity checks are either not"\
+ " performed yet or Does not satisfy test requirements" \
+ "\n{}".format(sanity_return_msg))
+ if self.test_upload_check == 0:
+ return vsperf_pb2.StatusReply(message="Test File is not uploaded yet [!] " \
+ "\nUpload Test Configuration File.")
+ if self.tgen_start_check != 1:
+ return vsperf_pb2.StatusReply(message="Traffic Generator has not started yet [!]")
+ self.vsperf_conf = request.conffile
+ self.testtype = request.testtype
+ testtype_list = self.testtype.split(",")
+ self.tests_run_check = 1
+ for test in testtype_list:
+ self.scenario = test
+ self.run_test()
+ return vsperf_pb2.StatusReply(message="Test Successfully Completed")
+
+###### Traffic Generator Related functions ####
+ def TGenHostConnect(self, request, context):
+ """
+ Connect to TGen-Node
+ """
+ self.tgen = request.ip
+ self.tgen_user = request.uname
+ self.tgenpwd = request.pwd
+ self.tgen_setup()
+ check_tgen_cmd = "ls"
+ self.tgen_check = int(self.tgen_client.execute(check_tgen_cmd)[0])
+ return vsperf_pb2.StatusReply(message="Successfully Connected")
+
+ def tgen_setup(self):
+ """
+ Setup the T-Gen Client
+ """
+ # Just connect to VM.
+ self.tgen_client = ssh.SSH(host=self.tgen, user=self.tgen_user,
+ password=self.tgenpwd)
+ self.tgen_client.wait()
+
+ def StartBeats(self, request, context):
+ """
+ Start fileBeats on DUT
+ """
+ if self.dut_check != 0:
+ return vsperf_pb2.StatusReply(message="DUT-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " DUT-Host.")
+ run_cmd = "echo '{}' | sudo -S service filebeat start".format(self.pwd)
+ #run_cmd = "sudo service filebeat start"
+ self.client.run(run_cmd, pty=True)
+ return vsperf_pb2.StatusReply(message="Beats are started on DUT-Host")
+
+ def DUTvsperfTestAvailability(self, request, context):
+ """
+ Before running a test, make sure no other test is already running
+ """
+ if self.dut_check != 0:
+ return vsperf_pb2.StatusReply(message="DUT-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " DUT-Host.")
+ vsperf_ava_cmd = "ps -ef | grep -v grep | grep ./vsperf | awk '{print $2}'"
+ vsperf_ava_result = len((self.client.execute(vsperf_ava_cmd)[1]).split("\n"))
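+ # empty output splits into a single element, meaning no ./vsperf process is running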
+ if vsperf_ava_result == 1:
+ return vsperf_pb2.StatusReply(message="DUT-Host is available for performing" \
+ " VSPERF Test\nYou can perform Test!")
+ return vsperf_pb2.StatusReply(message="DUT-Host is busy right now, Wait for some time\n\
+ Always Check availability before Running Test!")
+
+
+###Clean-UP process related functions####
+
+
+ def vsperf_remove(self):
+ """
+ Actual removal of the VSPERF
+ """
+ vsperf_rm_cmd = "echo '{}' | sudo -S rm -r ~/vswitchperf".format(
+ self.pwd)
+ self.client.run(vsperf_rm_cmd, pty=True)
+ vsperfenv_rm_cmd = "echo '{}' | sudo -S rm -r -f ~/vsperfenv".format(
+ self.pwd)
+ self.client.run(vsperfenv_rm_cmd, pty=True)
+
+ def remove_uploaded_config(self):
+ """
+ Remove the uploaded test configuration file
+ """
+ vconfig_rm_cmd = "rm ~/{}".format(self.conffile)
+ self.client.run(vconfig_rm_cmd, pty=True)
+
+ def result_folder_remove(self):
+ """
+ Remove result folder on DUT
+ """
+ remove_cmd = "rm -r /tmp/*results*"
+ self.client.run(remove_cmd, pty=True)
+
+ def collectd_remove(self):
+ """
+ Remove collectd from DUT
+ """
+ collectd_dwn_rm_cmd = "echo '{}' | sudo -S rm -r -f ~/collectd".format(
+ self.pwd)
+ self.client.run(collectd_dwn_rm_cmd, pty=True)
+ collectd_rm_cmd = "echo '{}' | sudo -S rm -r -f /opt/collectd".format(
+ self.pwd)
+ self.client.run(collectd_rm_cmd, pty=True)
+
+ def RemoveVsperf(self, request, context):
+ """
+ Handle VSPERF removal command from client
+ """
+ if self.dut_check != 0:
+ return vsperf_pb2.StatusReply(message="DUT-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " DUT-Host.")
+ self.vsperf_remove()
+ return vsperf_pb2.StatusReply(message="Successfully VSPERF Removed")
+
+ def TerminateVsperf(self, request, context):
+ """
+ Terminate the VSPERF and kill processes
+ """
+ if self.dut_check != 0:
+ return vsperf_pb2.StatusReply(message="DUT-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " DUT-Host.")
+ stress_kill_cmd = "pkill stress"
+ python3_kill_cmd = "pkill python3"
+ qemu_kill_cmd = "killall -9 qemu-system-x86_64"
+ self.client.send_command(stress_kill_cmd)
+ self.client.send_command(python3_kill_cmd)
+ self.client.send_command(qemu_kill_cmd)
+
+ # sometimes qemu resists to terminate, so wait a bit and kill it again
+ qemu_check_cmd = "pgrep qemu-system-x86_64"
+ qemu_cmd_response = self.client.execute(qemu_check_cmd)[1]
+
+ if qemu_cmd_response != '':
+ time.sleep(5)
+ self.client.send_command(qemu_kill_cmd)
+ time.sleep(5)
+
+ ovs_kill_cmd = "pkill ovs-vswitchd"
+ ovsdb_kill_cmd = "pkill ovsdb-server"
+ vppctl_kill_cmd = "pkill vppctl"
+ vpp_kill_cmd = "pkill vpp"
+ vpp_cmd = "pkill -9".format(self.pwd)
+
+ self.client.send_command(ovs_kill_cmd)
+ time.sleep(1)
+ self.client.send_command(ovsdb_kill_cmd)
+ time.sleep(1)
+ self.client.send_command(vppctl_kill_cmd)
+ time.sleep(1)
+ self.client.send_command(vpp_kill_cmd)
+ time.sleep(1)
+ self.client.send_command(vpp_cmd)
+ time.sleep(1)
+
+ return vsperf_pb2.StatusReply(
+ message="All the VSPERF related process terminated successfully")
+
+ def RemoveResultFolder(self, request, context):
+ """
+ Handle result folder removal command from client
+ """
+ if self.dut_check != 0:
+ return vsperf_pb2.StatusReply(message="DUT-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " DUT-Host.")
+ self.result_folder_remove()
+ return vsperf_pb2.StatusReply(
+ message="Successfully VSPERF Results Removed")
+
+ def RemoveUploadedConfig(self, request, context):
+ """
+ Handle all configuration file removal command from client
+ """
+ if self.dut_check != 0:
+ return vsperf_pb2.StatusReply(message="DUT-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " DUT-Host.")
+ if self.tgen_check != 0:
+ return vsperf_pb2.StatusReply(message="TGen-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " TGen-Host.")
+ if self.test_upload_check == 0:
+ return vsperf_pb2.StatusReply(message="Test File is not uploaded yet [!] " \
+ "\nUpload Test Configuration File.")
+ self.remove_uploaded_config()
+ return vsperf_pb2.StatusReply(
+ message="Successfully All Uploaded Config Files Removed")
+
+ def RemoveCollectd(self, request, context):
+ """
+ Handle collectd removal command from client
+ """
+ if self.dut_check != 0:
+ return vsperf_pb2.StatusReply(message="DUT-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " DUT-Host.")
+ self.collectd_remove()
+ return vsperf_pb2.StatusReply(
+ message="Successfully Collectd Removed From DUT-Host")
+
+ def RemoveEverything(self, request, context):
+ """
+ Handle the remove-everything-from-DUT command from the client
+ """
+ if self.dut_check != 0:
+ return vsperf_pb2.StatusReply(message="DUT-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " DUT-Host.")
+ if self.tgen_check != 0:
+ return vsperf_pb2.StatusReply(message="TGen-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " TGen-Host.")
+ self.vsperf_remove()
+ self.result_folder_remove()
+ self.remove_uploaded_config()
+ self.collectd_remove()
+ return vsperf_pb2.StatusReply(
+ message="Successfully Everything Removed From DUT-Host")
+
+ def StartTGen(self, request, context):
+ """
+ Handle start-Tgen command from client
+ """
+ if self.tgen_check != 0:
+ return vsperf_pb2.StatusReply(message="TGen-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " TGen-Host.")
+ self.trex_params = request.params
+ run_cmd = "cd trex_2.37/scripts ; "
+ run_cmd += "./t-rex-64 "
+ run_cmd += self.trex_params
+ self.tgen_client.send_command(run_cmd)
+ self.tgen_start_check = 1
+ return vsperf_pb2.StatusReply(message="T-Rex Successfully running...")
+
+ def SanityCollectdCheck(self, request, context):
+ """
+ Check and verify collectd is able to run and start properly
+ """
+ if self.dut_check != 0:
+ return vsperf_pb2.StatusReply(message="DUT-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " DUT-Host.")
+ check_collectd_cmd = "find /opt -maxdepth 1 -name 'collectd'"
+ check_test_result = str(self.client.execute(check_collectd_cmd)[1])
+ if "collectd" in check_test_result:
+ check_collectd_run_cmd = "echo {} | sudo -S service collectd start".format(self.pwd)
+ self.client.run(check_collectd_run_cmd, pty=True)
+ check_collectd_status_cmd = "ps aux | grep collectd"
+ check_collectd_status = str(self.client.execute(check_collectd_status_cmd)[1])
+ if "/sbin/collectd" in check_collectd_status:
+ self.sanity_check_done_list.append(int(5))
+ return vsperf_pb2.StatusReply(message="Collectd is working Fine")
+ return vsperf_pb2.StatusReply(message="Collectd Fail to Start, \
+ Install correctly before running Test")
+ return vsperf_pb2.StatusReply(message="Collectd is not installed yet.")
+
+ def SanityVNFpath(self, request, context):
+ """
+ Check if the VNF image is available at the path mentioned in the Test Config File
+ """
+ # fetch the VNF path we placed in vsperf.conf file
+ if self.dut_check != 0:
+ return vsperf_pb2.StatusReply(message="DUT-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " DUT-Host.")
+ if self.test_upload_check == 0:
+ return vsperf_pb2.StatusReply(message="Test File is not uploaded yet [!] " \
+ "\nUpload Test Configuration File.")
+ vsperf_conf_path = 'cat ~/{} | grep "GUEST_IMAGE"'.format(self.conffile)
+ vsperf_conf_read = self.client.execute(vsperf_conf_path)[1]
+ vnf_image_path = vsperf_conf_read.split("'")[1]
+ vnf_path_check_cmd = "find {}".format(vnf_image_path)
+ vfn_path_check_result = str(self.client.execute(vnf_path_check_cmd)[1])
+ if vnf_image_path in vfn_path_check_result:
+ self.sanity_check_done_list.append(int(2))
+ return vsperf_pb2.StatusReply(message="Test Configratuion file has Correct "\
+ "VNF path information on DUT-Host.....[OK]")
+ return vsperf_pb2.StatusReply(message='Test Configuration file has incorrect VNF '\
+ 'path information \n'\
+ 'VNF is not available on DUT-Host................................[Failed]\n ')
+
+ def SanityVSPERFCheck(self, request, context):
+ """
+ Make sure that VSPERF is installed correctly
+ """
+ if self.dut_check != 0:
+ return vsperf_pb2.StatusReply(message="DUT-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " DUT-Host.")
+ vsperf_check_command = "source ~/vsperfenv/bin/activate ; cd vswitchperf* && "
+ vsperf_check_command += "./vsperf --help"
+ vsperf_check_cmd_result = str(self.client.execute(vsperf_check_command)[1])
+ vsperf_verify_list = [
+ 'usage',
+ 'positional arguments',
+ 'optional arguments',
+ 'test selection options',
+ 'test behavior options']
+ for idx, i in enumerate(vsperf_verify_list, start=1):
+ if str(i) in vsperf_check_cmd_result:
+ if idx < 5:
+ continue
+ elif idx == 5:
+ self.sanity_check_done_list.append(int(1))
+ return vsperf_pb2.StatusReply(
+ message="VSPERF Installed Correctly and Working fine")
+ return vsperf_pb2.StatusReply(message="VSPERF Does Not Installed Correctly ," \
+ "INSTALL IT AGAIN..............[Critical]")
+ return vsperf_pb2.StatusReply(message="VSPERF Does Not Installed Correctly ," \
+ "INSTALL IT AGAIN..............[Critical]")
+
+ def SanityNICCheck(self, request, context):
+ """
+ Check whether the NIC PCI ids are correctly configured
+ """
+ if self.tgen_check != 0:
+ return vsperf_pb2.StatusReply(message="TGen-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " TGen-Host.")
+ trex_conf_path = "cat /etc/trex_cfg.yaml | grep interfaces"
+ trex_conf_read = self.tgen_client.execute(trex_conf_path)[1]
+ nic_pid_ids_list = [trex_conf_read.split("\"")[1], trex_conf_read.split("\"")[3]]
+ trex_nic_pic_id_cmd = "lspci | egrep -i --color 'network|ethernet'"
+ trex_nic_pic_id = str(self.tgen_client.execute(trex_nic_pic_id_cmd)[1]).split('\n')
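+ # both PCI ids from trex_cfg.yaml must appear in the lspci output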
+ acheck = 0
+ for k in trex_nic_pic_id:
+ for j in nic_pid_ids_list:
+ if j in k:
+ acheck += 1
+ else:
+ pass
+ if acheck == 2:
+ self.sanity_check_done_list.append(int(3))
+ return vsperf_pb2.StatusReply(message="Both the NIC PCI Ids are Correctly "\
+ "configured on TGen-Host..............")
+ return vsperf_pb2.StatusReply(message="You configured NIC PCI Ids Wrong in "\
+ "TGen-Host............................[OK]\n")
+
+ def SanityTgenConnDUTCheck(self, request, context):
+ """
+ Confirm that the DUT-Host can reach the Traffic Generator Host
+ """
+ if self.dut_check != 0:
+ return vsperf_pb2.StatusReply(message="DUT-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " DUT-Host.")
+ self.tgen_ip_address = request.ip
+ tgen_connectivity_check_cmd = "ping {} -c 1".format(
+ self.tgen_ip_address)
+ tgen_connectivity_check_result = int(
+ self.client.execute(tgen_connectivity_check_cmd)[0])
+ if tgen_connectivity_check_result == 0:
+ self.sanity_check_done_list.append(int(6))
+ return vsperf_pb2.StatusReply(
+ message="DUT-Host is successfully reachable to Traffic Generator......")
+ return vsperf_pb2.StatusReply(message="DUT-Host is unsuccessful to reach the \
+ Traffic Generator \nMake sure to establish connection \
+ between DUT-Host and TGen-Host before running Test\
+ ............... ")
+
+ def variable_from_test_config(self, aparameter):
+ """This function can be use to read any configuration paramter from vsperf.conf"""
+ read_cmd = 'cat ~/{} | grep "{}"'.format(aparameter, self.conffile)
+ read_cmd_output = str(self.client.execute(read_cmd)[1])
+ print(read_cmd_output)
+ if not read_cmd_output or '#' in read_cmd_output:
+ return 0
+ return read_cmd_output.split("=")[1].strip()
+
+ def cpumask2coreids(self, mask):
+ """conver mask to coreids"""
+ intmask = int(mask, 16)
+ i = 1
+ coreids = []
+ while i <= intmask:  # '<=' so the highest set bit is not skipped
+ if i & intmask:
+ coreids.append(str(math.frexp(i)[1]-1))
+ i = i << 1
+ return coreids
+
+ def cpu_allocation_check(self, list1, list2):
+ """compare to cpu_map list"""
+ if len(list1) >= len(list2):
+ if all(elem in list1 for elem in list2):
+ self.sanity_check_done_list.append(int(4))
+ return vsperf_pb2.StatusReply(message="CPU allocation properly done on" \
+ " DUT-Host.................[OK]")
+ return vsperf_pb2.StatusReply(message="CPU allocation not done properly on " \
+ "DUT-Host............[Failed]")
+ return vsperf_pb2.StatusReply(message="CPU allocation not done properly on" \
+ " DUT-Host............[Failed]")
+
+ def SanityCPUAllocationCheck(self, request, context):
+ """
+ check for cpu-allocation on DUT-Host
+ """
+ if self.dut_check != 0:
+ return vsperf_pb2.StatusReply(message="DUT-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " DUT-Host.")
+ if self.test_upload_check == 0:
+ return vsperf_pb2.StatusReply(message="Test File is not uploaded yet [!] " \
+ "\nUpload Test Configuration File.")
+ read_setting_cmd = "source vsperfenv/bin/activate ; cd vswitchperf* && "
+ read_setting_cmd += './vsperf --list-settings'
+ default_vsperf_settings = ast.literal_eval(str(self.client.execute(read_setting_cmd)[1]))
+ default_cpu_map = default_vsperf_settings["VSWITCH_VHOST_CPU_MAP"]
+ default_vswitch_pmd_cpu_mask = str(default_vsperf_settings["VSWITCH_PMD_CPU_MASK"])
+ default_vswitch_vhost_cpu_map = [str(x) for x in default_cpu_map]
+ vswitch_pmd_cpu_mask = self.variable_from_test_config("VSWITCH_PMD_CPU_MASK")
+ vswitch_cpu_map = (self.variable_from_test_config("VSWITCH_VHOST_CPU_MAP"))
+ vswitch_vhost_cpu_map = 0
+
+ if vswitch_cpu_map != 0:
+ vswitch_vhost_cpu_map = [str(x) for x in ast.literal_eval(vswitch_cpu_map)]
+
+ if vswitch_pmd_cpu_mask == 0 and vswitch_vhost_cpu_map == 0:
+ self.sanity_check_done_list.append(int(4))
+ return vsperf_pb2.StatusReply(message="CPU allocation Check Done,"\
+ "\nNo vswitch_pmd_cpu_mask or vswitch_vhost_cpu_map assign in test " \
+ "configuration file.\nUsing Default Settings..[OK]\n")
+ if vswitch_pmd_cpu_mask != 0 and vswitch_vhost_cpu_map == 0:
+ core_id = self.cpumask2coreids(vswitch_pmd_cpu_mask)
+ return self.cpu_allocation_check(default_vswitch_vhost_cpu_map, core_id)
+ if vswitch_pmd_cpu_mask == 0 and vswitch_vhost_cpu_map != 0:
+ core_id_1 = self.cpumask2coreids(default_vswitch_pmd_cpu_mask)
+ return self.cpu_allocation_check(vswitch_vhost_cpu_map, core_id_1)
+ core_id_2 = self.cpumask2coreids(vswitch_pmd_cpu_mask)
+ return self.cpu_allocation_check(vswitch_vhost_cpu_map, core_id_2)
+
+ def GetVSPERFConffromDUT(self, request, context):
+ """
+ This will extract the vsperf test configuration from DUT-Host
+ """
+ if self.dut_check != 0:
+ return vsperf_pb2.StatusReply(message="DUT-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " DUT-Host.")
+ if self.test_upload_check == 0:
+ return vsperf_pb2.StatusReply(message="Test File is not uploaded yet [!] " \
+ "\nUpload Test Configuration File.")
+ read_cmd = "cat ~/{}".format(self.conffile)
+ read_cmd_output = str(self.client.execute(read_cmd)[1])
+ return vsperf_pb2.StatusReply(message="{}".format(read_cmd_output))
+
+
+def serve():
+ """
+ Start servicing the client
+ """
+ server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
+ vsperf_pb2_grpc.add_ControllerServicer_to_server(
+ VsperfController(), server)
+ server.add_insecure_port('[::]:50052')
+ server.start()
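+ # start() does not block; the sleep loop below keeps the main thread alive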
+ try:
+ while True:
+ time.sleep(_ONE_DAY_IN_SECONDS)
+ except (SystemExit, KeyboardInterrupt, MemoryError, RuntimeError):
+ server.stop(0)
+
+
+if __name__ == "__main__":
+ serve()
diff --git a/tools/docker/testcontrol/interactive/docker-compose.yml b/tools/docker/testcontrol/interactive/docker-compose.yml
new file mode 100644
index 00000000..431de124
--- /dev/null
+++ b/tools/docker/testcontrol/interactive/docker-compose.yml
@@ -0,0 +1,20 @@
+version: '2'
+
+services:
+ testcontrol:
+ build:
+ context: ./controller
+ volumes:
+ - ./controller/vsperf:/vsperf
+ ports:
+ - 50052:50052
+
diff --git a/tools/docker/vsperf/Dockerfile b/tools/docker/vsperf/Dockerfile
new file mode 100644
index 00000000..effce15b
--- /dev/null
+++ b/tools/docker/vsperf/Dockerfile
@@ -0,0 +1,37 @@
+# To Build
+# docker build --rm -t vsperf .
+
+# -------- Builder stage.
+FROM python:3.6.10-slim-buster
+MAINTAINER Sridhar Rao <sridhar.rao@spirent.com>
+
+# Create a directory
+RUN mkdir /home/opnfv
+#
+# Update and Install required packages
+#
+RUN apt-get -y update
+RUN apt-get -y install git iputils-ping openssh-client tk
+
+#
+# Get vswitchperf
+#
+RUN cd /home/opnfv && \
+ git clone https://gerrit.opnfv.org/gerrit/vswitchperf
+
+#
+# Remove unnecessary python packages.
+#
+RUN cd /home/opnfv/vswitchperf && \
+ sed -e '/numpy/ s/^#*/#\ /' -i requirements.txt && \
+ sed -e '/matplotlib/ s/^#*/#\ /' -i requirements.txt && \
+ sed -e '/pycrypto/ s/^#*/#\ /' -i requirements.txt && \
+ sed -e '/pypsi/ s/^#*/#\ /' -i requirements.txt && \
+ sed -e '/paramiko/ s/^#*/#\ /' -i requirements.txt && \
+ sed -e '/pyzmq/ s/^#*/#\ /' -i requirements.txt && \
+ sed -e "\$apyzmq" -i requirements.txt
+
+#
+# Build VSPERF
+#
+RUN cd /home/opnfv/vswitchperf/systems && ./build_base_machine.sh --trafficgen
diff --git a/tools/functions.py b/tools/functions.py
index d35f1f84..65c9978b 100644
--- a/tools/functions.py
+++ b/tools/functions.py
@@ -127,7 +127,7 @@ def settings_update_paths():
# expand OS wildcards in paths if needed
if glob.has_magic(tmp_tool):
tmp_glob = glob.glob(tmp_tool)
- if len(tmp_glob) == 0:
+ if not tmp_glob:
raise RuntimeError('Path to the {} is not valid: {}.'.format(tool, tmp_tool))
elif len(tmp_glob) > 1:
raise RuntimeError('Path to the {} is ambiguous {}'.format(tool, tmp_glob))
diff --git a/tools/k8s/cluster-deployment/k8scluster/.ansible-lint b/tools/k8s/cluster-deployment/k8scluster/.ansible-lint
new file mode 100644
index 00000000..036ecf52
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/.ansible-lint
@@ -0,0 +1,3 @@
+skip_list:
+ - '306'
+ - '301' \ No newline at end of file
diff --git a/tools/k8s/cluster-deployment/k8scluster/README.md b/tools/k8s/cluster-deployment/k8scluster/README.md
new file mode 100644
index 00000000..78fdbd03
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/README.md
@@ -0,0 +1,60 @@
+# OPNFV - k8s cluster setup
+
+This project aims to set up and programmatically deploy a Kubernetes cluster on CentOS 7 machines with the help of Kubeadm. It uses ansible and requires very little intervention.
+
+## Getting Started
+The following steps aim to describe the minimum required to successfully run this script.
+
+
+### Prerequisites
+
+Kubernetes and Ansible should be installed on the master node, and the docker and kubelet services should be running on the master and worker nodes.
+
+
+### Setup
+In order to configure the cluster, an inventory file should be included. The inventory file (e.g., `hosts`) has the following structure:
+
+```
+[master]
+master ansible_host={enter-master-ip} ansible_connection=ssh ansible_ssh_user={insert-user} ansible_ssh_pass={insert-password} ansible_ssh_common_args='-o StrictHostKeyChecking=no'
+
+[workers]
+worker ansible_host={enter-worker-ip} ansible_connection=ssh ansible_ssh_user={insert-user} ansible_ssh_pass={insert-password} ansible_ssh_common_args='-o StrictHostKeyChecking=no'
+
+```
+In this configuration file, connection details should be filled in. In case more nodes within the cluster are needed, add lines as necessary to the workers group within the `hosts` file.
+
+
+### Usage
+In order to use the script, download or clone [this repository](https://gerrit.opnfv.org/gerrit/vswitchperf) to the root of what will be the master node.
+
+Navigate to its contents and execute the following command as a regular user (this will prevent errors throughout configuration and deployment) on whichever machine you wish to use as the master node (this host will be the one running kubectl):
+
+```
+ansible-playbook k8sclustermanagement.yml -i hosts --tags "deploy"
+
+```
+You can verify the installation by running:
+```
+kubectl get nodes
+```
+and verifying that the nodes report a Ready status. More information may be obtained with `kubectl describe nodes` if needed.
+
+
+To clear the cluster, execute the following command:
+
+```
+ansible-playbook k8sclustermanagement.yml -i hosts --tags "clear"
+```
+
+To deploy only the CNI plugins:
+
+```
+ansible-playbook k8sclustermanagement.yml -i hosts --tags "cni"
+```
+
+
+
+### Debugging
+
+In case a step goes wrong during the installation, ansible should display a message; there are also log files to inspect when the failure happened inside k8s itself. On the master node, look for `log_init.txt` with the necessary logs. On worker nodes, the relevant file is `node_joined.txt`.
diff --git a/tools/k8s/cluster-deployment/k8scluster/ansible.cfg b/tools/k8s/cluster-deployment/k8scluster/ansible.cfg
new file mode 100644
index 00000000..0cbe08f3
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/ansible.cfg
@@ -0,0 +1,9 @@
+[defaults]
+interpreter_python=/usr/bin/python3
+
+# enable logging
+log_path = ./cluster-deployment.log
+
+[ssh_connection]
+pipelining = True
+
diff --git a/tools/k8s/cluster-deployment/k8scluster/hosts b/tools/k8s/cluster-deployment/k8scluster/hosts
new file mode 100644
index 00000000..dd928a8e
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/hosts
@@ -0,0 +1,5 @@
+[master]
+master ansible_host=10.10.120.22 ansible_connection=ssh ansible_ssh_user=ENTER_USER ansible_ssh_pass=ENTER_PASS ansible_ssh_common_args='-o StrictHostKeyChecking=no'
+
+[workers]
+worker ansible_host=10.10.120.21 ansible_connection=ssh ansible_ssh_user=ENTER_USER ansible_ssh_pass=ENTER_PASS ansible_ssh_common_args='-o StrictHostKeyChecking=no'
diff --git a/tools/k8s/cluster-deployment/k8scluster/k8sclustermanagement.yml b/tools/k8s/cluster-deployment/k8scluster/k8sclustermanagement.yml
new file mode 100644
index 00000000..5430bed5
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/k8sclustermanagement.yml
@@ -0,0 +1,4 @@
+---
+- hosts: all
+ roles:
+ - clustermanager \ No newline at end of file
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/defaults/main.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/defaults/main.yml
new file mode 100644
index 00000000..15f1f186
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/defaults/main.yml
@@ -0,0 +1,28 @@
+#Edit these values only as per your environment
+#Enter your master node's advertise IP address and the CIDR range for the pods.
+kube_ad_addr: "{{ ansible_host }}"
+kube_cidr_v: 10.244.0.0/16
+
+###################################################################################
+# Don't edit the values below; they are mandatory for configuring the kubernetes cluster
+#packages:
+#- docker
+#- kubeadm
+#- kubectl
+
+#services:
+#- docker
+#- kubelet
+#- firewalld
+
+#ports:
+#- "6443/tcp"
+#- "10250/tcp"
+
+token_file: $HOME/log_init.txt
+###################################################################################
+# Don't edit the values above; they are mandatory for configuring the kubernetes cluster
+
+
+
+PIP_executable_version: pip3.6 \ No newline at end of file
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/configMap-sriov-device-plugin.yaml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/configMap-sriov-device-plugin.yaml
new file mode 100644
index 00000000..4efeac61
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/configMap-sriov-device-plugin.yaml
@@ -0,0 +1,20 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: sriovdp-config
+ namespace: kube-system
+data:
+ config.json: |
+ {
+ "resourceList": [{
+ "resourceName": "intel_sriov_dpdk_a",
+ "selectors": {
+ "vendors": ["8086"],
+ "devices": ["10ed"],
+ "drivers": ["ixgbevf"],
+ "pfNames": ["eno3"]
+ }
+ }
+ ]
+ }
+
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/kube-flannel-daemonset.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/kube-flannel-daemonset.yml
new file mode 100644
index 00000000..00110ad6
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/kube-flannel-daemonset.yml
@@ -0,0 +1,606 @@
+#
+# cloned from https://github.com/coreos/flannel/blob/v0.12.0/Documentation/kube-flannel.yml
+#
+---
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+ name: psp.flannel.unprivileged
+ annotations:
+ seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
+ seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
+ apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
+ apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
+spec:
+ privileged: false
+ volumes:
+ - configMap
+ - secret
+ - emptyDir
+ - hostPath
+ allowedHostPaths:
+ - pathPrefix: "/etc/cni/net.d"
+ - pathPrefix: "/etc/kube-flannel"
+ - pathPrefix: "/run/flannel"
+ readOnlyRootFilesystem: false
+ # Users and groups
+ runAsUser:
+ rule: RunAsAny
+ supplementalGroups:
+ rule: RunAsAny
+ fsGroup:
+ rule: RunAsAny
+ # Privilege Escalation
+ allowPrivilegeEscalation: false
+ defaultAllowPrivilegeEscalation: false
+ # Capabilities
+ allowedCapabilities: ['NET_ADMIN']
+ defaultAddCapabilities: []
+ requiredDropCapabilities: []
+ # Host namespaces
+ hostPID: false
+ hostIPC: false
+ hostNetwork: true
+ hostPorts:
+ - min: 0
+ max: 65535
+ # SELinux
+ seLinux:
+ # SELinux is unused in CaaSP
+ rule: 'RunAsAny'
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: flannel
+rules:
+ - apiGroups: ['extensions']
+ resources: ['podsecuritypolicies']
+ verbs: ['use']
+ resourceNames: ['psp.flannel.unprivileged']
+ - apiGroups:
+ - ""
+ resources:
+ - pods
+ verbs:
+ - get
+ - apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - nodes/status
+ verbs:
+ - patch
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: flannel
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: flannel
+subjects:
+- kind: ServiceAccount
+ name: flannel
+ namespace: kube-system
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: flannel
+ namespace: kube-system
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: kube-flannel-cfg
+ namespace: kube-system
+ labels:
+ tier: node
+ app: flannel
+data:
+ cni-conf.json: |
+ {
+ "name": "cbr0",
+ "cniVersion": "0.3.1",
+ "plugins": [
+ {
+ "type": "flannel",
+ "delegate": {
+ "hairpinMode": true,
+ "isDefaultGateway": true
+ }
+ },
+ {
+ "type": "portmap",
+ "capabilities": {
+ "portMappings": true
+ }
+ }
+ ]
+ }
+ net-conf.json: |
+ {
+ "Network": "10.244.0.0/16",
+ "Backend": {
+ "Type": "vxlan"
+ }
+ }
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: kube-flannel-ds-amd64
+ namespace: kube-system
+ labels:
+ tier: node
+ app: flannel
+spec:
+ selector:
+ matchLabels:
+ app: flannel
+ template:
+ metadata:
+ labels:
+ tier: node
+ app: flannel
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: beta.kubernetes.io/os
+ operator: In
+ values:
+ - linux
+ - key: beta.kubernetes.io/arch
+ operator: In
+ values:
+ - amd64
+ hostNetwork: true
+ tolerations:
+ - operator: Exists
+ effect: NoSchedule
+ serviceAccountName: flannel
+ initContainers:
+ - name: install-cni
+ image: quay.io/coreos/flannel:v0.12.0-amd64
+ command:
+ - cp
+ args:
+ - -f
+ - /etc/kube-flannel/cni-conf.json
+ - /etc/cni/net.d/10-flannel.conflist
+ volumeMounts:
+ - name: cni
+ mountPath: /etc/cni/net.d
+ - name: flannel-cfg
+ mountPath: /etc/kube-flannel/
+ containers:
+ - name: kube-flannel
+ image: quay.io/coreos/flannel:v0.12.0-amd64
+ command:
+ - /opt/bin/flanneld
+ args:
+ - --ip-masq
+ - --kube-subnet-mgr
+ resources:
+ requests:
+ cpu: "100m"
+ memory: "50Mi"
+ limits:
+ cpu: "100m"
+ memory: "50Mi"
+ securityContext:
+ privileged: false
+ capabilities:
+ add: ["NET_ADMIN"]
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ volumeMounts:
+ - name: run
+ mountPath: /run/flannel
+ - name: flannel-cfg
+ mountPath: /etc/kube-flannel/
+ volumes:
+ - name: run
+ hostPath:
+ path: /run/flannel
+ - name: cni
+ hostPath:
+ path: /etc/cni/net.d
+ - name: flannel-cfg
+ configMap:
+ name: kube-flannel-cfg
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: kube-flannel-ds-arm64
+ namespace: kube-system
+ labels:
+ tier: node
+ app: flannel
+spec:
+ selector:
+ matchLabels:
+ app: flannel
+ template:
+ metadata:
+ labels:
+ tier: node
+ app: flannel
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: beta.kubernetes.io/os
+ operator: In
+ values:
+ - linux
+ - key: beta.kubernetes.io/arch
+ operator: In
+ values:
+ - arm64
+ hostNetwork: true
+ tolerations:
+ - operator: Exists
+ effect: NoSchedule
+ serviceAccountName: flannel
+ initContainers:
+ - name: install-cni
+ image: quay.io/coreos/flannel:v0.12.0-arm64
+ command:
+ - cp
+ args:
+ - -f
+ - /etc/kube-flannel/cni-conf.json
+ - /etc/cni/net.d/10-flannel.conflist
+ volumeMounts:
+ - name: cni
+ mountPath: /etc/cni/net.d
+ - name: flannel-cfg
+ mountPath: /etc/kube-flannel/
+ containers:
+ - name: kube-flannel
+ image: quay.io/coreos/flannel:v0.12.0-arm64
+ command:
+ - /opt/bin/flanneld
+ args:
+ - --ip-masq
+ - --kube-subnet-mgr
+ resources:
+ requests:
+ cpu: "100m"
+ memory: "50Mi"
+ limits:
+ cpu: "100m"
+ memory: "50Mi"
+ securityContext:
+ privileged: false
+ capabilities:
+ add: ["NET_ADMIN"]
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ volumeMounts:
+ - name: run
+ mountPath: /run/flannel
+ - name: flannel-cfg
+ mountPath: /etc/kube-flannel/
+ volumes:
+ - name: run
+ hostPath:
+ path: /run/flannel
+ - name: cni
+ hostPath:
+ path: /etc/cni/net.d
+ - name: flannel-cfg
+ configMap:
+ name: kube-flannel-cfg
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: kube-flannel-ds-arm
+ namespace: kube-system
+ labels:
+ tier: node
+ app: flannel
+spec:
+ selector:
+ matchLabels:
+ app: flannel
+ template:
+ metadata:
+ labels:
+ tier: node
+ app: flannel
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: beta.kubernetes.io/os
+ operator: In
+ values:
+ - linux
+ - key: beta.kubernetes.io/arch
+ operator: In
+ values:
+ - arm
+ hostNetwork: true
+ tolerations:
+ - operator: Exists
+ effect: NoSchedule
+ serviceAccountName: flannel
+ initContainers:
+ - name: install-cni
+ image: quay.io/coreos/flannel:v0.12.0-arm
+ command:
+ - cp
+ args:
+ - -f
+ - /etc/kube-flannel/cni-conf.json
+ - /etc/cni/net.d/10-flannel.conflist
+ volumeMounts:
+ - name: cni
+ mountPath: /etc/cni/net.d
+ - name: flannel-cfg
+ mountPath: /etc/kube-flannel/
+ containers:
+ - name: kube-flannel
+ image: quay.io/coreos/flannel:v0.12.0-arm
+ command:
+ - /opt/bin/flanneld
+ args:
+ - --ip-masq
+ - --kube-subnet-mgr
+ resources:
+ requests:
+ cpu: "100m"
+ memory: "50Mi"
+ limits:
+ cpu: "100m"
+ memory: "50Mi"
+ securityContext:
+ privileged: false
+ capabilities:
+ add: ["NET_ADMIN"]
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ volumeMounts:
+ - name: run
+ mountPath: /run/flannel
+ - name: flannel-cfg
+ mountPath: /etc/kube-flannel/
+ volumes:
+ - name: run
+ hostPath:
+ path: /run/flannel
+ - name: cni
+ hostPath:
+ path: /etc/cni/net.d
+ - name: flannel-cfg
+ configMap:
+ name: kube-flannel-cfg
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: kube-flannel-ds-ppc64le
+ namespace: kube-system
+ labels:
+ tier: node
+ app: flannel
+spec:
+ selector:
+ matchLabels:
+ app: flannel
+ template:
+ metadata:
+ labels:
+ tier: node
+ app: flannel
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: beta.kubernetes.io/os
+ operator: In
+ values:
+ - linux
+ - key: beta.kubernetes.io/arch
+ operator: In
+ values:
+ - ppc64le
+ hostNetwork: true
+ tolerations:
+ - operator: Exists
+ effect: NoSchedule
+ serviceAccountName: flannel
+ initContainers:
+ - name: install-cni
+ image: quay.io/coreos/flannel:v0.12.0-ppc64le
+ command:
+ - cp
+ args:
+ - -f
+ - /etc/kube-flannel/cni-conf.json
+ - /etc/cni/net.d/10-flannel.conflist
+ volumeMounts:
+ - name: cni
+ mountPath: /etc/cni/net.d
+ - name: flannel-cfg
+ mountPath: /etc/kube-flannel/
+ containers:
+ - name: kube-flannel
+ image: quay.io/coreos/flannel:v0.12.0-ppc64le
+ command:
+ - /opt/bin/flanneld
+ args:
+ - --ip-masq
+ - --kube-subnet-mgr
+ resources:
+ requests:
+ cpu: "100m"
+ memory: "50Mi"
+ limits:
+ cpu: "100m"
+ memory: "50Mi"
+ securityContext:
+ privileged: false
+ capabilities:
+ add: ["NET_ADMIN"]
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ volumeMounts:
+ - name: run
+ mountPath: /run/flannel
+ - name: flannel-cfg
+ mountPath: /etc/kube-flannel/
+ volumes:
+ - name: run
+ hostPath:
+ path: /run/flannel
+ - name: cni
+ hostPath:
+ path: /etc/cni/net.d
+ - name: flannel-cfg
+ configMap:
+ name: kube-flannel-cfg
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: kube-flannel-ds-s390x
+ namespace: kube-system
+ labels:
+ tier: node
+ app: flannel
+spec:
+ selector:
+ matchLabels:
+ app: flannel
+ template:
+ metadata:
+ labels:
+ tier: node
+ app: flannel
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: beta.kubernetes.io/os
+ operator: In
+ values:
+ - linux
+ - key: beta.kubernetes.io/arch
+ operator: In
+ values:
+ - s390x
+ hostNetwork: true
+ tolerations:
+ - operator: Exists
+ effect: NoSchedule
+ serviceAccountName: flannel
+ initContainers:
+ - name: install-cni
+ image: quay.io/coreos/flannel:v0.12.0-s390x
+ command:
+ - cp
+ args:
+ - -f
+ - /etc/kube-flannel/cni-conf.json
+ - /etc/cni/net.d/10-flannel.conflist
+ volumeMounts:
+ - name: cni
+ mountPath: /etc/cni/net.d
+ - name: flannel-cfg
+ mountPath: /etc/kube-flannel/
+ containers:
+ - name: kube-flannel
+ image: quay.io/coreos/flannel:v0.12.0-s390x
+ command:
+ - /opt/bin/flanneld
+ args:
+ - --ip-masq
+ - --kube-subnet-mgr
+ resources:
+ requests:
+ cpu: "100m"
+ memory: "50Mi"
+ limits:
+ cpu: "100m"
+ memory: "50Mi"
+ securityContext:
+ privileged: false
+ capabilities:
+ add: ["NET_ADMIN"]
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ volumeMounts:
+ - name: run
+ mountPath: /run/flannel
+ - name: flannel-cfg
+ mountPath: /etc/kube-flannel/
+ volumes:
+ - name: run
+ hostPath:
+ path: /run/flannel
+ - name: cni
+ hostPath:
+ path: /etc/cni/net.d
+ - name: flannel-cfg
+ configMap:
+ name: kube-flannel-cfg
+
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/multus-daemonset.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/multus-daemonset.yml
new file mode 100644
index 00000000..97990192
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/multus-daemonset.yml
@@ -0,0 +1,251 @@
+#
+# https://github.com/intel/multus-cni/blob/v3.4.1/images/multus-daemonset.yml
+#
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: network-attachment-definitions.k8s.cni.cncf.io
+spec:
+ group: k8s.cni.cncf.io
+ scope: Namespaced
+ names:
+ plural: network-attachment-definitions
+ singular: network-attachment-definition
+ kind: NetworkAttachmentDefinition
+ shortNames:
+ - net-attach-def
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ schema:
+ openAPIV3Schema:
+ type: object
+ properties:
+ spec:
+ type: object
+ properties:
+ config:
+ type: string
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: multus
+rules:
+ - apiGroups: ["k8s.cni.cncf.io"]
+ resources:
+ - '*'
+ verbs:
+ - '*'
+ - apiGroups:
+ - ""
+ resources:
+ - pods
+ - pods/status
+ verbs:
+ - get
+ - update
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: multus
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: multus
+subjects:
+- kind: ServiceAccount
+ name: multus
+ namespace: kube-system
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: multus
+ namespace: kube-system
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: multus-cni-config
+ namespace: kube-system
+ labels:
+ tier: node
+ app: multus
+data:
+ # NOTE: If you'd prefer to manually apply a configuration file, you may create one here.
+ # If you'd like to customize the Multus installation, change the arguments to the Multus pod:
+ # change the "args" line below from
+ # - "--multus-conf-file=auto"
+ # to:
+ # "--multus-conf-file=/tmp/multus-conf/70-multus.conf"
+ # Additionally -- you should ensure that the name "70-multus.conf" is the alphabetically first name in the
+ # /etc/cni/net.d/ directory on each node; otherwise it will not be used by the kubelet.
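+ # For example, the container spec below would then carry (sketch):
+ #   args:
+ #     - "--multus-conf-file=/tmp/multus-conf/70-multus.conf"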
+ cni-conf.json: |
+ {
+ "name": "multus-cni-network",
+ "type": "multus",
+ "capabilities": {
+ "portMappings": true
+ },
+ "delegates": [
+ {
+ "cniVersion": "0.3.1",
+ "name": "default-cni-network",
+ "plugins": [
+ {
+ "type": "flannel",
+ "name": "flannel.1",
+ "delegate": {
+ "isDefaultGateway": true,
+ "hairpinMode": true
+ }
+ },
+ {
+ "type": "portmap",
+ "capabilities": {
+ "portMappings": true
+ }
+ }
+ ]
+ }
+ ],
+ "kubeconfig": "/etc/cni/net.d/multus.d/multus.kubeconfig"
+ }
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: kube-multus-ds-amd64
+ namespace: kube-system
+ labels:
+ tier: node
+ app: multus
+ name: multus
+spec:
+ selector:
+ matchLabels:
+ name: multus
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ tier: node
+ app: multus
+ name: multus
+ spec:
+ hostNetwork: true
+ nodeSelector:
+ kubernetes.io/arch: amd64
+ tolerations:
+ - operator: Exists
+ effect: NoSchedule
+ serviceAccountName: multus
+ containers:
+ - name: kube-multus
+ image: nfvpe/multus:v3.4
+ command: ["/entrypoint.sh"]
+ args:
+ - "--multus-conf-file=auto"
+ - "--cni-version=0.3.1"
+ resources:
+ requests:
+ cpu: "100m"
+ memory: "50Mi"
+ limits:
+ cpu: "100m"
+ memory: "50Mi"
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - name: cni
+ mountPath: /host/etc/cni/net.d
+ - name: cnibin
+ mountPath: /host/opt/cni/bin
+ - name: multus-cfg
+ mountPath: /tmp/multus-conf
+ volumes:
+ - name: cni
+ hostPath:
+ path: /etc/cni/net.d
+ - name: cnibin
+ hostPath:
+ path: /opt/cni/bin
+ - name: multus-cfg
+ configMap:
+ name: multus-cni-config
+ items:
+ - key: cni-conf.json
+ path: 70-multus.conf
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: kube-multus-ds-ppc64le
+ namespace: kube-system
+ labels:
+ tier: node
+ app: multus
+ name: multus
+spec:
+ selector:
+ matchLabels:
+ name: multus
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ tier: node
+ app: multus
+ name: multus
+ spec:
+ hostNetwork: true
+ nodeSelector:
+ kubernetes.io/arch: ppc64le
+ tolerations:
+ - operator: Exists
+ effect: NoSchedule
+ serviceAccountName: multus
+ containers:
+ - name: kube-multus
+ # ppc64le support requires multus:latest for now (supported in v3.3 and later).
+ image: nfvpe/multus:latest-ppc64le
+ command: ["/entrypoint.sh"]
+ args:
+ - "--multus-conf-file=auto"
+ - "--cni-version=0.3.1"
+ resources:
+ requests:
+ cpu: "100m"
+ memory: "90Mi"
+ limits:
+ cpu: "100m"
+ memory: "90Mi"
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - name: cni
+ mountPath: /host/etc/cni/net.d
+ - name: cnibin
+ mountPath: /host/opt/cni/bin
+ - name: multus-cfg
+ mountPath: /tmp/multus-conf
+ volumes:
+ - name: cni
+ hostPath:
+ path: /etc/cni/net.d
+ - name: cnibin
+ hostPath:
+ path: /opt/cni/bin
+ - name: multus-cfg
+ configMap:
+ name: multus-cni-config
+ items:
+ - key: cni-conf.json
+ path: 70-multus.conf
+
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/ovs-daemonset.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/ovs-daemonset.yml
new file mode 100644
index 00000000..8a854c06
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/ovs-daemonset.yml
@@ -0,0 +1,101 @@
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: ovs-cni-marker-cr
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - nodes
+ - nodes/status
+ verbs:
+ - get
+ - update
+ - patch
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: ovs-cni-marker-crb
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ovs-cni-marker-cr
+subjects:
+- kind: ServiceAccount
+ name: ovs-cni-marker
+ namespace: kube-system
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: ovs-cni-marker
+ namespace: kube-system
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: ovs-cni-amd64
+ namespace: kube-system
+ labels:
+ tier: node
+ app: ovs-cni
+spec:
+ selector:
+ matchLabels:
+ app: ovs-cni
+ template:
+ metadata:
+ labels:
+ tier: node
+ app: ovs-cni
+ spec:
+ serviceAccountName: ovs-cni-marker
+ hostNetwork: true
+ nodeSelector:
+ beta.kubernetes.io/arch: amd64
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ effect: NoSchedule
+ containers:
+ - name: ovs-cni-plugin
+ image: quay.io/kubevirt/ovs-cni-plugin:latest
+ imagePullPolicy: IfNotPresent
+ resources:
+ requests:
+ cpu: "100m"
+ memory: "50Mi"
+ limits:
+ cpu: "100m"
+ memory: "50Mi"
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - name: cnibin
+ mountPath: /host/opt/cni/bin
+ - name: ovs-cni-marker
+ image: quay.io/kubevirt/ovs-cni-marker:latest
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ privileged: true
+ args:
+ - -node-name
+ - $(NODE_NAME)
+ - -ovs-socket
+ - /host/var/run/openvswitch/db.sock
+ volumeMounts:
+ - name: ovs-var-run
+ mountPath: /host/var/run/openvswitch
+ env:
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ volumes:
+ - name: cnibin
+ hostPath:
+ path: /opt/cni/bin
+ - name: ovs-var-run
+ hostPath:
+ path: /var/run/openvswitch
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/sriov-cni-daemonset.yaml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/sriov-cni-daemonset.yaml
new file mode 100644
index 00000000..6a28c146
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/sriov-cni-daemonset.yaml
@@ -0,0 +1,47 @@
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: kube-sriov-cni-ds-amd64
+ namespace: kube-system
+ labels:
+ tier: node
+ app: sriov-cni
+spec:
+ selector:
+ matchLabels:
+ name: sriov-cni
+ template:
+ metadata:
+ labels:
+ name: sriov-cni
+ tier: node
+ app: sriov-cni
+ spec:
+ hostNetwork: true
+ nodeSelector:
+ beta.kubernetes.io/arch: amd64
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ effect: NoSchedule
+ containers:
+ - name: kube-sriov-cni
+ image: nfvpe/sriov-cni
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ privileged: true
+ resources:
+ requests:
+ cpu: "100m"
+ memory: "50Mi"
+ limits:
+ cpu: "100m"
+ memory: "50Mi"
+ volumeMounts:
+ - name: cnibin
+ mountPath: /host/opt/cni/bin
+ volumes:
+ - name: cnibin
+ hostPath:
+ path: /opt/cni/bin
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/sriov-device-plugin-daemonset.yaml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/sriov-device-plugin-daemonset.yaml
new file mode 100644
index 00000000..9168b98c
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/sriov-device-plugin-daemonset.yaml
@@ -0,0 +1,127 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: sriov-device-plugin
+ namespace: kube-system
+
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: kube-sriov-device-plugin-amd64
+ namespace: kube-system
+ labels:
+ tier: node
+ app: sriovdp
+spec:
+ selector:
+ matchLabels:
+ name: sriov-device-plugin
+ template:
+ metadata:
+ labels:
+ name: sriov-device-plugin
+ tier: node
+ app: sriovdp
+ spec:
+ hostNetwork: true
+ hostPID: true
+ nodeSelector:
+ beta.kubernetes.io/arch: amd64
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ effect: NoSchedule
+ serviceAccountName: sriov-device-plugin
+ containers:
+ - name: kube-sriovdp
+ image: nfvpe/sriov-device-plugin
+ imagePullPolicy: IfNotPresent
+ args:
+ - --log-dir=sriovdp
+ - --log-level=10
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - name: devicesock
+ mountPath: /var/lib/kubelet/
+ readOnly: false
+ - name: log
+ mountPath: /var/log
+ - name: config-volume
+ mountPath: /etc/pcidp
+ volumes:
+ - name: devicesock
+ hostPath:
+ path: /var/lib/kubelet/
+ - name: log
+ hostPath:
+ path: /var/log
+ - name: config-volume
+ configMap:
+ name: sriovdp-config
+ items:
+ - key: config.json
+ path: config.json
+
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: kube-sriov-device-plugin-ppc64le
+ namespace: kube-system
+ labels:
+ tier: node
+ app: sriovdp
+spec:
+ selector:
+ matchLabels:
+ name: sriov-device-plugin
+ template:
+ metadata:
+ labels:
+ name: sriov-device-plugin
+ tier: node
+ app: sriovdp
+ spec:
+ hostNetwork: true
+ hostPID: true
+ nodeSelector:
+ beta.kubernetes.io/arch: ppc64le
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ effect: NoSchedule
+ serviceAccountName: sriov-device-plugin
+ containers:
+ - name: kube-sriovdp
+ image: nfvpe/sriov-device-plugin:ppc64le
+ imagePullPolicy: IfNotPresent
+ args:
+ - --log-dir=sriovdp
+ - --log-level=10
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - name: devicesock
+ mountPath: /var/lib/kubelet/
+ readOnly: false
+ - name: log
+ mountPath: /var/log
+ - name: config-volume
+ mountPath: /etc/pcidp
+ volumes:
+ - name: devicesock
+ hostPath:
+ path: /var/lib/kubelet/
+ - name: log
+ hostPath:
+ path: /var/log
+ - name: config-volume
+ configMap:
+ name: sriovdp-config
+ items:
+ - key: config.json
+ path: config.json
+
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/userspace-daemonset.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/userspace-daemonset.yml
new file mode 100644
index 00000000..74bb520c
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/userspace-daemonset.yml
@@ -0,0 +1,46 @@
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: userspace-cni-amd64
+ namespace: kube-system
+ labels:
+ tier: node
+ app: userspace-cni
+spec:
+ selector:
+ matchLabels:
+ app: userspace-cni
+ template:
+ metadata:
+ labels:
+ tier: node
+ app: userspace-cni
+ spec:
+ hostNetwork: true
+ nodeSelector:
+ beta.kubernetes.io/arch: amd64
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ effect: NoSchedule
+ containers:
+ - name: userspace-cni-plugin
+ image: parthyadav/userspace-cni:latest
+ imagePullPolicy: IfNotPresent
+ resources:
+ requests:
+ cpu: "100m"
+ memory: "50Mi"
+ limits:
+ cpu: "100m"
+ memory: "50Mi"
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - name: cnibin
+ mountPath: /host/opt/cni/bin
+ volumes:
+ - name: cnibin
+ hostPath:
+ path: /opt/cni/bin
+
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-flannel.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-flannel.yml
new file mode 100644
index 00000000..9d0ffda4
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-flannel.yml
@@ -0,0 +1,8 @@
+---
+
+- name: Delete Kube-flannel
+ k8s:
+ state: absent
+ definition: "{{ lookup('file', 'kube-flannel-daemonset.yml') }}"
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-k8s-master.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-k8s-master.yml
new file mode 100644
index 00000000..f797ddb6
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-k8s-master.yml
@@ -0,0 +1,22 @@
+---
+- name: Drain master node
+ command: kubectl drain {{ ansible_hostname }} --delete-local-data --force --ignore-daemonsets
+
+- name: Delete master node
+ command: kubectl delete node {{ ansible_hostname }}
+
+- name: Kubeadm reset (master)
+ shell: yes y | sudo kubeadm reset
+
+- name: Delete /etc/cni/net.d/ (master)
+ command: sudo rm -rf /etc/cni/net.d/
+
+- name: Delete $HOME/.kube/
+ file:
+ path: $HOME/.kube/
+ state: absent
+
+- name: Delete init log file
+ file:
+ path: "{{ token_file }}"
+ state: absent
\ No newline at end of file
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-k8s-workers-drain.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-k8s-workers-drain.yml
new file mode 100644
index 00000000..46ae50ec
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-k8s-workers-drain.yml
@@ -0,0 +1,8 @@
+---
+- name: Drain worker node
+ delegate_to: "{{ groups['master'][0] }}"
+ command: kubectl drain {{ ansible_hostname }} --delete-local-data --force --ignore-daemonsets
+
+- name: Delete worker node
+ delegate_to: "{{ groups['master'][0] }}"
+ command: kubectl delete node {{ ansible_hostname }}
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-k8s-workers-reset.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-k8s-workers-reset.yml
new file mode 100644
index 00000000..62a8c01f
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-k8s-workers-reset.yml
@@ -0,0 +1,11 @@
+---
+- name: Kubeadm reset (worker)
+ shell: yes y | sudo kubeadm reset
+
+- name: Delete /etc/cni/net.d/ (worker)
+ command: sudo rm -rf /etc/cni/net.d/
+
+- name: Remove node_joined.txt
+ file:
+ path: $HOME/node_joined.txt
+ state: absent
\ No newline at end of file
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-kubevirt-ovs.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-kubevirt-ovs.yml
new file mode 100644
index 00000000..30740a44
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-kubevirt-ovs.yml
@@ -0,0 +1,8 @@
+---
+
+- name: Delete ovs-cni-plugin
+ k8s:
+ state: absent
+ definition: "{{ lookup('file', 'ovs-daemonset.yml') }}"
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-multus.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-multus.yml
new file mode 100644
index 00000000..44eabbd1
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-multus.yml
@@ -0,0 +1,8 @@
+---
+
+- name: Delete Multus
+ k8s:
+ state: absent
+ definition: "{{ lookup('file', 'multus-daemonset.yml') }}"
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-sriov.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-sriov.yml
new file mode 100644
index 00000000..6d725ce8
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-sriov.yml
@@ -0,0 +1,30 @@
+---
+
+- name: Delete SRIOV CNI Daemonset
+ k8s:
+ state: absent
+ apply: yes
+ definition: "{{ lookup('file', 'sriov-cni-daemonset.yaml') }}"
+
+- name: Delete SRIOV Device Plugin
+ k8s:
+ state: absent
+ apply: yes
+ definition: "{{ lookup('file', 'sriov-device-plugin-daemonset.yaml') }}"
+
+- name: Delete SRIOV Device Plugin Config
+ k8s:
+ state: absent
+ apply: yes
+ definition: "{{ lookup('file', 'configMap-sriov-device-plugin.yaml') }}"
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-userspace.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-userspace.yml
new file mode 100644
index 00000000..72b3d869
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-userspace.yml
@@ -0,0 +1,8 @@
+---
+
+- name: Delete userspace-cni plugin
+ k8s:
+ state: absent
+ definition: "{{ lookup('file', 'userspace-daemonset.yml') }}"
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/cni-pre-deploy.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/cni-pre-deploy.yml
new file mode 100644
index 00000000..b2f280ef
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/cni-pre-deploy.yml
@@ -0,0 +1,17 @@
+---
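+# The k8s module used by the deploy-*/clear-* task files is backed by the
+# 'openshift' Python client, hence this dependency on the master node.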
+- name: Install openshift python package
+ pip:
+ name: openshift
+ executable: "{{ PIP_executable_version }}"
+ when: inventory_hostname in groups['master']
+ become: yes
+
+- name: Check whether /etc/cni/net.d/ exists
+ stat:
+ path: /etc/cni/net.d
+ register: files_to_delete
+
+- name: Delete /etc/cni/net.d/
+ become: yes
+ command: sudo rm -r /etc/cni/net.d/
+ when: files_to_delete.stat.exists and files_to_delete.stat.isdir
\ No newline at end of file
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/configure_master_node.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/configure_master_node.yml
new file mode 100644
index 00000000..4980e17e
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/configure_master_node.yml
@@ -0,0 +1,14 @@
+---
+- name: Pulling images required for setting up a Kubernetes cluster
+ become: yes
+ command: kubeadm config images pull
+
+- name: Initializing Kubernetes cluster
+ become: yes
+ command: kubeadm init --apiserver-advertise-address={{ kube_ad_addr }} --pod-network-cidr={{ kube_cidr_v }}
+ register: output
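+# The registered stdout contains the "kubeadm join ..." command with the cluster
+# token and CA hash; it is written to {{ token_file }} below so workers can join.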
+
+- name: Storing logs and generated join token for later use
+ copy:
+ content: "{{ output.stdout }}"
+ dest: "{{ token_file }}"
\ No newline at end of file
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-flannel.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-flannel.yml
new file mode 100644
index 00000000..367d682f
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-flannel.yml
@@ -0,0 +1,11 @@
+---
+
+- name: Clean flannel
+ import_tasks: clear-flannel.yml
+
+- name: Deploy Kube-flannel
+ k8s:
+ state: present
+ definition: "{{ lookup('file', 'kube-flannel-daemonset.yml') }}"
+ wait: yes
+
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-kubevirt-ovs.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-kubevirt-ovs.yml
new file mode 100644
index 00000000..9913cae4
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-kubevirt-ovs.yml
@@ -0,0 +1,12 @@
+---
+
+- name: Clean kubevirt-ovs
+ import_tasks: clear-kubevirt-ovs.yml
+
+- name: Deploy ovs-cni-plugin
+ k8s:
+ state: present
+ apply: yes
+ definition: "{{ lookup('file', 'ovs-daemonset.yml') }}"
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-multus.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-multus.yml
new file mode 100644
index 00000000..6fb77e42
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-multus.yml
@@ -0,0 +1,10 @@
+---
+
+- name: Clear Multus
+ import_tasks: clear-multus.yml
+
+- name: Deploy Multus
+ k8s:
+ state: present
+ definition: "{{ lookup('file', 'multus-daemonset.yml') }}"
+
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-sriov.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-sriov.yml
new file mode 100644
index 00000000..aaff5cf0
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-sriov.yml
@@ -0,0 +1,26 @@
+---
+
+- name: Clean sriov
+ import_tasks: clear-sriov.yml
+
+- name: Deploy SRIOV Device Plugin Config
+ k8s:
+ state: present
+ apply: yes
+ definition: "{{ lookup('file', 'configMap-sriov-device-plugin.yaml') }}"
+ wait: yes
+
+- name: Deploy SRIOV Device Plugin
+ k8s:
+ state: present
+ apply: yes
+ definition: "{{ lookup('file', 'sriov-device-plugin-daemonset.yaml') }}"
+
+- name: Deploy SRIOV CNI
+ k8s:
+ state: present
+ apply: yes
+ definition: "{{ lookup('file', 'sriov-cni-daemonset.yaml') }}"
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-userspace.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-userspace.yml
new file mode 100644
index 00000000..32e3b9b1
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-userspace.yml
@@ -0,0 +1,13 @@
+---
+
+- name: Clean userspace-cni
+ import_tasks: clear-userspace.yml
+
+- name: Deploy userspace-cni plugin
+ k8s:
+ state: present
+ apply: yes
+ definition: "{{ lookup('file', 'userspace-daemonset.yml') }}"
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/foldersettings.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/foldersettings.yml
new file mode 100644
index 00000000..1a8c1879
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/foldersettings.yml
@@ -0,0 +1,10 @@
+---
+- name: .kube directory creation in $HOME/
+ file:
+ path: $HOME/.kube
+ state: directory
+
+- name: Copying required files
+ shell: |
+ sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
+ sudo chown $(id -u):$(id -g) $HOME/.kube/config
\ No newline at end of file
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/main.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/main.yml
new file mode 100644
index 00000000..28c3f501
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/main.yml
@@ -0,0 +1,83 @@
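+# Typical invocations of the playbook that includes this role (sketch; the
+# playbook name is an assumption, the tags are the ones defined below):
+#   ansible-playbook k8sclustermanagement.yml --tags "deploy"   # cluster + CNI deploy
+#   ansible-playbook k8sclustermanagement.yml --tags "clear"    # full teardown
+#   ansible-playbook k8sclustermanagement.yml --tags "cni"      # CNI plugins only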
+- name: include master tasks
+ import_tasks: configure_master_node.yml
+ when: inventory_hostname in groups['master']
+ tags: deploy
+
+- name: include folder settings for kube config
+ import_tasks: foldersettings.yml
+ when: inventory_hostname in groups['master']
+ tags: deploy
+
+- name: include join worker tasks
+ import_tasks: workers.yml
+ when: inventory_hostname in groups['workers']
+ tags: deploy, join
+
+- name: cni pre-deploy
+ import_tasks: cni-pre-deploy.yml
+ tags: deploy, cni
+
+- name: deploy flannel
+ import_tasks: deploy-flannel.yml
+ when: inventory_hostname in groups['master']
+ tags: deploy, cni
+
+- name: clear flannel
+ import_tasks: clear-flannel.yml
+ when: inventory_hostname in groups['master']
+ tags: clear
+
+- name: deploy multus
+ import_tasks: deploy-multus.yml
+ when: inventory_hostname in groups['master']
+ tags: deploy, cni
+
+- name: clear multus
+ import_tasks: clear-multus.yml
+ when: inventory_hostname in groups['master']
+ tags: clear
+
+- name: deploy kubevirt-ovs
+ import_tasks: deploy-kubevirt-ovs.yml
+ when: inventory_hostname in groups['master']
+ tags: deploy, cni
+
+- name: clear kubevirt-ovs
+ import_tasks: clear-kubevirt-ovs.yml
+ when: inventory_hostname in groups['master']
+ tags: clear
+
+- name: deploy sriov
+ import_tasks: deploy-sriov.yml
+ when: inventory_hostname in groups['master']
+ tags: deploy, cni
+
+- name: clear sriov
+ import_tasks: clear-sriov.yml
+ when: inventory_hostname in groups['master']
+ tags: clear
+
+- name: deploy userspace
+ import_tasks: deploy-userspace.yml
+ when: inventory_hostname in groups['master']
+ tags: deploy, cni
+
+- name: clear userspace
+ import_tasks: clear-userspace.yml
+ when: inventory_hostname in groups['master']
+ tags: clear
+
+- name: drain and delete workers from master
+ import_tasks: clear-k8s-workers-drain.yml
+ when: inventory_hostname in groups['workers']
+ tags: clear
+
+- name: reset workers
+ import_tasks: clear-k8s-workers-reset.yml
+ when: inventory_hostname in groups['workers']
+ tags: clear
+
+- name: clear master
+ import_tasks: clear-k8s-master.yml
+ when: inventory_hostname in groups['master']
+ tags: clear
\ No newline at end of file
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/workers.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/workers.yml
new file mode 100644
index 00000000..a0a815c4
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/workers.yml
@@ -0,0 +1,15 @@
+---
+- name: check if node is already in cluster
+ delegate_to: "{{ groups.master[0] }}"
+ command: "kubectl get nodes -o name"
+ register: get_node_register
+ changed_when: false
+
+- name: get join command
+ delegate_to: "{{ groups.master[0] }}"
+ command: kubeadm token create --print-join-command
+ register: join_command_raw
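+# join_command_raw.stdout_lines[0] is the standard kubeadm join line, e.g.:
+#   kubeadm join <master-ip>:6443 --token <token> --discovery-token-ca-cert-hash sha256:<hash>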
+
+- name: join cluster
+ shell: "sudo {{ join_command_raw.stdout_lines[0] }} --ignore-preflight-errors=all > $HOME/node_joined.txt"
+ when: ( 'node/' + ansible_hostname ) not in get_node_register.stdout_lines
diff --git a/tools/llc_management/__init__.py b/tools/llc_management/__init__.py
new file mode 100644
index 00000000..4774dc93
--- /dev/null
+++ b/tools/llc_management/__init__.py
@@ -0,0 +1,17 @@
+# Copyright 2017-2018 Spirent Communications.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Wrapper for RMD to perform LLC-Management
+"""
diff --git a/tools/llc_management/rmd.py b/tools/llc_management/rmd.py
new file mode 100644
index 00000000..308dda3c
--- /dev/null
+++ b/tools/llc_management/rmd.py
@@ -0,0 +1,198 @@
+# Copyright 2017-2018 Spirent Communications.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Perform L3-cache allocations for different workloads - VNFs, PMDs, vSwitch, etc. -
+based on the user-defined policies. This is done using Intel-RMD.
+Details about RMD can be found at: https://github.com/intel/rmd
+"""
+
+
+import itertools
+import json
+import logging
+import math
+import socket
+
+from collections import defaultdict
+from stcrestclient import resthttp
+from conf import settings as S
+
+DEFAULT_PORT = 8888
+DEFAULT_SERVER = '127.0.0.1'
+DEFAULT_VERSION = 'v1'
+
+
+def cpumask2coreids(mask):
+ """
+ Convert CPU mask in hex-string to list of core-IDs
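+ e.g. cpumask2coreids('0x6') -> ['1', '2'] (0x6 = 0b110 has bits 1 and 2 set)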
+ """
+ intmask = int(mask, 16)
+ i = 1
+ coreids = []
+ while i <= intmask:
+ if i & intmask:
+ coreids.append(str(math.frexp(i)[1] - 1))
+ i = i << 1
+ return coreids
+
+
+def get_cos(category):
+ """
+ Obtain the Class of Service (COS) for a particular category
+ """
+ return S.getValue(category.upper() + '_COS')
+
+
+def get_minmax(category):
+ """
+ Obtain the min-max values for a particular category
+ """
+ return S.getValue(category.upper() + '_CA')
+
+
+def guest_vm_settings_expanded(cores):
+ """
+ Check if we are running pv+p mode
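+ e.g. ['1', '2'] -> True; ['#1', '2'] -> False ('#' marks an unexpanded entry)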
+ """
+ for core in cores:
+ if isinstance(core, str) and '#' in core:
+ return False
+ return True
+
+
+class IrmdHttp(object):
+ """
+ Intel RMD ReST API wrapper object
+ """
+
+ def __init__(self, server=None, port=None, api_version=None):
+ if not server:
+ server = DEFAULT_SERVER
+ if not port:
+ port = DEFAULT_PORT
+ if not api_version:
+ api_version = DEFAULT_VERSION
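+ # With the defaults above this resolves to http://127.0.0.1:8888/v1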
+ url = resthttp.RestHttp.url('http', server, port, api_version)
+ rest = resthttp.RestHttp(url, None, None, False, True)
+ try:
+ rest.get_request('workloads')
+ except (socket.error, resthttp.ConnectionError,
+ resthttp.RestHttpError):
+ raise RuntimeError('Cannot connect to RMD server: %s:%s' %
+ (server, port))
+ self._rest = rest
+ self.workloadids = []
+ self._logger = logging.getLogger(__name__)
+
+ def setup_cacheways(self, affinity_map):
+ """
+ Sets up the cacheways using RMD APIs.
+ """
+ for cos_cat in affinity_map:
+ if S.getValue('POLICY_TYPE') == 'COS':
+ params = {'core_ids': affinity_map[cos_cat],
+ 'policy': get_cos(cos_cat)}
+ else:
+ minmax = get_minmax(cos_cat)
+ if len(minmax) < 2:
+ return
+ params = {'core_ids': affinity_map[cos_cat],
+ 'min_cache': minmax[0],
+ 'max_cache': minmax[1]}
+ try:
+ _, data = self._rest.post_request('workloads', None,
+ params)
+ if 'id' in data:
+ wl_id = data['id']
+ self.workloadids.append(wl_id)
+
+ except resthttp.RestHttpError as exp:
+ if str(exp).find('already exists') >= 0:
+ raise RuntimeError("The cacheway already exist")
+ else:
+ raise RuntimeError('Failed to connect: ' + str(exp))
+
+ def reset_all_cacheways(self):
+ """
+ Resets the cacheways
+ """
+ try:
+ for wl_id in self.workloadids:
+ self._rest.delete_request('workloads', str(wl_id))
+ except resthttp.RestHttpError as ecp:
+ raise RuntimeError('Failed to connect: ' + str(ecp))
+
+ def log_allocations(self):
+ """
+ Log the current cacheway settings.
+ """
+ try:
+ _, data = self._rest.get_request('workloads')
+ self._logger.info("Current Allocations: %s",
+ json.dumps(data, indent=4, sort_keys=True))
+ except resthttp.RestHttpError as ecp:
+ raise RuntimeError('Failed to connect: ' + str(ecp))
+
+
+class CacheAllocator(object):
+ """
+ This class exposes APIs for VSPERF to perform
+ Cache-allocation management operations.
+ """
+
+ def __init__(self):
+ port = S.getValue('RMD_PORT')
+ api_version = S.getValue('RMD_API_VERSION')
+ server_ip = S.getValue('RMD_SERVER_IP')
+ self.irmd_manager = IrmdHttp(str(server_ip), str(port),
+ str(api_version))
+
+ def setup_llc_allocation(self):
+ """
+ Wrapper for setting up cacheways
+ """
+ cpumap = defaultdict(list)
+ vswitchmask = S.getValue('VSWITCHD_DPDK_CONFIG')['dpdk-lcore-mask']
+ vnfcores = list(itertools.chain.from_iterable(
+ S.getValue('GUEST_CORE_BINDING')))
+ if not guest_vm_settings_expanded(vnfcores):
+ vnfcores = None
+ nncores = None
+ if S.getValue('LOADGEN') == 'StressorVM':
+ nncores = list(itertools.chain.from_iterable(
+ S.getValue('NN_CORE_BINDING')))
+ pmdcores = cpumask2coreids(S.getValue('VSWITCH_PMD_CPU_MASK'))
+ vswitchcores = cpumask2coreids(vswitchmask)
+ if vswitchcores:
+ cpumap['vswitch'] = vswitchcores
+ if vnfcores:
+ cpumap['vnf'] = vnfcores
+ if pmdcores:
+ cpumap['pmd'] = pmdcores
+ if nncores:
+ cpumap['noisevm'] = nncores
+ self.irmd_manager.setup_cacheways(cpumap)
+
+ def cleanup_llc_allocation(self):
+ """
+ Wrapper for cacheway cleanup
+ """
+ self.irmd_manager.reset_all_cacheways()
+
+ def log_allocations(self):
+ """
+ Wrapper for logging cacheway allocations
+ """
+ self.irmd_manager.log_allocations()
diff --git a/tools/lma/ansible-client/ansible.cfg b/tools/lma/ansible-client/ansible.cfg
new file mode 100644
index 00000000..307ef457
--- /dev/null
+++ b/tools/lma/ansible-client/ansible.cfg
@@ -0,0 +1,17 @@
+[defaults]
+inventory = ./hosts
+host_key_checking = false
+
+# additional path to search for roles in
+roles_path = roles
+
+# enable logging
+log_path = ./ansible.log
+
+[privilege_escalation]
+become=True
+become_method=sudo
+become_user=root
+
+[ssh_connection]
+pipelining = True
diff --git a/tools/lma/ansible-client/hosts b/tools/lma/ansible-client/hosts
new file mode 100644
index 00000000..eba586ce
--- /dev/null
+++ b/tools/lma/ansible-client/hosts
@@ -0,0 +1,2 @@
+[all]
+127.0.0.1 ansible_connection=local
diff --git a/tools/lma/ansible-client/playbooks/clean.yaml b/tools/lma/ansible-client/playbooks/clean.yaml
new file mode 100644
index 00000000..4f77b062
--- /dev/null
+++ b/tools/lma/ansible-client/playbooks/clean.yaml
@@ -0,0 +1,25 @@
+# Copyright 2020 Adarsh yadav, Aditya Srivastava
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#clean td-agent
+- name: clean td-agent
+ hosts: all
+ roles:
+ - clean-td-agent
+
+#clean collectd
+- name: clean collectd
+ hosts: all
+ roles:
+ - clean-collectd
diff --git a/tools/lma/ansible-client/playbooks/setup.yaml b/tools/lma/ansible-client/playbooks/setup.yaml
new file mode 100644
index 00000000..c79ee347
--- /dev/null
+++ b/tools/lma/ansible-client/playbooks/setup.yaml
@@ -0,0 +1,28 @@
+# Copyright 2020 Adarsh yadav, Aditya Srivastava
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
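+# Run from tools/lma/ansible-client so that ansible.cfg picks up the ./hosts
+# inventory (sketch): ansible-playbook playbooks/setup.yaml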
+#setup td-agent
+- name: setup td-agent
+ hosts: all
+ roles:
+ - td-agent
+
+- name: setup collectd
+ hosts: all
+ vars_prompt:
+ - name: host_name
+ prompt: "Enter host_name for collectd configuration"
+ private: no
+ roles:
+ - collectd
diff --git a/tools/lma/ansible-client/roles/clean-collectd/tasks/main.yml b/tools/lma/ansible-client/roles/clean-collectd/tasks/main.yml
new file mode 100644
index 00000000..97100cad
--- /dev/null
+++ b/tools/lma/ansible-client/roles/clean-collectd/tasks/main.yml
@@ -0,0 +1,44 @@
+# Copyright 2020 Aditya Srivastava
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+- name: Check and install dependencies
+ yum:
+ name: docker
+ state: present
+
+- name: Install python sdk
+ yum:
+ name: python-docker-py
+ state: present
+
+- name: Stopping collectd container
+ docker_container:
+ name: collectd
+ state: stopped
+
+- name: Removing collectd container
+ docker_container:
+ name: collectd
+ state: absent
+
+# Removes the image (not recommended)
+# - name: Remove image
+#   docker_image:
+#     state: absent
+#     name: opnfv/barometer-collectd
+#     tag: latest
diff --git a/tools/lma/ansible-client/roles/clean-td-agent/tasks/main.yml b/tools/lma/ansible-client/roles/clean-td-agent/tasks/main.yml
new file mode 100644
index 00000000..7c59c698
--- /dev/null
+++ b/tools/lma/ansible-client/roles/clean-td-agent/tasks/main.yml
@@ -0,0 +1,28 @@
+# Copyright 2020 Adarsh yadav, Aditya Srivastava
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+#TD-agent uninstallation
+- name: TD-agent Uninstallation
+ yum:
+ name: td-agent
+ state: absent
+
+- name: Removing td-agent directories
+ file:
+ path: "{{ item }}"
+ state: absent
+ with_items:
+ - /etc/td-agent/
+ - /var/log/td-agent/
diff --git a/tools/lma/ansible-client/roles/collectd/files/collectd.conf.j2 b/tools/lma/ansible-client/roles/collectd/files/collectd.conf.j2
new file mode 100644
index 00000000..ba953e3a
--- /dev/null
+++ b/tools/lma/ansible-client/roles/collectd/files/collectd.conf.j2
@@ -0,0 +1,44 @@
+Hostname "{{ host_name }}"
+Interval 10
+LoadPlugin intel_rdt
+LoadPlugin processes
+LoadPlugin interface
+LoadPlugin network
+LoadPlugin ovs_stats
+LoadPlugin cpu
+LoadPlugin memory
+#LoadPlugin csv
+#LoadPlugin write_http
+#LoadPlugin dpdkstat
+##############################################################################
+# Plugin configuration #
+##############################################################################
+<Plugin processes>
+ ProcessMatch "ovs-vswitchd" "ovs-vswitchd"
+ ProcessMatch "ovsdb-server" "ovsdb-server"
+ ProcessMatch "collectd" "collectd"
+</Plugin>
+
+<Plugin cpu>
+ ReportByCpu true
+ ReportByState true
+ ValuesPercentage true
+ ReportNumCpu true
+ ReportGuestState false
+ SubtractGuestState false
+</Plugin>
+
+<Plugin network>
+ Server "10.10.120.211" "30826"
+</Plugin>
+
+<Plugin ovs_stats>
+ Port "6640"
+ Address "127.0.0.1"
+ Socket "/usr/local/var/run/openvswitch/db.sock"
+ Bridges "vsperf-br0"
+</Plugin>
+
+<Plugin "intel_rdt">
+ Cores "2" "4-5" "6-7" "8" "9" "22" "23" "24" "25" "26" "27"
+</Plugin>
diff --git a/tools/lma/ansible-client/roles/collectd/tasks/main.yml b/tools/lma/ansible-client/roles/collectd/tasks/main.yml
new file mode 100644
index 00000000..0befb22b
--- /dev/null
+++ b/tools/lma/ansible-client/roles/collectd/tasks/main.yml
@@ -0,0 +1,60 @@
+# Copyright 2020 Aditya Srivastava
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+
+# Dependency check
+- name: Check and install dependencies
+ yum:
+ name: ['docker', 'python-docker-py']
+ state: present
+
+- name: Install pip
+ yum:
+ name: python-pip
+ state: present
+
+- name: install docker-py
+ pip:
+ name: docker-py
+
+- name: Cloning barometer
+ git:
+ repo: https://gerrit.opnfv.org/gerrit/barometer
+ dest: /tmp/barometer
+
+- name: Create Folder
+ file:
+ path: /tmp/barometer/docker/src/collectd_sample_configs
+ state: directory
+
+# Build collectd
+- name: Download and Build Image
+ command: chdir=/tmp/ {{ item }}
+ become: true
+ with_items:
+ - docker build -t opnfv/barometer-collectd -f barometer/docker/barometer-collectd/Dockerfile barometer/docker/barometer-collectd
+
+# Configuring collectd
+- name: Ensure collectd is configured
+ template:
+ src: ../files/collectd.conf.j2
+ dest: /tmp/barometer/docker/src/collectd_sample_configs/collectd.conf
+
+# Running Collectd container #####################
+- name: Running collectd
+ command: chdir=/tmp/ {{ item }}
+ become: true
+ with_items:
+ - docker run -tid --name collectd --net=host -v /tmp/barometer/docker/src/collectd_sample_configs:/opt/collectd/etc/collectd.conf.d -v /var/run:/var/run -v /tmp:/tmp --privileged opnfv/barometer-collectd /run_collectd.sh
+ - docker ps
diff --git a/tools/lma/ansible-client/roles/td-agent/files/td-agent.conf b/tools/lma/ansible-client/roles/td-agent/files/td-agent.conf
new file mode 100644
index 00000000..9d656e65
--- /dev/null
+++ b/tools/lma/ansible-client/roles/td-agent/files/td-agent.conf
@@ -0,0 +1,63 @@
+# Copyright 2020 Adarsh yadav, Aditya Srivastava
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+<source>
+ @type tail
+ path /tmp/result*/*.log, /tmp/result*/*.dat, /tmp/result*/*.csv, /tmp/result*/stc-liveresults.dat.*, /var/log/userspace*.log, /var/log/sriovdp/*.log.*, /var/log/pods/**/*.log
+ path_key log_path
+# read_from_head true
+
+ <parse>
+ @type regexp
+ expression ^(?<msg>.*)$
+ </parse>
+
+ tag log.test
+</source>
+
+<filter log.test>
+ @type record_transformer
+ enable_ruby
+ <record>
+ host "#{Socket.gethostname}"
+ </record>
+</filter>
+
+
+<filter log.test>
+ @type parser
+ key_name log_path
+ reserve_data true
+ <parse>
+ @type regexp
+ expression /.*\/(?<file>.*)/
+ </parse>
+</filter>
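+# e.g. a log_path of "/tmp/result0/vsperf.log" yields the record field file=vsperf.log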
+
+<match log.test>
+ @type copy
+
+ <store>
+ @type forward
+ send_timeout 10s
+ <server>
+ host 10.10.120.211
+ port 32224
+ </server>
+ </store>
+
+ <store>
+ @type stdout
+ </store>
+</match>
\ No newline at end of file
diff --git a/tools/lma/ansible-client/roles/td-agent/tasks/main.yml b/tools/lma/ansible-client/roles/td-agent/tasks/main.yml
new file mode 100644
index 00000000..c7f50765
--- /dev/null
+++ b/tools/lma/ansible-client/roles/td-agent/tasks/main.yml
@@ -0,0 +1,30 @@
+# Copyright 2020 Adarsh yadav, Aditya Srivastava
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+#TD-agent setup
+- name: TD-agent installation
+ shell: curl -L https://toolbelt.treasuredata.com/sh/install-redhat-td-agent4.sh | sh
+
+#replace the config file
+- name: Replace the default td-agent configuration file
+ copy:
+ src: ../files/td-agent.conf
+ dest: /etc/td-agent/td-agent.conf
+
+#start the service
+- name: Starting and Enabling the TD-agent services
+ service:
+ name: td-agent
+ state: started
+ enabled: yes
diff --git a/tools/lma/ansible-server/ansible.cfg b/tools/lma/ansible-server/ansible.cfg
new file mode 100644
index 00000000..307ef457
--- /dev/null
+++ b/tools/lma/ansible-server/ansible.cfg
@@ -0,0 +1,17 @@
+[defaults]
+inventory = ./hosts
+host_key_checking = false
+
+# additional path to search for roles in
+roles_path = roles
+
+# enable logging
+log_path = ./ansible.log
+
+[privilege_escalation]
+become=True
+become_method=sudo
+become_user=root
+
+[ssh_connection]
+pipelining = True
diff --git a/tools/lma/ansible-server/group_vars/all.yml b/tools/lma/ansible-server/group_vars/all.yml
new file mode 100644
index 00000000..b0725ff5
--- /dev/null
+++ b/tools/lma/ansible-server/group_vars/all.yml
@@ -0,0 +1,27 @@
+# Copyright 2020 Adarsh yadav, Aditya Srivastava
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#apiserver advertise address
+ad_addr: 10.10.120.211
+
+#pod network cidr
+pod_cidr: 192.168.0.0/16
+
+#token generated by master
+token_file: join_token
+
+#give hostname
+vm3: 'vm3'
+vm2: 'vm2'
+vm1: 'vm1'
diff --git a/tools/lma/ansible-server/hosts b/tools/lma/ansible-server/hosts
new file mode 100644
index 00000000..0a13d754
--- /dev/null
+++ b/tools/lma/ansible-server/hosts
@@ -0,0 +1,12 @@
+[all]
+10.10.120.211 ansible_connection=ssh ansible_ssh_user=root ansible_sudo_pass=P@ssw0rd ansible_ssh_pass=P@ssw0rd
+10.10.120.203 ansible_connection=ssh ansible_ssh_user=root ansible_sudo_pass=P@ssw0rd ansible_ssh_pass=P@ssw0rd
+10.10.120.204 ansible_connection=ssh ansible_ssh_user=root ansible_sudo_pass=P@ssw0rd ansible_ssh_pass=P@ssw0rd
+
+
+[master]
+10.10.120.211 ansible_connection=ssh ansible_ssh_user=root ansible_sudo_pass=P@ssw0rd ansible_ssh_pass=P@ssw0rd
+
+[worker-nodes]
+10.10.120.203 ansible_connection=ssh ansible_ssh_user=root ansible_sudo_pass=P@ssw0rd ansible_ssh_pass=P@ssw0rd
+10.10.120.204 ansible_connection=ssh ansible_ssh_user=root ansible_sudo_pass=P@ssw0rd ansible_ssh_pass=P@ssw0rd
\ No newline at end of file
diff --git a/tools/lma/ansible-server/playbooks/clean.yaml b/tools/lma/ansible-server/playbooks/clean.yaml
new file mode 100644
index 00000000..b4da66da
--- /dev/null
+++ b/tools/lma/ansible-server/playbooks/clean.yaml
@@ -0,0 +1,52 @@
+# Copyright 2020 Adarsh yadav, Aditya Srivastava
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# clean monitoring
+- name: Clean PAG setup
+ hosts: master
+ roles:
+ - clean-monitoring
+
+#clean logging
+- name: Clean EFK setup
+ hosts: master
+ roles:
+ - clean-logging
+
+#IF KUBELET IS RUNNING THEN RUN THIS
+#clean k8s cluster
+- name: Clean k8s cluster
+ hosts: master
+ roles:
+ - clean-k8s-cluster
+
+#reset worker-nodes
+- name: Reset worker-nodes
+ hosts: worker-nodes
+ roles:
+ - clean-k8s-worker-reset
+
+#uninstall pre-requisites for k8s
+- name: uninstall pre-requisites for k8s
+ hosts: all
+ roles:
+ - clean-k8s-pre
+
+#*************************************************************************************************************
+#THIS WILL DELETE DATA OF ELASTICSEARCH
+#*************************************************************************************************************
+# - name: Clean nfs server
+# hosts: all
+# roles:
+# - clean-nfs
diff --git a/tools/lma/ansible-server/playbooks/setup.yaml b/tools/lma/ansible-server/playbooks/setup.yaml
new file mode 100644
index 00000000..1f5ed1f5
--- /dev/null
+++ b/tools/lma/ansible-server/playbooks/setup.yaml
@@ -0,0 +1,44 @@
+# Copyright 2020 Adarsh yadav, Aditya Srivastava
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#Pre-requisites for k8s and NFS server
+- name: Pre-requisites for k8s in all the nodes
+ hosts: all
+ roles:
+ - k8s-pre
+ - nfs
+
+#master setup for k8s
+- name: master setup for k8s
+ hosts: master
+ roles:
+ - k8s-master
+
+#worker setup for k8s
+- name: worker setup for k8s
+ hosts: worker-nodes
+ roles:
+ - k8s-worker
+
+#EFK setup in k8s
+- name: EFK setup in k8s
+ hosts: master
+ roles:
+ - logging
+
+#PAG setup in k8s
+- name: PAG setup in k8s
+ hosts: master
+ roles:
+ - monitoring
diff --git a/tools/lma/ansible-server/roles/clean-k8s-cluster/tasks/main.yml b/tools/lma/ansible-server/roles/clean-k8s-cluster/tasks/main.yml
new file mode 100644
index 00000000..83ac086d
--- /dev/null
+++ b/tools/lma/ansible-server/roles/clean-k8s-cluster/tasks/main.yml
@@ -0,0 +1,34 @@
+# Copyright 2020 Adarsh yadav, Aditya Srivastava
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+#check kubelet is running or not
+- name: check for kubelet
+ shell: "systemctl status kubelet"
+ register: _svc_kubelet
+ failed_when: _svc_kubelet.rc != 0 and ("could not be found" not in _svc_kubelet.stderr)
+
+#IF KUBELET IS RUNNING, THEN
+#reset k8s
+- name: reset k8s
+ shell: |
+ kubectl drain {{vm3}} --delete-local-data --force --ignore-daemonsets
+ kubectl drain {{vm2}} --delete-local-data --force --ignore-daemonsets
+ kubectl drain {{vm1}} --delete-local-data --force --ignore-daemonsets
+ kubectl delete node {{vm3}}
+ kubectl delete node {{vm2}}
+ kubectl delete node {{vm1}}
+ sudo kubeadm reset -f
+ sudo rm $HOME/.kube/config
+ when: "_svc_kubelet.rc == 0"
+
diff --git a/tools/lma/ansible-server/roles/clean-k8s-pre/tasks/main.yml b/tools/lma/ansible-server/roles/clean-k8s-pre/tasks/main.yml
new file mode 100644
index 00000000..6d12bd5f
--- /dev/null
+++ b/tools/lma/ansible-server/roles/clean-k8s-pre/tasks/main.yml
@@ -0,0 +1,65 @@
+# Copyright 2020 Adarsh yadav, Aditya Srivastava
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+
+#Uninstalling K8s
+- name: Uninstalling K8s
+ yum:
+ name: ['kubeadm', 'kubectl', 'kubelet', 'docker-ce']
+ state: absent
+
+#Enabling Swap
+- name: Enabling Swap on all nodes
+ shell: swapon -a
+ ignore_errors: yes
+
+#Uncommenting Swap entries
+- name: Uncommenting Swap entries in /etc/fstab
+ replace:
+ path: /etc/fstab
+ regexp: '^# (/.*swap.*)'
+ replace: '\1'
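+# e.g. "# /dev/mapper/centos-swap swap swap defaults 0 0" becomes
+# "/dev/mapper/centos-swap swap swap defaults 0 0" again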
+
+
+#Starting firewalld
+- name: 'Starting firewall'
+ service:
+ name: firewalld
+ state: started
+ enabled: yes
+
+# Enabling SELinux
+- name: Enabling SELinux on all nodes
+ shell: |
+ setenforce 1
+ sudo sed -i 's/^SELINUX=permissive$/SELINUX=enforcing/' /etc/selinux/config
+
+#removing Docker repo
+- name: removing Docker repo
+ command: yum-config-manager --disable docker-ce-stable
+
+#removing K8s repo
+- name: removing repository details in Kubernetes repo file.
+ blockinfile:
+ path: /etc/yum.repos.d/kubernetes.repo
+ state: absent
+ block: |
+ [kubernetes]
+ name=Kubernetes
+ baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
+ enabled=1
+ gpgcheck=1
+ repo_gpgcheck=1
+ gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
+ https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
diff --git a/tools/lma/ansible-server/roles/clean-k8s-worker-reset/tasks/main.yml b/tools/lma/ansible-server/roles/clean-k8s-worker-reset/tasks/main.yml
new file mode 100644
index 00000000..3ba9c9ea
--- /dev/null
+++ b/tools/lma/ansible-server/roles/clean-k8s-worker-reset/tasks/main.yml
@@ -0,0 +1,26 @@
+# Copyright 2020 Adarsh yadav, Aditya Srivastava
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+#check kubelet is running or not
+- name: check for kubelet
+ shell: "systemctl status kubelet"
+ register: _svc_kubelet
+ failed_when: _svc_kubelet.rc != 0 and ("could not be found" not in _svc_kubelet.stderr)
+
+#IF KUBELET IS RUNNING, THEN
+#reset k8s
+- name: reset k8s
+ command: kubeadm reset -f
+ when: "_svc_kubelet.rc == 0"
+
diff --git a/tools/lma/ansible-server/roles/clean-logging/tasks/main.yml b/tools/lma/ansible-server/roles/clean-logging/tasks/main.yml
new file mode 100644
index 00000000..259065ed
--- /dev/null
+++ b/tools/lma/ansible-server/roles/clean-logging/tasks/main.yml
@@ -0,0 +1,193 @@
+# Copyright 2020 Adarsh yadav
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+#Deleting EFK setup from k8s cluster
+
+#check kubelet is running or not
+- name: check for kubelet
+ shell: "systemctl status kubelet"
+ register: _svc_kubelet
+ failed_when: _svc_kubelet.rc != 0 and ("could not be found" not in _svc_kubelet.stderr)
+
+#***********************************************************************************************************
+#copy all yaml to /tmp/files/
+#***********************************************************************************************************
+- name: copy all yaml to /tmp/files/
+ copy:
+ src: ../../logging/files/
+ dest: /tmp/files/
+
+#***********************************************************************************************************
+#IF KUBELET IS RUNNING, THEN
+#Stop elastalert
+#***********************************************************************************************************
+- name: Delete elastalert config configmap
+ k8s:
+ state: absent
+ src: /tmp/files/elastalert/ealert-conf-cm.yaml
+ namespace: logging
+ when: "_svc_kubelet.rc == 0"
+
+- name: Delete elastalert key configmap
+ k8s:
+ state: absent
+ src: /tmp/files/elastalert/ealert-key-cm.yaml
+ namespace: logging
+ when: "_svc_kubelet.rc == 0"
+
+- name: Delete elastalert rule configmap
+ k8s:
+ state: absent
+ src: /tmp/files/elastalert/ealert-rule-cm.yaml
+ namespace: logging
+ when: "_svc_kubelet.rc == 0"
+
+- name: Delete elastalert pod
+ k8s:
+ state: absent
+ src: /tmp/files/elastalert/elastalert.yaml
+ namespace: logging
+ when: "_svc_kubelet.rc == 0"
+
+#***********************************************************************************************************
+#IF KUBELET IS RUNNING, THEN
+#Stop fluentd
+#***********************************************************************************************************
+
+- name: Delete fluentd service
+ k8s:
+ state: absent
+ src: /tmp/files/fluentd/fluent-service.yaml
+ namespace: logging
+ when: "_svc_kubelet.rc == 0"
+
+- name: Delete fluentd configmap
+ k8s:
+ state: absent
+ src: /tmp/files/fluentd/fluent-cm.yaml
+ namespace: logging
+ when: "_svc_kubelet.rc == 0"
+
+- name: Delete fluentd pod
+ k8s:
+ state: absent
+ src: /tmp/files/fluentd/fluent.yaml
+ namespace: logging
+ when: "_svc_kubelet.rc == 0"
+
+#***********************************************************************************************************
+#IF KUBELET IS RUNNING, THEN
+#Stop nginx
+#***********************************************************************************************************
+- name: Delete nginx service
+ k8s:
+ state: absent
+ src: /tmp/files/nginx/nginx-service.yaml
+ namespace: logging
+ when: "_svc_kubelet.rc == 0"
+
+- name: Delete nginx configmap
+ k8s:
+ state: absent
+ src: /tmp/files/nginx/nginx-conf-cm.yaml
+ namespace: logging
+ when: "_svc_kubelet.rc == 0"
+
+- name: Delete nginx key configmap
+ k8s:
+ state: absent
+ src: /tmp/files/nginx/nginx-key-cm.yaml
+ namespace: logging
+ when: "_svc_kubelet.rc == 0"
+
+- name: Delete nginx pod
+ k8s:
+ state: absent
+ src: /tmp/files/nginx/nginx.yaml
+ namespace: logging
+ when: "_svc_kubelet.rc == 0"
+
+#***********************************************************************************************************
+#IF KUBELET IS RUNNING, THEN
+#Stop Kibana
+#***********************************************************************************************************
+- name: Stopping Kibana
+ k8s:
+ state: absent
+ src: /tmp/files/kibana/kibana.yaml
+ namespace: logging
+ ignore_errors: yes
+ when: "_svc_kubelet.rc == 0"
+
+#***********************************************************************************************************
+#IF KUBELET IS RUNNING, THEN
+#Stop Elasticsearch
+#***********************************************************************************************************
+- name: Stopping Elasticsearch
+ k8s:
+ state: absent
+ src: /tmp/files/elasticsearch/elasticsearch.yaml
+ namespace: logging
+ ignore_errors: yes
+ when: "_svc_kubelet.rc == 0"
+
+#***********************************************************************************************************
+#IF KUBELET IS RUNNING, THEN
+#Stop Elasticsearch operator
+#***********************************************************************************************************
+- name: Stopping Elasticsearch operator
+ shell: kubectl delete -f https://download.elastic.co/downloads/eck/1.2.0/all-in-one.yaml
+ ignore_errors: yes
+ when: "_svc_kubelet.rc == 0"
+
+#***********************************************************************************************************
+#IF KUBELET IS RUNNING, THEN
+#Delete Persistent Volume
+#***********************************************************************************************************
+- name: Deleting Persistent Volume
+ k8s:
+ state: absent
+ src: /tmp/files/persistentVolume.yaml
+ namespace: logging
+ when: "_svc_kubelet.rc == 0"
+
+#***********************************************************************************************************
+#IF KUBELET IS RUNNING, THEN
+#Delete Storage Class
+#***********************************************************************************************************
+- name: Deleting Storage Class
+ k8s:
+ state: absent
+ src: /tmp/files/storageClass.yaml
+ namespace: logging
+ when: "_svc_kubelet.rc == 0"
+
+#***********************************************************************************************************
+#IF KUBELET IS RUNNING, THEN
+#Delete Namespace
+#***********************************************************************************************************
+- name: Deleting Namespace
+ k8s:
+ state: absent
+ src: /tmp/files/namespace.yaml
+ namespace: logging
+ when: "_svc_kubelet.rc == 0"
+
+#***********************************************************************************************************
+#removing /tmp/files
+#***********************************************************************************************************
+- name: Removing /tmp/files
+ file:
+ path: "/tmp/files"
+ state: absent
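Namespace deletion in Kubernetes is asynchronous, so a teardown like the one above can return while resources are still terminating. A hedged follow-up check (assumes kubectl is configured on the target host; not part of this patch):

    - name: Wait until the logging namespace is gone
      command: kubectl get namespace logging
      register: _ns_check
      failed_when: false          # inspect rc ourselves; non-zero rc means deleted
      retries: 10
      delay: 15
      until: _ns_check.rc != 0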
diff --git a/tools/lma/ansible-server/roles/clean-monitoring/tasks/main.yml b/tools/lma/ansible-server/roles/clean-monitoring/tasks/main.yml
new file mode 100644
index 00000000..49943ec0
--- /dev/null
+++ b/tools/lma/ansible-server/roles/clean-monitoring/tasks/main.yml
@@ -0,0 +1,48 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+#Deleting the PAG (Prometheus-Alertmanager-Grafana) setup from the k8s cluster
+
+#check whether kubelet is running
+- name: check for kubelet
+ shell: "systemctl status kubelet"
+ register: _svc_kubelet
+ failed_when: _svc_kubelet.rc != 0 and ("could not be found" not in _svc_kubelet.stderr)
+
+#***********************************************************************************************************
+#copy namespace yaml to /tmp/
+#***********************************************************************************************************
+- name: copy namespace yaml to /tmp/
+ copy:
+ src: ../../monitoring/files/monitoring-namespace.yaml
+ dest: /tmp/monitoring-namespace.yaml
+
+#***********************************************************************************************************
+#Deleting Namespace
+#***********************************************************************************************************
+- name: Deleting Namespace
+ k8s:
+ state: absent
+ src: /tmp/monitoring-namespace.yaml
+ namespace: monitoring
+ when: "_svc_kubelet.rc == 0"
+
+#***********************************************************************************************************
+#removing /tmp/monitoring-namespace.yaml
+#***********************************************************************************************************
+- name: Removing /tmp/monitoring-namespace.yaml
+ file:
+ path: "/tmp/monitoring-namespace.yaml"
+ state: absent
diff --git a/tools/lma/ansible-server/roles/clean-nfs/tasks/main.yml b/tools/lma/ansible-server/roles/clean-nfs/tasks/main.yml
new file mode 100644
index 00000000..157db849
--- /dev/null
+++ b/tools/lma/ansible-server/roles/clean-nfs/tasks/main.yml
@@ -0,0 +1,44 @@
+# Copyright 2020 Adarsh yadav, Aditya Srivastava
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+#Edit /etc/exports
+- name: Edit /etc/exports file for NFS
+ lineinfile:
+ path: /etc/exports
+ line: "{{item.line}}"
+ state: absent
+ with_items:
+ - {line: "/srv/nfs/master *(rw,sync,no_root_squash,no_subtree_check)"}
+ - {line: "/srv/nfs/data *(rw,sync,no_root_squash,no_subtree_check)"}
+ - {line: "/usr/share/monitoring_data/grafana *(rw,sync,no_root_squash,no_subtree_check)"}
+
+#uninstall NFS server
+- name: Uninstalling NFS server utils
+ yum:
+ name: nfs-utils
+ state: absent
+
+#remove Elasticsearch data
+- name: Removing directories for elasticsearch
+ file:
+ path: "/srv/nfs/{{item}}"
+ state: absent
+ with_items:
+ - ['data', 'master']
+
+#remove Grafana data
+- name: Removing Directory for grafana
+ file:
+ path: "/usr/share/monitoring_data/grafana"
+ state: absent
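These tasks undo a matching setup-side role that is not shown in this hunk; the add side would be the same `lineinfile` call with `state: present` (a sketch, reusing one of the export paths above):

    - name: Add an NFS export entry
      lineinfile:
        path: /etc/exports
        line: "/srv/nfs/data *(rw,sync,no_root_squash,no_subtree_check)"
        state: present
        create: yes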
diff --git a/tools/lma/ansible-server/roles/k8s-master/tasks/main.yml b/tools/lma/ansible-server/roles/k8s-master/tasks/main.yml
new file mode 100644
index 00000000..edc8f10b
--- /dev/null
+++ b/tools/lma/ansible-server/roles/k8s-master/tasks/main.yml
@@ -0,0 +1,49 @@
+# Copyright 2020 Adarsh yadav, Aditya Srivastava
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+#pull k8s images
+- name: Pulling images required for setting up a Kubernetes cluster
+ shell: kubeadm config images pull
+
+#reset k8s
+- name: Resetting kubeadm
+ shell: kubeadm reset -f
+
+#init k8s
+- name: Initializing Kubernetes cluster
+ shell: kubeadm init --apiserver-advertise-address {{ad_addr}} --pod-network-cidr={{pod_cidr}}
+
+#Copying required files
+- name: Copying required files
+ shell: |
+ mkdir -p $HOME/.kube
+ sudo cp -f /etc/kubernetes/admin.conf $HOME/.kube/config
+ sudo chown $(id -u):$(id -g) $HOME/.kube/config
+
+#get token
+- name: Storing the join command for later use
+ shell: kubeadm token create --print-join-command
+ register: token
+
+#save token to join worker
+- name: Storing token for worker
+ local_action: copy content={{ token.stdout }} dest={{ token_file }}
+
+#install calico
+- name: Install Network Add-on
+ command: kubectl apply -f https://docs.projectcalico.org/v3.11/manifests/calico.yaml
+
+#Taint master
+- name: Taint master
+ command: kubectl taint nodes --all node-role.kubernetes.io/master-
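The role consumes three variables that are not defined in this file: ad_addr, pod_cidr and token_file. A plausible group_vars sketch (the values are placeholders, not taken from this patch):

    ad_addr: 10.10.120.211       # API server advertise address of the master node
    pod_cidr: 192.168.0.0/16     # default CIDR expected by the Calico v3.11 manifest
    token_file: /tmp/join_token  # where the join command lands on the Ansible controller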
diff --git a/tools/lma/ansible-server/roles/k8s-pre/tasks/main.yml b/tools/lma/ansible-server/roles/k8s-pre/tasks/main.yml
new file mode 100644
index 00000000..95526a28
--- /dev/null
+++ b/tools/lma/ansible-server/roles/k8s-pre/tasks/main.yml
@@ -0,0 +1,73 @@
+# Copyright 2020 Adarsh yadav, Aditya Srivastava
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+
+#Disabling Swap
+- name: Disabling Swap on all nodes
+ shell: swapoff -a
+
+#Commenting Swap entries
+- name: Commenting Swap entries in /etc/fstab
+ replace:
+ path: /etc/fstab
+ regexp: '(^/.*swap*)'
+ replace: '# \1'
+
+#Stopping firewalld
+- name: 'Stopping firewall'
+ service:
+ name: firewalld
+ state: stopped
+ enabled: no
+
+#Disabling SELinux
+- name: Disabling SELinux on all nodes
+ shell: |
+ setenforce 0
+ sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
+
+#add docker repository
+- name: Adding the Docker CE repository
+ shell: yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
+
+#Adding K8s repo
+- name: Adding repository details to the Kubernetes repo file.
+  blockinfile:
+    path: /etc/yum.repos.d/kubernetes.repo
+    create: yes
+ block: |
+ [kubernetes]
+ name=Kubernetes
+ baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
+ enabled=1
+ gpgcheck=1
+ repo_gpgcheck=1
+ gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
+ https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
+
+#installing K8s and Docker
+- name: Installing K8s
+ yum:
+ name: ['kubeadm', 'kubectl', 'kubelet', 'docker-ce']
+ state: present
+
+#Starting docker and kubelet services
+- name: Starting and Enabling the required services
+ service:
+ name: "{{ item }}"
+ state: started
+ enabled: yes
+ with_items:
+ - docker
+ - kubelet
diff --git a/tools/lma/ansible-server/roles/k8s-worker/tasks/main.yml b/tools/lma/ansible-server/roles/k8s-worker/tasks/main.yml
new file mode 100644
index 00000000..89d2b373
--- /dev/null
+++ b/tools/lma/ansible-server/roles/k8s-worker/tasks/main.yml
@@ -0,0 +1,24 @@
+# Copyright 2020 Adarsh yadav, Aditya Srivastava
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+#Worker
+
+- name: Copying token to worker nodes
+ copy: src={{ token_file }} dest=join_token
+
+- name: Joining worker nodes with kubernetes master
+ shell: |
+ kubeadm reset -f
+ cat join_token | tail -1 > out.sh
+ sh out.sh
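Put together, the three roles order naturally in a site playbook; a sketch (the `master` and `workers` host group names are assumptions, not defined in this patch):

    ---
    - hosts: all
      become: yes
      roles: [k8s-pre]
    - hosts: master
      become: yes
      roles: [k8s-master]
    - hosts: workers
      become: yes
      roles: [k8s-worker]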
diff --git a/tools/lma/ansible-server/roles/logging/files/elastalert/ealert-conf-cm.yaml b/tools/lma/ansible-server/roles/logging/files/elastalert/ealert-conf-cm.yaml
new file mode 100644
index 00000000..a320ef75
--- /dev/null
+++ b/tools/lma/ansible-server/roles/logging/files/elastalert/ealert-conf-cm.yaml
@@ -0,0 +1,48 @@
+# Copyright 2020 Adarsh yadav
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: elastalert-config
+data:
+ elastalert.yaml: |
+ # This is the folder that contains the rule yaml files
+ # Any .yaml file will be loaded as a rule
+ rules_folder: rules
+ # How often ElastAlert will query Elasticsearch
+ # The unit can be anything from weeks to seconds
+ run_every:
+ minutes: 1
+ # ElastAlert will buffer results from the most recent
+ # period of time, in case some log sources are not in real time
+ buffer_time:
+ minutes: 15
+
+ scan_subdirectories: false
+
+ # The Elasticsearch hostname for metadata writeback
+ # Note that every rule can have its own Elasticsearch host
+ es_host: logging-es-http
+ es_port: 9200
+ es_username: ${ES_USERNAME}
+ es_password: ${ES_PASSWORD}
+ es_conn_timeout: 120
+ verify_certs: False
+ use_ssl: True
+ client_cert: '/opt/elastalert/key/elastalert.pem'
+ client_key: '/opt/elastalert/key/elastalert.key'
+ writeback_index: elastalert_status
+ writeback_alias: elastalert_alerts
+ alert_time_limit:
+ days: 2
diff --git a/tools/lma/ansible-server/roles/logging/files/elastalert/ealert-key-cm.yaml b/tools/lma/ansible-server/roles/logging/files/elastalert/ealert-key-cm.yaml
new file mode 100644
index 00000000..0c606a9c
--- /dev/null
+++ b/tools/lma/ansible-server/roles/logging/files/elastalert/ealert-key-cm.yaml
@@ -0,0 +1,68 @@
+# Copyright 2020 Adarsh yadav
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: elastalert-key
+data:
+ elastalert.key: |
+ -----BEGIN PRIVATE KEY-----
+ MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQC0uQ+B0gy3VB4w
+ 5CeWOx575lqSUuYvrGW3ILpV1gmj0ZZCMZUGvt4UvaCEaNPIAqNaHPmaslQqJb5C
+ PJH9pMN7vUVp3DACzmYrS4HdROHamn5gjebXs4hq43heLaIB1Kb+4F+7sEY88irK
+ xOevadcN35y5ld7lVUGRsj6JYcweaAeh/YZ/HaBT5RfdGF+x07NDus+mFqT8j3PD
+ rs2+JtEvEoWtjcxwFgloc9GkHsWZoV1AQHgyAWjmDXZtZeV0HQSkl7hWFG9vxTni
+ DvdrdhX0g+D+u8jWnlR4Za4jd64KbTp9C9trSHyMSRIvN5obm/H8O5MQ+sZ+NQ0X
+ PdK92MjbAgMBAAECggEASbRPxrpLxVjhFz91haeGvzErLxHwHvFIam9Gj0tDkzQe
+ +9AM3ztohzzvAhFejevFgzLd+WFRQf8yoQDi6XcQ4p5GeO38Bqj2siGRTRSSp/zq
+ HabBxqbJtA4hQQeLUwPPN5N6d6lke+an3RqBAuE/e8D+whGFXjJvE2SGbLEd9if2
+ uzHj37sPsVi8kRvgZBDOozmt7YFzQVO/1V+4Lw6nz48M3t+hOHaUXY0Yd8nsk5A6
+ kgoDQ4CGUHjtWfSrccZrYNk51Zows9/sX8axfJ94wKJSImWJcuW9PXIQhzT4exnH
+ sPOwY6Noy3nXRk9gcchT60fKpp+tsJZk3ezkwSpgwQKBgQDvsaYcbnIVdFZpaNKF
+ Tmt/w60CmfGeNozRygfi84ot7edUf93cB6WSKChcAE8fbq9Ji5USPNtfbnZfFXsI
+ IyTr2KHW3RkHuDEyu+Lan9JuReEH3QOG83vvN/oYA3J3hqUTCjEGkPjqnoFtdk8L
+ f7WH1jZvXYEMo0C48SXo+yGohQKBgQDBBGkzL928j1QB9NfiNFk70EalDsF8Im2W
+ n8bQ54KYspUybKD/Hmw0jIV7kdu2vhgGC4RYkn9c5qATtulbYJUgUBelaSi0vhXT
+ gfAuO+JIIZ50P+mkkxH/KIUyu1xWUB2jtMulqLLomdoBvfp/u51qCY6fT3WMCB+R
+ ouWLr2oZ3wKBgQCAuas4AaiLFRuDKKRGq0LYLsIvb3VvPmSKFjH+FETVPbrKipEf
+ pYup3p8uKYxUmSDSIoBAdyZpLe2sSuD0Ecu2TXU86yiSGL1zPawrNUHRrv2XN365
+ bvHUGv/Y/aDvyAPHIeYKXLkRZ2ai3rK8vi1Dcitxy4mOu+36ZKezY4tD8QKBgQCd
+ hakJUj4nPd20fwqUnF5a1z5gRGuZkEtZiunp4ZaOYegrL8YwjraGKExjrYTfXcIj
+ ZNDMrDpvKfRoQnWt0mPB7DtwDiNfZmZPqBLI2Kxya6VygBqA6lncoEgcQBY6hsW5
+ rbopZ0UjWTQ3CcFe71GnkUcpMuLetl51L7kgR7dShwKBgQC+vqjhe/h081JGLTo1
+ tKeRUCaDA/V3VHjFKgM5g+S3/KzgU/EaB1rq3Qja1quGv0zHveca3zibdNQi1ENm
+ KSutWh2zQXzzvmycPmVcthhOxaKzRXDjG0mXiA0bnSgK3F2o9t4196RYhIiiSvAH
+ shVjZMTK04h8ciTLIqK/GtZr+g==
+ -----END PRIVATE KEY-----
+ elastalert.pem: |
+ -----BEGIN CERTIFICATE-----
+ MIIDVzCCAj+gAwIBAgIJAORgkR7Y0Nk9MA0GCSqGSIb3DQEBCwUAMEIxCzAJBgNV
+ BAYTAlhYMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQg
+ Q29tcGFueSBMdGQwHhcNMjAwNjI4MTM1NjAwWhcNMjEwNjI4MTM1NjAwWjBCMQsw
+ CQYDVQQGEwJYWDEVMBMGA1UEBwwMRGVmYXVsdCBDaXR5MRwwGgYDVQQKDBNEZWZh
+ dWx0IENvbXBhbnkgTHRkMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA
+ tLkPgdIMt1QeMOQnljsee+ZaklLmL6xltyC6VdYJo9GWQjGVBr7eFL2ghGjTyAKj
+ Whz5mrJUKiW+QjyR/aTDe71FadwwAs5mK0uB3UTh2pp+YI3m17OIauN4Xi2iAdSm
+ /uBfu7BGPPIqysTnr2nXDd+cuZXe5VVBkbI+iWHMHmgHof2Gfx2gU+UX3RhfsdOz
+ Q7rPphak/I9zw67NvibRLxKFrY3McBYJaHPRpB7FmaFdQEB4MgFo5g12bWXldB0E
+ pJe4VhRvb8U54g73a3YV9IPg/rvI1p5UeGWuI3euCm06fQvba0h8jEkSLzeaG5vx
+ /DuTEPrGfjUNFz3SvdjI2wIDAQABo1AwTjAdBgNVHQ4EFgQUFAvjohHTavHmbRbj
+ Yq2h3cq7UMEwHwYDVR0jBBgwFoAUFAvjohHTavHmbRbjYq2h3cq7UMEwDAYDVR0T
+ BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAB9oDASl4OfF/D49i3KtVzjzge4up
+ WssBPYKVwASh3cXfLLe3NdY9ihdCXFd/8Rus0hBGaRPIyR06sZoHRDEfJ2xrRD6g
+ pr4iHRfaoEWqols7+iW0cgQehvw5efEpFL1vg9zK9kOwruS4ZUhDrak6GcO/O8Jh
+ 6lSGmidHSHrQmfqFeTotaezwylV/uHvRZHPvk2JhQfC+vFjn5/iN/0wCeQCwYvOC
+ rePq2ZFdYg/0bS9BYwKsT2w1Z/AU/wIMLmbNB1af+fTBBEQlxb4rAeDb+J9EoSQ5
+ MVP7jm3BVnHQCs6CA4LV4yRQNF2K6GkWem1oUg/H3S2SG8TAUlKpX/1XRw==
+ -----END CERTIFICATE-----
diff --git a/tools/lma/ansible-server/roles/logging/files/elastalert/ealert-rule-cm.yaml b/tools/lma/ansible-server/roles/logging/files/elastalert/ealert-rule-cm.yaml
new file mode 100644
index 00000000..af28b6f6
--- /dev/null
+++ b/tools/lma/ansible-server/roles/logging/files/elastalert/ealert-rule-cm.yaml
@@ -0,0 +1,132 @@
+# Copyright 2020 Adarsh yadav
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: elastalert-rule
+data:
+ rule-node4-vswitch.yaml: |
+    name: vswitch-3-sec-node4
+ type: any
+ index: node4*
+ filter:
+ - range:
+ time_vswitchd:
+ gt: 3 #Greater than
+
+ realert:
+ minutes: 0
+
+ alert: post
+ http_post_url: "http://10.10.120.211:31000/alerts"
+ http_post_static_payload:
+ type: threshold
+ label: vswitchd start time > 3 sec
+ http_post_payload:
+ index: _index
+ log: msg
+ log_path: log_path
+ time_vswitchd: time_vswitchd
+ num_hits: num_hits
+ num_matches: num_matches
+
+ rule-node1-vswitch.yaml: |
+    name: vswitch-3-sec-node1
+ type: any
+ index: node1*
+ filter:
+ - range:
+ time_vswitchd:
+ gt: 3 #Greater than
+
+ realert:
+ minutes: 0
+
+ alert: post
+ http_post_url: "http://10.10.120.211:31000/alerts"
+ http_post_static_payload:
+ type: threshold
+ label: vswitchd start time > 3 sec
+ http_post_payload:
+ index: _index
+ log: msg
+ log_path: log_path
+ time_vswitchd: time_vswitchd
+ num_hits: num_hits
+ num_matches: num_matches
+
+ rule-node4-blacklist.yaml: |
+ name: error-finder-node4
+ type: blacklist
+ compare_key: alert
+ index: node4*
+ blacklist:
+ - "Failed to run test"
+ - "Failed to execute in '30' seconds"
+ - "('Result', 'Failed')"
+ - "could not open socket: connection refused"
+ - "Input/output error"
+ - "dpdk|ERR|EAL: Error - exiting with code: 1"
+ - "Failed to execute in '30' seconds"
+ - "dpdk|ERR|EAL: Driver cannot attach the device"
+ - "dpdk|EMER|Cannot create lock on"
+ - "device not found"
+
+ realert:
+ minutes: 0
+
+ alert: post
+ http_post_url: "http://10.10.120.211:31000/alerts"
+ http_post_static_payload:
+ type: pattern-match
+ label: failed
+ http_post_payload:
+ index: _index
+ log: msg
+ log_path: log_path
+ reason: alert
+ num_hits: num_hits
+ num_matches: num_matches
+ rule-node1-blacklist.yaml: |
+ name: error-finder-node1
+ type: blacklist
+ compare_key: alert
+ index: node1*
+ blacklist:
+ - "Failed to run test"
+ - "Failed to execute in '30' seconds"
+ - "('Result', 'Failed')"
+ - "could not open socket: connection refused"
+ - "Input/output error"
+ - "dpdk|ERR|EAL: Error - exiting with code: 1"
+ - "Failed to execute in '30' seconds"
+ - "dpdk|ERR|EAL: Driver cannot attach the device"
+ - "dpdk|EMER|Cannot create lock on"
+ - "device not found"
+
+ realert:
+ minutes: 0
+
+ alert: post
+ http_post_url: "http://10.10.120.211:31000/alerts"
+ http_post_static_payload:
+ type: pattern-match
+ label: failed
+ http_post_payload:
+ index: _index
+ log: msg
+ log_path: log_path
+ reason: alert
+ num_hits: num_hits
+ num_matches: num_matches
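Covering a further node would follow the same shape. A hypothetical extra key for this ConfigMap (the node name, index pattern and shortened blacklist are invented for illustration):

    rule-node2-blacklist.yaml: |
      name: error-finder-node2
      type: blacklist
      compare_key: alert
      index: node2*
      blacklist:
        - "Failed to run test"
      realert:
        minutes: 0
      alert: post
      http_post_url: "http://10.10.120.211:31000/alerts"

Note that a new key only takes effect once it is also listed under the erule volume items of the elastalert Deployment below, which mounts rule files individually.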
diff --git a/tools/lma/ansible-server/roles/logging/files/elastalert/elastalert.yaml b/tools/lma/ansible-server/roles/logging/files/elastalert/elastalert.yaml
new file mode 100644
index 00000000..9e32e2b7
--- /dev/null
+++ b/tools/lma/ansible-server/roles/logging/files/elastalert/elastalert.yaml
@@ -0,0 +1,76 @@
+# Copyright 2020 Adarsh yadav
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: elastalert
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ run: elastalert
+ template:
+ metadata:
+ labels:
+ run: elastalert
+ spec:
+ volumes:
+ - name: econfig
+ configMap:
+ name: elastalert-config
+ items:
+ - key: elastalert.yaml
+ path: elastalert.yaml
+ - name: erule
+ configMap:
+ name: elastalert-rule
+ items:
+ - key: rule-node4-vswitch.yaml
+ path: rule-node4-vswitch.yaml
+ - key: rule-node4-blacklist.yaml
+ path: rule-node4-blacklist.yaml
+ - key: rule-node1-blacklist.yaml
+ path: rule-node1-blacklist.yaml
+ - name: ekey
+ configMap:
+ name: elastalert-key
+ items:
+ - key: elastalert.key
+ path: elastalert.key
+ - key: elastalert.pem
+ path: elastalert.pem
+ initContainers:
+ - name: init-myservice
+ image: busybox:1.28
+ command: ['sh', '-c', 'until nslookup logging-es-http; do echo "waiting for myservice"; sleep 2; done;']
+ containers:
+ - name: elastalert
+ image: adi0509/elastalert:latest
+ env:
+ - name: ES_USERNAME
+ value: "elastic"
+ - name: ES_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: logging-es-elastic-user
+ key: elastic
+ command: [ "sh", "-c"]
+ args: ["elastalert-create-index --config /opt/elastalert/elastalert.yaml; python -m elastalert.elastalert --config /opt/elastalert/elastalert.yaml"]
+ volumeMounts:
+ - mountPath: /opt/elastalert/
+ name: econfig
+ - mountPath: /opt/elastalert/rules/
+ name: erule
+ - mountPath: /opt/elastalert/key
+ name: ekey
diff --git a/tools/lma/ansible-server/roles/logging/files/elasticsearch/elasticsearch.yaml b/tools/lma/ansible-server/roles/logging/files/elasticsearch/elasticsearch.yaml
new file mode 100644
index 00000000..5b0a8476
--- /dev/null
+++ b/tools/lma/ansible-server/roles/logging/files/elasticsearch/elasticsearch.yaml
@@ -0,0 +1,231 @@
+# Copyright 2020 Adarsh yadav
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+apiVersion: elasticsearch.k8s.elastic.co/v1
+kind: Elasticsearch
+metadata:
+ name: logging
+spec:
+ version: 7.8.0
+ http:
+ service:
+ spec:
+ type: NodePort
+ ports:
+ - name: https
+ nodePort: 31111
+ port: 9200
+ protocol: TCP
+ targetPort: 9200
+ auth:
+ fileRealm:
+ - secretName: custom-user
+ nodeSets:
+ - name: vm1-master
+ count: 1
+ config:
+ node.master: true
+ node.data: false
+ node.attr.zone: vm1
+ cluster.routing.allocation.awareness.attributes: zone
+ volumeClaimTemplates:
+ - metadata:
+ name: elasticsearch-data
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 5Gi
+ storageClassName: log-vm1-master
+ podTemplate:
+ spec:
+ initContainers:
+ - name: sysctl
+ securityContext:
+ privileged: true
+ command: ['sh', '-c', 'sysctl -w vm.max_map_count=262144']
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: kubernetes.io/hostname
+ operator: In
+ values:
+ - vm1
+ - name: vm1-data
+ count: 1
+ config:
+ node.master: false
+ node.data: true
+ node.attr.zone: vm1
+ cluster.routing.allocation.awareness.attributes: zone
+ volumeClaimTemplates:
+ - metadata:
+ name: elasticsearch-data
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 5Gi
+ storageClassName: log-vm1-data
+ podTemplate:
+ spec:
+ initContainers:
+ - name: sysctl
+ securityContext:
+ privileged: true
+ command: ['sh', '-c', 'sysctl -w vm.max_map_count=262144']
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: kubernetes.io/hostname
+ operator: In
+ values:
+ - vm1
+ - name: vm2-master
+ count: 1
+ config:
+ node.master: true
+ node.data: false
+ node.attr.zone: vm2
+ cluster.routing.allocation.awareness.attributes: zone
+ volumeClaimTemplates:
+ - metadata:
+ name: elasticsearch-data
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 5Gi
+ storageClassName: log-vm2-master
+ podTemplate:
+ spec:
+ initContainers:
+ - name: sysctl
+ securityContext:
+ privileged: true
+ command: ['sh', '-c', 'sysctl -w vm.max_map_count=262144']
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: kubernetes.io/hostname
+ operator: In
+ values:
+ - vm2
+ - name: vm2-data
+ count: 1
+ config:
+ node.master: false
+ node.data: true
+ node.attr.zone: vm2
+ cluster.routing.allocation.awareness.attributes: zone
+ volumeClaimTemplates:
+ - metadata:
+ name: elasticsearch-data
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 5Gi
+ storageClassName: log-vm2-data
+ podTemplate:
+ spec:
+ initContainers:
+ - name: sysctl
+ securityContext:
+ privileged: true
+ command: ['sh', '-c', 'sysctl -w vm.max_map_count=262144']
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: kubernetes.io/hostname
+ operator: In
+ values:
+ - vm2
+ - name: vm3-master
+ count: 1
+ config:
+ node.master: true
+ node.data: false
+ node.attr.zone: vm3
+ cluster.routing.allocation.awareness.attributes: zone
+ volumeClaimTemplates:
+ - metadata:
+ name: elasticsearch-data
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 5Gi
+ storageClassName: log-vm3-master
+ podTemplate:
+ spec:
+ initContainers:
+ - name: sysctl
+ securityContext:
+ privileged: true
+ command: ['sh', '-c', 'sysctl -w vm.max_map_count=262144']
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: kubernetes.io/hostname
+ operator: In
+ values:
+ - vm3
+ - name: vm3-data
+ count: 1
+ config:
+ node.master: false
+ node.data: true
+ node.attr.zone: vm3
+ cluster.routing.allocation.awareness.attributes: zone
+ volumeClaimTemplates:
+ - metadata:
+ name: elasticsearch-data
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 5Gi
+ storageClassName: log-vm3-data
+ podTemplate:
+ spec:
+ initContainers:
+ - name: sysctl
+ securityContext:
+ privileged: true
+ command: ['sh', '-c', 'sysctl -w vm.max_map_count=262144']
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: kubernetes.io/hostname
+ operator: In
+ values:
+ - vm3
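Once applied, cluster health can be polled through the NodePort declared above. A hedged check (assumes it runs on a cluster node so port 31111 is reachable on localhost, and uses the elastic-user secret that ECK derives from the cluster name `logging`):

    - name: Wait for the Elasticsearch cluster to report green
      shell: |
        PW=$(kubectl get secret logging-es-elastic-user -n logging \
          -o jsonpath='{.data.elastic}' | base64 -d)
        curl -sk -u "elastic:${PW}" https://localhost:31111/_cluster/health
      register: es_health
      until: "'green' in es_health.stdout"
      retries: 30
      delay: 10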
diff --git a/tools/lma/ansible-server/roles/logging/files/elasticsearch/user-secret.yaml b/tools/lma/ansible-server/roles/logging/files/elasticsearch/user-secret.yaml
new file mode 100644
index 00000000..3e71fe92
--- /dev/null
+++ b/tools/lma/ansible-server/roles/logging/files/elasticsearch/user-secret.yaml
@@ -0,0 +1,23 @@
+# Copyright 2020 Adarsh yadav
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+kind: Secret
+apiVersion: v1
+metadata:
+ name: custom-user
+stringData:
+ users: |-
+ elasticsearch:$2a$10$DzOu7/.Vo2FBDYworbUZe.LNL9tCUl18kpVZ6C/mvkKcXRzYrpmJu
+ users_roles: |-
+ kibana_admin:elasticsearch
+ superuser:elasticsearch
diff --git a/tools/lma/ansible-server/roles/logging/files/fluentd/fluent-cm.yaml b/tools/lma/ansible-server/roles/logging/files/fluentd/fluent-cm.yaml
new file mode 100644
index 00000000..36ff80d6
--- /dev/null
+++ b/tools/lma/ansible-server/roles/logging/files/fluentd/fluent-cm.yaml
@@ -0,0 +1,525 @@
+# Copyright 2020 Adarsh yadav
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: fluentd-config
+data:
+ index_template.json: |
+ {
+ "index_patterns": [
+ "node*"
+ ],
+ "settings": {
+ "index.lifecycle.name": "delete_policy",
+ "number_of_replicas": 1
+ }
+ }
+ fluent.conf: |
+ <source>
+ @type forward
+ port 24224
+ bind 0.0.0.0
+ tag log
+ </source>
+
+    #tag records based on their source file path
+ <match log>
+ @type rewrite_tag_filter
+ #Trex data
+ <rule>
+ key log_path
+ pattern /\/tmp\/result.*\/.*counts.dat/
+ tag countdat.${tag}
+ </rule>
+ <rule>
+ key log_path
+ pattern /\/tmp\/result.*\/.*errors.dat/
+ tag errordat.${tag}
+ </rule>
+ #Spirent data
+ <rule>
+ key log_path
+ pattern /\/tmp\/result.*\/stc-liveresults.dat.tx/
+ tag stcdattx.${tag}
+ </rule>
+ <rule>
+ key log_path
+ pattern /\/tmp\/result.*\/stc-liveresults.dat.rx/
+ tag stcdatrx.${tag}
+ </rule>
+ #Ixia data
+ <rule>
+ key log_path
+ pattern /\/tmp\/result.*\/.*Statistics.csv/
+ tag ixia.${tag}
+ </rule>
+ #log files
+ <rule>
+ key log_path
+ pattern /vsperf-overall/
+ tag vsperf.${tag}
+ </rule>
+ <rule>
+ key log_path
+ pattern /vswitchd/
+ tag vswitchd.${tag}
+ </rule>
+ <rule>
+ key log_path
+ pattern /\/var\/log\/userspace/
+ tag userspace.${tag}
+ </rule>
+ <rule>
+ key log_path
+ pattern /\/var\/log\/sriovdp/
+ tag sriovdp.${tag}
+ </rule>
+ <rule>
+ key log_path
+ pattern /\/var\/log\/pods/
+ tag pods.${tag}
+ </rule>
+ </match>
+
+ #to find error
+ @include error.conf
+
+ #to parse time-series data
+ @include time-series.conf
+
+ #to calculate time analysis
+ @include time-analysis.conf
+
+    #tag records 'node1' when the host is worker and 'node4' when it is pod12-node4
+ <match **.log>
+ @type rewrite_tag_filter
+ <rule>
+ key host
+ pattern /pod12-node4/
+ tag node4
+ </rule>
+ <rule>
+ key host
+ pattern /worker/
+ tag node1
+ </rule>
+ </match>
+
+
+ <filter node1>
+ @type elasticsearch_genid
+ hash_id_key _hash1
+ </filter>
+
+    #send node1 logs to the node1 index in Elasticsearch
+ <match node1>
+ @type copy
+ <store>
+ @type elasticsearch
+ host logging-es-http
+ port 9200
+ scheme https
+ ssl_verify false
+ user "#{ENV['FLUENT_ELASTICSEARCH_USER']}"
+ password "#{ENV['FLUENT_ELASTICSEARCH_PASSWORD']}"
+ logstash_format true
+ logstash_prefix node1
+ logstash_dateformat %Y%m%d
+ flush_interval 1s
+ id_key _hash1
+ remove_keys _hash1
+
+ enable_ilm true
+ application_name ${tag}
+ index_date_pattern ""
+ ilm_policy_id delete_policy
+ template_name delpol-test
+ template_file /fluentd/etc/index_template.json
+ ilm_policy {
+ "policy": {
+ "phases": {
+ "delete": {
+ "min_age": "3m",
+ "actions": {
+ "delete": {}
+ }
+ }
+ }
+ }
+ }
+ </store>
+ <store>
+ @type stdout
+ </store>
+ </match>
+
+ <filter node4>
+ @type elasticsearch_genid
+ hash_id_key _hash4
+ </filter>
+
+    #send node4 logs to the node4 index in Elasticsearch
+ <match node4>
+ @type copy
+ <store>
+ @type elasticsearch
+ host logging-es-http
+ port 9200
+ scheme https
+ ssl_verify false
+ user "#{ENV['FLUENT_ELASTICSEARCH_USER']}"
+ password "#{ENV['FLUENT_ELASTICSEARCH_PASSWORD']}"
+ logstash_format true
+ logstash_prefix node4
+ logstash_dateformat %Y%m%d
+ flush_interval 1s
+ id_key _hash4
+ remove_keys _hash4
+
+ enable_ilm true
+ application_name ${tag}
+ index_date_pattern ""
+ ilm_policy_id delete_policy
+ template_name delpol-test
+ template_file /fluentd/etc/index_template.json
+ ilm_policy {
+ "policy": {
+ "phases": {
+ "delete": {
+ "min_age": "3m",
+ "actions": {
+ "delete": {}
+ }
+ }
+ }
+ }
+ }
+ </store>
+ <store>
+ @type stdout
+ </store>
+ </match>
+ error.conf: |
+ <filter vsperf.log>
+ @type parser
+ reserve_data true
+ key_name msg
+ emit_invalid_record_to_error false
+ <parse>
+ @type regexp
+ expression /(?<alert_time>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3}).*(?<alert>Failed to run test|Failed to execute in '30' seconds|\('Result', 'Failed'\)|could not open socket: connection refused|Input\/output error)/
+ </parse>
+ </filter>
+
+ <filter vswitchd.log>
+ @type parser
+ reserve_data true
+ key_name msg
+ emit_invalid_record_to_error false
+ <parse>
+ @type regexp
+ expression /(?<alert_time>\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z).*(?<alert>dpdk\|ERR\|EAL: Error - exiting with code: 1|Failed to execute in '30' seconds|dpdk\|ERR\|EAL: Driver cannot attach the device|dpdk\|EMER\|Cannot create lock on)/
+ </parse>
+ </filter>
+ <filter vswitchd.log>
+ @type parser
+ reserve_data true
+ key_name msg
+ emit_invalid_record_to_error false
+ <parse>
+ @type regexp
+ expression /(?<alert_time>\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z).*dpdk\|ERR\|VHOST_CONFIG:.*(?<alert>device not found)/
+ </parse>
+ </filter>
+ time-series.conf: |
+ #parse *counts.dat
+ <filter countdat.log>
+ @type parser
+ key_name msg
+ reserve_data true
+ emit_invalid_record_to_error false
+ <parse>
+ @type regexp
+ expression /^(?<ts>[\.\d]*),(?<rx_port>\d*),(?<tx_port>\d*),(?<rx_pkts>[\.\d]*),(?<tx_pkts>[\.\d]*),(?<rx_pps>[\.\d]*),(?<tx_pps>[\.\d]*),(?<rx_bps_num>[\.\d]*),(?<rx_bps_den>[\.\d]*),(?<tx_bps_num>[\.\d]*),(?<tx_bps_den>[\.\d]*)$/
+ types rx_port:integer,tx_port:integer,rx_pkts:float,tx_pkts:float,rx_pps:float,tx_pps:float,rx_bps_num:float,rx_bps_den:float,tx_bps_num:float,tx_bps_den:float
+ </parse>
+ </filter>
+
+ #parse *errors.dat
+ <filter errordat.log>
+ @type parser
+ key_name msg
+ reserve_data true
+ emit_invalid_record_to_error false
+ <parse>
+ @type regexp
+ expression /^(?<ts>[\.\d]*),(?<dropped>[\.\d]*),(?<ooo>[\.\d]*),(?<dup>[\.\d]*),(?<seq_too_high>[\.\d]*),(?<seq_too_low>[\.\d]*)$/
+ types ts:integer,dropped:integer,ooo:integer,dup:integer,seq_too_high:integer,seq_too_low:integer
+ </parse>
+ </filter>
+
+ #parse stc-liveresults.dat.tx
+ <filter stcdattx.log>
+ @type parser
+ key_name msg
+ reserve_data true
+ emit_invalid_record_to_error false
+ <parse>
+ @type regexp
+ expression /^(?<ts>[\.\d]*),(?<StrId>[\.\d]*),(?<BlkId>[\.\d]*),(?<FrCnt>[\.\d]*),(?<FrRate>[\.\d]*),(?<ERxFrCnt>[\.\d]*),(?<OctCnt>[\.\d]*),(?<OctRate>[\.\d]*),(?<bitCnt>[\.\d]*),(?<bitRate>[\.\d]*)$/
+ types ts:integer,StrId:integer,BlkId:integer,FrCnt:integer,FrRate:integer,ERxFrCnt:integer,OctCnt:integer,OctRate:integer,bitCnt:integer,bitRate:integer
+ </parse>
+ </filter>
+
+ #parse stc-liveresults.dat.rx
+ <filter stcdatrx.log>
+ @type parser
+ key_name msg
+ reserve_data true
+ emit_invalid_record_to_error false
+ <parse>
+ @type regexp
+ expression /^(?<ts>[\.\d]*),(.*, |)(?<RxPrt>.*),(?<DrpFrCnt>[\.\d]*),(?<SeqRnLen>[\.\d]*),(?<AvgLat>.*),(?<DrpFrRate>[\.\d]*),(?<FrCnt>[\.\d]*),(?<FrRate>[\.\d]*),(?<MaxLat>[\.\d]*),(?<MinLat>[\.\d]*),(?<OctCnt>[\.\d]*),(?<OctRate>[\.\d]*)$/
+ types ts:integer,DrpFrCnt:integer,SeqRnLen:integer,FrCnt:integer,FrRate:integer,MaxLat:integer,MinLat:integer,OctCnt:integer,OctRate:integer
+ </parse>
+ </filter>
+ time-analysis.conf: |
+    # 1. Test duration      - between the first log line and the last log line.
+    # 2. Setup duration     - between "Creating result directory" and "Class found".
+    # 3. Traffic duration   - between "Starting traffic at 0.1 Gbps speed" and "Traffic Results".
+    # 4. Iteration duration - e.g. between "Starting traffic at 10.0 Gbps" and "Starting traffic at 5.0 Gbps speed".
+    # 5. Reporting duration - between "Traffic Results" and "Write results to file".
+    # 6. Vswitchd start duration - between "Starting vswitchd..." and "send_traffic with <...>".
+
+ <match vsperf.log>
+ @type rewrite_tag_filter
+ <rule>
+ key msg
+ pattern /Creating result directory:/
+ tag firstline.${tag}
+ </rule>
+ <rule>
+ key msg
+ pattern /Write results to file/
+ tag lastline.${tag}
+ </rule>
+
+ <rule>
+ key msg
+ pattern /Class found/
+ tag setupend.${tag}
+ </rule>
+ <rule>
+ key msg
+ pattern /Starting traffic at 0.1 Gbps speed/
+ tag trafficstart.${tag}
+ </rule>
+ <rule>
+ key msg
+ pattern /Traffic Results/
+ tag trafficend.${tag}
+ </rule>
+ <rule>
+ key msg
+ pattern /Starting traffic at 10.0 Gbps/
+ tag iterationstart.${tag}
+ </rule>
+ <rule>
+ key msg
+ pattern /Starting traffic at 5.0 Gbps speed/
+ tag iterationend.${tag}
+ </rule>
+ <rule>
+ key msg
+ pattern /Starting vswitchd/
+ tag vswitchstart.${tag}
+ </rule>
+ <rule>
+ key msg
+ pattern /send_traffic/
+ tag vswitch.${tag}
+ </rule>
+ <rule>
+ key msg
+      pattern /^.*$/
+ tag logs.${tag}
+ </rule>
+ </match>
+
+ #############################################################################################
+    #save the starting log line and append it to the matching ending log line
+ #############################################################################################
+ <filter firstline.**>
+ @type record_transformer
+ enable_ruby true
+ <record>
+ msg ${$vswitch_start="";$reportstart="";$firstline="";$traffic_start="";$iteration_start="";$firstline = record["msg"];return record["msg"];}
+ </record>
+ </filter>
+ <filter lastline.**>
+ @type record_transformer
+ enable_ruby true
+ <record>
+ newmsg ${record["msg"]+" | "+$firstline + " | "+ $reportstart}
+ </record>
+ </filter>
+
+ <filter setupend.**>
+ @type record_transformer
+ enable_ruby true
+ <record>
+ newmsg ${record["msg"]+" "+$firstline}
+ </record>
+ </filter>
+
+ <filter trafficstart.**>
+ @type record_transformer
+ enable_ruby true
+ <record>
+ msg ${if $traffic_start.eql?("");$traffic_start=record["msg"];end;return record["msg"];}
+ </record>
+ </filter>
+ <filter trafficend.**>
+ @type record_transformer
+ enable_ruby true
+ <record>
+ newmsg ${if $reportstart.eql?("");$reportstart=record["msg"];end;return record["msg"]+" "+$traffic_start;}
+ </record>
+ </filter>
+
+ <filter iterationstart.**>
+ @type record_transformer
+ enable_ruby true
+ <record>
+ msg ${if $iteration_start.eql?("");$iteration_start=record["msg"];end;return record["msg"];}
+ </record>
+ </filter>
+ <filter iterationend.**>
+ @type record_transformer
+ enable_ruby true
+ <record>
+ newmsg ${record["msg"]+" "+$iteration_start}
+ </record>
+ </filter>
+
+ <filter vswitchstart.**>
+ @type record_transformer
+ enable_ruby true
+ <record>
+ msg ${$vswitch_start=record["msg"];return record["msg"];}
+ </record>
+ </filter>
+ <filter vswitch.**>
+ @type record_transformer
+ enable_ruby true
+ <record>
+ newmsg ${record["msg"]+" "+$vswitch_start}
+ </record>
+ </filter>
+ #############################################################################################
+ #parse time from the log
+ #############################################################################################
+ <filter setupend.**>
+ @type parser
+ key_name newmsg
+ reserve_data true
+ remove_key_name_field true
+ <parse>
+ @type regexp
+ expression /^(?<setupend>.*) : Class found: Trex. (?<setupstart>.*) : .*$/
+ </parse>
+ </filter>
+ <filter iterationend.**>
+ @type parser
+ key_name newmsg
+ reserve_data true
+ remove_key_name_field true
+ <parse>
+ @type regexp
+ expression /^(?<iterationend>.*) : Starting traffic at 5.0 Gbps speed (?<iterationstart>.*) : Starting traffic at 10.0 Gbps speed$/
+ </parse>
+ </filter>
+ <filter vswitch.**>
+ @type parser
+ key_name newmsg
+ reserve_data true
+ remove_key_name_field true
+ <parse>
+ @type regexp
+ expression /^(?<vswitch>.*) : send_traffic with <.*> (?<vswitchstart>.*) : Starting vswitchd...$/
+ </parse>
+ </filter>
+ <filter trafficend.**>
+ @type parser
+ key_name newmsg
+ reserve_data true
+ remove_key_name_field true
+ <parse>
+ @type regexp
+ expression /^(?<trafficend>.*) : Traffic Results: (?<trafficstart>.*) : Starting traffic at 0.1 Gbps speed/
+ </parse>
+ </filter>
+ <filter lastline.**>
+ @type parser
+ key_name newmsg
+ reserve_data true
+ remove_key_name_field true
+ <parse>
+ @type regexp
+ expression /^(?<lastline>.*) : Write results to file: .* \| (?<firstline>.*) : Creating result directory: .* \| (?<reportstart>.*) : Traffic Results:$/
+ </parse>
+ </filter>
+ #############################################################################################
+    #calculate durations from the paired timestamps
+ #############################################################################################
+ <filter setupend.**>
+ @type record_transformer
+ enable_ruby
+ <record>
+ setup_duration ${ require 'time';Time.parse(record["setupend"])-Time.parse(record["setupstart"]); }
+ </record>
+ </filter>
+ <filter iterationend.**>
+ @type record_transformer
+ enable_ruby
+ <record>
+ iteration_duration ${ require 'time';Time.parse(record["iterationend"])-Time.parse(record["iterationstart"]); }
+ </record>
+ </filter>
+ <filter vswitch.**>
+ @type record_transformer
+ enable_ruby
+ <record>
+ vswitch_duration ${ require 'time';Time.parse(record["vswitch"])-Time.parse(record["vswitchstart"]); }
+ </record>
+ </filter>
+ <filter trafficend.**>
+ @type record_transformer
+ enable_ruby
+ <record>
+ traffic_duration ${ require 'time';Time.parse(record["trafficend"])-Time.parse(record["trafficstart"]); }
+ </record>
+ </filter>
+ <filter lastline.**>
+ @type record_transformer
+ enable_ruby
+ <record>
+ test_duration ${ require 'time';Time.parse(record["lastline"])-Time.parse(record["firstline"]); }
+ </record>
+ <record>
+ report_duration ${ require 'time';Time.parse(record["lastline"])-Time.parse(record["reportstart"]); }
+ </record>
+ </filter>
+ #############################################################################################
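To make the routing concrete, a Trex counts line would travel through this pipeline roughly as follows (all field values invented for illustration):

    # incoming event, tag "log":
    host: pod12-node4
    log_path: /tmp/results0620/trex-counts.dat
    msg: "1593423557.1,0,1,1000,1000,10.5,10.5,8.0,1.0,8.0,1.0"
    # rewrite_tag_filter retags it "countdat.log"; the countdat.log filter in
    # time-series.conf parses msg into typed fields (ts, rx_port, tx_port, ...);
    # the **.log match then retags by host to "node4", and the node4 match
    # ships it to the node4-YYYYMMDD index under the delete_policy ILM policy.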
diff --git a/tools/lma/ansible-server/roles/logging/files/fluentd/fluent-service.yaml b/tools/lma/ansible-server/roles/logging/files/fluentd/fluent-service.yaml
new file mode 100644
index 00000000..9a43b82f
--- /dev/null
+++ b/tools/lma/ansible-server/roles/logging/files/fluentd/fluent-service.yaml
@@ -0,0 +1,34 @@
+# Copyright 2020 Adarsh yadav
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+apiVersion: v1
+kind: Service
+metadata:
+ name: fluentd
+ labels:
+ run: fluentd
+spec:
+ type: NodePort
+ ports:
+ - name: tcp
+ port: 32224
+ targetPort: 24224
+ protocol: TCP
+ nodePort: 32224
+ - name: udp
+ port: 32224
+ targetPort: 24224
+ protocol: UDP
+ nodePort: 32224
+ selector:
+ run: fluentd
diff --git a/tools/lma/ansible-server/roles/logging/files/fluentd/fluent.yaml b/tools/lma/ansible-server/roles/logging/files/fluentd/fluent.yaml
new file mode 100644
index 00000000..3830f682
--- /dev/null
+++ b/tools/lma/ansible-server/roles/logging/files/fluentd/fluent.yaml
@@ -0,0 +1,65 @@
+# Copyright 2020 Adarsh yadav
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: fluentd
+spec:
+ replicas: 2
+ selector:
+ matchLabels:
+ run: fluentd
+ template:
+ metadata:
+ labels:
+ run: fluentd
+ spec:
+ volumes:
+ - name: fconfig
+ configMap:
+ name: fluentd-config
+ items:
+ - key: fluent.conf
+ path: fluent.conf
+ - key: error.conf
+ path: error.conf
+ - key: time-series.conf
+ path: time-series.conf
+ - key: time-analysis.conf
+ path: time-analysis.conf
+ - key: index_template.json
+ path: index_template.json
+ initContainers:
+ - name: init-myservice
+ image: busybox:1.28
+ command: ['sh', '-c', 'until nslookup logging-es-http; do echo "waiting for myservice"; sleep 2; done;']
+ containers:
+ - name: fluentd
+ image: adi0509/fluentd:latest
+ env:
+ - name: FLUENT_ELASTICSEARCH_USER
+ value: "elastic"
+ - name: FLUENT_ELASTICSEARCH_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: logging-es-elastic-user
+ key: elastic
+ ports:
+ - containerPort: 24224
+ protocol: TCP
+ - containerPort: 24224
+ protocol: UDP
+ volumeMounts:
+ - name: fconfig
+ mountPath: /fluentd/etc/
diff --git a/tools/lma/ansible-server/roles/logging/files/kibana/kibana.yaml b/tools/lma/ansible-server/roles/logging/files/kibana/kibana.yaml
new file mode 100644
index 00000000..5ec6937e
--- /dev/null
+++ b/tools/lma/ansible-server/roles/logging/files/kibana/kibana.yaml
@@ -0,0 +1,23 @@
+# Copyright 2020 Adarsh yadav
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+apiVersion: kibana.k8s.elastic.co/v1
+kind: Kibana
+metadata:
+ name: logging
+spec:
+ version: 7.8.0
+ count: 1
+ elasticsearchRef:
+ name: logging
+ namespace: logging
diff --git a/tools/lma/ansible-server/roles/logging/files/namespace.yaml b/tools/lma/ansible-server/roles/logging/files/namespace.yaml
new file mode 100644
index 00000000..6964af5c
--- /dev/null
+++ b/tools/lma/ansible-server/roles/logging/files/namespace.yaml
@@ -0,0 +1,17 @@
+# Copyright 2020 Adarsh yadav
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: logging
diff --git a/tools/lma/ansible-server/roles/logging/files/nginx/nginx-conf-cm.yaml b/tools/lma/ansible-server/roles/logging/files/nginx/nginx-conf-cm.yaml
new file mode 100644
index 00000000..f5a11e80
--- /dev/null
+++ b/tools/lma/ansible-server/roles/logging/files/nginx/nginx-conf-cm.yaml
@@ -0,0 +1,36 @@
+# Copyright 2020 Adarsh yadav
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: nginx-config
+data:
+ default.conf: |
+ server {
+        listen 80 ssl;
+        # 'ssl on' is deprecated and removed in newer nginx; TLS is enabled on the listen directive
+ ssl_certificate /etc/ssl/certs/kibana-access.pem;
+ ssl_certificate_key /etc/ssl/private/kibana-access.key;
+
+ location / {
+ proxy_pass https://logging-kb-http:5601;
+ proxy_http_version 1.1;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection 'upgrade';
+ proxy_set_header Host $host;
+ proxy_cache_bypass $http_upgrade;
+ proxy_read_timeout 300s;
+ proxy_connect_timeout 75s;
+ }
+ }
diff --git a/tools/lma/ansible-server/roles/logging/files/nginx/nginx-key-cm.yaml b/tools/lma/ansible-server/roles/logging/files/nginx/nginx-key-cm.yaml
new file mode 100644
index 00000000..93d7d6ec
--- /dev/null
+++ b/tools/lma/ansible-server/roles/logging/files/nginx/nginx-key-cm.yaml
@@ -0,0 +1,68 @@
+# Copyright 2020 Adarsh yadav
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: nginx-key
+data:
+ kibana-access.key: |
+ -----BEGIN PRIVATE KEY-----
+ MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDI92SBrcEdzxqS
+ rt883bVcj4F4RuKtm+AjjOEhbFUd3XOp5Wa5NzyYQSTP9ZJHG0dYiTAKOJBVcgbc
+ YRMNUAzHAIskf1q2/SvwyLNRMJLkBD5HHjbnEcuKQm/+nPdzkfvo2zfRNTDPKS83
+ HqFQ779hT8ZLkSzoPuR0QD17ZNWYVMZv/r9wqnjX8U/k5AjrJOIxuaO9nGAgv2Pu
+ Qm6wuU8UBEaMRgPVHQ3ztflQr9QPr/S6HU0cl4Gu+Nwid6iC1RVYxANNq7E7wRvq
+ GMKRS5cA9Nlnu/b7IEI4LSx5yeTSDzwmZKTNnUWi2cpqk30M4G4cUokoz9bP+62I
+ YWEh3B8HAgMBAAECggEBAI1luzqepTSzBhBUp88sczGX6tFUlqLt/Ism0TPyBAVK
+ TdopBNima6T4mM0VDIGpSM6bX8ihObRU0Uz3pC8GtqbB1CSu0oXTpbn5jGlAkumJ
+ rsPdF2YHGD3ENwZfLKANA8A3lZNGKHxpjsXqcDgBJ5dxSKTclUsnDRhaJqgOL1bI
+ d9QCXdA1vbpxHDJWSo73E7omv3AyHi3HxMWU4gzyerUFSMFGqm0W5dPeeresNE3a
+ bv9/46YdykufuRuJZqsUDLCgUUcJPhbE5iOrB4iv8oaDqT0onxwzRQTSgidPxbp2
+ EmjVHpFCACltOKSqELM4+PQFCk8xUBya8HWD5UHrVDkCgYEA4y3WwmhtLUT/g3G3
+ cowvmxjgPl6xqkqTA7Xcdc3sk+6/jS1kayT5TL1qfpd1QL/K617jva9mfSMZ8ei9
+ Y7M/2QkSb0uHKulGR0+if+7sT0L8OYO/OE7c+HTZmZK4hD1CCJN2M34D9Qo2fzQ6
+ 4v+AO1wGiAtiNev0YIBKYNSco+sCgYEA4nY8m93XuC19z991sFRvE0UBeKcN2esg
+ TwY9UuYHJ56s+6UozkUgZArwYFW8LWFeIjkrrKELBNDsmJtTZ006TyUWxY/ccdjV
+ fJZTLV3niv6IQzy74aOmXV2vtNjxyBlllT9mvig6T0t0TvAtolsuSVHBL09zxcy4
+ wN4pGIfqllUCgYBYLq/hMKXIX7MK87YwqYfFHWfV7e3q2x2r4AjeVXuShKcoBsmm
+ 6Wg3yIKw9tuVsZzzthaSx6XxxxFIHH5/V9Hdzi6wstGZ74jPH3NFU5m4vpinPqOY
+ GMyfSMQ6X4BuHFUofQzxueWRVVCIGd8Nw/2jjPogDsMliRyH5OR6J61R1wKBgEa6
+ 8SEpf7fJlZL4UzS4mlylX9lEK+JVOqkT5NFggPmR6KtMIVuTYZN9iyg7fuOZlqIP
+ wyFOxzdA3bSoRrtr9ntDtUINNaflNoCMHvx7aNcTupFthazqxQpCOZ+9Zn691+lu
+ fPOFcvjTM0d4YnhkDCfgPfs90IYF8+phOOqtgMplAoGBAI+mcaUH7ADYxlONCi1E
+ gNHRvHJRBdQBaydKUfPxbe3vS5QJb8Gb5RU46vDl3w+YHUVwUi+Hj68zuKExXxhD
+ 9CGTAQIejtHWScZ1Djl3bcvNa/czHyuNVsGwvJ3fy1JzpxRmUUMPSdJ90A1n57Tk
+ LFEmZhwaj7YF869wfKngQ57d
+ -----END PRIVATE KEY-----
+ kibana-access.pem: |
+ -----BEGIN CERTIFICATE-----
+ MIIDVzCCAj+gAwIBAgIJAIQzf1mxHsvgMA0GCSqGSIb3DQEBCwUAMEIxCzAJBgNV
+ BAYTAlhYMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQg
+ Q29tcGFueSBMdGQwHhcNMjAwNjI1MTY1NzQ3WhcNMjEwNjI1MTY1NzQ3WjBCMQsw
+ CQYDVQQGEwJYWDEVMBMGA1UEBwwMRGVmYXVsdCBDaXR5MRwwGgYDVQQKDBNEZWZh
+ dWx0IENvbXBhbnkgTHRkMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA
+ yPdkga3BHc8akq7fPN21XI+BeEbirZvgI4zhIWxVHd1zqeVmuTc8mEEkz/WSRxtH
+ WIkwCjiQVXIG3GETDVAMxwCLJH9atv0r8MizUTCS5AQ+Rx425xHLikJv/pz3c5H7
+ 6Ns30TUwzykvNx6hUO+/YU/GS5Es6D7kdEA9e2TVmFTGb/6/cKp41/FP5OQI6yTi
+ MbmjvZxgIL9j7kJusLlPFARGjEYD1R0N87X5UK/UD6/0uh1NHJeBrvjcIneogtUV
+ WMQDTauxO8Eb6hjCkUuXAPTZZ7v2+yBCOC0secnk0g88JmSkzZ1FotnKapN9DOBu
+ HFKJKM/Wz/utiGFhIdwfBwIDAQABo1AwTjAdBgNVHQ4EFgQUrz/R+M2XkTTfjrau
+ VVBW6+pdatgwHwYDVR0jBBgwFoAUrz/R+M2XkTTfjrauVVBW6+pdatgwDAYDVR0T
+ BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAyIhJLwg9oTil0Rb1zbYQb0Mr0UYz
+ rlS4f8QkxygkGLAZ8q9VkR+NpKfqhYDSHofGg5Yg5/p54NRJh5M4ASuM7N9AK0LH
+ KbCvS+YRNWhmo+7H7zjDNkV8FbzG41nkt9jQjaKFF7GdKr4HkWvupMX6PwsAZ0jI
+ b2Y6QzFQP9wF0QoBHrK42u3eWbfYv2IIDd6xsV90ilKRDtKkCiI4dyKGK46YDyZB
+ 3eqJ08Pm67HDbxQLydRXkNJvd33PASRgE/VOh44n3xWG+Gu4IMz7EO/4monyuv1Q
+ V2v1A9NV+ZnAq4PT7WJY7fWYavDUr+kwxMAGNQkG/Cg3X4FYrRwrq6gk7Q==
+ -----END CERTIFICATE-----
diff --git a/tools/lma/ansible-server/roles/logging/files/nginx/nginx-service.yaml b/tools/lma/ansible-server/roles/logging/files/nginx/nginx-service.yaml
new file mode 100644
index 00000000..8aea53dd
--- /dev/null
+++ b/tools/lma/ansible-server/roles/logging/files/nginx/nginx-service.yaml
@@ -0,0 +1,28 @@
+# Copyright 2020 Adarsh yadav
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+apiVersion: v1
+kind: Service
+metadata:
+ name: nginx
+ labels:
+ run: nginx
+spec:
+ type: NodePort
+ ports:
+ - port: 8000
+ targetPort: 80
+ protocol: TCP
+ nodePort: 32000
+ selector:
+ run: nginx
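+
+# Once deployed, Kibana is reachable through this nginx proxy on any cluster
+# node at port 32000 (forwarded to the pods' port 80). A hedged smoke test --
+# the scheme depends on the default.conf shipped in the nginx-config ConfigMap:
+#
+#   curl -k https://<k8s-node-ip>:32000/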
diff --git a/tools/lma/ansible-server/roles/logging/files/nginx/nginx.yaml b/tools/lma/ansible-server/roles/logging/files/nginx/nginx.yaml
new file mode 100644
index 00000000..fdf5c835
--- /dev/null
+++ b/tools/lma/ansible-server/roles/logging/files/nginx/nginx.yaml
@@ -0,0 +1,58 @@
+# Copyright 2020 Adarsh yadav
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: nginx
+spec:
+ replicas: 2
+ selector:
+ matchLabels:
+ run: nginx
+ template:
+ metadata:
+ labels:
+ run: nginx
+ spec:
+ volumes:
+ - name: nconfig
+ configMap:
+ name: nginx-config
+ items:
+ - key: default.conf
+ path: default.conf
+ - name: nkey
+ configMap:
+ name: nginx-key
+ items:
+ - key: kibana-access.key
+ path: kibana-access.key
+ - key: kibana-access.pem
+ path: kibana-access.pem
+ initContainers:
+ - name: init-myservice
+ image: busybox:1.28
+ command: ['sh', '-c', 'until nslookup logging-kb-http; do echo "waiting for logging-kb-http"; sleep 2; done;']
+ containers:
+ - name: nginx
+ image: nginx
+ volumeMounts:
+ - mountPath: /etc/nginx/conf.d/
+ name: nconfig
+ - mountPath: /etc/ssl/certs/
+ name: nkey
+ - mountPath: /etc/ssl/private/
+ name: nkey
+ ports:
+ - containerPort: 80
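+
+# The init container holds the nginx pods back until the logging-kb-http
+# service (created by kibana.yaml) resolves in cluster DNS, so the proxy never
+# starts pointing at a Kibana that is not yet up. Progress can be watched with:
+#
+#   kubectl -n logging get pods -l run=nginx -w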
diff --git a/tools/lma/ansible-server/roles/logging/files/persistentVolume.yaml b/tools/lma/ansible-server/roles/logging/files/persistentVolume.yaml
new file mode 100644
index 00000000..c1a96077
--- /dev/null
+++ b/tools/lma/ansible-server/roles/logging/files/persistentVolume.yaml
@@ -0,0 +1,105 @@
+# Copyright 2020 Adarsh yadav
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: pv-master-vm1
+spec:
+ capacity:
+ storage: 5Gi
+ accessModes:
+ - ReadWriteOnce
+ persistentVolumeReclaimPolicy: Retain
+ storageClassName: log-vm1-master
+ nfs:
+ server: 10.10.120.211
+ path: "/srv/nfs/master"
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: pv-data-vm1
+spec:
+ capacity:
+ storage: 5Gi
+ accessModes:
+ - ReadWriteOnce
+ persistentVolumeReclaimPolicy: Retain
+ storageClassName: log-vm1-data
+ nfs:
+ server: 10.10.120.211
+ path: "/srv/nfs/data"
+
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: pv-master-vm2
+spec:
+ capacity:
+ storage: 5Gi
+ accessModes:
+ - ReadWriteOnce
+ persistentVolumeReclaimPolicy: Retain
+ storageClassName: log-vm2-master
+ nfs:
+ server: 10.10.120.203
+ path: "/srv/nfs/master"
+
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: pv-data-vm2
+spec:
+ capacity:
+ storage: 5Gi
+ accessModes:
+ - ReadWriteOnce
+ persistentVolumeReclaimPolicy: Retain
+ storageClassName: log-vm2-data
+ nfs:
+ server: 10.10.120.203
+ path: "/srv/nfs/data"
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: pv-master-vm3
+spec:
+ capacity:
+ storage: 5Gi
+ accessModes:
+ - ReadWriteOnce
+ persistentVolumeReclaimPolicy: Retain
+ storageClassName: log-vm3-master
+ nfs:
+ server: 10.10.120.204
+ path: "/srv/nfs/master"
+
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: pv-data-vm3
+spec:
+ capacity:
+ storage: 5Gi
+ accessModes:
+ - ReadWriteOnce
+ persistentVolumeReclaimPolicy: Retain
+ storageClassName: log-vm3-data
+ nfs:
+ server: 10.10.120.204
+ path: "/srv/nfs/data"
diff --git a/tools/lma/ansible-server/roles/logging/files/storageClass.yaml b/tools/lma/ansible-server/roles/logging/files/storageClass.yaml
new file mode 100644
index 00000000..a2f1e3aa
--- /dev/null
+++ b/tools/lma/ansible-server/roles/logging/files/storageClass.yaml
@@ -0,0 +1,73 @@
+# Copyright 2020 Adarsh yadav
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#storage class for VM1 master
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+ name: log-vm1-master
+reclaimPolicy: Retain
+provisioner: kubernetes.io/no-provisioner
+volumeBindingMode: Immediate
+allowVolumeExpansion: true
+---
+#storage class for VM1 data
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+ name: log-vm1-data
+reclaimPolicy: Retain
+provisioner: kubernetes.io/no-provisioner
+volumeBindingMode: Immediate
+allowVolumeExpansion: true
+---
+#storage class for VM2 master
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+ name: log-vm2-master
+reclaimPolicy: Retain
+provisioner: kubernetes.io/no-provisioner
+volumeBindingMode: Immediate
+allowVolumeExpansion: true
+---
+#storage class for VM2 data
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+ name: log-vm2-data
+reclaimPolicy: Retain
+provisioner: kubernetes.io/no-provisioner
+volumeBindingMode: Immediate
+allowVolumeExpansion: true
+---
+#storage class for VM3 master
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+ name: log-vm3-master
+reclaimPolicy: Retain
+provisioner: kubernetes.io/no-provisioner
+volumeBindingMode: Immediate
+allowVolumeExpansion: true
+---
+#storage class for VM3 data
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+ name: log-vm3-data
+reclaimPolicy: Retain
+provisioner: kubernetes.io/no-provisioner
+volumeBindingMode: Immediate
+allowVolumeExpansion: true
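+
+# kubernetes.io/no-provisioner disables dynamic provisioning: a claim in one of
+# these classes binds only to a matching pre-created PV from
+# persistentVolume.yaml. A claim would look roughly like this (a sketch; the
+# claim name is hypothetical):
+#
+#   apiVersion: v1
+#   kind: PersistentVolumeClaim
+#   metadata:
+#     name: example-log-claim
+#   spec:
+#     storageClassName: log-vm1-master
+#     accessModes:
+#       - ReadWriteOnce
+#     resources:
+#       requests:
+#         storage: 5Gi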
diff --git a/tools/lma/ansible-server/roles/logging/tasks/main.yml b/tools/lma/ansible-server/roles/logging/tasks/main.yml
new file mode 100644
index 00000000..dcbf4d4d
--- /dev/null
+++ b/tools/lma/ansible-server/roles/logging/tasks/main.yml
@@ -0,0 +1,165 @@
+# Copyright 2020 Adarsh yadav
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+#EFK setup in k8s cluster
+
+#***********************************************************************************************************
+#copy all yaml to /tmp/files/
+#***********************************************************************************************************
+- name: copy all yaml to /tmp/files/
+ copy:
+ src: ../files/
+ dest: /tmp/files/
+
+#***********************************************************************************************************
+#Creating Namespace
+#***********************************************************************************************************
+- name: Creating Namespace
+ k8s:
+ state: present
+ src: /tmp/files/namespace.yaml
+ namespace: logging
+
+#***********************************************************************************************************
+#creating Storage Class
+#***********************************************************************************************************
+- name: creating Storage Class
+ k8s:
+ state: present
+ src: /tmp/files/storageClass.yaml
+ namespace: logging
+
+#***********************************************************************************************************
+#creating Persistent Volume
+#***********************************************************************************************************
+- name: creating Persistent Volume
+ k8s:
+ state: present
+ src: /tmp/files/persistentVolume.yaml
+ namespace: logging
+
+#***********************************************************************************************************
+#add Elasticsearch user secret
+#***********************************************************************************************************
+- name: add Elasticsearch user secret
+ k8s:
+ state: present
+ src: /tmp/files/elasticsearch/user-secret.yaml
+ namespace: logging
+
+#***********************************************************************************************************
+#Starting Elasticsearch operator
+#***********************************************************************************************************
+- name: Starting Elasticsearch operator
+ shell: kubectl apply -f https://download.elastic.co/downloads/eck/1.2.0/all-in-one.yaml
+ ignore_errors: yes
+
+#***********************************************************************************************************
+#Starting Elasticsearch
+#***********************************************************************************************************
+- name: Starting Elasticsearch
+ k8s:
+ state: present
+ src: /tmp/files/elasticsearch/elasticsearch.yaml
+ namespace: logging
+
+#***********************************************************************************************************
+#Starting Kibana
+#***********************************************************************************************************
+- name: Starting Kibana
+ k8s:
+ state: present
+ src: /tmp/files/kibana/kibana.yaml
+ namespace: logging
+
+#***********************************************************************************************************
+#Starting nginx
+#***********************************************************************************************************
+- name: creating nginx configmap
+ k8s:
+ state: present
+ src: /tmp/files/nginx/nginx-conf-cm.yaml
+ namespace: logging
+
+- name: creating nginx key configmap
+ k8s:
+ state: present
+ src: /tmp/files/nginx/nginx-key-cm.yaml
+ namespace: logging
+
+- name: creating nginx deployment
+ k8s:
+ state: present
+ src: /tmp/files/nginx/nginx.yaml
+ namespace: logging
+
+- name: creating nginx service
+ k8s:
+ state: present
+ src: /tmp/files/nginx/nginx-service.yaml
+ namespace: logging
+#***********************************************************************************************************
+#Starting fluentd
+#***********************************************************************************************************
+- name: creating fluentd configmap
+ k8s:
+ state: present
+ src: /tmp/files/fluentd/fluent-cm.yaml
+ namespace: logging
+
+- name: creating fluentd pod
+ k8s:
+ state: present
+ src: /tmp/files/fluentd/fluent.yaml
+ namespace: logging
+
+- name: creating fluentd service
+ k8s:
+ state: present
+ src: /tmp/files/fluentd/fluent-service.yaml
+ namespace: logging
+#***********************************************************************************************************
+#Starting elastalert
+#***********************************************************************************************************
+- name: creating elastalert config configmap
+ k8s:
+ state: present
+ src: /tmp/files/elastalert/ealert-conf-cm.yaml
+ namespace: logging
+
+- name: creating elastalert key configmap
+ k8s:
+ state: present
+ src: /tmp/files/elastalert/ealert-key-cm.yaml
+ namespace: logging
+
+- name: creating elastalert rule configmap
+ k8s:
+ state: present
+ src: /tmp/files/elastalert/ealert-rule-cm.yaml
+ namespace: logging
+
+- name: creating elastalert pod
+ k8s:
+ state: present
+ src: /tmp/files/elastalert/elastalert.yaml
+ namespace: logging
+
+#***********************************************************************************************************
+#removing /tmp/files
+#***********************************************************************************************************
+- name: Removing /tmp/files
+ file:
+ path: "/tmp/files"
+ state: absent
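+
+#***********************************************************************************************************
+#usage note
+#***********************************************************************************************************
+# The k8s module used above needs the `openshift` Python client and a reachable
+# kubeconfig on the target host. Assuming a site playbook that applies this
+# role (playbook and inventory names are illustrative, not part of this
+# change), a run would look roughly like:
+#
+#   ansible-playbook -i inventory site.yml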
diff --git a/tools/lma/ansible-server/roles/monitoring/files/alertmanager/alertmanager-config.yaml b/tools/lma/ansible-server/roles/monitoring/files/alertmanager/alertmanager-config.yaml
new file mode 100644
index 00000000..7b9abc47
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/alertmanager/alertmanager-config.yaml
@@ -0,0 +1,37 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: alertmanager-config
+ namespace: monitoring
+data:
+ config.yml: |-
+ global:
+ route:
+ receiver: "webhook"
+ group_by: ['alertname', 'priority']
+ group_wait: 1s
+ group_interval: 5s
+ repeat_interval: 5s
+ routes:
+ - match:
+ severity: critical
+
+ receivers:
+ - name: "webhook"
+ webhook_configs:
+ - url: 'http://10.10.120.20/alertmanager'
+ send_resolved: true
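+
+# All alerts land on the single "webhook" receiver at 10.10.120.20; the
+# critical sub-route defines no receiver of its own, so it inherits "webhook"
+# from the root route. The file can be validated before deploying (assuming
+# amtool is available locally):
+#
+#   amtool check-config config.yml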
diff --git a/tools/lma/ansible-server/roles/monitoring/files/alertmanager/alertmanager-deployment.yaml b/tools/lma/ansible-server/roles/monitoring/files/alertmanager/alertmanager-deployment.yaml
new file mode 100644
index 00000000..f1c3d78e
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/alertmanager/alertmanager-deployment.yaml
@@ -0,0 +1,62 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app: alertmanager
+ adi10hero.monitoring: alertmanager
+ name: alertmanager
+ namespace: monitoring
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: alertmanager
+ adi10hero.monitoring: alertmanager
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: alertmanager
+ labels:
+ app: alertmanager
+ adi10hero.monitoring: alertmanager
+ spec:
+ containers:
+ - name: alertmanager
+ image: prom/alertmanager
+ args:
+ - --config.file=/etc/alertmanager/config.yml
+ - --storage.path=/alertmanager
+ - --cluster.peer=alertmanager1:6783
+ - --cluster.listen-address=0.0.0.0:6783
+ ports:
+ - containerPort: 9093
+ - containerPort: 6783
+ securityContext:
+ runAsUser: 0
+ volumeMounts:
+ - name: config-volume
+ mountPath: /etc/alertmanager
+ - name: alertmanager
+ mountPath: /alertmanager
+ restartPolicy: Always
+ volumes:
+ - name: config-volume
+ configMap:
+ name: alertmanager-config
+ - name: alertmanager
+ emptyDir: {}
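+
+# This deployment pairs with alertmanager1-deployment.yaml below: each instance
+# names the other as --cluster.peer on port 6783, so the two Alertmanagers
+# gossip state and deduplicate notifications between themselves.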
diff --git a/tools/lma/ansible-server/roles/monitoring/files/alertmanager/alertmanager-service.yaml b/tools/lma/ansible-server/roles/monitoring/files/alertmanager/alertmanager-service.yaml
new file mode 100644
index 00000000..c67517d3
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/alertmanager/alertmanager-service.yaml
@@ -0,0 +1,41 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ adi10hero.monitoring: alertmanager
+ app: alertmanager
+ name: alertmanager
+ namespace: monitoring
+ annotations:
+ prometheus.io/scrape: 'true'
+ prometheus.io/path: /
+ prometheus.io/port: '8080'
+
+spec:
+ selector:
+ app: alertmanager
+ adi10hero.monitoring: alertmanager
+ type: NodePort
+ ports:
+ - name: "9093"
+ port: 9093
+ targetPort: 9093
+ nodePort: 30930
+ - name: "6783"
+ port: 6783
+ targetPort: 6783
+ nodePort: 30679
diff --git a/tools/lma/ansible-server/roles/monitoring/files/alertmanager/alertmanager1-deployment.yaml b/tools/lma/ansible-server/roles/monitoring/files/alertmanager/alertmanager1-deployment.yaml
new file mode 100644
index 00000000..18b76456
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/alertmanager/alertmanager1-deployment.yaml
@@ -0,0 +1,62 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app: alertmanager1
+ adi10hero.monitoring: alertmanager1
+ name: alertmanager1
+ namespace: monitoring
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: alertmanager1
+ adi10hero.monitoring: alertmanager1
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: alertmanager1
+ labels:
+ app: alertmanager1
+ adi10hero.monitoring: alertmanager1
+ spec:
+ containers:
+ - name: alertmanager1
+ image: prom/alertmanager
+ args:
+ - --config.file=/etc/alertmanager/config.yml
+ - --storage.path=/alertmanager
+ - --cluster.peer=alertmanager:6783
+ - --cluster.listen-address=0.0.0.0:6783
+ ports:
+ - containerPort: 9093
+ - containerPort: 6783
+ securityContext:
+ runAsUser: 0
+ volumeMounts:
+ - name: config-volume
+ mountPath: /etc/alertmanager
+ - name: alertmanager
+ mountPath: /alertmanager
+ restartPolicy: Always
+ volumes:
+ - name: config-volume
+ configMap:
+ name: alertmanager-config
+ - name: alertmanager
+ emptyDir: {}
diff --git a/tools/lma/ansible-server/roles/monitoring/files/alertmanager/alertmanager1-service.yaml b/tools/lma/ansible-server/roles/monitoring/files/alertmanager/alertmanager1-service.yaml
new file mode 100644
index 00000000..66d0d2b1
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/alertmanager/alertmanager1-service.yaml
@@ -0,0 +1,42 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ adi10hero.monitoring: alertmanager1
+ app: alertmanager1
+ name: alertmanager1
+ namespace: monitoring
+ annotations:
+ prometheus.io/scrape: 'true'
+ prometheus.io/path: /
+ prometheus.io/port: '8080'
+
+spec:
+ selector:
+ app: alertmanager1
+ adi10hero.monitoring: alertmanager1
+ type: NodePort
+ ports:
+ - name: "9093"
+ port: 9093
+ targetPort: 9093
+ nodePort: 30931
+ - name: "6783"
+ port: 6783
+ targetPort: 6783
+ nodePort: 30678
+
diff --git a/tools/lma/ansible-server/roles/monitoring/files/cadvisor/cadvisor-deamonset.yaml b/tools/lma/ansible-server/roles/monitoring/files/cadvisor/cadvisor-deamonset.yaml
new file mode 100644
index 00000000..6a62985e
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/cadvisor/cadvisor-deamonset.yaml
@@ -0,0 +1,79 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: cadvisor
+ namespace: monitoring
+ labels:
+ adi10hero.monitoring: cadvisor
+ app: cadvisor
+spec:
+ selector:
+ matchLabels:
+ app: cadvisor
+ adi10hero.monitoring: cadvisor
+ template:
+ metadata:
+ name: cadvisor
+ labels:
+ adi10hero.monitoring: cadvisor
+ app: cadvisor
+ spec:
+ containers:
+ - image: gcr.io/google-containers/cadvisor
+ name: cadvisor
+ ports:
+ - containerPort: 8080
+ securityContext:
+ runAsUser: 0
+ volumeMounts:
+ - mountPath: /rootfs
+ name: cadvisor-hostpath0
+ readOnly: true
+ - mountPath: /var/run
+ name: cadvisor-hostpath1
+ - mountPath: /sys
+ name: cadvisor-hostpath2
+ readOnly: true
+ - mountPath: /sys/fs/cgroup
+ name: cadvisor-hostpath3
+ readOnly: true
+ - mountPath: /dev/disk
+ name: cadvisor-hostpath4
+ readOnly: true
+ - mountPath: /var/lib/docker
+ name: cadvisor-hostpath5
+ readOnly: true
+ restartPolicy: Always
+ volumes:
+ - hostPath:
+ path: /
+ name: cadvisor-hostpath0
+ - hostPath:
+ path: /var/run
+ name: cadvisor-hostpath1
+ - hostPath:
+ path: /sys
+ name: cadvisor-hostpath2
+ - hostPath:
+ path: /cgroup
+ name: cadvisor-hostpath3
+ - hostPath:
+ path: /dev/disk/
+ name: cadvisor-hostpath4
+ - hostPath:
+ path: /var/lib/docker/
+ name: cadvisor-hostpath5
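+
+# cAdvisor runs on every node with read-only mounts of the host filesystem and
+# exposes container metrics on port 8080. A quick smoke test (the pod name is
+# cluster-specific):
+#
+#   kubectl -n monitoring get pods -l app=cadvisor -o name
+#   kubectl -n monitoring port-forward <cadvisor-pod> 8080:8080
+#   curl -s localhost:8080/metrics | head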
diff --git a/tools/lma/ansible-server/roles/monitoring/files/cadvisor/cadvisor-service.yaml b/tools/lma/ansible-server/roles/monitoring/files/cadvisor/cadvisor-service.yaml
new file mode 100644
index 00000000..734240b8
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/cadvisor/cadvisor-service.yaml
@@ -0,0 +1,30 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: cadvisor
+ adi10hero.monitoring: cadvisor
+ name: cadvisor
+ namespace: monitoring
+spec:
+ ports:
+ - name: "8080"
+ port: 8080
+ targetPort: 8080
+ selector:
+ app: cadvisor
+ adi10hero.monitoring: cadvisor
diff --git a/tools/lma/ansible-server/roles/monitoring/files/collectd-exporter/collectd-exporter-deployment.yaml b/tools/lma/ansible-server/roles/monitoring/files/collectd-exporter/collectd-exporter-deployment.yaml
new file mode 100644
index 00000000..b6bfe0b6
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/collectd-exporter/collectd-exporter-deployment.yaml
@@ -0,0 +1,51 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: collectd-exporter
+ namespace: monitoring
+ labels:
+ app: collectd-exporter
+ adi10hero.monitoring: collectd-exporter
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: collectd-exporter
+ adi10hero.monitoring: collectd-exporter
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: collectd-exporter
+ labels:
+ app: collectd-exporter
+ adi10hero.monitoring: collectd-exporter
+ spec:
+ containers:
+ - args:
+ - --collectd.listen-address=0.0.0.0:25826
+ image: prom/collectd-exporter
+ name: collectd-exporter
+ ports:
+ - containerPort: 9103
+ - containerPort: 25826
+ protocol: UDP
+ securityContext:
+ runAsUser: 0
+ restartPolicy: Always
+ volumes: null
+
diff --git a/tools/lma/ansible-server/roles/monitoring/files/collectd-exporter/collectd-exporter-service.yaml b/tools/lma/ansible-server/roles/monitoring/files/collectd-exporter/collectd-exporter-service.yaml
new file mode 100644
index 00000000..5609d04a
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/collectd-exporter/collectd-exporter-service.yaml
@@ -0,0 +1,35 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: Service
+metadata:
+ name: collectd-exporter
+ namespace: monitoring
+ labels:
+ app: collectd-exporter
+ adi10hero.monitoring: collectd-exporter
+spec:
+ ports:
+ - name: "9103"
+ port: 9103
+ nodePort: 30103
+ - name: "25826"
+ port: 25826
+ protocol: UDP
+ nodePort: 30826
+ selector:
+ app: collectd-exporter
+ adi10hero.monitoring: collectd-exporter
+ type: NodePort
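+
+# collectd agents on the monitored hosts push to this exporter over the UDP
+# NodePort; Prometheus then scrapes the translated metrics from port 9103
+# (nodePort 30103). A minimal collectd.conf sketch for the sending side (the
+# node address is an assumption for illustration):
+#
+#   LoadPlugin network
+#   <Plugin network>
+#     Server "<k8s-node-ip>" "30826"
+#   </Plugin>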
diff --git a/tools/lma/ansible-server/roles/monitoring/files/grafana/grafana-datasource-config.yaml b/tools/lma/ansible-server/roles/monitoring/files/grafana/grafana-datasource-config.yaml
new file mode 100644
index 00000000..e2b8c9fa
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/grafana/grafana-datasource-config.yaml
@@ -0,0 +1,35 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: grafana-datasources
+ namespace: monitoring
+data:
+ prometheus.yaml: |-
+ {
+ "apiVersion": 1,
+ "datasources": [
+ {
+ "access":"proxy",
+ "editable": true,
+ "name": "prometheus",
+ "orgId": 1,
+ "type": "prometheus",
+ "url": "http://prometheus-main:9090",
+ "version": 1
+ }
+ ]
+ }
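+
+# Grafana reads this file from /etc/grafana/provisioning/datasources at startup
+# (mounted by grafana-deployment.yaml). JSON is a subset of YAML, so the block
+# above is a valid provisioning file pointing at the prometheus-main service
+# defined in main-prometheus-service.yaml.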
diff --git a/tools/lma/ansible-server/roles/monitoring/files/grafana/grafana-deployment.yaml b/tools/lma/ansible-server/roles/monitoring/files/grafana/grafana-deployment.yaml
new file mode 100644
index 00000000..afb00948
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/grafana/grafana-deployment.yaml
@@ -0,0 +1,68 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ adi10hero.monitoring: grafana
+ app: grafana
+ name: grafana
+ namespace: monitoring
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ adi10hero.monitoring: grafana
+ app: grafana
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: grafana
+ labels:
+ adi10hero.monitoring: grafana
+ app: grafana
+ spec:
+ containers:
+ - name: grafana
+ image: grafana/grafana
+ ports:
+ - containerPort: 3000
+ env:
+ - name: GF_SECURITY_ADMIN_PASSWORD
+ value: admin
+ - name: GF_SECURITY_ADMIN_USER
+ value: admin
+ - name: GF_SERVER_DOMAIN
+ value: 10.10.120.20
+ - name: GF_SERVER_ROOT_URL
+ value: "%(protocol)s://%(domain)s:/metrics"
+ securityContext:
+ runAsUser: 0
+ volumeMounts:
+ - mountPath: /var/lib/grafana
+ name: grafana-storage
+ - mountPath: /etc/grafana/provisioning/datasources
+ name: grafana-datasources
+ readOnly: false
+ restartPolicy: Always
+ volumes:
+ - name: grafana-storage
+ persistentVolumeClaim:
+ claimName: grafana-pvc
+ - name: grafana-datasources
+ configMap:
+ defaultMode: 420
+ name: grafana-datasources
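+
+# Grafana starts with the admin/admin credentials set above (worth changing
+# outside a lab) and persists its state on the NFS-backed PVC below. Once the
+# grafana service is applied it answers on nodePort 30000, e.g.:
+#
+#   curl -s http://<k8s-node-ip>:30000/api/health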
diff --git a/tools/lma/ansible-server/roles/monitoring/files/grafana/grafana-pv.yaml b/tools/lma/ansible-server/roles/monitoring/files/grafana/grafana-pv.yaml
new file mode 100644
index 00000000..06bcc31b
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/grafana/grafana-pv.yaml
@@ -0,0 +1,31 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: grafana-pv
+ namespace: monitoring
+ labels:
+ app: grafana-pv
+ adi10hero.monitoring: grafana-pv
+spec:
+ storageClassName: monitoring
+ capacity:
+ storage: 5Gi
+ accessModes:
+ - ReadWriteMany
+ nfs:
+ server: 10.10.120.211
+ path: "/usr/share/monitoring_data/grafana"
diff --git a/tools/lma/ansible-server/roles/monitoring/files/grafana/grafana-pvc.yaml b/tools/lma/ansible-server/roles/monitoring/files/grafana/grafana-pvc.yaml
new file mode 100644
index 00000000..2c2955c8
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/grafana/grafana-pvc.yaml
@@ -0,0 +1,33 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: grafana-pvc
+ namespace: monitoring
+ labels:
+ app: grafana-pvc
+ adi10hero.monitoring: grafana-pvc
+spec:
+ accessModes:
+ - ReadWriteMany
+ storageClassName: monitoring
+ resources:
+ requests:
+ storage: 4Gi
+ selector:
+ matchLabels:
+ app: grafana-pv
+ adi10hero.monitoring: grafana-pv
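+
+# The label selector pins this claim to grafana-pv specifically, so the 4Gi
+# request binds to that 5Gi NFS volume rather than to any other PV carrying the
+# "monitoring" storage class.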
diff --git a/tools/lma/ansible-server/roles/monitoring/files/grafana/grafana-service.yaml b/tools/lma/ansible-server/roles/monitoring/files/grafana/grafana-service.yaml
new file mode 100644
index 00000000..d1c9c9cc
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/grafana/grafana-service.yaml
@@ -0,0 +1,36 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: Service
+metadata:
+ name: grafana
+ namespace: monitoring
+ labels:
+ app: grafana
+ adi10hero.monitoring: grafana
+ annotations:
+ prometheus.io/scrape: 'true'
+ prometheus.io/port: '3000'
+spec:
+ selector:
+ app: grafana
+ adi10hero.monitoring: grafana
+ type: NodePort
+ ports:
+ - name: "3000"
+ port: 3000
+ targetPort: 3000
+ nodePort: 30000
+
diff --git a/tools/lma/ansible-server/roles/monitoring/files/kube-state-metrics/kube-state-metrics-deployment.yaml b/tools/lma/ansible-server/roles/monitoring/files/kube-state-metrics/kube-state-metrics-deployment.yaml
new file mode 100644
index 00000000..af3c5469
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/kube-state-metrics/kube-state-metrics-deployment.yaml
@@ -0,0 +1,36 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: kube-state-metrics
+ namespace: kube-system
+spec:
+ selector:
+ matchLabels:
+ app: kube-state-metrics
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ app: kube-state-metrics
+ spec:
+ #serviceAccountName: prometheus
+ containers:
+ - name: kube-state-metrics
+ image: quay.io/coreos/kube-state-metrics:v1.2.0
+ ports:
+ - containerPort: 8080
+ name: monitoring
diff --git a/tools/lma/ansible-server/roles/monitoring/files/kube-state-metrics/kube-state-metrics-service.yaml b/tools/lma/ansible-server/roles/monitoring/files/kube-state-metrics/kube-state-metrics-service.yaml
new file mode 100644
index 00000000..8d294391
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/kube-state-metrics/kube-state-metrics-service.yaml
@@ -0,0 +1,26 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+kind: Service
+apiVersion: v1
+metadata:
+ name: kube-state-metrics
+ namespace: kube-system
+spec:
+ selector:
+ app: kube-state-metrics
+ ports:
+ - protocol: TCP
+ port: 8080
+ targetPort: 8080
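+
+# kube-state-metrics turns Kubernetes object state into metrics; the kube_*
+# series referenced by the "kubernetes" alert group in prometheus-config.yaml
+# all come from this endpoint. Quick check:
+#
+#   kubectl -n kube-system port-forward svc/kube-state-metrics 8080:8080
+#   curl -s localhost:8080/metrics | grep kube_node_status_condition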
diff --git a/tools/lma/ansible-server/roles/monitoring/files/monitoring-namespace.yaml b/tools/lma/ansible-server/roles/monitoring/files/monitoring-namespace.yaml
new file mode 100644
index 00000000..f1c9b889
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/monitoring-namespace.yaml
@@ -0,0 +1,18 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: monitoring
diff --git a/tools/lma/ansible-server/roles/monitoring/files/node-exporter/nodeexporter-daemonset.yaml b/tools/lma/ansible-server/roles/monitoring/files/node-exporter/nodeexporter-daemonset.yaml
new file mode 100644
index 00000000..9334b2f4
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/node-exporter/nodeexporter-daemonset.yaml
@@ -0,0 +1,80 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: node-exporter-daemonset
+ namespace: monitoring
+ labels:
+ app: node-exporter
+ adi10hero.monitoring: node-exporter
+spec:
+ selector:
+ matchLabels:
+ app: node-exporter
+ adi10hero.monitoring: node-exporter
+ template:
+ metadata:
+ labels:
+ app: node-exporter
+ adi10hero.monitoring: node-exporter
+ annotations:
+ prometheus.io/scrape: "true"
+ prometheus.io/port: "9100"
+ spec:
+ hostPID: true
+ hostIPC: true
+ hostNetwork: true
+ containers:
+ - ports:
+ - containerPort: 9100
+ protocol: TCP
+ resources:
+ requests:
+ cpu: 0.15
+ securityContext:
+ runAsUser: 0
+ privileged: true
+ image: prom/node-exporter:v0.15.2
+ args:
+ - --path.procfs
+ - /host/proc
+ - --path.sysfs
+ - /host/sys
+ - --collector.filesystem.ignored-mount-points
+ - '"^/(sys|proc|dev|host|etc)($|/)"'
+ name: node-exporter
+ volumeMounts:
+ - name: dev
+ mountPath: /host/dev
+ - name: proc
+ mountPath: /host/proc
+ - name: sys
+ mountPath: /host/sys
+ - name: rootfs
+ mountPath: /rootfs
+ volumes:
+ - name: proc
+ hostPath:
+ path: /proc
+ - name: dev
+ hostPath:
+ path: /dev
+ - name: sys
+ hostPath:
+ path: /sys
+ - name: rootfs
+ hostPath:
+ path: /
diff --git a/tools/lma/ansible-server/roles/monitoring/files/node-exporter/nodeexporter-service.yaml b/tools/lma/ansible-server/roles/monitoring/files/node-exporter/nodeexporter-service.yaml
new file mode 100644
index 00000000..dd0aea4d
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/node-exporter/nodeexporter-service.yaml
@@ -0,0 +1,33 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ adi10hero.monitoring: node-exporter
+ app: node-exporter
+ name: node-exporter
+ namespace: monitoring
+ annotations:
+ prometheus.io/scrape: "true"
+ prometheus.io/port: "9100"
+spec:
+ ports:
+ - name: "node-exporter"
+ port: 9100
+ targetPort: 9100
+ selector:
+ adi10hero.monitoring: node-exporter
+ app: node-exporter
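+
+# Because the DaemonSet runs with hostNetwork, node-exporter binds port 9100
+# directly on every node, and the scrape annotations let Prometheus pick each
+# instance up automatically. From any cluster node:
+#
+#   curl -s http://localhost:9100/metrics | grep node_load1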
diff --git a/tools/lma/ansible-server/roles/monitoring/files/prometheus/main-prometheus-service.yaml b/tools/lma/ansible-server/roles/monitoring/files/prometheus/main-prometheus-service.yaml
new file mode 100644
index 00000000..58b220a8
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/prometheus/main-prometheus-service.yaml
@@ -0,0 +1,35 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ adi10hero.monitoring: prometheus-main
+ app: prometheus-main
+ name: prometheus-main
+ namespace: monitoring
+ annotations:
+ prometheus.io/scrape: 'true'
+ prometheus.io/port: '9090'
+spec:
+ type: NodePort
+ ports:
+ - name: prometheus-main
+ protocol: TCP
+ port: 9090
+ nodePort: 30902
+ selector:
+ adi10hero.monitoring: prometheus1
+ app: prometheus
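+
+# Note the selector: this "main" service fronts the pods labelled
+# adi10hero.monitoring=prometheus1 (defined elsewhere in this change) and is
+# the endpoint the Grafana datasource provisioning targets as
+# http://prometheus-main:9090; externally it is reachable on nodePort 30902.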
diff --git a/tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus-config.yaml b/tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus-config.yaml
new file mode 100644
index 00000000..917f978f
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus-config.yaml
@@ -0,0 +1,609 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: prometheus-config
+ namespace: monitoring
+data:
+ alert.rules: |-
+ groups:
+ - name: targets
+ rules:
+ - alert: MonitorServiceDown
+ expr: up == 0
+ for: 30s
+ labels:
+ severity: critical
+ annotations:
+ summary: "Monitor service non-operational"
+ description: "Service {{ $labels.instance }} is down."
+ - alert: HighCpuLoad
+ expr: node_load1 > 1.9
+ for: 15s
+ labels:
+ severity: critical
+ annotations:
+ summary: "Service under high load"
+ description: "Docker host is under high load, the avg load 1m is at {{ $value}}. Reported by instance {{ $labels.instance }} of job {{ $labels.job }}."
+
+ - name: host and hardware
+ rules:
+ - alert: HostHighCpuLoad
+ expr: 100 - (avg by(instance) (irate(node_cpu_seconds_total{mode="idle"}[5m])) * 100) > 80
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Host high CPU load (instance {{ $labels.instance }})"
+ description: "CPU load is > 80%\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: HostSwapIsFillingUp
+ expr: (1 - (node_memory_SwapFree_bytes / node_memory_SwapTotal_bytes)) * 100 > 80
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Host swap is filling up (instance {{ $labels.instance }})"
+ description: "Swap is filling up (>80%)\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: HighMemoryLoad
+ expr: (sum(node_memory_MemTotal_bytes) - sum(node_memory_MemFree_bytes + node_memory_Buffers_bytes + node_memory_Cached_bytes) ) / sum(node_memory_MemTotal_bytes) * 100 > 85
+ for: 30s
+ labels:
+ severity: warning
+ annotations:
+ summary: "Server memory is almost full"
+ description: "Docker host memory usage is {{ humanize $value}}%. Reported by instance {{ $labels.instance }} of job {{ $labels.job }}."
+
+ - alert: HighStorageLoad
+ expr: (node_filesystem_size_bytes{fstype="aufs"} - node_filesystem_free_bytes{fstype="aufs"}) / node_filesystem_size_bytes{fstype="aufs"} * 100 > 85
+ for: 30s
+ labels:
+ severity: warning
+ annotations:
+ summary: "Server storage is almost full"
+ description: "Docker host storage usage is {{ humanize $value}}%. Reported by instance {{ $labels.instance }} of job {{ $labels.job }}."
+
+ - alert: HostNetworkTransmitErrors
+ expr: increase(node_network_transmit_errs_total[5m]) > 0
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Host Network Transmit Errors (instance {{ $labels.instance }})"
+ description: "{{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf \"%.0f\" $value }} transmit errors in the last five minutes.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: HostOutOfMemory
+ expr: node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes * 100 < 10
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Host out of memory (instance {{ $labels.instance }})"
+ description: "Node memory is filling up (< 10% left)\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: HostMemoryUnderMemoryPressure
+ expr: rate(node_vmstat_pgmajfault[1m]) > 1000
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Host memory under memory pressure (instance {{ $labels.instance }})"
+ description: "The node is under heavy memory pressure. High rate of major page faults\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: HostUnusualNetworkThroughputIn
+ expr: sum by (instance) (irate(node_network_receive_bytes_total[2m])) / 1024 / 1024 > 100
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Host unusual network throughput in (instance {{ $labels.instance }})"
+ description: "Host network interfaces are probably receiving too much data (> 100 MB/s)\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: HostUnusualNetworkThroughputOut
+ expr: sum by (instance) (irate(node_network_transmit_bytes_total[2m])) / 1024 / 1024 > 100
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Host unusual network throughput out (instance {{ $labels.instance }})"
+ description: "Host network interfaces are probably sending too much data (> 100 MB/s)\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: HostUnusualDiskRateRead
+ expr: sum by (instance) (irate(node_disk_read_bytes_total[2m])) / 1024 / 1024 > 50
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Host unusual disk read rate (instance {{ $labels.instance }})"
+ description: "Disk is probably reading too much data (> 50 MB/s)\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: HostUnusualDiskRateWrite
+ expr: sum by (instance) (irate(node_disk_written_bytes_total[2m])) / 1024 / 1024 > 50
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Host unusual disk write rate (instance {{ $labels.instance }})"
+ description: "Disk is probably writing too much data (> 50 MB/s)\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: HostOutOfDiskSpace
+ expr: (node_filesystem_avail_bytes{mountpoint="/rootfs"} * 100) / node_filesystem_size_bytes{mountpoint="/rootfs"} < 10
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Host out of disk space (instance {{ $labels.instance }})"
+ description: "Disk is almost full (< 10% left)\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: HostDiskWillFillIn4Hours
+ expr: predict_linear(node_filesystem_free_bytes{fstype!~"tmpfs"}[1h], 4 * 3600) < 0
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Host disk will fill in 4 hours (instance {{ $labels.instance }})"
+ description: "Disk will fill in 4 hours at current write rate\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: HostPhysicalComponentTooHot
+ expr: node_hwmon_temp_celsius > 75
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Host physical component too hot (instance {{ $labels.instance }})"
+ description: "Physical hardware component too hot\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: HostNodeOvertemperatureAlarm
+ expr: node_hwmon_temp_alarm == 1
+ for: 5m
+ labels:
+ severity: critical
+ annotations:
+ summary: "Host node overtemperature alarm (instance {{ $labels.instance }})"
+ description: "Physical node temperature alarm triggered\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: HostKernelVersionDeviations
+ expr: count(sum(label_replace(node_uname_info, "kernel", "$1", "release", "([0-9]+.[0-9]+.[0-9]+).*")) by (kernel)) > 1
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Host kernel version deviations (instance {{ $labels.instance }})"
+ description: "Different kernel versions are running\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: HostOomKillDetected
+ expr: increase(node_vmstat_oom_kill[5m]) > 0
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Host OOM kill detected (instance {{ $labels.instance }})"
+ description: "OOM kill detected\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: HostEdacCorrectableErrorsDetected
+ expr: increase(node_edac_correctable_errors_total[5m]) > 0
+ for: 5m
+ labels:
+ severity: info
+ annotations:
+ summary: "Host EDAC Correctable Errors detected (instance {{ $labels.instance }})"
+ description: "{{ $labels.instance }} has had {{ printf \"%.0f\" $value }} correctable memory errors reported by EDAC in the last 5 minutes.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: HostEdacUncorrectableErrorsDetected
+ expr: node_edac_uncorrectable_errors_total > 0
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Host EDAC Uncorrectable Errors detected (instance {{ $labels.instance }})"
+ description: "{{ $labels.instance }} has had {{ printf \"%.0f\" $value }} uncorrectable memory errors reported by EDAC in the last 5 minutes.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: HostNetworkReceiveErrors
+ expr: increase(node_network_receive_errs_total[5m]) > 0
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Host Network Receive Errors (instance {{ $labels.instance }})"
+ description: "{{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf \"%.0f\" $value }} receive errors in the last five minutes.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: HostNetworkTransmitErrors
+ expr: increase(node_network_transmit_errs_total[5m]) > 0
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Host Network Transmit Errors (instance {{ $labels.instance }})"
+ description: "{{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf \"%.0f\" $value }} transmit errors in the last five minutes.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - name: container
+ rules:
+ - alert: ContainerKilled
+ expr: time() - container_last_seen > 60
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Container killed (instance {{ $labels.instance }})"
+ description: "A container has disappeared\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: ContainerCpuUsage
+ expr: sum by(instance, name) (rate(container_cpu_usage_seconds_total[3m]) * 100 > 80)
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Container CPU usage (instance {{ $labels.instance }})"
+ description: "Container CPU usage is above 80%\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: ContainerMemoryUsage
+ expr: (sum(container_memory_usage_bytes) BY (instance, name) / sum(container_spec_memory_limit_bytes > 0) BY (instance, name) * 100) > 125
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Container Memory usage (instance {{ $labels.instance }})"
+ description: "Container Memory usage is above 80%\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: ContainerVolumeUsage
+ expr: (1 - (sum(container_fs_inodes_free) BY (instance) / sum(container_fs_inodes_total) BY (instance)) * 100) > 80
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Container Volume usage (instance {{ $labels.instance }})"
+ description: "Container Volume usage is above 80%\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: ContainerVolumeIoUsage
+ expr: (sum(container_fs_io_current) BY (instance, name) * 100) > 80
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Container Volume IO usage (instance {{ $labels.instance }})"
+ description: "Container Volume IO usage is above 80%\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: ContainerHighThrottleRate
+ expr: rate(container_cpu_cfs_throttled_seconds_total[3m]) > 1
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Container high throttle rate (instance {{ $labels.instance }})"
+ description: "Container is being throttled\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - name: kubernetes
+ rules:
+ - alert: KubernetesNodeReady
+ expr: kube_node_status_condition{condition="Ready",status="true"} == 0
+ for: 5m
+ labels:
+ severity: critical
+ annotations:
+ summary: "Kubernetes Node ready (instance {{ $labels.instance }})"
+ description: "Node {{ $labels.node }} has been unready for a long time\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesMemoryPressure
+ expr: kube_node_status_condition{condition="MemoryPressure",status="true"} == 1
+ for: 5m
+ labels:
+ severity: critical
+ annotations:
+ summary: "Kubernetes memory pressure (instance {{ $labels.instance }})"
+ description: "{{ $labels.node }} has MemoryPressure condition\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesDiskPressure
+ expr: kube_node_status_condition{condition="DiskPressure",status="true"} == 1
+ for: 5m
+ labels:
+ severity: critical
+ annotations:
+ summary: "Kubernetes disk pressure (instance {{ $labels.instance }})"
+ description: "{{ $labels.node }} has DiskPressure condition\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesOutOfDisk
+ expr: kube_node_status_condition{condition="OutOfDisk",status="true"} == 1
+ for: 5m
+ labels:
+ severity: critical
+ annotations:
+ summary: "Kubernetes out of disk (instance {{ $labels.instance }})"
+ description: "{{ $labels.node }} has OutOfDisk condition\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesJobFailed
+ expr: kube_job_status_failed > 0
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Kubernetes Job failed (instance {{ $labels.instance }})"
+ description: "Job {{$labels.namespace}}/{{$labels.exported_job}} failed to complete\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesCronjobSuspended
+ expr: kube_cronjob_spec_suspend != 0
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Kubernetes CronJob suspended (instance {{ $labels.instance }})"
+ description: "CronJob {{ $labels.namespace }}/{{ $labels.cronjob }} is suspended\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesPersistentvolumeclaimPending
+ expr: kube_persistentvolumeclaim_status_phase{phase="Pending"} == 1
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Kubernetes PersistentVolumeClaim pending (instance {{ $labels.instance }})"
+ description: "PersistentVolumeClaim {{ $labels.namespace }}/{{ $labels.persistentvolumeclaim }} is pending\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesVolumeOutOfDiskSpace
+ expr: kubelet_volume_stats_available_bytes / kubelet_volume_stats_capacity_bytes * 100 < 10
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Kubernetes Volume out of disk space (instance {{ $labels.instance }})"
+ description: "Volume is almost full (< 10% left)\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesVolumeFullInFourDays
+ expr: predict_linear(kubelet_volume_stats_available_bytes[6h], 4 * 24 * 3600) < 0
+ for: 5m
+ labels:
+ severity: critical
+ annotations:
+ summary: "Kubernetes Volume full in four days (instance {{ $labels.instance }})"
+ description: "{{ $labels.namespace }}/{{ $labels.persistentvolumeclaim }} is expected to fill up within four days. Currently {{ $value | humanize }}% is available.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesPersistentvolumeError
+ expr: kube_persistentvolume_status_phase{phase=~"Failed|Pending",job="kube-state-metrics"} > 0
+ for: 5m
+ labels:
+ severity: critical
+ annotations:
+ summary: "Kubernetes PersistentVolume error (instance {{ $labels.instance }})"
+ description: "Persistent volume is in bad state\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesStatefulsetDown
+ expr: (kube_statefulset_status_replicas_ready / kube_statefulset_status_replicas_current) != 1
+ for: 5m
+ labels:
+ severity: critical
+ annotations:
+ summary: "Kubernetes StatefulSet down (instance {{ $labels.instance }})"
+ description: "A StatefulSet went down\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesHpaScalingAbility
+        expr: kube_hpa_status_condition{condition="AbleToScale", status="false"} == 1
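+        # kube-state-metrics reports the condition name in the `condition` label
+        # and its boolean value in `status`, hence condition="AbleToScale", status="false"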
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Kubernetes HPA scaling ability (instance {{ $labels.instance }})"
+ description: "Pod is unable to scale\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesHpaMetricAvailability
+        expr: kube_hpa_status_condition{condition="ScalingActive", status="false"} == 1
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Kubernetes HPA metric availability (instance {{ $labels.instance }})"
+          description: "HPA is not able to collect metrics\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesHpaScaleCapability
+ expr: kube_hpa_status_desired_replicas >= kube_hpa_spec_max_replicas
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Kubernetes HPA scale capability (instance {{ $labels.instance }})"
+ description: "The maximum number of desired Pods has been hit\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesPodNotHealthy
+ expr: min_over_time(sum by (namespace, pod) (kube_pod_status_phase{phase=~"Pending|Unknown|Failed"})[1h:]) > 0
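+        # the [1h:] subquery re-evaluates the inner sum over the past hour;
+        # a positive minimum means the pod never left a bad phase in that window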
+ for: 5m
+ labels:
+ severity: critical
+ annotations:
+ summary: "Kubernetes Pod not healthy (instance {{ $labels.instance }})"
+ description: "Pod has been in a non-ready state for longer than an hour.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesPodCrashLooping
+ expr: rate(kube_pod_container_status_restarts_total[15m]) * 60 * 5 > 5
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Kubernetes pod crash looping (instance {{ $labels.instance }})"
+ description: "Pod {{ $labels.pod }} is crash looping\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+      - alert: KubernetesReplicasetMismatch
+ expr: kube_replicaset_spec_replicas != kube_replicaset_status_ready_replicas
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+          summary: "Kubernetes ReplicaSet mismatch (instance {{ $labels.instance }})"
+          description: "ReplicaSet replicas mismatch\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesDeploymentReplicasMismatch
+ expr: kube_deployment_spec_replicas != kube_deployment_status_replicas_available
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Kubernetes Deployment replicas mismatch (instance {{ $labels.instance }})"
+ description: "Deployment Replicas mismatch\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesStatefulsetReplicasMismatch
+ expr: kube_statefulset_status_replicas_ready != kube_statefulset_status_replicas
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Kubernetes StatefulSet replicas mismatch (instance {{ $labels.instance }})"
+          description: "A StatefulSet has not matched the expected number of replicas for longer than 5 minutes.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesDeploymentGenerationMismatch
+ expr: kube_deployment_status_observed_generation != kube_deployment_metadata_generation
+ for: 5m
+ labels:
+ severity: critical
+ annotations:
+ summary: "Kubernetes Deployment generation mismatch (instance {{ $labels.instance }})"
+ description: "A Deployment has failed but has not been rolled back.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesStatefulsetGenerationMismatch
+ expr: kube_statefulset_status_observed_generation != kube_statefulset_metadata_generation
+ for: 5m
+ labels:
+ severity: critical
+ annotations:
+ summary: "Kubernetes StatefulSet generation mismatch (instance {{ $labels.instance }})"
+ description: "A StatefulSet has failed but has not been rolled back.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesStatefulsetUpdateNotRolledOut
+ expr: max without (revision) (kube_statefulset_status_current_revision unless kube_statefulset_status_update_revision) * (kube_statefulset_replicas != kube_statefulset_status_replicas_updated)
+ for: 5m
+ labels:
+ severity: critical
+ annotations:
+ summary: "Kubernetes StatefulSet update not rolled out (instance {{ $labels.instance }})"
+ description: "StatefulSet update has not been rolled out.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesDaemonsetRolloutStuck
+ expr: kube_daemonset_status_number_ready / kube_daemonset_status_desired_number_scheduled * 100 < 100 or kube_daemonset_status_desired_number_scheduled - kube_daemonset_status_current_number_scheduled > 0
+ for: 5m
+ labels:
+ severity: critical
+ annotations:
+ summary: "Kubernetes DaemonSet rollout stuck (instance {{ $labels.instance }})"
+ description: "Some Pods of DaemonSet are not scheduled or not ready\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesDaemonsetMisscheduled
+ expr: kube_daemonset_status_number_misscheduled > 0
+ for: 5m
+ labels:
+ severity: critical
+ annotations:
+ summary: "Kubernetes DaemonSet misscheduled (instance {{ $labels.instance }})"
+ description: "Some DaemonSet Pods are running where they are not supposed to run\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesCronjobTooLong
+ expr: time() - kube_cronjob_next_schedule_time > 3600
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Kubernetes CronJob too long (instance {{ $labels.instance }})"
+ description: "CronJob {{ $labels.namespace }}/{{ $labels.cronjob }} is taking more than 1h to complete.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesJobCompletion
+ expr: kube_job_spec_completions - kube_job_status_succeeded > 0 or kube_job_status_failed > 0
+ for: 5m
+ labels:
+ severity: critical
+ annotations:
+ summary: "Kubernetes job completion (instance {{ $labels.instance }})"
+ description: "Kubernetes Job failed to complete\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesApiServerErrors
+ expr: sum(rate(apiserver_request_count{job="apiserver",code=~"^(?:5..)$"}[2m])) / sum(rate(apiserver_request_count{job="apiserver"}[2m])) * 100 > 3
+ for: 5m
+ labels:
+ severity: critical
+ annotations:
+ summary: "Kubernetes API server errors (instance {{ $labels.instance }})"
+ description: "Kubernetes API server is experiencing high error rate\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesApiClientErrors
+ expr: (sum(rate(rest_client_requests_total{code=~"(4|5).."}[2m])) by (instance, job) / sum(rate(rest_client_requests_total[2m])) by (instance, job)) * 100 > 1
+ for: 5m
+ labels:
+ severity: critical
+ annotations:
+ summary: "Kubernetes API client errors (instance {{ $labels.instance }})"
+ description: "Kubernetes API client is experiencing high error rate\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesClientCertificateExpiresNextWeek
+ expr: apiserver_client_certificate_expiration_seconds_count{job="apiserver"} > 0 and histogram_quantile(0.01, sum by (job, le) (rate(apiserver_client_certificate_expiration_seconds_bucket{job="apiserver"}[5m]))) < 7*24*60*60
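+        # the 1st percentile of the expiration-time histogram approximates
+        # the soonest-expiring client certificate still in use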
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Kubernetes client certificate expires next week (instance {{ $labels.instance }})"
+ description: "A client certificate used to authenticate to the apiserver is expiring next week.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesClientCertificateExpiresSoon
+ expr: apiserver_client_certificate_expiration_seconds_count{job="apiserver"} > 0 and histogram_quantile(0.01, sum by (job, le) (rate(apiserver_client_certificate_expiration_seconds_bucket{job="apiserver"}[5m]))) < 24*60*60
+ for: 5m
+ labels:
+ severity: critical
+ annotations:
+ summary: "Kubernetes client certificate expires soon (instance {{ $labels.instance }})"
+          description: "A client certificate used to authenticate to the apiserver is expiring in less than 24 hours.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesApiServerLatency
+ expr: histogram_quantile(0.99, sum(apiserver_request_latencies_bucket{verb!~"CONNECT|WATCHLIST|WATCH|PROXY"}) WITHOUT (instance, resource)) / 1e+06 > 1
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Kubernetes API server latency (instance {{ $labels.instance }})"
+ description: "Kubernetes API server has a 99th percentile latency of {{ $value }} seconds for {{ $labels.verb }} {{ $labels.resource }}.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+
+ prometheus.yml: |-
+ global:
+ scrape_interval: 15s
+ evaluation_interval: 15s
+
+ rule_files:
+ - "/etc/prometheus/alert.rules"
+
+ scrape_configs:
+ - job_name: 'collectd-exporter'
+ scrape_interval: 5s
+ static_configs:
+ - targets: ['collectd-exporter:9103']
+
+ - job_name: 'cadvisor'
+ scrape_interval: 5s
+ static_configs:
+ - targets: ['cadvisor:8080']
+
+ - job_name: 'node-exporter'
+ scrape_interval: 5s
+ static_configs:
+ - targets: ['node-exporter:9100']
+
+ - job_name: 'prometheus'
+ scrape_interval: 10s
+ static_configs:
+ - targets: ['localhost:9090']
+
+ - job_name: 'kube-state-metrics'
+ scrape_interval: 10s
+ static_configs:
+ - targets: ['kube-state-metrics.kube-system.svc.cluster.local:8080']
+
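+    # every alert is sent to both Alertmanager replicas; deduplication
+    # between them assumes the two instances are clustered as peers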
+ alerting:
+ alertmanagers:
+ - scheme: http
+ static_configs:
+ - targets: ['alertmanager:9093', 'alertmanager1:9093']
diff --git a/tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus-deployment.yaml b/tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus-deployment.yaml
new file mode 100644
index 00000000..5b98b154
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus-deployment.yaml
@@ -0,0 +1,73 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: prometheus-deployment
+ namespace: monitoring
+ labels:
+ app: prometheus
+ adi10hero.monitoring: prometheus
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ adi10hero.monitoring: prometheus
+ app: prometheus
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ labels:
+ adi10hero.monitoring: prometheus
+ app: prometheus
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: kubernetes.io/hostname
+ operator: In
+ values:
+ - vm2
+ containers:
+ - name: prometheus
+ image: prom/prometheus
+ args:
+ - --config.file=/etc/prometheus/prometheus.yml
+ - --storage.tsdb.path=/prometheus
+ - --storage.tsdb.retention.size=3GB
+ - --storage.tsdb.retention.time=30d
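+        # retention is bounded by both size (3GB) and age (30d); whichever
+        # limit is reached first triggers deletion of the oldest data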
+ - --web.console.libraries=/etc/prometheus/console_libraries
+ - --web.console.templates=/etc/prometheus/consoles
+ ports:
+ - containerPort: 9090
+ securityContext:
+ runAsUser: 0
+ volumeMounts:
+ - name: prometheus-config-volume
+ mountPath: /etc/prometheus/
+ - name: prometheus-storage-volume
+ mountPath: /prometheus/
+ restartPolicy: Always
+ volumes:
+ - name: prometheus-config-volume
+ configMap:
+ defaultMode: 420
+ name: prometheus-config
+ - name: prometheus-storage-volume
+ persistentVolumeClaim:
+ claimName: prometheus-pvc
diff --git a/tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus-pv.yaml b/tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus-pv.yaml
new file mode 100644
index 00000000..f10cd073
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus-pv.yaml
@@ -0,0 +1,30 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: prometheus-pv
+ namespace: monitoring
+ labels:
+ app: prometheus-pv
+ adi10hero.monitoring: prometheus-pv
+spec:
+ storageClassName: monitoring
+ capacity:
+ storage: 6Gi
+ accessModes:
+ - ReadWriteMany
+ hostPath:
+ path: "/usr/share/monitoring_data/prometheus"
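+  # NOTE: hostPath storage is node-local; ReadWriteMany only lets pods on
+  # the same node share this directory, it is not replicated across nodes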
diff --git a/tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus-pvc.yaml b/tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus-pvc.yaml
new file mode 100644
index 00000000..812fcc73
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus-pvc.yaml
@@ -0,0 +1,33 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: prometheus-pvc
+ namespace: monitoring
+ labels:
+ app: prometheus-pvc
+ adi10hero.monitoring: prometheus-pvc
+spec:
+ accessModes:
+ - ReadWriteMany
+ storageClassName: monitoring
+ resources:
+ requests:
+ storage: 3Gi
+ selector:
+ matchLabels:
+ app: prometheus-pv
+ adi10hero.monitoring: prometheus-pv
diff --git a/tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus-service.yaml b/tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus-service.yaml
new file mode 100644
index 00000000..5be76d3e
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus-service.yaml
@@ -0,0 +1,34 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ adi10hero.monitoring: prometheus
+ app: prometheus
+ name: prometheus
+ namespace: monitoring
+ annotations:
+ prometheus.io/scrape: 'true'
+ prometheus.io/port: '9090'
+spec:
+ type: NodePort
+ ports:
+ - name: prometheus
+ protocol: TCP
+ port: 9090
+ nodePort: 30900
+ selector:
+ adi10hero.monitoring: prometheus
diff --git a/tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus1-deployment.yaml b/tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus1-deployment.yaml
new file mode 100644
index 00000000..149bea84
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus1-deployment.yaml
@@ -0,0 +1,73 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: prometheus1-deployment
+ namespace: monitoring
+ labels:
+ app: prometheus1
+ adi10hero.monitoring: prometheus1
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ adi10hero.monitoring: prometheus1
+ app: prometheus1
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ labels:
+ adi10hero.monitoring: prometheus1
+ app: prometheus1
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: kubernetes.io/hostname
+ operator: In
+ values:
+ - vm3
+ containers:
+ - name: prometheus
+ image: prom/prometheus
+ args:
+ - --config.file=/etc/prometheus/prometheus.yml
+ - --storage.tsdb.path=/prometheus
+ - --storage.tsdb.retention.size=3GB
+ - --storage.tsdb.retention.time=30d
+ - --web.console.libraries=/etc/prometheus/console_libraries
+ - --web.console.templates=/etc/prometheus/consoles
+ ports:
+ - containerPort: 9090
+ securityContext:
+ runAsUser: 0
+ volumeMounts:
+ - name: prometheus-config-volume
+ mountPath: /etc/prometheus/
+ - name: prometheus-storage-volume
+ mountPath: /prometheus/
+ restartPolicy: Always
+ volumes:
+ - name: prometheus-config-volume
+ configMap:
+ defaultMode: 420
+ name: prometheus-config
+ - name: prometheus-storage-volume
+ persistentVolumeClaim:
+ claimName: prometheus-pvc
diff --git a/tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus1-service.yaml b/tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus1-service.yaml
new file mode 100644
index 00000000..439deec1
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus1-service.yaml
@@ -0,0 +1,35 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ adi10hero.monitoring: prometheus1
+ app: prometheus1
+ name: prometheus1
+ namespace: monitoring
+ annotations:
+ prometheus.io/scrape: 'true'
+ prometheus.io/port: '9090'
+spec:
+ type: NodePort
+ ports:
+ - name: prometheus1
+ protocol: TCP
+ port: 9090
+ nodePort: 30901
+ selector:
+ adi10hero.monitoring: prometheus1
+ app: prometheus1
diff --git a/tools/lma/ansible-server/roles/monitoring/tasks/main.yml b/tools/lma/ansible-server/roles/monitoring/tasks/main.yml
new file mode 100644
index 00000000..cd4e6aca
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/tasks/main.yml
@@ -0,0 +1,273 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+#PAG (Prometheus, Alertmanager, Grafana) setup in k8s cluster
+
+#***********************************************************************************************************
+#copy all yaml to /tmp/files/
+#***********************************************************************************************************
+- name: copy all yaml to /tmp/files/
+ copy:
+ src: ../files/
+ dest: /tmp/files/
+
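+# NOTE: the k8s module used throughout these tasks requires the 'openshift'
+# Python client on the Ansible host (assumed to be installed beforehand)
+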
+#***********************************************************************************************************
+#Creating Namespace
+#***********************************************************************************************************
+- name: Creating Monitoring Namespace
+ k8s:
+ state: present
+ src: /tmp/files/monitoring-namespace.yaml
+ namespace: monitoring
+
+#***********************************************************************************************************
+#creating Persistent Volume
+#***********************************************************************************************************
+- name: creating Persistent Volume for Prometheus
+ k8s:
+ state: present
+ src: /tmp/files/prometheus/prometheus-pv.yaml
+ namespace: monitoring
+
+#***********************************************************************************************************
+#creating Persistent Volume
+#***********************************************************************************************************
+- name: creating Persistent Volume for Grafana
+ k8s:
+ state: present
+ src: /tmp/files/grafana/grafana-pv.yaml
+ namespace: monitoring
+
+#***********************************************************************************************************
+#creating Persistent Volume Claim
+#***********************************************************************************************************
+- name: creating Persistent Volume Claim for Prometheus
+ k8s:
+ state: present
+ src: /tmp/files/prometheus/prometheus-pvc.yaml
+ namespace: monitoring
+
+#***********************************************************************************************************
+#creating Persistent Volume Claim
+#***********************************************************************************************************
+- name: creating Persistent Volume Claim for Grafana
+ k8s:
+ state: present
+ src: /tmp/files/grafana/grafana-pvc.yaml
+ namespace: monitoring
+
+#***********************************************************************************************************
+#Making the cAdvisor daemonset
+#***********************************************************************************************************
+- name: Creating cAdvisor daemonset
+ k8s:
+ state: present
+ src: /tmp/files/cadvisor/cadvisor-deamonset.yaml
+ namespace: monitoring
+
+#***********************************************************************************************************
+#Starting the CAdvisor service
+#***********************************************************************************************************
+- name: Starting cAdvisor service
+ k8s:
+ state: present
+ src: /tmp/files/cadvisor/cadvisor-service.yaml
+ namespace: monitoring
+
+#***********************************************************************************************************
+#Deploying and Starting the kube-state-metrics service
+#***********************************************************************************************************
+- name: Deploying kube-state-metrics
+ k8s:
+ state: present
+ src: /tmp/files/kube-state-metrics/kube-state-metrics-deployment.yaml
+ namespace: kube-system
+
+- name: Starting kube-state-metrics service
+ k8s:
+ state: present
+ src: /tmp/files/kube-state-metrics/kube-state-metrics-service.yaml
+ namespace: kube-system
+
+#***********************************************************************************************************
+#Making the NodeExporter daemonset
+#***********************************************************************************************************
+- name: Creating NodeExporter daemonset
+ k8s:
+ state: present
+ src: /tmp/files/node-exporter/nodeexporter-daemonset.yaml
+ namespace: monitoring
+
+#***********************************************************************************************************
+#Starting the NodeExporter service
+#***********************************************************************************************************
+- name: Starting NodeExporter service
+ k8s:
+ state: present
+ src: /tmp/files/node-exporter/nodeexporter-service.yaml
+ namespace: monitoring
+
+#***********************************************************************************************************
+#Making the collectd-exporter deployment
+#***********************************************************************************************************
+- name: Creating collectd-exporter deployment
+ k8s:
+ state: present
+ src: /tmp/files/collectd-exporter/collectd-exporter-deployment.yaml
+ namespace: monitoring
+
+#***********************************************************************************************************
+#Making the collectd-exporter service
+#***********************************************************************************************************
+- name: Creating collectd-exporter service
+ k8s:
+ state: present
+ src: /tmp/files/collectd-exporter/collectd-exporter-service.yaml
+ namespace: monitoring
+
+#***********************************************************************************************************
+#Webhook goes here
+#***********************************************************************************************************
+
+#***********************************************************************************************************
+#Making the config file for Alertmanagers
+#***********************************************************************************************************
+- name: Creating config map for Alertmanagers
+ k8s:
+ state: present
+ src: /tmp/files/alertmanager/alertmanager-config.yaml
+ namespace: monitoring
+
+# - name: Creating config map for Alertmanagers
+# k8s:
+# state: present
+# src: /tmp/files/alertmanager1-config.yaml
+# namespace: monitoring
+
+#***********************************************************************************************************
+#Making the 1st alertmanager deployment
+#***********************************************************************************************************
+- name: Creating 1st alertmanager deployment
+ k8s:
+ state: present
+ src: /tmp/files/alertmanager/alertmanager-deployment.yaml
+ namespace: monitoring
+
+#***********************************************************************************************************
+#Making the 1st alertmanager service
+#***********************************************************************************************************
+- name: Creating 1st alertmanager service
+ k8s:
+ state: present
+ src: /tmp/files/alertmanager/alertmanager-service.yaml
+ namespace: monitoring
+
+#***********************************************************************************************************
+#Making the 2nd alertmanager deployment
+#***********************************************************************************************************
+- name: Creating 2nd alertmanager deployment
+ k8s:
+ state: present
+ src: /tmp/files/alertmanager/alertmanager1-deployment.yaml
+ namespace: monitoring
+
+#***********************************************************************************************************
+#Making the 2nd alertmanager service
+#***********************************************************************************************************
+- name: Creating 2nd alertmanager service
+ k8s:
+ state: present
+ src: /tmp/files/alertmanager/alertmanager1-service.yaml
+ namespace: monitoring
+
+#***********************************************************************************************************
+#Making the config file for Prometheus
+#***********************************************************************************************************
+- name: Creating 1st Prometheus Config
+ k8s:
+ state: present
+ src: /tmp/files/prometheus/prometheus-config.yaml
+ namespace: monitoring
+
+# - name: Creating 2nd Prometheus Config
+# k8s:
+# state: present
+# src: /tmp/files/prometheus1-config.yaml
+# namespace: monitoring
+
+#***********************************************************************************************************
+#Starting Prometheus
+#***********************************************************************************************************
+- name: Starting Prometheus 1
+ k8s:
+ state: present
+ src: /tmp/files/prometheus/prometheus-deployment.yaml
+ namespace: monitoring
+
+- name: Starting Prometheus 2
+ k8s:
+ state: present
+ src: /tmp/files/prometheus/prometheus1-deployment.yaml
+ namespace: monitoring
+
+#***********************************************************************************************************
+#Starting Prometheus Service
+#***********************************************************************************************************
+- name: Starting Prometheus 1 Service
+ k8s:
+ state: present
+ src: /tmp/files/prometheus/prometheus-service.yaml
+ namespace: monitoring
+
+- name: Starting Prometheus 2 Service
+ k8s:
+ state: present
+ src: /tmp/files/prometheus/prometheus1-service.yaml
+ namespace: monitoring
+
+- name: Starting Main Prometheus Service
+ k8s:
+ state: present
+ src: /tmp/files/prometheus/main-prometheus-service.yaml
+ namespace: monitoring
+
+#***********************************************************************************************************
+#Starting Grafana
+#***********************************************************************************************************
+- name: Creating Grafana Datasource Config
+ k8s:
+ state: present
+ src: /tmp/files/grafana/grafana-datasource-config.yaml
+ namespace: monitoring
+
+- name: Starting Grafana
+ k8s:
+ state: present
+ src: /tmp/files/grafana/grafana-deployment.yaml
+ namespace: monitoring
+
+- name: Starting Grafana Service
+ k8s:
+ state: present
+ src: /tmp/files/grafana/grafana-service.yaml
+ namespace: monitoring
+
+#***********************************************************************************************************
+#removing /tmp/files
+#***********************************************************************************************************
+- name: Removing /tmp/files
+ file:
+ path: "/tmp/files"
+ state: absent
diff --git a/tools/lma/ansible-server/roles/nfs/tasks/main.yml b/tools/lma/ansible-server/roles/nfs/tasks/main.yml
new file mode 100644
index 00000000..2380ea74
--- /dev/null
+++ b/tools/lma/ansible-server/roles/nfs/tasks/main.yml
@@ -0,0 +1,42 @@
+# Copyright 2020 Adarsh yadav, Aditya Srivastava
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+#create Dir /srv/nfs
+- name: Create Directory for elasticsearch
+ file: path="/srv/nfs/{{item}}" state=directory
+ with_items:
+ - ['data', 'master']
+
+- name: Create Directory for grafana
+ file: path="/usr/share/monitoring_data/grafana" state=directory
+
+#installing NFS
+- name: Installing NFS server utils
+ yum:
+ name: nfs-utils
+ state: present
+
+#update /etc/exports file
+- name: Edit /etc/exports file for NFS
+ lineinfile: path=/etc/exports line="{{item.line}}"
+ with_items:
+ - {line: "/srv/nfs/master *(rw,sync,no_root_squash,no_subtree_check)"}
+ - {line: "/srv/nfs/data *(rw,sync,no_root_squash,no_subtree_check)"}
+ - {line: "/usr/share/monitoring_data/grafana *(rw,sync,no_root_squash,no_subtree_check)"}
+
+#starting NFS service
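+#(a restart re-reads /etc/exports, equivalent to running `exportfs -ra`)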
+- name: 'starting NFS service'
+ service:
+ name: nfs
+ state: restarted
diff --git a/tools/lma/jupyter-notebooks/Causation-Analysis.ipynb b/tools/lma/jupyter-notebooks/Causation-Analysis.ipynb
new file mode 100644
index 00000000..d2e7886a
--- /dev/null
+++ b/tools/lma/jupyter-notebooks/Causation-Analysis.ipynb
@@ -0,0 +1,784 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Logs and Metrics Analysis Notebook\n",
+ "\n",
+ "#### Used to capture anomalies in the logs and analyse / visualize the metrics in the vicinity of that time\n",
+ "\n",
+ "##### Contributors:\n",
+ "\n",
+ "- Adarsh Yadav <adiyadav0509@gmail.com> \n",
+ " \n",
+ " Log Analysis and Anomaly Finding\n",
+ " \n",
+ "\n",
+ "\n",
+ "\n",
+ "- Aditya Srivastava <adityasrivastava301199@gmail.com>\n",
+ " \n",
+ " Metrics Analysis and Visualization"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Metrics Analysis and Visualization"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import pandas as pd\n",
+ "import matplotlib.pyplot as plt\n",
+ "import matplotlib.dates as mdates\n",
+ "import numpy as np\n",
+ "\n",
+ "import datetime\n",
+ "import time\n",
+ "import requests\n",
+ "\n",
+ "from pprint import pprint\n",
+ "import json\n",
+ "from datetime import datetime, timedelta\n",
+ "\n",
+ "from elasticsearch import Elasticsearch\n",
+ "from elasticsearch_dsl import Search\n",
+ "from elasticsearch.connection import create_ssl_context\n",
+ "import ssl\n",
+ "import urllib3"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "PROMETHEUS = 'http://10.10.120.211:30902/' #do not change, unless sure"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Helper Functions"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#function to make DF out of query json\n",
+ "\n",
+ "def convert_to_df(res_json):\n",
+ "\n",
+ " data_list = res_json['data']['result']\n",
+ " res_df = pd.DataFrame()\n",
+ " if not data_list:\n",
+ " return res_df\n",
+ "\n",
+    "    # making columns\n",
+ " for data in data_list:\n",
+ " metrics = data['metric']\n",
+ " for metric in metrics.keys():\n",
+ " res_df[metric] = np.nan\n",
+ " res_df['value'] = 0\n",
+ " \n",
+ " # filling the df\n",
+ " for data in data_list:\n",
+ " metrics = data['metric']\n",
+ " metrics['value'] = data['value'][-1]\n",
+ " res_df = res_df.append(metrics, ignore_index=True) \n",
+ "\n",
+ " return res_df\n",
+ "\n",
+ "def convert_to_df_range(res_json):\n",
+ "\n",
+ " data_list = res_json['data']['result']\n",
+ " res_df = pd.DataFrame()\n",
+ " if not data_list:\n",
+ " return res_df\n",
+ "\n",
+ " # filling the df\n",
+ " for data in data_list:\n",
+ " metrics = data['metric']\n",
+ " values = np.array(data['values'])\n",
+    "        for ts, value in values:\n",
+    "            metrics['timestamp'] = ts\n",
+ " metrics['value'] = value\n",
+ " res_df = res_df.append(metrics, ignore_index=True) \n",
+ "\n",
+ " return res_df\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# functions to query\n",
+ "\n",
+ "def convert_to_timestamp(s):\n",
+ " return time.mktime(datetime.strptime(s, \"%Y-%m-%d %H:%M:%S\").timetuple())\n",
+ "\n",
+ "def query_current(params={}):\n",
+ " # input: params\n",
+ " # type: dict\n",
+ " # Example: {'query': 'container_cpu_user_seconds_total'}\n",
+ " \n",
+ " # Output: dict, loaded json response of the query\n",
+ "\n",
+ " res = requests.get(PROMETHEUS + '/api/v1/query', \n",
+ " params=params)\n",
+ " return json.loads(res.text)\n",
+ "\n",
+ "\n",
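+    "# NOTE: query_range asks Prometheus for one sample per `step` between\n",
+    "# start and end; both bounds are converted to Unix timestamps first\n",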
+ "def query_range(start, end, params={}, steps = '30s'):\n",
+ " # input: params\n",
+ " # type: dict\n",
+ " # Example: {'query': 'container_cpu_user_seconds_total'}\n",
+ " \n",
+ " # Output: dict, loaded json response of the query\n",
+ " params[\"start\"] = convert_to_timestamp(start)\n",
+ " params[\"end\"] = convert_to_timestamp(end)\n",
+ " params[\"step\"] = steps\n",
+ "\n",
+ " # print(params)\n",
+ "\n",
+ " res = requests.get(PROMETHEUS + '/api/v1/query_range', \n",
+ " params=params,\n",
+ " )\n",
+ "\n",
+ " return json.loads(res.text)\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ " "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Analysis Function"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### CPU"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# CPU Unused Cores\n",
+ "def unused_cores(start=None, end=None, node=None, steps='15s', csv=None, verbose=False):\n",
+ " \n",
+ " if csv is not None:\n",
+ " df = pd.read_csv(csv)\n",
+ " return df\n",
+ " else:\n",
+ " if start is None or end is None or node is None:\n",
+ " return \"Start, end and Node name required when fetching from prometheus\"\n",
+ " \n",
+ " params = {'query' : \"collectd_cpu_percent{exported_instance='\" + node + \"'}\"}\n",
+ "\n",
+ " target_cpu_usage_range = query_range(start, end, params, steps)\n",
+ " df = convert_to_df_range(target_cpu_usage_range)\n",
+ "\n",
+ " df = df.drop(['__name__', 'instance', 'job'], axis = 1)\n",
+ " groups = df.groupby(['cpu'])\n",
+ " if verbose: print(\"Unused Cores :\")\n",
+ " unused_cores = []\n",
+ " for key, item in groups:\n",
+ " curr_df = item\n",
+ " idle_row = curr_df.loc[curr_df['type'] == 'idle']\n",
+ " if idle_row['value'].iloc[0] == '100':\n",
+ " if verbose: print(\"Core: \",key)\n",
+ " unused_cores.append(int(key))\n",
+ "\n",
+ " print(\"Number of unused cores: \", len(unused_cores))\n",
+ " return unused_cores\n",
+ "\n",
+ "\n",
+ "#CPU fully used cores\n",
+ "def fully_used_cores(start=None, end=None, node=None, steps='15s', csv=None, verbose=False):\n",
+ " \n",
+ " if csv is not None:\n",
+ " df = pd.read_csv(csv)\n",
+ " return df\n",
+ " else:\n",
+ " if start is None or end is None or node is None:\n",
+ " return \"Start, end and Node name required when fetching from prometheus\"\n",
+ " \n",
+ " params = {'query' : \"collectd_cpu_percent{exported_instance='\" + node + \"'}\"}\n",
+ "\n",
+ " target_cpu_usage_range = query_range(start, end, params, steps)\n",
+ " df = convert_to_df_range(target_cpu_usage_range)\n",
+ "\n",
+ " df = df.drop(['__name__', 'instance', 'job'], axis = 1)\n",
+ " groups = df.groupby(['cpu'])\n",
+ " if verbose: print(\"Fully Used Cores :\")\n",
+ " fully_used_cores = []\n",
+ " for key, item in groups:\n",
+ " curr_df = item\n",
+ " idle_row = curr_df.loc[curr_df['type'] == 'idle']\n",
+ " if idle_row['value'].iloc[0] == '0':\n",
+ " if verbose: print(\"Core: \",key)\n",
+ " fully_used_cores.append(int(key))\n",
+ " print(\"Number of fully used cores: \", len(fully_used_cores))\n",
+ " return fully_used_cores\n",
+ "\n",
+ "\n",
+ "# CPU used cores plots\n",
+ "def plot_used_cores(start=None, end=None, node=None, steps='15s', csv=None, verbose=False):\n",
+ " \n",
+ " if csv is not None:\n",
+ " df = pd.read_csv(csv)\n",
+ " return df\n",
+ " else:\n",
+ " if start is None or end is None or node is None:\n",
+ " return \"Start, end and Node name required when fetching from prometheus\"\n",
+ "\n",
+ " params = {'query' : \"collectd_cpu_percent{exported_instance='\" + node + \"'}\"}\n",
+ "\n",
+ " target_cpu_usage_range = query_range(start, end, params, steps)\n",
+ " df = convert_to_df_range(target_cpu_usage_range)\n",
+ " \n",
+ " df = df.drop(['__name__', 'instance', 'job'], axis = 1)\n",
+ " groups = df.groupby(['cpu'])\n",
+ " used_cores = []\n",
+ "\n",
+ " for key, item in groups:\n",
+ " curr_df = item\n",
+ " user_row = curr_df.loc[curr_df['type'] == 'user']\n",
+ " sys_row = curr_df.loc[curr_df['type'] == 'system']\n",
+ "\n",
+ "\n",
+ " if np.any(sys_row != '0') or np.any(user_row != '0'):\n",
+ " used_cores.append(key)\n",
+ " type_grps = curr_df.groupby('type')\n",
+ " fig = plt.figure(figsize=(24,6), facecolor='oldlace', edgecolor='red')\n",
+ "\n",
+ " for type_key, new_item in type_grps:\n",
+ "\n",
+ " if type_key == 'system':\n",
+ " ax1 = fig.add_subplot(131)\n",
+ " ax1.title.set_text(type_key)\n",
+ " ax1.plot(new_item['timestamp'], new_item['value'])\n",
+ " elif type_key == 'user':\n",
+ " ax2 = fig.add_subplot(132)\n",
+ " ax2.title.set_text(type_key)\n",
+ " ax2.plot(new_item['timestamp'], new_item['value'])\n",
+ " elif type_key == 'wait':\n",
+ " ax3 = fig.add_subplot(133)\n",
+ " ax3.title.set_text(type_key)\n",
+ " ax3.plot(new_item['timestamp'], new_item['value'])\n",
+ "\n",
+ " plt.suptitle('Used CPU Core {}'.format(key), fontsize=14)\n",
+ " plt.show()\n",
+ " print(\"Number of used cores: \", len(used_cores))\n",
+ " return used_cores"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### Interface"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+    "# Interface Dropped (both type 0 and 1, i.e. rx and tx)\n",
+ "#TODO: Change this to separate functions later\n",
+ "def interface_dropped(start=None, end=None, node=None, steps='15s', csv=None, verbose=False):\n",
+ " \n",
+ " if csv is not None:\n",
+ " df = pd.read_csv(csv)\n",
+ " df_0 = df #TODO: Change this\n",
+ " df_1 = df #TODO: Change this\n",
+ " else:\n",
+ " if start is None or end is None or node is None:\n",
+ " return \"Start, end and Node name required when fetching from prometheus\"\n",
+ " \n",
+ " params = {'query' : \"collectd_interface_if_dropped_0_total{exported_instance='\" + node + \"'}\"}\n",
+ "\n",
+ " interface_dropped_0 = query_range(start, end, params, steps)\n",
+ " df_0 = convert_to_df_range(interface_dropped_0)\n",
+ " \n",
+ " params = {'query' : \"collectd_interface_if_dropped_1_total{exported_instance='\" + node + \"'}\"}\n",
+ " interface_dropped_1 = query_range(start, end, params, steps)\n",
+ " df_1 = convert_to_df_range(interface_dropped_1)\n",
+ "\n",
+ " \n",
+ " #df_0 : interfaces_dropped_0_df\n",
+ " df_0 = df_0.drop(['__name__', 'instance', 'job'], axis = 1)\n",
+ "\n",
+ " #df_1 : interfaces_dropped_1_df\n",
+ " df_1 = df_1.drop(['__name__', 'instance', 'job'], axis = 1)\n",
+ "\n",
+ " groups_0 = df_0.groupby(['interface'])\n",
+ " groups_1 = df_1.groupby(['interface'])\n",
+ "\n",
+ " groups = [groups_0, groups_1]\n",
+ " dropped_interfaces= []\n",
+ " drop_type = 0\n",
+ " color = ['oldlace', 'mistyrose']\n",
+ " plot_iter = 111\n",
+ " for group in groups:\n",
+ " dropped = []\n",
+ "\n",
+ " for key, item in group:\n",
+ " curr_df = item\n",
+ " if np.any(curr_df['value'] == '1'):\n",
+ " dropped_row = curr_df.loc[curr_df['value'] == '1']\n",
+ " dropped.append([key, dropped_row['timestamp'].iloc[0]])\n",
+ " fig = plt.figure(figsize=(24,6), facecolor=color[drop_type], edgecolor='red')\n",
+ " ax = fig.add_subplot(plot_iter)\n",
+ " ax.title.set_text(\"Interface: {}\".format(key))\n",
+ " ax.plot(item['timestamp'], item['value'])\n",
+ " dropped_interfaces.append(dropped)\n",
+ " plt.suptitle('Interfaces Drop type {}'.format(drop_type), fontsize=14)\n",
+ " plt.show()\n",
+ " drop_type += 1\n",
+ " return dropped_interfaces\n",
+ "\n",
+ "\n",
+    "# Interface Errors (both type 0 and 1, i.e. rx and tx)\n",
+ "#TODO: Change this to separate functions later\n",
+ "def interface_errors(start=None, end=None, node=None, steps='15s', csv=None, verbose=False):\n",
+ " \n",
+ " if csv is not None:\n",
+ " df = pd.read_csv(csv)\n",
+ " df_0 = df #TODO: Change this\n",
+ " df_1 = df #TODO: Change this\n",
+ " else:\n",
+ " if start is None or end is None or node is None:\n",
+ " return \"Start, end and Node name required when fetching from prometheus\"\n",
+ " \n",
+ " params = {'query' : \"collectd_interface_if_errors_0_total{exported_instance='\" + node + \"'}\"}\n",
+ " interfaces_errors_0 = query_range(start, end, params, steps)\n",
+ " df_0 = convert_to_df_range(interfaces_errors_0)\n",
+ " \n",
+ " params = {'query' : \"collectd_interface_if_errors_1_total{exported_instance='\" + node + \"'}\"}\n",
+ " interface_errors_1 = query_range(start, end, params, steps)\n",
+ " df_1 = convert_to_df_range(interface_errors_1)\n",
+ "\n",
+ " \n",
+ " #df_0 : interfaces_errors_0_df\n",
+ " df_0 = df_0.drop(['__name__', 'instance', 'job'], axis = 1)\n",
+ "\n",
+ " #df_1 : interfaces_dropped_1_df\n",
+ " df_1 = df_1.drop(['__name__', 'instance', 'job'], axis = 1)\n",
+ "\n",
+ " groups_0 = df_0.groupby(['interface'])\n",
+ " groups_1 = df_1.groupby(['interface'])\n",
+ "\n",
+ " groups = [groups_0, groups_1]\n",
+ " err_interfaces= []\n",
+ " err_type = 0\n",
+ " color = ['oldlace', 'mistyrose']\n",
+ " for group in groups:\n",
+ " errors = []\n",
+ "\n",
+ " for key, item in group:\n",
+ " curr_df = item\n",
+ "\n",
+ " if np.any(curr_df['value'] == '1'):\n",
+ " err_row = curr_df.loc[curr_df['value'] == '1']\n",
+    "                errors.append([key, err_row['timestamp'].iloc[0]])\n",
+ "\n",
+ " fig = plt.figure(figsize=(24,6), facecolor=color[err_type], edgecolor='red')\n",
+ " ax = fig.add_subplot(111)\n",
+ " ax.title.set_text(\"Interface: {}\".format(key))\n",
+ " ax.plot(item['timestamp'], item['value'])\n",
+ "\n",
+ " err_interfaces.append(errors)\n",
+ " plt.suptitle('Interfaces Error type {}'.format(err_type), fontsize=14)\n",
+ " plt.show()\n",
+ " err_type += 1\n",
+ "\n",
+ " return err_interfaces"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### RDT "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# L3 cache bytes\n",
+ "def plot_rdt_bytes(start=None, end=None, node=None, steps='15s', csv=None, verbose=False):\n",
+ " \n",
+ " if csv is not None:\n",
+ " df = pd.read_csv(csv)\n",
+ " else:\n",
+ " if start is None or end is None or node is None:\n",
+ " return \"Start, end and Node name required when fetching from prometheus\"\n",
+ "\n",
+ " params = {'query' : \"collectd_intel_rdt_bytes{exported_instance='\" + node + \"'}\"}\n",
+ " intel_rdt_bytes = query_range(start, end, params, steps)\n",
+ " df = convert_to_df_range(intel_rdt_bytes)\n",
+ "\n",
+ " df = df.drop(['__name__', 'instance', 'job'], axis = 1)\n",
+ " groups = df.groupby(['intel_rdt'])\n",
+ " for key, item in groups:\n",
+ " curr_df = item\n",
+ " fig = plt.figure(figsize=(24,6), facecolor='oldlace', edgecolor='red')\n",
+ " ax1 = fig.add_subplot(111)\n",
+ " ax1.title.set_text(\"Intel RDT Number: {}\".format(key))\n",
+ " ax1.plot(item['timestamp'], item['value'])\n",
+ " plt.show()\n",
+ " return\n",
+ "\n",
+ "\n",
+ "# L3 IPC values\n",
+ "def plot_rdt_ipc(start=None, end=None, node=None, steps='15s', csv=None, verbose=False):\n",
+ " \n",
+ " if csv is not None:\n",
+ " df = pd.read_csv(csv)\n",
+ " else:\n",
+ " if start is None or end is None or node is None:\n",
+ " return \"Start, end and Node name required when fetching from prometheus\"\n",
+ " \n",
+ " params = {'query' : \"collectd_intel_rdt_ipc{exported_instance='\" + node + \"'}\"}\n",
+ " intel_rdt_ipc = query_range(start, end, params, steps)\n",
+ " df = convert_to_df_range(intel_rdt_ipc)\n",
+ "\n",
+ " df = df.drop(['__name__', 'instance', 'job'], axis = 1)\n",
+ " groups = df.groupby(['intel_rdt'])\n",
+ " for key, item in groups:\n",
+ " curr_df = item\n",
+ " fig = plt.figure(figsize=(24,6), facecolor='oldlace', edgecolor='red')\n",
+ " ax1 = fig.add_subplot(111)\n",
+ " ax1.title.set_text(\"Intel RDT Number: {}, IPC value\".format(key))\n",
+ " ax1.plot(item['timestamp'], item['value'])\n",
+ " plt.show()\n",
+ " return\n",
+ "\n",
+ "\n",
+    "# memory bandwidth\n",
+ "def get_rdt_memory_bandwidth(start=None, end=None, node=None, steps='15s', csv=None, verbose=False):\n",
+ " \n",
+ " if csv is not None:\n",
+ " df = pd.read_csv(csv)\n",
+ " else:\n",
+ "\n",
+ " if start is None or end is None or node is None:\n",
+ " return \"Start, end and Node name required when fetching from prometheus\"\n",
+ " \n",
+ " params = {'query' : \"collectd_intel_rdt_memory_bandwidth_total{exported_instance='\" + node + \"'}\"}\n",
+ " intel_rdt_mem_bw = query_range(start, end, params, steps)\n",
+ " df = convert_to_df_range(intel_rdt_mem_bw)\n",
+ "\n",
+ " df = df.drop(['__name__', 'instance', 'job'], axis = 1)\n",
+ " \n",
+ " return df"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### Memory"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "scrolled": true
+ },
+ "outputs": [],
+ "source": [
+ "def get_memory_usage(start=None, end=None, node=None, steps='15s', csv=None, verbose=False):\n",
+ " \n",
+ " if csv is not None:\n",
+ " df = pd.read_csv(csv)\n",
+ " else:\n",
+ " if start is None or end is None or node is None:\n",
+ " return \"Start, end and Node name required when fetching from prometheus\"\n",
+ " \n",
+ " params = {'query' : \"collectd_memory{exported_instance='\" + node + \"'} / (1024*1024*1024) \"} \n",
+ " target_memory_usage_range = query_range(start, end, params, steps)\n",
+ " df = convert_to_df_range(target_memory_usage_range)\n",
+ " \n",
+ " df = df.drop(['instance', 'job'], axis = 1)\n",
+ " groups = df.groupby(['memory'])\n",
+ " for key, item in groups:\n",
+ " curr_df = item\n",
+ " fig = plt.figure(figsize=(24,6), facecolor='oldlace', edgecolor='red')\n",
+ " ax1 = fig.add_subplot(111)\n",
+ " ax1.title.set_text(\"Memory Type: {}\".format(key))\n",
+ " ax1.plot(item['timestamp'], item['value'])\n",
+ " plt.show()\n",
+ " return df"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Testing Zone"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "get_memory_usage('2020-08-03 08:00:12', '2020-08-03 08:01:12', 'pod12-node4')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def analyse(timestamp, node):\n",
+ " ts = datetime.strptime(timestamp.split(',')[0], \"%Y-%m-%d %H:%M:%S\")\n",
+ " start = ts - timedelta(seconds=10)\n",
+ " end = ts + timedelta(seconds=10)\n",
+ " \n",
+ " start = str(start)\n",
+ " end = str(end)\n",
+ " steps = '5s'\n",
+ "\n",
+ " print(\"Starting Analysis from\",start,\"to\",end,'\\n\\n')\n",
+ "\n",
+ " if \"node4\" in node:\n",
+ " node = 'pod12-node4'\n",
+ "\n",
+ " #cpu analysis\n",
+ " print(\"=====CPU ANALYSIS=====\\n\")\n",
+ " unused = unused_cores(start, end, node, steps)\n",
+ " print(\"Unused Cores:\", unused)\n",
+ " fully_used = fully_used_cores(start, end, node, steps)\n",
+ " print(\"Fully Used Cores:\", fully_used)\n",
+ " print(\"Plotting used cores:\")\n",
+ " used_cores = plot_used_cores(start, end, node, steps)\n",
+ " \n",
+ " #interface analysis\n",
+ " print(\"=====Interfaces Dropped / Errors=====\\n\")\n",
+ " dropped_interfaces = interface_dropped(start, end, node, steps)\n",
+ " err_interfaces = interface_errors(start, end, node, steps)\n",
+ " \n",
+ " #RDT Analysis\n",
+ " print(\"=====RDT Analysis=====\\n\")\n",
+ " plot_rdt_bytes(start, end, node, steps)\n",
+ " plot_rdt_ipc(start, end, node, steps)\n",
+    "    mem_bandwidth = get_rdt_memory_bandwidth(start, end, node, steps)\n",
+ " \n",
+ " #Memory Analysis:\n",
+ " print(\"=====Memory Analysis=====\\n\")\n",
+ " mem = get_memory_usage(start, end, node, steps)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Usage / Examples\n",
+ "\n",
+ "\n",
+ "##### CPU \n",
+ "\n",
+    "- For finding unused CPU cores\n",
+ "\n",
+ "```py\n",
+ "# Fetching from prometheus\n",
+ "cores = unused_cores('2020-07-31 08:00:12', '2020-07-31 08:01:12', 'pod12-node4')\n",
+ "\n",
+ "```\n",
+ "\n",
+ "- For finding fully used cores\n",
+ "\n",
+ "```py\n",
+ "# Fetching from prometheus\n",
+ "fully_used = fully_used_cores('2020-07-31 08:00:12', '2020-07-31 08:01:12', 'pod12-node4')\n",
+ "\n",
+ "```\n",
+ "\n",
+ "- Similarly for plotting used cores\n",
+ "\n",
+ "```py\n",
+ "# Fetching\n",
+ "plot_used_cores('2020-07-31 08:00:12', '2020-07-31 08:01:12', 'pod12-node4')\n",
+ "\n",
+ "#csv\n",
+ "# use Analysis-Monitoring-Local Notebook for correct analysis \n",
+ "plot_used_cores(csv='metrics_data/cpu-0/cpu-user-2020-06-02')\n",
+ "\n",
+ "```\n",
+ "\n",
+ "\n",
+ "##### Interface\n",
+ "\n",
+ "- Interface Dropped \n",
+ "\n",
+ "```py\n",
+ "# Fetching from prom\n",
+ "dropped_interfaces = interface_dropped('2020-07-31 08:00:12', '2020-07-31 08:01:12', 'pod12-node4')\n",
+ "\n",
+ "```\n",
+ "\n",
+ "- Interface Errors\n",
+ "\n",
+ "```py\n",
+ "# Fetching from prom\n",
+ "interface_errors('2020-07-31 08:00:12', '2020-07-31 08:01:12', 'pod12-node4')\n",
+ "```\n",
+ "\n",
+ "##### RDT\n",
+ "\n",
+ "- Plot bytes\n",
+ "\n",
+ "```py\n",
+ "# fetch\n",
+ "plot_rdt_bytes('2020-07-31 08:00:12', '2020-07-31 08:01:12','pod12-node4')\n",
+ "```\n",
+ "\n",
+ "- Plot ipc values\n",
+ "\n",
+ "```py\n",
+ "#fetch\n",
+ "plot_rdt_ipc('2020-07-31 08:00:12', '2020-07-31 08:01:12', 'pod12-node4')\n",
+ "```\n",
+ "\n",
+ "- Memory bandwidth\n",
+ "\n",
+ "```py\n",
+ "#fetch\n",
+ "get_rdt_memory_bandwidth('2020-07-31 08:00:12', '2020-07-31 08:01:12', 'pod12-node4')\n",
+ "```\n",
+ "\n",
+ "##### Memory\n",
+ "\n",
+ "- Memory usage\n",
+ "\n",
+ "```py\n",
+ "get_memory_usage('2020-08-03 08:00:12', '2020-08-03 08:01:12', 'pod12-node4')\n",
+ "```\n",
+ "\n",
+ "##### Analyse everything\n",
+ "\n",
+ "```py\n",
+ "# example alert_time: 2020-08-03 08:00:12\n",
+ "# example index: 'pod12-node4'\n",
+ "analyse(alert_time,index)\n",
+ "```"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### Checking Anomaly in logs"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+    "#Give folder name\n",
+ "foldername = \"results_2020-08-07_03-39-57\"\n",
+ "#Give index name - \"node1*\" or \"node4*\"\n",
+ "index = \"node4*\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "scrolled": true
+ },
+ "outputs": [],
+ "source": [
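+    "# disable TLS verification and its warnings for this Elasticsearch endpoint\n",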
+ "ssl_context = create_ssl_context()\n",
+ "ssl_context.check_hostname = False\n",
+ "ssl_context.verify_mode = ssl.CERT_NONE\n",
+ "urllib3.disable_warnings()\n",
+ "client = Elasticsearch(['https://elasticsearch:password123@10.10.120.211:31111'],verify_certs=False,ssl_context=ssl_context)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+    "vsperf = \"vsperf-overall_\" + foldername[8:] + \".log\"\n",
+    "s = Search(index=index).using(client).query(\"exists\", field=\"alert\").query(\"match_phrase\", log_path=vsperf)\n",
+    "alert_time = None\n",
+    "for hits in s.scan():\n",
+    "    # keep the time of the last matching alert; stays None if nothing matched\n",
+    "    alert_time = hits.alert_time\n",
+    "\n",
+    "print(alert_time)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "analyse(alert_time,index)"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.6.8"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
diff --git a/tools/lma/logs/dockerfile/elastalert/Dockerfile b/tools/lma/logs/dockerfile/elastalert/Dockerfile
new file mode 100644
index 00000000..3304ad17
--- /dev/null
+++ b/tools/lma/logs/dockerfile/elastalert/Dockerfile
@@ -0,0 +1,23 @@
+# Copyright 2020 Adarsh Yadav, Aditya Srivastava
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM python:alpine
+RUN apk --update upgrade && \
+ apk add gcc libffi-dev musl-dev python3-dev openssl-dev tzdata libmagic && \
+ rm -rf /var/cache/apk/*
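+# gcc and the *-dev headers above are only needed while pip compiles
+# elastalert's dependencies; they are removed again right after the install.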
+RUN pip install elastalert && \
+    apk del gcc libffi-dev musl-dev python3-dev openssl-dev
+RUN mkdir -p /opt/elastalert && \
+    mkdir -p /opt/elastalert/rules
+WORKDIR /opt/elastalert \ No newline at end of file
diff --git a/tools/lma/logs/dockerfile/fluentd/Dockerfile b/tools/lma/logs/dockerfile/fluentd/Dockerfile
new file mode 100644
index 00000000..19dea0f8
--- /dev/null
+++ b/tools/lma/logs/dockerfile/fluentd/Dockerfile
@@ -0,0 +1,23 @@
+# Copyright 2020 Adarsh Yadav, Aditya Srivastava
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM fluent/fluentd:v1.11.0-debian-1.0
+USER root
+RUN gem sources --add https://rubygems.org/
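+# Install the fluentd plugins this logging stack relies on: the Elasticsearch
+# output, its x-pack extension, tag rewriting for routing, and fluent-plugin-dio.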
+RUN apt-get update \
+    && gem install fluent-plugin-elasticsearch \
+    && gem install elasticsearch-xpack \
+    && gem install fluent-plugin-rewrite-tag-filter \
+    && gem install fluent-plugin-dio
+USER fluent \ No newline at end of file
diff --git a/tools/lma/logs/jupyter-notebooks/Trend-Analysis.ipynb b/tools/lma/logs/jupyter-notebooks/Trend-Analysis.ipynb
new file mode 100644
index 00000000..1bc770a1
--- /dev/null
+++ b/tools/lma/logs/jupyter-notebooks/Trend-Analysis.ipynb
@@ -0,0 +1,308 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Trend Analysis\n",
+ "##### Contributor:\n",
+ "\n",
+ "- Adarsh Yadav <adiyadav0509@gmail.com> "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import pandas as pd\n",
+ "import matplotlib.pyplot as plt\n",
+ "import seaborn as sns\n",
+ "import matplotlib.dates as mdates\n",
+ "import numpy as np\n",
+ "import io \n",
+ "\n",
+ "from elasticsearch import Elasticsearch\n",
+ "from elasticsearch_dsl import Search\n",
+ "from elasticsearch.connection import create_ssl_context\n",
+ "import csv\n",
+ "import ssl\n",
+ "import urllib3\n",
+ "import os"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Enter foldername and index"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#Give folder name\n",
+ "# foldername = \"results_2020-06-12_06-47-56\"\n",
+ "foldername = \"result-test1\"\n",
+ "#Give index name - \"node1*\" or \"node4*\"\n",
+ "index = \"node4*\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "ssl_context = create_ssl_context()\n",
+ "ssl_context.check_hostname = False\n",
+ "ssl_context.verify_mode = ssl.CERT_NONE\n",
+ "urllib3.disable_warnings()\n",
+ "client = Elasticsearch(['https://elasticsearch:password123@10.10.120.211:31111'],verify_certs=False,ssl_context=ssl_context)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "# TRex"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "filename = \"/tmp/\"+foldername+\"/trex-liveresults-counts.dat\"\n",
+ "s = Search(index=index).using(client).query(\"exists\", field=\"ts\").query(\"match_phrase\", log_path=filename)\n",
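+    "# each hit is one timestamped counter sample logged by TRex during the run\n",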
+ "\n",
+ "trex = pd.DataFrame()\n",
+ "trex_data = dict()\n",
+ "for hits in s.scan():\n",
+ " trex_data['ts'] = hits.ts\n",
+ " trex_data['rx_pkts'] = hits.rx_pkts\n",
+ " trex_data['rx_port'] = hits.rx_port\n",
+ " trex_data['tx_port'] = hits.tx_port\n",
+ " trex = trex.append(trex_data, ignore_index=True)\n",
+ "if not trex.empty:\n",
+ " #convert 'ts' to datetime\n",
+ " trex['ts'] = pd.to_datetime(trex['ts'],unit='s')\n",
+ " trex_grp = trex.groupby('rx_port')\n",
+ " trex_rx_0 = trex_grp.get_group(0.0) \n",
+ " trex_rx_1 = trex_grp.get_group(1.0) \n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "if not trex.empty:\n",
+ " fig, ax = plt.subplots(2,figsize=(16, 10))\n",
+ " ax[0].plot(trex_rx_0['ts'],\n",
+ " trex_rx_0['rx_pkts'],\n",
+ " 'tab:orange')\n",
+ " ax[0].title.set_text(\"At rx_port=0 & tx_port=1\")\n",
+ " ax[0].set(xlabel=\"timestamp\")\n",
+ " ax[0].set(ylabel=\"rx_pkts\")\n",
+ "\n",
+ " ax[1].plot(trex_rx_1['ts'],\n",
+ " trex_rx_1['rx_pkts'],\n",
+ " 'tab:green')\n",
+ " ax[1].title.set_text(\"At rx_port=1 & tx_port=0\")\n",
+ " ax[1].set(xlabel=\"timestamp\")\n",
+ " ax[1].set(ylabel=\"rx_pkts\")\n",
+ "\n",
+ " #change date format\n",
+ " myFmt = mdates.DateFormatter('%Y-%m-%d %H:%M:%S')\n",
+ " for i in range(2):\n",
+ " ax[i].xaxis.set_major_formatter(myFmt) \n",
+ " plt.show()\n",
+ "else:\n",
+    "    print(\"No data found\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Spirent"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "filename = \"/tmp/\"+foldername+\"/stc-liveresults.dat.rx\"\n",
+ "s = Search(index=index).using(client).query(\"exists\", field=\"ts\").query(\"match_phrase\", log_path=filename)\n",
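+    "# each hit is one timestamped frame-count sample per Spirent receive port\n",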
+ "\n",
+ "spirent = pd.DataFrame()\n",
+ "spirent_data = dict()\n",
+ "for hits in s.scan():\n",
+ " spirent_data['ts'] = hits.ts\n",
+ " spirent_data['RxPrt'] = hits.RxPrt\n",
+ " spirent_data['FrCnt'] = hits.FrCnt\n",
+ " spirent = spirent.append(spirent_data, ignore_index=True)\n",
+ "if not spirent.empty:\n",
+ " #convert 'ts' to datetime\n",
+ " spirent['ts'] = pd.to_datetime(spirent['ts'],unit='s')\n",
+ " spirent_grp = spirent.groupby('RxPrt')\n",
+ " spirent_rx_1 = spirent_grp.get_group('Port //1/1') \n",
+ " spirent_rx_2 = spirent_grp.get_group('Port //1/2') "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "if not spirent.empty:\n",
+ " fig, ax = plt.subplots(2,figsize=(16, 10))\n",
+ " ax[0].plot(spirent_rx_1['ts'],\n",
+ " spirent_rx_1['FrCnt'],\n",
+ " 'tab:orange')\n",
+ " ax[0].title.set_text(\"At RxPrt=//1/1\")\n",
+ " ax[0].set(xlabel=\"timestamp\")\n",
+ " ax[0].set(ylabel=\"FrCnt\")\n",
+ "\n",
+ " ax[1].plot(spirent_rx_2['ts'],\n",
+ " spirent_rx_2['FrCnt'],\n",
+ " 'tab:green')\n",
+ " ax[1].title.set_text(\"At RxPrt=//1/2\")\n",
+ " ax[1].set(xlabel=\"timestamp\")\n",
+ " ax[1].set(ylabel=\"FrCnt\")\n",
+ "\n",
+ " #change date format\n",
+ " myFmt = mdates.DateFormatter('%Y-%m-%d %H:%M:%S')\n",
+ " for i in range(2):\n",
+ " ax[i].xaxis.set_major_formatter(myFmt) \n",
+ " plt.show()\n",
+ "else:\n",
+    "    print(\"No data found\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Ixia"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "filename = \"/tmp/\"+foldername+\"/Traffic Item Statistics.csv\"\n",
+ "s = Search(index=index).using(client).query(\"exists\", field=\"msg\").query(\"match_phrase\", log_path=filename)\n",
+ "\n",
+ "for hits in s.scan():\n",
+ " with open('./ixia-traffic.csv', 'a+') as f:\n",
+ " f.write(hits.msg+\"\\n\")\n",
+ " \n",
+ "ixia = pd.DataFrame()\n",
+ "if os.path.exists('./ixia-traffic.csv'):\n",
+ " ixia = pd.read_csv('./ixia-traffic.csv')\n",
+    "    # the file handle was already closed by the 'with' block; just remove the file\n",
+    "    os.remove('./ixia-traffic.csv')\n",
+ "if not ixia.empty:\n",
+ " ixia = ixia[['~ElapsedTime','Traffic Item 1:Frames Delta','Traffic Item 1:Loss %']].astype(float)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "if not ixia.empty:\n",
+ " fig, ax = plt.subplots(2,figsize=(16, 10))\n",
+ " ax[0].plot(ixia['~ElapsedTime'],\n",
+ " ixia['Traffic Item 1:Frames Delta'],\n",
+ " 'tab:orange')\n",
+ " ax[0].set(xlabel=\"Elapsed Time\")\n",
+ " ax[0].set(ylabel=\"Frames Delta\")\n",
+ "\n",
+ " ax[1].plot(ixia['~ElapsedTime'],\n",
+ " ixia['Traffic Item 1:Loss %'],\n",
+ " 'tab:green')\n",
+ " ax[1].set(xlabel=\"Elapsed Time\")\n",
+ " ax[1].set(ylabel=\"Loss %\")\n",
+ "\n",
+ " plt.show()\n",
+ "else:\n",
+    "    print(\"No data found\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Time Analysis"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+    "filename = \"/tmp/\"+foldername+\"/\"\n",
+    "# each duration is logged as its own field; query and print them in turn\n",
+    "durations = [('setup_duration', 'Setup duration'),\n",
+    "             ('iteration_duration', 'Iteration duration'),\n",
+    "             ('traffic_duration', 'Traffic duration'),\n",
+    "             ('test_duration', 'Test duration'),\n",
+    "             ('report_duration', 'Report duration'),\n",
+    "             ('vswitch_duration', 'Vswitch starting duration')]\n",
+    "for field, label in durations:\n",
+    "    s = Search(index=index).using(client).query(\"exists\", field=field).query(\"match_phrase\", log_path=filename)\n",
+    "    for hits in s.scan():\n",
+    "        print(label + \": \", hits[field], \"s\")"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.6.8"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
diff --git a/tools/lma/metrics/dashboard/cpu_usage_using.json b/tools/lma/metrics/dashboard/cpu_usage_using.json
new file mode 100644
index 00000000..85f7f122
--- /dev/null
+++ b/tools/lma/metrics/dashboard/cpu_usage_using.json
@@ -0,0 +1,750 @@
+{
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": "prometheus",
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "limit": 100,
+ "name": "Monitoring",
+ "showIn": 0,
+ "type": "dashboard"
+ }
+ ]
+ },
+ "editable": true,
+ "gnetId": null,
+ "graphTooltip": 0,
+ "id": 4,
+ "iteration": 1596637894836,
+ "links": [],
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "grid": {},
+ "gridPos": {
+ "h": 7,
+ "w": 24,
+ "x": 0,
+ "y": 0
+ },
+ "hiddenSeries": false,
+ "id": 3,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": false,
+ "hideZero": true,
+ "max": true,
+ "min": true,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pluginVersion": "7.1.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "collectd_cpu_percent{exported_instance='$host'}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "CPU Usage",
+ "tooltip": {
+ "msResolution": true,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 24,
+ "x": 0,
+ "y": 7
+ },
+ "hiddenSeries": false,
+ "id": 4,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pluginVersion": "7.1.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "collectd_cpu_percent{cpu='$core', exported_instance='$host'}",
+ "interval": "",
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "CPU utilization per core",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 24,
+ "x": 0,
+ "y": 14
+ },
+ "hiddenSeries": false,
+ "id": 5,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pluginVersion": "7.1.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "collectd_cpu_percent{cpu='$core',exported_instance='$host'}",
+ "interval": "",
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "CPU Usage per core",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ }
+ ],
+ "refresh": "10s",
+ "schemaVersion": 26,
+ "style": "dark",
+ "tags": [
+ "monitoring"
+ ],
+ "templating": {
+ "list": [
+ {
+ "current": {
+ "selected": true,
+ "text": "prometheus",
+ "value": "prometheus"
+ },
+ "hide": 0,
+ "includeAll": false,
+ "label": null,
+ "multi": false,
+ "name": "datasource",
+ "options": [],
+ "query": "prometheus",
+ "queryValue": "",
+ "refresh": 1,
+ "regex": "",
+ "skipUrlSync": false,
+ "type": "datasource"
+ },
+ {
+ "allValue": null,
+ "current": {
+ "selected": false,
+ "text": "pod12-node4",
+ "value": "pod12-node4"
+ },
+ "hide": 0,
+ "includeAll": false,
+ "label": null,
+ "multi": false,
+ "name": "host",
+ "options": [
+ {
+ "selected": true,
+ "text": "pod12-node4",
+ "value": "pod12-node4"
+ }
+ ],
+ "query": "pod12-node4,",
+ "queryValue": "",
+ "skipUrlSync": false,
+ "type": "custom"
+ },
+ {
+ "allValue": null,
+ "current": {
+ "selected": true,
+ "text": "0",
+ "value": "0"
+ },
+ "hide": 0,
+ "includeAll": true,
+ "label": null,
+ "multi": false,
+ "name": "core",
+ "options": [
+ {
+ "selected": false,
+ "text": "All",
+ "value": "$__all"
+ },
+ {
+ "selected": true,
+ "text": "0",
+ "value": "0"
+ },
+ {
+ "selected": false,
+ "text": "1",
+ "value": "1"
+ },
+ {
+ "selected": false,
+ "text": "2",
+ "value": "2"
+ },
+ {
+ "selected": false,
+ "text": "3",
+ "value": "3"
+ },
+ {
+ "selected": false,
+ "text": "4",
+ "value": "4"
+ },
+ {
+ "selected": false,
+ "text": "5",
+ "value": "5"
+ },
+ {
+ "selected": false,
+ "text": "6",
+ "value": "6"
+ },
+ {
+ "selected": false,
+ "text": "7",
+ "value": "7"
+ },
+ {
+ "selected": false,
+ "text": "8",
+ "value": "8"
+ },
+ {
+ "selected": false,
+ "text": "9",
+ "value": "9"
+ },
+ {
+ "selected": false,
+ "text": "10",
+ "value": "10"
+ },
+ {
+ "selected": false,
+ "text": "11",
+ "value": "11"
+ },
+ {
+ "selected": false,
+ "text": "12",
+ "value": "12"
+ },
+ {
+ "selected": false,
+ "text": "13",
+ "value": "13"
+ },
+ {
+ "selected": false,
+ "text": "14",
+ "value": "14"
+ },
+ {
+ "selected": false,
+ "text": "15",
+ "value": "15"
+ },
+ {
+ "selected": false,
+ "text": "16",
+ "value": "16"
+ },
+ {
+ "selected": false,
+ "text": "17",
+ "value": "17"
+ },
+ {
+ "selected": false,
+ "text": "18",
+ "value": "18"
+ },
+ {
+ "selected": false,
+ "text": "19",
+ "value": "19"
+ },
+ {
+ "selected": false,
+ "text": "20",
+ "value": "20"
+ },
+ {
+ "selected": false,
+ "text": "21",
+ "value": "21"
+ },
+ {
+ "selected": false,
+ "text": "22",
+ "value": "22"
+ },
+ {
+ "selected": false,
+ "text": "23",
+ "value": "23"
+ },
+ {
+ "selected": false,
+ "text": "24",
+ "value": "24"
+ },
+ {
+ "selected": false,
+ "text": "25",
+ "value": "25"
+ },
+ {
+ "selected": false,
+ "text": "26",
+ "value": "26"
+ },
+ {
+ "selected": false,
+ "text": "27",
+ "value": "27"
+ },
+ {
+ "selected": false,
+ "text": "28",
+ "value": "28"
+ },
+ {
+ "selected": false,
+ "text": "29",
+ "value": "29"
+ },
+ {
+ "selected": false,
+ "text": "30",
+ "value": "30"
+ },
+ {
+ "selected": false,
+ "text": "31",
+ "value": "31"
+ },
+ {
+ "selected": false,
+ "text": "32",
+ "value": "32"
+ },
+ {
+ "selected": false,
+ "text": "33",
+ "value": "33"
+ },
+ {
+ "selected": false,
+ "text": "34",
+ "value": "34"
+ },
+ {
+ "selected": false,
+ "text": "35",
+ "value": "35"
+ },
+ {
+ "selected": false,
+ "text": "36",
+ "value": "36"
+ },
+ {
+ "selected": false,
+ "text": "37",
+ "value": "37"
+ },
+ {
+ "selected": false,
+ "text": "38",
+ "value": "38"
+ },
+ {
+ "selected": false,
+ "text": "39",
+ "value": "39"
+ },
+ {
+ "selected": false,
+ "text": "40",
+ "value": "40"
+ },
+ {
+ "selected": false,
+ "text": "41",
+ "value": "41"
+ },
+ {
+ "selected": false,
+ "text": "42",
+ "value": "42"
+ },
+ {
+ "selected": false,
+ "text": "43",
+ "value": "43"
+ },
+ {
+ "selected": false,
+ "text": "44",
+ "value": "44"
+ },
+ {
+ "selected": false,
+ "text": "45",
+ "value": "45"
+ },
+ {
+ "selected": false,
+ "text": "46",
+ "value": "46"
+ },
+ {
+ "selected": false,
+ "text": "47",
+ "value": "47"
+ },
+ {
+ "selected": false,
+ "text": "48",
+ "value": "48"
+ },
+ {
+ "selected": false,
+ "text": "49",
+ "value": "49"
+ },
+ {
+ "selected": false,
+ "text": "50",
+ "value": "50"
+ },
+ {
+ "selected": false,
+ "text": "51",
+ "value": "51"
+ },
+ {
+ "selected": false,
+ "text": "52",
+ "value": "52"
+ },
+ {
+ "selected": false,
+ "text": "53",
+ "value": "53"
+ },
+ {
+ "selected": false,
+ "text": "54",
+ "value": "54"
+ },
+ {
+ "selected": false,
+ "text": "55",
+ "value": "55"
+ },
+ {
+ "selected": false,
+ "text": "56",
+ "value": "56"
+ },
+ {
+ "selected": false,
+ "text": "57",
+ "value": "57"
+ },
+ {
+ "selected": false,
+ "text": "58",
+ "value": "58"
+ },
+ {
+ "selected": false,
+ "text": "59",
+ "value": "59"
+ },
+ {
+ "selected": false,
+ "text": "60",
+ "value": "60"
+ },
+ {
+ "selected": false,
+ "text": "61",
+ "value": "61"
+ },
+ {
+ "selected": false,
+ "text": "62",
+ "value": "62"
+ },
+ {
+ "selected": false,
+ "text": "63",
+ "value": "63"
+ },
+ {
+ "selected": false,
+ "text": "64",
+ "value": "64"
+ }
+ ],
+ "query": "0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64",
+ "queryValue": "",
+ "skipUrlSync": false,
+ "type": "custom"
+ }
+ ]
+ },
+ "time": {
+ "from": "now-5m",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "10s",
+ "30s",
+ "1m",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "browser",
+ "title": "CPU Usage",
+ "uid": "XeDwSiSGk",
+ "version": 13
+} \ No newline at end of file
diff --git a/tools/lma/metrics/dashboard/memory_using.json b/tools/lma/metrics/dashboard/memory_using.json
new file mode 100644
index 00000000..3b92d8f5
--- /dev/null
+++ b/tools/lma/metrics/dashboard/memory_using.json
@@ -0,0 +1,337 @@
+{
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": "prometheus",
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "limit": 100,
+ "name": "Monitoring",
+ "showIn": 0,
+ "type": "dashboard"
+ }
+ ]
+ },
+ "editable": true,
+ "gnetId": null,
+ "graphTooltip": 0,
+ "id": 6,
+ "iteration": 1597616052316,
+ "links": [],
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 15,
+ "w": 24,
+ "x": 0,
+ "y": 0
+ },
+ "hiddenSeries": false,
+ "id": 1,
+ "interval": "1s",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": true,
+ "show": false,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pluginVersion": "7.1.3",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(collectd_memory{exported_instance='$host', memory='$type'}[$range])",
+ "interval": "",
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Bytes",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ }
+ ],
+ "refresh": "30s",
+ "schemaVersion": 26,
+ "style": "dark",
+ "tags": [
+ "monitoring"
+ ],
+ "templating": {
+ "list": [
+ {
+ "current": {
+ "selected": false,
+ "text": "prometheus",
+ "value": "prometheus"
+ },
+ "hide": 0,
+ "includeAll": false,
+ "label": null,
+ "multi": false,
+ "name": "datasource",
+ "options": [],
+ "query": "prometheus",
+ "queryValue": "",
+ "refresh": 1,
+ "regex": "",
+ "skipUrlSync": false,
+ "type": "datasource"
+ },
+ {
+ "allValue": null,
+ "current": {
+ "selected": false,
+ "text": "pod12-node4",
+ "value": "pod12-node4"
+ },
+ "hide": 0,
+ "includeAll": false,
+ "label": null,
+ "multi": false,
+ "name": "host",
+ "options": [
+ {
+ "selected": true,
+ "text": "pod12-node4",
+ "value": "pod12-node4"
+ }
+ ],
+ "query": "pod12-node4,",
+ "queryValue": "",
+ "skipUrlSync": false,
+ "type": "custom"
+ },
+ {
+ "auto": false,
+ "auto_count": 30,
+ "auto_min": "10s",
+ "current": {
+ "selected": false,
+ "text": "30s",
+ "value": "30s"
+ },
+ "hide": 0,
+ "label": null,
+ "name": "range",
+ "options": [
+ {
+ "selected": true,
+ "text": "30s",
+ "value": "30s"
+ },
+ {
+ "selected": false,
+ "text": "1m",
+ "value": "1m"
+ },
+ {
+ "selected": false,
+ "text": "5m",
+ "value": "5m"
+ },
+ {
+ "selected": false,
+ "text": "10m",
+ "value": "10m"
+ },
+ {
+ "selected": false,
+ "text": "30m",
+ "value": "30m"
+ },
+ {
+ "selected": false,
+ "text": "1h",
+ "value": "1h"
+ },
+ {
+ "selected": false,
+ "text": "6h",
+ "value": "6h"
+ },
+ {
+ "selected": false,
+ "text": "12h",
+ "value": "12h"
+ },
+ {
+ "selected": false,
+ "text": "1d",
+ "value": "1d"
+ },
+ {
+ "selected": false,
+ "text": "7d",
+ "value": "7d"
+ },
+ {
+ "selected": false,
+ "text": "14d",
+ "value": "14d"
+ },
+ {
+ "selected": false,
+ "text": "30d",
+ "value": "30d"
+ }
+ ],
+ "query": "30s,1m,5m,10m,30m,1h,6h,12h,1d,7d,14d,30d",
+ "queryValue": "",
+ "refresh": 2,
+ "skipUrlSync": false,
+ "type": "interval"
+ },
+ {
+ "allValue": null,
+ "current": {
+ "selected": true,
+ "text": "used",
+ "value": "used"
+ },
+ "hide": 0,
+ "includeAll": false,
+ "label": null,
+ "multi": false,
+ "name": "type",
+ "options": [
+ {
+ "selected": false,
+ "text": "buffered",
+ "value": "buffered"
+ },
+ {
+ "selected": false,
+ "text": "cached",
+ "value": "cached"
+ },
+ {
+ "selected": false,
+ "text": "free",
+ "value": "free"
+ },
+ {
+ "selected": false,
+ "text": "slab_recl",
+ "value": "slab_recl"
+ },
+ {
+ "selected": false,
+ "text": "slab_unrecl",
+ "value": "slab_unrecl"
+ },
+ {
+ "selected": true,
+ "text": "used",
+ "value": "used"
+ }
+ ],
+ "query": "buffered,cached,free,slab_recl,slab_unrecl,used",
+ "queryValue": "",
+ "skipUrlSync": false,
+ "type": "custom"
+ }
+ ]
+ },
+ "time": {
+ "from": "now-5m",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "10s",
+ "30s",
+ "1m",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "browser",
+ "title": "Memory",
+ "uid": "kuro-mem",
+ "version": 4
+} \ No newline at end of file
diff --git a/tools/lma/metrics/dashboard/ovs_stats_using.json b/tools/lma/metrics/dashboard/ovs_stats_using.json
new file mode 100644
index 00000000..1e679fbe
--- /dev/null
+++ b/tools/lma/metrics/dashboard/ovs_stats_using.json
@@ -0,0 +1,854 @@
+{
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": "prometheus",
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "limit": 100,
+ "name": "Monitoring",
+ "showIn": 0,
+ "type": "dashboard"
+ }
+ ]
+ },
+ "editable": true,
+ "gnetId": null,
+ "graphTooltip": 0,
+ "id": 6,
+ "iteration": 1596643135141,
+ "links": [],
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 6,
+ "w": 24,
+ "x": 0,
+ "y": 0
+ },
+ "hiddenSeries": false,
+ "id": 1,
+ "interval": "1s",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pluginVersion": "7.1.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(collectd_ovs_stats_if_rx_octets_total{exported_instance='$host'}[$__interval])",
+ "interval": "",
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Average RX values",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 6,
+ "w": 24,
+ "x": 0,
+ "y": 6
+ },
+ "hiddenSeries": false,
+ "id": 2,
+ "interval": "1s",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pluginVersion": "7.1.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(collectd_ovs_stats_if_tx_octets_total{exported_instance='$host'}[$__interval])",
+ "interval": "",
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Average TX values",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 24,
+ "x": 0,
+ "y": 12
+ },
+ "hiddenSeries": false,
+ "id": 3,
+ "interval": "1s",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pluginVersion": "7.1.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(collectd_ovs_stats_if_collisions_total{exported_instance='$host'}[$range])",
+ "interval": "",
+ "legendFormat": "",
+ "refId": "A"
+ },
+ {
+ "expr": "rate(collectd_ovs_stats_if_dropped_0_total{exported_instance='$host'}[$range])",
+ "interval": "",
+ "legendFormat": "",
+ "refId": "B"
+ },
+ {
+ "expr": "rate(collectd_ovs_stats_if_dropped_1_total{exported_instance='$host'}[$range])",
+ "interval": "",
+ "legendFormat": "",
+ "refId": "C"
+ },
+ {
+ "expr": "rate(collectd_ovs_stats_if_errors_0_total{exported_instance='$host'}[$range])",
+ "interval": "",
+ "legendFormat": "",
+ "refId": "D"
+ },
+ {
+ "expr": "rate(collectd_ovs_stats_if_errors_1_total{exported_instance='$host'}[$range])",
+ "interval": "",
+ "legendFormat": "",
+ "refId": "E"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Average Collisions, Drops and Error values",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ }
+ ],
+ "refresh": "30s",
+ "schemaVersion": 26,
+ "style": "dark",
+ "tags": [
+ "monitoring"
+ ],
+ "templating": {
+ "list": [
+ {
+ "current": {
+ "selected": false,
+ "text": "prometheus",
+ "value": "prometheus"
+ },
+ "hide": 0,
+ "includeAll": false,
+ "label": null,
+ "multi": false,
+ "name": "datasource",
+ "options": [],
+ "query": "prometheus",
+ "queryValue": "",
+ "refresh": 1,
+ "regex": "",
+ "skipUrlSync": false,
+ "type": "datasource"
+ },
+ {
+ "allValue": null,
+ "current": {
+ "selected": false,
+ "text": "pod12-node4",
+ "value": "pod12-node4"
+ },
+ "hide": 0,
+ "includeAll": false,
+ "label": null,
+ "multi": false,
+ "name": "host",
+ "options": [
+ {
+ "selected": true,
+ "text": "pod12-node4",
+ "value": "pod12-node4"
+ }
+ ],
+ "query": "pod12-node4,",
+ "queryValue": "",
+ "skipUrlSync": false,
+ "type": "custom"
+ },
+ {
+ "allValue": null,
+ "current": {
+ "selected": true,
+ "text": "0",
+ "value": "0"
+ },
+ "hide": 0,
+ "includeAll": true,
+ "label": null,
+ "multi": false,
+ "name": "core",
+ "options": [
+ {
+ "selected": false,
+ "text": "All",
+ "value": "$__all"
+ },
+ {
+ "selected": true,
+ "text": "0",
+ "value": "0"
+ },
+ {
+ "selected": false,
+ "text": "1",
+ "value": "1"
+ },
+ {
+ "selected": false,
+ "text": "2",
+ "value": "2"
+ },
+ {
+ "selected": false,
+ "text": "3",
+ "value": "3"
+ },
+ {
+ "selected": false,
+ "text": "4",
+ "value": "4"
+ },
+ {
+ "selected": false,
+ "text": "5",
+ "value": "5"
+ },
+ {
+ "selected": false,
+ "text": "6",
+ "value": "6"
+ },
+ {
+ "selected": false,
+ "text": "7",
+ "value": "7"
+ },
+ {
+ "selected": false,
+ "text": "8",
+ "value": "8"
+ },
+ {
+ "selected": false,
+ "text": "9",
+ "value": "9"
+ },
+ {
+ "selected": false,
+ "text": "10",
+ "value": "10"
+ },
+ {
+ "selected": false,
+ "text": "11",
+ "value": "11"
+ },
+ {
+ "selected": false,
+ "text": "12",
+ "value": "12"
+ },
+ {
+ "selected": false,
+ "text": "13",
+ "value": "13"
+ },
+ {
+ "selected": false,
+ "text": "14",
+ "value": "14"
+ },
+ {
+ "selected": false,
+ "text": "15",
+ "value": "15"
+ },
+ {
+ "selected": false,
+ "text": "16",
+ "value": "16"
+ },
+ {
+ "selected": false,
+ "text": "17",
+ "value": "17"
+ },
+ {
+ "selected": false,
+ "text": "18",
+ "value": "18"
+ },
+ {
+ "selected": false,
+ "text": "19",
+ "value": "19"
+ },
+ {
+ "selected": false,
+ "text": "20",
+ "value": "20"
+ },
+ {
+ "selected": false,
+ "text": "21",
+ "value": "21"
+ },
+ {
+ "selected": false,
+ "text": "22",
+ "value": "22"
+ },
+ {
+ "selected": false,
+ "text": "23",
+ "value": "23"
+ },
+ {
+ "selected": false,
+ "text": "24",
+ "value": "24"
+ },
+ {
+ "selected": false,
+ "text": "25",
+ "value": "25"
+ },
+ {
+ "selected": false,
+ "text": "26",
+ "value": "26"
+ },
+ {
+ "selected": false,
+ "text": "27",
+ "value": "27"
+ },
+ {
+ "selected": false,
+ "text": "28",
+ "value": "28"
+ },
+ {
+ "selected": false,
+ "text": "29",
+ "value": "29"
+ },
+ {
+ "selected": false,
+ "text": "30",
+ "value": "30"
+ },
+ {
+ "selected": false,
+ "text": "31",
+ "value": "31"
+ },
+ {
+ "selected": false,
+ "text": "32",
+ "value": "32"
+ },
+ {
+ "selected": false,
+ "text": "33",
+ "value": "33"
+ },
+ {
+ "selected": false,
+ "text": "34",
+ "value": "34"
+ },
+ {
+ "selected": false,
+ "text": "35",
+ "value": "35"
+ },
+ {
+ "selected": false,
+ "text": "36",
+ "value": "36"
+ },
+ {
+ "selected": false,
+ "text": "37",
+ "value": "37"
+ },
+ {
+ "selected": false,
+ "text": "38",
+ "value": "38"
+ },
+ {
+ "selected": false,
+ "text": "39",
+ "value": "39"
+ },
+ {
+ "selected": false,
+ "text": "40",
+ "value": "40"
+ },
+ {
+ "selected": false,
+ "text": "41",
+ "value": "41"
+ },
+ {
+ "selected": false,
+ "text": "42",
+ "value": "42"
+ },
+ {
+ "selected": false,
+ "text": "43",
+ "value": "43"
+ },
+ {
+ "selected": false,
+ "text": "44",
+ "value": "44"
+ },
+ {
+ "selected": false,
+ "text": "45",
+ "value": "45"
+ },
+ {
+ "selected": false,
+ "text": "46",
+ "value": "46"
+ },
+ {
+ "selected": false,
+ "text": "47",
+ "value": "47"
+ },
+ {
+ "selected": false,
+ "text": "48",
+ "value": "48"
+ },
+ {
+ "selected": false,
+ "text": "49",
+ "value": "49"
+ },
+ {
+ "selected": false,
+ "text": "50",
+ "value": "50"
+ },
+ {
+ "selected": false,
+ "text": "51",
+ "value": "51"
+ },
+ {
+ "selected": false,
+ "text": "52",
+ "value": "52"
+ },
+ {
+ "selected": false,
+ "text": "53",
+ "value": "53"
+ },
+ {
+ "selected": false,
+ "text": "54",
+ "value": "54"
+ },
+ {
+ "selected": false,
+ "text": "55",
+ "value": "55"
+ },
+ {
+ "selected": false,
+ "text": "56",
+ "value": "56"
+ },
+ {
+ "selected": false,
+ "text": "57",
+ "value": "57"
+ },
+ {
+ "selected": false,
+ "text": "58",
+ "value": "58"
+ },
+ {
+ "selected": false,
+ "text": "59",
+ "value": "59"
+ },
+ {
+ "selected": false,
+ "text": "60",
+ "value": "60"
+ },
+ {
+ "selected": false,
+ "text": "61",
+ "value": "61"
+ },
+ {
+ "selected": false,
+ "text": "62",
+ "value": "62"
+ },
+ {
+ "selected": false,
+ "text": "63",
+ "value": "63"
+ },
+ {
+ "selected": false,
+ "text": "64",
+ "value": "64"
+ }
+ ],
+ "query": "0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64",
+ "queryValue": "",
+ "skipUrlSync": false,
+ "type": "custom"
+ },
+ {
+ "auto": false,
+ "auto_count": 30,
+ "auto_min": "10s",
+ "current": {
+ "selected": false,
+ "text": "30s",
+ "value": "30s"
+ },
+ "hide": 0,
+ "label": null,
+ "name": "range",
+ "options": [
+ {
+ "selected": true,
+ "text": "30s",
+ "value": "30s"
+ },
+ {
+ "selected": false,
+ "text": "1m",
+ "value": "1m"
+ },
+ {
+ "selected": false,
+ "text": "5m",
+ "value": "5m"
+ },
+ {
+ "selected": false,
+ "text": "10m",
+ "value": "10m"
+ },
+ {
+ "selected": false,
+ "text": "30m",
+ "value": "30m"
+ },
+ {
+ "selected": false,
+ "text": "1h",
+ "value": "1h"
+ },
+ {
+ "selected": false,
+ "text": "6h",
+ "value": "6h"
+ },
+ {
+ "selected": false,
+ "text": "12h",
+ "value": "12h"
+ },
+ {
+ "selected": false,
+ "text": "1d",
+ "value": "1d"
+ },
+ {
+ "selected": false,
+ "text": "7d",
+ "value": "7d"
+ },
+ {
+ "selected": false,
+ "text": "14d",
+ "value": "14d"
+ },
+ {
+ "selected": false,
+ "text": "30d",
+ "value": "30d"
+ }
+ ],
+ "query": "30s,1m,5m,10m,30m,1h,6h,12h,1d,7d,14d,30d",
+ "queryValue": "",
+ "refresh": 2,
+ "skipUrlSync": false,
+ "type": "interval"
+ }
+ ]
+ },
+ "time": {
+ "from": "now-5m",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "10s",
+ "30s",
+ "1m",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "browser",
+ "title": "OVS Stats",
+ "uid": "K1N5ciIGz",
+ "version": 7
+ } \ No newline at end of file
diff --git a/tools/lma/metrics/dashboard/rdt_using.json b/tools/lma/metrics/dashboard/rdt_using.json
new file mode 100644
index 00000000..a0ce7987
--- /dev/null
+++ b/tools/lma/metrics/dashboard/rdt_using.json
@@ -0,0 +1,833 @@
+{
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": "prometheus",
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "limit": 100,
+ "name": "Monitoring",
+ "showIn": 0,
+ "type": "dashboard"
+ }
+ ]
+ },
+ "editable": true,
+ "gnetId": null,
+ "graphTooltip": 0,
+ "id": 7,
+ "iteration": 1597615840124,
+ "links": [],
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 6,
+ "w": 24,
+ "x": 0,
+ "y": 0
+ },
+ "hiddenSeries": false,
+ "id": 1,
+ "interval": "1s",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": true,
+ "show": false,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pluginVersion": "7.1.3",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(collectd_intel_rdt_bytes{exported_instance='$host', intel_rdt='$intel_rdt'}[$range])",
+ "interval": "",
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "RDT Bytes",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 6,
+ "w": 24,
+ "x": 0,
+ "y": 6
+ },
+ "hiddenSeries": false,
+ "id": 2,
+ "interval": "1s",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": true,
+ "show": false,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pluginVersion": "7.1.3",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(collectd_intel_rdt_ipc{exported_instance='$host', intel_rdt='$intel_rdt'}[$range])",
+ "interval": "",
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "IPC values",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 24,
+ "x": 0,
+ "y": 12
+ },
+ "hiddenSeries": false,
+ "id": 3,
+ "interval": "1s",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": true,
+ "show": false,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pluginVersion": "7.1.3",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(collectd_intel_rdt_memory_bandwidth_total{exported_instance='$host', type='local'}[$range])",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "",
+ "refId": "A"
+ },
+ {
+ "expr": "rate(collectd_intel_rdt_memory_bandwidth_total{exported_instance='$host', type='remote'}[$range])",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "",
+ "refId": "B"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Memory Bandwidth Total",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ }
+ ],
+ "refresh": "30s",
+ "schemaVersion": 26,
+ "style": "dark",
+ "tags": [
+ "monitoring"
+ ],
+ "templating": {
+ "list": [
+ {
+ "current": {
+ "selected": false,
+ "text": "prometheus",
+ "value": "prometheus"
+ },
+ "hide": 0,
+ "includeAll": false,
+ "label": null,
+ "multi": false,
+ "name": "datasource",
+ "options": [],
+ "query": "prometheus",
+ "queryValue": "",
+ "refresh": 1,
+ "regex": "",
+ "skipUrlSync": false,
+ "type": "datasource"
+ },
+ {
+ "allValue": null,
+ "current": {
+ "selected": false,
+ "text": "pod12-node4",
+ "value": "pod12-node4"
+ },
+ "hide": 0,
+ "includeAll": false,
+ "label": null,
+ "multi": false,
+ "name": "host",
+ "options": [
+ {
+ "selected": true,
+ "text": "pod12-node4",
+ "value": "pod12-node4"
+ }
+ ],
+ "query": "pod12-node4,",
+ "queryValue": "",
+ "skipUrlSync": false,
+ "type": "custom"
+ },
+ {
+ "auto": false,
+ "auto_count": 30,
+ "auto_min": "10s",
+ "current": {
+ "selected": false,
+ "text": "30s",
+ "value": "30s"
+ },
+ "hide": 0,
+ "label": null,
+ "name": "range",
+ "options": [
+ {
+ "selected": true,
+ "text": "30s",
+ "value": "30s"
+ },
+ {
+ "selected": false,
+ "text": "1m",
+ "value": "1m"
+ },
+ {
+ "selected": false,
+ "text": "5m",
+ "value": "5m"
+ },
+ {
+ "selected": false,
+ "text": "10m",
+ "value": "10m"
+ },
+ {
+ "selected": false,
+ "text": "30m",
+ "value": "30m"
+ },
+ {
+ "selected": false,
+ "text": "1h",
+ "value": "1h"
+ },
+ {
+ "selected": false,
+ "text": "6h",
+ "value": "6h"
+ },
+ {
+ "selected": false,
+ "text": "12h",
+ "value": "12h"
+ },
+ {
+ "selected": false,
+ "text": "1d",
+ "value": "1d"
+ },
+ {
+ "selected": false,
+ "text": "7d",
+ "value": "7d"
+ },
+ {
+ "selected": false,
+ "text": "14d",
+ "value": "14d"
+ },
+ {
+ "selected": false,
+ "text": "30d",
+ "value": "30d"
+ }
+ ],
+ "query": "30s,1m,5m,10m,30m,1h,6h,12h,1d,7d,14d,30d",
+ "queryValue": "",
+ "refresh": 2,
+ "skipUrlSync": false,
+ "type": "interval"
+ },
+ {
+ "allValue": null,
+ "current": {
+ "selected": true,
+ "text": "2",
+ "value": "2"
+ },
+ "hide": 0,
+ "includeAll": false,
+ "label": null,
+ "multi": false,
+ "name": "intel_rdt",
+ "options": [
+ {
+ "selected": false,
+ "text": "0",
+ "value": "0"
+ },
+ {
+ "selected": false,
+ "text": "1",
+ "value": "1"
+ },
+ {
+ "selected": true,
+ "text": "2",
+ "value": "2"
+ },
+ {
+ "selected": false,
+ "text": "3",
+ "value": "3"
+ },
+ {
+ "selected": false,
+ "text": "4",
+ "value": "4"
+ },
+ {
+ "selected": false,
+ "text": "5",
+ "value": "5"
+ },
+ {
+ "selected": false,
+ "text": "6",
+ "value": "6"
+ },
+ {
+ "selected": false,
+ "text": "7",
+ "value": "7"
+ },
+ {
+ "selected": false,
+ "text": "8",
+ "value": "8"
+ },
+ {
+ "selected": false,
+ "text": "9",
+ "value": "9"
+ },
+ {
+ "selected": false,
+ "text": "10",
+ "value": "10"
+ },
+ {
+ "selected": false,
+ "text": "11",
+ "value": "11"
+ },
+ {
+ "selected": false,
+ "text": "12",
+ "value": "12"
+ },
+ {
+ "selected": false,
+ "text": "13",
+ "value": "13"
+ },
+ {
+ "selected": false,
+ "text": "14",
+ "value": "14"
+ },
+ {
+ "selected": false,
+ "text": "15",
+ "value": "15"
+ },
+ {
+ "selected": false,
+ "text": "16",
+ "value": "16"
+ },
+ {
+ "selected": false,
+ "text": "17",
+ "value": "17"
+ },
+ {
+ "selected": false,
+ "text": "18",
+ "value": "18"
+ },
+ {
+ "selected": false,
+ "text": "19",
+ "value": "19"
+ },
+ {
+ "selected": false,
+ "text": "20",
+ "value": "20"
+ },
+ {
+ "selected": false,
+ "text": "21",
+ "value": "21"
+ },
+ {
+ "selected": false,
+ "text": "22",
+ "value": "22"
+ },
+ {
+ "selected": false,
+ "text": "23",
+ "value": "23"
+ },
+ {
+ "selected": false,
+ "text": "24",
+ "value": "24"
+ },
+ {
+ "selected": false,
+ "text": "25",
+ "value": "25"
+ },
+ {
+ "selected": false,
+ "text": "26",
+ "value": "26"
+ },
+ {
+ "selected": false,
+ "text": "27",
+ "value": "27"
+ },
+ {
+ "selected": false,
+ "text": "28",
+ "value": "28"
+ },
+ {
+ "selected": false,
+ "text": "29",
+ "value": "29"
+ },
+ {
+ "selected": false,
+ "text": "30",
+ "value": "30"
+ },
+ {
+ "selected": false,
+ "text": "31",
+ "value": "31"
+ },
+ {
+ "selected": false,
+ "text": "32",
+ "value": "32"
+ },
+ {
+ "selected": false,
+ "text": "33",
+ "value": "33"
+ },
+ {
+ "selected": false,
+ "text": "34",
+ "value": "34"
+ },
+ {
+ "selected": false,
+ "text": "35",
+ "value": "35"
+ },
+ {
+ "selected": false,
+ "text": "36",
+ "value": "36"
+ },
+ {
+ "selected": false,
+ "text": "37",
+ "value": "37"
+ },
+ {
+ "selected": false,
+ "text": "38",
+ "value": "38"
+ },
+ {
+ "selected": false,
+ "text": "39",
+ "value": "39"
+ },
+ {
+ "selected": false,
+ "text": "40",
+ "value": "40"
+ },
+ {
+ "selected": false,
+ "text": "41",
+ "value": "41"
+ },
+ {
+ "selected": false,
+ "text": "42",
+ "value": "42"
+ },
+ {
+ "selected": false,
+ "text": "43",
+ "value": "43"
+ },
+ {
+ "selected": false,
+ "text": "44",
+ "value": "44"
+ },
+ {
+ "selected": false,
+ "text": "45",
+ "value": "45"
+ },
+ {
+ "selected": false,
+ "text": "46",
+ "value": "46"
+ },
+ {
+ "selected": false,
+ "text": "47",
+ "value": "47"
+ },
+ {
+ "selected": false,
+ "text": "48",
+ "value": "48"
+ },
+ {
+ "selected": false,
+ "text": "49",
+ "value": "49"
+ },
+ {
+ "selected": false,
+ "text": "50",
+ "value": "50"
+ },
+ {
+ "selected": false,
+ "text": "51",
+ "value": "51"
+ },
+ {
+ "selected": false,
+ "text": "52",
+ "value": "52"
+ },
+ {
+ "selected": false,
+ "text": "53",
+ "value": "53"
+ },
+ {
+ "selected": false,
+ "text": "54",
+ "value": "54"
+ },
+ {
+ "selected": false,
+ "text": "55",
+ "value": "55"
+ },
+ {
+ "selected": false,
+ "text": "56",
+ "value": "56"
+ },
+ {
+ "selected": false,
+ "text": "57",
+ "value": "57"
+ },
+ {
+ "selected": false,
+ "text": "58",
+ "value": "58"
+ },
+ {
+ "selected": false,
+ "text": "59",
+ "value": "59"
+ },
+ {
+ "selected": false,
+ "text": "60",
+ "value": "60"
+ },
+ {
+ "selected": false,
+ "text": "61",
+ "value": "61"
+ },
+ {
+ "selected": false,
+ "text": "62",
+ "value": "62"
+ },
+ {
+ "selected": false,
+ "text": "63",
+ "value": "63"
+ },
+ {
+ "selected": false,
+ "text": "64",
+ "value": "64"
+ }
+ ],
+ "query": "0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64",
+ "queryValue": "",
+ "skipUrlSync": false,
+ "type": "custom"
+ }
+ ]
+ },
+ "time": {
+ "from": "now-5m",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "10s",
+ "30s",
+ "1m",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "browser",
+ "title": "RDT (L3 Cache)",
+ "uid": "kuro-rdt",
+ "version": 9
+} \ No newline at end of file
diff --git a/tools/lma/metrics/jupyter-notebooks/Analysis-Monitoring-K8S.ipynb b/tools/lma/metrics/jupyter-notebooks/Analysis-Monitoring-K8S.ipynb
new file mode 100644
index 00000000..10c59d84
--- /dev/null
+++ b/tools/lma/metrics/jupyter-notebooks/Analysis-Monitoring-K8S.ipynb
@@ -0,0 +1,644 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Metrics Analysis Notebook (k8s)\n",
+ "\n",
+ "#### Used to analyse / visualize the metrics, data fetched from prometheus (monitoring cluster)\n",
+ "\n",
+ "### Contributor: Aditya Srivastava <adityasrivastava301199@gmail.com>\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import pandas as pd\n",
+ "import matplotlib.pyplot as plt\n",
+ "import matplotlib.dates as mdates\n",
+ "import numpy as np\n",
+ "\n",
+ "import datetime\n",
+ "import time\n",
+ "import requests\n",
+ "\n",
+ "from pprint import pprint\n",
+ "import json\n",
+ "from datetime import datetime"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "PROMETHEUS = 'http://10.10.120.211:30902/' #do not change, unless sure"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Helper Functions"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#function to make DF out of query json\n",
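+    "# expected input (Prometheus instant query): {'data': {'result': [{'metric': {...}, 'value': [ts, val]}, ...]}}\n",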
+ "\n",
+ "def convert_to_df(res_json):\n",
+ "\n",
+ " data_list = res_json['data']['result']\n",
+ " res_df = pd.DataFrame()\n",
+ " if not data_list:\n",
+ " return res_df\n",
+ "\n",
+    "    # build one column per metric label\n",
+ " for data in data_list:\n",
+ " metrics = data['metric']\n",
+ " for metric in metrics.keys():\n",
+ " res_df[metric] = np.nan\n",
+ " res_df['value'] = 0\n",
+ " \n",
+ " # filling the df\n",
+ " for data in data_list:\n",
+ " metrics = data['metric']\n",
+ " metrics['value'] = data['value'][-1]\n",
+ " res_df = res_df.append(metrics, ignore_index=True) \n",
+ "\n",
+ " return res_df\n",
+ "\n",
+ "def convert_to_df_range(res_json):\n",
+ "\n",
+ " data_list = res_json['data']['result']\n",
+ " res_df = pd.DataFrame()\n",
+ " if not data_list:\n",
+ " return res_df\n",
+ "\n",
+ " # filling the df\n",
+ " for data in data_list:\n",
+ " metrics = data['metric']\n",
+ " values = np.array(data['values'])\n",
+ " for time, value in values:\n",
+ " metrics['timestamp'] = time\n",
+ " metrics['value'] = value\n",
+ " res_df = res_df.append(metrics, ignore_index=True) \n",
+ "\n",
+ " return res_df\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# functions to query\n",
+ "\n",
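+    "# Usage sketch (the timestamps and metric below are placeholders):\n",
+    "#   res = query_range('2020-07-31 08:00:12', '2020-07-31 08:01:12',\n",
+    "#                     {'query': 'collectd_cpu_percent'}, '30s')\n",
+    "#   df = convert_to_df_range(res)\n",
+    "\n",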
+ "def convert_to_timestamp(s):\n",
+ " return time.mktime(datetime.strptime(s, \"%Y-%m-%d %H:%M:%S\").timetuple())\n",
+ "\n",
+ "def query_current(params={}):\n",
+ " # input: params\n",
+ " # type: dict\n",
+ " # Example: {'query': 'container_cpu_user_seconds_total'}\n",
+ " \n",
+ " # Output: dict, loaded json response of the query\n",
+ "\n",
+ " res = requests.get(PROMETHEUS + '/api/v1/query', \n",
+ " params=params)\n",
+ " return json.loads(res.text)\n",
+ "\n",
+ "\n",
+    "def query_range(start, end, params=None, steps='30s'):\n",
+ " # input: params\n",
+ " # type: dict\n",
+ " # Example: {'query': 'container_cpu_user_seconds_total'}\n",
+ " \n",
+ " # Output: dict, loaded json response of the query\n",
+    "    if params is None:\n",
+    "        params = {}\n",
+    "    params[\"start\"] = convert_to_timestamp(start)\n",
+ " params[\"end\"] = convert_to_timestamp(end)\n",
+ " params[\"step\"] = steps\n",
+ "\n",
+ " print(params)\n",
+ " \n",
+ " res = requests.get(PROMETHEUS + '/api/v1/query_range', \n",
+ " params=params,\n",
+ " )\n",
+ "\n",
+ " return json.loads(res.text)\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ " "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Analysis Function"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### CPU"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# CPU Unused Cores\n",
+ "def unused_cores(start=None, end=None, node=None, steps='15s', csv=None, verbose=False):\n",
+ " \n",
+ " if csv is not None:\n",
+ " df = pd.read_csv(csv)\n",
+ " return df\n",
+ " else:\n",
+ " if start is None or end is None or node is None:\n",
+ " return \"Start, end and Node name required when fetching from prometheus\"\n",
+ " \n",
+ " params = {'query' : \"collectd_cpu_percent{exported_instance='\" + node + \"'}\"}\n",
+ "\n",
+ " target_cpu_usage_range = query_range(start, end, params, steps)\n",
+ " df = convert_to_df_range(target_cpu_usage_range)\n",
+ "\n",
+ " df = df.drop(['__name__', 'instance', 'job'], axis = 1)\n",
+ " groups = df.groupby(['cpu'])\n",
+ " if verbose: print(\"Unused Cores :\")\n",
+ " unused_cores = []\n",
+ " for key, item in groups:\n",
+ " curr_df = item\n",
+ " idle_row = curr_df.loc[curr_df['type'] == 'idle']\n",
+ " if idle_row['value'].iloc[0] == '100':\n",
+ " if verbose: print(\"Core: \",key)\n",
+ " unused_cores.append(int(key))\n",
+ "\n",
+ " print(\"Number of unused cores: \", len(unused_cores))\n",
+ " return unused_cores\n",
+ "\n",
+ "\n",
+ "#CPU fully used cores\n",
+ "def fully_used_cores(start=None, end=None, node=None, steps='15s', csv=None, verbose=False):\n",
+ " \n",
+ " if csv is not None:\n",
+ " df = pd.read_csv(csv)\n",
+ " return df\n",
+ " else:\n",
+ " if start is None or end is None or node is None:\n",
+ " return \"Start, end and Node name required when fetching from prometheus\"\n",
+ " \n",
+ " params = {'query' : \"collectd_cpu_percent{exported_instance='\" + node + \"'}\"}\n",
+ "\n",
+ " target_cpu_usage_range = query_range(start, end, params, steps)\n",
+ " df = convert_to_df_range(target_cpu_usage_range)\n",
+ "\n",
+ " df = df.drop(['__name__', 'instance', 'job'], axis = 1)\n",
+ " groups = df.groupby(['cpu'])\n",
+ " if verbose: print(\"Fully Used Cores :\")\n",
+ " fully_used_cores = []\n",
+ " for key, item in groups:\n",
+ " curr_df = item\n",
+ " idle_row = curr_df.loc[curr_df['type'] == 'idle']\n",
+ " if idle_row['value'].iloc[0] == '0':\n",
+ " if verbose: print(\"Core: \",key)\n",
+ " fully_used_cores.append(int(key))\n",
+ " print(\"Number of fully used cores: \", len(fully_used_cores))\n",
+ " return fully_used_cores\n",
+ "\n",
+ "\n",
+ "# CPU used cores plots\n",
+ "def plot_used_cores(start=None, end=None, node=None, steps='15s', csv=None, verbose=False):\n",
+ " \n",
+ " if csv is not None:\n",
+ " df = pd.read_csv(csv)\n",
+ " \n",
+    "        # rate of change between consecutive counter samples\n",
+ " df['rate'] = df['value'].diff()\n",
+ "\n",
+ " fig = plt.figure(figsize=(24,6), facecolor='oldlace', edgecolor='red')\n",
+ " ax1 = fig.add_subplot(111)\n",
+ " ax1.title.set_text('CPU usage')\n",
+ " ax1.plot(df['epoch'], df['rate'])\n",
+ " return df\n",
+ " else:\n",
+ " if start is None or end is None or node is None:\n",
+ " return \"Start, end and Node name required when fetching from prometheus\"\n",
+ "\n",
+ " params = {'query' : \"collectd_cpu_percent{exported_instance='\" + node + \"'}\"}\n",
+ "\n",
+ " target_cpu_usage_range = query_range(start, end, params, steps)\n",
+ " df = convert_to_df_range(target_cpu_usage_range)\n",
+ " \n",
+ " df = df.drop(['__name__', 'instance', 'job'], axis = 1)\n",
+ " groups = df.groupby(['cpu'])\n",
+ " used_cores = []\n",
+ "\n",
+ " for key, item in groups:\n",
+ " curr_df = item\n",
+ " idle_row = curr_df.loc[curr_df['type'] == 'idle']\n",
+ "\n",
+ " if idle_row['value'].iloc[0] != '100':\n",
+ " used_cores.append(key)\n",
+ " type_grps = curr_df.groupby('type')\n",
+ " fig = plt.figure(figsize=(24,6), facecolor='oldlace', edgecolor='red')\n",
+ "\n",
+ " for type_key, new_item in type_grps:\n",
+ "\n",
+ " if type_key == 'system':\n",
+ " ax1 = fig.add_subplot(131)\n",
+ " ax1.title.set_text(type_key)\n",
+ " ax1.plot(new_item['timestamp'], new_item['value'])\n",
+ " elif type_key == 'user':\n",
+ " ax2 = fig.add_subplot(132)\n",
+ " ax2.title.set_text(type_key)\n",
+ " ax2.plot(new_item['timestamp'], new_item['value'])\n",
+ " elif type_key == 'wait':\n",
+ " ax3 = fig.add_subplot(133)\n",
+ " ax3.title.set_text(type_key)\n",
+ " ax3.plot(new_item['timestamp'], new_item['value'])\n",
+ "\n",
+ " plt.suptitle('Used CPU Core {}'.format(key), fontsize=14)\n",
+ " plt.show()\n",
+ " print(\"Number of used cores: \", len(used_cores))\n",
+ " return used_cores"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### Interface"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+    "# Interface Dropped (both type 1 and 2, i.e. rx and tx)\n",
+ "#TODO: Change this to separate functions later\n",
+ "def interface_dropped(start=None, end=None, node=None, steps='15s', csv=None, verbose=False):\n",
+ " \n",
+ " if csv is not None:\n",
+ " df = pd.read_csv(csv)\n",
+ " df_0 = df #TODO: Change this\n",
+ " df_1 = df #TODO: Change this\n",
+ " else:\n",
+ " if start is None or end is None or node is None:\n",
+ " return \"Start, end and Node name required when fetching from prometheus\"\n",
+ " \n",
+ " params = {'query' : \"collectd_interface_if_dropped_0_total{exported_instance='\" + node + \"'}\"}\n",
+ "\n",
+ " interface_dropped_0 = query_range(start, end, params, steps)\n",
+ " df_0 = convert_to_df_range(interface_dropped_0)\n",
+ " \n",
+ " params = {'query' : \"collectd_interface_if_dropped_1_total{exported_instance='\" + node + \"'}\"}\n",
+ " interface_dropped_1 = query_range(start, end, params, steps)\n",
+ " df_1 = convert_to_df_range(interface_dropped_1)\n",
+ "\n",
+ " \n",
+ " #df_0 : interfaces_dropped_0_df\n",
+ " df_0 = df_0.drop(['__name__', 'instance', 'job'], axis = 1)\n",
+ "\n",
+ " #df_1 : interfaces_dropped_1_df\n",
+ " df_1 = df_1.drop(['__name__', 'instance', 'job'], axis = 1)\n",
+ "\n",
+ " groups_0 = df_0.groupby(['interface'])\n",
+ " groups_1 = df_1.groupby(['interface'])\n",
+ "\n",
+ " groups = [groups_0, groups_1]\n",
+ " dropped_interfaces= []\n",
+ " drop_type = 0\n",
+ " color = ['oldlace', 'mistyrose']\n",
+ " plot_iter = 111\n",
+ " for group in groups:\n",
+ " dropped = []\n",
+ "\n",
+ " for key, item in group:\n",
+ " curr_df = item\n",
+ " if np.any(curr_df['value'] == '1'):\n",
+ " dropped_row = curr_df.loc[curr_df['value'] == '1']\n",
+ " dropped.append([key, dropped_row['timestamp'].iloc[0]])\n",
+ " fig = plt.figure(figsize=(24,6), facecolor=color[drop_type], edgecolor='red')\n",
+ " ax = fig.add_subplot(plot_iter)\n",
+ " ax.title.set_text(\"Interface: {}\".format(key))\n",
+ " ax.plot(item['timestamp'], item['value'])\n",
+ " dropped_interfaces.append(dropped)\n",
+ " plt.suptitle('Interfaces Drop type {}'.format(drop_type), fontsize=14)\n",
+ " plt.show()\n",
+ " drop_type += 1\n",
+ " return dropped_interfaces\n",
+ "\n",
+ "\n",
+    "# Interface Errors (both type 1 and 2, i.e. rx and tx)\n",
+ "#TODO: Change this to separate functions later\n",
+ "def interface_errors(start=None, end=None, node=None, steps='15s', csv=None, verbose=False):\n",
+ " \n",
+ " if csv is not None:\n",
+ " df = pd.read_csv(csv)\n",
+ " df_0 = df #TODO: Change this\n",
+ " df_1 = df #TODO: Change this\n",
+ " else:\n",
+ " if start is None or end is None or node is None:\n",
+ " return \"Start, end and Node name required when fetching from prometheus\"\n",
+ " \n",
+ " params = {'query' : \"collectd_interface_if_errors_0_total{exported_instance='\" + node + \"'}\"}\n",
+ " interfaces_errors_0 = query_range(start, end, params, steps)\n",
+ " df_0 = convert_to_df_range(interfaces_errors_0)\n",
+ " \n",
+ " params = {'query' : \"collectd_interface_if_errors_1_total{exported_instance='\" + node + \"'}\"}\n",
+ " interface_errors_1 = query_range(start, end, params, steps)\n",
+ " df_1 = convert_to_df_range(interface_errors_1)\n",
+ "\n",
+ " \n",
+ " #df_0 : interfaces_errors_0_df\n",
+ " df_0 = df_0.drop(['__name__', 'instance', 'job'], axis = 1)\n",
+ "\n",
+ " #df_1 : interfaces_dropped_1_df\n",
+ " df_1 = df_1.drop(['__name__', 'instance', 'job'], axis = 1)\n",
+ "\n",
+ " groups_0 = df_0.groupby(['interface'])\n",
+ " groups_1 = df_1.groupby(['interface'])\n",
+ "\n",
+ " groups = [groups_0, groups_1]\n",
+ " err_interfaces= []\n",
+ " err_type = 0\n",
+ " color = ['oldlace', 'mistyrose']\n",
+ " for group in groups:\n",
+ " errors = []\n",
+ "\n",
+ " for key, item in group:\n",
+ " curr_df = item\n",
+ "\n",
+ " if np.any(curr_df['value'] == '1'):\n",
+ " err_row = curr_df.loc[curr_df['value'] == '1']\n",
+    "                errors.append([key, err_row['timestamp'].iloc[0]])\n",
+ "\n",
+ " fig = plt.figure(figsize=(24,6), facecolor=color[err_type], edgecolor='red')\n",
+ " ax = fig.add_subplot(111)\n",
+ " ax.title.set_text(\"Interface: {}\".format(key))\n",
+ " ax.plot(item['timestamp'], item['value'])\n",
+ "\n",
+ " err_interfaces.append(errors)\n",
+ " plt.suptitle('Interfaces Error type {}'.format(err_type), fontsize=14)\n",
+ " plt.show()\n",
+ " err_type += 1\n",
+ "\n",
+ " return err_interfaces"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### RDT "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# L3 cache bytes\n",
+ "def plot_rdt_bytes(start=None, end=None, node=None, steps='15s', csv=None, verbose=False):\n",
+ " \n",
+ " if csv is not None:\n",
+ " df = pd.read_csv(csv)\n",
+ " else:\n",
+ " if start is None or end is None or node is None:\n",
+ " return \"Start, end and Node name required when fetching from prometheus\"\n",
+ "\n",
+ " params = {'query' : \"collectd_intel_rdt_bytes{exported_instance='\" + node + \"'}\"}\n",
+ " intel_rdt_bytes = query_range(start, end, params, steps)\n",
+ " df = convert_to_df_range(intel_rdt_bytes)\n",
+ "\n",
+ " df = df.drop(['__name__', 'instance', 'job'], axis = 1)\n",
+ " groups = df.groupby(['intel_rdt'])\n",
+ " for key, item in groups:\n",
+ " curr_df = item\n",
+ " fig = plt.figure(figsize=(24,6), facecolor='oldlace', edgecolor='red')\n",
+ " ax1 = fig.add_subplot(111)\n",
+ " ax1.title.set_text(\"Intel RDT Number: {}\".format(key))\n",
+ " ax1.plot(item['timestamp'], item['value'])\n",
+ " plt.show()\n",
+ " return\n",
+ "\n",
+ "\n",
+ "# L3 IPC values\n",
+ "def plot_rdt_ipc(start=None, end=None, node=None, steps='15s', csv=None, verbose=False):\n",
+ " \n",
+ " if csv is not None:\n",
+ " df = pd.read_csv(csv)\n",
+ " else:\n",
+ " if start is None or end is None or node is None:\n",
+ " return \"Start, end and Node name required when fetching from prometheus\"\n",
+ " \n",
+ " params = {'query' : \"collectd_intel_rdt_ipc{exported_instance='\" + node + \"'}\"}\n",
+ " intel_rdt_ipc = query_range(start, end, params, steps)\n",
+ " df = convert_to_df_range(intel_rdt_ipc)\n",
+ "\n",
+ " df = df.drop(['__name__', 'instance', 'job'], axis = 1)\n",
+ " groups = df.groupby(['intel_rdt'])\n",
+ " for key, item in groups:\n",
+ " curr_df = item\n",
+ " fig = plt.figure(figsize=(24,6), facecolor='oldlace', edgecolor='red')\n",
+ " ax1 = fig.add_subplot(111)\n",
+ " ax1.title.set_text(\"Intel RDT Number: {}, IPC value\".format(key))\n",
+ " ax1.plot(item['timestamp'], item['value'])\n",
+ " plt.show()\n",
+ " return\n",
+ "\n",
+ "\n",
+    "# memory bandwidth\n",
+ "def get_rdt_memory_bandwidth(start=None, end=None, node=None, steps='15s', csv=None, verbose=False):\n",
+ " \n",
+ " if csv is not None:\n",
+ " df = pd.read_csv(csv)\n",
+ " else:\n",
+ "\n",
+ " if start is None or end is None or node is None:\n",
+ " return \"Start, end and Node name required when fetching from prometheus\"\n",
+ " \n",
+ " params = {'query' : \"collectd_intel_rdt_memory_bandwidth_total{exported_instance='\" + node + \"'}\"}\n",
+ " intel_rdt_mem_bw = query_range(start, end, params, steps)\n",
+ " df = convert_to_df_range(intel_rdt_mem_bw)\n",
+ "\n",
+ " df = df.drop(['__name__', 'instance', 'job'], axis = 1)\n",
+ " \n",
+ " return df"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### Memory"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "scrolled": true
+ },
+ "outputs": [],
+ "source": [
+ "def get_memory_usage(start=None, end=None, node=None, steps='15s', csv=None, verbose=False):\n",
+ " \n",
+ " if csv is not None:\n",
+ " df = pd.read_csv(csv)\n",
+ " else:\n",
+ " if start is None or end is None or node is None:\n",
+ " return \"Start, end and Node name required when fetching from prometheus\"\n",
+ " \n",
+ " params = {'query' : \"collectd_memory{exported_instance='\" + node + \"'} / (1024*1024*1024) \"} \n",
+ " target_memory_usage_range = query_range(start, end, params, steps)\n",
+ " df = convert_to_df_range(target_memory_usage_range)\n",
+ "\n",
+ " df = df.drop(['instance', 'job'], axis = 1)\n",
+ " return df"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Testing Zone"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "scrolled": false
+ },
+ "outputs": [],
+ "source": [
+ "# prom fetch\n",
+ "cores = unused_cores('2020-07-31 08:00:12', '2020-07-31 08:01:12', 'pod12-node4')\n",
+ "print(cores)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Usage / Examples\n",
+ "\n",
+ "\n",
+ "##### CPU \n",
+ "\n",
+    "- Finding unused CPU cores\n",
+ "\n",
+ "```py\n",
+ "# Fetching from prometheus\n",
+ "cores = unused_cores('2020-07-31 08:00:12', '2020-07-31 08:01:12', 'pod12-node4')\n",
+ "\n",
+ "```\n",
+ "\n",
+ "- For finding fully used cores\n",
+ "\n",
+ "```py\n",
+ "# Fetching from prometheus\n",
+ "fully_used = fully_used_cores('2020-07-31 08:00:12', '2020-07-31 08:01:12', 'pod12-node4')\n",
+ "\n",
+ "```\n",
+ "\n",
+ "- Similarly for plotting used cores\n",
+ "\n",
+ "```py\n",
+ "# Fetching\n",
+ "plot_used_cores('2020-07-31 08:00:12', '2020-07-31 08:01:12', 'pod12-node4')\n",
+ "\n",
+ "#csv\n",
+ "# use Analysis-Monitoring-Local Notebook for correct analysis \n",
+ "plot_used_cores(csv='metrics_data/cpu-0/cpu-user-2020-06-02')\n",
+ "\n",
+ "```\n",
+ "\n",
+ "\n",
+ "##### Interface\n",
+ "\n",
+ "- Interface Dropped \n",
+ "\n",
+ "```py\n",
+ "# Fetching from prom\n",
+ "dropped_interfaces = interface_dropped('2020-07-31 08:00:12', '2020-07-31 08:01:12', 'pod12-node4')\n",
+ "\n",
+ "```\n",
+ "\n",
+ "- Interface Errors\n",
+ "\n",
+ "```py\n",
+ "# Fetching from prom\n",
+ "interface_errors('2020-07-31 08:00:12', '2020-07-31 08:01:12', 'pod12-node4')\n",
+ "```\n",
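+    "\n",
+    "##### Memory\n",
+    "\n",
+    "- Memory usage (a sketch using the same node and time range as the examples above)\n",
+    "\n",
+    "```py\n",
+    "# Fetching from prom\n",
+    "get_memory_usage('2020-07-31 08:00:12', '2020-07-31 08:01:12', 'pod12-node4')\n",
+    "```\n",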
+ "\n",
+ "##### RDT\n",
+ "\n",
+ "- Plot bytes\n",
+ "\n",
+ "```py\n",
+ "# fetch\n",
+ "plot_rdt_bytes('2020-07-31 08:00:12', '2020-07-31 08:01:12','pod12-node4')\n",
+ "```\n",
+ "\n",
+ "- Plot ipc values\n",
+ "\n",
+ "```py\n",
+ "#fetch\n",
+ "plot_rdt_ipc('2020-07-31 08:00:12', '2020-07-31 08:01:12', 'pod12-node4')\n",
+ "```\n",
+ "\n",
+ "- Memory bandwidth\n",
+ "\n",
+ "```py\n",
+ "#fetch\n",
+ "get_rdt_memory_bandwidth('2020-07-31 08:00:12', '2020-07-31 08:01:12', 'pod12-node4')\n",
+ "```"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.6.8"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
diff --git a/tools/lma/metrics/jupyter-notebooks/Analysis-Monitoring-Local.ipynb b/tools/lma/metrics/jupyter-notebooks/Analysis-Monitoring-Local.ipynb
new file mode 100644
index 00000000..0385b6f9
--- /dev/null
+++ b/tools/lma/metrics/jupyter-notebooks/Analysis-Monitoring-Local.ipynb
@@ -0,0 +1,913 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Metrics Analysis Notebook (local)\n",
+ "\n",
+    "#### Used to analyse / visualize the metrics uploaded via CSV files\n",
+ "\n",
+ "### Contributor: Aditya Srivastava <adityasrivastava301199@gmail.com>\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from datetime import datetime\n",
+ "import json\n",
+ "import matplotlib.pyplot as plt\n",
+ "import matplotlib.dates as mdates\n",
+ "import numpy as np\n",
+ "import os\n",
+ "import pandas as pd\n",
+ "from pprint import pprint\n",
+ "import re\n",
+ "import requests\n",
+ "import time"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Helper Functions"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "DATETIME_FORMAT = \"%Y-%m-%d %H:%M:%S\"\n",
+ "\n",
+ "def convert_to_timestamp(s):\n",
+ " global DATETIME_FORMAT\n",
+ " return time.mktime(datetime.strptime(s, DATETIME_FORMAT).timetuple())\n",
+ "\n",
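+    "# Note: convert_to_time_string expects the epoch in milliseconds;\n",
+    "# it divides by 1000 before converting to a datetime string.\n",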
+ "def convert_to_time_string(epoch):\n",
+ " global DATETIME_FORMAT\n",
+ " t = datetime.fromtimestamp(float(epoch)/1000.)\n",
+ " return t.strftime(DATETIME_FORMAT)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Note: \n",
+ " \n",
+ "Path will be used as a parameter in almost every function\n",
+ "\n",
+ "path / rootdir / csv : (str) Path to the folder whose direct children are metric folders\n",
+ "\n",
+ "example: /path/to/folder\n",
+ "\n",
+ "When : \n",
+ "```sh\n",
+ "ls /path/to/folder\n",
+ "\n",
+ "# output should be directories such as\n",
+ "# cpu-0 cpu-1 cpu-2 ..........................\n",
+ "# processes-ovs-vswitchd ........processes-ovsdb-server\n",
+ "```"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Analysis Function"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### CPU"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "rootdir = 'metrics_data/'\n",
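+    "# rootdir is assumed to contain one folder per collectd plugin instance\n",
+    "# (cpu-0, cpu-1, ..., interface-*, ...), as described in the Note above.\n",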
+ "\n",
+ "def fetch_cpu_data(rootdir):\n",
+ " df = pd.DataFrame()\n",
+    "    reg_compile = re.compile(\"cpu-\\d{1,2}\")\n",
+ " for dirpath, dirnames, filenames in os.walk(rootdir):\n",
+ " dirname = dirpath.split(os.sep)[-1] \n",
+ " if reg_compile.match(dirname):\n",
+ " # read 3 files from this folder...\n",
+ " _df = pd.DataFrame()\n",
+ " for file in filenames:\n",
+ " if 'user' in file:\n",
+ " temp_df = pd.read_csv(dirpath + os.sep + file)\n",
+ " _df['user'] = temp_df['value']\n",
+ " _df['epoch'] = temp_df['epoch']\n",
+ "\n",
+ " if 'system' in file:\n",
+ " temp_df = pd.read_csv(dirpath + os.sep + file)\n",
+ " _df['system'] = temp_df['value']\n",
+ " _df['epoch'] = temp_df['epoch']\n",
+ "\n",
+ " if 'idle' in file:\n",
+ " temp_df = pd.read_csv(dirpath + os.sep + file)\n",
+ " _df['idle'] = temp_df['value']\n",
+ " _df['epoch'] = temp_df['epoch']\n",
+ "\n",
+ " _df['cpu'] = dirname.split('-')[-1]\n",
+ "\n",
+ " df = df.append(_df, ignore_index=True)\n",
+ "\n",
+ " total = df['user'] + df['system'] + df['idle']\n",
+ "\n",
+ " df['user_percentage'] = df['user']*100 / total\n",
+ " df['system_percentage'] = df['system']*100 / total\n",
+ " df['idle_percentage'] = df['idle']*100 / total\n",
+ " \n",
+ " return df\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# CPU Unused Cores\n",
+ "def unused_cores(rootdir, verbose=False):\n",
+ " \n",
+ " df = fetch_cpu_data(rootdir)\n",
+ " groups = df.groupby(['cpu'])\n",
+ " if verbose: print(\"Unused Cores :\")\n",
+ "\n",
+ " unused_cores = []\n",
+ " for key, item in groups:\n",
+ " curr_df = item\n",
+ " unused_cores.append(key)\n",
+ " idle_values = curr_df.loc[curr_df['idle_percentage'] < 99.999]\n",
+ " if np.any(idle_values):\n",
+ " unused_cores.pop(-1)\n",
+ "\n",
+ " unused_cores = set(unused_cores)\n",
+ " for key, item in groups:\n",
+ " if key not in unused_cores:\n",
+ " continue\n",
+ " fig = plt.figure(figsize=(24,6), facecolor='oldlace', edgecolor='red')\n",
+ "\n",
+ " ax1 = fig.add_subplot(131)\n",
+ " ax1.title.set_text(\"System\")\n",
+ " ax1.plot(item['epoch'], item['system_percentage'])\n",
+ " \n",
+ " ax2 = fig.add_subplot(132)\n",
+ " ax2.title.set_text(\"User\")\n",
+ " ax2.plot(item['epoch'], item['user_percentage'])\n",
+ " \n",
+ " ax3 = fig.add_subplot(133)\n",
+ " ax3.title.set_text(\"Idle\")\n",
+ " ax3.plot(item['epoch'], item['idle_percentage'])\n",
+ "\n",
+ " plt.suptitle('Used CPU Core {}'.format(key), fontsize=14)\n",
+ " plt.show()\n",
+ "\n",
+ " print(\"Number of unused cores: \", len(unused_cores))\n",
+ " return unused_cores\n",
+ "\n",
+ "\n",
+ "#CPU fully used cores\n",
+ "def fully_used_cores(rootdir, verbose=False):\n",
+ " \n",
+ "\n",
+ " df = fetch_cpu_data(rootdir)\n",
+ " groups = df.groupby(['cpu'])\n",
+ " if verbose: print(\"Fully Used Cores :\")\n",
+ "\n",
+ " fully_used_cores = []\n",
+ " for key, item in groups:\n",
+ " curr_df = item\n",
+ " idle_values = curr_df.loc[curr_df['idle_percentage'] <= 10]\n",
+ " if np.any(idle_values):\n",
+ " fully_used_cores.append(key)\n",
+ "\n",
+ " fully_used_cores = set(fully_used_cores)\n",
+ " for key, item in groups:\n",
+ " if key not in fully_used_cores:\n",
+ " continue\n",
+ " fig = plt.figure(figsize=(24,6), facecolor='oldlace', edgecolor='red')\n",
+ "\n",
+ " ax1 = fig.add_subplot(131)\n",
+ " ax1.title.set_text(\"System\")\n",
+ " ax1.plot(item['epoch'], item['system_percentage'])\n",
+ "\n",
+ " ax2 = fig.add_subplot(132)\n",
+ " ax2.title.set_text(\"User\")\n",
+ " ax2.plot(item['epoch'], item['user_percentage'])\n",
+ "\n",
+ " ax3 = fig.add_subplot(133)\n",
+ " ax3.title.set_text(\"Idle\")\n",
+ " ax3.plot(item['epoch'], item['idle_percentage'])\n",
+ "\n",
+ " plt.suptitle('Used CPU Core {}'.format(key), fontsize=14)\n",
+ " plt.show()\n",
+ "\n",
+ " print(\"Number of fully used cores: \", len(fully_used_cores))\n",
+ " return fully_used_cores\n",
+ "\n",
+ "\n",
+ "# CPU used cores plots\n",
+ "def used_cores(rootdir, verbose=False):\n",
+ "\n",
+ " df = fetch_cpu_data(rootdir)\n",
+ " groups = df.groupby(['cpu'])\n",
+ " if verbose: print(\"Used Cores :\")\n",
+ "\n",
+ " used_cores = []\n",
+ " for key, item in groups:\n",
+ " curr_df = item\n",
+ " idle_values = curr_df.loc[curr_df['idle_percentage'] < 99.999]\n",
+ " if np.any(idle_values):\n",
+ " used_cores.append(key)\n",
+ "\n",
+ " used_cores = set(used_cores)\n",
+ " for key, item in groups:\n",
+ " if key not in used_cores:\n",
+ " continue\n",
+ " fig = plt.figure(figsize=(24,6), facecolor='oldlace', edgecolor='red')\n",
+ "\n",
+ " ax1 = fig.add_subplot(131)\n",
+ " ax1.title.set_text(\"System\")\n",
+ " ax1.plot(item['epoch'], item['system_percentage'])\n",
+ "\n",
+ " ax2 = fig.add_subplot(132)\n",
+ " ax2.title.set_text(\"User\")\n",
+ " ax2.plot(item['epoch'], item['user_percentage'])\n",
+ "\n",
+ " ax3 = fig.add_subplot(133)\n",
+ " ax3.title.set_text(\"Idle\")\n",
+ " ax3.plot(item['epoch'], item['idle_percentage'])\n",
+ "\n",
+ " plt.suptitle('Used CPU Core {}'.format(key), fontsize=14)\n",
+ " plt.show()\n",
+ "\n",
+ " print(\"Number of used cores: \", len(used_cores))\n",
+ " return used_cores\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### Interface"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "rootdir = 'metrics_data/'\n",
+ "\n",
+ "def fetch_interfaces_data(rootdir):\n",
+ "\n",
+ " df = pd.DataFrame()\n",
+ " reg_compile = re.compile(\"interface-.*\")\n",
+ " for dirpath, dirnames, filenames in os.walk(rootdir):\n",
+ " dirname = dirpath.split(os.sep)[-1] \n",
+ " if reg_compile.match(dirname):\n",
+ " # read 3 files from this folder...\n",
+ " _df = pd.DataFrame()\n",
+ " for file in filenames:\n",
+ " if 'errors' in file:\n",
+ " temp_df = pd.read_csv(dirpath + os.sep + file)\n",
+ " _df['error_rx'] = temp_df['rx']\n",
+ " _df['error_tx'] = temp_df['tx']\n",
+ " _df['epoch'] = temp_df['epoch']\n",
+ "\n",
+ " if 'dropped' in file:\n",
+ " temp_df = pd.read_csv(dirpath + os.sep + file)\n",
+ " _df['dropped_rx'] = temp_df['rx']\n",
+ " _df['dropped_tx'] = temp_df['tx']\n",
+ " _df['epoch'] = temp_df['epoch']\n",
+ "\n",
+ " _df['interface'] = '-'.join(dirname.split('-')[1:])\n",
+ " df = df.append(_df, ignore_index=True)\n",
+ " return df\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+    "# Interface Dropped (both type 1 and 2, i.e. rx and tx)\n",
+ "def interface_dropped(rootdir, verbose=False):\n",
+ " \n",
+ " df = fetch_interfaces_data(rootdir)\n",
+ " group = df.groupby(['interface'])\n",
+ " color = ['oldlace', 'mistyrose']\n",
+ "\n",
+ " dropped = {'rx':[], 'tx':[]}\n",
+ "\n",
+ " itr = 0\n",
+ " for key, item in group:\n",
+ " curr_df = item\n",
+ "\n",
+ " if np.any(curr_df['dropped_rx'] == 1):\n",
+ " dropped_rows = curr_df[curr_df['dropped_rx'] == 1]\n",
+    "            dropped['rx'].append([key, dropped_rows['epoch'].iloc[0]])\n",
+ " if np.any(curr_df['dropped_tx'] == 1):\n",
+ " dropped_rows = curr_df[curr_df['dropped_tx'] == 1]\n",
+    "            dropped['tx'].append([key, dropped_rows['epoch'].iloc[0]])\n",
+ "\n",
+ " fig = plt.figure(figsize=(24,6), facecolor=color[itr%2], edgecolor='red')\n",
+ " ax = fig.add_subplot(211)\n",
+ " ax.title.set_text(\"Interface: {} Dropped (rx)\".format(key))\n",
+ " ax.plot(item['epoch'], item['dropped_rx'])\n",
+ "\n",
+ " ax1 = fig.add_subplot(212)\n",
+ " ax1.title.set_text(\"Interface: {} Dropped (tx)\".format(key))\n",
+ " ax1.plot(item['epoch'], item['dropped_tx'])\n",
+ "\n",
+ " itr += 1\n",
+ "\n",
+ " plt.suptitle('Interface Dropped', fontsize=14)\n",
+ " plt.show()\n",
+ "\n",
+ " return dropped\n",
+ "\n",
+ "\n",
+    "# Interface Errors (both type 1 and 2, i.e. rx and tx)\n",
+ "def interface_errors(rootdir, verbose=False):\n",
+ " \n",
+ " df = fetch_interfaces_data(rootdir)\n",
+ " group = df.groupby(['interface'])\n",
+ " color = ['oldlace', 'mistyrose']\n",
+ "\n",
+ " errors = {'rx':[], 'tx':[]}\n",
+ "\n",
+ " itr = 0\n",
+ " for key, item in group:\n",
+ " curr_df = item\n",
+ "\n",
+ " if np.any(curr_df['error_rx'] == 1):\n",
+ " err_rows = curr_df[curr_df['error_rx'] == 1]\n",
+    "            errors['rx'].append([key, err_rows['epoch'].iloc[0]])\n",
+ " if np.any(curr_df['error_tx'] == 1):\n",
+ " err_rows = curr_df[curr_df['error_tx'] == 1]\n",
+    "            errors['tx'].append([key, err_rows['epoch'].iloc[0]])\n",
+ "\n",
+ " fig = plt.figure(figsize=(24,6), facecolor=color[itr%2], edgecolor='red')\n",
+ " ax = fig.add_subplot(211)\n",
+ " ax.title.set_text(\"Interface: {} Errors (rx)\".format(key))\n",
+ " ax.plot(item['epoch'], item['error_rx'])\n",
+ "\n",
+ " ax1 = fig.add_subplot(212)\n",
+ " ax1.title.set_text(\"Interface: {} Errors (tx)\".format(key))\n",
+ " ax1.plot(item['epoch'], item['error_tx'])\n",
+ "\n",
+ " itr += 1\n",
+ "\n",
+    "    plt.suptitle('Interface Errors', fontsize=14)\n",
+ " plt.show()\n",
+ "\n",
+ " return errors\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### OVS Stats (Non DPDK)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "rootdir = 'metrics_data/'\n",
+ "\n",
+ "def fetch_ovs_stats_data(rootdir):\n",
+ " df = pd.DataFrame()\n",
+ " reg_compile = re.compile(\"ovs_stats-.*\")\n",
+ " for dirpath, dirnames, filenames in os.walk(rootdir):\n",
+ " dirname = dirpath.split(os.sep)[-1] \n",
+ " if reg_compile.match(dirname):\n",
+ " if 'dpdk' in dirname:\n",
+ " continue #ignoring dpdk\n",
+ "\n",
+ " _df = pd.DataFrame()\n",
+ " for file in filenames:\n",
+ " if 'errors' in file:\n",
+ " col_name = '-'.join(file.split('_')[1:])\n",
+ " temp_df = pd.read_csv(dirpath + os.sep + file)\n",
+ "\n",
+ " _df['epoch'] = temp_df['epoch']\n",
+ " temp_df = temp_df.drop(['epoch'], axis=1)\n",
+ " new_cols = [i + '_' + col_name for i in temp_df.columns]\n",
+ " _df[new_cols] = temp_df\n",
+ "\n",
+ " if 'dropped' in file:\n",
+ " col_name = '-'.join(file.split('_')[1:])\n",
+ " temp_df = pd.read_csv(dirpath + os.sep + file)\n",
+ " _df['epoch'] = temp_df['epoch']\n",
+ " temp_df = temp_df.drop(['epoch'], axis=1)\n",
+ " new_cols = [i + '_' + col_name for i in temp_df.columns]\n",
+ " _df[new_cols] = temp_df \n",
+ " _df['interface'] = '-'.join(dirname.split('-')[1:])\n",
+ " df = df.append(_df, ignore_index=True)\n",
+ " return df\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def ovs_stats_dropped(rootdir, verbose=False):\n",
+ " \n",
+ " df = fetch_ovs_stats_data(rootdir)\n",
+ " group = df.groupby(['interface'])\n",
+ " color = ['oldlace', 'mistyrose']\n",
+ "\n",
+ " i = 0\n",
+ " for key, item in group:\n",
+ " curr_df = item\n",
+ " for col in curr_df:\n",
+ " if 'dropped' in col:\n",
+ " if item[col].isnull().all():\n",
+ " continue\n",
+ " fig = plt.figure(figsize=(24,6), facecolor=color[i%2], edgecolor='red')\n",
+ " plt.plot(item['epoch'], item[col])\n",
+ " plt.title(\"Interface: {} Dropped {}\".format(key, col))\n",
+ " i += 1\n",
+ " plt.show()\n",
+ " return\n",
+ "\n",
+ "\n",
+    "# OVS stats errors (rx and tx)\n",
+ "def ovs_stats_errors(rootdir, verbose=False):\n",
+ "\n",
+ "\n",
+ " df = fetch_ovs_stats_data(rootdir)\n",
+ " group = df.groupby(['interface'])\n",
+ " color = ['oldlace', 'mistyrose']\n",
+ "\n",
+ " i = 0\n",
+ " for key, item in group:\n",
+ " curr_df = item\n",
+ " for col in curr_df:\n",
+ " if 'error' in col:\n",
+ " if item[col].isnull().all():\n",
+ " continue\n",
+ " fig = plt.figure(figsize=(24,6), facecolor=color[i%2], edgecolor='red')\n",
+ " plt.plot(item['epoch'], item[col])\n",
+ " plt.title(\"Interface: {} Errors {}\".format(key, col))\n",
+ " i += 1\n",
+ " plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### DPDK"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "rootdir = 'metrics_data/'\n",
+ "\n",
+ "def fetch_dpdk_data(rootdir):\n",
+ " df = pd.DataFrame()\n",
+ " reg_compile = re.compile(\".*dpdk.*\")\n",
+ " for dirpath, dirnames, filenames in os.walk(rootdir):\n",
+ " dirname = dirpath.split(os.sep)[-1] \n",
+ " if reg_compile.match(dirname):\n",
+ " _df = pd.DataFrame()\n",
+ " for file in filenames:\n",
+ " if 'errors' in file:\n",
+ " col_name = '-'.join(file.split('_')[1:])\n",
+ " temp_df = pd.read_csv(dirpath + os.sep + file)\n",
+ "\n",
+ " _df['epoch'] = temp_df['epoch']\n",
+ " temp_df = temp_df.drop(['epoch'], axis=1)\n",
+ " new_cols = [i + '_' + col_name for i in temp_df.columns]\n",
+ " _df[new_cols] = temp_df\n",
+ "\n",
+ " if 'dropped' in file:\n",
+ " col_name = '-'.join(file.split('_')[1:])\n",
+ " temp_df = pd.read_csv(dirpath + os.sep + file)\n",
+ " _df['epoch'] = temp_df['epoch']\n",
+ " temp_df = temp_df.drop(['epoch'], axis=1)\n",
+ " new_cols = [i + '_' + col_name for i in temp_df.columns]\n",
+ " _df[new_cols] = temp_df \n",
+ " _df['dpdk'] = '-'.join(dirname.split('-')[1:])\n",
+ " df = df.append(_df, ignore_index=True)\n",
+ " return df\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fetch_dpdk_data(rootdir)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def dpdk_dropped(rootdir, verbose=False):\n",
+ " \n",
+ " df = fetch_dpdk_data(rootdir)\n",
+ " group = df.groupby(['dpdk'])\n",
+ " color = ['oldlace', 'mistyrose']\n",
+ "\n",
+ " i = 0\n",
+ " for key, item in group:\n",
+ " curr_df = item\n",
+ " for col in curr_df:\n",
+ " if 'dropped' in col:\n",
+ " if item[col].isnull().all():\n",
+ " continue\n",
+ " fig = plt.figure(figsize=(24,6), facecolor=color[i%2], edgecolor='red')\n",
+ " plt.plot(item['epoch'], item[col])\n",
+    "                plt.title(\"DPDK: {} Dropped {}\".format(key, col))\n",
+ " i += 1\n",
+ " plt.show()\n",
+ " return\n",
+ "\n",
+ "\n",
+    "# DPDK errors (rx and tx)\n",
+ "def dpdk_errors(rootdir, verbose=False):\n",
+ "\n",
+ "\n",
+ " df = fetch_dpdk_data(rootdir)\n",
+ " group = df.groupby(['dpdk'])\n",
+ " color = ['oldlace', 'mistyrose']\n",
+ "\n",
+ " i = 0\n",
+ " for key, item in group:\n",
+ " curr_df = item\n",
+ " for col in curr_df:\n",
+ " if 'error' in col:\n",
+ " if item[col].isnull().all():\n",
+ " continue\n",
+ " fig = plt.figure(figsize=(24,6), facecolor=color[i%2], edgecolor='red')\n",
+ " plt.plot(item['epoch'], item[col])\n",
+    "                plt.title(\"DPDK: {} Errors {}\".format(key, col))\n",
+ " i += 1\n",
+ " plt.show()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "dpdk_dropped(rootdir)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "#### RDT (needs to be tested)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "rootdir = 'metrics_data/'\n",
+ "\n",
+ "def fetch_rdt_data(rootdir):\n",
+ " df = pd.DataFrame()\n",
+ " reg_compile = re.compile(\".*rdt.*\")\n",
+ " for dirpath, dirnames, filenames in os.walk(rootdir):\n",
+ " dirname = dirpath.split(os.sep)[-1] \n",
+ " if reg_compile.match(dirname):\n",
+ " _df = pd.DataFrame()\n",
+ " for file in filenames:\n",
+ " if 'bytes' in file:\n",
+ " col_name = '-'.join(file.split('_')[1:])\n",
+ " temp_df = pd.read_csv(dirpath + os.sep + file)\n",
+ "\n",
+ " _df['epoch'] = temp_df['epoch']\n",
+ " temp_df = temp_df.drop(['epoch'], axis=1)\n",
+ " new_cols = [i + '_' + col_name for i in temp_df.columns]\n",
+ " _df[new_cols] = temp_df\n",
+ " \n",
+ " if 'bandwidth' in file:\n",
+ " col_name = '-'.join(file.split('_')[1:])\n",
+ " temp_df = pd.read_csv(dirpath + os.sep + file)\n",
+ "\n",
+ " _df['epoch'] = temp_df['epoch']\n",
+ " temp_df = temp_df.drop(['epoch'], axis=1)\n",
+ " new_cols = [i + '_' + col_name for i in temp_df.columns]\n",
+ " _df[new_cols] = temp_df\n",
+ "\n",
+ " if 'ipc' in file:\n",
+ " col_name = '-'.join(file.split('_')[1:])\n",
+ " temp_df = pd.read_csv(dirpath + os.sep + file)\n",
+ " _df['epoch'] = temp_df['epoch']\n",
+ " temp_df = temp_df.drop(['epoch'], axis=1)\n",
+ " new_cols = [i + '_' + col_name for i in temp_df.columns]\n",
+ " _df[new_cols] = temp_df \n",
+ " _df['intel_rdt'] = '-'.join(dirname.split('-')[1:])\n",
+ " df = df.append(_df, ignore_index=True)\n",
+ " return df\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# L3 cache bytes\n",
+    "def plot_rdt_bytes(rootdir, verbose=False):\n",
+ " \n",
+ " df = fetch_rdt_data(rootdir)\n",
+ " group = df.groupby(['intel_rdt'])\n",
+ " color = ['oldlace', 'mistyrose']\n",
+ "\n",
+ " i = 0\n",
+ " for key, item in group:\n",
+ " curr_df = item\n",
+ " for col in curr_df:\n",
+ " if 'bytes' in col:\n",
+ " if item[col].isnull().all():\n",
+ " continue\n",
+ " fig = plt.figure(figsize=(24,6), facecolor=color[i%2], edgecolor='red')\n",
+ " plt.plot(item['epoch'], item[col])\n",
+    "                plt.title(\"RDT bytes, RDT: {}, {}\".format(key, col))\n",
+ " i += 1\n",
+ " plt.show()\n",
+ "\n",
+ "\n",
+ "# L3 IPC values\n",
+    "def plot_rdt_ipc(rootdir, verbose=False):\n",
+ " \n",
+ " \n",
+ " df = fetch_rdt_data(rootdir)\n",
+ " group = df.groupby(['intel_rdt'])\n",
+ " color = ['oldlace', 'mistyrose']\n",
+ "\n",
+ " i = 0\n",
+ " for key, item in group:\n",
+ " curr_df = item\n",
+ " for col in curr_df:\n",
+ " if 'ipc' in col:\n",
+ " if item[col].isnull().all():\n",
+ " continue\n",
+ " fig = plt.figure(figsize=(24,6), facecolor=color[i%2], edgecolor='red')\n",
+ " plt.plot(item['epoch'], item[col])\n",
+    "                plt.title(\"RDT IPC, RDT: {}, {}\".format(key, col))\n",
+ " i += 1\n",
+ " plt.show()\n",
+ "\n",
+ "\n",
+ "\n",
+    "# memory bandwidth\n",
+    "def get_rdt_memory_bandwidth(rootdir, verbose=False):\n",
+ " \n",
+ " \n",
+ " df = fetch_rdt_data(rootdir)\n",
+ " group = df.groupby(['intel_rdt'])\n",
+ " color = ['oldlace', 'mistyrose']\n",
+ "\n",
+ " i = 0\n",
+ " for key, item in group:\n",
+ " curr_df = item\n",
+ " for col in curr_df:\n",
+    "            if 'bandwidth' in col:\n",
+ " if item[col].isnull().all():\n",
+ " continue\n",
+ " fig = plt.figure(figsize=(24,6), facecolor=color[i%2], edgecolor='red')\n",
+ " plt.plot(item['epoch'], item[col])\n",
+    "                plt.title(\"RDT memory bandwidth, RDT: {}, {}\".format(key, col))\n",
+ " i += 1\n",
+ " plt.show()\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "#### Memory (the following functions still need to be written for CSV)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "rootdir = 'metrics_data/'\n",
+ "\n",
+ "def fetch_memory_data(rootdir):\n",
+ " df = pd.DataFrame()\n",
+ " reg_compile = re.compile(\"memory\")\n",
+ " for dirpath, dirnames, filenames in os.walk(rootdir):\n",
+ " dirname = dirpath.split(os.sep)[-1] \n",
+ " if reg_compile.match(dirname):\n",
+ " print(dirname)\n",
+ " _df = pd.DataFrame()\n",
+ " for file in filenames: \n",
+ " col_name = file.split('-')[1]\n",
+ " temp_df = pd.read_csv(dirpath + os.sep + file)\n",
+ " _df['epoch'] = temp_df['epoch']\n",
+ " temp_df = temp_df.drop(['epoch'], axis=1)\n",
+ " new_cols = [col_name for i in temp_df.columns]\n",
+ " _df[new_cols] = temp_df\n",
+ " df = df.append(_df, ignore_index=True)\n",
+ " return df"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "scrolled": true
+ },
+ "outputs": [],
+ "source": [
+ "def get_memory_usage(rootdir, verbose=False):\n",
+ " df = fetch_memory_data(rootdir)\n",
+ " color = ['oldlace', 'mistyrose']\n",
+ " i = 0\n",
+ " for col in df:\n",
+ " if df[col].isnull().all():\n",
+ " continue\n",
+ " fig = plt.figure(figsize=(24,6), facecolor=color[i%2], edgecolor='red')\n",
+ " plt.plot(df['epoch'], df[col])\n",
+ " plt.title(\"{} Memory\".format(col))\n",
+ " i += 1\n",
+ " plt.show()\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ " "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Usage / Examples\n",
+ "\n",
+ "\n",
+ "##### CPU \n",
+ "\n",
+    "- Finding unused CPU cores\n",
+ "\n",
+ "```py\n",
+ "cores = unused_cores(rootdir='metrics_data')\n",
+ "```\n",
+ "\n",
+ "- For finding fully used cores\n",
+ "\n",
+ "```py\n",
+ "fully_used = fully_used_cores('metrics_data')\n",
+ "```\n",
+ "\n",
+ "- Similarly for plotting used cores\n",
+ "\n",
+ "```py\n",
+    "used_cores('metrics_data')\n",
+ "```\n",
+ "\n",
+ "\n",
+ "##### Interface\n",
+ "\n",
+ "- Interface Dropped \n",
+ "\n",
+ "```py\n",
+ "# Using CSV\n",
+ "dropped_interfaces = interface_dropped('metrics_data')\n",
+ "```\n",
+ "\n",
+ "- Interface Errors\n",
+ "\n",
+ "```py\n",
+ "# Using CSV\n",
+ "interface_errors('metrics_data')\n",
+ "```\n",
+ "\n",
+ "##### OVS Stats\n",
+ "\n",
+ "- OVS Stats Dropped \n",
+ "\n",
+ "```py\n",
+ "# Using CSV\n",
+ "ovs_stats_dropped('metrics_data')\n",
+ "```\n",
+ "\n",
+ "- OVS Stats Errors\n",
+ "\n",
+ "```py\n",
+ "# Using CSV\n",
+ "ovs_stats_errors('metrics_data')\n",
+ "```\n",
+ "\n",
+ "##### DPDK \n",
+ "\n",
+ "- DPDK Dropped \n",
+ "\n",
+ "```py\n",
+ "# Using CSV\n",
+ "dpdk_dropped('metrics_data')\n",
+ "```\n",
+ "\n",
+ "- DPDK Errors\n",
+ "\n",
+ "```py\n",
+ "# Using CSV\n",
+ "dpdk_errors('metrics_data')\n",
+ "```\n",
+ "\n",
+ "\n",
+ "\n",
+ "##### RDT (Do not run yet)\n",
+ "\n",
+ "- Plot bytes\n",
+ "\n",
+ "```py\n",
+ "#csv\n",
+ "plot_rdt_bytes('metrics_data')\n",
+ "```\n",
+ "\n",
+ "- Plot ipc values\n",
+ "\n",
+ "```py\n",
+ "#csv\n",
+ "plot_rdt_ipc('metrics_data')\n",
+ "```\n",
+ "\n",
+ "- Memory bandwidth\n",
+ "\n",
+ "```py\n",
+ "#csv\n",
+ "get_rdt_memory_bandwidth('metrics_data')\n",
+ "```\n",
+ "\n",
+ "##### Memory\n",
+ "\n",
+ "```py\n",
+ "#csv\n",
+ "get_memory_usage('metrics_data')\n",
+ "```"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.6.8"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
diff --git a/tools/lma/yamllintrc b/tools/lma/yamllintrc
new file mode 100644
index 00000000..9714a565
--- /dev/null
+++ b/tools/lma/yamllintrc
@@ -0,0 +1,25 @@
+# Copyright 2020 Tieto
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+extends: relaxed
+
+rules:
+ empty-lines:
+ max-start: 1
+ max-end: 1
+ colons:
+ max-spaces-after: 1
+ max-spaces-before: 1
+ line-length:
+ max: 250
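+
+# Example invocation (assumed to be run from the repository root):
+#   yamllint -c tools/lma/yamllintrc tools/lma/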
diff --git a/tools/load_gen/stress_ng/stress_ng.py b/tools/load_gen/stress_ng/stress_ng.py
index c2592dd1..41bfe990 100644
--- a/tools/load_gen/stress_ng/stress_ng.py
+++ b/tools/load_gen/stress_ng/stress_ng.py
@@ -30,6 +30,3 @@ class StressNg(Stress):
'name': 'stress-ng'
}
_logger = logging.getLogger(__name__)
-
- def __init__(self, stress_config):
- super(StressNg, self).__init__(stress_config)
diff --git a/tools/load_gen/stressorvm/__init__.py b/tools/load_gen/stressorvm/__init__.py
new file mode 100644
index 00000000..6a22d81c
--- /dev/null
+++ b/tools/load_gen/stressorvm/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2017-2018 Spirent Communications
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Package with wrapper for Stressor-VMs
+"""
diff --git a/tools/load_gen/stressorvm/stressor_vm.py b/tools/load_gen/stressorvm/stressor_vm.py
new file mode 100644
index 00000000..82329d2b
--- /dev/null
+++ b/tools/load_gen/stressorvm/stressor_vm.py
@@ -0,0 +1,155 @@
+# Copyright 2017-2018 Spirent Communications.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Wrapper file to create and manage Stressor-VM as loadgen
+"""
+
+import locale
+import logging
+import os
+import re
+import subprocess
+import time
+from tools import tasks
+from tools.load_gen.load_gen import ILoadGenerator
+from conf import settings as S
+
+
+class QemuVM(tasks.Process):
+ """
+    Class for controlling an instance of QEMU
+ """
+ def __init__(self, index):
+ self._running = False
+ self._logger = logging.getLogger(__name__)
+ self._number = index
+ pnumber = int(S.getValue('NN_BASE_VNC_PORT')) + self._number
+ cpumask = ",".join(S.getValue('NN_CORE_BINDING')[self._number])
+ self._monitor = '%s/vm%dmonitor' % ('/tmp', pnumber)
+ self._logfile = (os.path.join(S.getValue('LOG_DIR'),
+ S.getValue('NN_LOG_FILE')) +
+ str(self._number))
+ self._log_prefix = 'vnf_%d_cmd : ' % pnumber
+ name = 'NN%d' % index
+ vnc = ':%d' % pnumber
+ self._shared_dir = '%s/qemu%d_share' % ('/tmp', pnumber)
+ if not os.path.exists(self._shared_dir):
+ try:
+ os.makedirs(self._shared_dir)
+ except OSError as exp:
+                raise OSError("Failed to create shared directory %s: %s" %
+                              (self._shared_dir, exp))
+
+ self.nics_nr = S.getValue('NN_NICS_NR')[self._number]
+ self.image = S.getValue('NN_IMAGE')[self._number]
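+        # QEMU command line: pin the instance to the configured host cores
+        # via taskset, boot the stressor image in snapshot mode and expose
+        # a FAT-formatted shared drive for exchanging files with the guest.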
+ self._cmd = ['sudo', '-E', 'taskset', '-c', cpumask,
+ S.getValue('TOOLS')['qemu-system'],
+ '-m', S.getValue('NN_MEMORY')[self._number],
+ '-smp', S.getValue('NN_SMP')[self._number],
+ '-cpu', 'host,migratable=off',
+ '-drive', 'if={},file='.format(
+ S.getValue('NN_BOOT_DRIVE_TYPE')[self._number]) +
+ self.image, '-boot',
+ 'c', '--enable-kvm',
+ '-monitor', 'unix:%s,server,nowait' % self._monitor,
+ '-nographic', '-vnc', str(vnc), '-name', name,
+ '-snapshot', '-net none', '-no-reboot',
+ '-drive',
+ 'if=%s,format=raw,file=fat:rw:%s,snapshot=off' %
+ (S.getValue('NN_SHARED_DRIVE_TYPE')[self._number],
+ self._shared_dir)
+ ]
+
+ def start(self):
+ """
+ Start QEMU instance
+ """
+ super(QemuVM, self).start()
+ self._running = True
+
+ def stop(self, sig, slp):
+ """
+ Stops VNF instance.
+ """
+ if self._running:
+ self._logger.info('Killing VNF...')
+            # force termination of the VNF and wait for it to terminate;
+            # this avoids sporadic reboots of the host.
+ super(QemuVM, self).kill(signal=sig, sleep=slp)
+ # remove shared dir if it exists to avoid issues with file consistency
+ if os.path.exists(self._shared_dir):
+ tasks.run_task(['rm', '-f', '-r', self._shared_dir], self._logger,
+ 'Removing content of shared directory...', True)
+ self._running = False
+
+ def affinitize_nn(self):
+ """
+ Affinitize the SMP cores of a NN instance.
+ This function is same as the one in vnfs/qemu/qemu.py
+
+ :returns: None
+ """
+ thread_id = (r'.* CPU #%d: .* thread_id=(\d+)')
+ cur_locale = locale.getdefaultlocale()[1]
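+        # ask the QEMU monitor for the vCPU thread IDs by piping the
+        # 'info cpus' command through socat to the monitor socket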
+ proc = subprocess.Popen(
+ ('echo', 'info cpus'), stdout=subprocess.PIPE)
+ while not os.path.exists(self._monitor):
+ time.sleep(1)
+ output = subprocess.check_output(
+ ('sudo', 'socat', '-', 'UNIX-CONNECT:%s' % self._monitor),
+ stdin=proc.stdout)
+ proc.wait()
+
+ # calculate the number of CPUs specified by NN_SMP
+ cpu_nr = int(S.getValue('NN_SMP')[self._number])
+ # pin each NN's core to host core based on configured BINDING
+ for cpu in range(0, cpu_nr):
+ match = None
+ guest_thread_binding = S.getValue('NN_CORE_BINDING')[self._number]
+ for line in output.decode(cur_locale).split('\n'):
+ match = re.search(thread_id % cpu, line)
+ if match:
+ self._affinitize_pid(guest_thread_binding[cpu],
+ match.group(1))
+ break
+ if not match:
+ self._logger.error('Failed to affinitize guest core #%d. Could'
+ ' not parse tid.', cpu)
+
+
+# pylint: disable=super-init-not-called,unused-argument
+class StressorVM(ILoadGenerator):
+ """
+ Wrapper Class for Load-Generation through stressor-vm
+ """
+ def __init__(self, _config):
+ self.qvm_list = []
+ for vmindex in range(int(S.getValue('NN_COUNT'))):
+ qvm = QemuVM(vmindex)
+ self.qvm_list.append(qvm)
+
+ def start(self):
+ """Start stressor VMs
+ """
+ for nvm in self.qvm_list:
+ nvm.start()
+ nvm.affinitize_nn()
+
+ def kill(self, signal='-9', sleep=2):
+ """
+ Stop Stressor VMs
+ """
+ for nvm in self.qvm_list:
+ nvm.stop(signal, sleep)
diff --git a/tools/md-testvnf/config.json b/tools/md-testvnf/config.json
new file mode 100644
index 00000000..fcfbf0cc
--- /dev/null
+++ b/tools/md-testvnf/config.json
@@ -0,0 +1,11 @@
+{
+ "username": "",
+ "password": "",
+ "networks" : "",
+ "source_image": "",
+ "flavor": "",
+ "domain_name": "",
+ "floating_ip_network" : "",
+
+ "ssh_path": ""
+}
diff --git a/tools/md-testvnf/http/ks.cfg b/tools/md-testvnf/http/ks.cfg
new file mode 100644
index 00000000..46aa3310
--- /dev/null
+++ b/tools/md-testvnf/http/ks.cfg
@@ -0,0 +1,88 @@
+install
+cdrom
+lang en_US.UTF-8
+keyboard us
+network --bootproto=dhcp
+rootpw centos
+firewall --disabled
+selinux --permissive
+timezone UTC
+unsupported_hardware
+bootloader --location=mbr
+text
+skipx
+zerombr
+clearpart --all --initlabel
+part / --fstype="ext4" --grow --size=100
+auth --enableshadow --passalgo=sha512 --kickstart
+firstboot --disabled
+reboot
+services --disabled kdump
+user --name=centos --plaintext --password centos
+url --url=http://centos.osuosl.org/7.8.2003/os/x86_64
+repo --name=updates --baseurl=http://centos.osuosl.org/7.8.2003/updates/x86_64
+
+%packages --nobase --ignoremissing
+openssh-clients
+sudo
+wget
+nfs-utils
+net-tools
+perl-libwww-perl
+bzip2
+vim
+rsync
+man
+man-pages
+parted
+-fprintd-pam
+-intltool
+
+# unnecessary firmware
+-aic94xx-firmware
+-atmel-firmware
+-b43-openfwwf
+-bfa-firmware
+-ipw2100-firmware
+-ipw2200-firmware
+-ivtv-firmware
+-iwl1000-firmware
+-iwl100-firmware
+-iwl105-firmware
+-iwl135-firmware
+-iwl2000-firmware
+-iwl2030-firmware
+-iwl3160-firmware
+-iwl3945-firmware
+-iwl4965-firmware
+-iwl5000-firmware
+-iwl5150-firmware
+-iwl6000-firmware
+-iwl6000g2a-firmware
+-iwl6000g2b-firmware
+-iwl6050-firmware
+-iwl7260-firmware
+-libertas-sd8686-firmware
+-libertas-sd8787-firmware
+-libertas-usb8388-firmware
+-ql2100-firmware
+-ql2200-firmware
+-ql23xx-firmware
+-ql2400-firmware
+-ql2500-firmware
+-rt61pci-firmware
+-rt73usb-firmware
+-xorg-x11-drv-ati-firmware
+-zd1211-firmware
+%end
+
+%post
+yum -y upgrade
+# update root certs
+wget https://raw.githubusercontent.com/bagder/curl/master/lib/mk-ca-bundle.pl
+perl mk-ca-bundle.pl /etc/pki/tls/certs/ca-bundle.crt
+rm certdata.txt mk-ca-bundle.pl
+# sudo
+echo "%centos ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers.d/centos
+sed -i "s/^.*requiretty/#Defaults requiretty/" /etc/sudoers
+%end \ No newline at end of file
diff --git a/tools/md-testvnf/playbook.yml b/tools/md-testvnf/playbook.yml
new file mode 100644
index 00000000..81a51f5e
--- /dev/null
+++ b/tools/md-testvnf/playbook.yml
@@ -0,0 +1,36 @@
+---
+- hosts: all
+ vars:
+ username: "testvnf"
+ password: "testvnf"
+ become: true
+ tasks:
+
+ - name: create a new user
+ user:
+ name: "{{ username }}"
+ state: present
+ groups: "wheel"
+ password: "{{ password | password_hash('sha512') }}"
+ comment: "user for ansible connection"
+
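+  # allow passwordless sudo for the wheel group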
+ - lineinfile:
+ path: /etc/sudoers
+ state: present
+ regexp: '^%wheel'
+ line: '%wheel ALL=(ALL) NOPASSWD: ALL'
+ validate: 'visudo -cf %s'
+
+  - name: create a temp file if it doesn't exist
+ file:
+ path: "temp"
+ state: touch
+
+
+ - name: install epel-release
+ package:
+ name: epel-release
+ state: present
+
+ - name: Execute the deployment script
+ command: /home/centos/deploycentostools.sh deploy \ No newline at end of file
diff --git a/tools/md-testvnf/scripts/ansible.sh b/tools/md-testvnf/scripts/ansible.sh
new file mode 100644
index 00000000..770e2483
--- /dev/null
+++ b/tools/md-testvnf/scripts/ansible.sh
@@ -0,0 +1,7 @@
+#!/bin/bash -eux
+yum -y update
+# Install EPEL repository.
+yum -y install epel-release
+
+# Install Ansible.
+yum -y install ansible \ No newline at end of file
diff --git a/tools/md-testvnf/scripts/deploycentostools.sh b/tools/md-testvnf/scripts/deploycentostools.sh
new file mode 100755
index 00000000..694b020c
--- /dev/null
+++ b/tools/md-testvnf/scripts/deploycentostools.sh
@@ -0,0 +1,364 @@
+#!/usr/bin/env bash
+##
+## Copyright (c) 2010-2020 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+# Directory for package build
+BUILD_DIR="/opt/rapid"
+TREX_DIR="/opt/trex"
+RAMSPEED_DIR="/opt/ramspeed"
+STRESSNG_DIR="/opt/stressng"
+UNIXBENCH_DIR="/opt/unixbench"
+DPDK_VERSION="20.05"
+PROX_COMMIT="80dfeb5c734cc4d681f467e853a541a8a91fe1cf"
+PROX_CHECKOUT="git checkout ${PROX_COMMIT}"
+## The next line overrides PROX_COMMIT and replaces the version with a very specific patch. It should be commented out
+## if you want to use a committed version of PROX with the COMMIT ID specified above.
+## As an example: Following line has the commit for testing IMIX, IPV6, ... It is the merge of all PROX commits on May 27th 2020
+#PROX_CHECKOUT="git fetch \"https://gerrit.opnfv.org/gerrit/samplevnf\" refs/changes/23/70223/1 && git checkout FETCH_HEAD"
+MULTI_BUFFER_LIB_VER="0.52"
+export RTE_SDK="${BUILD_DIR}/dpdk-${DPDK_VERSION}"
+export RTE_TARGET="x86_64-native-linuxapp-gcc"
+
+# By default, do not update OS
+OS_UPDATE="n"
+# By default, assuming that we are in the VM
+K8S_ENV="n"
+
+# If already running from root, no need for sudo
+SUDO=""
+[ $(id -u) -ne 0 ] && SUDO="sudo"
+
+function os_pkgs_install()
+{
+ ${SUDO} yum install -y deltarpm yum-utils
+
+ # NASM repository for AESNI MB library
+ #${SUDO} yum-config-manager --add-repo http://www.nasm.us/nasm.repo
+
+ [ "${OS_UPDATE}" == "y" ] && ${SUDO} yum update -y
+ ${SUDO} yum install -y git wget gcc unzip libpcap-devel ncurses-devel \
+ libedit-devel lua-devel kernel-devel iperf3 pciutils \
+ numactl-devel vim tuna openssl-devel wireshark \
+ make driverctl
+
+ ${SUDO} wget https://www.nasm.us/pub/nasm/releasebuilds/2.14.02/linux/nasm-2.14.02-0.fc27.x86_64.rpm
+ ${SUDO} rpm -ivh nasm-2.14.02-0.fc27.x86_64.rpm
+}
+
+function k8s_os_pkgs_runtime_install()
+{
+ [ "${OS_UPDATE}" == "y" ] && ${SUDO} yum update -y
+
+ # Install required dynamically linked libraries + required packages
+ ${SUDO} yum install -y numactl-libs libpcap openssh openssh-server \
+ openssh-clients sudo
+}
+
+function os_cfg()
+{
+ # huge pages to be used by DPDK
+ ${SUDO} sh -c '(echo "vm.nr_hugepages = 1024") > /etc/sysctl.conf'
+
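+    # load vfio in unsafe no-IOMMU mode so DPDK can bind NICs inside a VM without a vIOMMU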
+ ${SUDO} sh -c '(echo "options vfio enable_unsafe_noiommu_mode=1") > /etc/modprobe.d/vfio.conf'
+ ${SUDO} sh -c '(echo "vfio") > /etc/modules-load.d/vfio.conf'
+    ${SUDO} sh -c '(echo "vfio-pci") >> /etc/modules-load.d/vfio.conf'
+ # Enabling tuned with the realtime-virtual-guest profile
+ pushd ${BUILD_DIR} > /dev/null 2>&1
+ wget http://linuxsoft.cern.ch/cern/centos/7/rt/x86_64/Packages/tuned-profiles-realtime-2.8.0-5.el7_4.2.noarch.rpm
+ wget http://linuxsoft.cern.ch/cern/centos/7/rt/x86_64/Packages/tuned-profiles-nfv-guest-2.8.0-5.el7_4.2.noarch.rpm
+    # Install with --nodeps. The latest CentOS cloud images come with a tuned version higher than 2.8. These 2 packages, however,
+    # do not depend on v2.8 and also work with tuned 2.9. This needs to be re-checked when future images ship a newer tuned.
+ ${SUDO} rpm -ivh ${BUILD_DIR}/tuned-profiles-realtime-2.8.0-5.el7_4.2.noarch.rpm --nodeps
+ ${SUDO} rpm -ivh ${BUILD_DIR}/tuned-profiles-nfv-guest-2.8.0-5.el7_4.2.noarch.rpm --nodeps
+    # Although we do not know how many cores the VM will have when deployed for real testing, we already put a number for the
+    # isolated CPUs so we can start the realtime-virtual-guest profile. If we don't, that command will fail.
+    # When the VM is instantiated, the check_kernel_params service will check the real number of cores available to this VM
+    # and update realtime-virtual-guest-variables.conf accordingly.
+ echo "isolated_cores=1-3" | ${SUDO} tee -a /etc/tuned/realtime-virtual-guest-variables.conf
+ ${SUDO} tuned-adm profile realtime-virtual-guest
+
+    # Install the check_tuned_params service to make sure that the grub cmdline has the right CPUs in isolcpus. The actual number
+    # of CPUs assigned to this VM depends on the flavor used; we don't know at this time what that will be.
+ ${SUDO} chmod +x ${BUILD_DIR}/check_prox_system_setup.sh
+ ${SUDO} mv ${BUILD_DIR}/check_prox_system_setup.sh /usr/local/libexec/
+ ${SUDO} mv ${BUILD_DIR}/check-prox-system-setup.service /etc/systemd/system/
+ ${SUDO} systemctl daemon-reload
+ ${SUDO} systemctl enable check-prox-system-setup.service
+ popd > /dev/null 2>&1
+}
+
+function k8s_os_cfg()
+{
+ [ ! -f /etc/ssh/ssh_host_rsa_key ] && ssh-keygen -t rsa -f /etc/ssh/ssh_host_rsa_key -N ''
+ [ ! -f /etc/ssh/ssh_host_ecdsa_key ] && ssh-keygen -t ecdsa -f /etc/ssh/ssh_host_ecdsa_key -N ''
+ [ ! -f /etc/ssh/ssh_host_ed25519_key ] && ssh-keygen -t ed25519 -f /etc/ssh/ssh_host_ed25519_key -N ''
+
+ [ ! -d /var/run/sshd ] && mkdir -p /var/run/sshd
+
+ USER_NAME="centos"
+ USER_PWD="centos"
+
+ useradd -m -d /home/${USER_NAME} -s /bin/bash -U ${USER_NAME}
+ echo "${USER_NAME}:${USER_PWD}" | chpasswd
+ usermod -aG wheel ${USER_NAME}
+
+ echo "%wheel ALL=(ALL) NOPASSWD: ALL" > /etc/sudoers.d/wheelnopass
+}
+
+function mblib_install()
+{
+ export AESNI_MULTI_BUFFER_LIB_PATH="${BUILD_DIR}/intel-ipsec-mb-${MULTI_BUFFER_LIB_VER}"
+
+ # Downloading the Multi-buffer library. Note that the version to download is linked to the DPDK version being used
+ pushd ${BUILD_DIR} > /dev/null 2>&1
+ wget https://github.com/01org/intel-ipsec-mb/archive/v${MULTI_BUFFER_LIB_VER}.zip
+ unzip v${MULTI_BUFFER_LIB_VER}.zip
+ pushd ${AESNI_MULTI_BUFFER_LIB_PATH}
+ make -j`getconf _NPROCESSORS_ONLN`
+ ${SUDO} make install
+ popd > /dev/null 2>&1
+ popd > /dev/null 2>&1
+}
+
+function trex_install()
+{
+ pushd ${TREX_DIR} > /dev/null 2>&1
+ wget --no-cache https://trex-tgen.cisco.com/trex/release/latest
+ tar -xzvf latest
+ popd > /dev/null 2>&1
+}
+
+function unixbench_install()
+{
+ pushd ${UNIXBENCH_DIR} > /dev/null 2>&1
+ git clone https://github.com/kdlucas/byte-unixbench
+ popd > /dev/null 2>&1
+}
+
+function ramspeed_install()
+{
+    RAMSPEED_BUILD_DIR="${RAMSPEED_DIR}/ramspeed-smp"
+ pushd ${RAMSPEED_DIR} > /dev/null 2>&1
+ git clone https://github.com/cruvolo/ramspeed-smp
+ pushd ${RAMSPEED_BUILD_DIR} > /dev/null 2>&1
+ chmod 766 build.sh
+ source build.sh
+ popd > /dev/null 2>&1
+ popd > /dev/null 2>&1
+}
+
+function collectd_install()
+{
+ ${SUDO} yum -y install collectd
+}
+
+function fio_install()
+{
+ ${SUDO} yum -y install fio
+}
+
+function stressng_install()
+{
+    STRESSNG_BUILD_DIR="${STRESSNG_DIR}/stress-ng"
+ pushd ${STRESSNG_DIR} > /dev/null 2>&1
+ git clone https://github.com/ColinIanKing/stress-ng
+ ${SUDO} yum -y install libaio-devel libbsd-devel libcap-devel libattr-devel libgcrypt-devel
+ ${SUDO} yum -y install Judy-devel keyutils-libs-devel lksctp-tools-devel libatomic zlib-devel
+ pushd ${STRESSNG_BUILD_DIR} > /dev/null 2>&1
+ make clean
+ make
+ ${SUDO} make install
+ popd > /dev/null 2>&1
+ popd > /dev/null 2>&1
+}
+
+function dpdk_install()
+{
+ # Build DPDK for the latest kernel installed
+ LATEST_KERNEL_INSTALLED=`ls -v1 /lib/modules/ | tail -1`
+ export RTE_KERNELDIR="/lib/modules/${LATEST_KERNEL_INSTALLED}/build"
+
+ # Get and compile DPDK
+ pushd ${BUILD_DIR} > /dev/null 2>&1
+ wget http://fast.dpdk.org/rel/dpdk-${DPDK_VERSION}.tar.xz
+ tar -xf ./dpdk-${DPDK_VERSION}.tar.xz
+ popd > /dev/null 2>&1
+
+ ${SUDO} ln -s ${RTE_SDK} ${BUILD_DIR}/dpdk
+
+ pushd ${RTE_SDK} > /dev/null 2>&1
+ make config T=${RTE_TARGET}
+ # Starting from DPDK 20.05, the IGB_UIO driver is not compiled by default.
+ # Uncomment the sed command to enable the driver compilation
+    #${SUDO} sed -i '/CONFIG_RTE_EAL_IGB_UIO=n/c\CONFIG_RTE_EAL_IGB_UIO=y' ${RTE_SDK}/build/.config
+
+ # For Kubernetes environment we use host vfio module
+ if [ "${K8S_ENV}" == "y" ]; then
+ sed -i 's/CONFIG_RTE_EAL_IGB_UIO=y/CONFIG_RTE_EAL_IGB_UIO=n/g' ${RTE_SDK}/build/.config
+ sed -i 's/CONFIG_RTE_LIBRTE_KNI=y/CONFIG_RTE_LIBRTE_KNI=n/g' ${RTE_SDK}/build/.config
+ sed -i 's/CONFIG_RTE_KNI_KMOD=y/CONFIG_RTE_KNI_KMOD=n/g' ${RTE_SDK}/build/.config
+ fi
+
+ # Compile with MB library
+ sed -i '/CONFIG_RTE_LIBRTE_PMD_AESNI_MB=n/c\CONFIG_RTE_LIBRTE_PMD_AESNI_MB=y' ${RTE_SDK}/build/.config
+ make -j`getconf _NPROCESSORS_ONLN`
+ ln -s ${RTE_SDK}/build ${RTE_SDK}/${RTE_TARGET}
+ popd > /dev/null 2>&1
+}
+
+function prox_compile()
+{
+ # Compile PROX
+ pushd ${BUILD_DIR}/samplevnf/VNFs/DPPD-PROX
+ make -j`getconf _NPROCESSORS_ONLN`
+ ${SUDO} cp ${BUILD_DIR}/samplevnf/VNFs/DPPD-PROX/build/app/prox ${BUILD_DIR}/prox
+ popd > /dev/null 2>&1
+}
+
+function prox_install()
+{
+ # Clone and compile PROX
+ pushd ${BUILD_DIR} > /dev/null 2>&1
+ git clone https://git.opnfv.org/samplevnf
+ pushd ${BUILD_DIR}/samplevnf/VNFs/DPPD-PROX > /dev/null 2>&1
+ bash -c "${PROX_CHECKOUT}"
+ popd > /dev/null 2>&1
+ prox_compile
+ popd > /dev/null 2>&1
+}
+
+function port_info_build()
+{
+ [ ! -d ${BUILD_DIR}/port_info ] && echo "Skipping port_info compilation..." && return
+
+ pushd ${BUILD_DIR}/port_info > /dev/null 2>&1
+ make
+ ${SUDO} cp ${BUILD_DIR}/port_info/build/app/port_info_app ${BUILD_DIR}/port_info_app
+ popd > /dev/null 2>&1
+}
+
+function create_minimal_install()
+{
+ ldd ${BUILD_DIR}/prox | awk '{ if ($(NF-1) != "=>") print $(NF-1) }' >> ${BUILD_DIR}/list_of_install_components
+
+ echo "${BUILD_DIR}/prox" >> ${BUILD_DIR}/list_of_install_components
+ echo "${BUILD_DIR}/port_info_app" >> ${BUILD_DIR}/list_of_install_components
+
+ tar -czvhf ${BUILD_DIR}/install_components.tgz -T ${BUILD_DIR}/list_of_install_components
+}
+
+function cleanup()
+{
+ ${SUDO} yum autoremove -y
+ ${SUDO} yum clean all
+ ${SUDO} rm -rf /var/cache/yum
+}
+
+function k8s_runtime_image()
+{
+ k8s_os_pkgs_runtime_install
+ k8s_os_cfg
+ cleanup
+
+ pushd / > /dev/null 2>&1
+ tar -xvf ${BUILD_DIR}/install_components.tgz --skip-old-files
+ popd > /dev/null 2>&1
+
+ ldconfig
+
+ #rm -rf ${BUILD_DIR}/install_components.tgz
+}
+
+function print_usage()
+{
+ echo "Usage: ${0} [OPTIONS] [COMMAND]"
+ echo "Options:"
+ echo " -u, --update Full OS update"
+ echo " -k, --kubernetes Build for Kubernetes environment"
+ echo "Commands:"
+ echo " deploy Run through all deployment steps"
+ echo " compile PROX compile only"
+ echo " runtime_image Apply runtime configuration only"
+}
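+
+# Illustrative invocations (options and commands as defined in print_usage above):
+#   ./deploycentostools.sh deploy            # full build: packages, DPDK, PROX, benchmark tools
+#   ./deploycentostools.sh -k deploy         # build for a Kubernetes environment
+#   ./deploycentostools.sh compile           # recompile PROX only
+#   ./deploycentostools.sh -u runtime_image  # update the OS, then apply the runtime configuration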
+
+COMMAND=""
+# Parse options and command
+for opt in "$@"; do
+ case ${opt} in
+ -u|--update)
+ echo 'Full OS update will be done!'
+ OS_UPDATE="y"
+ ;;
+ -k|--kubernetes)
+ echo "Kubernetes environment is set!"
+ K8S_ENV="y"
+ ;;
+ compile)
+ COMMAND="compile"
+ ;;
+ runtime_image)
+ COMMAND="runtime_image"
+ ;;
+ deploy)
+ COMMAND="deploy"
+ ;;
+ *)
+ echo "Unknown option/command ${opt}"
+ print_usage
+ exit 1
+ ;;
+ esac
+done
+
+if [ "${COMMAND}" == "compile" ]; then
+ echo "PROX compile only..."
+ prox_compile
+elif [ "${COMMAND}" == "runtime_image" ]; then
+    echo "Runtime image installation and configuration..."
+ k8s_runtime_image
+elif [ "${COMMAND}" == "deploy" ]; then
+ [ ! -d ${BUILD_DIR} ] && ${SUDO} mkdir -p ${BUILD_DIR}
+ ${SUDO} chmod 0777 ${BUILD_DIR}
+
+ os_pkgs_install
+
+ if [ "${K8S_ENV}" == "y" ]; then
+ k8s_os_cfg
+ else
+ os_cfg
+ fi
+
+ mblib_install
+ dpdk_install
+ prox_install
+ trex_install
+ collectd_install
+ stressng_install
+ fio_install
+ unixbench_install
+ ramspeed_install
+
+ if [ "${K8S_ENV}" == "y" ]; then
+ port_info_build
+ create_minimal_install
+ fi
+
+ cleanup
+else
+ print_usage
+fi
diff --git a/tools/md-testvnf/scripts/sshConfig.sh b/tools/md-testvnf/scripts/sshConfig.sh
new file mode 100644
index 00000000..b746cde6
--- /dev/null
+++ b/tools/md-testvnf/scripts/sshConfig.sh
@@ -0,0 +1,10 @@
+#!/bin/bash -eux
+sudo mv temp /home/testvnf/authorized_keys
+sudo mkdir /home/testvnf/.ssh
+sudo mv /home/testvnf/authorized_keys /home/testvnf/.ssh/
+sudo chmod 700 /home/testvnf/.ssh
+sudo chmod 600 /home/testvnf/.ssh/authorized_keys
+sudo chown testvnf /home/testvnf/.ssh
+sudo chown testvnf /home/testvnf/.ssh/authorized_keys
+# Add `sync` so Packer doesn't quit too early, before the large file is deleted.
+sync
\ No newline at end of file
diff --git a/tools/md-testvnf/testVNF_image.json b/tools/md-testvnf/testVNF_image.json
new file mode 100644
index 00000000..2b27a28a
--- /dev/null
+++ b/tools/md-testvnf/testVNF_image.json
@@ -0,0 +1,72 @@
+{
+ "builders": [
+ {
+ "boot_command": [
+ "<tab> text biosdevname=0 net.ifnames=0 ",
+ "ks=http://{{ .HTTPIP }}:{{ .HTTPPort }}/ks.cfg<enter><wait>"
+ ],
+ "accelerator": "kvm",
+ "boot_wait": "10s",
+ "disk_size": 2048,
+ "disk_interface": "virtio-scsi",
+ "http_directory": "http",
+ "iso_checksum": "101bc813d2af9ccf534d112cbe8670e6d900425b297d1a4d2529c5ad5f226372",
+ "iso_checksum_type": "sha256",
+ "iso_url": "http://centos.osuosl.org/7.8.2003/isos/x86_64/CentOS-7-x86_64-NetInstall-2003.iso",
+ "output_directory": "image",
+ "qemuargs": [ [ "-m", "1024M" ]],
+ "shutdown_command": "echo 'centos'|sudo -S /sbin/halt -h -p",
+ "ssh_password": "centos",
+ "ssh_port": 22,
+ "ssh_username": "centos",
+ "ssh_wait_timeout": "10000s",
+ "type": "qemu",
+ "vm_name": "packer-centos-7-x86_64-openstack",
+ "vnc_bind_address": "0.0.0.0",
+      "headless": true
+ },
+ {
+ "name": "openstack",
+ "type": "openstack",
+ "image_name": "testvnf_image",
+ "identity_endpoint": "{{user `identiy_endpoint`}}",
+ "username": "{{user `username`}}",
+ "password": "{{user `password`}}",
+ "ssh_username": "centos",
+ "networks" : "{{user `networks`}}",
+ "source_image": "{{user `source_image`}}",
+ "flavor": "{{user `flavor`}}",
+ "domain_name": "{{user `domain_name`}}",
+ "ssh_timeout": "15m",
+ "use_floating_ip": "true",
+ "floating_ip_network" : "{{user `floating_ip_network`}}"
+ }
+],
+ "provisioners": [
+ {
+ "type": "shell",
+ "execute_command": "echo testvnf | {{.Vars}} sudo -S -E bash '{{.Path}}'",
+ "script": "scripts/ansible.sh"
+ },
+ {
+ "type": "file",
+ "source": "scripts/deploycentostools.sh",
+ "destination": "deploycentostools.sh"
+ },
+ {
+ "type": "ansible-local",
+ "playbook_file": "playbook.yml"
+ },
+ {
+ "type": "file",
+ "source": "{{user `ssh_path`}}",
+ "destination": "temp"
+ },
+ {
+ "type": "shell",
+ "execute_command": "echo testvnf | {{.Vars}} sudo -S -E bash '{{.Path}}'",
+ "script": "scripts/sshConfig.sh"
+ }
+ ]
+}
+
diff --git a/tools/module_manager.py b/tools/module_manager.py
index dd1d92be..943399ba 100644
--- a/tools/module_manager.py
+++ b/tools/module_manager.py
@@ -160,7 +160,7 @@ class ModuleManager(object):
self._logger.info('Unable to get list of dependecies for module \'%s\'.', module)
# ...and try to continue, just for case that dependecies are already loaded
- if len(deps):
+ if deps:
return deps.split(',')
else:
return []
diff --git a/tools/namespace.py b/tools/namespace.py
index 9131398f..50374b95 100644
--- a/tools/namespace.py
+++ b/tools/namespace.py
@@ -135,9 +135,8 @@ def reset_port_to_root(port, name):
port, name), False)
-# pylint: disable=unused-argument
# pylint: disable=invalid-name
-def validate_add_ip_to_namespace_eth(result, port, name, ip_addr, cidr):
+def validate_add_ip_to_namespace_eth(_result, port, name, ip_addr, cidr):
"""
Validation function for integration testcases
"""
@@ -147,7 +146,7 @@ def validate_add_ip_to_namespace_eth(result, port, name, ip_addr, cidr):
_LOGGER, 'Validating ip address in namespace...', False))
-def validate_assign_port_to_namespace(result, port, name, port_up=False):
+def validate_assign_port_to_namespace(_result, port, name, _port_up=False):
"""
Validation function for integration testcases
"""
@@ -157,14 +156,14 @@ def validate_assign_port_to_namespace(result, port, name, port_up=False):
_LOGGER, 'Validating port in namespace...'))
-def validate_create_namespace(result, name):
+def validate_create_namespace(_result, name):
"""
Validation function for integration testcases
"""
return name in get_system_namespace_list()
-def validate_delete_namespace(result, name):
+def validate_delete_namespace(_result, name):
"""
Validation function for integration testcases
"""
diff --git a/tools/networkcard.py b/tools/networkcard.py
index 2cd296fb..758010d2 100644
--- a/tools/networkcard.py
+++ b/tools/networkcard.py
@@ -191,7 +191,7 @@ def get_mac(pci_handle):
"""
mac_path = glob.glob(os.path.join(_PCI_DIR, _PCI_NET, '*', 'address').format(pci_handle))
# kernel driver is loaded and MAC can be read
- if len(mac_path) and os.path.isfile(mac_path[0]):
+ if mac_path and os.path.isfile(mac_path[0]):
with open(mac_path[0], 'r') as _file:
return _file.readline().rstrip('\n')
diff --git a/tools/os_deploy_tgen/__init__.py b/tools/os_deploy_tgen/__init__.py
new file mode 100644
index 00000000..1b2d5ea6
--- /dev/null
+++ b/tools/os_deploy_tgen/__init__.py
@@ -0,0 +1,17 @@
+# Copyright 2020 Spirent Communications.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Package to deploy the Traffic generator in OpenStack
+"""
diff --git a/tools/os_deploy_tgen/osclients/__init__.py b/tools/os_deploy_tgen/osclients/__init__.py
new file mode 100644
index 00000000..e73a36c9
--- /dev/null
+++ b/tools/os_deploy_tgen/osclients/__init__.py
@@ -0,0 +1,17 @@
+# Copyright 2020 Spirent Communications.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+OpenStack Client
+"""
diff --git a/tools/os_deploy_tgen/osclients/glance.py b/tools/os_deploy_tgen/osclients/glance.py
new file mode 100644
index 00000000..f59f0d8d
--- /dev/null
+++ b/tools/os_deploy_tgen/osclients/glance.py
@@ -0,0 +1,34 @@
+# Copyright (c) 2020 Mirantis Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Glance Client
+"""
+
+def get_image(glance_client, image_name):
+ """
+    Get the image
+ """
+ for image in glance_client.images.list():
+ if image.name == image_name:
+ return image
+ return None
+
+
+def get_supported_versions(glance_client):
+ """
+    Get supported versions
+ """
+ return set(version['id'] for version in glance_client.versions.list())
diff --git a/tools/os_deploy_tgen/osclients/heat.py b/tools/os_deploy_tgen/osclients/heat.py
new file mode 100755
index 00000000..8681731b
--- /dev/null
+++ b/tools/os_deploy_tgen/osclients/heat.py
@@ -0,0 +1,156 @@
+# Copyright 2020 Mirantis Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Heat Client
+"""
+
+#import sys
+import time
+
+from heatclient import exc
+from oslo_log import log as logging
+from timeout_decorator import timeout, TimeoutError
+
+LOG = logging.getLogger(__name__)
+
+
+def create_stack(heat_client, stack_name, template, parameters,
+ environment=None):
+ """
+ Create Stack
+ """
+ stack_params = {
+ 'stack_name': stack_name,
+ 'template': template,
+ 'parameters': parameters,
+ 'environment': environment,
+ }
+
+ stack = heat_client.stacks.create(**stack_params)['stack']
+ LOG.info('New stack: %s', stack)
+
+ wait_stack_completion(heat_client, stack['id'])
+
+ return stack['id']
+
+
+def get_stack_status(heat_client, stack_id):
+ """
+ Get Stack Status
+ """
+ # stack.get operation may take long time and run out of time. The reason
+ # is that it resolves all outputs which is done serially. On the other hand
+ # stack status can be retrieved from the list operation. Internally listing
+ # supports paging and every request should not take too long.
+ for stack in heat_client.stacks.list():
+ if stack.id == stack_id:
+ return stack.status, stack.stack_status_reason
+ else:
+ raise exc.HTTPNotFound(message='Stack %s is not found' % stack_id)
+
+def get_id_with_name(heat_client, stack_name):
+ """
+ Get Stack ID by name
+ """
+ # This method isn't really necessary since the Heat client accepts
+ # stack_id and stack_name interchangeably. This is provided more as a
+ # safety net to use ids which are guaranteed to be unique and provides
+ # the benefit of keeping the Shaker code consistent and more easily
+ # traceable.
+ stack = heat_client.stacks.get(stack_name)
+ return stack.id
+
+
+def wait_stack_completion(heat_client, stack_id):
+ """
+ Wait for Stack completion
+ """
+ reason = None
+ status = None
+
+ while True:
+ status, reason = get_stack_status(heat_client, stack_id)
+ LOG.debug('Stack status: %s', status)
+ if status not in ['IN_PROGRESS', '']:
+ break
+
+ time.sleep(5)
+
+ if status != 'COMPLETE':
+ resources = heat_client.resources.list(stack_id)
+ for res in resources:
+ if (res.resource_status != 'CREATE_COMPLETE' and
+ res.resource_status_reason):
+ LOG.error('Heat stack resource %(res)s of type %(type)s '
+ 'failed with %(reason)s',
+ dict(res=res.logical_resource_id,
+ type=res.resource_type,
+ reason=res.resource_status_reason))
+
+ raise exc.StackFailure(stack_id, status, reason)
+
+
+# set the timeout for this method so we don't get stuck polling indefinitely
+# waiting for a delete
+@timeout(600)
+def wait_stack_deletion(heat_client, stack_id):
+ """
+ Wait for stack deletion
+ """
+ try:
+ heat_client.stacks.delete(stack_id)
+ while True:
+ status, reason = get_stack_status(heat_client, stack_id)
+ LOG.debug('Stack status: %s Stack reason: %s', status, reason)
+ if status == 'FAILED':
+ raise exc.StackFailure('Failed to delete stack %s' % stack_id)
+
+ time.sleep(5)
+
+ except TimeoutError:
+        LOG.error('Timed out waiting for deletion of stack %s', stack_id)
+
+ except exc.HTTPNotFound:
+ # once the stack is gone we can assume it was successfully deleted
+ # clear the exception so it doesn't confuse the logs
+ #if sys.version_info < (3, 0):
+ # sys.exc_clear()
+ LOG.info('Stack %s was successfully deleted', stack_id)
+
+
+def get_stack_outputs(heat_client, stack_id):
+ """
+ Get Stack Output
+ """
+ # try to use optimized way to retrieve outputs, fallback otherwise
+ if hasattr(heat_client.stacks, 'output_list'):
+ try:
+ output_list = heat_client.stacks.output_list(stack_id)['outputs']
+
+ result = {}
+ for output in output_list:
+ output_key = output['output_key']
+ value = heat_client.stacks.output_show(stack_id, output_key)
+ result[output_key] = value['output']['output_value']
+
+ return result
+ except BaseException as err:
+ LOG.info('Cannot get output list, fallback to old way: %s', err)
+
+ outputs_list = heat_client.stacks.get(stack_id).to_dict()['outputs']
+ return dict((item['output_key'], item['output_value'])
+ for item in outputs_list)
diff --git a/tools/os_deploy_tgen/osclients/neutron.py b/tools/os_deploy_tgen/osclients/neutron.py
new file mode 100644
index 00000000..f75077dc
--- /dev/null
+++ b/tools/os_deploy_tgen/osclients/neutron.py
@@ -0,0 +1,34 @@
+# Copyright (c) 2015 Mirantis Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Neutron client
+"""
+
+from oslo_log import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+
+def choose_external_net(neutron_client):
+ """
+ Choose External Network
+ """
+ ext_nets = neutron_client.list_networks(
+ **{'router:external': True})['networks']
+ if not ext_nets:
+ raise Exception('No external networks found')
+ return ext_nets[0]['name']
diff --git a/tools/os_deploy_tgen/osclients/nova.py b/tools/os_deploy_tgen/osclients/nova.py
new file mode 100644
index 00000000..b2baa34f
--- /dev/null
+++ b/tools/os_deploy_tgen/osclients/nova.py
@@ -0,0 +1,213 @@
+# Copyright (c) 2020 Mirantis Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Nova Client
+"""
+
+import itertools
+import re
+import time
+
+from novaclient import client as nova_client_pkg
+from oslo_log import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+class ForbiddenException(nova_client_pkg.exceptions.Forbidden):
+ """
+    Custom Exception
+ """
+
+
+def get_available_compute_nodes(nova_client, flavor_name):
+ """
+ Return available compute nodes
+ """
+ try:
+ host_list = [dict(host=svc.host, zone=svc.zone)
+ for svc in
+ nova_client.services.list(binary='nova-compute')
+ if svc.state == 'up' and svc.status == 'enabled']
+
+ # If the flavor has aggregate_instance_extra_specs set then filter
+ # host_list to pick only the hosts matching the chosen flavor.
+ flavor = get_flavor(nova_client, flavor_name)
+
+ if flavor is not None:
+ extra_specs = flavor.get_keys()
+
+ for item in extra_specs:
+ if "aggregate_instance_extra_specs" in item:
+ LOG.debug('Flavor contains %s, using compute node '
+ 'filtering', extra_specs)
+
+                    # getting the extra spec setting for the flavor in the
+ # standard format of extra_spec:value
+ extra_spec = item.split(":")[1]
+ extra_spec_value = extra_specs.get(item)
+
+ # create a set of aggregate host which match
+ agg_hosts = set(itertools.chain(
+ *[agg.hosts for agg in
+ nova_client.aggregates.list() if
+ agg.metadata.get(extra_spec) == extra_spec_value]))
+
+ # update list of available hosts with
+ # host_aggregate cross-check
+ host_list = [elem for elem in host_list if
+ elem['host'] in agg_hosts]
+
+ LOG.debug('Available compute nodes: %s ', host_list)
+
+ return host_list
+
+ except nova_client_pkg.exceptions.Forbidden as error:
+ msg = 'Forbidden to get list of compute nodes'
+ raise ForbiddenException(msg) from error
+
+
+def does_flavor_exist(nova_client, flavor_name):
+ """
+ Check if flavor exists
+ """
+ for flavor in nova_client.flavors.list():
+ if flavor.name == flavor_name:
+ return True
+ return False
+
+
+def create_flavor(nova_client, **kwargs):
+ """
+ Create a flavor
+ """
+ try:
+ nova_client.flavors.create(**kwargs)
+ except nova_client_pkg.exceptions.Forbidden as error:
+ msg = 'Forbidden to create flavor'
+ raise ForbiddenException(msg) from error
+
+
+def get_server_ip(nova_client, server_name, ip_type):
+ """
+ Get IP of the compute
+ """
+ server = nova_client.servers.find(name=server_name)
+ addresses = server.addresses
+ ips = [v['addr'] for v in itertools.chain(*addresses.values())
+ if v['OS-EXT-IPS:type'] == ip_type]
+ if not ips:
+ raise Exception('Could not get IP address of server: %s' % server_name)
+ if len(ips) > 1:
+        raise Exception('Server %s has more than one IP address: %s' %
+ (server_name, ips))
+ return ips[0]
+
+
+def get_server_host_id(nova_client, server_name):
+ """
+ Get the host id
+ """
+ server = nova_client.servers.find(name=server_name)
+ return server.hostId
+
+
+def check_server_console(nova_client, server_id, len_limit=100):
+ """
+ Check Server console
+ """
+ try:
+ console = (nova_client.servers.get(server_id)
+ .get_console_output(len_limit))
+ except nova_client_pkg.exceptions.ClientException as exc:
+ LOG.warning('Error retrieving console output: %s. Ignoring', exc)
+ return None
+
+ for line in console.splitlines():
+ if (re.search(r'\[critical\]', line, flags=re.IGNORECASE) or
+ re.search(r'Cloud-init.*Datasource DataSourceNone\.', line)):
+ message = ('Instance %(id)s has critical cloud-init error: '
+ '%(msg)s. Check metadata service availability' %
+ dict(id=server_id, msg=line))
+ LOG.error(message)
+ return message
+ if re.search(r'\[error', line, flags=re.IGNORECASE):
+ LOG.error('Error message in instance %(id)s console: %(msg)s',
+ dict(id=server_id, msg=line))
+ elif re.search(r'warn', line, flags=re.IGNORECASE):
+ LOG.info('Warning message in instance %(id)s console: %(msg)s',
+ dict(id=server_id, msg=line))
+
+ return None
+
+
+def _poll_for_status(nova_client, server_id, final_ok_states, poll_period=20,
+ status_field="status"):
+ """
+ Poll for status
+ """
+ LOG.debug('Poll instance %(id)s, waiting for any of statuses %(statuses)s',
+ dict(id=server_id, statuses=final_ok_states))
+ while True:
+ obj = nova_client.servers.get(server_id)
+
+ err_msg = check_server_console(nova_client, server_id)
+ if err_msg:
+ raise Exception('Critical error in instance %s console: %s' %
+ (server_id, err_msg))
+
+ status = getattr(obj, status_field)
+ if status:
+ status = status.lower()
+
+ LOG.debug('Instance %(id)s has status %(status)s',
+ dict(id=server_id, status=status))
+
+ if status in final_ok_states:
+ break
+ if status in ('error', 'paused'):
+ raise Exception(obj.fault['message'])
+
+ time.sleep(poll_period)
+
+
+def wait_server_shutdown(nova_client, server_id):
+ """
+ Wait server shutdown
+ """
+ _poll_for_status(nova_client, server_id, ['shutoff'])
+
+
+def wait_server_snapshot(nova_client, server_id):
+ """
+ Wait server snapshot
+ """
+ task_state_field = "OS-EXT-STS:task_state"
+ server = nova_client.servers.get(server_id)
+ if hasattr(server, task_state_field):
+ _poll_for_status(nova_client, server.id, [None, '-', ''],
+ status_field=task_state_field)
+
+
+def get_flavor(nova_client, flavor_name):
+ """
+ Get the flavor
+ """
+ for flavor in nova_client.flavors.list():
+ if flavor.name == flavor_name:
+ return flavor
+ return None
diff --git a/tools/os_deploy_tgen/osclients/openstack.py b/tools/os_deploy_tgen/osclients/openstack.py
new file mode 100644
index 00000000..58297e6c
--- /dev/null
+++ b/tools/os_deploy_tgen/osclients/openstack.py
@@ -0,0 +1,82 @@
+# Copyright (c) 2020 Mirantis Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+OpenStack Client - Main File
+"""
+
+import os_client_config
+from oslo_log import log as logging
+from oslo_utils import importutils
+
+LOG = logging.getLogger(__name__)
+
+
+class OpenStackClientException(Exception):
+    """
+    Custom Exception
+    """
+
+
+def init_profiling(os_profile):
+ """
+ Initialize Profiling
+ """
+ if os_profile:
+ osprofiler_profiler = importutils.try_import("osprofiler.profiler")
+
+ if osprofiler_profiler: # lib is present
+ osprofiler_profiler.init(os_profile)
+ trace_id = osprofiler_profiler.get().get_base_id()
+ LOG.info('Profiling is enabled, trace id: %s', trace_id)
+ else: # param is set, but lib is not present
+ LOG.warning('Profiling could not be enabled. To enable profiling '
+ 'please install "osprofiler" library')
+
+
+class OpenStackClient():
+ """
+ Client Class
+ """
+ def __init__(self, openstack_params):
+ """
+ Initialize
+ """
+ LOG.debug('Establishing connection to OpenStack')
+
+ init_profiling(openstack_params.get('os_profile'))
+
+ config = os_client_config.OpenStackConfig()
+ cloud_config = config.get_one_cloud(**openstack_params)
+ if openstack_params['os_insecure']:
+ cloud_config.config['verify'] = False
+ cloud_config.config['cacert'] = None
+ self.keystone_session = cloud_config.get_session()
+ self.nova = cloud_config.get_legacy_client('compute')
+ self.neutron = cloud_config.get_legacy_client('network')
+ self.glance = cloud_config.get_legacy_client('image')
+
+ # heat client wants endpoint to be always set
+ endpoint = cloud_config.get_session_endpoint('orchestration')
+ if not endpoint:
+ raise OpenStackClientException(
+ 'Endpoint for orchestration service is not found')
+ self.heat = cloud_config.get_legacy_client('orchestration',
+ endpoint=endpoint)
+
+ # Ping OpenStack
+ self.keystone_session.get_token()
+
+ LOG.info('Connection to OpenStack is initialized')
diff --git a/tools/os_deploy_tgen/osdt.py b/tools/os_deploy_tgen/osdt.py
new file mode 100644
index 00000000..0aad8597
--- /dev/null
+++ b/tools/os_deploy_tgen/osdt.py
@@ -0,0 +1,601 @@
+# Copyright 2020 Spirent Communications, Mirantis
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Code to deploy the traffic generator on OpenStack.
+This code is based on OpenStack Shaker.
+"""
+
+
+import collections
+import functools
+import random
+#import sys
+import os
+import copy
+import logging
+#import json
+import jinja2
+#import shutil
+#import datetime
+#import time
+
+#from conf import merge_spec
+from conf import settings as S
+
+from tools.os_deploy_tgen.utilities import utils
+from tools.os_deploy_tgen.osclients import heat
+from tools.os_deploy_tgen.osclients import neutron
+from tools.os_deploy_tgen.osclients import nova
+from tools.os_deploy_tgen.osclients import openstack
+
+LOG = logging.getLogger(__name__)
+_CURR_DIR = os.path.dirname(os.path.realpath(__file__))
+
+class DeploymentException(Exception):
+ """ Exception Handling """
+
+
+def prepare_for_cross_az(compute_nodes, zones):
+ """
+ Deployment across Availability Zones
+ """
+ if len(zones) != 2:
+ LOG.info('cross_az is specified, but len(zones) is not 2')
+ return compute_nodes
+
+ masters = []
+ slaves = []
+ for node in compute_nodes:
+ if node['zone'] == zones[0]:
+ masters.append(node)
+ else:
+ slaves.append(node)
+
+ res = []
+ for i in range(min(len(masters), len(slaves))):
+ res.append(masters[i])
+ res.append(slaves[i])
+
+ return res
+
+
+def generate_agents(compute_nodes, accommodation, unique):
+ """
+ Generate TestVNF Instances
+ """
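+    # Illustrative example (hypothetical values): with accommodation normalized
+    # to {'pair': True, 'single_room': True, 'compute_nodes': 2} and two
+    # compute nodes available, this yields '<unique>_master_0' on the first
+    # node and '<unique>_slave_0' on the second.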
+    LOG.debug('Candidate compute nodes: %s', compute_nodes)
+ density = accommodation.get('density') or 1
+
+ zones = accommodation.get('zones')
+ if zones:
+ compute_nodes = [
+ c for c in compute_nodes if c['zone'] in zones or
+ ':'.join(filter(None, [c['zone'], c['host']])) in zones]
+ if 'cross_az' in accommodation:
+ compute_nodes = prepare_for_cross_az(compute_nodes, zones)
+
+ best_effort = accommodation.get('best_effort', False)
+ compute_nodes_requested = accommodation.get('compute_nodes')
+ if compute_nodes_requested:
+ if compute_nodes_requested > len(compute_nodes):
+            LOG.debug('Only %s compute nodes are available', len(compute_nodes))
+ if best_effort:
+ LOG.info('Allowing best_effort accommodation:')
+ else:
+ raise DeploymentException(
+                    'Not enough compute nodes %(cn)s for requested '
+ 'instance accommodation %(acc)s' %
+ dict(cn=compute_nodes, acc=accommodation))
+ else:
+ compute_nodes = random.sample(compute_nodes,
+ compute_nodes_requested)
+
+ cn_count = len(compute_nodes)
+ iterations = cn_count * density
+ ite = 0
+ if 'single_room' in accommodation and 'pair' in accommodation:
+ # special case to allow pair, single_room on single compute node
+ if best_effort and iterations == 1:
+ LOG.info('Allowing best_effort accommodation: '
+ 'single_room, pair on one compute node')
+ else:
+ iterations //= 2
+ node_formula = lambda x: compute_nodes[x % cn_count]
+
+ agents = {}
+
+ for ite in range(iterations):
+ if 'pair' in accommodation:
+ master_id = '%s_master_%s' % (unique, ite)
+ slave_id = '%s_slave_%s' % (unique, ite)
+ master = dict(id=master_id, mode='master', slave_id=slave_id)
+ slave = dict(id=slave_id, mode='slave', master_id=master_id)
+
+ if 'single_room' in accommodation:
+ master_formula = lambda x: ite * 2
+ slave_formula = lambda x: ite * 2 + 1
+ elif 'double_room' in accommodation:
+ master_formula = lambda x: ite
+ slave_formula = lambda x: ite
+ else: # mixed_room
+ master_formula = lambda x: ite
+ slave_formula = lambda x: ite + 1
+
+ mas = node_formula(master_formula(ite))
+ master['node'], master['zone'] = mas['host'], mas['zone']
+ sla = node_formula(slave_formula(ite))
+ slave['node'], slave['zone'] = sla['host'], sla['zone']
+
+ agents[master['id']] = master
+ agents[slave['id']] = slave
+ else:
+ if 'single_room' in accommodation:
+ agent_id = '%s_agent_%s' % (unique, ite)
+ agents[agent_id] = dict(id=agent_id,
+ node=node_formula(ite)['host'],
+ zone=node_formula(ite)['zone'],
+ mode='alone')
+
+ if not agents:
+ raise DeploymentException('Not enough compute nodes %(cn)s for '
+ 'requested instance accommodation %(acc)s' %
+ dict(cn=compute_nodes, acc=accommodation))
+
+ # inject availability zone
+ for agent in agents.values():
+ avz = agent['zone']
+ if agent['node']:
+ avz += ':' + agent['node']
+ agent['availability_zone'] = avz
+
+ return agents
+
+
+def _get_stack_values(stack_outputs, vm_name, params):
+ """
+ Collect the output from Heat Stack Deployment
+ """
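+    # The Heat templates name their outputs '<vm_name>_<param>' (see the .hot
+    # files under templates/), so e.g. params=['ip'] for vm_name 'x_master_0'
+    # reads the stack output 'x_master_0_ip'.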
+ result = {}
+ for param in params:
+ out = stack_outputs.get(vm_name + '_' + param)
+ if out:
+ result[param] = out
+ return result
+
+
+def filter_agents(agents, stack_outputs, override=None):
+ """
+ Filter Deployed Instances - If Required.
+ """
+ deployed_agents = {}
+
+ # first pass, ignore non-deployed
+ for agent in agents.values():
+ stack_values = _get_stack_values(stack_outputs, agent['id'], ['ip'])
+ new_stack_values = _get_stack_values(stack_outputs, agent['id'], ['pip'])
+ mac_values = _get_stack_values(stack_outputs, agent['id'], ['dmac'])
+
+ if override:
+ stack_values.update(override(agent))
+
+ if not stack_values.get('ip'):
+ LOG.info('Ignore non-deployed agent: %s', agent)
+ continue
+
+ if not new_stack_values.get('pip'):
+ LOG.info('Ignore non-deployed agent: %s', agent)
+ continue
+
+ if not mac_values.get('dmac'):
+ LOG.info('Ignore non-deployed agent: %s', agent)
+ continue
+
+ agent.update(stack_values)
+ agent.update(new_stack_values)
+
+ # workaround of Nova bug 1422686
+ if agent.get('mode') == 'slave' and not agent.get('ip'):
+ LOG.info('IP address is missing in agent: %s', agent)
+ continue
+
+ deployed_agents[agent['id']] = agent
+
+ # second pass, check pairs
+ result = {}
+ for agent in deployed_agents.values():
+        LOG.debug('Agent %s: mode=%s ip=%s pip=%s dmac=%s', agent['id'],
+                  agent.get('mode'), agent.get('ip'), agent.get('pip'),
+                  agent.get('dmac'))
+ if (agent.get('mode') == 'alone' or
+ (agent.get('mode') == 'master' and
+ agent.get('slave_id') in deployed_agents) or
+ (agent.get('mode') == 'slave' and
+ agent.get('master_id') in deployed_agents)):
+ result[agent['id']] = agent
+
+ return result
+
+
+def distribute_agents(agents, get_host_fn):
+ """
+ Distribute TestVNF Instances
+ """
+ result = {}
+
+ hosts = set()
+ buckets = collections.defaultdict(list)
+ for agent in agents.values():
+ agent_id = agent['id']
+ # we assume that server name equals to agent_id
+ host_id = get_host_fn(agent_id)
+
+ if host_id not in hosts:
+ hosts.add(host_id)
+ agent['node'] = host_id
+ buckets[agent['mode']].append(agent)
+ else:
+ LOG.info('Filter out agent %s, host %s is already occupied',
+ agent_id, host_id)
+
+ if buckets['alone']:
+ result = dict((a['id'], a) for a in buckets['alone'])
+ else:
+ for master, slave in zip(buckets['master'], buckets['slave']):
+ master['slave_id'] = slave['id']
+ slave['master_id'] = master['id']
+
+ result[master['id']] = master
+ result[slave['id']] = slave
+
+ return result
+
+
+def normalize_accommodation(accommodation):
+ """
+    Planning the accommodation of TestVNFs
+ """
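+    # e.g. the scenario YAML list [pair, single_room, {'compute_nodes': 2}]
+    # becomes {'pair': True, 'single_room': True, 'compute_nodes': 2}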
+ result = {}
+
+ for stk in accommodation:
+ if isinstance(stk, dict):
+ result.update(stk)
+ else:
+ result[stk] = True
+
+ # override scenario's availability zone accommodation
+ if S.hasValue('SCENARIO_AVAILABILITY_ZONE'):
+ result['zones'] = S.getValue('SCENARIO_AVAILABILITY_ZONE')
+ # override scenario's compute_nodes accommodation
+ if S.hasValue('SCENARIO_COMPUTE_NODES'):
+ result['compute_nodes'] = S.getValue('SCENARIO_COMPUTE_NODES')
+
+ return result
+
+
+class Deployment():
+ """
+ Main Deployment Class
+ """
+ def __init__(self):
+ """
+ Initialize
+ """
+ self.openstack_client = None
+ self.stack_id = None
+ self.privileged_mode = True
+ self.flavor_name = None
+ self.image_name = None
+ self.stack_name = None
+ self.external_net = None
+ self.dns_nameservers = None
+ # The current run "owns" the support stacks, it is tracked
+ # so it can be deleted later.
+ self.support_stacks = []
+ self.trackstack = collections.namedtuple('TrackStack', 'name id')
+
+ def connect_to_openstack(self, openstack_params, flavor_name, image_name,
+ external_net, dns_nameservers):
+ """
+ Connect to Openstack
+ """
+ LOG.debug('Connecting to OpenStack')
+
+ self.openstack_client = openstack.OpenStackClient(openstack_params)
+
+ self.flavor_name = flavor_name
+ self.image_name = image_name
+
+ if S.hasValue('STACK_NAME'):
+ self.stack_name = S.getValue('STACK_NAME')
+ else:
+ self.stack_name = 'testvnf_%s' % utils.random_string()
+
+ self.dns_nameservers = dns_nameservers
+        # initializing self.external_net last so that other attributes don't
+ # remain uninitialized in case user forgets to create external network
+ self.external_net = (external_net or
+ neutron.choose_external_net(
+ self.openstack_client.neutron))
+
+ def _get_compute_nodes(self, accommodation):
+ """
+        Get available compute nodes
+ """
+ try:
+ comps = nova.get_available_compute_nodes(self.openstack_client.nova,
+ self.flavor_name)
+ return comps
+ except nova.ForbiddenException:
+ # user has no permissions to list compute nodes
+ LOG.info('OpenStack user does not have permission to list compute '
+                     'nodes - treating the user as non-admin')
+ self.privileged_mode = False
+ count = accommodation.get('compute_nodes')
+ if not count:
+ raise DeploymentException(
+ 'When run with non-admin user the scenario must specify '
+ 'number of compute nodes to use')
+
+ zones = accommodation.get('zones') or ['nova']
+ return [dict(host=None, zone=zones[n % len(zones)])
+ for n in range(count)]
+
+ #def _deploy_from_hot(self, specification, server_endpoint, base_dir=None):
+ def _deploy_from_hot(self, specification, base_dir=None):
+ """
+ Perform Heat stack deployment
+ """
+ accommodation = normalize_accommodation(
+ specification.get('accommodation') or
+ specification.get('vm_accommodation'))
+
+ agents = generate_agents(self._get_compute_nodes(accommodation),
+ accommodation, self.stack_name)
+
+ # render template by jinja
+ vars_values = {
+ 'agents': agents,
+ 'unique': self.stack_name,
+ }
+ heat_template = utils.read_file(specification['template'],
+ base_dir=base_dir)
+ compiled_template = jinja2.Template(heat_template)
+ rendered_template = compiled_template.render(vars_values)
+ LOG.info('Rendered template: %s', rendered_template)
+
+ # create stack by Heat
+ try:
+ merged_parameters = {
+ 'external_net': self.external_net,
+ 'image': self.image_name,
+ 'flavor': self.flavor_name,
+ 'dns_nameservers': self.dns_nameservers,
+ }
+ except AttributeError as err:
+ LOG.error('Failed to gather required parameters to create '
+ 'heat stack: %s', err)
+ raise
+
+ merged_parameters.update(specification.get('template_parameters', {}))
+ try:
+ self.stack_id = heat.create_stack(
+ self.openstack_client.heat, self.stack_name,
+ rendered_template, merged_parameters, None)
+ except heat.exc.StackFailure as err:
+ self.stack_id = err.args[0]
+ raise
+
+ # get info about deployed objects
+ outputs = heat.get_stack_outputs(self.openstack_client.heat,
+ self.stack_id)
+ override = self._get_override(specification.get('override'))
+
+ agents = filter_agents(agents, outputs, override)
+
+ if (not self.privileged_mode) and accommodation.get('density', 1) == 1:
+ get_host_fn = functools.partial(nova.get_server_host_id,
+ self.openstack_client.nova)
+ agents = distribute_agents(agents, get_host_fn)
+
+ return agents
+
+ def _get_override(self, override_spec):
+ """
+ Collect the overrides
+ """
+ def override_ip(agent, ip_type):
+ """
+ Override the IP
+ """
+ return dict(ip=nova.get_server_ip(
+ self.openstack_client.nova, agent['id'], ip_type))
+
+ if override_spec:
+ if override_spec.get('ip'):
+ return functools.partial(override_ip,
+ ip_type=override_spec.get('ip'))
+
+
+ #def deploy(self, deployment, base_dir=None, server_endpoint=None):
+ def deploy(self, deployment, base_dir=None):
+ """
+ Perform Deployment
+ """
+ agents = {}
+
+ if not deployment:
+ # local mode, create fake agent
+ agents.update(dict(local=dict(id='local', mode='alone',
+ node='localhost')))
+
+        if deployment and deployment.get('template'):
+ if self.openstack_client:
+ # deploy topology specified by HOT
+ agents.update(self._deploy_from_hot(
+ #deployment, server_endpoint, base_dir=base_dir))
+ deployment, base_dir=base_dir))
+ else:
+ raise DeploymentException(
+ 'OpenStack client is not initialized. '
+                    'Template-based deployment cannot proceed.')
+
+        if deployment.get('agents'):
+            # agents are specified statically
+            agents.update(dict((a['id'], a) for a in deployment.get('agents')))
+
+        if not agents:
+            raise Exception('No agents deployed.')
+
+ return agents
+
+def read_scenario(scenario_name):
+ """
+ Collect all Information about the scenario
+ """
+ scenario_file_name = scenario_name
+ LOG.debug('Scenario %s is resolved to %s', scenario_name,
+ scenario_file_name)
+
+ scenario = utils.read_yaml_file(scenario_file_name)
+
+ schema = utils.read_yaml_file(S.getValue('SCHEMA'))
+ utils.validate_yaml(scenario, schema)
+
+ scenario['title'] = scenario.get('title') or scenario_file_name
+ scenario['file_name'] = scenario_file_name
+
+ return scenario
+
+def _extend_agents(agents_map):
+ """
+ Add More info to deployed Instances
+ """
+ extended_agents = {}
+ for agent in agents_map.values():
+ extended = copy.deepcopy(agent)
+ if agent.get('slave_id'):
+ extended['slave'] = copy.deepcopy(agents_map[agent['slave_id']])
+ if agent.get('master_id'):
+ extended['master'] = copy.deepcopy(agents_map[agent['master_id']])
+ extended_agents[agent['id']] = extended
+ return extended_agents
+
+def play_scenario(scenario):
+ """
+ Deploy a scenario
+ """
+ deployment = None
+ output = dict(scenarios={}, agents={})
+ output['scenarios'][scenario['title']] = scenario
+
+ try:
+ deployment = Deployment()
+
+ openstack_params = utils.pack_openstack_params()
+ try:
+ deployment.connect_to_openstack(
+ openstack_params, S.getValue('FLAVOR_NAME'),
+ S.getValue('IMAGE_NAME'), S.getValue('EXTERNAL_NET'),
+ S.getValue('DNS_NAMESERVERS'))
+ except BaseException as excep:
+ LOG.warning('Failed to connect to OpenStack: %s. Please '
+ 'verify parameters: %s', excep, openstack_params)
+
+ base_dir = os.path.dirname(scenario['file_name'])
+ scenario_deployment = scenario.get('deployment', {})
+ agents = deployment.deploy(scenario_deployment, base_dir=base_dir)
+
+ if not agents:
+ raise Exception('No agents deployed.')
+
+ agents = _extend_agents(agents)
+ output['agents'] = agents
+ LOG.debug('Deployed agents: %s', agents)
+
+ except BaseException as excep:
+ if isinstance(excep, KeyboardInterrupt):
+ LOG.info('Caught SIGINT. Terminating')
+ # record = dict(id=utils.make_record_id(), status='interrupted')
+ else:
+ error_msg = 'Error while executing scenario: %s' % excep
+ LOG.exception(error_msg)
+ return output
+
+def act():
+ """
+ Kickstart the Scenario Deployment
+ """
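+    # NOTE: this returns from inside the loop, so only the first scenario
+    # listed in SCENARIOS is deployed per run.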
+ for scenario_name in S.getValue('SCENARIOS'):
+        LOG.info('Play scenario: %s', scenario_name)
+        scenario = read_scenario(scenario_name)
+        play_output = play_scenario(scenario)
+        return play_output
+ return None
+
+def update_vsperf_configuration(agents):
+ """
+ Create Configuration file for VSPERF.
+ """
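+    # e.g. with two deployed agents, agents[0]['public_ip'] becomes the EAST
+    # chassis address and agents[1]['public_ip'] the WEST chassis address.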
+ tgen = S.getValue('TRAFFICGEN')
+ east_chassis_ip = agents[0]['public_ip']
+ # east_data_ip = agents[0]['private_ip']
+ if len(agents) == 2:
+ west_chassis_ip = agents[1]['public_ip']
+ # west_data_ip = agents[1]['private_ip']
+ else:
+ west_chassis_ip = east_chassis_ip
+ # west_data_ip = east_chassis_ip
+ if "TestCenter" in tgen:
+ S.setValue('TRAFFICGEN_STC_EAST_CHASSIS_ADDR', east_chassis_ip)
+ S.setValue('TRAFFICGEN_STC_WEST_CHASSIS_ADDR', west_chassis_ip)
+ if "Ix" in tgen:
+ S.setValue("TRAFFICGEN_EAST_IXIA_HOST", east_chassis_ip)
+ S.setValue("TRAFFICGEN_WEST_IXIA_HOST", west_chassis_ip)
+
+def deploy_testvnf():
+ """
+ Starting function.
+ """
+ output = act()
+ list_of_agents = []
+ if output:
+        for name, agent in output['agents'].items():
+            list_of_agents.append({'name': name,
+                                   'private_ip': agent['ip'],
+                                   'public_ip': agent['pip'],
+                                   'compute_node': agent['node']})
+ if list_of_agents:
+ update_vsperf_configuration(list_of_agents)
+ return True
+ return False
+
+if __name__ == "__main__":
+ deploy_testvnf()
diff --git a/tools/os_deploy_tgen/templates/hotfiles.md b/tools/os_deploy_tgen/templates/hotfiles.md
new file mode 100644
index 00000000..6e21157e
--- /dev/null
+++ b/tools/os_deploy_tgen/templates/hotfiles.md
@@ -0,0 +1,13 @@
+# How to use these HOT Files.
+
+These HOT files are referenced in the YAML scenario files.
+Please ensure you are using the correct HOT file.
+
+## L2 - No Routers are setup - Same Subnet.
+
+l2fip.hot - Floating IP is configured. Use this if the OpenStack environment supports floating IPs.
+l2up.hot - Use this if you want a username and password configured for the TestVNFs.
+l2.hot - Use this if the 2 interfaces have fixed IPs from 2 different networks. This applies when the TestVNF has connectivity to a provider network.
+
+## L3 - Routers are setup - Different Subnets
+l3.hot - Set up TestVNFs on two different subnets and connect them with a router.
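+
+## Example scenario reference
+
+A scenario YAML file selects a HOT file through its `deployment.template` key,
+for example (as in `l2_2c_2i.yaml` in this directory):
+
+    deployment:
+      template: l2fip.hot
+      accommodation: [pair, single_room, compute_nodes: 2]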
diff --git a/tools/os_deploy_tgen/templates/l2.hot b/tools/os_deploy_tgen/templates/l2.hot
new file mode 100644
index 00000000..226e8433
--- /dev/null
+++ b/tools/os_deploy_tgen/templates/l2.hot
@@ -0,0 +1,89 @@
+heat_template_version: 2013-05-23
+
+description:
+ This Heat template creates a new Neutron network, a router to the external
+ network and plugs instances into this new network. All instances are located
+ in the same L2 domain.
+
+parameters:
+ image:
+ type: string
+ description: Name of image to use for servers
+ flavor:
+ type: string
+ description: Flavor to use for servers
+ external_net:
+ type: string
+ description: ID or name of external network
+# server_endpoint:
+# type: string
+# description: Server endpoint address
+ dns_nameservers:
+ type: comma_delimited_list
+ description: DNS nameservers for the subnet
+
+resources:
+ private_net:
+ type: OS::Neutron::Net
+ properties:
+ name: {{ unique }}_net
+
+ private_subnet:
+ type: OS::Neutron::Subnet
+ properties:
+ network_id: { get_resource: private_net }
+ cidr: 172.172.172.0/24
+ dns_nameservers: { get_param: dns_nameservers }
+
+ router:
+ type: OS::Neutron::Router
+ properties:
+ external_gateway_info:
+ network: { get_param: external_net }
+
+ router_interface:
+ type: OS::Neutron::RouterInterface
+ properties:
+ router_id: { get_resource: router }
+ subnet_id: { get_resource: private_subnet }
+
+{% for agent in agents.values() %}
+
+ {{ agent.id }}:
+ type: OS::Nova::Server
+ properties:
+ name: {{ agent.id }}
+ image: { get_param: image }
+ flavor: { get_param: flavor }
+ availability_zone: "{{ agent.availability_zone }}"
+ networks:
+ - port: { get_resource: {{ agent.id }}_port }
+ - port: { get_resource: {{ agent.id }}_mgmt_port }
+
+ {{ agent.id }}_port:
+ type: OS::Neutron::Port
+ properties:
+ network_id: { get_resource: private_net }
+ fixed_ips:
+ - subnet_id: { get_resource: private_subnet }
+
+ {{ agent.id }}_mgmt_port:
+ type: OS::Neutron::Port
+ properties:
+ network_id: { get_param: external_net }
+
+{% endfor %}
+
+outputs:
+{% for agent in agents.values() %}
+ {{ agent.id }}_instance_name:
+ value: { get_attr: [ {{ agent.id }}, instance_name ] }
+ {{ agent.id }}_ip:
+ value: { get_attr: [ {{ agent.id }}_port, fixed_ips, 0, ip_address ] }
+# value: { get_attr: [ {{ agent.id }}, networks, { get_attr: [private_net, name] }, 0 ] }
+ {{ agent.id }}_pip:
+ value: { get_attr: [ {{ agent.id }}_mgmt_port, fixed_ips, 0, ip_address ] }
+ {{ agent.id }}_dmac:
+ value: { get_attr: [ {{ agent.id }}_port, mac_address ] }
+
+{% endfor %}
diff --git a/tools/os_deploy_tgen/templates/l2_1c_1i.yaml b/tools/os_deploy_tgen/templates/l2_1c_1i.yaml
new file mode 100644
index 00000000..ec931107
--- /dev/null
+++ b/tools/os_deploy_tgen/templates/l2_1c_1i.yaml
@@ -0,0 +1,8 @@
+title: OpenStack L2 Performance
+
+description:
+    In this scenario tdep launches a single instance on a tenant network.
+
+deployment:
+ template: l2.hot
+ accommodation: [single_room, compute_nodes: 1]
diff --git a/tools/os_deploy_tgen/templates/l2_1c_2i.yaml b/tools/os_deploy_tgen/templates/l2_1c_2i.yaml
new file mode 100644
index 00000000..4241a80c
--- /dev/null
+++ b/tools/os_deploy_tgen/templates/l2_1c_2i.yaml
@@ -0,0 +1,10 @@
+title: OpenStack L2 Performance
+
+description:
+ In this scenario tdep launches 1 pair of instances in the same tenant
+    network. Both instances are hosted on the same compute node.
+ The traffic goes within the tenant network (L2 domain).
+
+deployment:
+ template: l2up.hot
+ accommodation: [pair, single_room, best_effort, compute_nodes: 1]
diff --git a/tools/os_deploy_tgen/templates/l2_2c_2i.yaml b/tools/os_deploy_tgen/templates/l2_2c_2i.yaml
new file mode 100644
index 00000000..b1f54f0a
--- /dev/null
+++ b/tools/os_deploy_tgen/templates/l2_2c_2i.yaml
@@ -0,0 +1,10 @@
+title: OpenStack L2 Performance
+
+description:
+ In this scenario tdep launches 1 pair of instances in the same tenant
+ network. Each instance is hosted on a separate compute node. The traffic goes
+ within the tenant network (L2 domain).
+
+deployment:
+ template: l2fip.hot
+ accommodation: [pair, single_room, compute_nodes: 2]
diff --git a/tools/os_deploy_tgen/templates/l2_old.hot b/tools/os_deploy_tgen/templates/l2_old.hot
new file mode 100644
index 00000000..d2553d76
--- /dev/null
+++ b/tools/os_deploy_tgen/templates/l2_old.hot
@@ -0,0 +1,93 @@
+heat_template_version: 2013-05-23
+
+description:
+ This Heat template creates a new Neutron network, a router to the external
+ network and plugs instances into this new network. All instances are located
+ in the same L2 domain.
+
+parameters:
+ image:
+ type: string
+ description: Name of image to use for servers
+ flavor:
+ type: string
+ description: Flavor to use for servers
+ external_net:
+ type: string
+ description: ID or name of external network
+# server_endpoint:
+# type: string
+# description: Server endpoint address
+ dns_nameservers:
+ type: comma_delimited_list
+ description: DNS nameservers for the subnet
+
+resources:
+ private_net:
+ type: OS::Neutron::Net
+ properties:
+ name: {{ unique }}_net
+
+ private_subnet:
+ type: OS::Neutron::Subnet
+ properties:
+ network_id: { get_resource: private_net }
+ cidr: 10.0.0.0/16
+ dns_nameservers: { get_param: dns_nameservers }
+
+ router:
+ type: OS::Neutron::Router
+ properties:
+ external_gateway_info:
+ network: { get_param: external_net }
+
+ router_interface:
+ type: OS::Neutron::RouterInterface
+ properties:
+ router_id: { get_resource: router }
+ subnet_id: { get_resource: private_subnet }
+
+ server_security_group:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ rules: [
+ {remote_ip_prefix: 0.0.0.0/0,
+ protocol: tcp,
+ port_range_min: 1,
+ port_range_max: 65535},
+ {remote_ip_prefix: 0.0.0.0/0,
+ protocol: udp,
+ port_range_min: 1,
+ port_range_max: 65535},
+ {remote_ip_prefix: 0.0.0.0/0,
+ protocol: icmp}]
+
+{% for agent in agents.values() %}
+
+ {{ agent.id }}:
+ type: OS::Nova::Server
+ properties:
+ name: {{ agent.id }}
+ image: { get_param: image }
+ flavor: { get_param: flavor }
+ availability_zone: "{{ agent.availability_zone }}"
+ networks:
+ - port: { get_resource: {{ agent.id }}_port }
+
+ {{ agent.id }}_port:
+ type: OS::Neutron::Port
+ properties:
+ network_id: { get_resource: private_net }
+ fixed_ips:
+ - subnet_id: { get_resource: private_subnet }
+ security_groups: [{ get_resource: server_security_group }]
+
+{% endfor %}
+
+outputs:
+{% for agent in agents.values() %}
+ {{ agent.id }}_instance_name:
+ value: { get_attr: [ {{ agent.id }}, instance_name ] }
+ {{ agent.id }}_ip:
+ value: { get_attr: [ {{ agent.id }}, networks, { get_attr: [private_net, name] }, 0 ] }
+{% endfor %}
diff --git a/tools/os_deploy_tgen/templates/l2fip.hot b/tools/os_deploy_tgen/templates/l2fip.hot
new file mode 100644
index 00000000..4d4b52f7
--- /dev/null
+++ b/tools/os_deploy_tgen/templates/l2fip.hot
@@ -0,0 +1,122 @@
+heat_template_version: 2013-05-23
+
+description:
+ This Heat template creates a new Neutron network, a router to the external
+ network and plugs instances into this new network. All instances are located
+ in the same L2 domain.
+
+parameters:
+ image:
+ type: string
+ description: Name of image to use for servers
+ flavor:
+ type: string
+ description: Flavor to use for servers
+ external_net:
+ type: string
+ description: ID or name of external network
+# server_endpoint:
+# type: string
+# description: Server endpoint address
+ dns_nameservers:
+ type: comma_delimited_list
+ description: DNS nameservers for the subnet
+
+resources:
+ user_config:
+ type: OS::Heat::CloudConfig
+ properties:
+ cloud_config:
+ spirent:
+ driver: "sockets"
+
+ private_net:
+ type: OS::Neutron::Net
+ properties:
+ name: {{ unique }}_net
+ port_security_enabled: false
+
+ private_subnet:
+ type: OS::Neutron::Subnet
+ properties:
+ network_id: { get_resource: private_net }
+ cidr: 172.172.172.0/24
+ dns_nameservers: { get_param: dns_nameservers }
+
+ private_datanet:
+ type: OS::Neutron::Net
+ properties:
+ name: {{ unique }}_datanet
+ port_security_enabled: false
+
+ private_datasubnet:
+ type: OS::Neutron::Subnet
+ properties:
+ network_id: { get_resource: private_datanet }
+ cidr: 172.172.168.0/24
+ dns_nameservers: { get_param: dns_nameservers }
+
+ router:
+ type: OS::Neutron::Router
+ properties:
+ external_gateway_info:
+ network: { get_param: external_net }
+
+ router_interface:
+ type: OS::Neutron::RouterInterface
+ properties:
+ router_id: { get_resource: router }
+ subnet_id: { get_resource: private_subnet }
+
+{% for agent in agents.values() %}
+
+ {{ agent.id }}:
+ type: OS::Nova::Server
+ properties:
+ name: {{ agent.id }}
+ image: { get_param: image }
+ flavor: { get_param: flavor }
+ availability_zone: "{{ agent.availability_zone }}"
+ networks:
+ - port: { get_resource: {{ agent.id }}_port }
+ - port: { get_resource: {{ agent.id }}_dataport }
+
+ {{ agent.id }}_port:
+ type: OS::Neutron::Port
+ properties:
+ network_id: { get_resource: private_net }
+ port_security_enabled: false
+ fixed_ips:
+ - subnet_id: { get_resource: private_subnet }
+
+ {{ agent.id }}_dataport:
+ type: OS::Neutron::Port
+ properties:
+ network_id: { get_resource: private_datanet }
+ port_security_enabled: false
+ fixed_ips:
+ - subnet_id: { get_resource: private_datasubnet }
+
+ {{ agent.id }}_fip_port:
+ type: OS::Neutron::FloatingIP
+ depends_on:
+ - router_interface
+ properties:
+ floating_network: { get_param: external_net }
+ port_id: { get_resource: {{ agent.id }}_port }
+
+
+{% endfor %}
+
+outputs:
+{% for agent in agents.values() %}
+ {{ agent.id }}_instance_name:
+ value: { get_attr: [ {{ agent.id }}, instance_name ] }
+ {{ agent.id }}_ip:
+ value: { get_attr: [ {{ agent.id }}_dataport, fixed_ips, 0, ip_address ] }
+ {{ agent.id }}_pip:
+ value: { get_attr: [ {{ agent.id }}_fip_port, floating_ip_address ] }
+ {{ agent.id }}_dmac:
+ value: { get_attr: [ {{ agent.id }}_dataport, mac_address ] }
+
+{% endfor %}
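
Each agent in l2fip.hot exposes four stack outputs: the Nova instance name, the data-plane fixed IP, the management floating IP and the data-port MAC. A hedged sketch of reading them back once the stack is created; the authenticated heat client and the stack name are assumptions:

    # Sketch: reading per-agent outputs from a created stack with
    # python-heatclient. 'heat' is assumed to be an authenticated
    # heatclient Client; the stack and agent names are illustrative.
    stack = heat.stacks.get('vsperf_l2fip')
    outputs = {o['output_key']: o['output_value'] for o in stack.outputs}

    agent_ip = outputs['agent_01_ip']     # data-plane fixed IP
    agent_fip = outputs['agent_01_pip']   # management floating IP
    agent_mac = outputs['agent_01_dmac']  # data-port MAC for the traffic gen
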
diff --git a/tools/os_deploy_tgen/templates/l2up.hot b/tools/os_deploy_tgen/templates/l2up.hot
new file mode 100644
index 00000000..58f25831
--- /dev/null
+++ b/tools/os_deploy_tgen/templates/l2up.hot
@@ -0,0 +1,126 @@
+heat_template_version: 2013-05-23
+
+description:
+ This Heat template creates a new Neutron network, a router to the external
+ network and plugs instances into this new network. All instances are located
+ in the same L2 domain.
+
+parameters:
+ image:
+ type: string
+ description: Name of image to use for servers
+ flavor:
+ type: string
+ description: Flavor to use for servers
+ external_net:
+ type: string
+ description: ID or name of external network
+# server_endpoint:
+# type: string
+# description: Server endpoint address
+ dns_nameservers:
+ type: comma_delimited_list
+ description: DNS nameservers for the subnet
+
+resources:
+ private_net:
+ type: OS::Neutron::Net
+ properties:
+ name: {{ unique }}_net
+
+ private_subnet:
+ type: OS::Neutron::Subnet
+ properties:
+ network_id: { get_resource: private_net }
+ cidr: 172.172.172.0/24
+ dns_nameservers: { get_param: dns_nameservers }
+
+ router:
+ type: OS::Neutron::Router
+ properties:
+ external_gateway_info:
+ network: { get_param: external_net }
+
+ router_interface:
+ type: OS::Neutron::RouterInterface
+ properties:
+ router_id: { get_resource: router }
+ subnet_id: { get_resource: private_subnet }
+
+ user_config:
+ type: OS::Heat::CloudConfig
+ properties:
+ cloud_config:
+ users:
+ - default
+ - name: test
+ groups: "users,root"
+ lock-passwd: false
+ passwd: 'test'
+ shell: "/bin/bash"
+ sudo: "ALL=(ALL) NOPASSWD:ALL"
+ ssh_pwauth: true
+ chpasswd:
+ list: |
+ test:test
+ expire: False
+
+ server_security_group:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ rules: [
+ {remote_ip_prefix: 0.0.0.0/0,
+ protocol: tcp,
+ port_range_min: 1,
+ port_range_max: 65535},
+ {remote_ip_prefix: 0.0.0.0/0,
+ protocol: udp,
+ port_range_min: 1,
+ port_range_max: 65535},
+ {remote_ip_prefix: 0.0.0.0/0,
+ protocol: icmp}]
+
+{% for agent in agents.values() %}
+
+ {{ agent.id }}:
+ type: OS::Nova::Server
+ properties:
+ name: {{ agent.id }}
+ image: { get_param: image }
+ flavor: { get_param: flavor }
+ availability_zone: "{{ agent.availability_zone }}"
+ networks:
+ - port: { get_resource: {{ agent.id }}_port }
+ - port: { get_resource: {{ agent.id }}_mgmt_port }
+ user_data: {get_resource: user_config}
+ user_data_format: RAW
+
+ {{ agent.id }}_port:
+ type: OS::Neutron::Port
+ properties:
+ network_id: { get_resource: private_net }
+ fixed_ips:
+ - subnet_id: { get_resource: private_subnet }
+ security_groups: [{ get_resource: server_security_group }]
+
+ {{ agent.id }}_mgmt_port:
+ type: OS::Neutron::Port
+ properties:
+ network_id: { get_param: external_net }
+ security_groups: [{ get_resource: server_security_group }]
+
+{% endfor %}
+
+outputs:
+{% for agent in agents.values() %}
+ {{ agent.id }}_instance_name:
+ value: { get_attr: [ {{ agent.id }}, instance_name ] }
+ {{ agent.id }}_ip:
+ value: { get_attr: [ {{ agent.id }}_port, fixed_ips, 0, ip_address ] }
+# value: { get_attr: [ {{ agent.id }}, networks, { get_attr: [private_net, name] }, 0 ] }
+ {{ agent.id }}_pip:
+ value: { get_attr: [ {{ agent.id }}_mgmt_port, fixed_ips, 0, ip_address ] }
+ {{ agent.id }}_dmac:
+ value: { get_attr: [ {{ agent.id }}_port, mac_address ] }
+
+{% endfor %}
diff --git a/tools/os_deploy_tgen/templates/l3.hot b/tools/os_deploy_tgen/templates/l3.hot
new file mode 100644
index 00000000..4a5ea02c
--- /dev/null
+++ b/tools/os_deploy_tgen/templates/l3.hot
@@ -0,0 +1,125 @@
+heat_template_version: 2013-05-23
+
+description:
+ This Heat template creates a pair of networks plugged into the same router.
+  Master instances and slave instances are connected to different networks.
+
+parameters:
+ image:
+ type: string
+ description: Name of image to use for servers
+ flavor:
+ type: string
+ description: Flavor to use for servers
+ external_net:
+ type: string
+ description: ID or name of external network for which floating IP addresses will be allocated
+# server_endpoint:
+# type: string
+# description: Server endpoint address
+ dns_nameservers:
+ type: comma_delimited_list
+ description: DNS nameservers for the subnets
+
+resources:
+ east_private_net:
+ type: OS::Neutron::Net
+ properties:
+ name: {{ unique }}_net_east
+
+ east_private_subnet:
+ type: OS::Neutron::Subnet
+ properties:
+ network_id: { get_resource: east_private_net }
+ cidr: 10.1.0.0/16
+ dns_nameservers: { get_param: dns_nameservers }
+
+ router:
+ type: OS::Neutron::Router
+ properties:
+ external_gateway_info:
+ network: { get_param: external_net }
+
+ router_interface:
+ type: OS::Neutron::RouterInterface
+ properties:
+ router_id: { get_resource: router }
+ subnet_id: { get_resource: east_private_subnet }
+
+ west_private_net:
+ type: OS::Neutron::Net
+ properties:
+ name: {{ unique }}_net_west
+
+ west_private_subnet:
+ type: OS::Neutron::Subnet
+ properties:
+ network_id: { get_resource: west_private_net }
+ cidr: 10.2.0.0/16
+ dns_nameservers: { get_param: dns_nameservers }
+
+ router_interface_2:
+ type: OS::Neutron::RouterInterface
+ properties:
+ router_id: { get_resource: router }
+ subnet_id: { get_resource: west_private_subnet }
+
+ server_security_group:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ rules: [
+ {remote_ip_prefix: 0.0.0.0/0,
+ protocol: tcp,
+ port_range_min: 1,
+ port_range_max: 65535},
+ {remote_ip_prefix: 0.0.0.0/0,
+ protocol: udp,
+ port_range_min: 1,
+ port_range_max: 65535},
+ {remote_ip_prefix: 0.0.0.0/0,
+ protocol: icmp}]
+
+{% for agent in agents.values() %}
+
+ {{ agent.id }}:
+ type: OS::Nova::Server
+ properties:
+ name: {{ agent.id }}
+ image: { get_param: image }
+ flavor: { get_param: flavor }
+ availability_zone: "{{ agent.availability_zone }}"
+ networks:
+ - port: { get_resource: {{ agent.id }}_port }
+
+{% if agent.mode == 'master' %}
+ {{ agent.id }}_port:
+ type: OS::Neutron::Port
+ properties:
+ network_id: { get_resource: east_private_net }
+ fixed_ips:
+ - subnet_id: { get_resource: east_private_subnet }
+ security_groups: [{ get_resource: server_security_group }]
+{% else %}
+ {{ agent.id }}_port:
+ type: OS::Neutron::Port
+ properties:
+ network_id: { get_resource: west_private_net }
+ fixed_ips:
+ - subnet_id: { get_resource: west_private_subnet }
+ security_groups: [{ get_resource: server_security_group }]
+{% endif %}
+
+{% endfor %}
+
+outputs:
+{% for agent in agents.values() %}
+ {{ agent.id }}_instance_name:
+ value: { get_attr: [ {{ agent.id }}, instance_name ] }
+{% if agent.mode == 'master' %}
+ {{ agent.id }}_ip:
+ value: { get_attr: [ {{ agent.id }}, networks, { get_attr: [east_private_net, name] }, 0 ] }
+{% else %}
+ {{ agent.id }}_ip:
+ value: { get_attr: [ {{ agent.id }}, networks, { get_attr: [west_private_net, name] }, 0 ] }
+{% endif %}
+{% endfor %}
diff --git a/tools/os_deploy_tgen/templates/l3_1c_2i.yaml b/tools/os_deploy_tgen/templates/l3_1c_2i.yaml
new file mode 100644
index 00000000..0908843c
--- /dev/null
+++ b/tools/os_deploy_tgen/templates/l3_1c_2i.yaml
@@ -0,0 +1,11 @@
+title: OpenStack L3 East-West Performance
+
+description:
+  In this scenario tdep launches 1 pair of instances, both on the same
+  compute node. Each instance is connected to one of 2 tenant networks,
+  which are plugged into a single router. The traffic flows from one
+  network to the other (L3 east-west).
+
+deployment:
+ template: l3.hot
+ accommodation: [pair, single_room, best_effort, compute_nodes: 2]
diff --git a/tools/os_deploy_tgen/templates/l3_2c_2i.yaml b/tools/os_deploy_tgen/templates/l3_2c_2i.yaml
new file mode 100644
index 00000000..67aee170
--- /dev/null
+++ b/tools/os_deploy_tgen/templates/l3_2c_2i.yaml
@@ -0,0 +1,11 @@
+title: OpenStack L3 East-West Performance
+
+description:
+  In this scenario tdep launches 1 pair of instances, each on its own
+  compute node. Each instance is connected to one of 2 tenant networks,
+  which are plugged into a single router. The traffic flows from one
+  network to the other (L3 east-west).
+
+deployment:
+ template: l3.hot
+ accommodation: [pair, single_room, compute_nodes: 2]
diff --git a/tools/os_deploy_tgen/templates/scenario.yaml b/tools/os_deploy_tgen/templates/scenario.yaml
new file mode 100644
index 00000000..c66ec734
--- /dev/null
+++ b/tools/os_deploy_tgen/templates/scenario.yaml
@@ -0,0 +1,44 @@
+name: tdep scenario schema
+type: map
+allowempty: True
+mapping:
+ title:
+ type: str
+ description:
+ type: str
+ deployment:
+ type: map
+ mapping:
+ support_templates:
+ type: seq
+ sequence:
+ - type: map
+ mapping:
+ name:
+ type: str
+ template:
+ type: str
+ env_file:
+ type: str
+ template:
+ type: str
+ env_file:
+ type: str
+ agents:
+ type: any
+ accommodation:
+ type: seq
+ matching: any
+ sequence:
+ - type: str
+ enum: [pair, alone, double_room, single_room, mixed_room, cross_az, best_effort]
+ - type: map
+ mapping:
+ density:
+ type: number
+ compute_nodes:
+ type: number
+ zones:
+ type: seq
+ sequence:
+ - type: str
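
scenario.yaml is a pykwalify schema; scenario files such as l3_1c_2i.yaml are validated against it before deployment. A short validation sketch using the same pykwalify calls as validate_yaml() in utilities/utils.py below:

    # Sketch: validating a scenario file against the schema above,
    # mirroring utilities/utils.py validate_yaml().
    import yaml
    from pykwalify import core as pykwalify_core

    with open('tools/os_deploy_tgen/templates/scenario.yaml') as fid:
        schema = yaml.safe_load(fid)
    with open('tools/os_deploy_tgen/templates/l3_1c_2i.yaml') as fid:
        scenario = yaml.safe_load(fid)

    cor = pykwalify_core.Core(source_data=scenario, schema_data=schema)
    cor.validate(raise_exception=True)  # raises SchemaError on mismatch
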
diff --git a/tools/os_deploy_tgen/utilities/__init__.py b/tools/os_deploy_tgen/utilities/__init__.py
new file mode 100644
index 00000000..56f22a9e
--- /dev/null
+++ b/tools/os_deploy_tgen/utilities/__init__.py
@@ -0,0 +1,17 @@
+# Copyright 2020 Spirent Communications.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Utilities package
+"""
diff --git a/tools/os_deploy_tgen/utilities/utils.py b/tools/os_deploy_tgen/utilities/utils.py
new file mode 100644
index 00000000..5208fd2a
--- /dev/null
+++ b/tools/os_deploy_tgen/utilities/utils.py
@@ -0,0 +1,183 @@
+# Copyright 2020 Spirent Communications, Mirantis
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Utilities for deploying a traffic generator on OpenStack.
+This code is based on OpenStack Shaker.
+"""
+
+#import errno
+#import functools
+import logging
+import os
+import random
+import re
+import uuid
+#import collections
+import yaml
+from pykwalify import core as pykwalify_core
+from pykwalify import errors as pykwalify_errors
+
+from conf import settings as S
+
+LOG = logging.getLogger(__name__)
+
+def read_file(file_name, base_dir=''):
+ """
+ Read Files
+ """
+ full_path = os.path.normpath(os.path.join(base_dir, file_name))
+
+ if not os.path.exists(full_path):
+ full_path = os.path.normpath(os.path.join('tools',
+ 'os_deploy_tgen',
+ file_name))
+ if not os.path.exists(full_path):
+ full_path = os.path.normpath(os.path.join('tools',
+ 'os_deploy_tgen',
+ 'templates',
+ file_name))
+ if not os.path.exists(full_path):
+                msg = ('File %s not found by absolute or relative path' %
+                       file_name)
+ LOG.error(msg)
+ raise IOError(msg)
+
+    try:
+        with open(full_path) as fid:
+            return fid.read()
+    except IOError as exc:
+        LOG.error('Error reading file: %s', exc)
+        raise
+
+
+def write_file(data, file_name, base_dir=''):
+ """
+ Write to file
+ """
+ full_path = os.path.normpath(os.path.join(base_dir, file_name))
+    try:
+        with open(full_path, 'w') as fid:
+            return fid.write(data)
+    except IOError as err:
+        LOG.error('Error writing file: %s', err)
+        raise
+
+
+def read_yaml_file(file_name):
+ """
+ Read Yaml File
+ """
+ raw = read_file(file_name)
+ return read_yaml(raw)
+
+
+def read_yaml(raw):
+ """
+ Read YAML
+ """
+ try:
+ parsed = yaml.safe_load(raw)
+ return parsed
+ except Exception as error:
+ LOG.error('Failed to parse input %(yaml)s in YAML format: %(err)s',
+ dict(yaml=raw, err=error))
+ raise
+
+
+def split_address(address):
+ """
+ Split addresses
+ """
+ try:
+ host, port = address.split(':')
+ except ValueError:
+ LOG.error('Invalid address: %s, "host:port" expected', address)
+ raise
+ return host, port
+
+
+def random_string(length=6):
+ """
+ Generate Random String
+ """
+ return ''.join(random.sample('adefikmoprstuz', length))
+
+
+def make_record_id():
+ """
+ Create record-ID
+ """
+ return str(uuid.uuid4())
+
+def strict(strc):
+ """
+    Normalize a string into a strict lowercase identifier
+ """
+ return re.sub(r'[^\w\d]+', '_', re.sub(r'\(.+\)', '', strc)).lower()
+
+
+def validate_yaml(data, schema):
+ """
+ Validate Yaml
+ """
+ cor = pykwalify_core.Core(source_data=data, schema_data=schema)
+ try:
+ cor.validate(raise_exception=True)
+ except pykwalify_errors.SchemaError as err:
+ raise Exception('File does not conform to schema') from err
+
+
+def pack_openstack_params():
+ """
+    Pack OpenStack parameters
+ """
+ if not S.hasValue('OS_AUTH_URL'):
+ raise Exception(
+ 'OpenStack authentication endpoint is missing')
+
+ params = dict(auth=dict(username=S.getValue('OS_USERNAME'),
+ password=S.getValue('OS_PASSWORD'),
+ auth_url=S.getValue('OS_AUTH_URL')),
+ os_region_name=S.getValue('OS_REGION_NAME'),
+ os_cacert=S.getValue('OS_CA_CERT'),
+ os_insecure=S.getValue('OS_INSECURE'))
+
+ if S.hasValue('OS_PROJECT_NAME'):
+ value = S.getValue('OS_PROJECT_NAME')
+ params['auth']['project_name'] = value
+ if S.hasValue('OS_PROJECT_DOMAIN_NAME'):
+ value = S.getValue('OS_PROJECT_DOMAIN_NAME')
+ params['auth']['project_domain_name'] = value
+ if S.hasValue('OS_USER_DOMAIN_NAME'):
+ value = S.getValue('OS_USER_DOMAIN_NAME')
+ params['auth']['user_domain_name'] = value
+ if S.hasValue('OS_INTERFACE'):
+ value = S.getValue('OS_INTERFACE')
+ params['os_interface'] = value
+ if S.hasValue('OS_API_VERSION'):
+ value = S.getValue('OS_API_VERSION')
+ params['identity_api_version'] = value
+ if S.hasValue('OS_PROFILE'):
+ value = S.getValue('OS_PROFILE')
+ params['os_profile'] = value
+ return params
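
pack_openstack_params() folds the OS_* values from the VSPERF settings into the auth dictionary consumed by the OpenStack clients. A usage sketch, assuming the standard VSPERF settings bootstrap and that the usual OS_* options are configured:

    # Sketch: building OpenStack credentials from VSPERF settings.
    # Assumes OS_AUTH_URL, OS_USERNAME and OS_PASSWORD are set in conf/.
    from conf import settings as S
    from tools.os_deploy_tgen.utilities import utils

    S.load_from_dir('conf')                   # standard VSPERF bootstrap
    os_params = utils.pack_openstack_params()
    auth_url = os_params['auth']['auth_url']  # feeds the keystone/heat clients
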
diff --git a/tools/pkt_gen/dummy/dummy.py b/tools/pkt_gen/dummy/dummy.py
index 3dc5448e..ef4b37d9 100755
--- a/tools/pkt_gen/dummy/dummy.py
+++ b/tools/pkt_gen/dummy/dummy.py
@@ -25,6 +25,7 @@ own.
import json
+from collections import OrderedDict
from conf import settings
from conf import merge_spec
from tools.pkt_gen import trafficgen
@@ -108,41 +109,41 @@ class Dummy(trafficgen.ITrafficGenerator):
"""
pass
- def send_burst_traffic(self, traffic=None, numpkts=100, duration=20):
+ def send_burst_traffic(self, traffic=None, duration=20):
"""
Send a burst of traffic.
"""
traffic_ = self.traffic_defaults.copy()
- result = {}
+ result = OrderedDict()
if traffic:
traffic_ = merge_spec(traffic_, traffic)
results = get_user_traffic(
'burst',
- '%dpkts, %dmS' % (numpkts, duration),
+ '%dpkts, %dmS' % (traffic['burst_size'], duration),
traffic_,
('frames rx', 'payload errors', 'sequence errors'))
# builds results by using user-supplied values where possible
# and guessing remainder using available info
- result[ResultsConstants.TX_FRAMES] = numpkts
+ result[ResultsConstants.TX_FRAMES] = traffic['burst_size']
result[ResultsConstants.RX_FRAMES] = results[0]
result[ResultsConstants.TX_BYTES] = traffic_['l2']['framesize'] \
- * numpkts
+ * traffic['burst_size']
result[ResultsConstants.RX_BYTES] = traffic_['l2']['framesize'] \
* results[0]
result[ResultsConstants.PAYLOAD_ERR] = results[1]
result[ResultsConstants.SEQ_ERR] = results[2]
- return results
+ return result
def send_cont_traffic(self, traffic=None, duration=30):
"""
Send a continuous flow of traffic.
"""
traffic_ = self.traffic_defaults.copy()
- result = {}
+ result = OrderedDict()
if traffic:
traffic_ = merge_spec(traffic_, traffic)
@@ -179,7 +180,7 @@ class Dummy(trafficgen.ITrafficGenerator):
Send traffic per RFC2544 throughput test specifications.
"""
traffic_ = self.traffic_defaults.copy()
- result = {}
+ result = OrderedDict()
if traffic:
traffic_ = merge_spec(traffic_, traffic)
@@ -216,7 +217,7 @@ class Dummy(trafficgen.ITrafficGenerator):
Send traffic per RFC2544 back2back test specifications.
"""
traffic_ = self.traffic_defaults.copy()
- result = {}
+ result = OrderedDict()
if traffic:
traffic_ = merge_spec(traffic_, traffic)
@@ -273,4 +274,5 @@ if __name__ == '__main__':
print(dev.send_cont_traffic(traffic=TRAFFIC))
print(dev.send_rfc2544_throughput(traffic=TRAFFIC))
print(dev.send_rfc2544_back2back(traffic=TRAFFIC))
+ # pylint: disable=no-member
print(dev.send_rfc(traffic=TRAFFIC))
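
With this change the burst size travels inside the traffic dictionary instead of the removed numpkts argument. A sketch of the new call shape; keys beyond burst_size are merged with the generator's own defaults:

    # Sketch: 'burst_size' replaces the removed 'numpkts' argument;
    # the remaining keys are merged with the trafficgen defaults.
    TRAFFIC = {
        'traffic_type': 'burst',
        'burst_size': 100,
        'l2': {'framesize': 64},
    }
    with Dummy() as dev:
        print(dev.send_burst_traffic(traffic=TRAFFIC, duration=20))
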
diff --git a/tools/pkt_gen/ixia/ixia.py b/tools/pkt_gen/ixia/ixia.py
index e768be06..31f51246 100755
--- a/tools/pkt_gen/ixia/ixia.py
+++ b/tools/pkt_gen/ixia/ixia.py
@@ -111,6 +111,11 @@ def _build_set_cmds(values, prefix='dict set'):
yield subkey
continue
+ if isinstance(value, list):
+ value = '{{{}}}'.format(' '.join(str(x) for x in value))
+ yield ' '.join([prefix, 'set', key, value]).strip()
+ continue
+
# tcl doesn't recognise the strings "True" or "False", only "1"
# or "0". Special case to convert them
if isinstance(value, bool):
@@ -118,6 +123,9 @@ def _build_set_cmds(values, prefix='dict set'):
else:
value = str(value)
+ if isinstance(value, str) and not value:
+ value = '{}'
+
if prefix:
yield ' '.join([prefix, key, value]).strip()
else:
@@ -149,8 +157,8 @@ class Ixia(trafficgen.ITrafficGenerator):
return NotImplementedError(
'Ixia start back2back traffic not implemented')
- def send_rfc2544_back2back(self, traffic=None, duration=60,
- lossrate=0.0, tests=1):
+ def send_rfc2544_back2back(self, traffic=None, tests=1, duration=60,
+ lossrate=0.0):
return NotImplementedError(
'Ixia send back2back traffic not implemented')
@@ -234,11 +242,11 @@ class Ixia(trafficgen.ITrafficGenerator):
return result
- def send_burst_traffic(self, traffic=None, numpkts=100, duration=20):
+ def send_burst_traffic(self, traffic=None, duration=20):
"""See ITrafficGenerator for description
"""
flow = {
- 'numpkts': numpkts,
+ 'numpkts': traffic['burst_size'],
'duration': duration,
'type': 'stopStream',
'framerate': traffic['frame_rate'],
@@ -246,9 +254,9 @@ class Ixia(trafficgen.ITrafficGenerator):
result = self._send_traffic(flow, traffic)
- assert len(result) == 6 # fail-fast if underlying Tcl code changes
+ assert len(result) == 10 # fail-fast if underlying Tcl code changes
- #NOTE - implement Burst results setting via TrafficgenResults.
+ return Ixia._create_result(result)
def send_cont_traffic(self, traffic=None, duration=30):
"""See ITrafficGenerator for description
@@ -309,20 +317,25 @@ class Ixia(trafficgen.ITrafficGenerator):
:returns: dictionary strings representing results from
traffic generator.
"""
- assert len(result) == 8 # fail-fast if underlying Tcl code changes
+ assert len(result) == 8 or len(result) == 10 # fail-fast if underlying Tcl code changes
+
+ # content of result common for all tests
+ # [framesSent, framesRecv, bytesSent, bytesRecv, sendRate, recvRate, sendRateBytes, recvRateBytes]
+ # burst test has additional two values at the end: payError, seqError
if float(result[0]) == 0:
loss_rate = 100
else:
- loss_rate = (float(result[0]) - float(result[1])) / float(result[0]) * 100
+ loss_rate = round((float(result[0]) - float(result[1])) / float(result[0]) * 100, 5)
result_dict = OrderedDict()
- # drop the first 4 elements as we don't use/need them. In
- # addition, IxExplorer does not support latency or % line rate
+ # IxExplorer does not support latency or % line rate
# metrics so we have to return dummy values for these metrics
- result_dict[ResultsConstants.THROUGHPUT_RX_FPS] = result[4]
- result_dict[ResultsConstants.TX_RATE_FPS] = result[5]
- result_dict[ResultsConstants.THROUGHPUT_RX_MBPS] = str(round(int(result[6]) / 1000000, 3))
- result_dict[ResultsConstants.TX_RATE_MBPS] = str(round(int(result[7]) / 1000000, 3))
+ result_dict[ResultsConstants.TX_FRAMES] = result[0]
+ result_dict[ResultsConstants.RX_FRAMES] = result[1]
+ result_dict[ResultsConstants.TX_RATE_FPS] = result[4]
+ result_dict[ResultsConstants.THROUGHPUT_RX_FPS] = result[5]
+ result_dict[ResultsConstants.TX_RATE_MBPS] = str(round(int(result[6]) * 8 / 1e6, 3))
+ result_dict[ResultsConstants.THROUGHPUT_RX_MBPS] = str(round(int(result[7]) * 8 / 1e6, 3))
result_dict[ResultsConstants.FRAME_LOSS_PERCENT] = loss_rate
result_dict[ResultsConstants.TX_RATE_PERCENT] = \
ResultsConstants.UNKNOWN_VALUE
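
The reworked _create_result() also corrects the unit conversion: result[6] and result[7] are byte rates, so Mbit/s needs the multiply-by-8 that the old code omitted. A one-line check of the arithmetic:

    # The corrected conversion: bytes/s -> Mbit/s is rate * 8 / 1e6.
    send_rate_bytes = 1250000                           # 1.25 MB/s
    assert round(send_rate_bytes * 8 / 1e6, 3) == 10.0  # 10 Mbit/s
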
diff --git a/tools/pkt_gen/ixnet/ixnet.py b/tools/pkt_gen/ixnet/ixnet.py
index b8fb1879..c7036606 100755
--- a/tools/pkt_gen/ixnet/ixnet.py
+++ b/tools/pkt_gen/ixnet/ixnet.py
@@ -83,6 +83,7 @@ import logging
import os
import re
import csv
+import random
from collections import OrderedDict
from tools.pkt_gen import trafficgen
@@ -127,6 +128,11 @@ def _build_set_cmds(values, prefix='dict set'):
yield subkey
continue
+ if isinstance(value, list):
+ value = '{{{}}}'.format(' '.join(str(x) for x in value))
+ yield ' '.join([prefix, key, value]).strip()
+ continue
+
# tcl doesn't recognise the strings "True" or "False", only "1"
# or "0". Special case to convert them
if isinstance(value, bool):
@@ -134,6 +140,9 @@ def _build_set_cmds(values, prefix='dict set'):
else:
value = str(value)
+ if isinstance(value, str) and not value:
+ value = '{}'
+
if prefix:
yield ' '.join([prefix, key, value]).strip()
else:
@@ -168,10 +177,9 @@ class IxNet(trafficgen.ITrafficGenerator):
:returns: Output of command, where applicable.
"""
self._logger.debug('%s%s', trafficgen.CMD_PREFIX, cmd)
-
output = self._tclsh.eval(cmd)
- return output.split()
+ return output
def configure(self):
"""Configure system for IxNetwork.
@@ -185,12 +193,16 @@ class IxNet(trafficgen.ITrafficGenerator):
'port': settings.getValue('TRAFFICGEN_IXNET_PORT'),
'user': settings.getValue('TRAFFICGEN_IXNET_USER'),
# IXIA chassis configuration
- 'chassis': settings.getValue('TRAFFICGEN_IXIA_HOST'),
- 'card': settings.getValue('TRAFFICGEN_IXIA_CARD'),
- 'port1': settings.getValue('TRAFFICGEN_IXIA_PORT1'),
- 'port2': settings.getValue('TRAFFICGEN_IXIA_PORT2'),
+ 'chassis_east': settings.getValue('TRAFFICGEN_EAST_IXIA_HOST'),
+ 'card_east': settings.getValue('TRAFFICGEN_EAST_IXIA_CARD'),
+ 'port_east': settings.getValue('TRAFFICGEN_EAST_IXIA_PORT'),
+ 'chassis_west': settings.getValue('TRAFFICGEN_WEST_IXIA_HOST'),
+ 'card_west': settings.getValue('TRAFFICGEN_WEST_IXIA_CARD'),
+ 'port_west': settings.getValue('TRAFFICGEN_WEST_IXIA_PORT'),
'output_dir':
settings.getValue('TRAFFICGEN_IXNET_TESTER_RESULT_DIR'),
+ 'frame_size_list':
+ settings.getValue('TRAFFICGEN_PKT_SIZES'),
}
         self._logger.debug('IXIA configuration: %s', self._cfg)
@@ -248,11 +260,12 @@ class IxNet(trafficgen.ITrafficGenerator):
             'An error occurred when connecting to IxNetwork machine...')
raise RuntimeError('Ixia failed to initialise.')
- self.run_tcl('startRfc2544Test $config $traffic')
+ results_path = self.run_tcl('startRfc2544Test $config $traffic')
if output:
self._logger.critical(
'Failed to start continuous traffic test')
raise RuntimeError('Continuous traffic test failed to start.')
+ return results_path
def stop_cont_traffic(self):
"""See ITrafficGenerator for description
@@ -263,9 +276,12 @@ class IxNet(trafficgen.ITrafficGenerator):
lossrate=0.0):
"""See ITrafficGenerator for description
"""
- self.start_rfc2544_throughput(traffic, tests, duration, lossrate)
-
- return self.wait_rfc2544_throughput()
+ results_file = self.start_rfc2544_throughput(traffic, tests, duration, lossrate)
+ run_result = self.wait_rfc2544_throughput()
+ dest_file_name = 'Traffic_Item_Statistics_' + str(random.randrange(1, 100)) + '.csv'
+ self.copy_results_file(results_file,
+ os.path.join(settings.getValue('RESULTS_PATH'), dest_file_name))
+ return run_result
def start_rfc2544_throughput(self, traffic=None, tests=1, duration=20,
lossrate=0.0):
@@ -305,12 +321,14 @@ class IxNet(trafficgen.ITrafficGenerator):
             'An error occurred when connecting to IxNetwork machine...')
raise RuntimeError('Ixia failed to initialise.')
- self.run_tcl('startRfc2544Test $config $traffic')
+ results_file = self.run_tcl('startRfc2544Test $config $traffic')
if output:
self._logger.critical(
'Failed to start RFC2544 test')
raise RuntimeError('RFC2544 test failed to start.')
+ return results_file
+
def wait_rfc2544_throughput(self):
"""See ITrafficGenerator for description
"""
@@ -362,7 +380,7 @@ class IxNet(trafficgen.ITrafficGenerator):
next(reader)
for row in reader:
#Replace null entries added by Ixia with 0s.
- row = [entry if len(entry) > 0 else '0' for entry in row]
+ row = [entry if entry else '0' for entry in row]
# tx_fps and tx_mps cannot be reliably calculated
# as the DUT may be modifying the frame size
@@ -389,12 +407,34 @@ class IxNet(trafficgen.ITrafficGenerator):
return results
output = self.run_tcl('waitForRfc2544Test')
-
# the run_tcl function will return a list with one element. We extract
# that one element (a string representation of an IXIA-specific Tcl
# datatype), parse it to find the path of the results file then parse
# the results file
- return parse_ixnet_rfc_results(parse_result_string(output[0]))
+ test_result = parse_ixnet_rfc_results(parse_result_string(output))
+ return test_result
+
+ def copy_results_file(self, source_file=None, dest_file=None):
+ """Copy a file from a source address to destination
+ """
+ dest_dict = {}
+ source_dict = {}
+ srcfile = ''
+ if isinstance(source_file, list):
+ for i in source_file:
+ srcfile = srcfile + ' ' + i
+ else:
+ srcfile = source_file
+
+ source = (srcfile.replace("\\", "/")).strip()
+ source_dict['source_file'] = {'source_file': '\"{}\"'.format(source)}
+ dest_dict['dest_file'] = {'dest_file': '{}'.format(dest_file)}
+ for cmd in _build_set_cmds(source_dict):
+ self.run_tcl(cmd)
+ for cmd in _build_set_cmds(dest_dict):
+ self.run_tcl(cmd)
+ self.run_tcl('copyFileResults $source_file $dest_file')
+ return dest_dict['dest_file']
def send_rfc2544_back2back(self, traffic=None, tests=1, duration=2,
lossrate=0.0):
@@ -403,9 +443,12 @@ class IxNet(trafficgen.ITrafficGenerator):
# NOTE 2 seconds is the recommended duration for a back 2 back
# test in RFC2544. 50 trials is the recommended number from the
# RFC also.
- self.start_rfc2544_back2back(traffic, tests, duration, lossrate)
-
- return self.wait_rfc2544_back2back()
+ b2b_results_file = self.start_rfc2544_back2back(traffic, tests, duration, lossrate)
+ b2b_run_result = self.wait_rfc2544_back2back()
+ dest_file_name = 'Traffic_Item_Statistics_' + str(random.randrange(1, 100)) + '.csv'
+ self.copy_results_file(b2b_results_file,
+ os.path.join(settings.getValue('RESULTS_PATH'), dest_file_name))
+ return b2b_run_result
def start_rfc2544_back2back(self, traffic=None, tests=1, duration=2,
lossrate=0.0):
@@ -445,15 +488,18 @@ class IxNet(trafficgen.ITrafficGenerator):
             'An error occurred when connecting to IxNetwork machine...')
raise RuntimeError('Ixia failed to initialise.')
- self.run_tcl('startRfc2544Test $config $traffic')
+ results_file = self.run_tcl('startRfc2544Test $config $traffic')
if output:
self._logger.critical(
'Failed to start RFC2544 test')
raise RuntimeError('RFC2544 test failed to start.')
+ return results_file
+
def wait_rfc2544_back2back(self):
"""Wait for results.
"""
+
def parse_result_string(results):
"""Get path to results file from output
@@ -479,7 +525,7 @@ class IxNet(trafficgen.ITrafficGenerator):
# transform path into something useful
path = result_path.group(1).replace('\\', '/')
- path = os.path.join(path, 'iteration.csv')
+ path = os.path.join(path, 'AggregateResults.csv')
path = path.replace(
settings.getValue('TRAFFICGEN_IXNET_TESTER_RESULT_DIR'),
settings.getValue('TRAFFICGEN_IXNET_DUT_RESULT_DIR'))
@@ -503,11 +549,11 @@ class IxNet(trafficgen.ITrafficGenerator):
for row in reader:
# if back2back count higher than previously found, store it
# Note: row[N] here refers to the Nth column of a row
- if float(row[14]) <= self._params['config']['lossrate']:
- if int(row[12]) > \
+ if float(row[10]) <= self._params['config']['lossrate']:
+ if int(float(row[8])) > \
int(results[ResultsConstants.B2B_FRAMES]):
- results[ResultsConstants.B2B_FRAMES] = int(row[12])
- results[ResultsConstants.B2B_FRAME_LOSS_PERCENT] = float(row[14])
+ results[ResultsConstants.B2B_FRAMES] = int(float(row[8]))
+ results[ResultsConstants.B2B_FRAME_LOSS_PERCENT] = float(row[10])
return results
@@ -518,9 +564,9 @@ class IxNet(trafficgen.ITrafficGenerator):
# datatype), parse it to find the path of the results file then parse
# the results file
- return parse_ixnet_rfc_results(parse_result_string(output[0]))
+ return parse_ixnet_rfc_results(parse_result_string(output))
- def send_burst_traffic(self, traffic=None, numpkts=100, duration=20):
+ def send_burst_traffic(self, traffic=None, duration=20):
return NotImplementedError('IxNet does not implement send_burst_traffic')
if __name__ == '__main__':
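
The new list branch in _build_set_cmds() serializes Python lists such as TRAFFICGEN_PKT_SIZES into Tcl brace-delimited lists. A sketch of the transformation:

    # Sketch of the list-to-Tcl conversion added to _build_set_cmds().
    value = [64, 128, 512, 1024, 1518]
    tcl_value = '{{{}}}'.format(' '.join(str(x) for x in value))
    print(tcl_value)  # -> {64 128 512 1024 1518}
    # emitted as e.g.: dict set config frame_size_list {64 128 512 1024 1518}
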
diff --git a/tools/pkt_gen/moongen/moongen.py b/tools/pkt_gen/moongen/moongen.py
index 570720e8..b7d55c4d 100644
--- a/tools/pkt_gen/moongen/moongen.py
+++ b/tools/pkt_gen/moongen/moongen.py
@@ -64,46 +64,46 @@ class Moongen(ITrafficGenerator):
:param one_shot: No RFC 2544 binary search,
just packet flow at traffic specifics
"""
- logging.debug("traffic['frame_rate'] = " + \
+ logging.debug("traffic['frame_rate'] = %s", \
str(traffic['frame_rate']))
- logging.debug("traffic['multistream'] = " + \
+ logging.debug("traffic['multistream'] = %s", \
str(traffic['multistream']))
- logging.debug("traffic['stream_type'] = " + \
+ logging.debug("traffic['stream_type'] = %s", \
str(traffic['stream_type']))
- logging.debug("traffic['l2']['srcmac'] = " + \
+ logging.debug("traffic['l2']['srcmac'] = %s", \
str(traffic['l2']['srcmac']))
- logging.debug("traffic['l2']['dstmac'] = " + \
+ logging.debug("traffic['l2']['dstmac'] = %s", \
str(traffic['l2']['dstmac']))
- logging.debug("traffic['l3']['proto'] = " + \
+ logging.debug("traffic['l3']['proto'] = %s", \
str(traffic['l3']['proto']))
- logging.debug("traffic['l3']['srcip'] = " + \
+ logging.debug("traffic['l3']['srcip'] = %s", \
str(traffic['l3']['srcip']))
- logging.debug("traffic['l3']['dstip'] = " + \
+ logging.debug("traffic['l3']['dstip'] = %s", \
str(traffic['l3']['dstip']))
- logging.debug("traffic['l4']['srcport'] = " + \
+ logging.debug("traffic['l4']['srcport'] = %s", \
str(traffic['l4']['srcport']))
- logging.debug("traffic['l4']['dstport'] = " + \
+ logging.debug("traffic['l4']['dstport'] = %s", \
str(traffic['l4']['dstport']))
- logging.debug("traffic['vlan']['enabled'] = " + \
+ logging.debug("traffic['vlan']['enabled'] = %s", \
str(traffic['vlan']['enabled']))
- logging.debug("traffic['vlan']['id'] = " + \
+ logging.debug("traffic['vlan']['id'] = %s", \
str(traffic['vlan']['id']))
- logging.debug("traffic['vlan']['priority'] = " + \
+ logging.debug("traffic['vlan']['priority'] = %s", \
str(traffic['vlan']['priority']))
- logging.debug("traffic['vlan']['cfi'] = " + \
+ logging.debug("traffic['vlan']['cfi'] = %s", \
str(traffic['vlan']['cfi']))
logging.debug(traffic['l2']['framesize'])
@@ -160,9 +160,9 @@ class Moongen(ITrafficGenerator):
(traffic['frame_rate'] / 100) * (self._moongen_line_speed / \
(8 * (traffic['l2']['framesize'] + 20)) / math.pow(10, 6)))
- logging.debug("startRate = " + start_rate)
+ logging.debug("startRate = %s", start_rate)
- out_file.write("startRate = " + \
+ out_file.write("startRate = %s" % \
start_rate + "\n")
out_file.write("}" + "\n")
@@ -240,14 +240,13 @@ class Moongen(ITrafficGenerator):
"""
self._logger.info("MOONGEN: In moongen disconnect method")
- def send_burst_traffic(self, traffic=None, numpkts=100, duration=20):
+ def send_burst_traffic(self, traffic=None, duration=20):
"""Send a burst of traffic.
- Send a ``numpkts`` packets of traffic, using ``traffic``
+        Send ``traffic['burst_size']`` packets of traffic, using ``traffic``
configuration, with a timeout of ``time``.
:param traffic: Detailed "traffic" spec, i.e. IP address, VLAN tags
- :param numpkts: Number of packets to send
:param duration: Time to wait to receive packets
:returns: dictionary of strings with following data:
@@ -508,8 +507,8 @@ class Moongen(ITrafficGenerator):
return moongen_results
- def send_rfc2544_throughput(self, traffic=None, duration=20,
- lossrate=0.0, tests=1):
+ def send_rfc2544_throughput(self, traffic=None, tests=1, duration=20,
+ lossrate=0.0):
#
# Send traffic per RFC2544 throughput test specifications.
#
@@ -631,8 +630,8 @@ class Moongen(ITrafficGenerator):
"""
self._logger.info('In moongen wait_rfc2544_throughput')
- def send_rfc2544_back2back(self, traffic=None, duration=60,
- lossrate=0.0, tests=1):
+ def send_rfc2544_back2back(self, traffic=None, tests=1, duration=60,
+ lossrate=0.0):
"""Send traffic per RFC2544 back2back test specifications.
Send packets at a fixed rate, using ``traffic``
diff --git a/tools/pkt_gen/testcenter/testcenter-rfc2544-rest.py b/tools/pkt_gen/testcenter/testcenter-rfc2544-rest.py
index 6c30b130..8089ef42 100644
--- a/tools/pkt_gen/testcenter/testcenter-rfc2544-rest.py
+++ b/tools/pkt_gen/testcenter/testcenter-rfc2544-rest.py
@@ -22,12 +22,27 @@ TestCenter REST APIs. This test supports Python 3.4
'''
import argparse
+import collections
import logging
import os
-
+import sqlite3
+import time
_LOGGER = logging.getLogger(__name__)
+GENOME_PKTSIZE_ENCODING = {"a": 64, "b": 128, "c": 256, "d": 512,
+ "e": 1024, "f": 1280, "g": 1518, "h": 2112}
+
+
+def genome2weights(sequence):
+ """ Convert genome sequence to packetsize weights"""
+ weights = collections.defaultdict(int)
+ for char in GENOME_PKTSIZE_ENCODING:
+ charcount = sequence.count(char)
+ if charcount:
+ weights[GENOME_PKTSIZE_ENCODING[char]] = charcount
+ return weights
+
def create_dir(path):
"""Create the directory as specified in path """
@@ -39,6 +54,17 @@ def create_dir(path):
raise
+def write_histogram_to_csv(results_path, csv_results_file_prefix,
+ counts, ranges):
+ """ Write the results of the query to the CSV """
+ filec = os.path.join(results_path, csv_results_file_prefix + ".csv")
+    with open(filec, "w") as result_file:
+ for key in counts:
+ result_file.write(str(key) + "\n")
+ result_file.write(str(ranges) + "\n")
+ result_file.write(str(counts[key]) + "\n")
+
+
def write_query_results_to_csv(results_path, csv_results_file_prefix,
query_results):
""" Write the results of the query to the CSV """
@@ -51,6 +77,46 @@ def write_query_results_to_csv(results_path, csv_results_file_prefix,
result_file.write(row.replace(" ", ",") + "\n")
+def write_headers(results_path, file_name, rx_tx):
+ """ Write headers for the live-results files """
+ filec = os.path.join(results_path, file_name + rx_tx)
+ with open(filec, "a") as result_file:
+ if 'rx' in rx_tx:
+ result_file.write('Time,RxPrt,DrpFrCnt,SeqRnLen,AvgLat,' +
+ 'DrpFrRate,FrCnt,FrRate,MaxLat,MinLat,' +
+ 'OctCnt,OctRate\n')
+ else:
+ result_file.write('Time,StrId,BlkId,FrCnt,FrRate,ERxFrCnt,' +
+ 'OctCnt,OctRate,bitCnt,bitRate\n')
+
+
+def write_rx_live_results_to_file(results_path, file_name, results):
+ """ Write live results from the rx-ports"""
+ filec = os.path.join(results_path, file_name + ".rx")
+ with open(filec, "a") as result_file:
+ result_file.write('{0},{3},{1},{2},{4},{5},{6},{7},{8},{9},{10},{11}\n'
+ .format(time.time(), results['DroppedFrameCount'],
+ results['SeqRunLength'], results['RxPort'],
+ results['AvgLatency'],
+ results['DroppedFrameRate'],
+ results['FrameCount'], results['FrameRate'],
+ results['MaxLatency'], results['MinLatency'],
+ results['OctetCount'], results['OctetRate']))
+
+
+def write_tx_live_results_to_file(results_path, file_name, results):
+ """ Write live results from the tx-ports"""
+ filec = os.path.join(results_path, file_name + ".tx")
+ with open(filec, "a") as result_file:
+ result_file.write('{0},{1},{9},{2},{3},{4},{5},{6},{7},{8}\n'
+ .format(time.time(), results['StreamId'],
+ results['FrameCount'], results['FrameRate'],
+ results['ExpectedRxFrameCount'],
+ results['OctetCount'], results['OctetRate'],
+ results['BitCount'], results['BitRate'],
+ results['BlockId']))
+
+
def positive_int(value):
""" Positive Integer type for Arguments """
ivalue = int(value)
@@ -68,7 +134,8 @@ def percent_float(value):
"%s not in range [0.0, 100.0]" % pvalue)
return pvalue
-# pylint: disable=too-many-branches, too-many-statements
+
+# pylint: disable=too-many-branches, too-many-statements, too-many-locals
def main():
""" Read the arguments, Invoke Test and Return the results"""
parser = argparse.ArgumentParser()
@@ -146,6 +213,11 @@ def main():
default="./Results",
help="The directory to copy results to",
dest="results_dir")
+ optional_named.add_argument("--vsperf_results_dir",
+ required=False,
+                                help="The VSPERF directory for live results and histogram files",
+ help="The directory to copy results to",
+ dest="vsperf_results_dir")
optional_named.add_argument("--csv_results_file_prefix",
required=False,
default="Rfc2544Tput",
@@ -269,6 +341,27 @@ def main():
"the first emulated device interface"
"on the first west port"),
dest="west_intf_gateway_addr")
+ optional_named.add_argument("--latency_histogram",
+ required=False,
+ action="store_true",
+                                help="Include a latency histogram in the output",
+ dest="latency_histogram")
+ optional_named.add_argument("--imix",
+ required=False,
+ default="",
+                                help=("IMIX specification as genome "
+                                      "encoding (RFC 6985)"),
+ dest="imix")
+ optional_named.add_argument("--live_results",
+ required=False,
+ action="store_true",
+                                help="Collect live results while the test runs",
+ dest="live_results")
+ optional_named.add_argument("--logfile",
+ required=False,
+ default="./traffic_gen.log",
+ help="Log file to log live results",
+ dest="logfile")
parser.add_argument("-v",
"--verbose",
required=False,
@@ -309,6 +402,7 @@ def main():
_LOGGER.debug("SpirentTestCenter system version: %s",
stc.get("system1", "version"))
+ # pylint: disable=too-many-nested-blocks
try:
device_list = []
port_list = []
@@ -325,6 +419,10 @@ def main():
_LOGGER.debug("Creating project ...")
project = stc.get("System1", "children-Project")
+ # Configure the Result view
+ resultopts = stc.get('project1', 'children-resultoptions')
+ stc.config(resultopts, {'ResultViewMode': 'BASIC'})
+
# Configure any custom traffic parameters
if args.traffic_custom == "cont":
if args.verbose:
@@ -353,7 +451,9 @@ def main():
east_chassis_port})
# Create the DeviceGenEthIIIfParams object
stc.create("DeviceGenEthIIIfParams",
- under=east_device_gen_params)
+ under=east_device_gen_params,
+ attributes={'UseDefaultPhyMac': True})
+
# Configuring Ipv4 interfaces
stc.create("DeviceGenIpv4IfParams",
under=east_device_gen_params,
@@ -374,7 +474,9 @@ def main():
west_chassis_port})
# Create the DeviceGenEthIIIfParams object
stc.create("DeviceGenEthIIIfParams",
- under=west_device_gen_params)
+ under=west_device_gen_params,
+ attributes={'UseDefaultPhyMac': True})
+
# Configuring Ipv4 interfaces
stc.create("DeviceGenIpv4IfParams",
under=west_device_gen_params,
@@ -390,6 +492,45 @@ def main():
if args.verbose:
_LOGGER.debug(device_list)
+ # Configure Histogram
+ if args.latency_histogram:
+ # Generic Configuration
+ histResOptions = stc.get("project1", 'children-ResultOptions')
+ stc.config(histResOptions, {'ResultViewMode': 'HISTOGRAM'})
+ # East Port Configuration
+ histAnaEast = stc.get(east_chassis_port, 'children-Analyzer')
+ histAnaEastConfig = stc.get(histAnaEast, 'children-AnalyzerConfig')
+ stc.config(histAnaEastConfig, {'HistogramMode': 'LATENCY'})
+ eLatHist = stc.get(histAnaEastConfig, 'children-LatencyHistogram')
+ stc.config(eLatHist, {'ConfigMode': 'CONFIG_LIMIT_MODE',
+ 'BucketSizeUnit': 'ten_nanoseconds',
+ 'Active': 'TRUE',
+ 'DistributionMode': 'CENTERED_MODE'})
+ # West Port Configuration
+ histAnaWest = stc.get(west_chassis_port, 'children-Analyzer')
+ histAnaWestConfig = stc.get(histAnaWest, 'children-AnalyzerConfig')
+ stc.config(histAnaWestConfig, {'HistogramMode': 'LATENCY'})
+ wLatHist = stc.get(histAnaWestConfig, 'children-LatencyHistogram')
+ stc.config(wLatHist, {'ConfigMode': 'CONFIG_LIMIT_MODE',
+ 'BucketSizeUnit': 'ten_nanoseconds',
+ 'Active': 'TRUE',
+ 'DistributionMode': 'CENTERED_MODE'})
+ gBucketSizeList = stc.get(wLatHist, 'BucketSizeList')
+ # gLimitSizeList = stc.get(wLatHist, 'LimitList')
+
+ # IMIX configuration
+ fld = None
+ if args.imix:
+ args.frame_size_list = []
+ weights = genome2weights(args.imix)
+ fld = stc.create('FrameLengthDistribution', under=project)
+ def_slots = stc.get(fld, "children-framelengthdistributionslot")
+ stc.perform("Delete", params={"ConfigList": def_slots})
+ for fsize in weights:
+ stc.create('framelengthdistributionslot', under=fld,
+ attributes={'FixedFrameLength': fsize,
+ 'Weight': weights[fsize]})
+
# Create the RFC 2544 'metric test
if args.metric == "throughput":
if args.verbose:
@@ -407,7 +548,8 @@ def main():
"RateUpperLimit": args.rate_upper_limit_pct,
"Resolution": args.resolution_pct,
"SearchMode": args.search_mode,
- "TrafficPattern": args.traffic_pattern})
+ "TrafficPattern": args.traffic_pattern,
+ "FrameSizeDistributionList": fld})
elif args.metric == "backtoback":
stc.perform("Rfc2544SetupBackToBackTestCommand",
params={"AcceptableFrameLoss":
@@ -467,24 +609,133 @@ def main():
_LOGGER.debug("Apply configuration...")
stc.apply()
+ # Register for the results
+ hResDataRx = stc.create('ResultDataSet', under='project1')
+ strmBlockList = stc.get('project1', 'children-streamblock')
+ stc.create('ResultQuery', under=hResDataRx, attributes={
+ 'ResultRootList': strmBlockList,
+ 'ConfigClassId': 'StreamBlock',
+ 'ResultClassId': 'RxStreamSummaryResults',
+ 'PropertyIdArray': "RxStreamSummaryResults.RxPort \
+ RxStreamSummaryResults.AvgLatency \
+ RxStreamSummaryResults.BitCount \
+ RxStreamSummaryResults.BitRate \
+ RxStreamSummaryResults.DroppedFrameCount\
+ RxStreamSummaryResults.DroppedFrameRate \
+ RxStreamSummaryResults.FrameCount \
+ RxStreamSummaryResults.FrameRate \
+ RxStreamSummaryResults.MaxLatency \
+ RxStreamSummaryResults.MinLatency \
+ RxStreamSummaryResults.OctetCount \
+ RxStreamSummaryResults.OctetRate \
+ RxStreamSummaryResults.SeqRunLength"})
+ hResDataTx = stc.create('ResultDataSet', under='project1')
+ strmBlockList = stc.get('project1', 'children-streamblock')
+ stc.create('ResultQuery', under=hResDataTx, attributes={
+ 'ResultRootList': strmBlockList,
+ 'ConfigClassId': 'StreamBlock',
+ 'ResultClassId': 'TxStreamResults',
+ 'PropertyIdArray': "TxStreamResults.BlockId \
+ TxStreamResults.BitCount \
+ TxStreamResults.BitRate \
+ TxStreamResults.FrameCount \
+ TxStreamResults.FrameRate \
+ TxStreamResults.OctetCount \
+ TxStreamResults.OctetRate"})
+ stc.perform('ResultDataSetSubscribe', params={'ResultDataSet': hResDataRx})
+ stc.perform('ResultDataSetSubscribe', params={'ResultDataSet': hResDataTx})
+ time.sleep(3)
+ stc.perform('RefreshResultView', params={'ResultDataSet': hResDataTx})
+ hndListRx = stc.get(hResDataRx, 'ResultHandleList')
+ hndListTx = stc.get(hResDataTx, 'ResultHandleList')
+
if args.verbose:
_LOGGER.debug("Starting the sequencer...")
stc.perform("SequencerStart")
- # Wait for sequencer to finish
- _LOGGER.info(
- "Starting test... Please wait for the test to complete...")
- stc.wait_until_complete()
+ sequencer = stc.get("system1", "children-sequencer")
+ state = stc.get(sequencer, 'State')
+
+ # If Live-results are required, we don't wait for the test to complete
+ if args.live_results:
+ write_headers(args.vsperf_results_dir, args.logfile, '.rx')
+ write_headers(args.vsperf_results_dir, args.logfile, '.tx')
+ while state != 'IDLE':
+ state = stc.get(sequencer, 'State')
+ hndListTx = stc.get(hResDataTx, 'ResultHandleList')
+ if hndListTx:
+ handles = hndListTx.split(' ')
+ for handle in handles:
+ tx_values = stc.get(handle)
+ write_tx_live_results_to_file(args.vsperf_results_dir,
+ args.logfile,
+ tx_values)
+ if hndListRx:
+ handles = hndListRx.split(' ')
+ for handle in handles:
+ rx_values = stc.get(handle)
+ write_rx_live_results_to_file(args.vsperf_results_dir,
+ args.logfile,
+ rx_values)
+ time.sleep(1)
+ # Live results not needed, so just wait!
+ else:
+ # Wait for sequencer to finish
+ _LOGGER.info(
+ "Starting test... Please wait for the test to complete...")
+ stc.wait_until_complete()
+
_LOGGER.info("The test has completed... Saving results...")
# Determine what the results database filename is...
lab_server_resultsdb = stc.get(
"system1.project.TestResultSetting", "CurrentResultFileName")
+ if not lab_server_resultsdb or 'Results' not in lab_server_resultsdb:
+        _LOGGER.error("Failed to find results.")
+ stc.end_session()
+ return
+
if args.verbose:
_LOGGER.debug("The lab server results database is %s",
lab_server_resultsdb)
+ # Create Latency Histogram CSV file()
+ if args.latency_histogram:
+ hist_dict_counts = {}
+ for file_url in stc.files():
+ if '-FrameSize-' in file_url:
+ stc.download(file_url)
+ filename = file_url.split('/')[-1]
+ if os.path.exists(os.getcwd() + '/' + filename):
+ conn = sqlite3.connect(os.getcwd() + '/' + filename)
+ # cursor = conn.execute(
+ # 'select * from RxEotStreamResults')
+ # names = [desc[0] for desc in cursor.description]
+ counts = conn.execute("SELECT \
+ HistBin1Count, HistBin2Count,\
+ HistBin3Count, HistBin4Count,\
+ HistBin5Count, HistBin6Count,\
+ HistBin7Count, HistBin8Count,\
+ HistBin9Count, HistBin10Count,\
+ HistBin11Count, HistBin12Count,\
+ HistBin13Count, HistBin14Count, \
+ HistBin15Count, HistBin16Count \
+ from RxEotStreamResults")
+ strs = filename.split('-')
+ key = strs[strs.index('FrameSize')+1]
+ if key in hist_dict_counts:
+ hist_dict_counts[key] = [a+b for a, b in
+ zip(counts.fetchone(),
+ hist_dict_counts[key])]
+ else:
+ hist_dict_counts[key] = counts.fetchone()
+ conn.close()
+
+ write_histogram_to_csv(args.vsperf_results_dir, 'Histogram',
+ hist_dict_counts,
+ gBucketSizeList)
+
stc.perform("CSSynchronizeFiles",
params={"DefaultDownloadDir": args.results_dir})
@@ -565,6 +816,7 @@ def main():
args.results_dir, args.csv_results_file_prefix, resultsdict)
except RuntimeError as e:
+ stc.end_session()
_LOGGER.error(e)
if args.verbose:
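
genome2weights() implements the RFC 6985 genome notation behind the new --imix option: each letter maps to a frame size via GENOME_PKTSIZE_ENCODING and its repetition count becomes the weight. A worked example:

    # Worked example: 'aaag' weights 64-byte frames 3:1 against
    # 1518-byte frames (a=64, g=1518 per GENOME_PKTSIZE_ENCODING).
    weights = genome2weights('aaag')
    print(dict(weights))  # -> {64: 3, 1518: 1}
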
diff --git a/tools/pkt_gen/testcenter/testcenter.py b/tools/pkt_gen/testcenter/testcenter.py
index 9980ae7c..a15c502c 100644
--- a/tools/pkt_gen/testcenter/testcenter.py
+++ b/tools/pkt_gen/testcenter/testcenter.py
@@ -98,7 +98,9 @@ def get_rfc2544_common_settings():
"--trial_duration_sec",
settings.getValue("TRAFFICGEN_STC_TRIAL_DURATION_SEC"),
"--traffic_pattern",
- settings.getValue("TRAFFICGEN_STC_TRAFFIC_PATTERN")]
+ settings.getValue("TRAFFICGEN_STC_TRAFFIC_PATTERN"),
+ "--vsperf_results_dir",
+ settings.getValue("RESULTS_PATH")]
return args
@@ -169,6 +171,7 @@ class TestCenter(trafficgen.ITrafficGenerator):
Spirent TestCenter
"""
_logger = logging.getLogger(__name__)
+ _liveresults_file = settings.getValue("TRAFFICGEN_STC_LIVERESULTS_FILE")
def connect(self):
"""
@@ -182,7 +185,7 @@ class TestCenter(trafficgen.ITrafficGenerator):
"""
pass
- def send_burst_traffic(self, traffic=None, numpkts=100, duration=20):
+ def send_burst_traffic(self, traffic=None, duration=20):
"""
Do nothing.
"""
@@ -246,8 +249,7 @@ class TestCenter(trafficgen.ITrafficGenerator):
row["ForwardingRate(fps)"])
return result
- # pylint: disable=unused-argument
- def send_rfc2889_forwarding(self, traffic=None, tests=1, duration=20):
+ def send_rfc2889_forwarding(self, traffic=None, tests=1, _duration=20):
"""
Send traffic per RFC2889 Forwarding test specifications.
"""
@@ -257,7 +259,7 @@ class TestCenter(trafficgen.ITrafficGenerator):
framesize = traffic['l2']['framesize']
args = get_rfc2889_common_settings(framesize, tests,
traffic['traffic_type'])
- if settings.getValue("TRAFFICGEN_STC_VERBOSE") is "True":
+ if settings.getValue("TRAFFICGEN_STC_VERBOSE") == "True":
args.append("--verbose")
verbose = True
self._logger.debug("Arguments used to call test: %s", args)
@@ -273,7 +275,7 @@ class TestCenter(trafficgen.ITrafficGenerator):
return self.get_rfc2889_forwarding_results(filec)
- def send_rfc2889_caching(self, traffic=None, tests=1, duration=20):
+ def send_rfc2889_caching(self, traffic=None, tests=1, _duration=20):
"""
Send as per RFC2889 Addr-Caching test specifications.
"""
@@ -286,7 +288,7 @@ class TestCenter(trafficgen.ITrafficGenerator):
custom_args = get_rfc2889_custom_settings()
args = common_args + custom_args
- if settings.getValue("TRAFFICGEN_STC_VERBOSE") is "True":
+ if settings.getValue("TRAFFICGEN_STC_VERBOSE") == "True":
args.append("--verbose")
verbose = True
self._logger.debug("Arguments used to call test: %s", args)
@@ -302,7 +304,7 @@ class TestCenter(trafficgen.ITrafficGenerator):
return self.get_rfc2889_addr_caching_results(filec)
- def send_rfc2889_learning(self, traffic=None, tests=1, duration=20):
+ def send_rfc2889_learning(self, traffic=None, tests=1, _duration=20):
"""
Send traffic per RFC2889 Addr-Learning test specifications.
"""
@@ -315,7 +317,7 @@ class TestCenter(trafficgen.ITrafficGenerator):
custom_args = get_rfc2889_custom_settings()
args = common_args + custom_args
- if settings.getValue("TRAFFICGEN_STC_VERBOSE") is "True":
+ if settings.getValue("TRAFFICGEN_STC_VERBOSE") == "True":
args.append("--verbose")
verbose = True
self._logger.debug("Arguments used to call test: %s", args)
@@ -331,11 +333,13 @@ class TestCenter(trafficgen.ITrafficGenerator):
return self.get_rfc2889_addr_learning_results(filec)
- def get_rfc2544_results(self, filename):
+ def get_rfc2544_results(self, filename, genome=None):
"""
Reads the CSV file and return the results
"""
result = {}
+ if not os.path.exists(filename):
+ return result
with open(filename, "r") as csvfile:
csvreader = csv.DictReader(csvfile)
for row in csvreader:
@@ -366,6 +370,10 @@ class TestCenter(trafficgen.ITrafficGenerator):
row["AverageLatency(us)"]) * 1000
result[ResultsConstants.FRAME_LOSS_PERCENT] = float(
row["PercentLoss"])
+ if genome:
+ result[ResultsConstants.IMIX_GENOME] = genome
+ result[ResultsConstants.IMIX_AVG_FRAMESIZE] = float(
+ row["AvgFrameSize"])
return result
def send_cont_traffic(self, traffic=None, duration=30):
@@ -387,7 +395,7 @@ class TestCenter(trafficgen.ITrafficGenerator):
custom, 1)
args = rfc2544_common_args + stc_common_args + rfc2544_custom_args
- if settings.getValue("TRAFFICGEN_STC_VERBOSE") is "True":
+ if settings.getValue("TRAFFICGEN_STC_VERBOSE") == "True":
args.append("--verbose")
verbose = True
self._logger.debug("Arguments used to call test: %s", args)
@@ -420,7 +428,25 @@ class TestCenter(trafficgen.ITrafficGenerator):
tests)
args = rfc2544_common_args + stc_common_args + rfc2544_custom_args
- if settings.getValue("TRAFFICGEN_STC_VERBOSE") is "True":
+ if traffic and 'latency_histogram' in traffic:
+ if traffic['latency_histogram']['enabled']:
+ if traffic['latency_histogram']['type'] == 'Default':
+ args.append("--latency_histogram")
+
+ genome = ''
+ if traffic and 'imix' in traffic:
+ if traffic['imix']['enabled']:
+ if traffic['imix']['type'] == 'genome':
+ genome = traffic['imix']['genome']
+ args.append('--imix')
+ args.append(genome)
+
+ if settings.getValue("TRAFFICGEN_STC_LIVE_RESULTS") == "True":
+ args.append('--live_results')
+ args.append('--logfile')
+ args.append(self._liveresults_file)
+
+ if settings.getValue("TRAFFICGEN_STC_VERBOSE") == "True":
args.append("--verbose")
verbose = True
self._logger.debug("Arguments used to call test: %s", args)
@@ -434,7 +460,7 @@ class TestCenter(trafficgen.ITrafficGenerator):
if verbose:
self._logger.info("file: %s", filec)
- return self.get_rfc2544_results(filec)
+ return self.get_rfc2544_results(filec, genome)
def send_rfc2544_back2back(self, traffic=None, tests=1, duration=20,
lossrate=0.0):
@@ -453,7 +479,7 @@ class TestCenter(trafficgen.ITrafficGenerator):
tests)
args = rfc2544_common_args + stc_common_args + rfc2544_custom_args
- if settings.getValue("TRAFFICGEN_STC_VERBOSE") is "True":
+ if settings.getValue("TRAFFICGEN_STC_VERBOSE") == "True":
args.append("--verbose")
verbose = True
self._logger.info("Arguments used to call test: %s", args)
@@ -498,4 +524,5 @@ if __name__ == '__main__':
}
with TestCenter() as dev:
print(dev.send_rfc2544_throughput(traffic=TRAFFIC))
+ # pylint: disable=no-member
print(dev.send_rfc2544_backtoback(traffic=TRAFFIC))
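
send_rfc2544_throughput() now honours optional latency_histogram and imix sections of the traffic dictionary. A sketch of those keys; the structure is inferred from the checks above and the values are illustrative:

    # Sketch of the optional traffic keys consumed by
    # send_rfc2544_throughput(); values are illustrative only.
    TRAFFIC = {
        'traffic_type': 'rfc2544_throughput',
        'frame_rate': 100,
        'latency_histogram': {'enabled': True, 'type': 'Default'},
        'imix': {'enabled': True, 'type': 'genome', 'genome': 'aaaaaaaddddg'},
    }
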
diff --git a/tools/pkt_gen/trafficgen/trafficgen.py b/tools/pkt_gen/trafficgen/trafficgen.py
index 262df71d..a6f7edcc 100755
--- a/tools/pkt_gen/trafficgen/trafficgen.py
+++ b/tools/pkt_gen/trafficgen/trafficgen.py
@@ -81,15 +81,14 @@ class ITrafficGenerator(object):
"""
raise NotImplementedError('Please call an implementation.')
- def send_burst_traffic(self, traffic=None, numpkts=100, duration=20):
+ def send_burst_traffic(self, traffic=None, duration=20):
"""Send a burst of traffic.
- Send a ``numpkts`` packets of traffic, using ``traffic``
+        Send ``traffic['burst_size']`` packets of traffic, using ``traffic``
configuration, for ``duration`` seconds.
Attributes:
:param traffic: Detailed "traffic" spec, see design docs for details
- :param numpkts: Number of packets to send
:param duration: Time to wait to receive packets
:returns: dictionary of strings with following data:
diff --git a/tools/pkt_gen/trex/trex.py b/tools/pkt_gen/trex/trex_client.py
index 82118f6f..3d6836d8 100644
--- a/tools/pkt_gen/trex/trex.py
+++ b/tools/pkt_gen/trex/trex_client.py
@@ -15,15 +15,18 @@
"""
Trex Traffic Generator Model
"""
+
# pylint: disable=undefined-variable
import logging
import subprocess
import sys
import time
+import os
+import re
from collections import OrderedDict
# pylint: disable=unused-import
import netaddr
-import zmq
+#import zmq
from conf import settings
from conf import merge_spec
from core.results.results_constants import ResultsConstants
@@ -32,6 +35,7 @@ try:
# pylint: disable=wrong-import-position, import-error
sys.path.append(settings.getValue('PATHS')['trafficgen']['Trex']['src']['path'])
from trex_stl_lib.api import *
+ # from trex_stl_lib import trex_stl_exceptions
except ImportError:
# VSPERF performs detection of T-Rex api during testcase initialization. So if
     # T-Rex is requested and API is not available it will fail before this code
@@ -67,6 +71,21 @@ _EMPTY_STATS = {
'tx_pps': 0.0,
'tx_util': 0.0,}}
+# Default frame definition, which can be overridden by TRAFFIC['scapy'].
+# The content of the frame and its network layers are driven by TRAFFIC
+# dictionary, i.e. 'l2', 'l3', 'l4' and 'vlan' parts.
+_SCAPY_FRAME = {
+ '0' : 'Ether(src={Ether_src}, dst={Ether_dst})/'
+ 'Dot1Q(prio={Dot1Q_prio}, id={Dot1Q_id}, vlan={Dot1Q_vlan})/'
+ 'IP(proto={IP_proto}, src={IP_src}, dst={IP_dst})/'
+ '{IP_PROTO}(sport={IP_PROTO_sport}, dport={IP_PROTO_dport})',
+ '1' : 'Ether(src={Ether_dst}, dst={Ether_src})/'
+ 'Dot1Q(prio={Dot1Q_prio}, id={Dot1Q_id}, vlan={Dot1Q_vlan})/'
+ 'IP(proto={IP_proto}, src={IP_dst}, dst={IP_src})/'
+ '{IP_PROTO}(sport={IP_PROTO_dport}, dport={IP_PROTO_sport})',
+}
+
+
class Trex(ITrafficGenerator):
"""Trex Traffic generator wrapper."""
_logger = logging.getLogger(__name__)
@@ -83,6 +102,20 @@ class Trex(ITrafficGenerator):
self._trex_user = settings.getValue('TRAFFICGEN_TREX_USER')
self._stlclient = None
self._verification_params = None
+ self._show_packet_data = False
+
+ def show_packet_info(self, packet_a, packet_b):
+ """
+ Log packet layers to screen
+ :param packet_a: Scapy.layers packet
+ :param packet_b: Scapy.layers packet
+ :return: None
+ """
+ # we only want to show packet data once per test
+ if self._show_packet_data:
+ self._show_packet_data = False
+ self._logger.info(packet_a.show())
+ self._logger.info(packet_b.show())
def connect(self):
'''Connect to Trex traffic generator
@@ -91,11 +124,11 @@ class Trex(ITrafficGenerator):
the configuration file
'''
self._stlclient = STLClient()
- self._logger.info("TREX: In Trex connect method...")
+ self._logger.info("T-Rex: In Trex connect method...")
if self._trex_host_ip_addr:
cmd_ping = "ping -c1 " + self._trex_host_ip_addr
else:
- raise RuntimeError('TREX: Trex host not defined')
+ raise RuntimeError('T-Rex: Trex host not defined')
ping = subprocess.Popen(cmd_ping, shell=True, stderr=subprocess.PIPE)
output, error = ping.communicate()
@@ -103,7 +136,7 @@ class Trex(ITrafficGenerator):
if ping.returncode:
self._logger.error(error)
self._logger.error(output)
- raise RuntimeError('TREX: Cannot ping Trex host at ' + \
+ raise RuntimeError('T-Rex: Cannot ping Trex host at ' + \
self._trex_host_ip_addr)
connect_trex = "ssh " + self._trex_user + \
@@ -122,13 +155,18 @@ class Trex(ITrafficGenerator):
self._logger.error(error)
self._logger.error(output)
raise RuntimeError(
- 'TREX: Cannot locate Trex program at %s within %s' \
+ 'T-Rex: Cannot locate Trex program at %s within %s' \
% (self._trex_host_ip_addr, self._trex_base_dir))
- self._stlclient = STLClient(username=self._trex_user, server=self._trex_host_ip_addr,
- verbose_level=0)
- self._stlclient.connect()
- self._logger.info("TREX: Trex host successfully found...")
+ try:
+ self._stlclient = STLClient(username=self._trex_user, server=self._trex_host_ip_addr,
+ verbose_level='info')
+ self._stlclient.connect()
+ except STLError:
+ raise RuntimeError('T-Rex: Cannot connect to T-Rex server. Please check if it is '
+ 'running and that firewall allows connection to TCP port 4501.')
+
+ self._logger.info("T-Rex: Trex host successfully found...")
def disconnect(self):
"""Disconnect from the traffic generator.
@@ -140,38 +178,80 @@ class Trex(ITrafficGenerator):
:returns: None
"""
- self._logger.info("TREX: In trex disconnect method")
+ self._logger.info("T-Rex: In trex disconnect method")
self._stlclient.disconnect(stop_traffic=True, release_ports=True)
- @staticmethod
- def create_packets(traffic, ports_info):
+ def create_packets(self, traffic, ports_info):
"""Create base packet according to traffic specification.
If the traffic spec hasn't specified the srcmac and dstmac fields,
- packet will be create with mac address of trex server.
+ the packet will be created with the mac address of the trex server.
"""
- mac_add = [li['hw_mac'] for li in ports_info]
-
- if traffic and traffic['l2']['framesize'] > 0:
- if traffic['l2']['dstmac'] == '00:00:00:00:00:00' and \
- traffic['l2']['srcmac'] == '00:00:00:00:00:00':
- base_pkt_a = Ether(src=mac_add[0], dst=mac_add[1])/ \
- IP(proto=traffic['l3']['proto'], src=traffic['l3']['srcip'],
- dst=traffic['l3']['dstip'])/ \
- UDP(dport=traffic['l4']['dstport'], sport=traffic['l4']['srcport'])
- base_pkt_b = Ether(src=mac_add[1], dst=mac_add[0])/ \
- IP(proto=traffic['l3']['proto'], src=traffic['l3']['dstip'],
- dst=traffic['l3']['srcip'])/ \
- UDP(dport=traffic['l4']['srcport'], sport=traffic['l4']['dstport'])
- else:
- base_pkt_a = Ether(src=traffic['l2']['srcmac'], dst=traffic['l2']['dstmac'])/ \
- IP(proto=traffic['l3']['proto'], src=traffic['l3']['dstip'],
- dst=traffic['l3']['srcip'])/ \
- UDP(dport=traffic['l4']['dstport'], sport=traffic['l4']['srcport'])
+ if not traffic or traffic['l2']['framesize'] <= 0:
+ return (None, None)
- base_pkt_b = Ether(src=traffic['l2']['dstmac'], dst=traffic['l2']['srcmac'])/ \
- IP(proto=traffic['l3']['proto'], src=traffic['l3']['dstip'],
- dst=traffic['l3']['srcip'])/ \
- UDP(dport=traffic['l4']['srcport'], sport=traffic['l4']['dstport'])
+ if traffic['l2']['dstmac'] == '00:00:00:00:00:00' and \
+ traffic['l2']['srcmac'] == '00:00:00:00:00:00':
+
+ mac_add = [li['hw_mac'] for li in ports_info]
+ src_mac = mac_add[0]
+ dst_mac = mac_add[1]
+ else:
+ src_mac = traffic['l2']['srcmac']
+ dst_mac = traffic['l2']['dstmac']
+
+ if traffic['scapy']['enabled']:
+ base_pkt_a = traffic['scapy']['0']
+ base_pkt_b = traffic['scapy']['1']
+ else:
+ base_pkt_a = _SCAPY_FRAME['0']
+ base_pkt_b = _SCAPY_FRAME['1']
+
+ # check and remove network layers disabled by TRAFFIC dictionary
+ # Note: In general, it is possible to remove layers from scapy object by
+ # e.g. del base_pkt_a['IP']. However it doesn't work for all layers
+ # (e.g. Dot1Q). Thus it is safer to modify string with scapy frame definition
+ # directly, before it is converted to the real scapy object.
+ if not traffic['vlan']['enabled']:
+ self._logger.info('VLAN headers are disabled by TRAFFIC')
+ base_pkt_a = re.sub(r'(^|\/)Dot1Q?\([^\)]*\)', '', base_pkt_a)
+ base_pkt_b = re.sub(r'(^|\/)Dot1Q?\([^\)]*\)', '', base_pkt_b)
+ if not traffic['l3']['enabled']:
+ self._logger.info('IP headers are disabled by TRAFFIC')
+ base_pkt_a = re.sub(r'(^|\/)IP(v6)?\([^\)]*\)', '', base_pkt_a)
+ base_pkt_b = re.sub(r'(^|\/)IP(v6)?\([^\)]*\)', '', base_pkt_b)
+ if not traffic['l4']['enabled']:
+ self._logger.info('%s headers are disabled by TRAFFIC',
+ traffic['l3']['proto'].upper())
+ base_pkt_a = re.sub(r'(^|\/)(UDP|TCP|SCTP|{{IP_PROTO}}|{})\([^\)]*\)'.format(
+ traffic['l3']['proto'].upper()), '', base_pkt_a)
+ base_pkt_b = re.sub(r'(^|\/)(UDP|TCP|SCTP|{{IP_PROTO}}|{})\([^\)]*\)'.format(
+ traffic['l3']['proto'].upper()), '', base_pkt_b)
+
+ # pylint: disable=eval-used
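+ # the frame templates are plain strings with {placeholders}; fill them
+ # in via str.format() and evaluate the result into a scapy packet object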
+ base_pkt_a = eval(base_pkt_a.format(
+ Ether_src=repr(src_mac),
+ Ether_dst=repr(dst_mac),
+ Dot1Q_prio=traffic['vlan']['priority'],
+ Dot1Q_id=traffic['vlan']['cfi'],
+ Dot1Q_vlan=traffic['vlan']['id'],
+ IP_proto=repr(traffic['l3']['proto']),
+ IP_PROTO=traffic['l3']['proto'].upper(),
+ IP_src=repr(traffic['l3']['srcip']),
+ IP_dst=repr(traffic['l3']['dstip']),
+ IP_PROTO_sport=traffic['l4']['srcport'],
+ IP_PROTO_dport=traffic['l4']['dstport']))
+ base_pkt_b = eval(base_pkt_b.format(
+ Ether_src=repr(src_mac),
+ Ether_dst=repr(dst_mac),
+ Dot1Q_prio=traffic['vlan']['priority'],
+ Dot1Q_id=traffic['vlan']['cfi'],
+ Dot1Q_vlan=traffic['vlan']['id'],
+ IP_proto=repr(traffic['l3']['proto']),
+ IP_PROTO=traffic['l3']['proto'].upper(),
+ IP_src=repr(traffic['l3']['srcip']),
+ IP_dst=repr(traffic['l3']['dstip']),
+ IP_PROTO_sport=traffic['l4']['srcport'],
+ IP_PROTO_dport=traffic['l4']['dstport']))
return (base_pkt_a, base_pkt_b)
@@ -226,36 +306,87 @@ class Trex(ITrafficGenerator):
pkt_a = STLPktBuilder(pkt=base_pkt_a / payload_a)
pkt_b = STLPktBuilder(pkt=base_pkt_b / payload_b)
- stream_1 = STLStream(packet=pkt_a,
- name='stream_1',
- mode=STLTXCont(percentage=traffic['frame_rate']))
- stream_2 = STLStream(packet=pkt_b,
- name='stream_2',
- mode=STLTXCont(percentage=traffic['frame_rate']))
lat_pps = settings.getValue('TRAFFICGEN_TREX_LATENCY_PPS')
- if lat_pps > 0:
- stream_1_lat = STLStream(packet=pkt_a,
+ if traffic['traffic_type'] == 'burst':
+ if lat_pps > 0:
+ # latency statistics are requested; in case of frame burst we can enable
+ # statistics for all frames
+ stream_1 = STLStream(packet=pkt_a,
flow_stats=STLFlowLatencyStats(pg_id=0),
- name='stream_1_lat',
- mode=STLTXCont(pps=lat_pps))
- stream_2_lat = STLStream(packet=pkt_b,
+ name='stream_1',
+ mode=STLTXSingleBurst(percentage=traffic['frame_rate'],
+ total_pkts=traffic['burst_size']))
+ stream_2 = STLStream(packet=pkt_b,
flow_stats=STLFlowLatencyStats(pg_id=1),
- name='stream_2_lat',
- mode=STLTXCont(pps=lat_pps))
+ name='stream_2',
+ mode=STLTXSingleBurst(percentage=traffic['frame_rate'],
+ total_pkts=traffic['burst_size']))
+ else:
+ stream_1 = STLStream(packet=pkt_a,
+ name='stream_1',
+ mode=STLTXSingleBurst(percentage=traffic['frame_rate'],
+ total_pkts=traffic['burst_size']))
+ stream_2 = STLStream(packet=pkt_b,
+ name='stream_2',
+ mode=STLTXSingleBurst(percentage=traffic['frame_rate'],
+ total_pkts=traffic['burst_size']))
+ else:
+ stream_1 = STLStream(packet=pkt_a,
+ name='stream_1',
+ mode=STLTXCont(percentage=traffic['frame_rate']))
+ stream_2 = STLStream(packet=pkt_b,
+ name='stream_2',
+ mode=STLTXCont(percentage=traffic['frame_rate']))
+ # workaround for latency statistics, which can't be enabled for streams
+ # with high framerate due to the huge performance impact
+ if lat_pps > 0:
+ stream_1_lat = STLStream(packet=pkt_a,
+ flow_stats=STLFlowLatencyStats(pg_id=0),
+ name='stream_1_lat',
+ mode=STLTXCont(pps=lat_pps))
+ stream_2_lat = STLStream(packet=pkt_b,
+ flow_stats=STLFlowLatencyStats(pg_id=1),
+ name='stream_2_lat',
+ mode=STLTXCont(pps=lat_pps))
return (stream_1, stream_2, stream_1_lat, stream_2_lat)
- def generate_traffic(self, traffic, duration):
+
+ # pylint: disable=too-many-locals, too-many-statements
+ def generate_traffic(self, traffic, duration, disable_capture=False):
"""The method that generate a stream
"""
my_ports = [0, 1]
+
+ # initialize ports
self._stlclient.reset(my_ports)
+ self._stlclient.remove_all_captures()
+ self._stlclient.set_service_mode(ports=my_ports, enabled=False)
+
ports_info = self._stlclient.get_port_info(my_ports)
+
+ # get max support speed
+ max_speed = 0
+ if settings.getValue('TRAFFICGEN_TREX_FORCE_PORT_SPEED'):
+ max_speed = settings.getValue('TRAFFICGEN_TREX_PORT_SPEED')
+ elif ports_info[0]['supp_speeds']:
+ max_speed_1 = max(ports_info[0]['supp_speeds'])
+ max_speed_2 = max(ports_info[1]['supp_speeds'])
+ else:
+ # if the ports do not report supported speeds and no speed was set manually, just assume 10G
+ max_speed = 10000
+ if not max_speed:
+ # since we can only control both ports at once take the lower of the two
+ max_speed = min(max_speed_1, max_speed_2)
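+ # port speeds are reported in Mbps; convert to Gbps and scale by the
+ # requested frame rate percentage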
+ gbps_speed = (max_speed / 1000) * (float(traffic['frame_rate']) / 100.0)
+ self._logger.debug('Starting traffic at %s Gbps speed', gbps_speed)
+
# for SR-IOV
if settings.getValue('TRAFFICGEN_TREX_PROMISCUOUS'):
self._stlclient.set_port_attr(my_ports, promiscuous=True)
- packet_1, packet_2 = Trex.create_packets(traffic, ports_info)
+ packet_1, packet_2 = self.create_packets(traffic, ports_info)
+ self.show_packet_info(packet_1, packet_2)
stream_1, stream_2, stream_1_lat, stream_2_lat = Trex.create_streams(packet_1, packet_2, traffic)
self._stlclient.add_streams(stream_1, ports=[0])
self._stlclient.add_streams(stream_2, ports=[1])
@@ -264,10 +395,104 @@ class Trex(ITrafficGenerator):
self._stlclient.add_streams(stream_1_lat, ports=[0])
self._stlclient.add_streams(stream_2_lat, ports=[1])
+ # enable traffic capture if requested
+ pcap_id = {}
+ if traffic['capture']['enabled'] and not disable_capture:
+ for ports in ['tx_ports', 'rx_ports']:
+ if traffic['capture'][ports]:
+ pcap_dir = ports[:2]
+ self._logger.info("T-Rex starting %s traffic capture", pcap_dir.upper())
+ capture = {ports : traffic['capture'][ports],
+ 'limit' : traffic['capture']['count'],
+ 'bpf_filter' : traffic['capture']['filter']}
+ self._stlclient.set_service_mode(ports=traffic['capture'][ports], enabled=True)
+ pcap_id[pcap_dir] = self._stlclient.start_capture(**capture)
+
self._stlclient.clear_stats()
- self._stlclient.start(ports=[0, 1], force=True, duration=duration)
- self._stlclient.wait_on_traffic(ports=[0, 1])
+ # if the user did not start up T-Rex server with more than default cores, use default mask.
+ # Otherwise use mask to take advantage of multiple cores.
+ try:
+ self._stlclient.start(ports=my_ports, force=True, duration=duration, mult="{}gbps".format(gbps_speed),
+ core_mask=self._stlclient.CORE_MASK_PIN)
+ except STLError:
+ self._stlclient.start(ports=my_ports, force=True, duration=duration, mult="{}gbps".format(gbps_speed))
+
+ if settings.getValue('TRAFFICGEN_TREX_LIVE_RESULTS'):
+ filec = os.path.join(settings.getValue('RESULTS_PATH'),
+ settings.getValue('TRAFFICGEN_TREX_LC_FILE'))
+ filee = os.path.join(settings.getValue('RESULTS_PATH'),
+ settings.getValue('TRAFFICGEN_TREX_LE_FILE'))
+ pgids = self._stlclient.get_active_pgids()
+ rx_port_0 = 1
+ tx_port_0 = 0
+ rx_port_1 = 0
+ tx_port_1 = 1
+ with open(filec, 'a') as fcp, open(filee, 'a') as fep:
+ fcp.write("ts,rx_port,tx_port,rx_pkts,tx_pkts,rx_pps,tx_pps,"+
+ "rx_bps_num,rx_bps_den,tx_bps_num,tx_bps_den\n")
+ fep.write('ts,dropped,ooo,dup,seq_too_high,seq_too_low\n')
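+ # sample flow and latency statistics once per second while traffic
+ # is active and append them to the live results CSV files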
+ while True:
+ tr_status = self._stlclient.is_traffic_active(ports=my_ports)
+ if not tr_status:
+ break
+ time.sleep(1)
+ stats = self._stlclient.get_pgid_stats(pgids['flow_stats'])
+ lat_stats = stats['latency'].get(0)
+ flow_stats_0 = stats['flow_stats'].get(0)
+ flow_stats_1 = stats['flow_stats'].get(1)
+ if flow_stats_0:
+ rx_pkts = flow_stats_0['rx_pkts'][rx_port_0]
+ tx_pkts = flow_stats_0['tx_pkts'][tx_port_0]
+ rx_pps = flow_stats_0['rx_pps'][rx_port_0]
+ tx_pps = flow_stats_0['tx_pps'][tx_port_0]
+ rx_bps = flow_stats_0['rx_bps'][rx_port_0]
+ tx_bps = flow_stats_0['tx_bps'][tx_port_0]
+ rx_bps_l1 = flow_stats_0['rx_bps_l1'][rx_port_0]
+ tx_bps_l1 = flow_stats_0['tx_bps_l1'][tx_port_0]
+ # https://github.com/cisco-system-traffic-generator/\
+ # trex-core/blob/master/scripts/automation/\
+ # trex_control_plane/interactive/trex/examples/\
+ # stl/stl_flow_latency_stats.py
+ fcp.write("{10},{8},{9},{0},{1},{2},{3},{4},{5},{6},{7}\n"
+ .format(rx_pkts, tx_pkts, rx_pps, tx_pps,
+ rx_bps, rx_bps_l1, tx_bps, tx_bps_l1,
+ rx_port_0, tx_port_0, time.time()))
+ if flow_stats_1:
+ rx_pkts = flow_stats_1['rx_pkts'][rx_port_1]
+ tx_pkts = flow_stats_1['tx_pkts'][tx_port_1]
+ rx_pps = flow_stats_1['rx_pps'][rx_port_1]
+ tx_pps = flow_stats_1['tx_pps'][tx_port_1]
+ rx_bps = flow_stats_1['rx_bps'][rx_port_1]
+ tx_bps = flow_stats_1['tx_bps'][tx_port_1]
+ rx_bps_l1 = flow_stats_1['rx_bps_l1'][rx_port_1]
+ tx_bps_l1 = flow_stats_1['tx_bps_l1'][tx_port_1]
+ fcp.write("{10},{8},{9},{0},{1},{2},{3},{4},{5},{6},{7}\n"
+ .format(rx_pkts, tx_pkts, rx_pps, tx_pps,
+ rx_bps, rx_bps_l1, tx_bps, tx_bps_l1,
+ rx_port_1, tx_port_1, time.time()))
+ if lat_stats:
+ drops = lat_stats['err_cntrs']['dropped']
+ ooo = lat_stats['err_cntrs']['out_of_order']
+ dup = lat_stats['err_cntrs']['dup']
+ sth = lat_stats['err_cntrs']['seq_too_high']
+ stl = lat_stats['err_cntrs']['seq_too_low']
+ fep.write('{5},{0},{1},{2},{3},{4}\n'
+ .format(drops, ooo, dup, sth, stl, time.time()))
+ else:
+ self._stlclient.wait_on_traffic(ports=my_ports)
stats = self._stlclient.get_stats(sync_now=True)
+
+ # export captured data into pcap file if possible
+ if pcap_id:
+ for pcap_dir in pcap_id:
+ pcap_file = 'capture_{}.pcap'.format(pcap_dir)
+ self._stlclient.stop_capture(pcap_id[pcap_dir]['id'],
+ os.path.join(settings.getValue('RESULTS_PATH'), pcap_file))
+ stats['capture_{}'.format(pcap_dir)] = pcap_file
+ self._logger.info("T-Rex writing %s traffic capture into %s", pcap_dir.upper(), pcap_file)
+ # disable service mode for all ports used by Trex
+ self._stlclient.set_service_mode(ports=my_ports, enabled=False)
+
return stats
@staticmethod
@@ -306,25 +531,39 @@ class Trex(ITrafficGenerator):
result[ResultsConstants.FRAME_LOSS_PERCENT] = 100
if settings.getValue('TRAFFICGEN_TREX_LATENCY_PPS') > 0 and stats['latency']:
- result[ResultsConstants.MIN_LATENCY_NS] = (
- '{:.3f}'.format(
- (float(min(stats["latency"][0]["latency"]["total_min"],
- stats["latency"][1]["latency"]["total_min"])))))
-
- result[ResultsConstants.MAX_LATENCY_NS] = (
- '{:.3f}'.format(
- (float(max(stats["latency"][0]["latency"]["total_max"],
- stats["latency"][1]["latency"]["total_max"])))))
-
- result[ResultsConstants.AVG_LATENCY_NS] = (
- '{:.3f}'.format(
- float((stats["latency"][0]["latency"]["average"]+
- stats["latency"][1]["latency"]["average"])/2)))
+ try:
+ result[ResultsConstants.MIN_LATENCY_NS] = (
+ '{:.3f}'.format(
+ (float(min(stats["latency"][0]["latency"]["total_min"],
+ stats["latency"][1]["latency"]["total_min"])))))
+ except TypeError:
+ result[ResultsConstants.MIN_LATENCY_NS] = 'Unknown'
+
+ try:
+ result[ResultsConstants.MAX_LATENCY_NS] = (
+ '{:.3f}'.format(
+ (float(max(stats["latency"][0]["latency"]["total_max"],
+ stats["latency"][1]["latency"]["total_max"])))))
+ except TypeError:
+ result[ResultsConstants.MAX_LATENCY_NS] = 'Unknown'
+
+ try:
+ result[ResultsConstants.AVG_LATENCY_NS] = (
+ '{:.3f}'.format(
+ float((stats["latency"][0]["latency"]["average"]+
+ stats["latency"][1]["latency"]["average"])/2)))
+ except TypeError:
+ result[ResultsConstants.AVG_LATENCY_NS] = 'Unknown'
else:
result[ResultsConstants.MIN_LATENCY_NS] = 'Unknown'
result[ResultsConstants.MAX_LATENCY_NS] = 'Unknown'
result[ResultsConstants.AVG_LATENCY_NS] = 'Unknown'
+
+ if 'capture_tx' in stats:
+ result[ResultsConstants.CAPTURE_TX] = stats['capture_tx']
+ if 'capture_rx' in stats:
+ result[ResultsConstants.CAPTURE_RX] = stats['capture_rx']
return result
def learning_packets(self, traffic):
@@ -336,7 +575,9 @@ class Trex(ITrafficGenerator):
self._logger.info("T-Rex sending learning packets")
learning_thresh_traffic = copy.deepcopy(traffic)
learning_thresh_traffic["frame_rate"] = 1
- self.generate_traffic(learning_thresh_traffic, settings.getValue("TRAFFICGEN_TREX_LEARNING_DURATION"))
+ self.generate_traffic(learning_thresh_traffic,
+ settings.getValue("TRAFFICGEN_TREX_LEARNING_DURATION"),
+ disable_capture=True)
self._logger.info("T-Rex finished learning packets")
time.sleep(3) # allow packets to complete before starting test traffic
@@ -353,9 +594,14 @@ class Trex(ITrafficGenerator):
:return: passing stats as dictionary
"""
threshold = settings.getValue('TRAFFICGEN_TREX_RFC2544_TPUT_THRESHOLD')
+ max_repeat = settings.getValue('TRAFFICGEN_TREX_RFC2544_MAX_REPEAT')
+ loss_verification = settings.getValue('TRAFFICGEN_TREX_RFC2544_BINARY_SEARCH_LOSS_VERIFICATION')
+ if loss_verification:
+ self._logger.info("Running Binary Search with Loss Verification")
stats_ok = _EMPTY_STATS
new_params = copy.deepcopy(traffic)
iteration = 1
+ repeat = 0
left = boundaries['left']
right = boundaries['right']
center = boundaries['center']
@@ -371,17 +617,28 @@ class Trex(ITrafficGenerator):
if test_lossrate <= lossrate:
# save the last passing trial for verification
self._verification_params = copy.deepcopy(new_params)
- self._logger.debug("Iteration: %s, frame rate: %s, throughput_rx_fps: %s, frame_loss_percent: %s",
- iteration, "{:.3f}".format(new_params['frame_rate']), stats['total']['rx_pps'],
- "{:.3f}".format(test_lossrate))
+ packets_lost = stats['total']['opackets'] - stats['total']['ipackets']
+ self._logger.debug("Iteration: %s, frame rate: %s, throughput_rx_fps: %s," +
+ " frames lost %s, frame_loss_percent: %s", iteration,
+ "{:.3f}".format(new_params['frame_rate']), stats['total']['rx_pps'],
+ packets_lost, "{:.3f}".format(test_lossrate))
if test_lossrate == 0.0 and new_params['frame_rate'] == traffic['frame_rate']:
return copy.deepcopy(stats)
elif test_lossrate > lossrate:
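+ # with loss verification enabled, a failing trial is repeated up to
+ # max_repeat times before the upper bound of the search is lowered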
+ if loss_verification:
+ if repeat < max_repeat:
+ repeat += 1
+ iteration += 1
+ continue
+ else:
+ repeat = 0
right = center
center = (left + right) / 2
new_params = copy.deepcopy(traffic)
new_params['frame_rate'] = center
else:
+ if loss_verification:
+ repeat = 0
stats_ok = copy.deepcopy(stats)
left = center
center = (left + right) / 2
@@ -396,6 +653,8 @@ class Trex(ITrafficGenerator):
self._logger.info("In Trex send_cont_traffic method")
self._params.clear()
+ self._show_packet_data = True
+
self._params['traffic'] = self.traffic_defaults.copy()
if traffic:
self._params['traffic'] = merge_spec(
@@ -403,6 +662,7 @@ class Trex(ITrafficGenerator):
if settings.getValue('TRAFFICGEN_TREX_LEARNING_MODE'):
self.learning_packets(traffic)
+ self._logger.info("T-Rex sending traffic")
stats = self.generate_traffic(traffic, duration)
return self.calculate_results(stats)
@@ -423,6 +683,7 @@ class Trex(ITrafficGenerator):
"""
self._logger.info("In Trex send_rfc2544_throughput method")
self._params.clear()
+ self._show_packet_data = True
self._params['traffic'] = self.traffic_defaults.copy()
if traffic:
self._params['traffic'] = merge_spec(
@@ -479,9 +740,25 @@ class Trex(ITrafficGenerator):
raise NotImplementedError(
'Trex wait rfc2544 throughput not implemented')
- def send_burst_traffic(self, traffic=None, numpkts=100, duration=5):
- raise NotImplementedError(
- 'Trex send burst traffic not implemented')
+ def send_burst_traffic(self, traffic=None, duration=20):
+ """See ITrafficGenerator for description
+ """
+ self._logger.info("In Trex send_burst_traffic method")
+ self._params.clear()
+
+ self._params['traffic'] = self.traffic_defaults.copy()
+ if traffic:
+ self._params['traffic'] = merge_spec(
+ self._params['traffic'], traffic)
+
+ if settings.getValue('TRAFFICGEN_TREX_LEARNING_MODE'):
+ self.learning_packets(traffic)
+ self._logger.info("T-Rex sending traffic")
+ stats = self.generate_traffic(traffic, duration)
+
+ time.sleep(3) # allow packets to complete before reading stats
+
+ return self.calculate_results(stats)
def send_rfc2544_back2back(self, traffic=None, tests=1, duration=30,
lossrate=0.0):
diff --git a/tools/pkt_gen/xena/XenaDriver.py b/tools/pkt_gen/xena/XenaDriver.py
index 6e39e47a..ac9cef1c 100644
--- a/tools/pkt_gen/xena/XenaDriver.py
+++ b/tools/pkt_gen/xena/XenaDriver.py
@@ -30,6 +30,7 @@ through socket commands and returning different statistics.
"""
import locale
import logging
+import math
import socket
import struct
import sys
@@ -86,6 +87,26 @@ CMD_VERSION = 'c_versionno ?'
_LOCALE = locale.getlocale()[1]
_LOGGER = logging.getLogger(__name__)
+class ModSet(object):
+ """
+ Mod set attribute tracker
+ """
+ def __init__(self, **kwargs):
+ """ Constructor
+ All mods default to False
+ :param kwargs: Any class attribute can be set here.
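+ e.g. ModSet(mod_src_ip=True, mod_dst_ip=True)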
+ """
+ self.mod_src_mac = False
+ self.mod_dst_mac = False
+ self.mod_src_ip = False
+ self.mod_dst_ip = False
+ self.mod_src_port = False
+ self.mod_dst_port = False
+
+ for (key, value) in kwargs.items():
+ if hasattr(self, key):
+ setattr(self, key, value)
+
class SimpleSocket(object):
"""
@@ -170,8 +191,7 @@ class KeepAliveThread(threading.Thread):
self.finished = threading.Event()
self.setDaemon(True)
_LOGGER.debug(
- 'Xena Socket keep alive thread initiated, interval ' +
- '{} seconds'.format(self.interval))
+ 'Xena Socket keep alive thread initiated, interval %s seconds', self.interval)
def stop(self):
""" Thread stop. See python thread docs for more info
@@ -640,57 +660,98 @@ class XenaStream(object):
"""
return self._stream_id
- def enable_multistream(self, flows, layer):
+ def enable_multistream(self, flows, mod_class):
"""
- Basic implementation of multi stream. Enable multi stream by setting
- modifiers on the stream
- :param flows: Numbers of flows or end range
- :param layer: layer to enable multi stream as str. Acceptable values
- are L2, L3, or L4
+ Implementation of multi stream. Enable multi stream by setting
+ modifiers on the stream. If no mods are selected, src_ip mod will be used.
+ :param flows: Number of flows; values greater than 65535 will be square
+ rooted to the closest value. Xena mods are limited to 4 bytes.
+ :param mod_class: ModSet object
:return: True if success False otherwise
"""
if not self._header_protocol:
raise RuntimeError(
"Please set a protocol header before calling this method.")
-
- # byte offsets for setting the modifier
- offsets = {
- 'L2': [0, 6],
- 'L3': [32, 36] if 'VLAN' in self._header_protocol else [28, 32],
- 'L4': [38, 40] if 'VLAN' in self._header_protocol else [34, 36]
- }
-
- responses = list()
- if layer in offsets.keys() and flows > 0:
- command = make_port_command(
- CMD_STREAM_MODIFIER_COUNT + ' [{}]'.format(self._stream_id) +
- ' 2', self._xena_port)
- responses.append(self._manager.driver.ask_verify(command))
- command = make_port_command(
- CMD_STREAM_MODIFIER + ' [{},0] {} 0xFFFF0000 INC 1'.format(
- self._stream_id, offsets[layer][0]), self._xena_port)
- responses.append(self._manager.driver.ask_verify(command))
- command = make_port_command(
- CMD_STREAM_MODIFIER_RANGE + ' [{},0] 0 1 {}'.format(
- self._stream_id, flows), self._xena_port)
- responses.append(self._manager.driver.ask_verify(command))
- command = make_port_command(
- CMD_STREAM_MODIFIER + ' [{},1] {} 0xFFFF0000 INC 1'.format(
- self._stream_id, offsets[layer][1]), self._xena_port)
- responses.append(self._manager.driver.ask_verify(command))
- command = make_port_command(
- CMD_STREAM_MODIFIER_RANGE + ' [{},1] 0 1 {}'.format(
- self._stream_id, flows), self._xena_port)
- responses.append(self._manager.driver.ask_verify(command))
- return all(responses) # return True if they all worked
- elif flows < 1:
- _LOGGER.warning(
- 'No flows specified in enable multistream. Bypassing...')
- return False
+ # maximum value for a Xena modifier is 65535 (16-bit unsigned int). If
+ # flows is greater than 65535 we have to use two mods, getting as close
+ # as we can by square rooting the flow count.
+ if flows > 4294836225:
+ _LOGGER.debug('Flow count exceeds highest value, changing to 4294836225')
+ flows = 4294836225
+ if flows <= 65535:
+ mod1 = flows
+ mod2 = 0
else:
- raise NotImplementedError(
- "Non-implemented stream layer in method enable multistream ",
- "layer=", layer)
+ mod1, mod2 = int(math.sqrt(flows)), int(math.sqrt(flows))
+ _LOGGER.debug('Flow count modified to %s', mod1*mod2)
+ offset_list = list()
+ if not any([mod_class.mod_src_mac, mod_class.mod_dst_mac, mod_class.mod_src_ip,
+ mod_class.mod_dst_ip, mod_class.mod_src_port, mod_class.mod_dst_port]):
+ # no mods were selected, default to src ip only
+ mod_class.mod_src_ip = True
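+ # byte offsets of the header fields to be modified within the frame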
+ if mod_class.mod_src_mac:
+ offset_list.append(3)
+ if mod_class.mod_dst_mac:
+ offset_list.append(9)
+ if mod_class.mod_src_ip:
+ offset_list.append(32 if 'VLAN' in self._header_protocol else 28)
+ if mod_class.mod_dst_ip:
+ offset_list.append(36 if 'VLAN' in self._header_protocol else 32)
+ if mod_class.mod_src_port:
+ offset_list.append(38 if 'VLAN' in self._header_protocol else 34)
+ if mod_class.mod_dst_port:
+ offset_list.append(40 if 'VLAN' in self._header_protocol else 36)
+ # calculate how many mods we have to do
+ countertotal = len(offset_list)
+ if mod2:
+ # to handle flows greater than 65535 we will need more mods for
+ # layer 2 and 3
+ for mod in [mod_class.mod_src_mac, mod_class.mod_dst_mac,
+ mod_class.mod_src_ip, mod_class.mod_dst_ip]:
+ if mod:
+ countertotal += 1
+ command = make_port_command(
+ CMD_STREAM_MODIFIER_COUNT + ' [{}]'.format(self._stream_id) +
+ ' {}'.format(countertotal), self._xena_port)
+ responses = list()
+ responses.append(self._manager.driver.ask_verify(command))
+ modcounter = 0
+ for offset in offset_list:
+ if (mod_class.mod_dst_port or mod_class.mod_src_port) and \
+ offset >= (38 if 'VLAN' in self._header_protocol else 34):
+ # only do a 1 mod for udp ports at max 65535
+ newmod1 = 65535 if flows >= 65535 else flows
+ command = make_port_command(
+ CMD_STREAM_MODIFIER + ' [{},{}] {} 0xFFFF0000 INC 1'.format(
+ self._stream_id, modcounter, offset), self._xena_port)
+ responses.append(self._manager.driver.ask_verify(command))
+ command = make_port_command(
+ CMD_STREAM_MODIFIER_RANGE + ' [{},{}] 0 1 {}'.format(
+ self._stream_id, modcounter, newmod1 - 1), self._xena_port)
+ responses.append(self._manager.driver.ask_verify(command))
+ else:
+ command = make_port_command(
+ CMD_STREAM_MODIFIER + ' [{},{}] {} 0xFFFF0000 INC 1'.format(
+ self._stream_id, modcounter, offset), self._xena_port)
+ responses.append(self._manager.driver.ask_verify(command))
+ command = make_port_command(
+ CMD_STREAM_MODIFIER_RANGE + ' [{},{}] 0 1 {}'.format(
+ self._stream_id, modcounter, mod1 - 1), self._xena_port)
+ responses.append(self._manager.driver.ask_verify(command))
+ # if we have a second modifier, set it to mod2 and make it
+ # increment once every full rotation of mod 1
+ if mod2:
+ modcounter += 1
+ command = make_port_command(
+ CMD_STREAM_MODIFIER + ' [{},{}] {} 0xFFFF0000 INC {}'.format(
+ self._stream_id, modcounter, offset-2, mod1), self._xena_port)
+ responses.append(self._manager.driver.ask_verify(command))
+ command = make_port_command(
+ CMD_STREAM_MODIFIER_RANGE + ' [{},{}] 0 1 {}'.format(
+ self._stream_id, modcounter, mod2), self._xena_port)
+ responses.append(self._manager.driver.ask_verify(command))
+ modcounter += 1
+ return all(responses) # return True if they all worked
def get_stream_data(self):
"""
@@ -904,7 +965,7 @@ class XenaRXStats(object):
statdict[entry_id] = self._pack_stats(param, 3)
elif param[1] == 'PR_TPLDS':
tid_list = self._pack_tplds_stats(param, 2)
- if len(tid_list):
+ if tid_list:
statdict['pr_tplds'] = tid_list
elif param[1] == 'PR_TPLDTRAFFIC':
if 'pr_tpldstraffic' in statdict:
diff --git a/tools/pkt_gen/xena/json/xena_json.py b/tools/pkt_gen/xena/json/xena_json.py
index b1eed720..e56b4125 100644
--- a/tools/pkt_gen/xena/json/xena_json.py
+++ b/tools/pkt_gen/xena/json/xena_json.py
@@ -26,10 +26,9 @@ Xena JSON module
from collections import OrderedDict
import locale
import logging
+import math
import os
-import scapy.layers.inet as inet
-
from tools.pkt_gen.xena.json import json_utilities
_LOGGER = logging.getLogger(__name__)
@@ -73,30 +72,87 @@ class XenaJSON(object):
3: ('Dest IP Addr', 'Src IP Addr'),
4: ('Dest Port', 'Src Port')
}
- segments = [
- {
- "Offset": 0,
- "Mask": "//8=", # mask of 255/255
- "Action": "INC",
- "StartValue": 0,
- "StopValue": stop_value,
- "StepValue": 1,
- "RepeatCount": 1,
- "SegmentId": seg_uuid,
- "FieldName": field_name[int(layer)][0]
- },
- {
- "Offset": 0,
- "Mask": "//8=", # mask of 255/255
- "Action": "INC",
- "StartValue": 0,
- "StopValue": stop_value,
- "StepValue": 1,
- "RepeatCount": 1,
- "SegmentId": seg_uuid,
- "FieldName": field_name[int(layer)][1]
- }
- ]
+
+ if stop_value > 4294836225:
+ _LOGGER.debug('Flow count exceeds highest value, changing to 4294836225')
+ stop_value = 4294836225
+
+ if stop_value <= 65535 or layer == 4:
+ segments = [
+ {
+ "Offset": 0 if layer == 4 else 2,
+ "Mask": "//8=", # mask of 255/255
+ "Action": "INC",
+ "StartValue": 0,
+ "StopValue": stop_value - 1,
+ "StepValue": 1,
+ "RepeatCount": 1,
+ "SegmentId": seg_uuid,
+ "FieldName": field_name[int(layer)][0]
+ },
+ {
+ "Offset": 0 if layer == 4 else 2,
+ "Mask": "//8=", # mask of 255/255
+ "Action": "INC",
+ "StartValue": 0,
+ "StopValue": stop_value - 1,
+ "StepValue": 1,
+ "RepeatCount": 1,
+ "SegmentId": seg_uuid,
+ "FieldName": field_name[int(layer)][1]
+ }
+ ]
+ else:
+ stop_value = int(math.sqrt(stop_value))
+ _LOGGER.debug('Flow count modified to %s', stop_value * stop_value)
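+ # split the flows across paired modifiers: the low-order field
+ # increments every frame, while the high-order field repeats each
+ # value 'stop_value' times, i.e. steps once per full rotation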
+ segments = [
+ {
+ "Offset": 0 if layer == 3 else 1,
+ "Mask": "//8=", # mask of 255/255
+ "Action": "INC",
+ "StartValue": 0,
+ "StopValue": stop_value - 1,
+ "StepValue": 1,
+ "RepeatCount": stop_value,
+ "SegmentId": seg_uuid,
+ "FieldName": field_name[int(layer)][0]
+ },
+ {
+ "Offset": 2 if layer == 3 else 3,
+ "Mask": "//8=", # mask of 255/255
+ "Action": "INC",
+ "StartValue": 0,
+ "StopValue": stop_value - 1,
+ "StepValue": 1,
+ "RepeatCount": 1,
+ "SegmentId": seg_uuid,
+ "FieldName": field_name[int(layer)][0]
+ },
+ {
+ "Offset": 0 if layer == 3 else 1,
+ "Mask": "//8=", # mask of 255/255
+ "Action": "INC",
+ "StartValue": 0,
+ "StopValue": stop_value - 1,
+ "StepValue": 1,
+ "RepeatCount": stop_value,
+ "SegmentId": seg_uuid,
+ "FieldName": field_name[int(layer)][1]
+ },
+ {
+ "Offset": 2 if layer == 3 else 3,
+ "Mask": "//8=", # mask of 255/255
+ "Action": "INC",
+ "StartValue": 0,
+ "StopValue": stop_value - 1,
+ "StepValue": 1,
+ "RepeatCount": 1,
+ "SegmentId": seg_uuid,
+ "FieldName": field_name[int(layer)][1]
+ }
+ ]
+
self.json_data['StreamProfileHandler']['EntityList'][entity][
'StreamConfig']['HwModifiers'] = (segments)
@@ -279,6 +335,10 @@ class XenaJSON(object):
:param kwargs: Extra params per scapy usage.
:return: None
"""
+ # import can't be performed at module level, because it conflicts with import
+ # of customized scapy version by T-Rex
+ import scapy.layers.inet as inet
+
self.packet_data['layer2'] = [
inet.Ether(dst=dst_mac, src=src_mac, **kwargs),
inet.Ether(dst=src_mac, src=dst_mac, **kwargs)]
@@ -293,6 +353,10 @@ class XenaJSON(object):
:param kwargs: Extra params per scapy usage
:return: None
"""
+ # import can't be performed at module level, because it conflicts with import
+ # of customized scapy version by T-Rex
+ import scapy.layers.inet as inet
+
self.packet_data['layer3'] = [
inet.IP(src=src_ip, dst=dst_ip, proto=protocol.lower(), **kwargs),
inet.IP(src=dst_ip, dst=src_ip, proto=protocol.lower(), **kwargs)]
@@ -305,6 +369,10 @@ class XenaJSON(object):
:param kwargs: Extra params per scapy usage
:return: None
"""
+ # import can't be performed at module level, because it conflicts with import
+ # of customized scapy version by T-Rex
+ import scapy.layers.inet as inet
+
self.packet_data['layer4'] = [
inet.UDP(sport=source_port, dport=destination_port, **kwargs),
inet.UDP(sport=source_port, dport=destination_port, **kwargs)]
@@ -316,6 +384,10 @@ class XenaJSON(object):
:param kwargs: Extra params per scapy usage
:return: None
"""
+ # import can't be performed at module level, because it conflicts with import
+ # of customized scapy version by T-Rex
+ import scapy.layers.inet as inet
+
self.packet_data['vlan'] = [
inet.Dot1Q(vlan=vlan_id, **kwargs),
inet.Dot1Q(vlan=vlan_id, **kwargs)]
diff --git a/tools/pkt_gen/xena/xena.py b/tools/pkt_gen/xena/xena.py
index 19b44f0b..3adc8294 100755
--- a/tools/pkt_gen/xena/xena.py
+++ b/tools/pkt_gen/xena/xena.py
@@ -32,8 +32,6 @@ import xml.etree.ElementTree as ET
from collections import OrderedDict
from time import sleep
-import scapy.layers.inet as inet
-
from conf import merge_spec
from conf import settings
from core.results.results_constants import ResultsConstants
@@ -41,6 +39,7 @@ from tools.pkt_gen.trafficgen.trafficgen import ITrafficGenerator
from tools.pkt_gen.xena.XenaDriver import (
aggregate_stats,
line_percentage,
+ ModSet,
XenaSocketDriver,
XenaManager,
)
@@ -149,6 +148,10 @@ class Xena(ITrafficGenerator):
:param reverse: Swap source and destination info when building header
:return: packet header in hex
"""
+ # import can't be performed at module level, because it conflicts with import
+ # of customized scapy version by T-Rex
+ import scapy.layers.inet as inet
+
srcmac = self._params['traffic']['l2'][
'srcmac'] if not reverse else self._params['traffic']['l2'][
'dstmac']
@@ -274,10 +277,6 @@ class Xena(ITrafficGenerator):
enable the pairs topology
:return: None
"""
- # set duplex mode, this code is valid, pylint complaining with a
- # warning that many have complained about online.
- # pylint: disable=redefined-variable-type
-
try:
if self._params['traffic']['bidir'] == "True":
j_file = XenaJSONMesh()
@@ -285,6 +284,9 @@ class Xena(ITrafficGenerator):
j_file = XenaJSONBlocks()
elif bonding_test:
j_file = XenaJSONPairs()
+ else: # just default to mesh config
+ self._logger.error('Invalid traffic type, defaulting to Mesh config')
+ j_file = XenaJSONMesh()
j_file.set_chassis_info(
settings.getValue('TRAFFICGEN_XENA_IP'),
@@ -348,7 +350,7 @@ class Xena(ITrafficGenerator):
id=self._params['traffic']['vlan']['cfi'],
prio=self._params['traffic']['vlan']['priority'])
j_file.add_header_segments(
- flows=self._params['traffic']['multistream'],
+ flows=self._params['traffic']['multistream'] - 1,
multistream_layer=self._params['traffic']['stream_type'])
j_file.write_config(os.path.join(
@@ -456,9 +458,17 @@ class Xena(ITrafficGenerator):
port.micro_tpld_enable()
if self._params['traffic']['multistream']:
+ if self._params['traffic']['stream_type'] == 'L2':
+ modobj = ModSet(mod_src_mac=True, mod_dst_mac=True)
+ elif self._params['traffic']['stream_type'] == 'L3':
+ modobj = ModSet(mod_src_ip=True, mod_dst_ip=True)
+ elif self._params['traffic']['stream_type'] == 'L4':
+ modobj = ModSet(mod_src_port=True, mod_dst_port=True)
+ else:
+ self._logger.error('Invalid segment for multistream. Using L2..')
+ modobj = ModSet(mod_src_mac=True, mod_dst_mac=True)
stream.enable_multistream(
- flows=self._params['traffic']['multistream'],
- layer=self._params['traffic']['stream_type'])
+ flows=self._params['traffic']['multistream'], mod_class=modobj)
s1_p0 = self.xmanager.ports[0].add_stream()
setup_stream(s1_p0, self.xmanager.ports[0], 0)
@@ -568,7 +578,7 @@ class Xena(ITrafficGenerator):
self._xsocket.disconnect()
self._xsocket = None
- def send_burst_traffic(self, traffic=None, numpkts=100, duration=20):
+ def send_burst_traffic(self, traffic=None, duration=20):
"""Send a burst of traffic.
See ITrafficGenerator for description
@@ -579,7 +589,7 @@ class Xena(ITrafficGenerator):
if traffic:
self._params['traffic'] = merge_spec(self._params['traffic'],
traffic)
- self._start_traffic_api(numpkts)
+ self._start_traffic_api(traffic['burst_size'])
return self._stop_api_traffic()
def send_cont_traffic(self, traffic=None, duration=20):
diff --git a/tools/report/report.py b/tools/report/report.py
index b3f15c1b..5d05e7ad 100644
--- a/tools/report/report.py
+++ b/tools/report/report.py
@@ -137,7 +137,6 @@ def generate(testcase):
'tests': tests,
}
i = 0
- # pylint: disable=no-member
for output_file in output_files:
template = template_env.get_template(_TEMPLATE_FILES[i])
output_text = template.render(template_vars)
diff --git a/tools/report/report_foot.rst b/tools/report/report_foot.rst
index 5045e186..a49e3452 100644
--- a/tools/report/report_foot.rst
+++ b/tools/report/report_foot.rst
@@ -5,6 +5,7 @@
Rationale for decisions
=======================
+
The tests conducted do not have pass/fail/conditional-pass criteria. The test
is simply conducted and the results are reported.
@@ -12,6 +13,7 @@ is simply conducted and the results are reported.
Conclusions and recommendations
===============================
+
The test results are stable. The vsperf CI jobs that were used to obtain the
results can be found at https://wiki.opnfv.org/wiki/vsperf_results.
@@ -20,11 +22,13 @@ General
Glossary
--------
+
- NFV - Network Function Virtualization
- Mbps - 1,000,000bps
Document change procedures and history
--------------------------------------
+
=============================================== ================= =============
Document ID Author Date Modified
=============================================== ================= =============
diff --git a/tools/report/report_rst.jinja b/tools/report/report_rst.jinja
index eda0c01e..6b51807a 100644
--- a/tools/report/report_rst.jinja
+++ b/tools/report/report_rst.jinja
@@ -90,7 +90,9 @@ Testing Activities/Events
~~~~~~~~~~~~~~~~~~~~~~~~~
pidstat is used to collect the process statistics, as such some values such as
%CPU and %USER may be > 100% as the values are summed across multiple cores. For
-more info on pidstat please see: http://linux.die.net/man/1/pidstat.
+more info on pidstat please see: http://linux.die.net/man/1/pidstat. Please
+note that vsperf recalculates the CPU consumption of a process by aggregating
+the CPU usage of each thread.
Known issues: Some reported metrics have the value "unknown". These values are
marked unknown as they are not values retrieved from the external tester
diff --git a/tools/systeminfo.py b/tools/systeminfo.py
index f34bcce6..6020d0e2 100644
--- a/tools/systeminfo.py
+++ b/tools/systeminfo.py
@@ -191,7 +191,7 @@ def get_bin_version(binary, regex):
return None
versions = re.findall(regex, output)
- if len(versions):
+ if versions:
return versions[0]
else:
return None
@@ -297,7 +297,7 @@ def get_version(app_name):
if not '16' in release:
tmp_ver[2] += line.rstrip('\n').split(' ')[2]
- if len(tmp_ver[0]):
+ if tmp_ver[0]:
app_version = '.'.join(tmp_ver)
app_git_tag = get_git_tag(S.getValue('TOOLS')['dpdk_src'])
elif app_name.lower().startswith('qemu'):
diff --git a/tools/tasks.py b/tools/tasks.py
index 18f4d712..4e03f85e 100644
--- a/tools/tasks.py
+++ b/tools/tasks.py
@@ -117,11 +117,8 @@ def run_task(cmd, logger, msg=None, check_error=False):
def update_pids(pid):
"""update list of running pids, so they can be terminated at the end
"""
- try:
- pids = settings.getValue('_EXECUTED_PIDS')
- pids.append(pid)
- except AttributeError:
- pids = [pid]
+ pids = settings.getValue('_EXECUTED_PIDS')
+ pids.append(pid)
settings.setValue('_EXECUTED_PIDS', pids)
def run_background_task(cmd, logger, msg):
diff --git a/tools/teststepstools.py b/tools/teststepstools.py
index 33db8f79..db2d53e6 100644
--- a/tools/teststepstools.py
+++ b/tools/teststepstools.py
@@ -43,7 +43,7 @@ class TestStepsTools(object):
return True
@staticmethod
- def validate_Assert(result, dummy_condition):
+ def validate_Assert(result, _dummy_condition):
""" Validate evaluation of given `condition'
"""
return result
@@ -56,7 +56,7 @@ class TestStepsTools(object):
return eval(expression)
@staticmethod
- def validate_Eval(result, dummy_expression):
+ def validate_Eval(result, _dummy_expression):
""" Validate result of python `expression' evaluation
"""
return result is not None
@@ -76,7 +76,7 @@ class TestStepsTools(object):
return True
@staticmethod
- def validate_Exec_Python(result, dummy_code):
+ def validate_Exec_Python(result, _dummy_code):
""" Validate result of python `code' execution
"""
return result
@@ -99,7 +99,7 @@ class TestStepsTools(object):
return output
@staticmethod
- def validate_Exec_Shell(result, dummy_command, dummy_regex=None):
+ def validate_Exec_Shell(result, _dummy_command, _dummy_regex=None):
""" validate result of shell `command' execution
"""
return result is not None
@@ -115,7 +115,7 @@ class TestStepsTools(object):
return None
@staticmethod
- def validate_Exec_Shell_Background(result, dummy_command, dummy_regex=None):
+ def validate_Exec_Shell_Background(result, _dummy_command, _dummy_regex=None):
""" validate result of shell `command' execution on the background
"""
return result is not None
diff --git a/tools/veth.py b/tools/veth.py
index 6418d11a..6d7c9962 100644
--- a/tools/veth.py
+++ b/tools/veth.py
@@ -84,8 +84,7 @@ def del_veth_port(port, peer_port):
port, peer_port), False)
-# pylint: disable=unused-argument
-def validate_add_veth_port(result, port, peer_port):
+def validate_add_veth_port(_result, port, peer_port):
"""
Validation function for integration testcases
"""
@@ -93,7 +92,7 @@ def validate_add_veth_port(result, port, peer_port):
return all([port in devs, peer_port in devs])
-def validate_bring_up_eth_port(result, eth_port, namespace=None):
+def validate_bring_up_eth_port(_result, eth_port, namespace=None):
"""
Validation function for integration testcases
"""
@@ -110,7 +109,7 @@ def validate_bring_up_eth_port(result, eth_port, namespace=None):
return True
-def validate_del_veth_port(result, port, peer_port):
+def validate_del_veth_port(_result, port, peer_port):
"""
Validation function for integration testcases
"""
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 00000000..69aa1893
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,17 @@
+[tox]
+minversion = 1.6
+envlist =
+ docs,
+ docs-linkcheck
+skipsdist = true
+
+[testenv:docs]
+deps = -rdocs/requirements.txt
+commands =
+ sphinx-build -b html -n -d {envtmpdir}/doctrees ./docs/ {toxinidir}/docs/_build/html
+ echo "Generated docs available in {toxinidir}/docs/_build/html"
+whitelist_externals = echo
+
+[testenv:docs-linkcheck]
+deps = -rdocs/requirements.txt
+commands = sphinx-build -b linkcheck -d {envtmpdir}/doctrees ./docs/ {toxinidir}/docs/_build/linkcheck
diff --git a/vnfs/__init__.py b/vnfs/__init__.py
index 34cacf4f..1743faf8 100644
--- a/vnfs/__init__.py
+++ b/vnfs/__init__.py
@@ -17,4 +17,3 @@
This package contains an interface the VSPERF core uses for controlling
VNFs and VNF-specific implementation modules of this interface.
"""
-
diff --git a/vnfs/qemu/__init__.py b/vnfs/qemu/__init__.py
index 82f32eb9..6ed326dd 100644
--- a/vnfs/qemu/__init__.py
+++ b/vnfs/qemu/__init__.py
@@ -17,4 +17,3 @@
This package contains an implementation of the interface the VSPERF core
uses for controlling VNFs using QEMU and DPDK's testpmd application.
"""
-
diff --git a/vnfs/qemu/qemu.py b/vnfs/qemu/qemu.py
index 8e3d44de..fb87ed27 100644
--- a/vnfs/qemu/qemu.py
+++ b/vnfs/qemu/qemu.py
@@ -46,12 +46,14 @@ class IVnfQemu(IVnf):
Initialisation function.
"""
super(IVnfQemu, self).__init__()
-
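+ # suffix the QEMU log file name with the VM number and the run
+ # timestamp so logs of multiple guests and runs do not collide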
+ name, ext = os.path.splitext(S.getValue('LOG_FILE_QEMU'))
+ name = name + str(self._number)
+ rename_qemu = "{name}_{uid}{ex}".format(name=name,
+ uid=S.getValue('LOG_TIMESTAMP'),
+ ex=ext)
self._expect = S.getValue('GUEST_PROMPT_LOGIN')[self._number]
self._logger = logging.getLogger(__name__)
- self._logfile = os.path.join(
- S.getValue('LOG_DIR'),
- S.getValue('LOG_FILE_QEMU')) + str(self._number)
+ self._logfile = os.path.join(S.getValue('RESULTS_PATH'), rename_qemu)
self._timeout = S.getValue('GUEST_TIMEOUT')[self._number]
self._monitor = '%s/vm%dmonitor' % ('/tmp', self._number)
# read GUEST NICs configuration and use only defined NR of NICS
@@ -115,10 +117,13 @@ class IVnfQemu(IVnf):
self.GuestCommandFilter.prefix = self._log_prefix
logger = logging.getLogger()
+ name, ext = os.path.splitext(S.getValue('LOG_FILE_GUEST_CMDS'))
+ name = name + str(self._number)
+ rename_gcmd = "{name}_{uid}{ex}".format(name=name,
+ uid=S.getValue('LOG_TIMESTAMP'),
+ ex=ext)
cmd_logger = logging.FileHandler(
- filename=os.path.join(S.getValue('LOG_DIR'),
- S.getValue('LOG_FILE_GUEST_CMDS')) +
- str(self._number))
+ filename=os.path.join(S.getValue('RESULTS_PATH'), rename_gcmd))
cmd_logger.setLevel(logging.DEBUG)
cmd_logger.addFilter(self.GuestCommandFilter())
logger.addHandler(cmd_logger)
@@ -210,7 +215,7 @@ class IVnfQemu(IVnf):
:returns: None
"""
- thread_id = (r'.* CPU #%d: .* thread_id=(\d+)')
+ thread_id = (r'.* CPU #%d:.*? thread_id=(\d+)')
self._logger.info('Affinitizing guest...')
@@ -222,7 +227,13 @@ class IVnfQemu(IVnf):
stdin=proc.stdout)
proc.wait()
- for cpu in range(0, int(S.getValue('GUEST_SMP')[self._number])):
+ # calculate the number of CPUs in SMP topology specified by GUEST_SMP
+ # e.g. "sockets=2,cores=3", "4", etc.
+ cpu_nr = 1
+ for i in re.findall(r'\d+', S.getValue('GUEST_SMP')[self._number]):
+ cpu_nr = cpu_nr * int(i)
+ # pin each GUEST's core to host core based on configured BINDING
+ for cpu in range(0, cpu_nr):
match = None
guest_thread_binding = S.getValue('GUEST_THREAD_BINDING')[self._number]
if guest_thread_binding is None:
@@ -280,7 +291,7 @@ class IVnfQemu(IVnf):
elif self._guest_loopback == 'linux_bridge':
self._configure_linux_bridge()
elif self._guest_loopback != 'clean':
- raise RuntimeError('Unsupported guest loopback method "%s" was specified.',
+ raise RuntimeError('Unsupported guest loopback method "%s" was specified.' %
self._guest_loopback)
def wait(self, prompt=None, timeout=30):
@@ -387,6 +398,8 @@ class IVnfQemu(IVnf):
self.execute_and_wait('./testpmd {}'.format(testpmd_params), 60, "Done")
self.execute_and_wait('set fwd ' + self._testpmd_fwd_mode, 20, 'testpmd>')
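+ # map queues to testpmd statistics registers; each configured entry
+ # is passed verbatim to testpmd's 'set stat_qmap' command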
+ for entry in S.getValue('GUEST_QUEUE_STATS_MAPPING'):
+ self.execute_and_wait('set stat_qmap ' + entry, 2, 'testpmd>')
self.execute_and_wait('start', 20, 'testpmd>')
def _configure_l2fwd(self):
@@ -492,11 +505,16 @@ class IVnfQemu(IVnf):
pci_slots)
elif driver == 'igb_uio_from_src':
# build and insert igb_uio and rebind interfaces to it
- self.execute_and_wait('make RTE_OUTPUT=$RTE_SDK/$RTE_TARGET -C '
- '$RTE_SDK/lib/librte_eal/linuxapp/igb_uio')
+ # from DPDK 18.05 Linux kernel driver changed location
+ # also it is not possible to compile driver without
+ # passing EXTRA_CFLAGS
+ self.execute_and_wait("make RTE_OUTPUT=$RTE_SDK/{0} \
+ EXTRA_CFLAGS=\"-I$RTE_SDK/{1}/include\" \
+ -C $RTE_SDK/kernel/linux/igb_uio"\
+ .format(S.getValue('RTE_TARGET'), S.getValue('RTE_TARGET')))
self.execute_and_wait('modprobe uio')
- self.execute_and_wait('insmod %s/kmod/igb_uio.ko' %
- S.getValue('RTE_TARGET'))
+ self.execute_and_wait('insmod {}/kmod/igb_uio.ko'\
+ .format(S.getValue('RTE_TARGET')))
self.execute_and_wait('./*tools/dpdk*bind.py -b igb_uio ' + pci_slots)
else:
self._logger.error(
diff --git a/vnfs/vnf/__init__.py b/vnfs/vnf/__init__.py
index b7c43217..6a7a1547 100644
--- a/vnfs/vnf/__init__.py
+++ b/vnfs/vnf/__init__.py
@@ -15,4 +15,4 @@
"""VNF interface and helpers.
"""
-from vnfs import *
+import vnfs
diff --git a/vnfs/vnf/vnf.py b/vnfs/vnf/vnf.py
index 5ac2ada3..3ad1dcda 100644
--- a/vnfs/vnf/vnf.py
+++ b/vnfs/vnf/vnf.py
@@ -138,7 +138,7 @@ class IVnf(tasks.Process):
self.execute(cmd)
return self.wait(prompt=prompt, timeout=timeout)
- def validate_start(self, dummyresult):
+ def validate_start(self, _dummyresult):
""" Validate call of VNF start()
"""
if self._child and self._child.isalive():
@@ -152,7 +152,7 @@ class IVnf(tasks.Process):
return not self.validate_start(result)
@staticmethod
- def validate_execute_and_wait(result, dummy_cmd, dummy_timeout=30, dummy_prompt=''):
+ def validate_execute_and_wait(result, _dummy_cmd, _dummy_timeout=30, _dummy_prompt=''):
""" Validate command execution within VNF
"""
return len(result) > 0
diff --git a/vsperf b/vsperf
index 5589ac39..773ad759 100755
--- a/vsperf
+++ b/vsperf
@@ -23,6 +23,7 @@ import sys
import argparse
import re
import time
+import csv
import datetime
import shutil
import unittest
@@ -32,17 +33,20 @@ import glob
import subprocess
import ast
import xmlrunner
+from tabulate import tabulate
+from conf import merge_spec
from conf import settings
import core.component_factory as component_factory
from core.loader import Loader
from testcases import PerformanceTestCase
from testcases import IntegrationTestCase
+from testcases import K8sPerformanceTestCase
from tools import tasks
from tools import networkcard
from tools import functions
from tools.pkt_gen import trafficgen
from tools.opnfvdashboard import opnfvdashboard
-
+from tools.os_deploy_tgen import osdt
sys.dont_write_bytecode = True
VERBOSITY_LEVELS = {
@@ -61,8 +65,43 @@ _TEMPLATE_RST = {'head' : os.path.join(_CURR_DIR, 'tools/report/report_head.rst
'tmp' : os.path.join(_CURR_DIR, 'tools/report/report_tmp_caption.rst')
}
+_TEMPLATE_MATRIX = "Performance Matrix\n------------------\n\n"\
+ "The following performance matrix was generated with the results of all the\n"\
+ "currently run tests. The metric used for comparison is {}.\n\n{}\n\n"
_LOGGER = logging.getLogger()
+logging.getLogger('matplotlib').setLevel(logging.ERROR)
+
+def parse_param_string(values):
+ """
+ Parse and split a single '--test-params' argument.
+
+ This expects either 'x=y', 'x=y,z' or 'x' (implicit true)
+ values. For multiple overrides use a ';' separated list,
+ e.g. --test-params 'x=z; y=(a,b)'
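+
+ :return: dictionary of parameter names and their parsed values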
+ """
+ results = {}
+
+ if values == '':
+ return {}
+
+ for param, _, value in re.findall('([^;=]+)(=([^;]+))?', values):
+ param = param.strip()
+ value = value.strip()
+ if param:
+ if value:
+ # values are passed inside string from CLI, so we must retype them accordingly
+ try:
+ results[param] = ast.literal_eval(value)
+ except ValueError:
+ # for backward compatibility, we have to accept strings without quotes
+ _LOGGER.warning("Adding missing quotes around string value: %s = %s",
+ param, str(value))
+ results[param] = str(value)
+ else:
+ results[param] = True
+ return results
+
def parse_arguments():
"""
@@ -70,31 +109,24 @@ def parse_arguments():
"""
class _SplitTestParamsAction(argparse.Action):
"""
- Parse and split the '--test-params' argument.
+ Parse and split '--test-params' arguments.
- This expects either 'x=y', 'x=y,z' or 'x' (implicit true)
- values. For multiple overrides use a ; separated list for
+ This expects either a single ';' separated list of overrides,
+ as 'x=y', 'x=y,z' or 'x' (implicit true) values,
e.g. --test-params 'x=z; y=(a,b)'
+ or a list of such ';' separated lists with overrides for
+ multiple tests,
+ e.g. --test-params "['x=z; y=(a,b)','x=z']"
"""
def __call__(self, parser, namespace, values, option_string=None):
- results = {}
-
- for param, _, value in re.findall('([^;=]+)(=([^;]+))?', values):
- param = param.strip()
- value = value.strip()
- if len(param):
- if len(value):
- # values are passed inside string from CLI, so we must retype them accordingly
- try:
- results[param] = ast.literal_eval(value)
- except ValueError:
- # for backward compatibility, we have to accept strings without quotes
- _LOGGER.warning("Adding missing quotes around string value: %s = %s",
- param, str(value))
- results[param] = str(value)
- else:
- results[param] = True
-
+ if values[0] == '[':
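+ # a leading '[' signals a python-style list of per-test override
+ # strings; parse each element separately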
+ input_list = ast.literal_eval(values)
+ parameter_list = []
+ for test_params in input_list:
+ parameter_list.append(parse_param_string(test_params))
+ else:
+ parameter_list = parse_param_string(values)
+ results = {'_PARAMS_LIST':parameter_list}
setattr(namespace, self.dest, results)
class _ValidateFileAction(argparse.Action):
@@ -126,7 +158,7 @@ def parse_arguments():
def list_logging_levels():
"""Give a summary of all available logging levels.
- :return: List of verbosity level names in decreasing order of
+ :return: List of verbosity level names in decreasing order of
verbosity
"""
return sorted(VERBOSITY_LEVELS.keys(),
@@ -149,6 +181,8 @@ def parse_arguments():
help='list all system vnfs and exit')
parser.add_argument('--list-loadgens', action='store_true',
help='list all background load generators')
+ parser.add_argument('--list-pods', action='store_true',
+ help='list all system pods')
parser.add_argument('--list-settings', action='store_true',
help='list effective settings configuration and exit')
parser.add_argument('exact_test_name', nargs='*', help='Exact names of\
@@ -172,6 +206,8 @@ def parse_arguments():
group.add_argument('--verbosity', choices=list_logging_levels(),
help='debug level')
group.add_argument('--integration', action='store_true', help='execute integration tests')
+ group.add_argument('--k8s', action='store_true', help='execute Kubernetes tests')
+ group.add_argument('--openstack', action='store_true', help='Run VSPERF with openstack')
group.add_argument('--trafficgen', help='traffic generator to use')
group.add_argument('--vswitch', help='vswitch implementation to use')
group.add_argument('--fwdapp', help='packet forwarding application to use')
@@ -189,9 +225,14 @@ def parse_arguments():
help='settings file')
group.add_argument('--test-params', action=_SplitTestParamsAction,
help='csv list of test parameters: key=val; e.g. '
- 'TRAFFICGEN_PKT_SIZES=(64,128);TRAFICGEN_DURATION=30; '
- 'GUEST_LOOPBACK=["l2fwd"] ...')
+ 'TRAFFICGEN_PKT_SIZES=(64,128);TRAFFICGEN_DURATION=30; '
+ 'GUEST_LOOPBACK=["l2fwd"] ...'
+ ' or a list of csv lists of test parameters: key=val; e.g. '
+ '[\'TRAFFICGEN_DURATION=10;TRAFFICGEN_PKT_SIZES=(128,)\','
+ '\'TRAFFICGEN_DURATION=10;TRAFFICGEN_PKT_SIZES=(64,)\']')
group.add_argument('--opnfvpod', help='name of POD in opnfv')
+ group.add_argument('--matrix', help='enable performance matrix analysis',
+ action='store_true', default=False)
args = vars(parser.parse_args())
@@ -201,13 +242,31 @@ def parse_arguments():
def configure_logging(level):
"""Configure logging.
"""
+ name, ext = os.path.splitext(settings.getValue('LOG_FILE_DEFAULT'))
+ rename_default = "{name}_{uid}{ex}".format(name=name,
+ uid=settings.getValue(
+ 'LOG_TIMESTAMP'),
+ ex=ext)
log_file_default = os.path.join(
- settings.getValue('LOG_DIR'), settings.getValue('LOG_FILE_DEFAULT'))
+ settings.getValue('RESULTS_PATH'), rename_default)
+ name, ext = os.path.splitext(settings.getValue('LOG_FILE_HOST_CMDS'))
+ rename_hostcmd = "{name}_{uid}{ex}".format(name=name,
+ uid=settings.getValue(
+ 'LOG_TIMESTAMP'),
+ ex=ext)
log_file_host_cmds = os.path.join(
- settings.getValue('LOG_DIR'), settings.getValue('LOG_FILE_HOST_CMDS'))
+ settings.getValue('RESULTS_PATH'), rename_hostcmd)
+ name, ext = os.path.splitext(settings.getValue('LOG_FILE_TRAFFIC_GEN'))
+ rename_traffic = "{name}_{uid}{ex}".format(name=name,
+ uid=settings.getValue(
+ 'LOG_TIMESTAMP'),
+ ex=ext)
log_file_traffic_gen = os.path.join(
- settings.getValue('LOG_DIR'),
- settings.getValue('LOG_FILE_TRAFFIC_GEN'))
+ settings.getValue('RESULTS_PATH'), rename_traffic)
+ metrics_file = (settings.getValue('LOG_FILE_INFRA_METRICS_PFX') +
+ settings.getValue('LOG_TIMESTAMP') + '.log')
+ log_file_infra_metrics = os.path.join(settings.getValue('LOG_DIR'),
+ metrics_file)
_LOGGER.setLevel(logging.DEBUG)
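# The renames above repeat one pattern; a minimal sketch of a helper that
# would capture it (hypothetical name, not part of this patch):
#
#   def _timestamped(filename, uid):
#       name, ext = os.path.splitext(filename)
#       return "{}_{}{}".format(name, uid, ext)
#
#   _timestamped('vsperf.log', '2018-06-01_12-00-00')
#   # -> 'vsperf_2018-06-01_12-00-00.log'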
@@ -219,6 +278,8 @@ def configure_logging(level):
file_logger = logging.FileHandler(filename=log_file_default)
file_logger.setLevel(logging.DEBUG)
+ file_logger.setFormatter(logging.Formatter(
+ '%(asctime)s : %(message)s'))
_LOGGER.addHandler(file_logger)
class CommandFilter(logging.Filter):
@@ -231,6 +292,11 @@ def configure_logging(level):
def filter(self, record):
return record.getMessage().startswith(trafficgen.CMD_PREFIX)
+ class CollectdMetricsFilter(logging.Filter):
+ """Filter out strings beginning with 'COLLECTD' :'"""
+ def filter(self, record):
+ return record.getMessage().startswith('COLLECTD')
+
cmd_logger = logging.FileHandler(filename=log_file_host_cmds)
cmd_logger.setLevel(logging.DEBUG)
cmd_logger.addFilter(CommandFilter())
@@ -241,6 +307,12 @@ def configure_logging(level):
gen_logger.addFilter(TrafficGenCommandFilter())
_LOGGER.addHandler(gen_logger)
+ if settings.getValue('COLLECTOR') == 'Collectd':
+ met_logger = logging.FileHandler(filename=log_file_infra_metrics)
+ met_logger.setLevel(logging.DEBUG)
+ met_logger.addFilter(CollectdMetricsFilter())
+ _LOGGER.addHandler(met_logger)
+
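+ # A minimal sketch of how such a filter routes records: a handler emits
+ # only the records for which every attached filter returns True.
+ #
+ #   handler = logging.FileHandler('/tmp/collectd_only.log')  # assumed path
+ #   handler.addFilter(CollectdMetricsFilter())
+ #   _LOGGER.addHandler(handler)
+ #   _LOGGER.warning('COLLECTD cpu/percent 42')  # reaches the file
+ #   _LOGGER.warning('unrelated message')        # dropped by this handler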
def apply_filter(tests, tc_filter):
"""Allow a subset of tests to be conveniently selected
@@ -292,7 +364,7 @@ def get_vswitch_names(rst_files):
""" Function will return a list of vSwitches detected in given ``rst_files``.
"""
vswitch_names = set()
- if len(rst_files):
+ if rst_files:
try:
output = subprocess.check_output(['grep', '-h', '^* vSwitch'] + rst_files).decode().splitlines()
for line in output:
@@ -300,7 +372,7 @@ def get_vswitch_names(rst_files):
if match:
vswitch_names.add(match.group(1))
- if len(vswitch_names):
+ if vswitch_names:
return list(vswitch_names)
except subprocess.CalledProcessError:
@@ -331,7 +403,7 @@ def generate_final_report():
# check if there are any results in rst format
rst_results = glob.glob(os.path.join(path, 'result*rst'))
pkt_processors = get_vswitch_names(rst_results)
- if len(rst_results):
+ if rst_results:
try:
test_report = os.path.join(path, '{}_{}'.format('_'.join(pkt_processors), _TEMPLATE_RST['final']))
# create report caption directly - it is not worth to execute jinja machinery
@@ -359,6 +431,69 @@ def generate_final_report():
_LOGGER.error('Generation of overall test report has failed.')
+def generate_performance_matrix(selected_tests, results_path):
+ """
+ Loads the results of all the currently run tests, compares them
+ based on the MATRIX_METRIC, outputs and saves the generated table.
+ :selected_tests: list of currently run tests
+ :results_path: directory path to the results of current tests
+ """
+ _LOGGER.info('Performance Matrix:')
+ test_list = []
+
+ for test in selected_tests:
+ test_name = test.get('Name', '<Name not set>')
+ test_deployment = test.get('Deployment', '<Deployment not set>')
+ test_list.append({'test_name':test_name, 'test_deployment':test_deployment, 'csv_data':False})
+
+ test_params = {}
+ output = []
+ all_params = settings.getValue('_PARAMS_LIST')
+ for i in range(len(selected_tests)):
+ test = test_list[i]
+ if isinstance(all_params, list):
+ list_index = i
+ if i >= len(all_params):
+ list_index = len(all_params) - 1
+ if settings.getValue('CUMULATIVE_PARAMS') and (i > 0):
+ test_params.update(all_params[list_index])
+ else:
+ test_params = all_params[list_index]
+ else:
+ test_params = all_params
+ settings.setValue('TEST_PARAMS', test_params)
+ test['test_params'] = copy.deepcopy(test_params)
+ try:
+ with open("{}/result_{}_{}_{}.csv".format(results_path, str(i),
+ test['test_name'], test['test_deployment'])) as csvfile:
+ reader = list(csv.DictReader(csvfile))
+ test['csv_data'] = reader[0]
+ # pylint: disable=broad-except
+ except (Exception) as ex:
+ _LOGGER.error("Result file not found: %s", ex)
+
+ metric = settings.getValue('MATRIX_METRIC')
+ change = {}
+ output_header = ("ID", "Name", metric, "Change [%]", "Parameters, "\
+ "CUMULATIVE_PARAMS = {}".format(settings.getValue('CUMULATIVE_PARAMS')))
+ if not test_list[0]['csv_data'] or float(test_list[0]['csv_data'][metric]) == 0:
+ _LOGGER.error("Incorrect format of test results")
+ return
+ for i, test in enumerate(test_list):
+ if test['csv_data']:
+ change[i] = float(test['csv_data'][metric])/\
+ (float(test_list[0]['csv_data'][metric]) / 100) - 100
+ output.append([i, test['test_name'], float(test['csv_data'][metric]),
+ change[i], str(test['test_params'])[1:-1]])
+ else:
+ change[i] = 0
+ output.append([i, test['test_name'], "Test Failed", 0, test['test_params']])
+ print(tabulate(output, headers=output_header, tablefmt="grid", floatfmt="0.3f"))
+ with open(results_path + '/result_performance_matrix.rst', 'w+') as output_file:
+ output_file.write(_TEMPLATE_MATRIX.format(metric, tabulate(output, headers=output_header,
+ tablefmt="rst", floatfmt="0.3f")))
+ _LOGGER.info('Performance matrix written to: "%s/result_performance_matrix.rst"', results_path)
+
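+ # Worked example of the "Change [%]" column computed above: with
+ # MATRIX_METRIC values 10.0 (reference test 0) and 10.5 (test 1):
+ #   change[1] = 10.5 / (10.0 / 100) - 100 = 5.0   # i.e. 5% improvement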
def enable_sriov(nic_list):
""" Enable SRIOV for given enhanced PCI IDs
@@ -376,7 +511,7 @@ def enable_sriov(nic_list):
sriov_nic.update({tmp_nic[0] : int(tmp_nic[1][2:])})
# sriov is required for some NICs
- if len(sriov_nic):
+ if sriov_nic:
for nic in sriov_nic:
# check if SRIOV is supported and enough virt interfaces are available
if not networkcard.is_sriov_supported(nic) \
@@ -449,6 +584,10 @@ def handle_list_options(args):
print(Loader().get_loadgens_printable())
sys.exit(0)
+ if args['list_pods']:
+ print(Loader().get_pods_printable())
+ sys.exit(0)
+
if args['list_settings']:
print(str(settings))
sys.exit(0)
@@ -466,6 +605,8 @@ def list_testcases(args):
# configure tests
if args['integration']:
testcases = settings.getValue('INTEGRATION_TESTS')
+ elif args['k8s']:
+ testcases = settings.getValue('K8SPERFORMANCE_TESTS')
else:
testcases = settings.getValue('PERFORMANCE_TESTS')
@@ -483,9 +624,6 @@ def list_testcases(args):
print(' {:40} {}'.format('', description[i]))
-
-
-
def vsperf_finalize():
""" Clean up before exit
"""
@@ -495,7 +633,7 @@ def vsperf_finalize():
if os.path.exists(results_path):
files_list = os.listdir(results_path)
if files_list == []:
- _LOGGER.info("Removing empty result directory: " + results_path)
+ _LOGGER.info("Removing empty result directory: %s", results_path)
shutil.rmtree(results_path)
except AttributeError:
# skip it if parameter doesn't exist
@@ -547,9 +685,26 @@ def main():
settings.load_from_dir(os.path.join(_CURR_DIR, 'conf'))
- # Load non performance/integration tests
+ # define the timestamp to be used by logs and results
+ date = datetime.datetime.fromtimestamp(time.time())
+ timestamp = date.strftime('%Y-%m-%d_%H-%M-%S')
+ settings.setValue('LOG_TIMESTAMP', timestamp)
+
+ # generate results directory name
+ # integration tests use the vswitchd log in test step assertions; ensure
+ # that the correct value is set before loading integration test configuration
+ results_dir = "results_" + timestamp
+ results_path = os.path.join(settings.getValue('LOG_DIR'), results_dir)
+ settings.setValue('RESULTS_PATH', results_path)
+ # create results directory
+ if not os.path.exists(results_path):
+ os.makedirs(results_path)
+
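+ # e.g. assuming LOG_DIR='/tmp' and a run started at 2018-06-01 12:00:00,
+ # results_path == '/tmp/results_2018-06-01_12-00-00'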
+ # load non performance/integration tests
if args['integration']:
settings.load_from_dir(os.path.join(_CURR_DIR, 'conf/integration'))
+ if args['k8s']:
+ settings.load_from_dir(os.path.join(_CURR_DIR, 'conf/kubernetes'))
# load command line parameters first in case there are settings files
# to be used
@@ -567,6 +722,19 @@ def main():
settings.setValue('mode', args['mode'])
+ if args['k8s']:
+ settings.setValue('K8S', True)
+ else:
+ settings.setValue('K8S', False)
+
+ if args['openstack']:
+ result = osdt.deploy_testvnf()
+ if result:
+ _LOGGER.info('TestVNF successfully deployed on Openstack')
+ settings.setValue('mode', 'trafficgen')
+ else:
+ _LOGGER.error('Failed to deploy TestVNF in Openstack')
+ sys.exit(1)
# update paths to trafficgens if required
if settings.getValue('mode') == 'trafficgen':
functions.settings_update_paths()
@@ -576,6 +744,9 @@ def main():
configure_logging(settings.getValue('VERBOSITY'))
+ # CI build support
+ _LOGGER.info("Creating result directory: %s", results_path)
+
# check and fix locale
check_and_set_locale()
@@ -655,24 +826,14 @@ def main():
# for backward compatibility
settings.setValue('WHITELIST_NICS', list(nic['pci'] for nic in nic_list))
- # generate results directory name
- date = datetime.datetime.fromtimestamp(time.time())
- results_dir = "results_" + date.strftime('%Y-%m-%d_%H-%M-%S')
- results_path = os.path.join(settings.getValue('LOG_DIR'), results_dir)
- settings.setValue('RESULTS_PATH', results_path)
-
- # create results directory
- if not os.path.exists(results_path):
- _LOGGER.info("Creating result directory: " + results_path)
- os.makedirs(results_path)
+ # pylint: disable=too-many-nested-blocks
if settings.getValue('mode') == 'trafficgen':
# execute only traffic generator
_LOGGER.debug("Executing traffic generator:")
loader = Loader()
# set traffic details, so they can be passed to traffic ctl
traffic = copy.deepcopy(settings.getValue('TRAFFIC'))
-
traffic = functions.check_traffic(traffic)
traffic_ctl = component_factory.create_traffic(
@@ -690,13 +851,19 @@ def main():
# configure tests
if args['integration']:
testcases = settings.getValue('INTEGRATION_TESTS')
+ elif args['k8s']:
+ testcases = settings.getValue('K8SPERFORMANCE_TESTS')
else:
testcases = settings.getValue('PERFORMANCE_TESTS')
if args['exact_test_name']:
exact_names = args['exact_test_name']
# positional args => exact matches only
- selected_tests = [test for test in testcases if test['Name'] in exact_names]
+ selected_tests = []
+ for test_name in exact_names:
+ for test in testcases:
+ if test['Name'] == test_name:
+ selected_tests.append(test)
elif args['tests']:
# --tests => apply filter to select requested tests
selected_tests = apply_filter(testcases, args['tests'])
@@ -704,44 +871,67 @@ def main():
# Default - run all tests
selected_tests = testcases
- if not len(selected_tests):
+ if not selected_tests:
_LOGGER.error("No tests matched --tests option or positional args. Done.")
vsperf_finalize()
sys.exit(1)
- # run tests
- # Add pylint exception: Redefinition of test type from
- # testcases.integration.IntegrationTestCase to testcases.performance.PerformanceTestCase
- # pylint: disable=redefined-variable-type
suite = unittest.TestSuite()
settings_snapshot = copy.deepcopy(settings.__dict__)
- for cfg in selected_tests:
+
+ for i, cfg in enumerate(selected_tests):
+ settings.setValue('_TEST_INDEX', i)
test_name = cfg.get('Name', '<Name not set>')
try:
+ test_params = settings.getValue('_PARAMS_LIST')
+ if isinstance(test_params, list):
+ list_index = i
+ if i >= len(test_params):
+ list_index = len(test_params) - 1
+ test_params = test_params[list_index]
+ if settings.getValue('CUMULATIVE_PARAMS'):
+ test_params = merge_spec(settings.getValue('TEST_PARAMS'), test_params)
+ settings.setValue('TEST_PARAMS', test_params)
+
if args['integration']:
test = IntegrationTestCase(cfg)
+ elif args['k8s']:
+ test = K8sPerformanceTestCase(cfg)
else:
test = PerformanceTestCase(cfg)
+
test.run()
suite.addTest(MockTestCase('', True, test.name))
+
# pylint: disable=broad-except
except (Exception) as ex:
_LOGGER.exception("Failed to run test: %s", test_name)
suite.addTest(MockTestCase(str(ex), False, test_name))
_LOGGER.info("Continuing with next test...")
finally:
- settings.restore_from_dict(settings_snapshot)
+ if not settings.getValue('CUMULATIVE_PARAMS'):
+ settings.restore_from_dict(settings_snapshot)
+
+ settings.restore_from_dict(settings_snapshot)
+
+
+ # Generate and print out the Performance Matrix
+ if args['matrix']:
+ generate_performance_matrix(selected_tests, results_path)
# generate final rst report with results of all executed TCs
generate_final_report()
+
+
if settings.getValue('XUNIT'):
xmlrunner.XMLTestRunner(
output=settings.getValue('XUNIT_DIR'), outsuffix="",
verbosity=0).run(suite)
- if args['opnfvpod']:
- pod_name = args['opnfvpod']
+ if args['opnfvpod'] or settings.getValue('OPNFVPOD'):
+ pod_name = (args['opnfvpod'] if args['opnfvpod'] else
+ settings.getValue('OPNFVPOD'))
installer_name = str(settings.getValue('OPNFV_INSTALLER')).lower()
opnfv_url = settings.getValue('OPNFV_URL')
pkg_list = settings.getValue('PACKAGE_LIST')
diff --git a/vswitches/__init__.py b/vswitches/__init__.py
index a34475be..20a715e0 100644
--- a/vswitches/__init__.py
+++ b/vswitches/__init__.py
@@ -17,4 +17,3 @@
This package contains an interface the VSPERF core uses for controlling
vSwitches and vSwitch-specific implementation modules of this interface.
"""
-
diff --git a/vswitches/ovs.py b/vswitches/ovs.py
index 76cabb0d..853bef85 100644
--- a/vswitches/ovs.py
+++ b/vswitches/ovs.py
@@ -1,4 +1,4 @@
-# Copyright 2015-2017 Intel Corporation.
+# Copyright 2015-2018 Intel Corporation., Tieto
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -15,12 +15,13 @@
"""VSPERF Open vSwitch base class
"""
-import logging
import os
import re
import time
import datetime
import random
+import socket
+import netaddr
import pexpect
from conf import settings
@@ -28,6 +29,10 @@ from src.ovs import OFBridge, flow_key, flow_match
from vswitches.vswitch import IVSwitch
from tools import tasks
from tools.module_manager import ModuleManager
+
+# enable caching of flows if their number exceeds the given limit
+_CACHE_FLOWS_LIMIT = 10
+
# pylint: disable=too-many-public-methods
class IVSwitchOvs(IVSwitch, tasks.Process):
"""Open vSwitch base class implementation
@@ -41,23 +46,35 @@ class IVSwitchOvs(IVSwitch, tasks.Process):
def __init__(self):
"""See IVswitch for general description
"""
- self._logfile = os.path.join(settings.getValue('LOG_DIR'),
- settings.getValue('LOG_FILE_VSWITCHD'))
+ super().__init__()
+
+ name, ext = os.path.splitext(settings.getValue('LOG_FILE_VSWITCHD'))
+ rename_vswitchd = "{name}_{uid}{ex}".format(name=name,
+ uid=settings.getValue('LOG_TIMESTAMP'),
+ ex=ext)
+ self._logfile = os.path.join(settings.getValue('RESULTS_PATH'), rename_vswitchd)
self._ovsdb_pidfile_path = os.path.join(settings.getValue('TOOLS')['ovs_var_tmp'],
"ovsdb-server.pid")
self._vswitchd_pidfile_path = os.path.join(settings.getValue('TOOLS')['ovs_var_tmp'],
"{}.pid".format(self._proc_name))
- self._logger = logging.getLogger(__name__)
# sign '|' must be escaped or avoided, otherwise it is handled as 'or' by regex
self._expect = r'bridge.INFO.{}'.format(self._proc_name)
- self._timeout = 30
- self._bridges = {}
self._vswitchd_args = ['--pidfile=' + self._vswitchd_pidfile_path,
'--overwrite-pidfile', '--log-file=' + self._logfile]
- self._cmd = []
self._cmd_template = ['sudo', '-E', settings.getValue('TOOLS')['ovs-vswitchd']]
- self._stamp = None
self._module_manager = ModuleManager()
+ self._flow_template = settings.getValue('OVS_FLOW_TEMPLATE').copy()
+ self._flow_actions = ['output:{}']
+
+ # if routing tables are enabled, then flows should go into table 1
+ # see design document for details about Routing Tables feature
+ if settings.getValue('OVS_ROUTING_TABLES'):
+ # flows should be added into table 1
+ self._flow_template.update({'table':'1', 'priority':'1'})
+ # and chosen port will be propagated via metadata
+ self._flow_actions = ['write_actions(output:{})',
+ 'write_metadata:{}',
+ 'goto_table:2']
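+
+ # For illustration: with OVS_ROUTING_TABLES enabled and egress port 2,
+ # _prepare_flows() below expands the template via
+ #   [action.format(2) for action in self._flow_actions]
+ # -> ['write_actions(output:2)', 'write_metadata:2', 'goto_table:2']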
def start(self):
""" Start ``ovsdb-server`` and ``ovs-vswitchd`` instance.
@@ -85,7 +102,7 @@ class IVSwitchOvs(IVSwitch, tasks.Process):
tasks.Process.start(self)
self.relinquish()
except (pexpect.EOF, pexpect.TIMEOUT) as exc:
- logging.error("Exception during VSwitch start.")
+ self._logger.error("Exception during VSwitch start.")
self._kill_ovsdb()
raise exc
@@ -107,7 +124,7 @@ class IVSwitchOvs(IVSwitch, tasks.Process):
tasks.Process.start(self)
self.relinquish()
except (pexpect.EOF, pexpect.TIMEOUT) as exc:
- logging.error("Exception during VSwitch start.")
+ self._logger.error("Exception during VSwitch start.")
self._kill_ovsdb()
raise exc
self._logger.info("Vswitchd...Started.")
@@ -128,26 +145,57 @@ class IVSwitchOvs(IVSwitch, tasks.Process):
def stop(self):
"""See IVswitch for general description
"""
+ for switch_name in list(self._switches):
+ self.del_switch(switch_name)
self._logger.info("Terminating vswitchd...")
self.kill()
- self._bridges = {}
+ self._switches = {}
self._logger.info("Vswitchd...Terminated.")
def add_switch(self, switch_name, params=None):
"""See IVswitch for general description
"""
+ # create and configure a new OVS bridge and delete all default flows
bridge = OFBridge(switch_name)
bridge.create(params)
+ bridge.del_flow({})
bridge.set_db_attribute('Open_vSwitch', '.',
'other_config:max-idle',
settings.getValue('VSWITCH_FLOW_TIMEOUT'))
- self._bridges[switch_name] = bridge
+ self._switches[switch_name] = bridge
+ if settings.getValue('OVS_ROUTING_TABLES'):
+ # table#0 - flows designed to force 5- & 13-tuple matches go here
+ flow = {'table':'0', 'priority':'1', 'actions': ['goto_table:1']}
+ bridge.add_flow(flow)
+
+ # table#1 - flows to route packets between ports goes here. The
+ # chosen port is communicated to subsequent tables by setting the
+ # metadata value to the egress port number
+ #
+ # A placeholder - flows are added into this table by deployments
+ # or by TestSteps via add_connection() method
+
+ # table#2 - frame modification table. Frame modification flow rules are
+ # isolated in this table so that they can be turned on or off
+ # without affecting the routing or tuple-matching flow rules.
+ flow = {'table':'2', 'priority':'1', 'actions': ['goto_table:3']}
+ bridge.add_flow(flow)
+
+ # table#3 - egress table
+ # (NOTE) Billy O'Mahony - the drop action here is actually required in
+ # order to egress the packet. This is the subject of a thread on
+ # ovs-discuss 2015-06-30.
+ flow = {'table':'3', 'priority':'1', 'actions': ['drop']}
+ bridge.add_flow(flow)
def del_switch(self, switch_name):
"""See IVswitch for general description
"""
- bridge = self._bridges[switch_name]
- self._bridges.pop(switch_name)
+ bridge = self._switches[switch_name]
+ bridge.del_flow({})
+ for port in list(bridge.get_ports()):
+ bridge.del_port(port)
+ self._switches.pop(switch_name)
bridge.destroy()
def add_phy_port(self, switch_name):
@@ -166,10 +214,10 @@ class IVSwitchOvs(IVSwitch, tasks.Process):
"""
if switch_name is None or remote_switch_name is None:
- return
+ return None
- bridge = self._bridges[switch_name]
- remote_bridge = self._bridges[remote_switch_name]
+ bridge = self._switches[switch_name]
+ remote_bridge = self._switches[remote_switch_name]
pcount = str(self._get_port_count('type=patch'))
# NOTE: What if the interface name is longer than the allowed width?
local_port_name = switch_name + '-' + remote_switch_name + '-' + pcount
@@ -195,7 +243,7 @@ class IVSwitchOvs(IVSwitch, tasks.Process):
def add_tunnel_port(self, switch_name, remote_ip, tunnel_type='vxlan', params=None):
"""Creates tunneling port
"""
- bridge = self._bridges[switch_name]
+ bridge = self._switches[switch_name]
pcount = str(self._get_port_count('type=' + tunnel_type))
port_name = tunnel_type + pcount
local_params = ['--', 'set', 'Interface', port_name,
@@ -211,53 +259,123 @@ class IVSwitchOvs(IVSwitch, tasks.Process):
def get_ports(self, switch_name):
"""See IVswitch for general description
"""
- bridge = self._bridges[switch_name]
+ bridge = self._switches[switch_name]
ports = list(bridge.get_ports().items())
return [(name, of_port) for (name, (of_port, _)) in ports]
def del_port(self, switch_name, port_name):
"""See IVswitch for general description
"""
- bridge = self._bridges[switch_name]
+ bridge = self._switches[switch_name]
bridge.del_port(port_name)
def add_flow(self, switch_name, flow, cache='off'):
"""See IVswitch for general description
"""
- bridge = self._bridges[switch_name]
+ bridge = self._switches[switch_name]
bridge.add_flow(flow, cache=cache)
def del_flow(self, switch_name, flow=None):
"""See IVswitch for general description
"""
flow = flow or {}
- bridge = self._bridges[switch_name]
+ bridge = self._switches[switch_name]
bridge.del_flow(flow)
def dump_flows(self, switch_name):
"""See IVswitch for general description
"""
- bridge = self._bridges[switch_name]
+ bridge = self._switches[switch_name]
bridge.dump_flows()
+ def _prepare_flows(self, operation, switch_name, port1, port2, traffic=None):
+ """Prepare flows for add_connection, del_connection and validate methods
+ It returns a list of flows based on given parameters.
+ """
+ flows = []
+ if operation == 'add':
+ bridge = self._switches[switch_name]
+ flow = self._flow_template.copy()
+ actions = [action.format(bridge.get_ports()[port2][0]) for action in self._flow_actions]
+ flow.update({'in_port': bridge.get_ports()[port1][0], 'actions': actions})
+ # check if stream specific connection(s) should be created for multistream feature
+ if traffic and traffic['pre_installed_flows'].lower() == 'yes':
+ for stream in range(traffic['multistream']):
+ tmp_flow = flow.copy()
+ # update flow based on trafficgen settings
+ if traffic['stream_type'] == 'L2':
+ dst_mac_value = netaddr.EUI(traffic['l2']['dstmac']).value
+ tmp_mac = netaddr.EUI(dst_mac_value + stream)
+ tmp_mac.dialect = netaddr.mac_unix_expanded
+ tmp_flow.update({'dl_dst':tmp_mac})
+ elif traffic['stream_type'] == 'L3':
+ dst_ip_value = netaddr.IPAddress(traffic['l3']['dstip']).value
+ tmp_ip = netaddr.IPAddress(dst_ip_value + stream)
+ tmp_flow.update({'dl_type':'0x0800', 'nw_dst':tmp_ip})
+ elif traffic['stream_type'] == 'L4':
+ tmp_flow.update({'dl_type':'0x0800',
+ 'nw_proto':socket.getprotobyname(traffic['l3']['proto'].lower()),
+ 'tp_dst':(traffic['l4']['dstport'] + stream) % 65536})
+ flows.append(tmp_flow)
+ elif traffic and traffic['flow_type'].lower() == 'ip':
+ flow.update({'dl_type':'0x0800', 'nw_src':traffic['l3']['srcip'],
+ 'nw_dst':traffic['l3']['dstip']})
+ flows.append(flow)
+ else:
+ flows.append(flow)
+ elif operation == 'del' and port1:
+ bridge = self._switches[switch_name]
+ flows.append({'in_port': bridge.get_ports()[port1][0]})
+ else:
+ flows.append({})
+
+ return flows
+
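+ # A minimal sketch of the multistream expansion above, assuming stream_type
+ # 'L3', pre_installed_flows == 'yes' and traffic['l3']['dstip'] == '90.90.90.90':
+ #
+ #   base = netaddr.IPAddress('90.90.90.90').value
+ #   for stream in range(3):
+ #       print(netaddr.IPAddress(base + stream))
+ #
+ # prints 90.90.90.90, 90.90.90.91 and 90.90.90.92; each address becomes
+ # the 'nw_dst' match of one generated flow.
+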
+ def add_connection(self, switch_name, port1, port2, traffic=None):
+ """See IVswitch for general description
+ """
+ flows = self._prepare_flows('add', switch_name, port1, port2, traffic)
+
+ # enable flows caching for large number of flows
+ cache = 'on' if len(flows) > _CACHE_FLOWS_LIMIT else 'off'
+
+ for flow in flows:
+ self.add_flow(switch_name, flow, cache)
+
+ if cache == 'on':
+ self.add_flow(switch_name, [], cache='flush')
+
+ def del_connection(self, switch_name, port1=None, port2=None):
+ """See IVswitch for general description
+ """
+ flows = self._prepare_flows('del', switch_name, port1, port2)
+
+ for flow in flows:
+ self.del_flow(switch_name, flow)
+
+ def dump_connections(self, switch_name):
+ """See IVswitch for general description
+ """
+ self.dump_flows(switch_name)
+
def add_route(self, switch_name, network, destination):
"""See IVswitch for general description
"""
- bridge = self._bridges[switch_name]
+ bridge = self._switches[switch_name]
bridge.add_route(network, destination)
def set_tunnel_arp(self, ip_addr, mac_addr, switch_name):
"""See IVswitch for general description
"""
- bridge = self._bridges[switch_name]
+ bridge = self._switches[switch_name]
bridge.set_tunnel_arp(ip_addr, mac_addr, switch_name)
def _get_port_count(self, param):
"""Returns the number of ports having a certain parameter
"""
cnt = 0
- for k in self._bridges:
- pparams = [c for (_, (_, c)) in list(self._bridges[k].get_ports().items())]
+ for k in self._switches:
+ pparams = [c for (_, (_, c)) in list(self._switches[k].get_ports().items())]
phits = [i for i in pparams if param in i]
cnt += len(phits)
@@ -271,7 +389,7 @@ class IVSwitchOvs(IVSwitch, tasks.Process):
:param switch_name: bridge to disable stp
:return: None
"""
- bridge = self._bridges[switch_name]
+ bridge = self._switches[switch_name]
bridge.set_stp(False)
self._logger.info('Sleeping for 50 secs to allow stp to stop.')
time.sleep(50) # needs time to disable
@@ -282,7 +400,7 @@ class IVSwitchOvs(IVSwitch, tasks.Process):
:param switch_name: bridge to enable stp
:return: None
"""
- bridge = self._bridges[switch_name]
+ bridge = self._switches[switch_name]
bridge.set_stp(True)
self._logger.info('Sleeping for 50 secs to allow stp to start.')
time.sleep(50) # needs time to enable
@@ -293,7 +411,7 @@ class IVSwitchOvs(IVSwitch, tasks.Process):
:param switch_name: bridge to disable rstp
:return: None
"""
- bridge = self._bridges[switch_name]
+ bridge = self._switches[switch_name]
bridge.set_rstp(False)
self._logger.info('Sleeping for 15 secs to allow rstp to stop.')
time.sleep(15) # needs time to disable
@@ -304,7 +422,7 @@ class IVSwitchOvs(IVSwitch, tasks.Process):
:param switch_name: bridge to enable rstp
:return: None
"""
- bridge = self._bridges[switch_name]
+ bridge = self._switches[switch_name]
bridge.set_rstp(True)
self._logger.info('Sleeping for 15 secs to allow rstp to start.')
time.sleep(15) # needs time to enable
@@ -382,7 +500,7 @@ class IVSwitchOvs(IVSwitch, tasks.Process):
with open(self._ovsdb_pidfile_path, "r") as pidfile:
ovsdb_pid = pidfile.read().strip()
- self._logger.info("Killing ovsdb with pid: " + ovsdb_pid)
+ self._logger.info("Killing ovsdb with pid: %s", ovsdb_pid)
if ovsdb_pid:
tasks.terminate_task(ovsdb_pid, logger=self._logger)
@@ -409,10 +527,10 @@ class IVSwitchOvs(IVSwitch, tasks.Process):
#
# validate methods required for integration testcases
#
- def validate_add_switch(self, dummy_result, switch_name, dummy_params=None):
+ def validate_add_switch(self, _dummy_result, switch_name, _dummy_params=None):
"""Validate - Create a new logical switch with no ports
"""
- bridge = self._bridges[switch_name]
+ bridge = self._switches[switch_name]
output = bridge.run_vsctl(['show'], check_error=True)
assert not output[1] # there shouldn't be any stderr, but in case
assert re.search('Bridge ["\']?%s["\']?' % switch_name, output[0]) is not None
@@ -420,7 +538,7 @@ class IVSwitchOvs(IVSwitch, tasks.Process):
# Method could be a function
# pylint: disable=no-self-use
- def validate_del_switch(self, dummy_result, switch_name):
+ def validate_del_switch(self, _dummy_result, switch_name):
"""Validate removal of switch
"""
bridge = OFBridge('tmp')
@@ -432,7 +550,7 @@ class IVSwitchOvs(IVSwitch, tasks.Process):
def validate_add_phy_port(self, result, switch_name):
""" Validate that physical port was added to bridge.
"""
- bridge = self._bridges[switch_name]
+ bridge = self._switches[switch_name]
output = bridge.run_vsctl(['show'], check_error=True)
assert not output[1] # there shouldn't be any stderr, but in case
assert re.search('Port ["\']?%s["\']?' % result[0], output[0]) is not None
@@ -444,16 +562,39 @@ class IVSwitchOvs(IVSwitch, tasks.Process):
"""
return self.validate_add_phy_port(result, switch_name)
- def validate_del_port(self, dummy_result, switch_name, port_name):
+ def validate_del_port(self, _dummy_result, switch_name, port_name):
""" Validate that port_name was removed from bridge.
"""
- bridge = self._bridges[switch_name]
+ bridge = self._switches[switch_name]
output = bridge.run_vsctl(['show'], check_error=True)
assert not output[1] # there shouldn't be any stderr, but in case
assert 'Port "%s"' % port_name not in output[0]
return True
- def validate_add_flow(self, dummy_result, switch_name, flow, dummy_cache='off'):
+ def validate_add_connection(self, result, switch_name, port1, port2, traffic=None):
+ """ Validate that connection was added
+ """
+ for flow in self._prepare_flows('add', switch_name, port1, port2, traffic):
+ if not self.validate_add_flow(result, switch_name, flow):
+ return False
+
+ return True
+
+ def validate_del_connection(self, result, switch_name, port1, port2):
+ """ Validate that connection was deleted
+ """
+ for flow in self._prepare_flows('del', switch_name, port1, port2):
+ if not self.validate_del_flow(result, switch_name, flow):
+ return False
+
+ return True
+
+ def validate_dump_connections(self, _dummy_result, _dummy_switch_name):
+ """ Validate dump connections call
+ """
+ return True
+
+ def validate_add_flow(self, _dummy_result, switch_name, flow, _dummy_cache='off'):
""" Validate insertion of the flow into the switch
"""
@@ -466,7 +607,7 @@ class IVSwitchOvs(IVSwitch, tasks.Process):
# get dump of flows and compare them one by one
flow_src = flow_key(flow)
- bridge = self._bridges[switch_name]
+ bridge = self._switches[switch_name]
output = bridge.run_ofctl(['dump-flows', switch_name], check_error=True)
for flow_dump in output[0].split('\n'):
if flow_match(flow_dump, flow_src):
@@ -474,44 +615,44 @@ class IVSwitchOvs(IVSwitch, tasks.Process):
return True
return False
- def validate_del_flow(self, dummy_result, switch_name, flow=None):
+ def validate_del_flow(self, _dummy_result, switch_name, flow=None):
""" Validate removal of the flow
"""
if not flow:
# what else we can do?
return True
- return not self.validate_add_flow(dummy_result, switch_name, flow)
+ return not self.validate_add_flow(_dummy_result, switch_name, flow)
- def validate_dump_flows(self, dummy_result, dummy_switch_name):
+ def validate_dump_flows(self, _dummy_result, _dummy_switch_name):
""" Validate call of flow dump
"""
return True
- def validate_disable_rstp(self, dummy_result, switch_name):
+ def validate_disable_rstp(self, _dummy_result, switch_name):
""" Validate rstp disable
"""
- bridge = self._bridges[switch_name]
+ bridge = self._switches[switch_name]
return 'rstp_enable : false' in ''.join(bridge.bridge_info())
- def validate_enable_rstp(self, dummy_result, switch_name):
+ def validate_enable_rstp(self, _dummy_result, switch_name):
""" Validate rstp enable
"""
- bridge = self._bridges[switch_name]
+ bridge = self._switches[switch_name]
return 'rstp_enable : true' in ''.join(bridge.bridge_info())
- def validate_disable_stp(self, dummy_result, switch_name):
+ def validate_disable_stp(self, _dummy_result, switch_name):
""" Validate stp disable
"""
- bridge = self._bridges[switch_name]
+ bridge = self._switches[switch_name]
return 'stp_enable : false' in ''.join(bridge.bridge_info())
- def validate_enable_stp(self, dummy_result, switch_name):
+ def validate_enable_stp(self, _dummy_result, switch_name):
""" Validate stp enable
"""
- bridge = self._bridges[switch_name]
+ bridge = self._switches[switch_name]
return 'stp_enable : true' in ''.join(bridge.bridge_info())
- def validate_restart(self, dummy_result):
+ def validate_restart(self, _dummy_result):
""" Validate restart
"""
return True
diff --git a/vswitches/ovs_dpdk_vhost.py b/vswitches/ovs_dpdk_vhost.py
index 6deb0c25..8da043c6 100644
--- a/vswitches/ovs_dpdk_vhost.py
+++ b/vswitches/ovs_dpdk_vhost.py
@@ -1,4 +1,4 @@
-# Copyright 2015-2017 Intel Corporation.
+# Copyright 2015-2018 Intel Corporation., Tieto
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -15,7 +15,6 @@
"""VSPERF VSwitch implementation using DPDK and vhost ports
"""
-import logging
import subprocess
from src.ovs import OFBridge
@@ -36,9 +35,7 @@ class OvsDpdkVhost(IVSwitchOvs):
"""
def __init__(self):
- super(OvsDpdkVhost, self).__init__()
- self._logger = logging.getLogger(__name__)
-
+ super().__init__()
vswitchd_args = []
# legacy DPDK configuration through --dpdk option of vswitchd
@@ -104,9 +101,9 @@ class OvsDpdkVhost(IVSwitchOvs):
if S.getValue('VSWITCH_AFFINITIZATION_ON') == 1:
# Sets the PMD core mask to VSWITCH_PMD_CPU_MASK
# for CPU core affinitization
- self._bridges[switch_name].set_db_attribute('Open_vSwitch', '.',
- 'other_config:pmd-cpu-mask',
- S.getValue('VSWITCH_PMD_CPU_MASK'))
+ self._switches[switch_name].set_db_attribute('Open_vSwitch', '.',
+ 'other_config:pmd-cpu-mask',
+ S.getValue('VSWITCH_PMD_CPU_MASK'))
def add_phy_port(self, switch_name):
"""See IVswitch for general description
@@ -115,7 +112,7 @@ class OvsDpdkVhost(IVSwitchOvs):
The new port is named dpdk<n> where n is an integer starting from 0.
"""
_nics = S.getValue('NICS')
- bridge = self._bridges[switch_name]
+ bridge = self._switches[switch_name]
dpdk_count = self._get_port_count('type=dpdk')
if dpdk_count == len(_nics):
raise RuntimeError("Can't add phy port! There are only {} ports defined "
@@ -144,7 +141,7 @@ class OvsDpdkVhost(IVSwitchOvs):
The new port is named dpdkvhost<n> where n is an integer starting
from 0
"""
- bridge = self._bridges[switch_name]
+ bridge = self._switches[switch_name]
if S.getValue('VSWITCH_VHOSTUSER_SERVER_MODE'):
nic_type = 'dpdkvhostuser'
@@ -177,18 +174,3 @@ class OvsDpdkVhost(IVSwitchOvs):
return True
except subprocess.CalledProcessError:
return False
-
- def add_connection(self, switch_name, port1, port2, bidir=False):
- """See IVswitch for general description
- """
- raise NotImplementedError()
-
- def del_connection(self, switch_name, port1, port2, bidir=False):
- """See IVswitch for general description
- """
- raise NotImplementedError()
-
- def dump_connections(self, switch_name):
- """See IVswitch for general description
- """
- raise NotImplementedError()
diff --git a/vswitches/ovs_vanilla.py b/vswitches/ovs_vanilla.py
index 83c52050..d23a0c61 100644
--- a/vswitches/ovs_vanilla.py
+++ b/vswitches/ovs_vanilla.py
@@ -1,4 +1,4 @@
-# Copyright 2015-2017 Intel Corporation.
+# Copyright 2015-2018 Intel Corporation., Tieto
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -15,7 +15,6 @@
"""VSPERF Vanilla OVS implementation
"""
-import logging
import time
from conf import settings
from vswitches.ovs import IVSwitchOvs
@@ -36,9 +35,8 @@ class OvsVanilla(IVSwitchOvs):
_vport_id = 0
def __init__(self):
- super(OvsVanilla, self).__init__()
+ super().__init__()
self._ports = list(nic['device'] for nic in settings.getValue('NICS'))
- self._logger = logging.getLogger(__name__)
self._vswitchd_args += ["unix:%s" % self.get_db_sock_path()]
self._vswitchd_args += settings.getValue('VSWITCHD_VANILLA_ARGS')
@@ -81,7 +79,7 @@ class OvsVanilla(IVSwitchOvs):
self._logger.error("Can't detect device name for NIC %s", self._current_id)
raise ValueError("Invalid device name for %s" % self._current_id)
- bridge = self._bridges[switch_name]
+ bridge = self._switches[switch_name]
port_name = self._ports[self._current_id]
params = []
@@ -129,21 +127,6 @@ class OvsVanilla(IVSwitchOvs):
tasks.run_task(['sudo', 'ip', 'link', 'set', 'dev', tap_name, 'up'],
self._logger, 'Bring up ' + tap_name, False)
- bridge = self._bridges[switch_name]
+ bridge = self._switches[switch_name]
of_port = bridge.add_port(tap_name, [])
return (tap_name, of_port)
-
- def add_connection(self, switch_name, port1, port2, bidir=False):
- """See IVswitch for general description
- """
- raise NotImplementedError()
-
- def del_connection(self, switch_name, port1, port2, bidir=False):
- """See IVswitch for general description
- """
- raise NotImplementedError()
-
- def dump_connections(self, switch_name):
- """See IVswitch for general description
- """
- raise NotImplementedError()
diff --git a/vswitches/vpp_dpdk_vhost.py b/vswitches/vpp_dpdk_vhost.py
index 58d6bf51..f88ed95e 100644
--- a/vswitches/vpp_dpdk_vhost.py
+++ b/vswitches/vpp_dpdk_vhost.py
@@ -1,4 +1,4 @@
-# Copyright 2017 Intel Corporation.
+# Copyright 2017-2018 Intel Corporation., Tieto
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -15,7 +15,6 @@
"""VSPERF VPP implementation using DPDK and vhostuser vports
"""
-import logging
import os
import copy
import re
@@ -37,19 +36,17 @@ class VppDpdkVhost(IVSwitch, tasks.Process):
def __init__(self):
"""See IVswitch for general description
"""
- self._logfile = os.path.join(S.getValue('LOG_DIR'),
- S.getValue('LOG_FILE_VPP'))
- self._logger = logging.getLogger(__name__)
+ super().__init__()
+ name, ext = os.path.splitext(S.getValue('LOG_FILE_VPP'))
+ rename_vpplf = "{name}_{uid}{ex}".format(name=name,
+ uid=S.getValue(
+ 'LOG_TIMESTAMP'),
+ ex=ext)
+ self._logfile = os.path.join(S.getValue('RESULTS_PATH'), rename_vpplf)
self._expect = r'vpp#'
- self._timeout = 30
- self._vswitch_args = []
- self._cmd = []
self._cmd_template = ['sudo', '-E', S.getValue('TOOLS')['vpp']]
- self._stamp = None
- self._logger = logging.getLogger(__name__)
self._phy_ports = []
self._virt_ports = []
- self._switches = {}
self._vpp_ctl = ['sudo', S.getValue('TOOLS')['vppctl']]
# configure DPDK NICs
@@ -107,12 +104,20 @@ class VppDpdkVhost(IVSwitch, tasks.Process):
tmpif = iface.split()
if not tmpif:
continue
+ if 'Link' in iface or 'local' in iface:
+ continue
# get PCI address of given interface
output = self.run_vppctl(['show', 'hardware', tmpif[1], 'detail'])
- match = re.search(r'pci address:\s*([\d:\.]+)', output[0])
+ lines = output[0].split('\n')
+ match = ''
+ for line in lines:
+ if "pci:" in line:
+ match = line.split(' ')[6]
if match:
# normalize PCI address, e.g. 0000:05:10.01 => 0000:05:10.1
- tmp_pci = match.group(1).split('.')
+ tmp_pci = match.split('.')
tmp_pci[1] = str(int(tmp_pci[1]))
tmpif.append('.'.join(tmp_pci))
else:
@@ -151,7 +156,7 @@ class VppDpdkVhost(IVSwitch, tasks.Process):
tasks.Process.start(self)
self.relinquish()
except (pexpect.EOF, pexpect.TIMEOUT) as exc:
- logging.error("Exception during VPP start.")
+ self._logger.error("Exception during VPP start.")
raise exc
self._logger.info("VPP...Started.")
@@ -205,6 +210,7 @@ class VppDpdkVhost(IVSwitch, tasks.Process):
def add_switch(self, switch_name, dummy_params=None):
"""See IVswitch for general description
"""
+ # pylint: disable=unused-argument
if switch_name in self._switches:
self._logger.warning("switch %s already exists...", switch_name)
else:
@@ -221,6 +227,7 @@ class VppDpdkVhost(IVSwitch, tasks.Process):
"""See IVswitch for general description
:raises: RuntimeError
"""
+ # pylint: disable=unused-argument
# get list of physical interfaces with PCI addresses
vpp_nics = self._get_nic_info(key='Pci')
# check if there are any NICs left
@@ -239,6 +246,7 @@ class VppDpdkVhost(IVSwitch, tasks.Process):
def add_vport(self, dummy_switch_name):
"""See IVswitch for general description
"""
+ # pylint: disable=unused-argument
socket_name = S.getValue('TOOLS')['ovs_var_tmp'] + 'dpdkvhostuser' + str(len(self._virt_ports))
if S.getValue('VSWITCH_VHOSTUSER_SERVER_MODE'):
mode = ['server']
@@ -266,21 +274,17 @@ class VppDpdkVhost(IVSwitch, tasks.Process):
else:
self._logger.warning("Port %s is not configured.", port_name)
- def add_l2patch(self, port1, port2, bidir=False):
+ def add_l2patch(self, port1, port2):
"""Create l2patch connection between given ports
"""
self.run_vppctl(['test', 'l2patch', 'rx', port1, 'tx', port2])
- if bidir:
- self.run_vppctl(['test', 'l2patch', 'rx', port2, 'tx', port1])
- def add_xconnect(self, port1, port2, bidir=False):
+ def add_xconnect(self, port1, port2):
"""Create l2patch connection between given ports
"""
self.run_vppctl(['set', 'interface', 'l2', 'xconnect', port1, port2])
- if bidir:
- self.run_vppctl(['set', 'interface', 'l2', 'xconnect', port2, port1])
- def add_bridge(self, switch_name, port1, port2, dummy_bidir=False):
+ def add_bridge(self, switch_name, port1, port2):
"""Add given ports to bridge ``switch_name``
"""
self.run_vppctl(['set', 'interface', 'l2', 'bridge', port1,
@@ -288,56 +292,59 @@ class VppDpdkVhost(IVSwitch, tasks.Process):
self.run_vppctl(['set', 'interface', 'l2', 'bridge', port2,
str(self._switches[switch_name])])
- def add_connection(self, switch_name, port1, port2, bidir=False):
+ def add_connection(self, switch_name, port1, port2, traffic=None):
"""See IVswitch for general description
:raises: RuntimeError
"""
+ if traffic:
+ self._logger.warning("VPP add_connection() does not support 'traffic' options.")
+
mode = S.getValue('VSWITCH_VPP_L2_CONNECT_MODE')
if mode == 'l2patch':
- self.add_l2patch(port1, port2, bidir)
+ self.add_l2patch(port1, port2)
elif mode == 'xconnect':
- self.add_xconnect(port1, port2, bidir)
+ self.add_xconnect(port1, port2)
elif mode == 'bridge':
self.add_bridge(switch_name, port1, port2)
else:
- raise RuntimeError('VPP: Unsupported l2 connection mode detected %s', mode)
+ raise RuntimeError('VPP: Unsupported l2 connection mode detected %s' % mode)
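+
+ # For reference, the mode dispatched on above is a plain configuration
+ # value, e.g. set in a settings file or via --test-params:
+ #   VSWITCH_VPP_L2_CONNECT_MODE = 'l2patch'   # or 'xconnect' / 'bridge'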
- def del_l2patch(self, port1, port2, bidir=False):
+ def del_l2patch(self, port1, port2):
"""Remove l2patch connection between given ports
:param port1: port to be used in connection
:param port2: port to be used in connection
- :param bidir: switch between uni and bidirectional traffic
"""
self.run_vppctl(['test', 'l2patch', 'rx', port1, 'tx', port2, 'del'])
- if bidir:
- self.run_vppctl(['test', 'l2patch', 'rx', port2, 'tx', port1, 'del'])
- def del_xconnect(self, dummy_port1, dummy_port2, dummy_bidir=False):
+ def del_xconnect(self, port1, port2):
"""Remove xconnect connection between given ports
"""
- self._logger.warning('VPP: Removal of l2 xconnect is not implemented.')
+ self.run_vppctl(['set', 'interface', 'l3', port1])
+ self.run_vppctl(['set', 'interface', 'l3', port2])
- def del_bridge(self, dummy_switch_name, dummy_port1, dummy_port2):
+ def del_bridge(self, _dummy_switch_name, port1, port2):
"""Remove given ports from the bridge
"""
- self._logger.warning('VPP: Removal of interfaces from bridge is not implemented.')
+ self.run_vppctl(['set', 'interface', 'l3', port1])
+ self.run_vppctl(['set', 'interface', 'l3', port2])
- def del_connection(self, switch_name, port1, port2, bidir=False):
+ def del_connection(self, switch_name, port1=None, port2=None):
"""See IVswitch for general description
:raises: RuntimeError
"""
- mode = S.getValue('VSWITCH_VPP_L2_CONNECT_MODE')
- if mode == 'l2patch':
- self.del_l2patch(port1, port2, bidir)
- elif mode == 'xconnect':
- self.del_xconnect(port1, port2, bidir)
- elif mode == 'bridge':
- self.del_bridge(switch_name, port1, port2)
- else:
- raise RuntimeError('VPP: Unsupported l2 connection mode detected %s', mode)
+ if port1 and port2:
+ mode = S.getValue('VSWITCH_VPP_L2_CONNECT_MODE')
+ if mode == 'l2patch':
+ self.del_l2patch(port1, port2)
+ elif mode == 'xconnect':
+ self.del_xconnect(port1, port2)
+ elif mode == 'bridge':
+ self.del_bridge(switch_name, port1, port2)
+ else:
+ raise RuntimeError('VPP: Unsupported l2 connection mode detected %s' % mode)
def dump_l2patch(self):
"""Dump l2patch connections
@@ -347,7 +354,7 @@ class VppDpdkVhost(IVSwitch, tasks.Process):
def dump_xconnect(self):
"""Dump l2 xconnect connections
"""
- self._logger.warning("VPP: Dump of l2 xconnections is not supported.")
+ self.run_vppctl(['show', 'mode'] + self._phy_ports + self._virt_ports)
def dump_bridge(self, switch_name):
"""Show bridge details
@@ -369,7 +376,7 @@ class VppDpdkVhost(IVSwitch, tasks.Process):
elif mode == 'bridge':
self.dump_bridge(switch_name)
else:
- raise RuntimeError('VPP: Unsupported l2 connection mode detected %s', mode)
+ raise RuntimeError('VPP: Unsupported l2 connection mode detected %s' % mode)
def run_vppctl(self, args, check_error=False):
"""Run ``vppctl`` with supplied arguments.
@@ -385,50 +392,50 @@ class VppDpdkVhost(IVSwitch, tasks.Process):
#
# Validate methods
#
- def validate_add_switch(self, dummy_result, switch_name, dummy_params=None):
+ def validate_add_switch(self, _dummy_result, switch_name, _dummy_params=None):
"""Validate - Create a new logical switch with no ports
"""
return switch_name in self._switches
- def validate_del_switch(self, dummy_result, switch_name):
+ def validate_del_switch(self, _dummy_result, switch_name):
"""Validate removal of switch
"""
- return not self.validate_add_switch(dummy_result, switch_name)
+ return not self.validate_add_switch(_dummy_result, switch_name)
- def validate_add_phy_port(self, result, dummy_switch_name):
+ def validate_add_phy_port(self, result, _dummy_switch_name):
""" Validate that physical port was added to bridge.
"""
return result[0] in self._phy_ports
- def validate_add_vport(self, result, dummy_switch_name):
+ def validate_add_vport(self, result, _dummy_switch_name):
""" Validate that virtual port was added to bridge.
"""
return result[0] in self._virt_ports
- def validate_del_port(self, dummy_result, dummy_switch_name, port_name):
+ def validate_del_port(self, _dummy_result, _dummy_switch_name, port_name):
""" Validate that port_name was removed from bridge.
"""
return not (port_name in self._phy_ports or port_name in self._virt_ports)
# pylint: disable=no-self-use
- def validate_add_connection(self, dummy_result, dummy_switch_name, dummy_port1,
- dummy_port2, dummy_bidir=False):
+ def validate_add_connection(self, _dummy_result, _dummy_switch_name, _dummy_port1,
+ _dummy_port2, _dummy_traffic=None):
""" Validate that connection was added
"""
return True
- def validate_del_connection(self, dummy_result, dummy_switch_name, dummy_port1,
- dummy_port2, dummy_bidir=False):
+ def validate_del_connection(self, _dummy_result, _dummy_switch_name, _dummy_port1,
+ _dummy_port2):
""" Validate that connection was deleted
"""
return True
- def validate_dump_connections(self, dummy_result, dummy_switch_name):
+ def validate_dump_connections(self, _dummy_result, _dummy_switch_name):
""" Validate dump connections call
"""
return True
- def validate_run_vppctl(self, result, dummy_args, dummy_check_error=False):
+ def validate_run_vppctl(self, result, _dummy_args, _dummy_check_error=False):
"""validate execution of ``vppctl`` with supplied arguments.
"""
# there shouldn't be any stderr
@@ -437,21 +444,6 @@ class VppDpdkVhost(IVSwitch, tasks.Process):
#
# Non implemented methods
#
- def add_flow(self, switch_name, flow, cache='off'):
- """See IVswitch for general description
- """
- raise NotImplementedError()
-
- def del_flow(self, switch_name, flow=None):
- """See IVswitch for general description
- """
- raise NotImplementedError()
-
- def dump_flows(self, switch_name):
- """See IVswitch for general description
- """
- raise NotImplementedError()
-
def add_route(self, switch_name, network, destination):
"""See IVswitch for general description
"""
@@ -470,4 +462,4 @@ class VppDpdkVhost(IVSwitch, tasks.Process):
def get_ports(self, switch_name):
"""See IVswitch for general description
"""
- raise NotImplementedError()
+ return self._phy_ports
diff --git a/vswitches/vswitch.py b/vswitches/vswitch.py
index efa3a349..a3d4e974 100644
--- a/vswitches/vswitch.py
+++ b/vswitches/vswitch.py
@@ -1,4 +1,4 @@
-# Copyright 2015-2016 Intel Corporation.
+# Copyright 2015-2018 Intel Corporation., Tieto
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,12 +14,23 @@
"""Generic interface VSPERF uses for controlling a vSwitch
"""
+import logging
class IVSwitch(object):
"""Interface class that is implemented by vSwitch-specific classes
Other methods are called only between start() and stop()
"""
+ def __init__(self):
+ """Initialization of vswitch class
+ """
+ self._timeout = 30
+ self._switches = {}
+ self._logger = logging.getLogger(__name__)
+ self._cmd = []
+ self._vswitch_args = []
+ self._stamp = None
+
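+ # A minimal sketch of a subclass relying on this shared state
+ # (hypothetical class, for illustration only):
+ #
+ #   class NullVSwitch(IVSwitch):
+ #       def __init__(self):
+ #           super().__init__()   # sets _switches, _logger, _timeout, ...
+ #       def add_switch(self, switch_name, params=None):
+ #           self._switches[switch_name] = object()
+ #           self._logger.info('added switch %s', switch_name)
+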
def get_version(self):
"""Return version of vSwitch and DPDK (if used by vSwitch)
This method should be implemented in case, that version
@@ -112,58 +123,23 @@ class IVSwitch(object):
"""
raise NotImplementedError()
- def add_flow(self, switch_name, flow, cache='off'):
- """Add a flow rule to the logical switch
-
- :param switch_name: The switch on which to operate
- :param flow: Flow description as a dictionary
- :param cache: Optional. Specifies if flow should be inserted
- to the switch or cached to increase performance during manipulation
- with large number of flows.
- Values:
- 'off' - cache is off and flow is inserted directly to the switch
- 'on' - cache is on and flow is inserted into the cache
- 'flush' - cache content will be inserted into the switch
-
- Example flow dictionary:
- flow = {
- 'in_port': '1',
- 'idle_timeout': '0',
- 'actions': ['output:3']
- }
- """
- raise NotImplementedError()
-
- def del_flow(self, switch_name, flow=None):
- """Delete the flow rule from the logical switch
-
- :param switch_name: The switch on which to operate
- :param flow: Flow description as a dictionary
-
- For flow dictionary description, see add_flow
- For flow==None, all flows are deleted
- """
- raise NotImplementedError()
-
- def add_connection(self, switch_name, port1, port2, bidir=False):
+ def add_connection(self, switch_name, port1, port2, traffic=None):
"""Creates connection between given ports.
:param switch_name: switch on which to operate
:param port1: port to be used in connection
:param port2: port to be used in connection
- :param bidir: switch between uni and bidirectional traffic
:raises: RuntimeError
"""
raise NotImplementedError()
- def del_connection(self, switch_name, port1, port2, bidir=False):
+ def del_connection(self, switch_name, port1=None, port2=None):
"""Remove connection between two interfaces.
:param switch_name: switch on which to operate
:param port1: port to be used in connection
:param port2: port to be used in connection
- :param bidir: switch between uni and bidirectional traffic
:raises: RuntimeError
"""
@@ -178,13 +154,6 @@ class IVSwitch(object):
"""
raise NotImplementedError()
- def dump_flows(self, switch_name):
- """Dump flows from the logical switch
-
- :param switch_name: The switch on which to operate
- """
- raise NotImplementedError()
-
def add_route(self, switch_name, network, destination):
"""Add a route for tunneling routing table
diff --git a/xtesting/baremetal/Dockerfile b/xtesting/baremetal/Dockerfile
new file mode 100644
index 00000000..b78594b5
--- /dev/null
+++ b/xtesting/baremetal/Dockerfile
@@ -0,0 +1,36 @@
+# Copyright 2020 Spirent Communications.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM opnfv/xtesting
+LABEL maintainer="sridhar.rao@spirent.com"
+
+ADD . /src/
+RUN apk add --no-cache --update --virtual .build-deps python3 \
+ py3-pip py3-wheel git python3-dev linux-headers libffi-dev \
+ make openssl-dev gcc musl-dev && \
+ pip3 install --upgrade pip chainmap oslo.utils \
+ paramiko scp && \
+ git init /src && pip3 install /src
+
+ENV DUT_IP_ADDRESS=10.10.120.24
+ENV DUT_USERNAME=opnfv
+ENV DUT_PASSWORD=opnfv
+ENV VSPERF_TESTS=phy2phy_tput
+ENV VSPERF_CONFFILE=/vsperf.conf
+ENV RES_PATH=/tmp
+ENV VSPERF_TRAFFICGEN_MODE=NO
+
+COPY vsperf.conf /vsperf.conf
+COPY testcases.yaml /usr/lib/python3.8/site-packages/xtesting/ci/testcases.yaml
+CMD ["run_tests", "-t", "all"]
diff --git a/xtesting/baremetal/exceptions.py b/xtesting/baremetal/exceptions.py
new file mode 100644
index 00000000..c4e0e097
--- /dev/null
+++ b/xtesting/baremetal/exceptions.py
@@ -0,0 +1,65 @@
+"""
+# Copyright (c) 2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+#pylint: disable=import-error
+from oslo_utils import excutils
+
+
+class VsperfCException(Exception):
+ """Base VSPERF-C Exception.
+
+ To correctly use this class, inherit from it and define
+ a 'message' property. That message will get printf'd
+ with the keyword arguments provided to the constructor.
+
+ Based on NeutronException class.
+ """
+ message = "An unknown exception occurred."
+
+ def __init__(self, **kwargs):
+ try:
+ super(VsperfCException, self).__init__(self.message % kwargs)
+ self.msg = self.message % kwargs
+ except Exception: # pylint: disable=broad-except
+ with excutils.save_and_reraise_exception() as ctxt:
+ if not self.use_fatal_exceptions():
+ ctxt.reraise = False
+ # at least get the core message out if something happened
+ super(VsperfCException, self).__init__(self.message)
+
+ def __str__(self):
+ return self.msg
+
+ def use_fatal_exceptions(self):
+ """Is the instance using fatal exceptions.
+
+ :returns: Always returns False.
+ """ #pylint: disable=no-self-use
+ return False
+
+
+class InvalidType(VsperfCException):
+ """Invalid type"""
+ message = 'Type "%(type_to_convert)s" is not valid'
+
+
+class SSHError(VsperfCException):
+ """ssh error"""
+ message = '%(error_msg)s'
+
+
+class SSHTimeout(SSHError):
+ """ssh timeout""" #pylint: disable=unnecessary-pass
+ pass
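+
+
+# Usage sketch of the classes above: keyword arguments are interpolated
+# into the class-level 'message' template (hypothetical helper name):
+def _demo_ssh_error():
+    try:
+        raise SSHError(error_msg='connection refused by DUT')
+    except SSHError as err:
+        return str(err)   # -> 'connection refused by DUT'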
diff --git a/xtesting/baremetal/requirements.txt b/xtesting/baremetal/requirements.txt
new file mode 100644
index 00000000..f2da6ad5
--- /dev/null
+++ b/xtesting/baremetal/requirements.txt
@@ -0,0 +1,2 @@
+xtesting
+requests!=2.20.0,!=2.24.0 # Apache-2.0
diff --git a/xtesting/baremetal/setup.cfg b/xtesting/baremetal/setup.cfg
new file mode 100644
index 00000000..9ca38236
--- /dev/null
+++ b/xtesting/baremetal/setup.cfg
@@ -0,0 +1,10 @@
+[metadata]
+name = vsperf
+version = 1
+
+[files]
+packages = .
+
+[entry_points]
+xtesting.testcase =
+ vsperf_controller = vsperf_controller:VsperfBm
diff --git a/xtesting/baremetal/setup.py b/xtesting/baremetal/setup.py
new file mode 100644
index 00000000..fa9d59ac
--- /dev/null
+++ b/xtesting/baremetal/setup.py
@@ -0,0 +1,9 @@
+#!/usr/bin/env python
+
+# pylint: disable=missing-docstring
+
+import setuptools
+
+setuptools.setup(
+ setup_requires=['pbr>=2.0.0'],
+ pbr=True)
diff --git a/xtesting/baremetal/site.yml b/xtesting/baremetal/site.yml
new file mode 100644
index 00000000..06f8c2e2
--- /dev/null
+++ b/xtesting/baremetal/site.yml
@@ -0,0 +1,13 @@
+---
+- hosts:
+ - 127.0.0.1
+ roles:
+ - role: collivier.xtesting
+ project: vsperf
+ repo: 127.0.0.1
+ dport: 5000
+ gerrit:
+ suites:
+ - container: vsperfbm
+ tests:
+ - phy2phy_tput
diff --git a/xtesting/baremetal/ssh.py b/xtesting/baremetal/ssh.py
new file mode 100644
index 00000000..ce560c49
--- /dev/null
+++ b/xtesting/baremetal/ssh.py
@@ -0,0 +1,546 @@
+# Copyright 2020: Mirantis Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#pylint: disable=I,C,R,locally-disabled
+#pylint: disable=import-error,arguments-differ
+
+# this is a modified copy of rally/rally/common/sshutils.py
+
+"""High level ssh library.
+
+Usage examples:
+
+Execute command and get output:
+
+ ssh = sshclient.SSH("root", "example.com", port=33)
+ status, stdout, stderr = ssh.execute("ps ax")
+ if status:
+ raise Exception("Command failed with non-zero status.")
+ print(stdout.splitlines())
+
+Execute command with huge output:
+
+ class PseudoFile(io.RawIOBase):
+ def write(chunk):
+ if "error" in chunk:
+ email_admin(chunk)
+
+ ssh = SSH("root", "example.com")
+ with PseudoFile() as p:
+ ssh.run("tail -f /var/log/syslog", stdout=p, timeout=False)
+
+Execute local script on remote side:
+
+ ssh = sshclient.SSH("user", "example.com")
+
+ with open("~/myscript.sh", "r") as stdin_file:
+ status, out, err = ssh.execute('/bin/sh -s "arg1" "arg2"',
+ stdin=stdin_file)
+
+Upload file:
+
+ ssh = SSH("user", "example.com")
+ # use rb for binary files
+ with open("/store/file.gz", "rb") as stdin_file:
+ ssh.run("cat > ~/upload/file.gz", stdin=stdin_file)
+
+Eventlet:
+
+ eventlet.monkey_patch(select=True, time=True)
+ or
+ eventlet.monkey_patch()
+ or
+ sshclient = eventlet.import_patched("vsperf.ssh")
+
+"""
+from __future__ import print_function
+import io
+import logging
+import os
+import re
+import select
+import socket
+import time
+
+import paramiko
+from chainmap import ChainMap
+from oslo_utils import encodeutils
+from scp import SCPClient
+import six
+
+# Local helper modules shipped alongside this file; both inside and outside
+# the container they are imported from the working directory.
+import exceptions
+from utils import try_int, NON_NONE_DEFAULT, make_dict_from_map
+
+
+def convert_key_to_str(key):
+ if not isinstance(key, (paramiko.RSAKey, paramiko.DSSKey)):
+ return key
+ k = io.StringIO()
+ key.write_private_key(k)
+ return k.getvalue()
+
+
+class SSH(object):
+ """Represent ssh connection."""
+ #pylint: disable=no-member
+
+ SSH_PORT = paramiko.config.SSH_PORT
+ DEFAULT_WAIT_TIMEOUT = 120
+
+ @staticmethod
+ def gen_keys(key_filename, bit_count=2048):
+ rsa_key = paramiko.RSAKey.generate(bits=bit_count, progress_func=None)
+ rsa_key.write_private_key_file(key_filename)
+ print("Writing %s ..." % key_filename)
+ with open('.'.join([key_filename, "pub"]), "w") as pubkey_file:
+ pubkey_file.write(rsa_key.get_name())
+ pubkey_file.write(' ')
+ pubkey_file.write(rsa_key.get_base64())
+ pubkey_file.write('\n')
+
+ @staticmethod
+ def get_class():
+ # must return static class name, anything else
+ # refers to the calling class
+ # i.e. the subclass, not the superclass
+ return SSH
+
+ @classmethod
+ def get_arg_key_map(cls):
+ return {
+ 'user': ('user', NON_NONE_DEFAULT),
+ 'host': ('ip', NON_NONE_DEFAULT),
+ 'port': ('ssh_port', cls.SSH_PORT),
+ 'pkey': ('pkey', None),
+ 'key_filename': ('key_filename', None),
+ 'password': ('password', None),
+ 'name': ('name', None),
+ }
+
+ def __init__(self, user, host, port=None, pkey=None,
+ key_filename=None, password=None, name=None):
+ """Initialize SSH client.
+
+ :param user: ssh username
+ :param host: hostname or ip address of remote ssh server
+ :param port: remote ssh port
+ :param pkey: RSA or DSS private key string or file object
+ :param key_filename: private key filename
+ :param password: password
+ """
+ self.name = name
+ if name:
+ self.log = logging.getLogger(__name__ + '.' + self.name)
+ else:
+ self.log = logging.getLogger(__name__)
+
+ self.wait_timeout = self.DEFAULT_WAIT_TIMEOUT
+ self.user = user
+ self.host = host
+ # everybody wants to debug this in the caller, do it here instead
+ self.log.debug("user:%s host:%s", user, host)
+
+ # we may get text port from YAML, convert to int
+ self.port = try_int(port, self.SSH_PORT)
+ self.pkey = self._get_pkey(pkey) if pkey else None
+ self.password = password
+ self.key_filename = key_filename
+ self._client = False
+        # paramiko's DEBUG level dumps raw SSH protocol traffic; we only
+        # want that when explicitly debugging paramiko itself
+        if os.environ.get("PARAMIKO_DEBUG", "").lower() == "true":
+            logging.getLogger("paramiko").setLevel(logging.DEBUG)
+        else:
+            logging.getLogger("paramiko").setLevel(logging.WARNING)
+
+ @classmethod
+ def args_from_node(cls, node, overrides=None, defaults=None):
+ if overrides is None:
+ overrides = {}
+ if defaults is None:
+ defaults = {}
+
+ params = ChainMap(overrides, node, defaults)
+ return make_dict_from_map(params, cls.get_arg_key_map())
+
+ @classmethod
+ def from_node(cls, node, overrides=None, defaults=None):
+ return cls(**cls.args_from_node(node, overrides, defaults))
+
+ def _get_pkey(self, key):
+ if isinstance(key, six.string_types):
+ key = six.moves.StringIO(key)
+ errors = []
+ for key_class in (paramiko.rsakey.RSAKey, paramiko.dsskey.DSSKey):
+ try:
+ return key_class.from_private_key(key)
+ except paramiko.SSHException as e:
+ errors.append(e)
+ raise exceptions.SSHError(error_msg='Invalid pkey: %s' % errors)
+
+ @property
+ def is_connected(self):
+ return bool(self._client)
+
+ def _get_client(self):
+ if self.is_connected:
+ return self._client
+ try:
+ self._client = paramiko.SSHClient()
+ self._client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ self._client.connect(self.host, username=self.user,
+ port=self.port, pkey=self.pkey,
+ key_filename=self.key_filename,
+ password=self.password,
+ allow_agent=False, look_for_keys=False,
+ timeout=1)
+ return self._client
+ except Exception as e:
+ message = ("Exception %(exception_type)s was raised "
+ "during connect. Exception value is: %(exception)r" %
+ {"exception": e, "exception_type": type(e)})
+ self._client = False
+ raise exceptions.SSHError(error_msg=message)
+
+ def _make_dict(self):
+ return {
+ 'user': self.user,
+ 'host': self.host,
+ 'port': self.port,
+ 'pkey': self.pkey,
+ 'key_filename': self.key_filename,
+ 'password': self.password,
+ 'name': self.name,
+ }
+
+ def copy(self):
+ return self.get_class()(**self._make_dict())
+
+ def close(self):
+ if self._client:
+ self._client.close()
+ self._client = False
+
+ def run(self, cmd, stdin=None, stdout=None, stderr=None,
+ raise_on_error=True, timeout=3600,
+ keep_stdin_open=False, pty=False):
+ """Execute specified command on the server.
+
+ :param cmd: Command to be executed.
+ :type cmd: str
+ :param stdin: Open file or string to pass to stdin.
+ :param stdout: Open file to connect to stdout.
+ :param stderr: Open file to connect to stderr.
+        :param raise_on_error: If False, the exit code is returned. If True,
+                               an exception is raised on a non-zero exit code.
+ :param timeout: Timeout in seconds for command execution.
+ Default 1 hour. No timeout if set to 0.
+ :param keep_stdin_open: don't close stdin on empty reads
+ :type keep_stdin_open: bool
+ :param pty: Request a pseudo terminal for this connection.
+ This allows passing control characters.
+ Default False.
+ :type pty: bool
+ """
+
+ client = self._get_client()
+
+ if isinstance(stdin, six.string_types):
+ stdin = six.moves.StringIO(stdin)
+
+ return self._run(client, cmd, stdin=stdin, stdout=stdout,
+ stderr=stderr, raise_on_error=raise_on_error,
+ timeout=timeout,
+ keep_stdin_open=keep_stdin_open, pty=pty)
+
+ def _run(self, client, cmd, stdin=None, stdout=None, stderr=None,
+ raise_on_error=True, timeout=3600,
+ keep_stdin_open=False, pty=False):
+
+ transport = client.get_transport()
+ session = transport.open_session()
+ if pty:
+ session.get_pty()
+ session.exec_command(cmd)
+ start_time = time.time()
+
+ # encode on transmit, decode on receive
+ data_to_send = encodeutils.safe_encode("", incoming='utf-8')
+ stderr_data = None
+
+ # If we have data to be sent to stdin then `select' should also
+ # check for stdin availability.
+ if stdin and not stdin.closed:
+ writes = [session]
+ else:
+ writes = []
+
+ while True:
+ # Block until data can be read/write.
+ e = select.select([session], writes, [session], 1)[2]
+
+ if session.recv_ready():
+ data = encodeutils.safe_decode(session.recv(4096), 'utf-8')
+ self.log.debug("stdout: %r", data)
+ if stdout is not None:
+ stdout.write(data)
+ continue
+
+ if session.recv_stderr_ready():
+ stderr_data = encodeutils.safe_decode(
+ session.recv_stderr(4096), 'utf-8')
+ self.log.debug("stderr: %r", stderr_data)
+ if stderr is not None:
+ stderr.write(stderr_data)
+ continue
+
+ if session.send_ready():
+ if stdin is not None and not stdin.closed:
+ if not data_to_send:
+ stdin_txt = stdin.read(4096)
+ if stdin_txt is None:
+ stdin_txt = ''
+ data_to_send = encodeutils.safe_encode(
+ stdin_txt, incoming='utf-8')
+ if not data_to_send:
+ # we may need to keep stdin open
+ if not keep_stdin_open:
+ stdin.close()
+ session.shutdown_write()
+ writes = []
+ if data_to_send:
+ sent_bytes = session.send(data_to_send)
+ # LOG.debug("sent: %s" % data_to_send[:sent_bytes])
+ data_to_send = data_to_send[sent_bytes:]
+
+ if session.exit_status_ready():
+ break
+
+            if timeout and (time.time() - start_time) > timeout:
+ message = ('Timeout executing command %(cmd)s on host %(host)s'
+ % {"cmd": cmd, "host": self.host})
+ raise exceptions.SSHTimeout(error_msg=message)
+ if e:
+ raise exceptions.SSHError(error_msg='Socket error')
+
+ exit_status = session.recv_exit_status()
+ if exit_status != 0 and raise_on_error:
+ fmt = "Command '%(cmd)s' failed with exit_status %(status)d."
+ details = fmt % {"cmd": cmd, "status": exit_status}
+ if stderr_data:
+ details += " Last stderr data: '%s'." % stderr_data
+ raise exceptions.SSHError(error_msg=details)
+ return exit_status
+
+ def execute(self, cmd, stdin=None, timeout=3600, raise_on_error=False):
+ """Execute the specified command on the server.
+
+ :param cmd: (str) Command to be executed.
+ :param stdin: (StringIO) Open file to be sent on process stdin.
+ :param timeout: (int) Timeout for execution of the command.
+        :param raise_on_error: (bool) If True, an SSHError is raised when the
+                               command exits with a non-zero code.
+
+ :returns: tuple (exit_status, stdout, stderr)
+ """
+ stdout = six.moves.StringIO()
+ stderr = six.moves.StringIO()
+
+ exit_status = self.run(cmd, stderr=stderr,
+ stdout=stdout, stdin=stdin,
+ timeout=timeout, raise_on_error=raise_on_error)
+ stdout.seek(0)
+ stderr.seek(0)
+ return exit_status, stdout.read(), stderr.read()
+
+ def wait(self, timeout=None, interval=1):
+ """Wait for the host will be available via ssh."""
+ if timeout is None:
+ timeout = self.wait_timeout
+
+ end_time = time.time() + timeout
+ while True:
+ try:
+ return self.execute("uname")
+ except (socket.error, exceptions.SSHError) as e:
+ self.log.debug("Ssh is still unavailable: %r", e)
+ time.sleep(interval)
+ if time.time() > end_time:
+ raise exceptions.SSHTimeout(
+ error_msg='Timeout waiting for "%s"' % self.host)
+
+ def put(self, files, remote_path=b'.', recursive=False):
+ client = self._get_client()
+
+ with SCPClient(client.get_transport()) as scp:
+ scp.put(files, remote_path, recursive)
+
+ def get(self, remote_path, local_path='/tmp/', recursive=True):
+ client = self._get_client()
+
+ with SCPClient(client.get_transport()) as scp:
+ scp.get(remote_path, local_path, recursive)
+
+ # keep shell running in the background, e.g. screen
+ def send_command(self, command):
+ client = self._get_client()
+ client.exec_command(command, get_pty=True)
+
+ def _put_file_sftp(self, localpath, remotepath, mode=None):
+ client = self._get_client()
+
+ with client.open_sftp() as sftp:
+ sftp.put(localpath, remotepath)
+ if mode is None:
+ mode = 0o777 & os.stat(localpath).st_mode
+ sftp.chmod(remotepath, mode)
+
+ TILDE_EXPANSIONS_RE = re.compile("(^~[^/]*/)?(.*)")
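+    # e.g. "~/foo/bar" -> ("~/", "foo/bar"); "foo/bar" -> (None, "foo/bar")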
+
+ def _put_file_shell(self, localpath, remotepath, mode=None):
+        # quote to prevent word splitting
+ tilde, remotepath = self.TILDE_EXPANSIONS_RE.match(remotepath).groups()
+ if not tilde:
+ tilde = ''
+ cmd = ['cat > %s"%s"' % (tilde, remotepath)]
+ if mode is not None:
+ # use -- so no options
+ cmd.append('chmod -- 0%o %s"%s"' % (mode, tilde, remotepath))
+
+ with open(localpath, "rb") as localfile:
+ # only chmod on successful cat
+ self.run("&& ".join(cmd), stdin=localfile)
+
+ def put_file(self, localpath, remotepath, mode=None):
+ """Copy specified local file to the server.
+
+ :param localpath: Local filename.
+ :param remotepath: Remote filename.
+ :param mode: Permissions to set after upload
+ """
+ try:
+ self._put_file_sftp(localpath, remotepath, mode=mode)
+ except (paramiko.SSHException, socket.error):
+ self._put_file_shell(localpath, remotepath, mode=mode)
+
+ def put_file_obj(self, file_obj, remotepath, mode=None):
+ client = self._get_client()
+
+ with client.open_sftp() as sftp:
+ sftp.putfo(file_obj, remotepath)
+ if mode is not None:
+ sftp.chmod(remotepath, mode)
+
+ def get_file_obj(self, remotepath, file_obj):
+ client = self._get_client()
+
+ with client.open_sftp() as sftp:
+ sftp.getfo(remotepath, file_obj)
+
+
+class AutoConnectSSH(SSH):
+
+ @classmethod
+ def get_arg_key_map(cls):
+ arg_key_map = super(AutoConnectSSH, cls).get_arg_key_map()
+ arg_key_map['wait'] = ('wait', True)
+ return arg_key_map
+
+ # always wait or we will get OpenStack SSH errors
+ def __init__(self, user, host, port=None, pkey=None,
+ key_filename=None, password=None, name=None, wait=True):
+ super(AutoConnectSSH, self).__init__(user, host, port, pkey,
+ key_filename, password, name)
+ if wait and wait is not True:
+ self.wait_timeout = int(wait)
+
+ def _make_dict(self):
+ data = super(AutoConnectSSH, self)._make_dict()
+ data.update({
+ 'wait': self.wait_timeout
+ })
+ return data
+
+ def _connect(self):
+ if not self.is_connected:
+ interval = 1
+ timeout = self.wait_timeout
+
+ end_time = time.time() + timeout
+ while True:
+ try:
+ return self._get_client()
+ except (socket.error, exceptions.SSHError) as e:
+ self.log.debug("Ssh is still unavailable: %r", e)
+ time.sleep(interval)
+ if time.time() > end_time:
+ raise exceptions.SSHTimeout(
+ error_msg='Timeout waiting for "%s"' % self.host)
+
+ def drop_connection(self):
+ """ Don't close anything, just force creation of a new client """
+ self._client = False
+
+ def execute(self, cmd, stdin=None, timeout=3600, raise_on_error=False):
+ self._connect()
+ return super(AutoConnectSSH, self).execute(cmd, stdin, timeout,
+ raise_on_error)
+
+ def run(self, cmd, stdin=None, stdout=None, stderr=None,
+ raise_on_error=True, timeout=3600,
+ keep_stdin_open=False, pty=False):
+ self._connect()
+ return super(AutoConnectSSH, self).run(cmd, stdin, stdout,
+ stderr, raise_on_error,
+ timeout, keep_stdin_open, pty)
+
+ def put(self, files, remote_path=b'.', recursive=False):
+ self._connect()
+ return super(AutoConnectSSH, self).put(files, remote_path, recursive)
+
+ def put_file(self, local_path, remote_path, mode=None):
+ self._connect()
+ return super(AutoConnectSSH, self).put_file(local_path,
+ remote_path, mode)
+
+ def put_file_obj(self, file_obj, remote_path, mode=None):
+ self._connect()
+ return super(AutoConnectSSH, self).put_file_obj(file_obj,
+ remote_path, mode)
+
+ def get_file_obj(self, remote_path, file_obj):
+ self._connect()
+ return super(AutoConnectSSH, self).get_file_obj(remote_path, file_obj)
+
+ @staticmethod
+ def get_class():
+ # must return static class name,
+ # anything else refers to the calling class
+ # i.e. the subclass, not the superclass
+ return AutoConnectSSH
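+
+
+# A minimal AutoConnectSSH usage sketch (host and credentials are
+# illustrative); the client connects lazily on first use and retries
+# until wait_timeout expires:
+#
+#     ssh = AutoConnectSSH("root", "10.10.120.24", password="secret", wait=60)
+#     status, out, err = ssh.execute("uname")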
diff --git a/xtesting/baremetal/testcases.yaml b/xtesting/baremetal/testcases.yaml
new file mode 100644
index 00000000..91cef451
--- /dev/null
+++ b/xtesting/baremetal/testcases.yaml
@@ -0,0 +1,16 @@
+---
+tiers:
+ -
+ name: vsperfbm
+ order: 1
+ description: ''
+ testcases:
+ -
+ case_name: phy2phy_tput
+ project_name: vsperf
+ criteria: 100
+ blocking: true
+ clean_flag: false
+ description: ''
+ run:
+ name: vsperf_controller
diff --git a/xtesting/baremetal/utils.py b/xtesting/baremetal/utils.py
new file mode 100644
index 00000000..d945381e
--- /dev/null
+++ b/xtesting/baremetal/utils.py
@@ -0,0 +1,41 @@
+"""
+# Copyright 2013: Mirantis Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+
+
+NON_NONE_DEFAULT = object()
+
+
+def get_key_with_default(data, key, default=NON_NONE_DEFAULT):
+ """get default key"""
+ value = data.get(key, default)
+ if value is NON_NONE_DEFAULT:
+ raise KeyError(key)
+ return value
+
+
+def make_dict_from_map(data, key_map):
+ """mapping dict"""
+ return {dest_key: get_key_with_default(data, src_key, default)
+ for dest_key, (src_key, default) in key_map.items()}
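+
+# Example (illustrative): with key_map = {'port': ('ssh_port', 22)},
+# make_dict_from_map({'ssh_port': 33}, key_map) returns {'port': 33} and
+# make_dict_from_map({}, key_map) returns {'port': 22}; a source key mapped
+# to NON_NONE_DEFAULT raises KeyError when absent.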
+
+def try_int(s, *args):
+ """Convert to integer if possible."""
+ #pylint: disable=invalid-name
+ try:
+ return int(s)
+ except (TypeError, ValueError):
+ return args[0] if args else s
diff --git a/xtesting/baremetal/vsperf.conf b/xtesting/baremetal/vsperf.conf
new file mode 100644
index 00000000..8ed7115f
--- /dev/null
+++ b/xtesting/baremetal/vsperf.conf
@@ -0,0 +1,21 @@
+VSWITCH_BRIDGE_NAME = 'vsperf-br0'
+WHITELIST_NICS = ['02:00.0', '02:00.1']
+TRAFFICGEN = 'Trex'
+TRAFFICGEN_TREX_HOST_IP_ADDR = '10.10.120.25'
+TRAFFICGEN_TREX_USER = 'root'
+TRAFFICGEN_TREX_BASE_DIR = '/root/trex_2.86/'
+TRAFFICGEN_TREX_LINE_SPEED_GBPS = '10'
+TRAFFICGEN_TREX_PORT1 = '0000:81:00.0'
+TRAFFICGEN_TREX_PORT2 = '0000:81:00.1'
+TRAFFICGEN_TREX_PROMISCUOUS = False
+TRAFFICGEN_DURATION = 1
+TRAFFICGEN_LOSSRATE = 0
+TRAFFICGEN_RFC2544_TESTS = 10
+#TRAFFICGEN_PKT_SIZES = (64, 128, 256, 512, 1024, 1280, 1518)
+TRAFFICGEN_PKT_SIZES = (1024,)
+GUEST_TESTPMD_FWD_MODE = ['io']
+GUEST_IMAGE = ['/home/opnfv/vnfs/vloop-vnf-ubuntu-18.04_20180920.qcow2']
+TRAFFICGEN_TREX_LATENCY_PPS = 1000
+TRAFFICGEN_TREX_RFC2544_BINARY_SEARCH_LOSS_VERIFICATION = True
+TRAFFICGEN_TREX_RFC2544_MAX_REPEAT = 2
+
diff --git a/xtesting/baremetal/vsperf_controller.py b/xtesting/baremetal/vsperf_controller.py
new file mode 100644
index 00000000..91bad766
--- /dev/null
+++ b/xtesting/baremetal/vsperf_controller.py
@@ -0,0 +1,194 @@
+#!/usr/bin/env python
+
+# Copyright 2018-19 Spirent Communications.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+VSPERF-controller
+"""
+
+# The controller reads its settings from environment variables; configure
+# or modify the list.env file to set them.
+
+#pylint: disable=global-statement,no-else-continue
+#pylint: disable=too-many-branches
+
+import os
+import sys
+from stat import S_ISDIR
+import time
+import paramiko
+from xtesting.core import testcase
+import ssh
+
+TIMER = float()
+
+
+
+DUT_IP = os.getenv('DUT_IP_ADDRESS')
+DUT_USER = os.getenv('DUT_USERNAME')
+DUT_PWD = os.getenv('DUT_PASSWORD')
+RES_PATH = os.getenv('RES_PATH')
+
+VSPERF_TEST = os.getenv('VSPERF_TESTS')
+VSPERF_CONF = os.getenv('VSPERF_CONFFILE')
+VSPERF_TRAFFICGEN_MODE = str(os.getenv('VSPERF_TRAFFICGEN_MODE'))
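+
+# A minimal environment for this controller might look like (values are
+# illustrative; see list.env):
+#   export DUT_IP_ADDRESS=10.10.120.24
+#   export DUT_USERNAME=opnfv
+#   export DUT_PASSWORD=secret
+#   export RES_PATH=/tmp/vsperf-results
+#   export VSPERF_TESTS=phy2phy_tput
+#   export VSPERF_TRAFFICGEN_MODE=no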
+
+DUT_CLIENT = None
+TGEN_CLIENT = None
+
+RECV_BYTES = 4096
+
+def host_connect():
+ """
+ Handle host connectivity to DUT
+ """
+ global DUT_CLIENT
+ DUT_CLIENT = ssh.SSH(host=DUT_IP, user=DUT_USER, password=DUT_PWD)
+ print("DUT Successfully Connected ..............................................[OK] \n ")
+
+def upload_test_config_file():
+ """
+    Upload the test configuration file to the DUT.
+ """
+ #localpath = '/usr/src/app/vsperf/vsperf.conf'
+ if VSPERF_CONF:
+ localpath = VSPERF_CONF
+ else:
+ localpath = 'vsperf.conf'
+ if not os.path.exists(localpath):
+ print("VSPERF Test config File does not exists.......................[Failed]")
+ return
+ remotepath = '~/vsperf.conf'
+ check_test_config_cmd = "find ~/ -maxdepth 1 -name '{}'".format(
+ remotepath[2:])
+ check_test_result = str(DUT_CLIENT.execute(check_test_config_cmd)[1])
+ if remotepath[2:] in check_test_result:
+ DUT_CLIENT.run("rm -f {}".format(remotepath[2:]))
+ DUT_CLIENT.put_file(localpath, remotepath)
+    # re-run the same check to verify the upload succeeded
+    check_test_result_1 = str(DUT_CLIENT.execute(check_test_config_cmd)[1])
+ if remotepath[2:] in check_test_result_1:
+ print(
+ "Test Configuration File Uploaded on DUT-Host.............................[OK] \n ")
+ else:
+ print("VSPERF Test config file upload failed.....................................[Critical]")
+
+def run_vsperf_test():
+ """
+ Here we will perform the actual vsperf test
+ """
+ global TIMER
+ rmv_cmd = "cd /mnt/huge && echo {} | sudo -S rm -rf *".format(DUT_PWD)
+ DUT_CLIENT.run(rmv_cmd, pty=True)
+ cmd = "source ~/vsperfenv/bin/activate ; "
+ #cmd = "scl enable python33 bash ; "
+ cmd += "cd vswitchperf && "
+ cmd += "./vsperf "
+ cmd += "--conf-file ~/vsperf.conf "
+ if "yes" in VSPERF_TRAFFICGEN_MODE.lower():
+ cmd += "--mode trafficgen"
+ vsperf_test_list = VSPERF_TEST.split(",")
+ print(vsperf_test_list)
+ for test in vsperf_test_list:
+ atest = cmd
+ atest += test
+ DUT_CLIENT.run(atest, pty=True)
+ print(
+ "Test Successfully Completed................................................[OK]\n ")
+
+def get_result():
+ """
+ Get Latest results from DUT
+ """
+ stdout_data = []
+ stderr_data = []
+ client = paramiko.Transport((DUT_IP, 22))
+ client.connect(username=DUT_USER, password=DUT_PWD)
+ session = client.open_channel(kind='session')
+ directory_to_download = ''
+ session.exec_command('ls /tmp | grep results')
+ if not directory_to_download:
+ while True:
+ if session.recv_ready():
+ stdout_data.append(session.recv(RECV_BYTES))
+ if session.recv_stderr_ready():
+ stderr_data.append(session.recv_stderr(RECV_BYTES))
+ if session.exit_status_ready():
+ break
+ if stdout_data:
+ line = stdout_data[0]
+ filenames = line.decode("utf-8").rstrip("\n").split("\n")
+ filenames = sorted(filenames)
+ latest = filenames[-1]
+ directory_to_download = os.path.join('/tmp', latest)
+ stdout_data = []
+ stderr_data = []
+ if directory_to_download:
+ destination = os.path.join(RES_PATH,
+ os.path.basename(os.path.normpath(
+ directory_to_download)))
+ os.makedirs(destination)
+ print(directory_to_download)
+        # Begin the actual download
+ sftp = paramiko.SFTPClient.from_transport(client)
+        def sftp_walk(remotepath):
+            """os.walk-style generator over the remote directory tree."""
+            path = remotepath
+            files = []
+            folders = []
+            for fle in sftp.listdir_attr(remotepath):
+                if S_ISDIR(fle.st_mode):
+                    folders.append(fle.filename)
+                else:
+                    files.append(fle.filename)
+            if files:
+                yield path, files
+            # recurse into subdirectories so nested result files are found too
+            for folder in folders:
+                for walked in sftp_walk(os.path.join(path, folder)):
+                    yield walked
+ # Filewise download happens here
+        for path, files in sftp_walk(directory_to_download):
+            for fil in files:
+                remote = os.path.join(path, fil)
+ local = os.path.join(destination, fil)
+ print(local)
+ sftp.get(remote, local)
+ # Ready to work with downloaded data, close the session and client.
+ session.close()
+ client.close()
+
+class VsperfBm(testcase.TestCase):
+ """
+ VSPERF-Xtesting Baremetal Control Class
+ """
+ def run(self, **kwargs):
+ global RES_PATH
+ try:
+ self.start_time = time.time()
+            self.result = 100
+ os.makedirs(self.res_dir, exist_ok=True)
+ RES_PATH = self.res_dir
+ if DUT_IP:
+ host_connect()
+ if not DUT_CLIENT:
+ print('Failed to connect to DUT ...............[Critical]')
+ self.result = 0
+ else:
+ upload_test_config_file()
+ run_vsperf_test()
+ get_result()
+ self.stop_time = time.time()
+ except Exception: # pylint: disable=broad-except
+ print("Unexpected error:", sys.exc_info()[0])
+ self.result = 0
+ self.stop_time = time.time()
diff --git a/xtesting/openstack/Dockerfile b/xtesting/openstack/Dockerfile
new file mode 100644
index 00000000..2e613872
--- /dev/null
+++ b/xtesting/openstack/Dockerfile
@@ -0,0 +1,61 @@
+# Copyright 2020 Spirent Communications.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM opnfv/xtesting
+LABEL maintainer="sridhar.rao@spirent.com"
+
+# Install required packages
+RUN apk add --no-cache --update python3 python3-dev \
+ py3-wheel py3-pip git openssh-client python3-tkinter \
+ tk gcc musl-dev libffi-dev openssl-dev make
+
+# Clone VSPERF.
+RUN git clone https://gerrit.opnfv.org/gerrit/vswitchperf /vswitchperf
+
+#
+# Remove unnecessary python packages.
+#
+RUN cd /vswitchperf && \
+ sed -e '/numpy/ s/^#*/#\ /' -i requirements.txt && \
+ sed -e '/matplotlib/ s/^#*/#\ /' -i requirements.txt && \
+ sed -e '/pycrypto/ s/^#*/#\ /' -i requirements.txt && \
+ sed -e '/pypsi/ s/^#*/#\ /' -i requirements.txt && \
+ sed -e '/paramiko/ s/^#*/#\ /' -i requirements.txt && \
+ sed -e '/pyzmq/ s/^#*/#\ /' -i requirements.txt && \
+ sed -e '/kubernetes/ s/^#*/#\ /' -i requirements.txt
+
+#
+# Build VSPERF
+#
+RUN cd /vswitchperf && \
+ pip3 install --ignore-installed distlib -r requirements.txt && \
+ cd /vswitchperf/src/trex && make
+
+# Include vsperf into Path.
+ENV PATH "$PATH:/vswitchperf"
+
+COPY vsperfostack.conf /vsperfostack.conf
+
+# Required step for Xtesting
+ADD . /src/
+RUN git init /src && pip3 install /src
+
+# Copy Testcase
+COPY testcases.yaml /usr/lib/python3.8/site-packages/xtesting/ci/testcases.yaml
+
+# Set working directory - This helps to resolve path to templates.
+WORKDIR /vswitchperf
+
+# Command Run
+CMD ["run_tests", "-t", "all"]
diff --git a/xtesting/openstack/cloud.rc b/xtesting/openstack/cloud.rc
new file mode 100644
index 00000000..3f867743
--- /dev/null
+++ b/xtesting/openstack/cloud.rc
@@ -0,0 +1,10 @@
+export OS_AUTH_URL=http://10.10.180.21/identity
+export OS_PROJECT_ID=0440a230a799460facec0d09dde64497
+export OS_PROJECT_NAME="admin"
+export OS_USER_DOMAIN_NAME="Default"
+export OS_PROJECT_DOMAIN_ID="default"
+export OS_USERNAME="admin"
+export OS_PASSWORD="admin123"
+export OS_REGION_NAME="RegionOne"
+export OS_INTERFACE=public
+export OS_IDENTITY_API_VERSION=3
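+# Load these credentials into the current shell with: source cloud.rc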
diff --git a/xtesting/openstack/setup.cfg b/xtesting/openstack/setup.cfg
new file mode 100644
index 00000000..4b98992a
--- /dev/null
+++ b/xtesting/openstack/setup.cfg
@@ -0,0 +1,10 @@
+[metadata]
+name = vsperfostack
+version = 1
+
+[files]
+packages = .
+
+[entry_points]
+xtesting.testcase =
+ vsperfostack = vsperfostack:VsperfOstack
diff --git a/xtesting/openstack/setup.py b/xtesting/openstack/setup.py
new file mode 100644
index 00000000..1394cdfe
--- /dev/null
+++ b/xtesting/openstack/setup.py
@@ -0,0 +1,9 @@
+#!/usr/bin/env python
+
+# pylint: disable=missing-docstring
+
+import setuptools
+
+setuptools.setup(
+ setup_requires=['pbr>=2.0.0'],
+ pbr=True)
diff --git a/xtesting/openstack/site.yml b/xtesting/openstack/site.yml
new file mode 100644
index 00000000..1ca663f4
--- /dev/null
+++ b/xtesting/openstack/site.yml
@@ -0,0 +1,13 @@
+---
+- hosts:
+ - 127.0.0.1
+ roles:
+ - role: collivier.xtesting
+ project: vsperfostack
+ repo: 127.0.0.1
+ dport: 5000
+ gerrit:
+ suites:
+ - container: vsperfos
+ tests:
+ - phy2phy_tput
diff --git a/xtesting/openstack/testcases.yaml b/xtesting/openstack/testcases.yaml
new file mode 100644
index 00000000..aab3b16a
--- /dev/null
+++ b/xtesting/openstack/testcases.yaml
@@ -0,0 +1,19 @@
+---
+tiers:
+ -
+ name: vsperfostack
+ order: 1
+ description: 'VSPERF Openstack Testing'
+ testcases:
+ -
+ case_name: phy2phy_tput
+ project_name: vsperfostack
+ criteria: 100
+ blocking: true
+ clean_flag: false
+ description: 'VSPERF Openstack RFC2544 Throughput Test'
+ run:
+ name: vsperfostack
+ args:
+ conf_file: vsperfostack.conf
+ deploy_tgen: false
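+# 'args' above is passed as kwargs to VsperfOstack.run(); with
+# deploy_tgen: false vsperf runs in trafficgen-only mode (see
+# vsperfostack.py).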
diff --git a/xtesting/openstack/vsperfostack.conf b/xtesting/openstack/vsperfostack.conf
new file mode 100644
index 00000000..489054a7
--- /dev/null
+++ b/xtesting/openstack/vsperfostack.conf
@@ -0,0 +1,80 @@
+# Copyright 2020 Spirent Communications.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# OpenStack Information
+
+OS_AUTH_URL="http://10.10.180.21/identity"
+OS_PROJECT_ID="0440a230a799460facec0d09dde64497"
+OS_PROJECT_NAME="admin"
+OS_USER_DOMAIN_NAME="Default"
+OS_PROJECT_DOMAIN_ID="default"
+OS_USERNAME="admin"
+OS_PASSWORD="admin123"
+OS_REGION_NAME="RegionOne"
+OS_INTERFACE="public"
+OS_IDENTITY_API_VERSION=3
+OS_INSECURE=False
+OS_CA_CERT = 'None'
+
+# Deployment Information
+SCENARIOS = ['templates/l2_2c_2i.yaml']
+FLAVOR_NAME = 'm1.large'
+IMAGE_NAME = 'stcv'
+EXTERNAL_NET = 'public'
+
+# Traffic Information
+TRAFFICGEN_PKT_SIZES = (1024,)
+TRAFFICGEN_DURATION = 10
+
+# Trafficgen to use
+TRAFFICGEN = 'TestCenter'
+
+
+# Trafficgen Specific Information
+# STC
+TRAFFICGEN_STC_LAB_SERVER_ADDR = "10.10.180.245"
+TRAFFICGEN_STC_LICENSE_SERVER_ADDR = "10.10.50.226"
+TRAFFICGEN_STC_EAST_SLOT_NUM = "1"
+TRAFFICGEN_STC_EAST_PORT_NUM = "1"
+TRAFFICGEN_STC_WEST_SLOT_NUM = "1"
+TRAFFICGEN_STC_WEST_PORT_NUM = "1"
+TRAFFICGEN_STC_PYTHON2_PATH = "/usr/bin/python3"
+TRAFFICGEN_STC_RFC2544_TPUT_TEST_FILE_NAME = "testcenter-rfc2544-rest.py"
+TRAFFICGEN_STC_RFC2544_METRIC = "throughput"
+
+
+# Ixia
+TRAFFICGEN_EAST_IXIA_CARD = '1'
+TRAFFICGEN_WEST_IXIA_CARD = '1'
+TRAFFICGEN_EAST_IXIA_PORT = '1'
+TRAFFICGEN_WEST_IXIA_PORT = '1'
+TRAFFICGEN_IXIA_LIB_PATH = '/opt/ixia/ixos-api/9.00.0.20/lib/ixTcl1.0'
+TRAFFICGEN_IXNET_LIB_PATH = '/opt/ixia/ixnetwork/9.00.1915.16/lib/TclApi/IxTclNetwork'
+TRAFFICGEN_IXNET_MACHINE = '10.10.180.240' # quad dotted ip address
+TRAFFICGEN_IXNET_PORT = '443'
+TRAFFICGEN_IXNET_USER = 'admin'
+TRAFFICGEN_IXNET_TESTER_RESULT_DIR = 'c:/ixia_results/vsperf_sandbox'
+TRAFFICGEN_IXNET_DUT_RESULT_DIR = '/mnt/ixia_results/vsperf_sandbox'
+
+# Trex
+TRAFFICGEN_TREX_HOST_IP_ADDR = '10.10.120.25'
+TRAFFICGEN_TREX_USER = 'root'
+TRAFFICGEN_TREX_BASE_DIR = '/root/trex_2.86/'
+TRAFFICGEN_TREX_LINE_SPEED_GBPS = '10'
+TRAFFICGEN_TREX_PORT1 = '0000:81:00.0'
+TRAFFICGEN_TREX_PORT2 = '0000:81:00.1'
+TRAFFICGEN_TREX_PROMISCUOUS = False
+TRAFFICGEN_TREX_LATENCY_PPS = 1000
+TRAFFICGEN_TREX_RFC2544_BINARY_SEARCH_LOSS_VERIFICATION = False
+TRAFFICGEN_TREX_RFC2544_MAX_REPEAT = 2
diff --git a/xtesting/openstack/vsperfostack.py b/xtesting/openstack/vsperfostack.py
new file mode 100755
index 00000000..437f8492
--- /dev/null
+++ b/xtesting/openstack/vsperfostack.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python3
+
+# Copyright 2020 Spirent Communications.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""VSPERF-Xtesting-Openstack Control script.
+"""
+
+import os
+import subprocess
+import sys
+import time
+
+from xtesting.core import testcase
+
+
+class VsperfOstack(testcase.TestCase):
+ """
+ Implement Xtesting's testcase class
+ """
+ def run(self, **kwargs):
+ """
+ Main Run.
+ """
+ custom_conffile = '/vswitchperf/conf/99_xtesting.conf'
+ try:
+            test_params = dict(kwargs)
+ # Make results directory - Xtesting Requirement
+ os.makedirs(self.res_dir, exist_ok=True)
+ # Start the timer
+ self.start_time = time.time()
+
+ # Get the parameter
+ if 'conf_file' in test_params.keys():
+ conffile = os.path.join('/', test_params['conf_file'])
+ else:
+ conffile = '/vsperfostack.conf'
+
+ # Remove customfile if it exists.
+ if os.path.exists(custom_conffile):
+ os.remove(custom_conffile)
+
+ # Write custom configuration.
+ with open(custom_conffile, 'a+') as fil:
+ fil.writelines("LOG_DIR='{}'".format(self.res_dir))
+ # Start the vsperf command
+            if ('deploy_tgen' in test_params and
+                    test_params['deploy_tgen']):
+ output = subprocess.check_output(['vsperf',
+ '--conf-file',
+ conffile,
+ '--openstack',
+ '--load-env',
+ '--tests',
+ self.case_name])
+ else:
+ output = subprocess.check_output(['vsperf',
+ '--conf-file',
+ conffile,
+ '--load-env',
+ '--mode',
+ 'trafficgen',
+ '--tests',
+ self.case_name])
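+                # i.e. for case_name "phy2phy_tput":
+                #   vsperf --conf-file /vsperfostack.conf --load-env \
+                #          --mode trafficgen --tests phy2phy_tput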
+ print(output)
+ self.result = 100
+ self.stop_time = time.time()
+ except Exception: # pylint: disable=broad-except
+ print("Unexpected error:", sys.exc_info()[0])
+ self.result = 0
+ self.stop_time = time.time()