-rw-r--r--  3rd_party/ixia/ixnetrfc2544.tcl | 188
-rw-r--r--  conf/00_common.conf | 5
-rw-r--r--  conf/02_vswitch.conf | 7
-rw-r--r--  conf/03_traffic.conf | 31
-rw-r--r--  conf/04_vnf.conf | 27
-rw-r--r--  conf/05_collector.conf | 25
-rw-r--r--  conf/10_custom.conf | 3
-rw-r--r--  conf/11_openstack.conf | 43
-rw-r--r--  conf/12_k8s.conf | 41
-rw-r--r--  conf/__init__.py | 10
-rw-r--r--  conf/integration/01b_dpdk_regression_tests.conf | 145
-rw-r--r--  conf/kubernetes/01_testcases.conf | 12
-rw-r--r--  core/component_factory.py | 15
-rwxr-xr-x  core/loader/loader.py | 37
-rw-r--r--  core/pod_controller.py | 93
-rw-r--r--  core/results/results_constants.py | 9
-rw-r--r--  core/vswitch_controller_p2p.py | 5
-rw-r--r--  docs/index.rst | 5
-rw-r--r--  docs/k8s/index.rst | 40
-rw-r--r--  docs/lma/index.rst | 18
-rw-r--r--  docs/lma/logs/devguide.rst | 145
-rw-r--r--  docs/lma/logs/images/elasticsearch.png | bin 0 -> 36046 bytes
-rw-r--r--  docs/lma/logs/images/fluentd-cs.png | bin 0 -> 40226 bytes
-rw-r--r--  docs/lma/logs/images/fluentd-ss.png | bin 0 -> 18331 bytes
-rw-r--r--  docs/lma/logs/images/nginx.png | bin 0 -> 36737 bytes
-rw-r--r--  docs/lma/logs/images/setup.png | bin 0 -> 43503 bytes
-rw-r--r--  docs/lma/logs/userguide.rst | 386
-rw-r--r--  docs/lma/metrics/devguide.rst | 469
-rw-r--r--  docs/lma/metrics/images/dataflow.png | bin 0 -> 42443 bytes
-rw-r--r--  docs/lma/metrics/images/setup.png | bin 0 -> 15019 bytes
-rw-r--r--  docs/lma/metrics/userguide.rst | 226
-rw-r--r--  docs/openstack/index.rst | 39
-rw-r--r--  docs/release/release-notes/release-notes.rst | 122
-rw-r--r--  docs/testing/developer/devguide/design/vswitchperf_design.rst | 16
-rw-r--r--  docs/testing/developer/devguide/index.rst | 6
-rw-r--r--  docs/testing/developer/devguide/requirements/ietf_draft/rfc8204-vsperf-bmwg-vswitch-opnfv.rst | 2
-rw-r--r--  docs/testing/developer/devguide/requirements/vswitchperf_ltd.rst | 16
-rw-r--r--  docs/testing/developer/devguide/requirements/vswitchperf_ltp.rst | 20
-rw-r--r--  docs/testing/developer/devguide/results/scenario.rst | 2
-rw-r--r--  docs/testing/user/configguide/index.rst | 4
-rw-r--r--  docs/testing/user/configguide/installation.rst | 16
-rw-r--r--  docs/testing/user/configguide/tools.rst | 52
-rw-r--r--  docs/testing/user/configguide/trafficgen.rst | 22
-rw-r--r--  docs/testing/user/userguide/index.rst | 1
-rw-r--r--  docs/testing/user/userguide/testusage.rst | 4
-rw-r--r--  docs/xtesting/index.rst | 85
-rwxr-xr-x  docs/xtesting/vsperf-xtesting.png | bin 0 -> 93202 bytes
-rw-r--r--  pods/__init__.py | 19
-rw-r--r--  pods/papi/__init__.py | 19
-rw-r--r--  pods/papi/papi.py | 143
-rw-r--r--  pods/pod/__init__.py | 18
-rw-r--r--  pods/pod/pod.py | 63
-rw-r--r--  requirements.txt | 26
-rwxr-xr-x  src/dpdk/Makefile | 4
-rw-r--r--  src/dpdk/testpmd_proc.py | 6
-rw-r--r--  src/package-list.mk | 10
-rw-r--r--  src/trex/Makefile | 4
-rw-r--r--  systems/README.md | 4
-rwxr-xr-x  systems/build_base_machine.sh | 31
-rwxr-xr-x  systems/centos/build_base_machine.sh | 4
-rwxr-xr-x  systems/centos/prepare_python_env.sh | 4
-rwxr-xr-x  systems/debian/build_base_machine.sh | 39
-rwxr-xr-x  systems/debian/prepare_python_env.sh | 28
-rwxr-xr-x  systems/rhel/7.2/build_base_machine.sh | 10
-rwxr-xr-x  systems/rhel/7.2/prepare_python_env.sh | 4
-rwxr-xr-x  systems/rhel/7.3/build_base_machine.sh | 10
-rwxr-xr-x  systems/rhel/7.3/prepare_python_env.sh | 4
-rwxr-xr-x  systems/rhel/7.5/build_base_machine.sh | 10
-rwxr-xr-x  systems/rhel/7.5/prepare_python_env.sh | 4
-rwxr-xr-x  systems/ubuntu/14.04/build_base_machine.sh | 2
-rw-r--r--  testcases/__init__.py | 1
-rw-r--r--  testcases/k8s_performance.py | 39
-rw-r--r--  testcases/testcase.py | 57
-rwxr-xr-x  tools/collectors/cadvisor/__init__.py | 17
-rw-r--r--  tools/collectors/cadvisor/cadvisor.py | 218
-rwxr-xr-x  tools/collectors/multicmd/__init__.py | 17
-rw-r--r--  tools/collectors/multicmd/multicmd.py | 138
-rw-r--r--  tools/confgenwizard/__init__.py | 0
-rw-r--r--  tools/confgenwizard/nicinfo.py | 236
-rw-r--r--  tools/confgenwizard/vsperfwiz.py | 736
-rw-r--r--  tools/docker/client/__init__.py | 1
-rw-r--r--  tools/docker/client/vsperf_client.py | 771
-rw-r--r--  tools/docker/client/vsperfclient.conf | 39
-rw-r--r--  tools/docker/deployment/auto/controller/Dockerfile | 23
-rw-r--r--  tools/docker/deployment/auto/controller/list.env | 14
-rw-r--r--  tools/docker/deployment/auto/controller/vsperf/__init__.py | 1
-rw-r--r--  tools/docker/deployment/auto/controller/vsperf/collectd.conf | 49
-rw-r--r--  tools/docker/deployment/auto/controller/vsperf/trex_cfg.yaml | 20
-rw-r--r--  tools/docker/deployment/auto/controller/vsperf/vsperf_controller.py | 392
-rw-r--r--  tools/docker/deployment/auto/docker-compose.yml | 22
-rw-r--r--  tools/docker/deployment/interactive/controller/Dockerfile | 21
-rw-r--r--  tools/docker/deployment/interactive/controller/vsperf/__init__.py | 1
-rw-r--r--  tools/docker/deployment/interactive/controller/vsperf/vsperf_controller.py | 360
-rw-r--r--  tools/docker/deployment/interactive/docker-compose.yml | 21
-rw-r--r--  tools/docker/docs/architecture.txt | 70
-rw-r--r--  tools/docker/docs/client.rst | 99
-rw-r--r--  tools/docker/docs/test.rst | 86
-rw-r--r--  tools/docker/libs/proto/__init__.py | 1
-rwxr-xr-x  tools/docker/libs/proto/vsperf.proto | 109
-rw-r--r--  tools/docker/libs/utils/__init__.py | 1
-rw-r--r--  tools/docker/libs/utils/exceptions.py | 65
-rw-r--r--  tools/docker/libs/utils/ssh.py | 546
-rw-r--r--  tools/docker/libs/utils/utils.py | 41
-rwxr-xr-x  tools/docker/prepare.sh | 33
-rw-r--r--  tools/docker/results/README.md | 48
-rw-r--r--  tools/docker/results/docker-compose.yml | 80
-rw-r--r--  tools/docker/results/grafana/dashboards/container_metrics_dashboard.json | 1291
-rw-r--r--  tools/docker/results/jupyter/Dockerfile | 16
-rw-r--r--  tools/docker/results/logstash/pipeline/02-beats-input.conf | 6
-rw-r--r--  tools/docker/results/logstash/pipeline/20-collectd-input.conf | 14
-rw-r--r--  tools/docker/results/logstash/pipeline/30-output.conf | 7
-rw-r--r--  tools/docker/results/notebooks/testresult-analysis.ipynb | 783
-rw-r--r--  tools/docker/results/resultsdb/cases.json | 1
-rw-r--r--  tools/docker/results/resultsdb/init_db.py | 110
-rw-r--r--  tools/docker/results/resultsdb/pods.json | 382
-rw-r--r--  tools/docker/results/resultsdb/projects.json | 8
-rw-r--r--  tools/docker/testcontrol/auto/controller/Dockerfile | 23
-rw-r--r--  tools/docker/testcontrol/auto/controller/list.env | 13
-rw-r--r--  tools/docker/testcontrol/auto/controller/vsperf/__init__.py | 1
-rw-r--r--  tools/docker/testcontrol/auto/controller/vsperf/vsperf.conf | 21
-rw-r--r--  tools/docker/testcontrol/auto/controller/vsperf/vsperf_controller.py | 469
-rw-r--r--  tools/docker/testcontrol/auto/docker-compose.yml | 22
-rw-r--r--  tools/docker/testcontrol/interactive/controller/Dockerfile | 22
-rw-r--r--  tools/docker/testcontrol/interactive/controller/vsperf/__init__.py | 1
-rw-r--r--  tools/docker/testcontrol/interactive/controller/vsperf/output.txt | 1
-rw-r--r--  tools/docker/testcontrol/interactive/controller/vsperf/vsperf_controller.py | 706
-rw-r--r--  tools/docker/testcontrol/interactive/docker-compose.yml | 20
-rw-r--r--  tools/docker/vsperf/Dockerfile | 37
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/.ansible-lint | 3
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/README.md | 60
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/ansible.cfg | 9
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/hosts | 5
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/k8sclustermanagement.yml | 4
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/defaults/main.yml | 28
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/configMap-sriov-device-plugin.yaml | 20
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/kube-flannel-daemonset.yml | 606
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/multus-daemonset.yml | 251
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/ovs-daemonset.yml | 101
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/sriov-cni-daemonset.yaml | 47
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/sriov-device-plugin-daemonset.yaml | 127
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/userspace-daemonset.yml | 46
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-flannel.yml | 8
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-k8s-master.yml | 22
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-k8s-workers-drain.yml | 8
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-k8s-workers-reset.yml | 11
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-kubevirt-ovs.yml | 8
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-multus.yml | 8
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-sriov.yml | 30
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-userspace.yml | 8
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/cni-pre-deploy.yml | 17
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/configure_master_node.yml | 14
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-flannel.yml | 11
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-kubevirt-ovs.yml | 12
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-multus.yml | 10
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-sriov.yml | 26
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-userspace.yml | 13
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/foldersettings.yml | 10
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/main.yml | 83
-rw-r--r--  tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/workers.yml | 15
-rw-r--r--  tools/lma/ansible-client/ansible.cfg | 17
-rw-r--r--  tools/lma/ansible-client/hosts | 2
-rw-r--r--  tools/lma/ansible-client/playbooks/clean.yaml | 25
-rw-r--r--  tools/lma/ansible-client/playbooks/setup.yaml | 28
-rw-r--r--  tools/lma/ansible-client/roles/clean-collectd/main.yml | 44
-rw-r--r--  tools/lma/ansible-client/roles/clean-td-agent/tasks/main.yml | 28
-rw-r--r--  tools/lma/ansible-client/roles/collectd/files/collectd.conf.j2 | 44
-rw-r--r--  tools/lma/ansible-client/roles/collectd/tasks/main.yml | 60
-rw-r--r--  tools/lma/ansible-client/roles/td-agent/files/td-agent.conf | 63
-rw-r--r--  tools/lma/ansible-client/roles/td-agent/tasks/main.yml | 30
-rw-r--r--  tools/lma/ansible-server/ansible.cfg | 17
-rw-r--r--  tools/lma/ansible-server/group_vars/all.yml | 27
-rw-r--r--  tools/lma/ansible-server/hosts | 12
-rw-r--r--  tools/lma/ansible-server/playbooks/clean.yaml | 52
-rw-r--r--  tools/lma/ansible-server/playbooks/setup.yaml | 44
-rw-r--r--  tools/lma/ansible-server/roles/clean-k8s-cluster/tasks/main.yml | 34
-rw-r--r--  tools/lma/ansible-server/roles/clean-k8s-pre/tasks/main.yml | 65
-rw-r--r--  tools/lma/ansible-server/roles/clean-k8s-worker-reset/tasks/main.yml | 26
-rw-r--r--  tools/lma/ansible-server/roles/clean-logging/tasks/main.yml | 193
-rw-r--r--  tools/lma/ansible-server/roles/clean-monitoring/tasks/main.yml | 48
-rw-r--r--  tools/lma/ansible-server/roles/clean-nfs/tasks/main.yml | 44
-rw-r--r--  tools/lma/ansible-server/roles/k8s-master/tasks/main.yml | 49
-rw-r--r--  tools/lma/ansible-server/roles/k8s-pre/tasks/main.yml | 72
-rw-r--r--  tools/lma/ansible-server/roles/k8s-worker/tasks/main.yml | 24
-rw-r--r--  tools/lma/ansible-server/roles/logging/files/elastalert/ealert-conf-cm.yaml | 48
-rw-r--r--  tools/lma/ansible-server/roles/logging/files/elastalert/ealert-key-cm.yaml | 68
-rw-r--r--  tools/lma/ansible-server/roles/logging/files/elastalert/ealert-rule-cm.yaml | 132
-rw-r--r--  tools/lma/ansible-server/roles/logging/files/elastalert/elastalert.yaml | 76
-rw-r--r--  tools/lma/ansible-server/roles/logging/files/elasticsearch/elasticsearch.yaml | 231
-rw-r--r--  tools/lma/ansible-server/roles/logging/files/elasticsearch/user-secret.yaml | 23
-rw-r--r--  tools/lma/ansible-server/roles/logging/files/fluentd/fluent-cm.yaml | 525
-rw-r--r--  tools/lma/ansible-server/roles/logging/files/fluentd/fluent-service.yaml | 34
-rw-r--r--  tools/lma/ansible-server/roles/logging/files/fluentd/fluent.yaml | 65
-rw-r--r--  tools/lma/ansible-server/roles/logging/files/kibana/kibana.yaml | 23
-rw-r--r--  tools/lma/ansible-server/roles/logging/files/namespace.yaml | 17
-rw-r--r--  tools/lma/ansible-server/roles/logging/files/nginx/nginx-conf-cm.yaml | 36
-rw-r--r--  tools/lma/ansible-server/roles/logging/files/nginx/nginx-key-cm.yaml | 68
-rw-r--r--  tools/lma/ansible-server/roles/logging/files/nginx/nginx-service.yaml | 28
-rw-r--r--  tools/lma/ansible-server/roles/logging/files/nginx/nginx.yaml | 58
-rw-r--r--  tools/lma/ansible-server/roles/logging/files/persistentVolume.yaml | 105
-rw-r--r--  tools/lma/ansible-server/roles/logging/files/storageClass.yaml | 73
-rw-r--r--  tools/lma/ansible-server/roles/logging/tasks/main.yml | 165
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/alertmanager/alertmanager-config.yaml | 37
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/alertmanager/alertmanager-deployment.yaml | 62
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/alertmanager/alertmanager-service.yaml | 41
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/alertmanager/alertmanager1-deployment.yaml | 62
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/alertmanager/alertmanager1-service.yaml | 42
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/cadvisor/cadvisor-deamonset.yaml | 79
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/cadvisor/cadvisor-service.yaml | 30
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/collectd-exporter/collectd-exporter-deployment.yaml | 51
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/collectd-exporter/collectd-exporter-service.yaml | 35
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/grafana/grafana-datasource-config.yaml | 35
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/grafana/grafana-deployment.yaml | 68
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/grafana/grafana-pv.yaml | 31
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/grafana/grafana-pvc.yaml | 33
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/grafana/grafana-service.yaml | 36
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/kube-state-metrics/kube-state-metrics-deployment.yaml | 36
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/kube-state-metrics/kube-state-metrics-service.yaml | 26
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/monitoring-namespace.yaml | 18
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/node-exporter/nodeexporter-daemonset.yaml | 80
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/node-exporter/nodeexporter-service.yaml | 33
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/prometheus/main-prometheus-service.yaml | 35
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus-config.yaml | 609
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus-deployment.yaml | 73
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus-pv.yaml | 30
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus-pvc.yaml | 33
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus-service.yaml | 34
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus1-deployment.yaml | 73
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus1-service.yaml | 35
-rw-r--r--  tools/lma/ansible-server/roles/monitoring/tasks/main.yml | 273
-rw-r--r--  tools/lma/ansible-server/roles/nfs/tasks/main.yml | 42
-rw-r--r--  tools/lma/jupyter-notebooks/Causation-Analysis.ipynb | 784
-rw-r--r--  tools/lma/logs/dockerfile/elastalert/Dockerfile | 23
-rw-r--r--  tools/lma/logs/dockerfile/fluentd/Dockerfile | 23
-rw-r--r--  tools/lma/logs/jupyter-notebooks/Trend-Analysis.ipynb | 308
-rw-r--r--  tools/lma/metrics/dashboard/cpu_usage_using.json | 750
-rw-r--r--  tools/lma/metrics/dashboard/memory_using.json | 337
-rw-r--r--  tools/lma/metrics/dashboard/ovs_stats_using.json | 854
-rw-r--r--  tools/lma/metrics/dashboard/rdt_using.json | 833
-rw-r--r--  tools/lma/metrics/jupyter-notebooks/Analysis-Monitoring-K8S.ipynb | 644
-rw-r--r--  tools/lma/metrics/jupyter-notebooks/Analysis-Monitoring-Local.ipynb | 913
-rw-r--r--  tools/lma/yamllintrc | 25
-rw-r--r--  tools/load_gen/stressorvm/stressor_vm.py | 41
-rw-r--r--  tools/md-testvnf/config.json | 11
-rw-r--r--  tools/md-testvnf/http/ks.cfg | 88
-rw-r--r--  tools/md-testvnf/playbook.yml | 36
-rw-r--r--  tools/md-testvnf/scripts/ansible.sh | 7
-rwxr-xr-x  tools/md-testvnf/scripts/deploycentostools.sh | 364
-rw-r--r--  tools/md-testvnf/scripts/sshConfig.sh | 10
-rw-r--r--  tools/md-testvnf/testVNF_image.json | 72
-rw-r--r--  tools/os_deploy_tgen/__init__.py | 17
-rw-r--r--  tools/os_deploy_tgen/osclients/__init__.py | 17
-rw-r--r--  tools/os_deploy_tgen/osclients/glance.py | 34
-rwxr-xr-x  tools/os_deploy_tgen/osclients/heat.py | 156
-rw-r--r--  tools/os_deploy_tgen/osclients/neutron.py | 34
-rw-r--r--  tools/os_deploy_tgen/osclients/nova.py | 213
-rw-r--r--  tools/os_deploy_tgen/osclients/openstack.py | 82
-rw-r--r--  tools/os_deploy_tgen/osdt.py | 601
-rw-r--r--  tools/os_deploy_tgen/templates/hotfiles.md | 13
-rw-r--r--  tools/os_deploy_tgen/templates/l2.hot | 89
-rw-r--r--  tools/os_deploy_tgen/templates/l2_1c_1i.yaml | 8
-rw-r--r--  tools/os_deploy_tgen/templates/l2_1c_2i.yaml | 10
-rw-r--r--  tools/os_deploy_tgen/templates/l2_2c_2i.yaml | 10
-rw-r--r--  tools/os_deploy_tgen/templates/l2_old.hot | 93
-rw-r--r--  tools/os_deploy_tgen/templates/l2fip.hot | 122
-rw-r--r--  tools/os_deploy_tgen/templates/l2up.hot | 126
-rw-r--r--  tools/os_deploy_tgen/templates/l3.hot | 125
-rw-r--r--  tools/os_deploy_tgen/templates/l3_1c_2i.yaml | 11
-rw-r--r--  tools/os_deploy_tgen/templates/l3_2c_2i.yaml | 11
-rw-r--r--  tools/os_deploy_tgen/templates/scenario.yaml | 44
-rw-r--r--  tools/os_deploy_tgen/utilities/__init__.py | 17
-rw-r--r--  tools/os_deploy_tgen/utilities/utils.py | 183
-rwxr-xr-x  tools/pkt_gen/ixnet/ixnet.py | 86
-rw-r--r--  tools/pkt_gen/testcenter/testcenter-rfc2544-rest.py | 177
-rw-r--r--  tools/pkt_gen/testcenter/testcenter.py | 24
-rw-r--r--  tools/pkt_gen/trex/trex_client.py (renamed from tools/pkt_gen/trex/trex.py) | 87
-rw-r--r--  vnfs/qemu/qemu.py | 34
-rwxr-xr-x  vsperf | 93
-rw-r--r--  vswitches/ovs.py | 8
-rw-r--r--  vswitches/vpp_dpdk_vhost.py | 22
-rw-r--r--  xtesting/baremetal/Dockerfile | 36
-rw-r--r--  xtesting/baremetal/exceptions.py | 65
-rw-r--r--  xtesting/baremetal/requirements.txt | 2
-rw-r--r--  xtesting/baremetal/setup.cfg | 10
-rw-r--r--  xtesting/baremetal/setup.py | 9
-rw-r--r--  xtesting/baremetal/site.yml | 13
-rw-r--r--  xtesting/baremetal/ssh.py | 546
-rw-r--r--  xtesting/baremetal/testcases.yaml | 16
-rw-r--r--  xtesting/baremetal/utils.py | 41
-rw-r--r--  xtesting/baremetal/vsperf.conf | 21
-rw-r--r--  xtesting/baremetal/vsperf_controller.py | 194
-rw-r--r--  xtesting/openstack/Dockerfile | 61
-rw-r--r--  xtesting/openstack/cloud.rc | 10
-rw-r--r--  xtesting/openstack/setup.cfg | 10
-rw-r--r--  xtesting/openstack/setup.py | 9
-rw-r--r--  xtesting/openstack/site.yml | 13
-rw-r--r--  xtesting/openstack/testcases.yaml | 19
-rw-r--r--  xtesting/openstack/vsperfostack.conf | 80
-rwxr-xr-x  xtesting/openstack/vsperfostack.py | 85
298 files changed, 27284 insertions(+), 327 deletions(-)
diff --git a/3rd_party/ixia/ixnetrfc2544.tcl b/3rd_party/ixia/ixnetrfc2544.tcl
index 435f335f..fbc05f95 100644
--- a/3rd_party/ixia/ixnetrfc2544.tcl
+++ b/3rd_party/ixia/ixnetrfc2544.tcl
@@ -43,7 +43,7 @@ lappend auto_path [list $lib_path]
# verify that the IXIA chassis spec is given
-set reqVars [list "machine" "port" "user" "chassis" "card" "port1" "port2" "output_dir" "bidir"]
+set reqVars [list "machine" "port" "user" "chassis_east" "card_east" "port_east" "chassis_west" "card_west" "port_west" "output_dir" "bidir" "frame_size_list"]
set rfc2544test ""
foreach var $reqVars {
@@ -59,6 +59,7 @@ foreach var $reqVars {
set ::IxNserver $machine
set ::IxNport $port
set ::biDirect $bidir
+set frameSizeList $frame_size_list
# change to windows path format and append directory
set output_dir [string map {"/" "\\"} $output_dir]
@@ -66,14 +67,17 @@ set output_dir "$output_dir\\rfctests"
puts "Output directory is $output_dir"
proc startRfc2544Test { testSpec trafficSpec } {
# Start RFC2544 quicktest.
# Configure global variables. See documentation on 'global' for more
# information on why this is necessary
# https://www.tcl.tk/man/tcl8.5/tutorial/Tcl13.html
global rfc2544test
+ global qt
+ global frameSizeList
global sg_rfc2544throughput
global sg_rfc2544back2back
+ global output_dir
# Suffix for stack names
# This variable should be incremented after setting sg_stack like:
@@ -90,13 +94,16 @@ proc startRfc2544Test { testSpec trafficSpec } {
set duration [dict get $testSpec duration]
# check if only one tgen port is requested
- if {($::port1 == $::port2)} {
- set twoPorts 0
- set selfDestined True
- } else {
- set twoPorts 1
- set selfDestined False
- }
+ set twoPorts 1
+ set selfDestined False
+ if {($::chassis_east == $::chassis_west)} {
+ if {($::card_east == $::card_west)} {
+ if {($::port_east == $::port_west)} {
+ set twoPorts 0
+ set selfDestined True
+ }}
+ }
+
# RFC2544 to IXIA terminology mapping (it affects Ixia configuration inside this script):
# Test => Trial
@@ -163,30 +170,18 @@ proc startRfc2544Test { testSpec trafficSpec } {
set trafficSpec_vlan [dict get $trafficSpec vlan]
set frameSize [dict get $trafficSpec_l2 framesize]
- set srcMac [dict get $trafficSpec_l2 srcmac]
+ set srcMac [dict get $trafficSpec_l2 srcmac]
set dstMac [dict get $trafficSpec_l2 dstmac]
+ set srcPort [dict get $trafficSpec_l4 srcport]
+ set dstPort [dict get $trafficSpec_l4 dstport]
set proto [dict get $trafficSpec_l3 proto]
set srcIp [dict get $trafficSpec_l3 srcip]
set dstIp [dict get $trafficSpec_l3 dstip]
+ set vlanEnabled [dict get $trafficSpec_vlan enabled]
+ set l3Enabled [dict get $trafficSpec_l3 enabled]
+ set l4Enabled [dict get $trafficSpec_l4 enabled]
- set srcPort [dict get $trafficSpec_l4 srcport]
- set dstPort [dict get $trafficSpec_l4 dstport]
-
- set l3Enabled [dict get $trafficSpec_l3 enabled]
- set l4Enabled [dict get $trafficSpec_l4 enabled]
- set vlanEnabled [dict get $trafficSpec_vlan enabled]
-
- if {$vlanEnabled == 1 } {
- # these keys won't exist if vlan wasn't enabled
- set vlanId [dict get $trafficSpec_vlan id]
- set vlanUserPrio [dict get $trafficSpec_vlan priority]
- set vlanCfi [dict get $trafficSpec_vlan cfi]
- } else {
- set vlanId 0
- set vlanUserPrio 0
- set vlanCfi 0
- }
if {$frameSize < 68 } {
if {$rfc2544TestType == "back2back"} {
@@ -281,7 +276,7 @@ proc startRfc2544Test { testSpec trafficSpec } {
-csvLogPollIntervalMultiplier 1 \
-pollInterval 2 \
-guardrailEnabled True \
- -enableCsvLogging False \
+ -enableCsvLogging False\
-dataStorePollingIntervalMultiplier 1 \
-maxNumberOfStatsPerCustomGraph 16 \
-additionalFcoeStat1 fcoeInvalidDelimiter \
@@ -373,7 +368,7 @@ proc startRfc2544Test { testSpec trafficSpec } {
-useDefaultRootPath False \
-outputRootPath $::output_dir
sg_commit
- set sg_top [lindex [ixNet remapIds $sg_top] 0]
+ #set sg_top [lindex [ixNet remapIds $sg_top] 0]
set ixNetSG_Stack(0) $sg_top
###
@@ -1154,21 +1149,30 @@ proc startRfc2544Test { testSpec trafficSpec } {
-masterChassis {} \
-sequenceId 1 \
-cableLength 0 \
- -hostname $::chassis
+ -hostname $::chassis_east
+ sg_commit
+ set sg_chassis1 [ixNet add $ixNetSG_Stack(0)/availableHardware chassis]
+ ixNet setMultiAttrs $sg_chassis1 \
+ -masterChassis {} \
+ -sequenceId 2 \
+ -cableLength 0 \
+ -hostname $::chassis_west
sg_commit
set sg_chassis [lindex [ixNet remapIds $sg_chassis] 0]
set ixNetSG_Stack(1) $sg_chassis
+ set sg_chassis1 [lindex [ixNet remapIds $sg_chassis1] 0]
+ set ixNetSG_Stack(4) $sg_chassis1
#
- # configuring the object that corresponds to /availableHardware/chassis/card
+ # configuring the object that corresponds to /availableHardware/chassis/card_east
#
- set sg_card $ixNetSG_Stack(1)/card:$::card
- ixNet setMultiAttrs $sg_card \
+ set sg_card_east $ixNetSG_Stack(1)/card:$::card_east
+ ixNet setMultiAttrs $sg_card_east \
-aggregationMode normal
sg_commit
- set sg_card [lindex [ixNet remapIds $sg_card] 0]
- set ixNetSG_ref(19) $sg_card
- set ixNetSG_Stack(2) $sg_card
+ set sg_card_east [lindex [ixNet remapIds $sg_card_east] 0]
+ set ixNetSG_ref(19) $sg_card_east
+ set ixNetSG_Stack(2) $sg_card_east
#
# configuring the object that corresponds to /availableHardware/chassis/card/aggregation:1
@@ -1206,11 +1210,24 @@ proc startRfc2544Test { testSpec trafficSpec } {
sg_commit
set sg_aggregation [lindex [ixNet remapIds $sg_aggregation] 0]
ixNet setMultiAttrs $ixNetSG_ref(2) \
- -connectedTo $ixNetSG_ref(19)/port:$::port1
+ -connectedTo $ixNetSG_ref(19)/port:$::port_east
sg_commit
+
+ #
+ # configuring the object that corresponds to /availableHardware/chassis/card_west
+ #
+ puts "ixNetSG_Stack(4) is $ixNetSG_Stack(4)"
+ set sg_card_west $ixNetSG_Stack(4)/card:$::card_west
+ ixNet setMultiAttrs $sg_card_west \
+ -aggregationMode normal
+ sg_commit
+ set sg_card_west [lindex [ixNet remapIds $sg_card_west] 0]
+ set ixNetSG_ref(20) $sg_card_west
+ set ixNetSG_Stack(4) $sg_card_west
+
if {$twoPorts} {
ixNet setMultiAttrs $ixNetSG_ref(10) \
- -connectedTo $ixNetSG_ref(19)/port:$::port2
+ -connectedTo $ixNetSG_ref(20)/port:$::port_west
sg_commit
}
sg_commit
@@ -1353,7 +1370,7 @@ proc startRfc2544Test { testSpec trafficSpec } {
-destinationMacMode manual
ixNet setMultiAttrs $sg_configElement/frameSize \
-weightedPairs {} \
- -fixedSize 64 \
+ -fixedSize $frameSizeList \
-incrementFrom 64 \
-randomMin 64 \
-randomMax 1518 \
@@ -3080,7 +3097,7 @@ proc startRfc2544Test { testSpec trafficSpec } {
ixNet setMultiAttrs $sg_tracking \
-offset 0 \
-oneToOneMesh False \
- -trackBy {} \
+ -trackBy {trackingenabled0} \
-values {} \
-fieldWidth thirtyTwoBits \
-protocolOffset {Root.0}
@@ -6276,12 +6293,16 @@ proc startRfc2544Test { testSpec trafficSpec } {
#
if {$rfc2544TestType == "throughput"} {
set sg_rfc2544throughput [ixNet add $ixNetSG_Stack(0)/quickTest rfc2544throughput]
+ ixNet commit
ixNet setMultiAttrs $sg_rfc2544throughput \
-name {QuickTest1} \
-mode existingMode \
-inputParameters {{}}
+ ixNet commit
+ set sizes [join $frameSizeList ","]
+ set sg_rfc2544throughput [lindex [ixNet remapIds $sg_rfc2544throughput] 0]
ixNet setMultiAttrs $sg_rfc2544throughput/testConfig \
- -protocolItem {} \
+ -protocolItem [list ] \
-enableMinFrameSize True \
-framesize $frameSize \
-reportTputRateUnit mbps \
@@ -6293,7 +6314,7 @@ proc startRfc2544Test { testSpec trafficSpec } {
-tolerance 0 \
-frameLossUnit {0} \
-staggeredStart False \
- -framesizeList $frameSize \
+ -framesizeList $sizes \
-frameSizeMode custom \
-rateSelect percentMaxRate \
-percentMaxRate 100 \
@@ -6318,7 +6339,7 @@ proc startRfc2544Test { testSpec trafficSpec } {
-txDelay 2 \
-delayAfterTransmit 2 \
-minRandomFrameSize 64 \
- -maxRandomFrameSize 1518 \
+ -maxRandomFrameSize 128 \
-countRandomFrameSize 1 \
-minIncrementFrameSize 64 \
-stepIncrementFrameSize 64 \
@@ -6415,9 +6436,9 @@ proc startRfc2544Test { testSpec trafficSpec } {
-dataErrorThresholdValue 0 \
-dataErrorThresholdMode average
sg_commit
+ ixNet commit
set sg_rfc2544throughput [lindex [ixNet remapIds $sg_rfc2544throughput] 0]
set ixNetSG_Stack(1) $sg_rfc2544throughput
-
#
# configuring the object that corresponds to /quickTest/rfc2544throughput:1/protocols
#
@@ -6438,6 +6459,12 @@ proc startRfc2544Test { testSpec trafficSpec } {
-includeMode inTest \
-itemType trafficItem
sg_commit
+
+ #
+ # configuring the results folder that corresponds to /quickTest/rfc2544throughput:1
+ #
+ ixNet setAttr $sg_rfc2544throughput -resultPath $output_dir
+ ixNet commit
set sg_trafficSelection [lindex [ixNet remapIds $sg_trafficSelection] 0]
ixNet commit
@@ -6466,7 +6493,7 @@ proc startRfc2544Test { testSpec trafficSpec } {
-tolerance 0 \
-frameLossUnit {0} \
-staggeredStart False \
- -framesizeList $frameSize \
+ -framesizeList [list $frameSize] \
-frameSizeMode custom \
-rateSelect percentMaxRate \
-percentMaxRate 100 \
@@ -6611,14 +6638,74 @@ proc startRfc2544Test { testSpec trafficSpec } {
}
ixNet exec apply $rfc2544test
after 5000
-
#
# starting the RFC2544 Throughput test
#
puts "Starting test..."
ixNet exec start $rfc2544test
+ puts "Checking if [ixNet getA $rfc2544test -name] started...."
+ set count 0
+ while { [ixNet getA $rfc2544test/results -isRunning] eq false } {
+ after 1000
+ if { $count > 60 } { error "QT failed to start after 1 minute" }
+ incr count
+ }
+ puts "Looking for statistics"
+ set results_file_name "Traffic Item Statistics"
+ set results_file_path [getResultFile $results_file_name]
+ return $results_file_path
+}
+
+proc getResultFile { viewName } {
+ global output_dir
+ puts "Sleeping 20 seconds to have $viewName view"
+ after 20000
+ set root [ixNet getRoot]
+ set views [ixNet getList $root/statistics view]
+ foreach view $views {
+ if { [ixNet getA $view -caption] eq $viewName } {
+ set trafficView $view
+ break
+ }
+ }
+ puts "Checking that the $viewName view is ready"
+ set count 0
+ while { [ixNet getA $trafficView/data -isReady] eq false } {
+ after 1000
+ if { $count > 2 } { break }
+ incr count
+ }
+ puts "Success! $viewName view is ready! "
+ puts "Changing the CSV path"
+ set setAttr [ixNet setA $root/statistics -csvFilePath $output_dir]
+ if { $setAttr != "::ixNet::OK"} {
+ error "Error"
+ }
+ ixNet commit
+ puts "Enabling CSV logging"
+ set setAttr [ixNet setA $trafficView -enableCsvLogging True]
+ if { $setAttr != "::ixNet::OK"} {
+ error "Error"
+ }
+ ixNet commit
+ puts "Enabled CSV logging"
+ puts "Getting CSV file name for $trafficView view"
+ set csv_path [ixNet getA $root/statistics -csvFilePath]
+ set csv_name [ixNet getA $trafficView -csvFileName]
+ ixNet commit
+ return [file join $csv_path $csv_name]
}
+proc copyFileResults { sourceFile destFile } {
+ puts "Coping the file $sourceFile to $destFile..."
+ set source [dict get $sourceFile source_file]
+ set dest [dict get $destFile dest_file]
+ if {[catch {ixNet exec copyFile [ixNet readFrom "$source" -ixNetRelative] [ixNet writeTo "$dest" -overwrite]} errMsg]} {
+ error "Error while copying results : '$errMsg'"
+ }
+}
+
+
proc waitForRfc2544Test { } {
# Wait for- and return results of- RFC2544 quicktest.
@@ -6626,7 +6713,14 @@ proc waitForRfc2544Test { } {
puts "Waiting for test to complete..."
set result [ixNet exec waitForTest $rfc2544test]
+ puts "Checking if [ixNet getA $rfc2544test -name] stopped"
+ set count 0
+ while { [ixNet getA $rfc2544test/results -isRunning] eq true } {
+ after 1000
+ if { $count > 60 } { error "QT failed to stop after 1 minute it finished" }
+ incr count
+ }
puts "Finished Test"
return "$result"
-}
+}
\ No newline at end of file
diff --git a/conf/00_common.conf b/conf/00_common.conf
index a846fb55..c3579014 100644
--- a/conf/00_common.conf
+++ b/conf/00_common.conf
@@ -131,6 +131,9 @@ CUMULATIVE_PARAMS = False
# For example: 'throughput_rx_mbps', 'throughput_rx_fps', 'avg_latency_ns'
MATRIX_METRIC = 'throughput_rx_fps'
+# Name of the OPNFV POD (test infrastructure) on which the tests are executed.
+OPNFVPOD = ''
+
# ############################
# Modules
# ############################
@@ -139,7 +142,7 @@ MATRIX_METRIC = 'throughput_rx_fps'
# it can be used to suppress automatic load of obsoleted or abstract modules
# Example:
# EXCLUDE_MODULES = ['ovs_vanilla', 'qemu_virtio_net', 'pidstat']
-EXCLUDE_MODULES = ["testcenter-rfc2544-throughput"]
+EXCLUDE_MODULES = ["testcenter-rfc2544-throughput", "vsperf_controller", "vsperf_pb2", "vsperf_client", "vsperf_pb2_grpc"]
# ############################
# Vsperf Internal Options
diff --git a/conf/02_vswitch.conf b/conf/02_vswitch.conf
index 84ef71cb..4eca1a52 100644
--- a/conf/02_vswitch.conf
+++ b/conf/02_vswitch.conf
@@ -115,7 +115,7 @@ PATHS['vswitch'] = {
'path': os.path.join(ROOT_DIR, 'src/vpp/vpp/build-root/install-vpp-native/vpp'),
'vpp': 'bin/vpp',
'vppctl': 'bin/vppctl',
- 'vpp_plugin_path' : 'lib64/vpp_plugins',
+ 'vpp_plugin_path' : 'lib/vpp_plugins',
},
'bin': {
'vpp': 'vpp',
@@ -224,7 +224,7 @@ OVS_ROUTING_TABLES = False
#########################
# Set of arguments used for startup of VPP
# NOTE: DPDK socket mem allocation is driven by parameter DPDK_SOCKET_MEM
-VSWITCH_VPP_CLI_SOCK = ''
+VSWITCH_VPP_CLI_SOCK = '/run/vpp/cli.sock'
VSWITCH_VPP_ARGS = {
'unix' : [
'interactive', # required by VSPERF to detect successful VPP startup
@@ -236,6 +236,9 @@ VSWITCH_VPP_ARGS = {
'workers 2',
'corelist-workers 4,5',
],
+ 'socksvr' : [
+ 'socket-name /run/vpp-api.sock',
+ ],
}
# log file for VPP
diff --git a/conf/03_traffic.conf b/conf/03_traffic.conf
index 486ab2c8..01747a38 100644
--- a/conf/03_traffic.conf
+++ b/conf/03_traffic.conf
@@ -209,6 +209,20 @@ LOG_FILE_TRAFFIC_GEN = 'traffic-gen.log'
# 'enabled' - Specifies if the histogram provisioning is enabled or not.
# 'type' - Defines how the histogram is provided. Currently only 'Default' is defined.
# 'Default' - Default histogram as provided by the Traffic-generator.
+# 'imix' - A dictionary with the IMIX specification.
+# 'enabled' - Specifies whether IMIX is enabled or not.
+# 'type' - The specification type - denotes how IMIX is specified.
+# Currently only the 'genome' type is defined.
+# Other types (e.g. table-of-proportions) can be added in the future.
+# 'genome' - The genome encoding of packet sizes and their ratio for IMIX.
+# The ratio is inferred from the number of occurrences of each genome
+# character. Genome encoding is described in RFC 6985; this specification
+# is closest to the method described in section 6.2 of RFC 6985.
+# Ex: 'aaaaaaaddddg' denotes a 7:4:1 ratio of packet sizes 64:512:1518.
+# Note: The exact sequence is not maintained; only the ratio of packets
+# is ensured.
+# Data type: str
+# Default Value: 'aaaaaaaddddg'
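For illustration, a genome string decodes into a map of frame sizes to relative counts. A minimal Python sketch, assuming the RFC 6985 letter-to-size mapping (a=64 ... g=1518); the helper name is hypothetical, not part of VSPERF:

    from collections import Counter

    # RFC 6985 letter-to-size mapping (subset; only a/d/g appear in the default genome)
    GENOME_SIZES = {'a': 64, 'b': 128, 'c': 256, 'd': 512,
                    'e': 1024, 'f': 1280, 'g': 1518}

    def genome_to_mix(genome):
        """Decode a genome string into {frame_size: count}, e.g.
        'aaaaaaaddddg' -> {64: 7, 512: 4, 1518: 1}, i.e. a 7:4:1 mix."""
        return {GENOME_SIZES[ch]: n for ch, n in Counter(genome).items()}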
TRAFFIC = {
'traffic_type' : 'rfc2544_throughput',
'frame_rate' : 100,
@@ -264,6 +278,11 @@ TRAFFIC = {
'enabled': False,
'type': 'Default',
},
+ 'imix': {
+ 'enabled': False,
+ 'type': 'genome',
+ 'genome': 'aaaaaaaddddg',
+ },
}
#path to traffic generators directory.
@@ -453,6 +472,12 @@ TRAFFICGEN_STC_WEST_INTF_GATEWAY_ADDR = ""
# Print additional information to the terminal during the test
TRAFFICGEN_STC_VERBOSE = "True"
+# Live Results Required?
+TRAFFICGEN_STC_LIVE_RESULTS = "True"
+
+# Live results file name
+TRAFFICGEN_STC_LIVERESULTS_FILE = "stc-liveresults.dat"
+
# Spirent TestCenter Configuration -- END
#########################################
@@ -545,12 +570,16 @@ TRAFFICGEN_TREX_PROMISCUOUS = False
# side when pushing traffic. For 40G use 40000. For 25G use 25000.
TRAFFICGEN_TREX_FORCE_PORT_SPEED = False
TRAFFICGEN_TREX_PORT_SPEED = 10000 # 10G
+TRAFFICGEN_TREX_LIVE_RESULTS = True
+TRAFFICGEN_TREX_LC_FILE = "trex-liveresults-counts.dat"
+TRAFFICGEN_TREX_LE_FILE = "trex-liveresults-errors.dat"
+
PATHS['trafficgen'] = {
'Trex': {
'type' : 'src',
'src': {
- 'path': os.path.join(ROOT_DIR, 'src/trex/trex/scripts/automation/trex_control_plane/stl')
+ 'path': os.path.join(ROOT_DIR, 'src/trex/trex/scripts/automation/trex_control_plane/interactive')
}
}
}
diff --git a/conf/04_vnf.conf b/conf/04_vnf.conf
index 234f11b6..1574ca8d 100644
--- a/conf/04_vnf.conf
+++ b/conf/04_vnf.conf
@@ -87,8 +87,9 @@ GUEST_TIMEOUT = [180]
# Guest images may require different drive types such as ide to mount shared
# locations and/or boot correctly. You can modify the types here.
-GUEST_BOOT_DRIVE_TYPE = ['scsi']
-GUEST_SHARED_DRIVE_TYPE = ['scsi']
+# Default is ide, to support qemu version 3.1.1.
+GUEST_BOOT_DRIVE_TYPE = ['ide']
+GUEST_SHARED_DRIVE_TYPE = ['ide']
# guest loopback application method; supported options are:
# 'testpmd' - testpmd from dpdk will be built and used
@@ -130,10 +131,13 @@ GUEST_PROMPT = ['root.*#']
GUEST_NICS_NR = [2]
# template for guests with 4 NICS, but only GUEST_NICS_NR NICS will be configured at runtime
-GUEST_NICS = [[{'device' : 'eth0', 'mac' : '#MAC(00:00:00:00:00:01,2)', 'pci' : '00:04.0', 'ip' : '#IP(192.168.1.2,4)/24'},
- {'device' : 'eth1', 'mac' : '#MAC(00:00:00:00:00:02,2)', 'pci' : '00:05.0', 'ip' : '#IP(192.168.1.3,4)/24'},
- {'device' : 'eth2', 'mac' : '#MAC(cc:00:00:00:00:01,2)', 'pci' : '00:06.0', 'ip' : '#IP(192.168.1.4,4)/24'},
- {'device' : 'eth3', 'mac' : '#MAC(cc:00:00:00:00:02,2)', 'pci' : '00:07.0', 'ip' : '#IP(192.168.1.5,4)/24'},
+# With qemu version 3.1.1 the PCI assignments start from 00:03.0.
+# TODO: Need a better approach for PCI configuration. Currently it is based on what qemu-system-x86_64 assigns.
+# One option is to use the pci configuration as one of the parameters of the qemu-system-x86_64 command.
+GUEST_NICS = [[{'device' : 'eth0', 'mac' : '#MAC(00:00:00:00:00:01,2)', 'pci' : '00:03.0', 'ip' : '#IP(192.168.1.2,4)/24'},
+ {'device' : 'eth1', 'mac' : '#MAC(00:00:00:00:00:02,2)', 'pci' : '00:04.0', 'ip' : '#IP(192.168.1.3,4)/24'},
+ {'device' : 'eth2', 'mac' : '#MAC(cc:00:00:00:00:01,2)', 'pci' : '00:05.0', 'ip' : '#IP(192.168.1.4,4)/24'},
+ {'device' : 'eth3', 'mac' : '#MAC(cc:00:00:00:00:02,2)', 'pci' : '00:06.0', 'ip' : '#IP(192.168.1.5,4)/24'},
]]
# amount of host memory allocated for each guest
@@ -208,11 +212,18 @@ GUEST_BRIDGE_IP = ['#IP(1.1.1.5)/16']
# Note: Testpmd must be executed in interactive mode. It means, that
# VSPERF won't work correctly if '-i' will be removed.
GUEST_TESTPMD_PARAMS = ['-c 0x3 -n 4 --socket-mem 512 -- '
- '--burst=64 -i --txqflags=0xf00 '
- '--disable-hw-vlan']
+ '--burst=64 -i ']
# packet forwarding mode supported by testpmd; Please see DPDK documentation
# for comprehensive list of modes supported by your version.
# e.g. io|mac|mac_retry|macswap|flowgen|rxonly|txonly|csum|icmpecho|...
# Note: Option "mac_retry" has been changed to "mac retry" since DPDK v16.07
GUEST_TESTPMD_FWD_MODE = ['csum']
+
+# map queue stats to separate regs to verify MQ functionality
+# Setting this via testpmd command line parameters does not work as expected
+# since DPDK 18.11, so it has to be set inside testpmd, e.g. to map rx queue
+# 2 on port 0 to register 5 add: "rx 0 2 5"
+# Please see the DPDK documentation for more information on how to set stat_qmap
+# (https://doc.dpdk.org/guides/testpmd_app_ug/testpmd_funcs.html)
+GUEST_QUEUE_STATS_MAPPING = []
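For illustration only, a sketch of how such entries could be expanded into testpmd's interactive "set stat_qmap" commands (command syntax per the DPDK testpmd user guide linked above; the loop below is a hypothetical stand-in for VSPERF's actual guest plumbing):

    # hypothetical example entries, in the format described in the comment above
    queue_stats_mapping = ["rx 0 2 5", "tx 0 0 4"]

    for entry in queue_stats_mapping:
        # e.g. "set stat_qmap rx 0 2 5" maps rx queue 2 of port 0 to register 5
        cmd = "set stat_qmap {}".format(entry)
        print(cmd)  # VSPERF would type this into the guest's testpmd prompt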
diff --git a/conf/05_collector.conf b/conf/05_collector.conf
index a1bb41f8..882ef414 100644
--- a/conf/05_collector.conf
+++ b/conf/05_collector.conf
@@ -55,3 +55,28 @@ COLLECTD_INTERFACE_XKEYS = ['docker', 'lo']
# Provide individual core-ids or range of core-ids.
# The range is specified using '-'
COLLECTD_INTELRDT_XKEYS = [ ]
+
+###############################################
+# Multi Command Collector Configurations
+###############################################
+MC_COLLECTD_CSV = '/tmp/csv/'
+MC_COLLECTD_CMD = '/opt/collectd/sbin/collectd'
+MC_PROX_HOME = '/home/opnfv/irq/'
+MC_PROX_CMD = './runrapid.py'
+MC_PROX_OUT = 'RUNirq.irq.log'
+MC_CRON_OUT = '/tmp/ovs-cores.log'
+MC_BEAT_CFILE = '/etc/filebeat/filebeat.yml'
+
+###############################################
+# Cadvisor Specific configuration
+###############################################
+
+LOG_FILE_CADVISOR = 'cadvisor'
+CADVISOR_STORAGE_DRIVER = 'stdout,influxdb'
+# ip:port of influxdb
+CADVISOR_STORAGE_HOST = '10.10.120.22:8086'
+CADVISOR_DRIVER_DB = '_internal'
+# names of all containers used to calculate results
+#CADVISOR_CONTAINERS = ['container1name','container2name']
+CADVISOR_CONTAINERS = []
+
diff --git a/conf/10_custom.conf b/conf/10_custom.conf
index 0e274aab..99600966 100644
--- a/conf/10_custom.conf
+++ b/conf/10_custom.conf
@@ -147,6 +147,9 @@ TRAFFICGEN_TREX_VERIFICATION_MODE = False
TRAFFICGEN_TREX_VERIFICATION_DURATION = 60
TRAFFICGEN_TREX_MAXIMUM_VERIFICATION_TRIALS = 10
+TRAFFICGEN_TREX_RFC2544_MAX_REPEAT = 0
+TRAFFICGEN_TREX_RFC2544_BINARY_SEARCH_LOSS_VERIFICATION = False
+
# TREX Configuration and Connection Info-- END
####################################################
diff --git a/conf/11_openstack.conf b/conf/11_openstack.conf
new file mode 100644
index 00000000..6be65228
--- /dev/null
+++ b/conf/11_openstack.conf
@@ -0,0 +1,43 @@
+# Copyright 2020 Spirent Communications
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file describes a list of parameters used for deploying a TGEN,
+# on Openstack.
+
+
+DEFAULT_POLLING_INTERVAL = 10
+SCENARIOS = ['templates/l2_2c_2i.yaml']
+
+SCHEMA = 'templates/scenario.yaml'
+
+OS_AUTH_URL="http://10.10.180.21/identity"
+OS_PROJECT_ID="0440a230a799460facec0d09dde64497"
+OS_PROJECT_NAME="admin"
+OS_USER_DOMAIN_NAME="Default"
+OS_PROJECT_DOMAIN_ID="default"
+OS_USERNAME="admin"
+OS_PASSWORD="admin123"
+OS_REGION_NAME="RegionOne"
+OS_INTERFACE="public"
+OS_IDENTITY_API_VERSION=3
+OS_INSECURE=False
+OS_CA_CERT= 'None'
+
+STACK_NAME = 'testvnf_vsperf'
+CLEANUP_ON_EXIT = True
+
+FLAVOR_NAME = 'm1.large'
+IMAGE_NAME = 'bionic'
+EXTERNAL_NET = 'public'
+DNS_NAMESERVERS = ['8.8.8.8', '8.8.4.4']
diff --git a/conf/12_k8s.conf b/conf/12_k8s.conf
new file mode 100644
index 00000000..5cfac966
--- /dev/null
+++ b/conf/12_k8s.conf
@@ -0,0 +1,41 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Information about the Master Node.
+
+POD_DIR = os.path.join(ROOT_DIR, 'pods/')
+POD = 'Papi'
+
+MASTER_IP = '10.10.120.22'
+MASTER_LOGIN = 'opnfv'
+MASTER_PASSWD = 'opnfv'
+
+K8S_CONFIG_FILEPATH = '/home/opnfv/sridhar/k8sconfig'
+
+# Information about the Worker Node. Default is Localhost.
+WORKER_IP = '10.10.120.21'
+WORKER_LOGIN = 'opnfv'
+WORKER_PASSWD = 'opnfv'
+
+
+# Plugin to use.
+PLUGIN = 'ovsdpdk'
+
+# Paths. Default location: Master Node.
+NETWORK_ATTACHMENT_FILEPATH = ['/home/opnfv/sridhar/cnb/userspace/ovsdpdk/userspace-ovs-netAttach.yaml']
+POD_MANIFEST_FILEPATH = '/home/opnfv/sridhar/cnb/userspace/ovsdpdk/userspace-ovs-netapp-pod.yaml'
+
+
+# Application pod
+APP_NAME = 'l2fwd'
+
diff --git a/conf/__init__.py b/conf/__init__.py
index 83c5475f..7f6c1912 100644
--- a/conf/__init__.py
+++ b/conf/__init__.py
@@ -108,6 +108,13 @@ class Settings(object):
raise AttributeError("%r object has no attribute %r" %
(self.__class__, attr))
+ def hasValue(self, attr):
+ """Return True if the given key exists.
+ """
+ return attr in self.__dict__
+
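A brief usage sketch of the new accessor, guarding access to an optional key (OPNFVPOD is the setting introduced in 00_common.conf above; the snippet itself is illustrative):

    from conf import settings

    # act on the POD name only when the key has actually been defined
    if settings.hasValue('OPNFVPOD'):
        pod_name = settings.getValue('OPNFVPOD')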
def __setattr__(self, name, value):
"""Set a value
"""
@@ -256,6 +263,9 @@ class Settings(object):
Expand VM option with given key for given number of VMs
"""
tmp_value = self.getValue(key)
+ # skip empty/not set value
+ if not tmp_value:
+ return
if isinstance(tmp_value, str):
scalar = True
master_value = tmp_value
diff --git a/conf/integration/01b_dpdk_regression_tests.conf b/conf/integration/01b_dpdk_regression_tests.conf
index abc56c28..44343d28 100644
--- a/conf/integration/01b_dpdk_regression_tests.conf
+++ b/conf/integration/01b_dpdk_regression_tests.conf
@@ -21,6 +21,10 @@
# Generic configuration used by OVSDPDK testcases
#
############################################################
+
+# the settings module is required to compose the path to the log file
+from conf import settings
+
_OVSDPDK_1st_PMD_CORE = 4
_OVSDPDK_2nd_PMD_CORE = 5
# calculate PMD mask from core IDs configured above
@@ -32,8 +36,11 @@ _OVSDPDK_GUEST_5_CORES = [('7', '8', '9', '10', '11')]
# number of queues configured in OVS and GUEST
_OVSDPDK_MQ = '2'
-# Path to the log file
-_OVSDPDK_VSWITCH_LOG = os.path.join(LOG_DIR, LOG_FILE_VSWITCHD)
+# path to the log file
+_RESULTS_PATH = settings.getValue('RESULTS_PATH')
+name, ext = os.path.splitext(settings.getValue('LOG_FILE_VSWITCHD'))
+log_file = "{name}_{uid}{ex}".format(name=name,uid=settings.getValue('LOG_TIMESTAMP'),ex=ext)
+_OVSDPDK_VSWITCH_LOG = os.path.join(_RESULTS_PATH, log_file)
_OVSDPDK_HEADER_LEN = 18 # length of frame headers in bytes, it's used for calculation
# of payload size, i.e. payload = frame_size - header_len
@@ -172,59 +179,6 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
['tools', 'assert', 'not len(#STEP[-1])'],
]
},
- {
- # Support of netdev-dpdk/detach has been removed from OVS, so testcase will fail with recent
- # OVS/DPDK versions. There is an ongoing discussion about possible support of netdev-dpdk/detach
- # in the future OVS versions.
- # Test has been tested with:
- # OVS_TAG = 03d6399e618e4136c5da0be2b6f18f0b7d75b2bb
- # DPDK_TAG = v16.11
- "Name": "ovsdpdk_hotplug_detach",
- "Deployment": "clean",
- "Description": "Same as ovsdpdk_hotplug_attach, but delete and detach the device after the hotplug. "
- "Note: Support of netdev-dpdk/detach has been removed from OVS, so testcase will fail "
- "with recent OVS/DPDK versions.",
- "vSwitch" : "OvsDpdkVhost",
- "Parameters" : {
- # suppress DPDK configuration, so physical interfaces are not bound to DPDK driver
- 'WHITELIST_NICS' : [],
- 'NICS' : [],
- },
- "TestSteps": [
- # check if OVS supports netdev-dpdk/detach, fail otherwise
- ['tools', 'exec_shell', 'sudo $TOOLS["ovs-appctl"] list-commands', '|netdev-dpdk\/detach'],
- ['tools', 'assert', 'len(#STEP[-1])'],
- # restore original NICS configuration, so we can use add/del_phy_port
- ['settings', 'setValue', 'TEST_PARAMS', ''],
- # find out which DPDK driver is being used; it should be the last configured
- # DPDK module; optional path and .ko suffix must be removed
- ['tools', 'eval', '\'$TOOLS["dpdk_modules"][-1]\'.split("/")[-1].split(".")[0]'],
- # bind NIC to DPDK driver
- ['tools', 'exec_shell', 'sudo $TOOLS["bind-tool"] --bind #STEP[-1] $NICS[0]["pci"]'],
- # and check that DPDK port can be created without errors
- ['vswitch', 'add_switch', 'int_br0'],
- ['#port', 'vswitch', 'add_phy_port', 'int_br0'],
- ['tools', 'exec_shell', 'sudo $TOOLS["ovs-vsctl"] show',
- '|Error attaching device.*$NICS[0]["pci"]'],
- ['tools', 'assert', 'not len(#STEP[-1])'],
- # try to unbind port - should fail beause it is being used
- ['tools', 'exec_shell', 'sudo $TOOLS["ovs-appctl"] netdev-dpdk/detach $NICS[0]["pci"] 2>&1; exit 0',
- '|Device.*$NICS[0]["pci"].*is being used by interface'],
- ['tools', 'assert', 'len(#STEP[-1])'],
- # delete port and unbind it - should succeed
- ['vswitch', 'del_port', 'int_br0', '#STEP[port][0]'],
- ['tools', 'exec_shell', 'sudo $TOOLS["ovs-appctl"] netdev-dpdk/detach $NICS[0]["pci"]',
- '|Device.*$NICS[0]["pci"].*has been detached'],
- ['tools', 'assert', 'len(#STEP[-1])'],
- # try to add port again
- ['vswitch', 'add_phy_port', 'int_br0'],
- ['tools', 'exec_shell', 'sudo $TOOLS["ovs-vsctl"] show',
- '|Error attaching device.*$NICS[0]["pci"]'],
- # it will work because auto attach was implemented into OVS
- ['tools', 'assert', 'not len(#STEP[-1])'],
- ['vswitch', 'del_switch', 'int_br0'],
- ]
- },
]
############################################################
@@ -410,7 +364,7 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
"VSWITCH_DPDK_MULTI_QUEUES" : _OVSDPDK_MQ,
},
"TestSteps": [
- ['tools', 'exec_shell', 'sudo $TOOLS["ovs-appctl"] dpif-netdev/pmd-rxq-show','|dpdk[01]\s+queue-id: \d+'],
+ ['tools', 'exec_shell', 'sudo $TOOLS["ovs-appctl"] dpif-netdev/pmd-rxq-show','|dpdk[01]\s+queue-id:\s+\d+'],
# check that requested nr of queues was created on both NICs
['tools', 'assert', 'len(#STEP[-1])=={}'.format(int(_OVSDPDK_MQ)*2)],
]
@@ -432,8 +386,10 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
},
"TestSteps": [
['tools', 'exec_shell', 'sudo $TOOLS["ovs-vsctl"] -- set Interface dpdk0 other_config:pmd-rxq-affinity="0:{},1:{}"'.format(_OVSDPDK_1st_PMD_CORE, _OVSDPDK_1st_PMD_CORE)],
- ['tools', 'exec_shell', 'sudo $TOOLS["ovs-appctl"] dpif-netdev/pmd-rxq-show','|dpdk0\s+queue-id: 0 1'],
- ['tools', 'assert', 'len(#STEP[-1])==1'],
+ ['tools', 'exec_shell', 'sudo $TOOLS["ovs-appctl"] dpif-netdev/pmd-rxq-show','|dpdk0\s+queue-id:\s+0'],
+ ['tools', 'exec_shell', 'sudo $TOOLS["ovs-appctl"] dpif-netdev/pmd-rxq-show','|dpdk0\s+queue-id:\s+1'],
+ ['tools', 'assert', 'len(#STEP[-2])==1'],
+ ['tools', 'assert', 'len(#STEP[-2])==1'],
]
},
{
@@ -453,8 +409,8 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
},
"TestSteps": [
['tools', 'exec_shell', 'sudo $TOOLS["ovs-vsctl"] -- set Interface dpdk0 other_config:pmd-rxq-affinity="0:{},1:{}"'.format(_OVSDPDK_1st_PMD_CORE, _OVSDPDK_2nd_PMD_CORE)],
- ['tools', 'exec_shell', 'sudo $TOOLS["ovs-appctl"] dpif-netdev/pmd-rxq-show','|dpdk0\s+queue-id: 0$'],
- ['tools', 'exec_shell', 'sudo $TOOLS["ovs-appctl"] dpif-netdev/pmd-rxq-show','|dpdk0\s+queue-id: 1$'],
+ ['tools', 'exec_shell', 'sudo $TOOLS["ovs-appctl"] dpif-netdev/pmd-rxq-show','|dpdk0\s+queue-id:\s+0'],
+ ['tools', 'exec_shell', 'sudo $TOOLS["ovs-appctl"] dpif-netdev/pmd-rxq-show','|dpdk0\s+queue-id:\s+1'],
['tools', 'assert', 'len(#STEP[-2])==1'],
['tools', 'assert', 'len(#STEP[-2])==1'],
]
@@ -475,12 +431,15 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
"TestSteps": STEP_VSWITCH_PVP_INIT +
[
['tools', 'exec_shell', "sudo $TOOLS['ovs-appctl'] dpif-netdev/pmd-rxq-show",
- '|dpdkvhostuserclient0\s+queue-id: \d'],
+ '|dpdkvhostuserclient0\s+queue-id:\s+\d'],
['tools', 'assert', 'len(#STEP[-1])==1'],
['vnf', 'start'],
['tools', 'exec_shell', "sudo $TOOLS['ovs-appctl'] dpif-netdev/pmd-rxq-show",
- '|dpdkvhostuserclient0\s+queue-id: 0 1'],
- ['tools', 'assert', 'len(#STEP[-1])==1'],
+ '|dpdkvhostuserclient0\s+queue-id:\s+0'],
+ ['tools', 'exec_shell', "sudo $TOOLS['ovs-appctl'] dpif-netdev/pmd-rxq-show",
+ '|dpdkvhostuserclient0\s+queue-id:\s+1'],
+ ['tools', 'assert', 'len(#STEP[-2])==1'],
+ ['tools', 'assert', 'len(#STEP[-2])==1'],
['vnf', 'stop'],
] +
STEP_VSWITCH_PVP_FINIT
@@ -500,11 +459,12 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
"TRAFFICGEN_DURATION" : 5,
"TRAFFICGEN" : "IxNet",
"TRAFFIC" : {
- "bidir" : "false",
+ "bidir" : "False",
"traffic_type" : "rfc2544_continuous",
"multistream" : 6,
"stream_type" : "L3",
"frame_rate" : 1,
+ "learning_frames" : False,
'l2': {
'srcmac': "00:00:07:00:0E:00",
'dstmac': "00:00:00:00:00:01"
@@ -514,7 +474,7 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
'proto': 'udp',
'srcip': '6.6.6.6',
'dstip': '1.1.1.1',
- },
+ }
}
},
"TestSteps": STEP_VSWITCH_PVP_INIT + [
@@ -525,6 +485,7 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
# so send_traffic() will end with success
['vswitch', 'add_flow', 'int_br0',
{'in_port': '#STEP[2][1]', 'actions': ['output:#STEP[1][1]'], 'idle_timeout': '0'}],
+ ['vswitch', 'add_flow', 'int_br0', {'priority' : '0', 'actions' : ['NORMAL']}],
['vnf', 'start'],
# configure two channels, so multiple cores could be used
['vnf', 'execute_and_wait', 'ethtool -L eth0 combined 2'],
@@ -549,7 +510,6 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
['vnf', 'execute_and_wait', 'route add default gw 1.1.1.5 eth0'],
['vnf', 'execute_and_wait', 'arp -s 1.1.1.5 DE:AD:BE:EF:CA:FC'],
['vnf', 'execute_and_wait', 'ip a'],
-
['trafficgen', 'send_traffic',{}],
# check interrupts to verify that traffic was correctly dispatched...
['#result', 'vnf', 'execute_and_wait', 'cat /proc/interrupts',
@@ -580,16 +540,15 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
# there must be separate CPU for each of RX/TX queues
"GUEST_SMP" : ['5'],
"GUEST_TESTPMD_PARAMS" : ['-c 0x1F -n 4 --socket-mem 512 -- '
- '--burst=64 -i --txqflags=0xf00 --nb-cores=4 '
- # map queue stats to separate regs to verify MQ functionality
- '--rx-queue-stats-mapping=\(0,0,0\),\(0,1,1\),\(1,0,2\),\(1,1,3\) '
- '--tx-queue-stats-mapping=\(0,0,4\),\(0,1,5\),\(1,0,6\),\(1,1,7\) '
- '--disable-hw-vlan --rxq=2 --txq=2'],
+ '--burst=64 -i --nb-cores=4 '
+ '--rxq=2 --txq=2'],
"TRAFFICGEN_DURATION" : 5,
"TRAFFIC" : {
"traffic_type" : "rfc2544_continuous",
"multistream" : 3,
"stream_type" : "L3",
+ "frame_rate" : 1,
+ "learning_frames" : False,
'l3': {
'enabled': True,
'proto': 'udp',
@@ -597,12 +556,21 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
'dstip': '1.1.1.1',
},
},
+ "GUEST_QUEUE_STATS_MAPPING" : ["rx 0 0 0",
+ "rx 0 1 1",
+ "rx 1 0 2",
+ "rx 1 1 3",
+ "tx 0 0 4",
+ "tx 0 1 5",
+ "tx 1 0 6",
+ "tx 1 1 7"
+ ]
},
"TestSteps": STEP_VSWITCH_PVP_FLOWS_INIT +
[
['vnf', 'start'],
['tools', 'exec_shell', "sudo $TOOLS['ovs-appctl'] dpif-netdev/pmd-rxq-show",
- '|dpdk\w+\s+queue-id: \d'],
+ '|dpdk\w+\s+queue-id:\s+\d'],
# there must be two standalone queue records for every interface (2x4)
['tools', 'assert', 'len(#STEP[-1])==8'],
['trafficgen', 'send_traffic', {}],
@@ -739,7 +707,7 @@ _OVSDPDK_VDEV_ADD_NULL = [
['vswitch', 'add_switch', 'int_br0'],
['tools', 'exec_shell', 'sudo $TOOLS["ovs-vsctl"] add-port int_br0 null0 -- '
'set Interface null0 type=dpdk options:dpdk-devargs=eth_null0'],
- ['tools', 'exec_shell', 'sudo $TOOLS["ovs-vsctl"] show', '|dpdk-devargs=\S+eth_null0'],
+ ['tools', 'exec_shell', 'sudo $TOOLS["ovs-vsctl"] show', '|dpdk-devargs=eth_null0'],
['tools', 'assert', 'len(#STEP[-1])==1'],
]
@@ -776,7 +744,7 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
"TestSteps": _OVSDPDK_VDEV_ADD_NULL + [
['tools', 'exec_shell', 'sudo $TOOLS["ovs-vsctl"] del-port null0'],
['tools', 'exec_shell', 'sudo $TOOLS["ovs-vsctl"] show',
- '|dpdk-devargs=\S+eth_null0'],
+ '|dpdk-devargs=eth_null0'],
['tools', 'assert', 'not len(#STEP[-1])'],
]
},
@@ -1017,9 +985,9 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
"TestSteps": [
['vswitch', 'add_switch', 'int_br0'],
['vswitch', 'add_phy_port', 'int_br0'],
- ['tools', 'exec_shell', 'sudo $TOOLS["ovs-vsctl"] set Interface dpdk0 mtu_request=9710'],
+ ['tools', 'exec_shell', 'sudo $TOOLS["ovs-vsctl"] set Interface dpdk0 mtu_request=9702'],
['tools', 'exec_shell', 'sudo $TOOLS["ovs-vsctl"] get Interface dpdk0 mtu'],
- ['tools', 'assert', 'int(#STEP[-1])==9710'],
+ ['tools', 'assert', 'int(#STEP[-1])==9702'],
# get line number of next log file entry
['tools', 'exec_shell', 'echo $((1+`wc -l $_OVSDPDK_VSWITCH_LOG | cut -d" " -f1`))', '(\d+)'],
['tools', 'exec_shell', 'sudo $TOOLS["ovs-vsctl"] set Interface dpdk0 mtu_request=9711'],
@@ -1027,7 +995,7 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
['tools', 'exec_shell', "sed -n '#STEP[-2][0],$ p' $_OVSDPDK_VSWITCH_LOG",
'|unsupported MTU 9711'],
['tools', 'exec_shell', 'sudo $TOOLS["ovs-vsctl"] get Interface dpdk0 mtu'],
- ['tools', 'assert', 'int(#STEP[-1])==9710'],
+ ['tools', 'assert', 'int(#STEP[-1])==9702'],
]
},
{
@@ -1041,9 +1009,9 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
"TestSteps": [
['vswitch', 'add_switch', 'int_br0'],
['vswitch', 'add_vport', 'int_br0'],
- ['tools', 'exec_shell', 'sudo $TOOLS["ovs-vsctl"] set Interface dpdkvhostuserclient0 mtu_request=9710'],
+ ['tools', 'exec_shell', 'sudo $TOOLS["ovs-vsctl"] set Interface dpdkvhostuserclient0 mtu_request=9702'],
['tools', 'exec_shell', 'sudo $TOOLS["ovs-vsctl"] get Interface dpdkvhostuserclient0 mtu'],
- ['tools', 'assert', 'int(#STEP[-1])==9710'],
+ ['tools', 'assert', 'int(#STEP[-1])==9702'],
# get line number of next log file entry
['tools', 'exec_shell', 'echo $((1+`wc -l $_OVSDPDK_VSWITCH_LOG | cut -d" " -f1`))', '(\d+)'],
['tools', 'exec_shell', 'sudo $TOOLS["ovs-vsctl"] set Interface dpdkvhostuserclient0 mtu_request=9711'],
@@ -1052,7 +1020,7 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
'|unsupported MTU 9711'],
['tools', 'assert', 'len(#STEP[-1])'],
['tools', 'exec_shell', 'sudo $TOOLS["ovs-vsctl"] get Interface dpdkvhostuserclient0 mtu'],
- ['tools', 'assert', 'int(#STEP[-1])==9710'],
+ ['tools', 'assert', 'int(#STEP[-1])==9702'],
]
},
{
@@ -1200,8 +1168,8 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
['trafficgen', 'get_results'],
# all traffic should pass through (i.e. 0% frame loss)
['tools', 'assert', 'float(#STEP[-1][0]["frame_loss_percent"])==0'],
- # set packetsize to 9019 and send traffic
- ['settings', 'setValue', 'TRAFFICGEN_PKT_SIZES', (9019,)],
+ # set packetsize to 9702 and send traffic
+ ['settings', 'setValue', 'TRAFFICGEN_PKT_SIZES', (9702,)],
# disable verification of send_traffic "!" prefix, otherwise vsperf
# will fail when 100% packet loss is detected
['!trafficgen', 'send_traffic', {}],
@@ -1244,10 +1212,6 @@ _OVSDPDK_RATE_set_rate_limiter = [
'set Interface $_OVSDPDK_RATE_PORT$_OVSDPDK_RATE_NICID '
'ingress_policing_burst=$_OVSDPDK_RATE_BURST '
'ingress_policing_rate=$_OVSDPDK_RATE_RATE'],
- # check vswitchd log file, that rate limiter was created
- ['tools', 'exec_shell', "sed -n '#STEP[-2][0],$ p' $_OVSDPDK_VSWITCH_LOG",
- '|CIR period'],
- ['tools', 'assert', '("CIR period" in #STEP[-1])==$_OVSDPDK_RATE_LIMITER_CREATED'],
# verify that interface has correct rate limiter configuration
['tools', 'exec_shell', 'sudo $TOOLS["ovs-vsctl"] '
'list interface $_OVSDPDK_RATE_PORT$_OVSDPDK_RATE_NICID',
@@ -1491,11 +1455,6 @@ _OVSDPDK_QOS_set_qos = [
'other-config:cbs=$_OVSDPDK_QOS_CBS','|\w{8}-\w{4}-\w{4}-\w{4}-\w{12}'],
['tools', 'assert', 'len(#STEP[-1])==1'],
- # Check the OVS logs
- ['tools', 'exec_shell', "sed -n '#STEP[-3][0],$ p' $_OVSDPDK_VSWITCH_LOG",
- '|CIR period'],
- ['tools', 'assert', '"CIR period" in #STEP[-1]'],
-
# Check the QoS policy and attributes
['tools', 'exec_shell', 'sudo $TOOLS["ovs-appctl"] -t ovs-vswitchd qos/show '
'$_OVSDPDK_QOS_PORT$_OVSDPDK_QOS_NICID', '.+'],
@@ -1615,7 +1574,7 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
# Check the OVS logs
['tools', 'exec_shell', "sed -n '#STEP[LOG_MARK][0],$ p' $_OVSDPDK_VSWITCH_LOG",
- 'Failed to set QoS type egress-policer on port #STEP[1][0]: No such file or directory'],
+ 'Failed to set QoS type egress-policer on port #STEP[1][0]: Invalid argument'],
['tools', 'assert', 'len(#STEP[-1])==1'],
# Check the attributes for vhost0
@@ -1645,7 +1604,7 @@ INTEGRATION_TESTS = INTEGRATION_TESTS + [
# Check the OVS logs
['tools', 'exec_shell', "sed -n '#STEP[LOG_MARK][0],$ p' $_OVSDPDK_VSWITCH_LOG",
- 'Failed to set QoS type egress-policer on port #STEP[1][0]: No such file or directory'],
+ 'Failed to set QoS type egress-policer on port #STEP[1][0]: Invalid argument'],
['tools', 'assert', 'len(#STEP[-1])==1'],
# Check the attributes for vhost0
diff --git a/conf/kubernetes/01_testcases.conf b/conf/kubernetes/01_testcases.conf
new file mode 100644
index 00000000..c5b3135c
--- /dev/null
+++ b/conf/kubernetes/01_testcases.conf
@@ -0,0 +1,12 @@
+K8SPERFORMANCE_TESTS = [
+ {
+ "Name": "pcp_tput",
+ "Deployment": "p2p",
+ "Description": "LTD.Throughput.RFC2544.Throughput",
+ "Parameters" : {
+ "TRAFFIC" : {
+ "traffic_type" : "rfc2544_throughput",
+ },
+ },
+ },
+]
diff --git a/core/component_factory.py b/core/component_factory.py
index 2c51a060..f13bfb5b 100644
--- a/core/component_factory.py
+++ b/core/component_factory.py
@@ -24,7 +24,7 @@ from core.vswitch_controller_op2p import VswitchControllerOP2P
from core.vswitch_controller_ptunp import VswitchControllerPtunP
from core.vnf_controller import VnfController
from core.pktfwd_controller import PktFwdController
-
+from core.pod_controller import PodController
def __init__():
"""Finds and loads all the modules required.
@@ -102,6 +102,19 @@ def create_vnf(deployment_scenario, vnf_class, extra_vnfs):
"""
return VnfController(deployment_scenario, vnf_class, extra_vnfs)
+def create_pod(deployment_scenario, pod_class):
+ """Return a new PodController for the deployment_scenario.
+
+ The returned controller is configured with the given POD class.
+
+    Deployment scenarios: 'p2p'
+
+ :param deployment_scenario: The deployment scenario name
+ :param pod_class: Reference to pod class to be used.
+ :return: PodController for the deployment_scenario
+ """
+ return PodController(deployment_scenario, pod_class)
+
def create_collector(collector_class, result_dir, test_name):
"""Return a new Collector of the given class
diff --git a/core/loader/loader.py b/core/loader/loader.py
index dcd77ced..45e0d5ba 100755
--- a/core/loader/loader.py
+++ b/core/loader/loader.py
@@ -23,6 +23,7 @@ from tools.pkt_fwd.pkt_fwd import IPktFwd
from tools.pkt_gen.trafficgen import ITrafficGenerator
from vswitches.vswitch import IVSwitch
from vnfs.vnf.vnf import IVnf
+from pods.pod.pod import IPod
# pylint: disable=too-many-public-methods
class Loader(object):
@@ -71,6 +72,11 @@ class Loader(object):
settings.getValue('PKTFWD'),
IPktFwd)
+ self._pod_loader = LoaderServant(
+ settings.getValue('POD_DIR'),
+ settings.getValue('POD'),
+ IPod)
+
def get_trafficgen(self):
"""Returns a new instance configured traffic generator.
@@ -220,6 +226,37 @@ class Loader(object):
"""
return self._vnf_loader.get_classes_printable()
+ def get_pod(self):
+ """Returns instance of currently configured pod implementation.
+
+ :return: IPod implementation if available, None otherwise.
+ """
+ return self._pod_loader.get_class()()
+
+ def get_pod_class(self):
+ """Returns type of currently configured pod implementation.
+
+ :return: Type of IPod implementation if available.
+ None otherwise.
+ """
+ return self._pod_loader.get_class()
+
+ def get_pods(self):
+ """Returns dictionary of all available pods.
+
+ :return: Dictionary of pods.
+ - key: name of the class which implements IPod,
+                 - value: Type of pod which implements IPod.
+ """
+ return self._pod_loader.get_classes()
+
+ def get_pods_printable(self):
+ """Returns all available pods in printable format.
+
+ :return: String containing printable list of pods.
+ """
+ return self._pod_loader.get_classes_printable()
+
def get_pktfwd(self):
"""Returns instance of currently configured packet forwarder implementation.
diff --git a/core/pod_controller.py b/core/pod_controller.py
new file mode 100644
index 00000000..8bc91ec4
--- /dev/null
+++ b/core/pod_controller.py
@@ -0,0 +1,93 @@
+# Copyright 2020 Spirent Communications
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" pod Controller interface
+"""
+
+import logging
+import pexpect
+#from conf import settings
+from pods.pod.pod import IPod
+
+class PodController():
+ """POD controller class
+
+ Used to set-up and control PODs for specified scenario
+
+ Attributes:
+ _pod_class: A class object representing the POD.
+ _deployment: A string describing the scenario to set-up in the
+ constructor.
+ _pods: A list of pods controlled by the controller.
+ """
+
+ def __init__(self, deployment, pod_class):
+ """Sets up the POD infrastructure based on deployment scenario
+
+ :param pod_class: The POD class to be used.
+ """
+ # reset POD ID counter for each testcase
+ IPod.reset_pod_counter()
+ pod_number = 0
+ # setup controller with requested number of pods
+ self._logger = logging.getLogger(__name__)
+ self._pod_class = pod_class
+ self._deployment = deployment.lower()
+ self._pods = []
+ if self._deployment == 'p2p':
+ pod_number = 1
+
+ if pod_number:
+ self._pods = [pod_class() for _ in range(pod_number)]
+
+ self._logger.debug('Initializing the pod')
+
+ def get_pods(self):
+ """Returns a list of pods controlled by this controller.
+ """
+ self._logger.debug('get the pods')
+ return self._pods
+
+ def get_pods_number(self):
+ """Returns a number of pods controlled by this controller.
+ """
+ self._logger.debug('get_pods_number %s pod[s]', str(len(self._pods)))
+ return len(self._pods)
+
+ def start(self):
+ """Boots all pods set-up by __init__.
+
+ This is a blocking function.
+ """
+ self._logger.debug('start the pod')
+ try:
+ for pod in self._pods:
+ pod.create()
+ except pexpect.TIMEOUT:
+ self.stop()
+ raise
+
+ def stop(self):
+ """Stops all pods set-up by __init__.
+
+ This is a blocking function.
+ """
+ self._logger.debug('stopping the pod')
+ for pod in self._pods:
+ pod.terminate()
+
+ def __enter__(self):
+ self.start()
+
+ def __exit__(self, type_, value, traceback):
+ self.stop()
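+
+# Example usage (an illustrative sketch only; 'Papi' stands for any class
+# implementing IPod, e.g. the one provided in pods/papi/papi.py, and 'p2p'
+# is the only deployment scenario handled by __init__ above):
+#
+#   from pods.papi.papi import Papi
+#   controller = PodController('p2p', Papi)
+#   with controller:      # __enter__ starts the pods, __exit__ terminates them
+#       for pod in controller.get_pods():
+#           pass          # run traffic against the deployed pod(s)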
diff --git a/core/results/results_constants.py b/core/results/results_constants.py
index 967adbf9..769938a8 100644
--- a/core/results/results_constants.py
+++ b/core/results/results_constants.py
@@ -73,6 +73,11 @@ class ResultsConstants(object):
CAPTURE_TX = "capture_tx"
CAPTURE_RX = "capture_rx"
+ # IMIX Used
+ IMIX_GENOME = "imix_genome"
+ # IMIX Avg. Frame Size
+ IMIX_AVG_FRAMESIZE = "imix_avg_framesize"
+
@staticmethod
def get_traffic_constants():
"""Method returns all Constants used to store results.
@@ -96,4 +101,6 @@ class ResultsConstants(object):
ResultsConstants.MIN_LATENCY_NS,
ResultsConstants.MAX_LATENCY_NS,
ResultsConstants.AVG_LATENCY_NS,
- ResultsConstants.FRAME_LOSS_PERCENT]
+ ResultsConstants.FRAME_LOSS_PERCENT,
+ ResultsConstants.IMIX_GENOME,
+ ResultsConstants.IMIX_AVG_FRAMESIZE]
diff --git a/core/vswitch_controller_p2p.py b/core/vswitch_controller_p2p.py
index d8f22e4c..0037d484 100644
--- a/core/vswitch_controller_p2p.py
+++ b/core/vswitch_controller_p2p.py
@@ -45,8 +45,9 @@ class VswitchControllerP2P(IVswitchController):
(port1, _) = self._vswitch.add_phy_port(self._bridge)
(port2, _) = self._vswitch.add_phy_port(self._bridge)
- self._vswitch.add_connection(self._bridge, port1, port2, self._traffic)
- self._vswitch.add_connection(self._bridge, port2, port1, self._traffic)
+ if not settings.getValue('K8S'):
+ self._vswitch.add_connection(self._bridge, port1, port2, self._traffic)
+ self._vswitch.add_connection(self._bridge, port2, port1, self._traffic)
except:
self._vswitch.stop()
diff --git a/docs/index.rst b/docs/index.rst
index d76a1952..c8a400f8 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -17,5 +17,8 @@ OPNFV Vswitchperf
testing/developer/devguide/index
testing/developer/devguide/results/index
testing/user/configguide/index
- testing/user/userguide/index
+ lma/index
+ openstack/index
+ k8s/index
+ xtesting/index
diff --git a/docs/k8s/index.rst b/docs/k8s/index.rst
new file mode 100644
index 00000000..872a3280
--- /dev/null
+++ b/docs/k8s/index.rst
@@ -0,0 +1,40 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) OPNFV, Spirent, AT&T, Ixia and others.
+
+.. OPNFV VSPERF Documentation master file.
+
+=========================================================
+OPNFV VSPERF Kubernetes Container Networking Benchmarking
+=========================================================
+VSPERF supports testing and benchmarking of Kubernetes container networking solutions, referred to as Kubernetes Container Networking Benchmarking (CNB). The process can be broadly classified into the following four operations.
+
+1. Setting up of Kubernetes Cluster.
+2. Deploying container networking solution.
+3. Deploying pod(s).
+4. Running tests.
+
+The first step is achieved through the tool present in the *tools/k8s/cluster-deployment* folder. Please refer to the documentation in that folder for automated Kubernetes cluster setup. To perform the remaining steps, the user has to run the following command.
+
+.. code-block:: console
+
+ vsperf --k8s --conf-file k8s.conf pcp_tput
+
+************************
+Important Configurations
+************************
+
+VSPERF has introduced new configuration parameters, listed below, for Kubernetes CNB. The file *12_k8s.conf*, present in the *conf* folder, provides sample values. Users have to modify these parameters to suit their environment before running the above command; an illustrative snippet follows the list.
+
+1. K8S_CONFIG_FILEPATH - location of the kubernetes-cluster access file. This will be used to connect to the cluster.
+2. PLUGIN - The plugin to use. Allowed values are OvsDPDK, VPP, and SRIOV.
+3. NETWORK_ATTACHMENT_FILEPATH - location of the network attachment definition file.
+4. CONFIGMAP_FILEPATH - location of the config-map file. This will be used only for SRIOV plugin.
+5. POD_MANIFEST_FILEPATH - location of the POD definition file.
+6. APP_NAME - Application to run in the pod. Options - l2fwd, testpmd, and l3fwd.
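+
+A minimal sketch of a custom *k8s.conf* (the parameter names come from the list
+above; all values are illustrative placeholders for one environment, not
+shipped defaults):
+
+.. code-block:: python
+
+   # Cluster access and plugin selection
+   K8S_CONFIG_FILEPATH = '/home/opnfv/.kube/config'
+   PLUGIN = 'OvsDPDK'
+   # Definition files used to create the network and the pod
+   NETWORK_ATTACHMENT_FILEPATH = '/home/opnfv/nad-ovsdpdk.yaml'
+   CONFIGMAP_FILEPATH = '/home/opnfv/configmap.yaml'   # SRIOV plugin only
+   POD_MANIFEST_FILEPATH = '/home/opnfv/pod.yaml'
+   APP_NAME = 'testpmd'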
+
+
+*********
+Testcases
+*********
+Kubernetes CNB will be done through new testcases. For the Jerma release, only pcp_tput will be supported. This testcase is similar to pvp_tput, with the VNF replaced by a pod/container. The pcp_tput testcase still uses phy2phy as its deployment. In future releases, a new deployment model will be added to support more testcases for Kubernetes.
diff --git a/docs/lma/index.rst b/docs/lma/index.rst
new file mode 100644
index 00000000..dd6be47b
--- /dev/null
+++ b/docs/lma/index.rst
@@ -0,0 +1,18 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) OPNFV, Intel Corporation, AT&T, Red Hat, Spirent, Ixia and others.
+
+.. OPNFV VSPERF LMA Documentation master file.
+
+***********************
+OPNFV VSPERF LMA Guides
+***********************
+
+.. toctree::
+ :caption: Developer Guide for Monitoring Tools
+ :maxdepth: 2
+
+ ./metrics/userguide.rst
+ ./metrics/devguide.rst
+ ./logs/userguide.rst
+ ./logs/devguide.rst
diff --git a/docs/lma/logs/devguide.rst b/docs/lma/logs/devguide.rst
new file mode 100644
index 00000000..7aeaad29
--- /dev/null
+++ b/docs/lma/logs/devguide.rst
@@ -0,0 +1,145 @@
+====================
+Logs Developer Guide
+====================
+
+Ansible Client-side
+-------------------
+
+Ansible File Organisation
+^^^^^^^^^^^^^^^^^^^^^^^^^
+Files Structure::
+
+ ansible-client
+ ├── ansible.cfg
+ ├── hosts
+ ├── playbooks
+ │ └── setup.yaml
+ └── roles
+ ├── clean-td-agent
+ │ └── tasks
+ │ └── main.yml
+ └── td-agent
+ ├── files
+ │ └── td-agent.conf
+ └── tasks
+ └── main.yml
+
+Summary of roles
+^^^^^^^^^^^^^^^^
+====================== ======================
+Roles Description
+====================== ======================
+``td-agent`` Install Td-agent & change configuration file
+``clean-td-agent``     Uninstall Td-agent
+====================== ======================
+
+Configurable Parameters
+^^^^^^^^^^^^^^^^^^^^^^^
+====================================================== ====================== ======================
+File (ansible-client/roles/) Parameter Description
+====================================================== ====================== ======================
+``td-agent/files/td-agent.conf`` host Fluentd-server IP
+``td-agent/files/td-agent.conf`` port Fluentd-Server Port
+====================================================== ====================== ======================
+
+Ansible Server-side
+-------------------
+
+Ansible File Organisation
+^^^^^^^^^^^^^^^^^^^^^^^^^
+Files Structure::
+
+ ansible-server
+ ├── ansible.cfg
+ ├── group_vars
+ │ └── all.yml
+ ├── hosts
+ ├── playbooks
+ │ └── setup.yaml
+ └── roles
+ ├── clean-logging
+ │ └── tasks
+ │ └── main.yml
+ ├── k8s-master
+ │ └── tasks
+ │ └── main.yml
+ ├── k8s-pre
+ │ └── tasks
+ │ └── main.yml
+ ├── k8s-worker
+ │ └── tasks
+ │ └── main.yml
+ ├── logging
+ │ ├── files
+ │ │ ├── elastalert
+ │ │ │ ├── ealert-conf-cm.yaml
+ │ │ │ ├── ealert-key-cm.yaml
+ │ │ │ ├── ealert-rule-cm.yaml
+ │ │ │ └── elastalert.yaml
+ │ │ ├── elasticsearch
+ │ │ │ ├── elasticsearch.yaml
+ │ │ │ └── user-secret.yaml
+ │ │ ├── fluentd
+ │ │ │ ├── fluent-cm.yaml
+ │ │ │ ├── fluent-service.yaml
+ │ │ │ └── fluent.yaml
+ │ │ ├── kibana
+ │ │ │ └── kibana.yaml
+ │ │ ├── namespace.yaml
+ │ │ ├── nginx
+ │ │ │ ├── nginx-conf-cm.yaml
+ │ │ │ ├── nginx-key-cm.yaml
+ │ │ │ ├── nginx-service.yaml
+ │ │ │ └── nginx.yaml
+ │ │ ├── persistentVolume.yaml
+ │ │ └── storageClass.yaml
+ │ └── tasks
+ │ └── main.yml
+ └── nfs
+ └── tasks
+ └── main.yml
+
+Summary of roles
+^^^^^^^^^^^^^^^^
+====================== ======================
+Roles Description
+====================== ======================
+``k8s-pre``            Prerequisites for installing K8s, like installing Docker & K8s, disabling swap, etc.
+``k8s-master``         Reset K8s & make a master
+``k8s-worker``         Join worker nodes with token
+``logging``            EFK & elastalert setup in K8s
+``clean-logging``      Remove EFK & elastalert setup from K8s
+``nfs``                Start an NFS server to store Elasticsearch data
+====================== ======================
+
+Configurable Parameters
+^^^^^^^^^^^^^^^^^^^^^^^
+========================================================================= ============================================ ======================
+File (ansible-server/roles/) Parameter name Description
+========================================================================= ============================================ ======================
+**Role: logging**
+``logging/files/persistentVolume.yaml`` storage Increase or Decrease Storage size of Persistent Volume size for each VM
+``logging/files/kibana/kibana.yaml`` version To Change the Kibana Version
+``logging/files/kibana/kibana.yaml`` count To increase or decrease the replica
+``logging/files/elasticsearch/elasticsearch.yaml`` version To Change the Elasticsearch Version
+``logging/files/elasticsearch/elasticsearch.yaml`` nodePort To Change Service Port
+``logging/files/elasticsearch/elasticsearch.yaml`` storage Increase or Decrease Storage size of Elasticsearch data for each VM
+``logging/files/elasticsearch/elasticsearch.yaml`` nodeAffinity -> values (hostname) In which VM Elasticsearch master or data pod will run (change the hostname to run the Elasticsearch master or data pod on a specific node)
+``logging/files/elasticsearch/user-secret.yaml`` stringData Add Elasticsearch User & its roles (`Elastic Docs <https://www.elastic.co/guide/en/cloud-on-k8s/master/k8s-users-and-roles.html#k8s_file_realm>`_)
+``logging/files/fluentd/fluent.yaml`` replicas To increase or decrease the replica
+``logging/files/fluentd/fluent-service.yaml`` nodePort To Change Service Port
+``logging/files/fluentd/fluent-cm.yaml`` index_template.json -> number_of_replicas To increase or decrease replica of data in Elasticsearch
+``logging/files/fluentd/fluent-cm.yaml`` fluent.conf Server port & other Fluentd Configuration
+``logging/files/nginx/nginx.yaml`` replicas To increase or decrease the replica
+``logging/files/nginx/nginx-service.yaml`` nodePort To Change Service Port
+``logging/files/nginx/nginx-key-cm.yaml`` kibana-access.key, kibana-access.pem Key file for HTTPs Connection
+``logging/files/nginx/nginx-conf-cm.yaml`` - Nginx Configuration
+``logging/files/elastalert/elastalert.yaml`` replicas To increase or decrease the replica
+``logging/files/elastalert/ealert-key-cm.yaml`` elastalert.key, elastalert.pem Key file for HTTPs Connection
+``logging/files/elastalert/ealert-conf-cm.yaml`` run_every How often ElastAlert will query Elasticsearch
+``logging/files/elastalert/ealert-conf-cm.yaml`` alert_time_limit If an alert fails for some reason, ElastAlert will retry sending the alert until this time period has elapsed
+``logging/files/elastalert/ealert-conf-cm.yaml``                          es_host, es_port                             Elasticsearch Service name & port in K8s
+``logging/files/elastalert/ealert-rule-cm.yaml`` http_post_url Alert Receiver IP (`Elastalert Rule Config <https://elastalert.readthedocs.io/en/latest/ruletypes.html>`_)
+**Role: nfs**
+``nfs/tasks/main.yml`` line Path of NFS storage
+========================================================================= ============================================ ======================
diff --git a/docs/lma/logs/images/elasticsearch.png b/docs/lma/logs/images/elasticsearch.png
new file mode 100644
index 00000000..f0b876f5
--- /dev/null
+++ b/docs/lma/logs/images/elasticsearch.png
Binary files differ
diff --git a/docs/lma/logs/images/fluentd-cs.png b/docs/lma/logs/images/fluentd-cs.png
new file mode 100644
index 00000000..513bb3ef
--- /dev/null
+++ b/docs/lma/logs/images/fluentd-cs.png
Binary files differ
diff --git a/docs/lma/logs/images/fluentd-ss.png b/docs/lma/logs/images/fluentd-ss.png
new file mode 100644
index 00000000..4e9ab112
--- /dev/null
+++ b/docs/lma/logs/images/fluentd-ss.png
Binary files differ
diff --git a/docs/lma/logs/images/nginx.png b/docs/lma/logs/images/nginx.png
new file mode 100644
index 00000000..a0b00514
--- /dev/null
+++ b/docs/lma/logs/images/nginx.png
Binary files differ
diff --git a/docs/lma/logs/images/setup.png b/docs/lma/logs/images/setup.png
new file mode 100644
index 00000000..267685fa
--- /dev/null
+++ b/docs/lma/logs/images/setup.png
Binary files differ
diff --git a/docs/lma/logs/userguide.rst b/docs/lma/logs/userguide.rst
new file mode 100644
index 00000000..9b616fe7
--- /dev/null
+++ b/docs/lma/logs/userguide.rst
@@ -0,0 +1,386 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) OPNFV, Intel Corporation, AT&T, Red Hat, Spirent, Ixia and others.
+
+.. OPNFV VSPERF Documentation master file.
+
+***************
+Logs User Guide
+***************
+
+Prerequisites
+=============
+
+- Requires 3 VMs to set up K8s
+- ``$ sudo yum install ansible``
+- ``$ pip install openshift pyyaml kubernetes`` (required for ansible K8s module)
+- Update IPs in all these files (if changed)
+ ====================================================================== ======================
+ Path Description
+ ====================================================================== ======================
+ ``ansible-server/group_vars/all.yml`` IP of K8s apiserver and VM hostname
+ ``ansible-server/hosts`` IP of VMs to install
+ ``ansible-server/roles/logging/files/persistentVolume.yaml`` IP of NFS-Server
+ ``ansible-server/roles/logging/files/elastalert/ealert-rule-cm.yaml`` IP of alert-receiver
+ ====================================================================== ======================
+
+Architecture
+============
+.. image:: images/setup.png
+
+Installation - Clientside
+=========================
+
+Nodes
+-----
+
+- **Node1** = 10.10.120.21
+- **Node4** = 10.10.120.24
+
+How is the installation done?
+------------------------------
+
+- TD-agent installation
+ ``$ curl -L https://toolbelt.treasuredata.com/sh/install-redhat-td-agent3.sh | sh``
+- Copy the TD-agent config file in **Node1**
+ ``$ cp tdagent-client-config/node1.conf /etc/td-agent/td-agent.conf``
+- Copy the TD-agent config file in **Node4**
+ ``$ cp tdagent-client-config/node4.conf /etc/td-agent/td-agent.conf``
+- Restart the service
+ ``$ sudo service td-agent restart``
+
+Installation - Serverside
+=========================
+
+Nodes
+-----
+
+Inside Jumphost - POD12
+ - **VM1** = 10.10.120.211
+ - **VM2** = 10.10.120.203
+ - **VM3** = 10.10.120.204
+
+
+How is the installation done?
+------------------------------
+
+**Using Ansible:**
+ - **K8s**
+ - **Elasticsearch:** 1 Master & 1 Data node at each VM
+      - **Kibana:** 1 Replica
+ - **Nginx:** 2 Replicas
+ - **Fluentd:** 2 Replicas
+      - **Elastalert:** 1 Replica (duplicate alerts are received if the replica count is increased)
+      - **NFS Server:** at each VM to store Elasticsearch data at the following paths
+ - ``/srv/nfs/master``
+ - ``/srv/nfs/data``
+
+How to setup?
+-------------
+
+- **To set up K8s cluster and EFK:** Run the ansible-playbook ``ansible/playbooks/setup.yaml``
+- **To clean everything:** Run the ansible-playbook ``ansible/playbooks/clean.yaml``
+
+Do we have HA?
+--------------
+
+Yes
+
+Configuration
+=============
+
+K8s
+---
+
+Path of all yamls (Serverside)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+``ansible-server/roles/logging/files/``
+
+K8s namespace
+^^^^^^^^^^^^^
+
+``logging``
+
+K8s Service details
+^^^^^^^^^^^^^^^^^^^
+
+``$ kubectl get svc -n logging``
+
+Elasticsearch Configuration
+---------------------------
+
+Elasticsearch Setup Structure
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. image:: images/elasticsearch.png
+
+Elasticsearch service details
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+| **Service Name:** ``logging-es-http``
+| **Service Port:** ``9200``
+| **Service Type:** ``ClusterIP``
+
+How to get elasticsearch default username & password?
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- User1 (custom user):
+ | **Username:** ``elasticsearch``
+ | **Password:** ``password123``
+- User2 (by default created by Elastic Operator):
+ | **Username:** ``elastic``
+ | To get default password:
+ | ``$ PASSWORD=$(kubectl get secret -n logging logging-es-elastic-user -o go-template='{{.data.elastic | base64decode}}')``
+ | ``$ echo $PASSWORD``
+
+How to increase replica of any index?
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: console
+
+   $ curl -k -u "elasticsearch:password123" -H 'Content-Type: application/json' \
+     -XPUT "https://10.10.120.211:9200/indexname*/_settings" \
+     -d '{ "index" : { "number_of_replicas" : "2" } }'
+
+Index Life
+^^^^^^^^^^
+**30 Days**
+
+Kibana Configuration
+--------------------
+
+Kibana Service details
+^^^^^^^^^^^^^^^^^^^^^^
+
+| **Service Name:** ``logging-kb-http``
+| **Service Port:** ``5601``
+| **Service Type:** ``ClusterIP``
+
+Nginx Configuration
+-------------------
+
+IP
+^^
+
+The node IP address, accessed over HTTPS. Ex: "https://10.10.120.211:32000"
+
+Nginx Setup Structure
+^^^^^^^^^^^^^^^^^^^^^
+
+.. image:: images/nginx.png
+
+Nginx Service details
+^^^^^^^^^^^^^^^^^^^^^
+
+| **Service Name:** ``nginx``
+| **Service Port:** ``32000``
+| **Service Type:** ``NodePort``
+
+Why NGINX is used?
+^^^^^^^^^^^^^^^^^^
+
+`Securing ELK using Nginx <https://logz.io/blog/securing-elk-nginx/>`_
+
+Nginx Configuration
+^^^^^^^^^^^^^^^^^^^
+
+**Path:** ``ansible-server/roles/logging/files/nginx/nginx-conf-cm.yaml``
+
+Fluentd Configuration - Clientside (Td-agent)
+---------------------------------------------
+
+Fluentd Setup Structure
+^^^^^^^^^^^^^^^^^^^^^^^
+
+.. image:: images/fluentd-cs.png
+
+Log collection paths
+^^^^^^^^^^^^^^^^^^^^
+
+- ``/tmp/result*/*.log``
+- ``/tmp/result*/*.dat``
+- ``/tmp/result*/*.csv``
+- ``/tmp/result*/stc-liveresults.dat.*``
+- ``/var/log/userspace*.log``
+- ``/var/log/sriovdp/*.log.*``
+- ``/var/log/pods/**/*.log``
+
+Logs sent to
+^^^^^^^^^^^^
+
+Another Fluentd instance, running in the K8s cluster at the jumphost (K8s master: 10.10.120.211).
+
+Td-agent logs
+^^^^^^^^^^^^^
+
+Path of td-agent logs: ``/var/log/td-agent/td-agent.log``
+
+Td-agent configuration
+^^^^^^^^^^^^^^^^^^^^^^
+
+| Path of conf file: ``/etc/td-agent/td-agent.conf``
+| **If any change is made to td-agent.conf, restart the td-agent service:** ``$ sudo service td-agent restart``
+
+Config Description
+^^^^^^^^^^^^^^^^^^
+
+- Get the logs from the collection paths
+- | Convert to this format
+  | {
+  |   msg: "log line"
+  |   log_path: "/file/path"
+  |   file: "file.name"
+  |   host: "pod12-node4"
+  | }
+- Send them to fluentd
+
+Fluentd Configuration - Serverside
+----------------------------------
+
+Fluentd Setup Structure
+^^^^^^^^^^^^^^^^^^^^^^^
+
+.. image:: images/fluentd-ss.png
+
+Fluentd Service details
+^^^^^^^^^^^^^^^^^^^^^^^
+
+| **Service Name:** ``fluentd``
+| **Service Port:** ``32224``
+| **Service Type:** ``NodePort``
+
+Logs sent to
+^^^^^^^^^^^^
+Elasticsearch service (Example: logging-es-http at port 9200)
+
+Config Description
+^^^^^^^^^^^^^^^^^^
+
+- **Step 1**
+ - Get the logs from Node1 & Node4
+- **Step 2**
+ ======================================== ======================
+ log_path add tag (for routing)
+ ======================================== ======================
+ ``/tmp/result.*/.*errors.dat`` errordat.log
+ ``/tmp/result.*/.*counts.dat`` countdat.log
+ ``/tmp/result.*/stc-liveresults.dat.tx`` stcdattx.log
+ ``/tmp/result.*/stc-liveresults.dat.rx`` stcdatrx.log
+ ``/tmp/result.*/.*Statistics.csv`` ixia.log
+ ``/tmp/result.*/vsperf-overall*`` vsperf.log
+ ``/tmp/result.*/vswitchd*`` vswitchd.log
+ ``/var/log/userspace*`` userspace.log
+ ``/var/log/sriovdp*`` sriovdp.log
+ ``/var/log/pods*`` pods.log
+ ======================================== ======================
+
+- **Step 3**
+ Then parse each type using tags.
+ - error.conf: to find any error
+ - time-series.conf: to parse time series data
+  - time-analysis.conf: to calculate time analysis
+- **Step 4**
+ ================================ ======================
+ host add tag (for routing)
+ ================================ ======================
+ ``pod12-node4`` node4
+ ``worker`` node1
+ ================================ ======================
+- **Step 5**
+ ================================ ======================
+ Tag elasticsearch
+ ================================ ======================
+ ``node4`` index “node4*”
+ ``node1`` index “node1*”
+ ================================ ======================
+
+Elastalert
+==========
+
+Send alert if
+-------------
+
+- Blacklist
+ - "Failed to run test"
+ - "Failed to execute in '30' seconds"
+ - "('Result', 'Failed')"
+ - "could not open socket: connection refused"
+ - "Input/output error"
+ - "dpdk|ERR|EAL: Error - exiting with code: 1"
+ - "Failed to execute in '30' seconds"
+ - "dpdk|ERR|EAL: Driver cannot attach the device"
+ - "dpdk|EMER|Cannot create lock on"
+ - "dpdk|ERR|VHOST_CONFIG: * device not found"
+- Time
+ - vswitch_duration > 3 sec
+
+How to configure alert?
+-----------------------
+
+- Add your rule in ``ansible/roles/logging/files/elastalert/ealert-rule-cm.yaml`` (`Elastalert Rule Config <https://elastalert.readthedocs.io/en/latest/ruletypes.html>`_):
+
+  | name: anything
+  | type: <check-above-link>  # the RuleType to use
+  | index: node4*  # index name
+  | realert:
+  |   minutes: 0  # to get an alert for all cases after each interval
+  | alert: post  # to send the alert as an HTTP POST
+  | http_post_url: # provide the receiver URL
+
+- Mount this file to the elastalert pod in ``ansible/roles/logging/files/elastalert/elastalert.yaml``.
+
+Alert Format
+------------
+
+{"type": "pattern-match", "label": "failed", "index": "node4-20200815", "log": "error-log-line", "log-path": "/tmp/result/file.log", "reson": "error-message" }
+
+Data Management
+===============
+
+Elasticsearch
+-------------
+
+Q&As
+^^^^
+
+Where is the data stored now?
+Data is stored on an NFS server with 1 replica of each index (default). The data paths are the following:
+
+ - ``/srv/nfs/data (VM1)``
+ - ``/srv/nfs/data (VM2)``
+ - ``/srv/nfs/data (VM3)``
+ - ``/srv/nfs/master (VM1)``
+ - ``/srv/nfs/master (VM2)``
+ - ``/srv/nfs/master (VM3)``
+
+If a user wants to change from NFS to local storage, can they do it?
+Yes; the persistent volume needs to be reconfigured (``ansible-server/roles/logging/files/persistentVolume.yaml``).
+
+Do we have a backup of the data?
+Yes, 1 replica of each index.
+
+Is the data still accessible when K8s restarts?
+Yes (if the data has not been deleted from /srv/nfs/data)
+
+Troubleshooting
+===============
+
+If Elasticsearch is not receiving logs
+---------------------------------------
+
+- Check IP & port of server-fluentd in client config.
+- Check client-fluentd logs, ``$ sudo tail -f /var/log/td-agent/td-agent.log``
+- Check server-fluentd logs, ``$ sudo kubectl logs -n logging <fluentd-pod-name>``
+
+If no notification received
+---------------------------
+
+- Search your "log" in Elasticsearch.
+- Check config of elastalert
+- Check IP of alert-receiver
+
+Reference
+=========
+- `Elastic cloud on K8s <https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-quickstart.html>`_
+- `HA Elasticsearch on K8s <https://www.elastic.co/blog/high-availability-elasticsearch-on-kubernetes-with-eck-and-gke>`_
+- `Fluentd Configuration <https://docs.fluentd.org/configuration/config-file>`_
+- `Elastalert Rule Config <https://elastalert.readthedocs.io/en/latest/ruletypes.html>`_
diff --git a/docs/lma/metrics/devguide.rst b/docs/lma/metrics/devguide.rst
new file mode 100644
index 00000000..40162397
--- /dev/null
+++ b/docs/lma/metrics/devguide.rst
@@ -0,0 +1,469 @@
+=======================
+Metrics Developer Guide
+=======================
+
+Ansible File Organization
+=========================
+
+Ansible-Server
+--------------
+
+Please follow this file structure:
+
+.. code-block:: bash
+
+ ansible-server
+ | ansible.cfg
+ | hosts
+ |
+ +---group_vars
+ | all.yml
+ |
+ +---playbooks
+ | clean.yaml
+ | setup.yaml
+ |
+ \---roles
+ +---clean-monitoring
+ | \---tasks
+ | main.yml
+ |
+ +---monitoring
+ +---files
+ | | monitoring-namespace.yaml
+ | |
+ | +---alertmanager
+ | | alertmanager-config.yaml
+ | | alertmanager-deployment.yaml
+ | | alertmanager-service.yaml
+ | | alertmanager1-deployment.yaml
+ | | alertmanager1-service.yaml
+ | |
+ | +---cadvisor
+ | | cadvisor-daemonset.yaml
+ | | cadvisor-service.yaml
+ | |
+ | +---collectd-exporter
+ | | collectd-exporter-deployment.yaml
+ | | collectd-exporter-service.yaml
+ | |
+ | +---grafana
+ | | grafana-datasource-config.yaml
+ | | grafana-deployment.yaml
+ | | grafana-pv.yaml
+ | | grafana-pvc.yaml
+ | | grafana-service.yaml
+ | |
+ | +---kube-state-metrics
+ | | kube-state-metrics-deployment.yaml
+ | | kube-state-metrics-service.yaml
+ | |
+ | +---node-exporter
+ | | nodeexporter-daemonset.yaml
+ | | nodeexporter-service.yaml
+ | |
+ | \---prometheus
+ | main-prometheus-service.yaml
+ | prometheus-config.yaml
+ | prometheus-deployment.yaml
+ | prometheus-pv.yaml
+ | prometheus-pvc.yaml
+ | prometheus-service.yaml
+ | prometheus1-deployment.yaml
+ | prometheus1-service.yaml
+ |
+ \---tasks
+ main.yml
+
+
+Ansible - Client
+----------------
+
+Please follow this file structure:
+
+.. code-block:: bash
+
+    ansible-client
+ | ansible.cfg
+ | hosts
+ |
+ +---group_vars
+ | all.yml
+ |
+ +---playbooks
+ | clean.yaml
+ | setup.yaml
+ |
+ \---roles
+ +---clean-collectd
+ | \---tasks
+ | main.yml
+ |
+ +---collectd
+ +---files
+ | collectd.conf.j2
+ |
+ \---tasks
+ main.yml
+
+
+Summary of Roles
+================
+
+A brief description of the Ansible playbook roles,
+which are used to deploy the monitoring cluster
+
+Ansible Server Roles
+--------------------
+
+This part consists of the roles used to deploy the
+Prometheus-Alertmanager-Grafana (PAG) stack on the server side.
+
+Role: Monitoring
+~~~~~~~~~~~~~~~~
+
+Deployment and configuration of PAG stack along with collectd-exporter,
+cadvisor and node-exporter.
+
+Role: Clean-Monitoring
+~~~~~~~~~~~~~~~~~~~~~~
+
+Removes all the components deployed by the Monitoring role.
+
+
+File-Task Mapping and Configurable Parameters
+================================================
+
+Ansible Server
+----------------
+
+Role: Monitoring
+~~~~~~~~~~~~~~~~~~~
+
+Alert Manager
+^^^^^^^^^^^^^^^
+
+File: alertmanager-config.yaml
+'''''''''''''''''''''''''''''''''
+Path : monitoring/files/alertmanager/alertmanager-config.yaml
+
+Task: Configures Receivers for alertmanager
+
+Summary: A configmap, currently configures webhook for alertmanager,
+can be used to configure any kind of receiver
+
+Configurable Parameters:
+ receiver.url: change to the webhook receiver's URL
+ route: Can be used to add receivers
+
+
+File: alertmanager-deployment.yaml
+''''''''''''''''''''''''''''''''''
+Path : monitoring/files/alertmanager/alertmanager-deployment.yaml
+
+Task: Deploys alertmanager instance
+
+Summary: A Deployment, deploys 1 replica of alertmanager
+
+
+File: alertmanager-service.yaml
+'''''''''''''''''''''''''''''''''
+Path : monitoring/files/alertmanager/alertmanager-service.yaml
+
+Task: Creates a K8s service for alertmanager
+
+Summary: A Nodeport type of service, so that user can create "silences",
+view the status of alerts from the native alertmanager dashboard / UI.
+
+Configurable Parameters:
+ spec.type: Options : NodePort, ClusterIP, LoadBalancer
+ spec.ports: Edit / add ports to be handled by the service
+
+**Note: alertmanager1-deployment, alertmanager1-service are the same as
+alertmanager-deployment and alertmanager-service respectively.**
+
+CAdvisor
+^^^^^^^^^^^
+
+File: cadvisor-daemonset.yaml
+'''''''''''''''''''''''''''''''''
+Path : monitoring/files/cadvisor/cadvisor-daemonset.yaml
+
+Task: To create a cadvisor daemonset
+
+Summary: A daemonset, used to scrape data of the Kubernetes cluster itself;
+since it is a daemonset, an instance runs on every node.
+
+Configurable Parameters:
+ spec.template.spec.ports: Port of the container
+
+
+File: cadvisor-service.yaml
+'''''''''''''''''''''''''''''''''
+Path : monitoring/files/cadvisor/cadvisor-service.yaml
+
+Task: To create a cadvisor service
+
+Summary: A ClusterIP service for cadvisor to communicate with prometheus
+
+Configurable Parameters:
+ spec.ports: Add / Edit ports
+
+
+Collectd Exporter
+^^^^^^^^^^^^^^^^^^^^
+
+File: collectd-exporter-deployment.yaml
+''''''''''''''''''''''''''''''''''''''''''
+Path : monitoring/files/collectd-exporter/collectd-exporter-deployment.yaml
+
+Task: To create a collectd replica
+
+Summary: A deployment, acts as receiver for collectd data sent by client machines,
+prometheus pulls data from this exporter
+
+Configurable Parameters:
+ spec.template.spec.ports: Port of the container
+
+
+File: collectd-exporter-service.yaml
+''''''''''''''''''''''''''''''''''''
+Path : monitoring/files/collectd-exporter/collectd-exporter-service.yaml
+
+Task: To create a collectd service
+
+Summary: A NodePort service for collectd-exporter to hold data for prometheus
+to scrape
+
+Configurable Parameters:
+ spec.ports: Add / Edit ports
+
+
+Grafana
+^^^^^^^^^
+
+File: grafana-datasource-config.yaml
+''''''''''''''''''''''''''''''''''''''''''
+Path : monitoring/files/grafana/grafana-datasource-config.yaml
+
+Task: To create config file for grafana
+
+Summary: A configmap, adds prometheus datasource in grafana
+
+
+File: grafana-deployment.yaml
+'''''''''''''''''''''''''''''''''
+Path : monitoring/files/grafana/grafana-deployment.yaml
+
+Task: To create a grafana deployment
+
+Summary: The grafana deployment creates a single replica of grafana,
+with preconfigured prometheus datasource.
+
+Configurable Parameters:
+ spec.template.spec.ports: Edit ports
+ spec.template.spec.env: Add / Edit environment variables
+
+
+File: grafana-pv.yaml
+'''''''''''''''''''''''''''''''''
+Path : monitoring/files/grafana/grafana-pv.yaml
+
+Task: To create a persistent volume for grafana
+
+Summary: A persistent volume for grafana.
+
+Configurable Parameters:
+ spec.capacity.storage: Increase / decrease size
+ spec.accessModes: To change the way PV is accessed.
+ spec.nfs.server: To change the ip address of NFS server
+ spec.nfs.path: To change the path of the server
+
+
+File: grafana-pvc.yaml
+'''''''''''''''''''''''''''''''''
+Path : monitoring/files/grafana/grafana-pvc.yaml
+
+Task: To create a persistent volume claim for grafana
+
+Summary: A persistent volume claim for grafana.
+
+Configurable Parameters:
+ spec.resources.requests.storage: Increase / decrease size
+
+
+File: grafana-service.yaml
+'''''''''''''''''''''''''''''''''
+Path : monitoring/files/grafana/grafana-service.yaml
+
+Task: To create a service for grafana
+
+Summary: A Nodeport type of service, so that users can actually connect to and
+view the dashboard / UI.
+
+Configurable Parameters:
+ spec.type: Options : NodePort, ClusterIP, LoadBalancer
+ spec.ports: Edit / add ports to be handled by the service
+
+
+Kube State Metrics
+^^^^^^^^^^^^^^^^^^^^
+
+File: kube-state-metrics-deployment.yaml
+''''''''''''''''''''''''''''''''''''''''
+Path : monitoring/files/kube-state-metrics/kube-state-metrics-deployment.yaml
+
+Task: To create a kube-state-metrics instance
+
+Summary: A deployment, used to collect metrics of the Kubernetes cluster itself
+
+Configurable Parameters:
+ spec.template.spec.containers.ports: Port of the container
+
+
+File: kube-state-metrics-service.yaml
+'''''''''''''''''''''''''''''''''''''
+Path : monitoring/files/kube-state-metrics/kube-state-metrics-service.yaml
+
+Task: To create a kube-state-metrics service
+
+Summary: A service exposing kube-state-metrics, so that prometheus
+can scrape its data
+
+Configurable Parameters:
+ spec.ports: Add / Edit ports
+
+
+Node Exporter
+^^^^^^^^^^^^^^^
+
+File: node-exporter-daemonset.yaml
+''''''''''''''''''''''''''''''''''
+Path : monitoring/files/node-exporter/node-exporter-daemonset.yaml
+
+Task: To create a node exporter daemonset
+
+Summary: A daemonset, used to scrape data of the host machines / nodes;
+since it is a daemonset, an instance runs on every node.
+
+Configurable Parameters:
+ spec.template.spec.ports: Port of the container
+
+
+File: node-exporter-service.yaml
+'''''''''''''''''''''''''''''''''
+Path : monitoring/files/node-exporter/node-exporter-service.yaml
+
+Task: To create a node exporter service
+
+Summary: A ClusterIP service for node exporter to communicate with Prometheus
+
+Configurable Parameters:
+ spec.ports: Add / Edit ports
+
+
+Prometheus
+^^^^^^^^^^^^^
+
+File: prometheus-config.yaml
+''''''''''''''''''''''''''''''''''''''''''
+Path : monitoring/files/prometheus/prometheus-config.yaml
+
+Task: To create a config file for Prometheus
+
+Summary: A configmap, adds alert rules.
+
+Configurable Parameters:
+ data.alert.rules: Add / Edit alert rules
+
+
+File: prometheus-deployment.yaml
+'''''''''''''''''''''''''''''''''
+Path : monitoring/files/prometheus/prometheus-deployment.yaml
+
+Task: To create a Prometheus deployment
+
+Summary: The Prometheus deployment creates a single replica of Prometheus,
+with preconfigured Prometheus datasource.
+
+Configurable Parameters:
+ spec.template.spec.affinity: To change the node affinity,
+ make sure only 1 instance of prometheus is
+ running on 1 node.
+
+ spec.template.spec.ports: Add / Edit container port
+
+
+File: prometheus-pv.yaml
+'''''''''''''''''''''''''''''''''
+Path : monitoring/files/prometheus/prometheus-pv.yaml
+
+Task: To create a persistent volume for Prometheus
+
+Summary: A persistent volume for Prometheus.
+
+Configurable Parameters:
+ spec.capacity.storage: Increase / decrease size
+ spec.accessModes: To change the way PV is accessed.
+ spec.hostpath.path: To change the path of the volume
+
+
+File: prometheus-pvc.yaml
+'''''''''''''''''''''''''''''''''
+Path : monitoring/files/prometheus/prometheus-pvc.yaml
+
+Task: To create a persistent volume claim for Prometheus
+
+Summary: A persistent volume claim for Prometheus.
+
+Configurable Parameters:
+ spec.resources.requests.storage: Increase / decrease size
+
+
+File: prometheus-service.yaml
+'''''''''''''''''''''''''''''''''
+Path : monitoring/files/prometheus/prometheus-service.yaml
+
+Task: To create a service for prometheus
+
+Summary: A Nodeport type of service; the prometheus native dashboard is
+available here.
+
+Configurable Parameters:
+ spec.type: Options : NodePort, ClusterIP, LoadBalancer
+ spec.ports: Edit / add ports to be handled by the service
+
+
+File: main-prometheus-service.yaml
+'''''''''''''''''''''''''''''''''''
+Path: monitoring/files/prometheus/main-prometheus-service.yaml
+
+Task: A service that connects both prometheus instances.
+
+Summary: A Nodeport service for other services to connect to the Prometheus cluster.
+As HA Prometheus needs two independent instances of Prometheus scraping the same
+inputs with the same configuration.
+
+**Note: prometheus1-deployment and prometheus1-service are the same as
+prometheus-deployment and prometheus-service respectively.**
+
+
+Ansible Client Roles
+----------------------
+
+Role: Collectd
+~~~~~~~~~~~~~~~~~~
+
+File: main.yml
+^^^^^^^^^^^^^^^^
+Path: collectd/tasks/main.yml
+
+Task: Install collectd along with prerequisites
+
+Associated template file:
+
+collectd.conf.j2
+Path: collectd/files/collectd.conf.j2
+
+Summary: Edit this file to change the default configuration to
+be installed on the client's machine
diff --git a/docs/lma/metrics/images/dataflow.png b/docs/lma/metrics/images/dataflow.png
new file mode 100644
index 00000000..ca1ec908
--- /dev/null
+++ b/docs/lma/metrics/images/dataflow.png
Binary files differ
diff --git a/docs/lma/metrics/images/setup.png b/docs/lma/metrics/images/setup.png
new file mode 100644
index 00000000..ce6a1274
--- /dev/null
+++ b/docs/lma/metrics/images/setup.png
Binary files differ
diff --git a/docs/lma/metrics/userguide.rst b/docs/lma/metrics/userguide.rst
new file mode 100644
index 00000000..eae336d7
--- /dev/null
+++ b/docs/lma/metrics/userguide.rst
@@ -0,0 +1,226 @@
+==================
+Metrics User Guide
+==================
+
+Setup
+=======
+
+Prerequisites
+-------------------------
+- Requires 3 VMs to set up K8s
+- ``$ sudo yum install ansible``
+- ``$ pip install openshift pyyaml kubernetes`` (required for ansible K8s module)
+- Update IPs in all these files (if changed)
+ - ``ansible-server/group_vars/all.yml`` (IP of apiserver and hostname)
+ - ``ansible-server/hosts`` (IP of VMs to install)
+ - ``ansible-server/roles/monitoring/files/grafana/grafana-pv.yaml`` (IP of NFS-Server)
+ - ``ansible-server/roles/monitoring/files/alertmanager/alertmanager-config.yaml`` (IP of alert-receiver)
+
+Setup Structure
+---------------
+.. image:: images/setup.png
+
+Installation - Client Side
+----------------------------
+
+Nodes
+`````
+- **Node1** = 10.10.120.21
+- **Node4** = 10.10.120.24
+
+How is the installation done?
+``````````````````````````````
+Ansible playbook available in ``tools/lma/ansible-client`` folder
+
+- ``cd tools/lma/ansible-client``
+- ``ansible-playbook setup.yaml``
+
+This deploys collectd and configures it to send data to collectd exporter
+configured at 10.10.120.211 (ip address of current instance of collectd-exporter)
+Please make appropriate changes in the config file present in ``tools/lma/ansible-client/roles/collectd/files/``
+
+Installation - Server Side
+----------------------------
+
+Nodes
+``````
+
+Inside Jumphost - POD12
+ - **VM1** = 10.10.120.211
+ - **VM2** = 10.10.120.203
+ - **VM3** = 10.10.120.204
+
+
+How is the installation done?
+``````````````````````````````
+**Using Ansible:**
+ - **K8s**
+ - **Prometheus:** 2 independent deployments
+ - **Alertmanager:** 2 independent deployments (cluster peers)
+ - **Grafana:** 1 Replica deployment
+      - **cAdvisor:** 1 daemonset, i.e. 3 replicas, one on each node
+ - **collectd-exporter:** 1 Replica
+ - **node-exporter:** 1 statefulset with 3 replicas
+ - **kube-state-metrics:** 1 deployment
+      - **NFS Server:** at each VM to store Grafana data at the following path
+ - ``/usr/share/monitoring_data/grafana``
+
+How to setup?
+`````````````
+- **To set up K8s cluster, EFK and PAG:** Run the ansible-playbook ``ansible/playbooks/setup.yaml``
+- **To clean everything:** Run the ansible-playbook ``ansible/playbooks/clean.yaml``
+
+Do we have HA?
+````````````````
+Yes
+
+Configuration
+=============
+
+K8s
+---
+Path to all yamls (Server Side)
+````````````````````````````````
+``tools/lma/ansible-server/roles/monitoring/files/``
+
+K8s namespace
+`````````````
+``monitoring``
+
+Configuration
+---------------------------
+
+Services and Ports
+``````````````````````````
+
+Services and their ports are listed below;
+one can go to the IP of any node on the following ports,
+and the service will correctly redirect you.
+
+
+ ====================== =======
+ Service Port
+ ====================== =======
+ Prometheus 30900
+ Prometheus1 30901
+ Main-Prometheus 30902
+ Alertmanager 30930
+ Alertmanager1 30931
+ Grafana 30000
+ Collectd-exporter 30130
+ ====================== =======
+
+How to change Configuration?
+------------------------------
+- Ports, names of the containers, pretty much every configuration can be modified by changing the required values in the respective yaml files (``/tools/lma/ansible-server/roles/monitoring/``)
+- For metrics, on the client's machine, edit the collectd's configuration (jinja2 template) file, and add required plugins (``/tools/lma/ansible-client/roles/collectd/files/collectd.conf.j2``).
+  For more details, refer to `this guide <https://collectd.org/wiki/index.php/First_steps>`_.
+
+Where to send metrics?
+------------------------
+
+Metrics are sent to collectd exporter.
+UDP packets are sent to port 38026
+(can be configured and checked at
+``tools/lma/ansible-server/roles/monitoring/files/collectd-exporter/collectd-exporter-deployment.yaml``)
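+
+A quick reachability check of that UDP port from a client machine can be done
+with a few lines of Python (an illustrative sketch only; the exporter address
+below is the one used in this setup, and the probe payload is arbitrary, it
+merely verifies network routing, not the collectd protocol):
+
+.. code-block:: python
+
+   import socket
+
+   # collectd-exporter endpoint as configured above
+   EXPORTER = ('10.10.120.211', 38026)
+
+   sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+   sock.sendto(b'test', EXPORTER)  # fire-and-forget UDP datagram
+   # watch it arrive on the server side, e.g. with tcpdump (see Troubleshooting)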
+
+Data Management
+================================
+
+DataFlow:
+--------------
+.. image:: images/dataflow.png
+
+Where is the data stored now?
+----------------------------------
+ - Grafana data (including dashboards) ==> On master, at ``/usr/share/monitoring_data/grafana`` (it is accessed by a Persistent Volume via NFS)
+ - Prometheus Data ==> On VM2 and VM3, at /usr/share/monitoring_data/prometheus
+
+ **Note: The data of the two Prometheus instances are also independent of each other; a shared-data solution gave errors**
+
+Do we have backup of data?
+-------------------------------
+    The two Prometheus instances, even though independent, scrape the same targets and
+    have the same alert rules, and therefore generate very similar data.
+
+    Grafana's NFS part of the data has no backup.
+    The dashboards' JSON files are available in the ``/tools/lma/metrics/dashboards`` directory.
+
+Is the data still accessible when containers are restarted?
+-----------------------------------------------------------------
+ Yes, unless the data directories are deleted ``(/usr/share/monitoring_data/*)`` from each node
+
+Alert Management
+==================
+
+Configure Alert receiver
+--------------------------
+- Go to file ``/tools/lma/ansible-server/roles/monitoring/files/alertmanager/alertmanager-config.yaml``
+- Under the config.yml section, under receivers, add, update, or delete receivers
+- Currently the IP of the unified alert receiver is used.
+- Alertmanager supports multiple types of receivers; you can get a `list here <https://prometheus.io/docs/alerting/latest/configuration/>`_
+
+Add new alerts
+--------------------------------------
+- Go to file ``/tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus-config.yaml``
+- Under the data section, the alert.rules file is mounted in the config-map.
+- In this file, alerts are divided into 4 groups, namely:
+ - targets
+ - host and hardware
+ - container
+ - kubernetes
+- Add alerts under an existing group or add a new group. Please follow the structure of the file when adding a new group
+- To add a new alert, use the following structure:
+
+  | alert: alertname
+  | expr: alert rule (generally a promql conditional query)
+  | for: time-range (eg. 5m, 10s, etc.; the amount of time the condition needs to be true for the alert to be triggered)
+  | labels:
+  |   severity: critical (other severity options and other labels can be added here)
+  |   type: hardware
+  | annotations:
+  |   summary: <summary of the alert>
+  |   description: <describe the alert here>
+
+- For an exhaustive alerts list you can have a look `here <https://awesome-prometheus-alerts.grep.to/>`_
+
+Troubleshooting
+===============
+No metrics received in Grafana plot
+---------------------------------------------
+- Check if all configurations are correctly done.
+- Go to Main-Prometheus's port on any one VM's IP, and check if Prometheus is getting the metrics
+- If Prometheus is getting them, read Grafana's logs (``kubectl -n monitoring logs <name_of_grafana_pod>``)
+- Else, have a look at collectd-exporter's metrics endpoint (eg. 10.10.120.211:30130/metrics)
+- If collectd-exporter is getting them, check that collectd-exporter's IP is correct in Prometheus's config file.
+- Else ssh to the master and check on which node collectd-exporter is scheduled (let's say VM2)
+- Now ssh to VM2
+- Use ``tcpdump -i ens3 > testdump`` (ens3 being the interface used to connect to the internet)
+- Grep for your client node's IP and check if packets are reaching the monitoring cluster (``cat testdump | grep <ip of client>``)
+- Ideally you should see packets reaching the node; if so, check whether collectd-exporter is running correctly and read its logs.
+- If no packets are received, the error is on the client side; check collectd's config file and make sure the correct collectd-exporter IP is used in the ``<network>`` section.
+
+If no notification received
+---------------------------
+- Go to Main-Prometheus's port on any one VM's IP (eg. 10.10.120.211:30902) and check if Prometheus is getting the metrics
+- If not, read the "No metrics received in Grafana plot" section; else read ahead.
+- Check the IP of the alert-receiver; you can see this by going to alertmanager-ip:port and checking if Alertmanager is configured correctly.
+- If yes, paste the alert rule in the Prometheus query-box and see if any metric satisfies the condition.
+- You may need to change the alert rules in the alert.rules section of prometheus-config.yaml if there was a bug in the alert's rule (please read the "Add new alerts" section for detailed instructions).
+
+Reference
+=========
+- `Prometheus K8S deployment <https://www.metricfire.com/blog/how-to-deploy-prometheus-on-kubernetes/>`_
+- `HA Prometheus <https://prometheus.io/docs/introduction/faq/#can-prometheus-be-made-highly-available>`_
+- `Data Flow Diagram <https://drive.google.com/file/d/1D--LXFqU_H-fqpD57H3lJFOqcqWHoF0U/view?usp=sharing>`_
+- `Collectd Configuration <https://docs.opnfv.org/en/stable-fraser/submodules/barometer/docs/release/userguide/docker.userguide.html#build-the-collectd-docker-image>`_
+- `Alertmanager Rule Config <https://awesome-prometheus-alerts.grep.to/>`_
diff --git a/docs/openstack/index.rst b/docs/openstack/index.rst
new file mode 100644
index 00000000..6009e669
--- /dev/null
+++ b/docs/openstack/index.rst
@@ -0,0 +1,39 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) OPNFV, Spirent Communications, AT&T, Ixia and others.
+
+.. OPNFV VSPERF With Openstack master file.
+
+***************************
+OPNFV VSPERF with OPENSTACK
+***************************
+
+Introduction
+------------
+VSPERF performs the following when run with Openstack:
+
+1. Connect to Openstack (using the credentials)
+2. Deploy Traffic-Generators in a required way (defined by scenarios)
+3. Update the VSPERF configuration based on the deployment.
+4. Use the updated configuration to run the test in "Trafficgen" mode.
+5. Publish and store results.
+
+
+What to Configure?
+^^^^^^^^^^^^^^^^^^
+The configurable parameters are provided in *conf/11_openstack.conf*. They are:
+
+1. Access to Openstack Environment: Auth-URL, Username, Password, Project and Domain IDs/Name.
+2. VM Details - Name, Flavor, External-Network.
+3. Scenario - How many compute nodes to use, and how many traffic-generator instances to deploy.
+
+Users can customize these parameters. Assume the customized values are placed in an *openstack.conf* file; this file will be used to run the test. An illustrative sketch of such a file is shown below.
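+
+A minimal sketch (the parameter names below are hypothetical placeholders for
+illustration only; consult *conf/11_openstack.conf* for the authoritative
+names and defaults):
+
+.. code-block:: python
+
+   # Access to the Openstack environment (hypothetical names)
+   OS_AUTH_URL = 'http://controller:5000/v3'
+   OS_USERNAME = 'admin'
+   OS_PASSWORD = 'secret'
+   OS_PROJECT_NAME = 'vsperf'
+   # VM details and deployment scenario (hypothetical names)
+   VM_FLAVOR = 'm1.large'
+   EXTERNAL_NETWORK = 'public'
+   COMPUTE_NODES = 2
+   TRAFFICGEN_INSTANCES = 2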
+
+How to run?
+^^^^^^^^^^^
+Add the --openstack flag as shown below:
+
+.. code-block:: console
+
+ vsperf --openstack --conf-file openstack.conf phy2phy_tput
+
diff --git a/docs/release/release-notes/release-notes.rst b/docs/release/release-notes/release-notes.rst
index 46eb74c0..486beaf0 100644
--- a/docs/release/release-notes/release-notes.rst
+++ b/docs/release/release-notes/release-notes.rst
@@ -1,6 +1,126 @@
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. http://creativecommons.org/licenses/by/4.0
-.. (c) OPNFV, Intel Corporation, AT&T and others.
+.. (c) OPNFV, Intel Corporation, Spirent Communications, AT&T and others.
+
+OPNFV Jerma Release
+===================
+
+* Supported Versions - DPDK:18.11, OVS:2.12.0, VPP:19.08.1, QEMU:3.1.1, Trex:2.86
+
+* Supported Release-Requirements.
+
+ * RELREQ-6 - Openstack dataplane performance benchmarking.
+ * RELREQ-9 - Kubernetes container-networking benchmarking.
+
+* Additional Features
+
+ * OPNFV Xtesting integration - Baremetal and Openstack.
+ * Analytics of metrics and logs using Jupyter notebooks.
+ * Custom Alarms from both metrics and logs.
+ * Container metrics collection.
+
+* Traffic Generators.
+
+ * Ixia - Support for using multiple instances of Traffic-generator.
+ * Ixia - Live results support (real-time collection and storage)
+ * TRex - ETSI-NFV GS-TST009 binary search with loss-verification support.
+
+* New Tools
+
+ * Kubernetes cluster deployment.
+ * TestVNF deployment in Openstack.
+ * Server-side telemetry collection from the test-environment.
+ * Version-1 of multi-dimensional TestVNF.
+
+* Multiple bugfixes and minor improvements
+
+ * matplotlib version and log-dump.
+ * VPP socket paths.
+ * Newer version of some python packages.
+
+
+OPNFV Iruya Release
+====================
+
+* Supported Versions - DPDK:18.11, OVS:2.12.0, VPP:19.08.1, QEMU:3.1.1
+* Few bugfixes and minor improvements
+
+* New Feature: Containers to manage VSPERF.
+
+ * VSPERF Containers for both deployment and test runs
+
+* Improvement
+
+ * Results Analysis to include all 5 types of data.
+
+ * Infrastructure data
+ * End-Of-Test Results
+ * Live-Results
+ * Events from VSPERF Logs
+ * Test Environment
+
+* Usability
+
+ * Configuration Wizard tool.
+
+
+OPNFV Hunter Release
+====================
+
+* Supported Versions - DPDK:17.08, OVS:2.8.1, VPP:17.07, QEMU:2.9.1
+* Few bugfixes and minor improvements
+
+* Traffic Generators
+
+ * Spirent - Live Results Support.
+ * T-Rex - Live Results Support.
+
+* Improvement
+
+ * Results container to receive logs from Logstash/Fluentd.
+
+* CI
+
+ * Bug Fixes.
+
+
+OPNFV Gambia Release
+====================
+
+* Supported Versions - DPDK:17.08, OVS:2.8.1, VPP:17.07, QEMU:2.9.1
+* Several bugfixes and minor improvements
+
+* Documentation
+
+ * Spirent Latency histogram documentation
+
+* Virtual-Switches
+
+ * OVS-Enhancement: default bridge name and offload support.
+ * OVS-Enhancement: proper deletion of flows and bridges after stop.
+ * VSPERF-vSwitch Architecture Improvement
+
+* Tools
+
+ * Pidstat improvements
+
+* Traffic Generators
+
+ * Xena Enhancements - multi-flow and stability.
+ * T-Rex Additions - burst traffic, scapy frame, customized scapy version.
+ * Ixia: Script enhancements.
+ * Spirent: Latency-histogram support included
+
+* Tests
+
+ * Continuous stream testcase
+ * Tunnelling protocol support
+ * Custom statistics
+ * Refactoring integration testcases
+
+* CI
+
+ * Reduced daily testscases
OPNFV Fraser Release
====================
diff --git a/docs/testing/developer/devguide/design/vswitchperf_design.rst b/docs/testing/developer/devguide/design/vswitchperf_design.rst
index bc54476c..5fa892e0 100644
--- a/docs/testing/developer/devguide/design/vswitchperf_design.rst
+++ b/docs/testing/developer/devguide/design/vswitchperf_design.rst
@@ -23,7 +23,7 @@ Example Connectivity to DUT
Establish connectivity to the VSPERF DUT Linux host. If this is in an OPNFV lab
following the steps provided by `Pharos <https://www.opnfv.org/community/projects/pharos>`_
-to `access the POD <https://wiki.opnfv.org/display/pharos/Pharos+Lab+Support>`_
+to `access the POD <https://wiki.opnfv.org/display/INF/INFRA+Lab+Support>`_
The following steps establish the VSPERF environment.
@@ -477,6 +477,20 @@ Detailed description of ``TRAFFIC`` dictionary items follows:
'enabled' - Specifies if the histogram provisioning is enabled or not.
'type' - Defines how histogram is provided. Currently only 'Default' is defined.
'Default' - Default histogram as provided by the Traffic-generator.
+ 'imix' - A dictionary for IMIX Specification.
+ 'enabled' - Specifies if IMIX is enabled or NOT.
+ 'type' - The specification type - denotes how IMIX is specified.
+ Currently only 'genome' type is defined.
+ Other types (ex: table-of-proportions) can be added in future.
+ 'genome' - The genome encoding of packet sizes and their ratio for IMIX.
+ The ratio is inferred from the number of occurrences of each
+ genome character. Genome encoding is described in RFC 6985; this
+ specification is closest to the method described in section 6.2
+ of RFC 6985.
+ Ex: 'aaaaaaaddddg' denotes a 7:4:1 ratio of packet sizes 64:512:1518.
+ Note: The exact sequence is not maintained, only the ratio of packets
+ is ensured.
+ Data type: str
+ Default Value: 'aaaaaaaddddg'
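+
+A minimal sketch of decoding a genome string into packet sizes and their
+ratio follows; the character-to-size mapping is an illustrative assumption
+based on the example above, not the traffic generator's implementation:
+
+.. code-block:: python
+
+    from collections import Counter
+
+    # assumed mapping of genome characters to frame sizes in bytes
+    SIZE_MAP = {'a': 64, 'd': 512, 'g': 1518}
+
+    def decode_genome(genome='aaaaaaaddddg'):
+        """Return {frame_size: count} ratio implied by a genome string."""
+        counts = Counter(genome)
+        return {SIZE_MAP[ch]: cnt for ch, cnt in counts.items()}
+
+    # decode_genome() -> {64: 7, 512: 4, 1518: 1}, i.e. a 7:4:1 ratio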
.. _configuration-of-guest-options:
diff --git a/docs/testing/developer/devguide/index.rst b/docs/testing/developer/devguide/index.rst
index 49659792..64a4758c 100644
--- a/docs/testing/developer/devguide/index.rst
+++ b/docs/testing/developer/devguide/index.rst
@@ -31,7 +31,7 @@ new techniques together. A new IETF benchmarking specification (RFC8204) is base
2015. VSPERF is also contributing to development of ETSI NFV test specifications through the Test and Open Source
Working Group.
-* Wiki: https://wiki.opnfv.org/characterize_vswitch_performance_for_telco_nfv_use_cases
+* Wiki: https://wiki.opnfv.org/display/vsperf
* Repository: https://git.opnfv.org/vswitchperf
* Artifacts: https://artifacts.opnfv.org/vswitchperf.html
* Continuous Integration: https://build.opnfv.org/ci/view/vswitchperf/
@@ -43,7 +43,6 @@ Design Guides
.. toctree::
:caption: Traffic Gen Integration, VSPERF Design, Test Design, Test Plan
:maxdepth: 2
- :numbered:
./design/trafficgen_integration_guide.rst
./design/vswitchperf_design.rst
@@ -75,6 +74,3 @@ VSPERF CI Test Cases
:numbered:
CI Test cases run daily on the VSPERF Pharos POD for master and stable branches.
-
- ./results/scenario.rst
- ./results/results.rst
diff --git a/docs/testing/developer/devguide/requirements/ietf_draft/rfc8204-vsperf-bmwg-vswitch-opnfv.rst b/docs/testing/developer/devguide/requirements/ietf_draft/rfc8204-vsperf-bmwg-vswitch-opnfv.rst
index ee7f98b5..10b07d54 100644
--- a/docs/testing/developer/devguide/requirements/ietf_draft/rfc8204-vsperf-bmwg-vswitch-opnfv.rst
+++ b/docs/testing/developer/devguide/requirements/ietf_draft/rfc8204-vsperf-bmwg-vswitch-opnfv.rst
@@ -13,7 +13,7 @@ informational RFC published by the IETF available here https://tools.ietf.org/ht
For more information about VSPERF refer to:
-* Wiki: https://wiki.opnfv.org/characterize_vswitch_performance_for_telco_nfv_use_cases
+* Wiki: https://wiki.opnfv.org/display/vsperf
* Repository: https://git.opnfv.org/vswitchperf
* Artifacts: https://artifacts.opnfv.org/vswitchperf.html
* Continuous Integration: https://build.opnfv.org/ci/view/vswitchperf/
diff --git a/docs/testing/developer/devguide/requirements/vswitchperf_ltd.rst b/docs/testing/developer/devguide/requirements/vswitchperf_ltd.rst
index c703ff40..1ea99f7e 100644
--- a/docs/testing/developer/devguide/requirements/vswitchperf_ltd.rst
+++ b/docs/testing/developer/devguide/requirements/vswitchperf_ltd.rst
@@ -62,21 +62,21 @@ References
==========
* `RFC 1242 Benchmarking Terminology for Network Interconnection
- Devices <http://www.ietf.org/rfc/rfc1242.txt>`__
+ Devices <https://www.ietf.org/rfc/rfc1242.txt>`__
* `RFC 2544 Benchmarking Methodology for Network Interconnect
- Devices <http://www.ietf.org/rfc/rfc2544.txt>`__
+ Devices <https://www.ietf.org/rfc/rfc2544.txt>`__
* `RFC 2285 Benchmarking Terminology for LAN Switching
- Devices <http://www.ietf.org/rfc/rfc2285.txt>`__
+ Devices <https://www.ietf.org/rfc/rfc2285.txt>`__
* `RFC 2889 Benchmarking Methodology for LAN Switching
- Devices <http://www.ietf.org/rfc/rfc2889.txt>`__
+ Devices <https://www.ietf.org/rfc/rfc2889.txt>`__
* `RFC 3918 Methodology for IP Multicast
- Benchmarking <http://www.ietf.org/rfc/rfc3918.txt>`__
+ Benchmarking <https://www.ietf.org/rfc/rfc3918.txt>`__
* `RFC 4737 Packet Reordering
- Metrics <http://www.ietf.org/rfc/rfc4737.txt>`__
+ Metrics <https://www.ietf.org/rfc/rfc4737.txt>`__
* `RFC 5481 Packet Delay Variation Applicability
- Statement <http://www.ietf.org/rfc/rfc5481.txt>`__
+ Statement <https://www.ietf.org/rfc/rfc5481.txt>`__
* `RFC 6201 Device Reset
- Characterization <http://tools.ietf.org/html/rfc6201>`__
+ Characterization <https://tools.ietf.org/html/rfc6201>`__
.. 3.2
diff --git a/docs/testing/developer/devguide/requirements/vswitchperf_ltp.rst b/docs/testing/developer/devguide/requirements/vswitchperf_ltp.rst
index e5147bea..c0b63859 100644
--- a/docs/testing/developer/devguide/requirements/vswitchperf_ltp.rst
+++ b/docs/testing/developer/devguide/requirements/vswitchperf_ltp.rst
@@ -63,21 +63,21 @@ References
===============
* `RFC 1242 Benchmarking Terminology for Network Interconnection
- Devices <http://www.ietf.org/rfc/rfc1242.txt>`__
+ Devices <https://www.ietf.org/rfc/rfc1242.txt>`__
* `RFC 2544 Benchmarking Methodology for Network Interconnect
- Devices <http://www.ietf.org/rfc/rfc2544.txt>`__
+ Devices <https://www.ietf.org/rfc/rfc2544.txt>`__
* `RFC 2285 Benchmarking Terminology for LAN Switching
- Devices <http://www.ietf.org/rfc/rfc2285.txt>`__
+ Devices <https://www.ietf.org/rfc/rfc2285.txt>`__
* `RFC 2889 Benchmarking Methodology for LAN Switching
- Devices <http://www.ietf.org/rfc/rfc2889.txt>`__
+ Devices <https://www.ietf.org/rfc/rfc2889.txt>`__
* `RFC 3918 Methodology for IP Multicast
- Benchmarking <http://www.ietf.org/rfc/rfc3918.txt>`__
+ Benchmarking <https://www.ietf.org/rfc/rfc3918.txt>`__
* `RFC 4737 Packet Reordering
- Metrics <http://www.ietf.org/rfc/rfc4737.txt>`__
+ Metrics <https://www.ietf.org/rfc/rfc4737.txt>`__
* `RFC 5481 Packet Delay Variation Applicability
- Statement <http://www.ietf.org/rfc/rfc5481.txt>`__
+ Statement <https://www.ietf.org/rfc/rfc5481.txt>`__
* `RFC 6201 Device Reset
- Characterization <http://tools.ietf.org/html/rfc6201>`__
+ Characterization <https://tools.ietf.org/html/rfc6201>`__
.. 3.1.4
@@ -633,7 +633,7 @@ General Methodology:
--------------------------
To establish the baseline performance of the virtual switch, tests would
initially be run with a simple workload in the VNF (the recommended
-simple workload VNF would be `DPDK <http://www.dpdk.org/>`__'s testpmd
+simple workload VNF would be `DPDK <https://www.dpdk.org/>`__'s testpmd
application forwarding packets in a VM, or vloop\_vnf, a simple kernel
module that forwards traffic between two network interfaces inside the
virtualized environment while bypassing the networking stack).
@@ -656,7 +656,7 @@ tests:
- Reference application: Simple forwarding or Open Source VNF.
- Frame size (bytes): 64, 128, 256, 512, 1024, 1280, 1518, 2K, 4k OR
Packet size based on use-case (e.g. RTP 64B, 256B) OR Mix of packet sizes as
- maintained by the Functest project <https://wiki.opnfv.org/traffic_profile_management>.
+ maintained by the Functest project <https://wiki.opnfv.org/display/functest/Traffic+Profile+Management>.
- Reordering check: Tests should confirm that packets within a flow are
not reordered.
- Duplex: Unidirectional / Bidirectional. Default: Full duplex with
diff --git a/docs/testing/developer/devguide/results/scenario.rst b/docs/testing/developer/devguide/results/scenario.rst
index dbdc7877..f7eadd33 100644
--- a/docs/testing/developer/devguide/results/scenario.rst
+++ b/docs/testing/developer/devguide/results/scenario.rst
@@ -34,7 +34,7 @@ Deployment topologies:
Loopback applications in the Guest:
-* `DPDK testpmd <http://dpdk.org/doc/guides/testpmd_app_ug/index.html>`_.
+* `DPDK testpmd <http://doc.dpdk.org/guides/testpmd_app_ug/index.html>`_.
* Linux Bridge.
* :ref:`l2fwd-module`
diff --git a/docs/testing/user/configguide/index.rst b/docs/testing/user/configguide/index.rst
index 75a2082d..87c32d11 100644
--- a/docs/testing/user/configguide/index.rst
+++ b/docs/testing/user/configguide/index.rst
@@ -31,7 +31,7 @@ new techniques together. A new IETF benchmarking specification (RFC8204) is base
2015. VSPERF is also contributing to development of ETSI NFV test specifications through the Test and Open Source
Working Group.
-* Wiki: https://wiki.opnfv.org/characterize_vswitch_performance_for_telco_nfv_use_cases
+* Wiki: https://wiki.opnfv.org/display/vsperf
* Repository: https://git.opnfv.org/vswitchperf
* Artifacts: https://artifacts.opnfv.org/vswitchperf.html
* Continuous Integration: https://build.opnfv.org/ci/view/vswitchperf/
@@ -57,10 +57,10 @@ VSPERF Test Guide
.. toctree::
:caption: VSPERF Test Execution
:maxdepth: 2
- :numbered:
../userguide/testusage.rst
../userguide/teststeps.rst
../userguide/integration.rst
+ ../userguide/trafficcapture.rst
../userguide/yardstick.rst
../userguide/testlist.rst
diff --git a/docs/testing/user/configguide/installation.rst b/docs/testing/user/configguide/installation.rst
index fcf5b05d..b950442e 100644
--- a/docs/testing/user/configguide/installation.rst
+++ b/docs/testing/user/configguide/installation.rst
@@ -167,8 +167,12 @@ repository provided by Software Collections (`a link`_). The installation script
will also use `virtualenv`_ to create a vsperf virtual environment, which is
isolated from the default Python environment, using the Python3 package located
in **/usr/bin/python3**. This environment will reside in a directory called
-**vsperfenv** in $HOME. It will ensure, that system wide Python installation
- is not modified or broken by VSPERF installation. The complete list of Python
+**vsperfenv** in $HOME.
+
+This ensures that the system-wide Python installation is not modified or
+broken by the VSPERF installation.
+
+The complete list of Python
packages installed inside virtualenv can be found in the file
``requirements.txt``, which is located at the vswitchperf repository.
@@ -266,8 +270,8 @@ running any of the above. For example:
export http_proxy=proxy.mycompany.com:123
export https_proxy=proxy.mycompany.com:123
-.. _a link: http://www.softwarecollections.org/en/scls/rhscl/python33/
-.. _virtualenv: https://virtualenv.readthedocs.org/en/latest/
+.. _a link: https://www.softwarecollections.org/en/scls/rhscl/python33/
+.. _virtualenv: https://virtualenv.pypa.io/en/latest/
.. _vloop-vnf-ubuntu-14.04_20160823: http://artifacts.opnfv.org/vswitchperf/vnf/vloop-vnf-ubuntu-14.04_20160823.qcow2
.. _vloop-vnf-ubuntu-14.04_20160804: http://artifacts.opnfv.org/vswitchperf/vnf/vloop-vnf-ubuntu-14.04_20160804.qcow2
.. _vloop-vnf-ubuntu-14.04_20160303: http://artifacts.opnfv.org/vswitchperf/vnf/vloop-vnf-ubuntu-14.04_20160303.qcow2
@@ -326,7 +330,7 @@ to your OS documentation to set hugepages correctly. It is recommended to set
the required amount of hugepages to be allocated by default on reboots.
Information on hugepage requirements for dpdk can be found at
-http://dpdk.org/doc/guides/linux_gsg/sys_reqs.html
+http://doc.dpdk.org/guides/linux_gsg/sys_reqs.html
You can review your hugepage amounts by executing the following command
@@ -356,7 +360,7 @@ default on the Linux DUT
VSPerf recommends the latest tuned-adm package, which can be downloaded from the
following location:
-http://www.tuned-project.org/2017/04/27/tuned-2-8-0-released/
+https://github.com/redhat-performance/tuned/releases
Follow the instructions to install the latest tuned-adm onto your system. For
current RHEL customers you should already have the most current version. You
diff --git a/docs/testing/user/configguide/tools.rst b/docs/testing/user/configguide/tools.rst
index 907e86d2..72e515fa 100644
--- a/docs/testing/user/configguide/tools.rst
+++ b/docs/testing/user/configguide/tools.rst
@@ -46,13 +46,16 @@ Installation
No installation is required for *pidstat*, whereas, collectd has to be installed
separately. For installation of collectd, we recommend to follow the process described
-in *OPNFV-Barometer* project, which can be found here `Barometer-Euphrates <http://docs.opnfv.org/en/stable-euphrates/submodules/barometer/docs/release/userguide/feature.userguide.html#building-all-barometer-upstreamed-plugins-from-scratch>`_ or the most
+in the *OPNFV-Barometer* project, documented at `Barometer <https://opnfv-barometer.readthedocs.io/en/latest/release/userguide>`_ for the most
recent release.
VSPERF assumes that collectd is installed and configured to send metrics over localhost.
The metrics sent should be for the following categories: CPU, Processes, Interface,
OVS, DPDK, Intel-RDT.
+For multicmd, apart from collectd, installation of PROX is also necessary.
+Installation steps for PROX can be found at `DPPD-PROX <https://github.com/opnfv/samplevnf/tree/master/VNFs/DPPD-PROX>`_.
+
Configuration
^^^^^^^^^^^^^
@@ -79,6 +82,13 @@ The *collectd* configuration option includes:
* ``COLLECTD_INTELRDT_KEYS`` - Interesting metrics from Intel-RDT
* ``COLLECTD_INTERFACE_XKEYS`` - Metrics to exclude from Interface
* ``COLLECTD_INTELRDT_XKEYS`` - Metrics to exclude from Intel-RDT
+* ``MC_COLLECTD_CSV`` - Path where collectd writes its metrics as CSV.
+* ``MC_COLLECTD_CMD`` - Path where collectd is installed.
+* ``MC_PROX_HOME`` - Path where PROX-IRQ is installed.
+* ``MC_PROX_CMD`` - Command to run PROX-IRQ.
+* ``MC_PROX_OUT`` - Output file generated by the PROX-IRQ stats collector.
+* ``MC_CRON_OUT`` - Output file path of the command run through CROND.
+* ``MC_BEAT_CFILE`` - Filebeat configuration file path.
.. _`Load Generators`:
@@ -175,3 +185,43 @@ VSPERF provides following configuration options, for user to define and enforce
* ``VNF_CA`` - [min-cache-value, max-cache-value] for VNF
* ``PMD_CA`` - [min-cache-value, max-cache-value] for PMD
* ``NOISEVM_CA`` - [min-cache-value, max-cache-value] for Noisy VM
+
+VSPERF Containers
+-----------------
+
+VSPERF containers are found in the ``tools/docker`` folder.
+
+RESULTS CONTAINER
+^^^^^^^^^^^^^^^^^
+
+The results container includes multiple services - ELK Stack, Barometer-Grafana, OPNFV-TestAPI & Jupyter.
+
+Pre-Deployment Configuration
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+1. Set the limit on mmap counts to 262144 or more.
+   You can do this with the command ``sysctl -w vm.max_map_count=262144``.
+   To set it permanently, update the ``vm.max_map_count`` field in ``/etc/sysctl.conf``.
+
+2. You may want to modify the IP address from 0.0.0.0 to the appropriate host IP in ``docker-compose.yml``.
+
+3. Please add the dashboards folder from OPNFV-Barometer-Grafana into the grafana folder. It can be found in `Barometer Grafana <https://github.com/opnfv/barometer/tree/master/docker/barometer-grafana>`_.
+
+Build
+~~~~~
+
+Run the ``docker-compose build`` command to build the container.
+
+Run
+~~~
+
+Run the container with the ``docker-compose up`` command.
+
+Post-Deployment Configuration
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The directory ``resultsdb`` contains the source from Dovetail/Dovetail-webportal project.
+Once the results container is deployed, please run the python script as follows to ensure that results can be
+pushed and queried correctly: ``python init_db.py host_ip_address testapi_port``.
+For example, if the host on which the container is running is 10.10.120.22, and the container exposes port 8000,
+the command should be: ``python init_db.py 10.10.120.22 8000``.
diff --git a/docs/testing/user/configguide/trafficgen.rst b/docs/testing/user/configguide/trafficgen.rst
index ae745543..3bb09d52 100644
--- a/docs/testing/user/configguide/trafficgen.rst
+++ b/docs/testing/user/configguide/trafficgen.rst
@@ -91,6 +91,11 @@ and is configured as follows:
'enabled': False,
'type': 'Default',
},
+ 'imix': {
+ 'enabled': True,
+ 'type': 'genome',
+ 'genome': 'aaaaaaaddddg',
+ },
}
A detailed description of the ``TRAFFIC`` dictionary can be found at
@@ -119,6 +124,13 @@ commandline above to:
$ ./vsperf --test-params "TRAFFICGEN_PKT_SIZES=(x,y);TRAFFICGEN_DURATION=10;" \
"TRAFFICGEN_RFC2544_TESTS=1" $TESTNAME
+If you use IMIX, set ``TRAFFICGEN_PKT_SIZES`` to a single zero entry:
+
+.. code-block:: console
+
+ TRAFFICGEN_PKT_SIZES = (0,)
+
+
.. _trafficgen-dummy:
Dummy
@@ -395,7 +407,7 @@ Spirent Setup
Spirent installation files and instructions are available on the
Spirent support website at:
-http://support.spirent.com
+https://support.spirent.com
Select a version of Spirent TestCenter software to utilize. This example
will use Spirent TestCenter v4.57 as an example. Substitute the appropriate
@@ -447,7 +459,7 @@ STC ReST API. Basic ReST functionality is provided by the resthttp module,
and may be used for writing ReST clients independent of STC.
- Project page: <https://github.com/Spirent/py-stcrestclient>
-- Package download: <http://pypi.python.org/pypi/stcrestclient>
+- Package download: <https://pypi.python.org/project/stcrestclient>
To use REST interface, follow the instructions in the Project page to
install the package. Once installed, the scripts named with 'rest' keyword
@@ -606,7 +618,7 @@ support contract.
To execute the Xena2544.exe file under Linux distributions the mono-complete
package must be installed. To install this package follow the instructions
below. Further information can be obtained from
-http://www.mono-project.com/docs/getting-started/install/linux/
+https://www.mono-project.com/docs/getting-started/install/linux/
.. code-block:: console
@@ -742,7 +754,7 @@ trafficgen.lua
Follow MoonGen set up and execution instructions here:
-https://github.com/atheurer/lua-trafficgen/blob/master/README.md
+https://github.com/atheurer/trafficgen/blob/master/README.md
Note one will need to set up ssh login to not use passwords between the server
running MoonGen and the device under test (running the VSPERF test
@@ -994,7 +1006,7 @@ dictionary are shown. The rest of the TRAFFIC dictionary is set to default value
as they are defined in ``conf/03_traffic.conf``.
Please check official documentation of SCAPY project for details about SCAPY frame
-definition and supported network layers at: http://www.secdev.org/projects/scapy
+definition and supported network layers at: https://scapy.net
#. Generate ICMP frames:
diff --git a/docs/testing/user/userguide/index.rst b/docs/testing/user/userguide/index.rst
index 350fbe54..2c7a78ff 100644
--- a/docs/testing/user/userguide/index.rst
+++ b/docs/testing/user/userguide/index.rst
@@ -11,7 +11,6 @@ VSPERF Test Guide
.. toctree::
:caption: VSPERF Test Execution
:maxdepth: 2
- :numbered:
./testusage.rst
./teststeps.rst
diff --git a/docs/testing/user/userguide/testusage.rst b/docs/testing/user/userguide/testusage.rst
index 9dc80bb7..3dd41846 100644
--- a/docs/testing/user/userguide/testusage.rst
+++ b/docs/testing/user/userguide/testusage.rst
@@ -1,6 +1,6 @@
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. http://creativecommons.org/licenses/by/4.0
-.. (c) OPNFV, Intel Corporation, AT&T and others.
+.. (c) OPNFV, Intel Corporation, Spirent, AT&T and others.
vSwitchPerf test suites userguide
---------------------------------
@@ -662,7 +662,7 @@ modified to use igb_uio_from_src instead.
Note: vfio_no_iommu requires kernels equal to or greater than 4.5 and dpdk
16.04 or greater. Using this option will also taint the kernel.
-Please refer to the dpdk documents at http://dpdk.org/doc/guides for more
+Please refer to the dpdk documents at https://doc.dpdk.org/guides for more
information on these drivers.
Guest Core and Thread Binding
diff --git a/docs/xtesting/index.rst b/docs/xtesting/index.rst
new file mode 100644
index 00000000..9259a12a
--- /dev/null
+++ b/docs/xtesting/index.rst
@@ -0,0 +1,85 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) OPNFV, Spirent, AT&T, Ixia and others.
+
+.. OPNFV VSPERF Documentation master file.
+
+********************************
+OPNFV VSPERF with OPNFV Xtesting
+********************************
+
+============
+Introduction
+============
+Users can use VSPERF with Xtesting for two different use cases.
+
+1. Baremetal Dataplane Testing/Benchmarking.
+2. Openstack Dataplane Testing/Benchmarking.
+
+The Baremetal use case is the legacy use case of OPNFV VSPERF.
+
+The figure below summarizes both use cases.
+
+.. image:: ./vsperf-xtesting.png
+ :width: 400
+
+===========
+How to Use?
+===========
+
+Step-1: Build the container
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Go to the xtesting/baremetal or xtesting/openstack directory and run the following command.
+
+.. code-block:: console
+
+ docker build -t 127.0.0.1:5000/vsperfbm .
+
+
+Step-2: Install and run Xtesting Playbook
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+These commands are described in the OPNFV Xtesting documentation; please refer to the OPNFV Xtesting wiki for details.
+
+.. code-block:: console
+
+ virtualenv xtesting
+ . xtesting/bin/activate
+ ansible-galaxy install collivier.xtesting
+ ansible-playbook site.yml
+
+======================
+Accessing the Results?
+======================
+
+VSPERF automatically publishes the results to any OPNFV Testapi deployment.
+The user has to configure the following two parameters in VSPERF.
+
+1. OPNFVPOD - The name of the pod.
+2. OPNFV_URL - The endpoint serving testapi.
+
+As Xtesting runs its own testapi, the user should point to it (the testapi endpoint of Xtesting) using the above two configuration options.
+
+These two options should be configured wherever VSPERF is running (refer to the figure above).
+
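+A hypothetical snippet for a custom conf file (parameter names as listed
+above; the pod name and the endpoint are placeholders):
+
+.. code-block:: python
+
+    # e.g. in a custom conf file passed via --conf-file
+    OPNFVPOD = 'pod12'
+    OPNFV_URL = 'http://127.0.0.1:8000/api/v1'
+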
+NOTE: Before running the test, it helps if the user prepares the testapi of Xtesting (if needed). The preparation includes setting up the following:
+
+1. Projects
+2. Testcases
+3. Pods
+
+Please refer to the documentation of testapi for more details.
+
+=======================================
+Accessing other components of Xtesting?
+=======================================
+
+Please refer to the documentation of Xtesting in OPNFV Wiki.
+
+===========
+Limitations
+===========
+For the Jerma release, the following limitations apply:
+
+1. For both baremetal and openstack, only phy2phy_tput testcase is supported.
+2. For openstack, only Spirent's STCv and Keysight's Ixnet-Virtual are supported.
diff --git a/docs/xtesting/vsperf-xtesting.png b/docs/xtesting/vsperf-xtesting.png
new file mode 100755
index 00000000..64cad722
--- /dev/null
+++ b/docs/xtesting/vsperf-xtesting.png
Binary files differ
diff --git a/pods/__init__.py b/pods/__init__.py
new file mode 100644
index 00000000..e3ce18d9
--- /dev/null
+++ b/pods/__init__.py
@@ -0,0 +1,19 @@
+# Copyright 2020 Spirent Communications
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Package for POD wrappers for use with VSPERF.
+
+This package contains an interface the VSPERF core uses for controlling
+PODs and POD-specific implementation modules of this interface.
+"""
diff --git a/pods/papi/__init__.py b/pods/papi/__init__.py
new file mode 100644
index 00000000..16760b86
--- /dev/null
+++ b/pods/papi/__init__.py
@@ -0,0 +1,19 @@
+# Copyright 2020 Spirent Communications
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Package for POD wrappers for use with VSPERF.
+
+This package contains an implementation of the interface the VSPERF core
+uses for controlling PODs using Kubernetes Python-API (PAPI)
+"""
diff --git a/pods/papi/papi.py b/pods/papi/papi.py
new file mode 100644
index 00000000..5a21f1d6
--- /dev/null
+++ b/pods/papi/papi.py
@@ -0,0 +1,143 @@
+# Copyright 2020 University Of Delhi.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""
+Automation of Pod Deployment with Kubernetes Python API
+"""
+
+# import os
+import logging
+import json
+import time
+import yaml
+from kubernetes import client, config
+from kubernetes.client.rest import ApiException
+
+from conf import settings as S
+from pods.pod.pod import IPod
+
+class Papi(IPod):
+ """
+ Class for controlling the pod through PAPI
+ """
+
+ def __init__(self):
+ """
+ Initialisation function.
+ """
+ #super(Papi, self).__init__()
+ super().__init__()
+
+ self._logger = logging.getLogger(__name__)
+ self._sriov_config = None
+ self._sriov_config_ns = None
+ config.load_kube_config(S.getValue('K8S_CONFIG_FILEPATH'))
+
+ def create(self):
+ """
+ Creation Process
+ """
+ # create vswitchperf namespace
+ api = client.CoreV1Api()
+ namespace = 'default'
+ #namespace = 'vswitchperf'
+ # replace_namespace(api, namespace)
+
+ # sriov configmap
+ if S.getValue('PLUGIN') == 'sriov':
+ configmap = load_manifest(S.getValue('CONFIGMAP_FILEPATH'))
+ self._sriov_config = configmap['metadata']['name']
+ self._sriov_config_ns = configmap['metadata']['namespace']
+ api.create_namespaced_config_map(self._sriov_config_ns, configmap)
+
+
+ # create NADs (network attachment definitions)
+ group = 'k8s.cni.cncf.io'
+ version = 'v1'
+ kind_plural = 'network-attachment-definitions'
+ api = client.CustomObjectsApi()
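+ # NADs are CRD-backed custom resources (registered by CNI meta-plugins
+ # such as Multus), so they are created via CustomObjectsApi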
+
+ for nad_filepath in S.getValue('NETWORK_ATTACHMENT_FILEPATH'):
+ nad_manifest = load_manifest(nad_filepath)
+
+ try:
+ response = api.create_namespaced_custom_object(group, version, namespace,
+ kind_plural, nad_manifest)
+ self._logger.info(str(response))
+ self._logger.info("Created Network Attachment Definition: %s", nad_filepath)
+ except ApiException as err:
+ raise Exception from err
+
+ #create pod workloads
+ pod_manifest = load_manifest(S.getValue('POD_MANIFEST_FILEPATH'))
+ api = client.CoreV1Api()
+
+ try:
+ response = api.create_namespaced_pod(namespace, pod_manifest)
+ self._logger.info(str(response))
+ self._logger.info("Created POD %d ...", self._number)
+ except ApiException as err:
+ raise Exception from err
+
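+ # fixed wait for the pod to come up; assumes 12 seconds suffices
+ # (polling the pod status would be a more robust alternative)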
+ time.sleep(12)
+
+ def terminate(self):
+ """
+ Cleanup Process
+ """
+ #self._logger.info(self._log_prefix + "Cleaning vswitchperf namespace")
+ self._logger.info("Terminating Pod")
+ api = client.CoreV1Api()
+ # api.delete_namespace(name="vswitchperf", body=client.V1DeleteOptions())
+
+ if S.getValue('PLUGIN') == 'sriov':
+ api.delete_namespaced_config_map(self._sriov_config, self._sriov_config_ns)
+
+
+def load_manifest(filepath):
+ """
+ Reads k8s manifest files and returns as string
+
+ :param str filepath: filename of k8s manifest file to read
+
+ :return: k8s resource definition as string
+ """
+ with open(filepath) as handle:
+ data = handle.read()
+
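+ # a manifest may be JSON or YAML: try JSON first, fall back to YAML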
+ try:
+ manifest = json.loads(data)
+ except ValueError:
+ try:
+ manifest = yaml.safe_load(data)
+ except yaml.parser.ParserError as err:
+ raise Exception from err
+
+ return manifest
+
+def replace_namespace(api, namespace):
+ """
+ Recreates the namespace: deletes it if it already exists, then creates it
+ """
+ namespaces = api.list_namespace()
+ for nsi in namespaces.items:
+ if namespace == nsi.metadata.name:
+ api.delete_namespace(name=namespace,
+ body=client.V1DeleteOptions())
+ break
+
+ time.sleep(0.5)
+ api.create_namespace(client.V1Namespace(
+ metadata=client.V1ObjectMeta(name=namespace)))
diff --git a/pods/pod/__init__.py b/pods/pod/__init__.py
new file mode 100644
index 00000000..b91706e2
--- /dev/null
+++ b/pods/pod/__init__.py
@@ -0,0 +1,18 @@
+# Copyright 2020 Spirent Communications
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""POD interface and helpers.
+"""
+
+import pods
diff --git a/pods/pod/pod.py b/pods/pod/pod.py
new file mode 100644
index 00000000..c25744d2
--- /dev/null
+++ b/pods/pod/pod.py
@@ -0,0 +1,63 @@
+# Copyright 2020 Spirent Communications, University Of Delhi.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Interface for POD
+"""
+
+#import time
+#import pexpect
+from tools import tasks
+
+class IPod(tasks.Process):
+ """
+ Interface for POD
+
+ Inheriting from Process helps in managing a system process:
+ execute a command, wait, kill, etc.
+ """
+ _number_pods = 0
+
+ def __init__(self):
+ """
+ Initialization Method
+ """
+ self._number = IPod._number_pods
+ self._logger.debug('Initializing pod %s (index %s)',
+ self._number + 1, self._number)
+ IPod._number_pods = IPod._number_pods + 1
+ self._log_prefix = 'pod_%d_cmd : ' % self._number
+ # raise NotImplementedError()
+
+ def create(self):
+ """
+ Start the Pod
+ """
+ raise NotImplementedError()
+
+
+ def terminate(self):
+ """
+ Stop the Pod
+ """
+ raise NotImplementedError()
+
+ @staticmethod
+ def reset_pod_counter():
+ """
+ Reset internal POD counter
+
+ This method is static
+ """
+ IPod._number_pods = 0
diff --git a/requirements.txt b/requirements.txt
index cb5a0d89..a50569dd 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -9,14 +9,34 @@ pexpect==3.3
tox==1.8.1
jinja2==2.7.3
xmlrunner==1.7.7
-requests==2.8.1
+requests>=2.14.2
netaddr==0.7.18
scapy-python3==0.18
pylint==1.8.2
-pyzmq==14.5.0
distro
stcrestclient
-matplotlib==2.2.2
+matplotlib
numpy
pycrypto
tabulate
+pypsi
+paramiko
+keystoneauth1>=2.18.0
+os-client-config>=1.22.0
+oslo.concurrency>=3.8.0
+oslo.config>=3.14.0
+oslo.log>=3.11.0
+oslo.serialization>=1.10.0
+oslo.utils>=3.18.0
+pygal
+pykwalify
+python-glanceclient>=2.5.0
+python-neutronclient>=5.1.0
+python-novaclient>=7.1.0
+python-heatclient>=1.6.1
+python-subunit>=0.0.18
+PyYAML>=3.10.0
+pyzmq>=16.0
+six>=1.9.0
+timeout-decorator>=0.4.0
+kubernetes
diff --git a/src/dpdk/Makefile b/src/dpdk/Makefile
index 4b4330d7..1a1521db 100755
--- a/src/dpdk/Makefile
+++ b/src/dpdk/Makefile
@@ -82,13 +82,13 @@ endif
# CentOS 7.3 specific config changes to compile
ifeq ($(ID),"centos")
ifeq ($(VERSION_ID),"7")
- $(AT)sed -i.bak s@'SRCS-y += ethtool/igb/igb_main.c'@'#SRCS-y += ethtool/igb/igb_main.c'@g $(WORK_DIR)/lib/librte_eal/linuxapp/kni/Makefile
+ $(AT)sed -i.bak s@'SRCS-y += ethtool/igb/igb_main.c'@'#SRCS-y += ethtool/igb/igb_main.c'@g $(WORK_DIR)/kernel/linux/kni/Makefile
endif
endif
# RHEL 7.3 specific config changes to compile
ifeq ($(ID),"rhel")
ifeq ($(VERSION_ID),"7.3")
- $(AT)sed -i.bak s@'SRCS-y += ethtool/igb/igb_main.c'@'#SRCS-y += ethtool/igb/igb_main.c'@g $(WORK_DIR)/lib/librte_eal/linuxapp/kni/Makefile
+ $(AT)sed -i.bak s@'SRCS-y += ethtool/igb/igb_main.c'@'#SRCS-y += ethtool/igb/igb_main.c'@g $(WORK_DIR)/kernel/linux/kni/Makefile
endif
endif
$(AT)sed -i -e 's/CONFIG_RTE_LIBRTE_VHOST=./CONFIG_RTE_LIBRTE_VHOST=y/g' $(CONFIG_FILE_LINUXAPP)
diff --git a/src/dpdk/testpmd_proc.py b/src/dpdk/testpmd_proc.py
index a8fa8eee..b89bcec2 100644
--- a/src/dpdk/testpmd_proc.py
+++ b/src/dpdk/testpmd_proc.py
@@ -27,8 +27,12 @@ from tools import tasks
_TESTPMD_PROMPT = 'Done'
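+# Append the run timestamp to the vswitchd log filename so repeated
+# runs do not overwrite each other's logs.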
+_NAME, _EXT = os.path.splitext(settings.getValue('LOG_FILE_VSWITCHD'))
_LOG_FILE_VSWITCHD = os.path.join(
- settings.getValue('LOG_DIR'), settings.getValue('LOG_FILE_VSWITCHD'))
+ settings.getValue('LOG_DIR'),
+ ("{name}_{uid}{ex}".format(name=_NAME, uid=settings.getValue(
+ 'LOG_TIMESTAMP'), ex=_EXT)))
+
class TestPMDProcess(tasks.Process):
"""Class wrapper for controlling a TestPMD instance.
diff --git a/src/package-list.mk b/src/package-list.mk
index d32a9ffd..1e40a60d 100644
--- a/src/package-list.mk
+++ b/src/package-list.mk
@@ -13,20 +13,20 @@
# dpdk section
# DPDK_URL ?= git://dpdk.org/dpdk
DPDK_URL ?= http://dpdk.org/git/dpdk
-DPDK_TAG ?= v17.08
+DPDK_TAG ?= v18.11-rc2
# OVS section
OVS_URL ?= https://github.com/openvswitch/ovs
-OVS_TAG ?= v2.8.1
+OVS_TAG ?= v2.12.0
# VPP section
VPP_URL ?= https://git.fd.io/vpp
-VPP_TAG ?= v17.07
+VPP_TAG ?= v19.08.1
# QEMU section
QEMU_URL ?= https://github.com/qemu/qemu.git
-QEMU_TAG ?= v2.9.1
+QEMU_TAG ?= v3.1.1
# TREX section
TREX_URL ?= https://github.com/cisco-system-traffic-generator/trex-core.git
-TREX_TAG ?= v2.38
+TREX_TAG ?= v2.86
diff --git a/src/trex/Makefile b/src/trex/Makefile
index 9a0704af..fd5c47bb 100644
--- a/src/trex/Makefile
+++ b/src/trex/Makefile
@@ -29,8 +29,8 @@ all: force_pull
force_pull: $(TAG_DONE_FLAG)
$(AT)cd $(WORK_DIR) && git pull $(TREX_URL) $(TREX_TAG)
@echo "git pull done"
- $(AT)wget https://raw.githubusercontent.com/phaethon/scapy/v0.18/scapy/layers/all.py -O $(WORK_DIR)/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/all.py
- @echo "orignal SCAPY 2.3.1 layers/all.py was restored"
+# $(AT)wget https://raw.githubusercontent.com/phaethon/scapy/v0.18/scapy/layers/all.py -O $(WORK_DIR)/scripts/external_libs/scapy-2.3.1/python3/scapy/layers/all.py
+# @echo "orignal SCAPY 2.3.1 layers/all.py was restored"
$(WORK_DIR):
$(AT)git clone $(TREX_URL) $(WORK_DIR)
diff --git a/systems/README.md b/systems/README.md
index d72aae65..ca6557ea 100644
--- a/systems/README.md
+++ b/systems/README.md
@@ -12,3 +12,7 @@ On a freshly built system, run the following with a super user privilege
or with password less sudo access.
./build_base_machine.sh
+
+If you want to use vsperf in trafficgen mode ONLY, then add the `trafficgen` parameter.
+
+./build_base_machine.sh trafficgen
diff --git a/systems/build_base_machine.sh b/systems/build_base_machine.sh
index 59712b96..37b74ffe 100755
--- a/systems/build_base_machine.sh
+++ b/systems/build_base_machine.sh
@@ -68,15 +68,30 @@ else
die "$distro_dir is not yet supported"
fi
-if [ ! -d /lib/modules/`uname -r`/build ] ; then
- die "Kernel devel is not available for active kernel. It can be caused by recent kernel update. Please reboot and run $0 again."
+if [ $# -eq 0 ]; then
+ echo "No parameters provided - continuing with Lib checking"
+ if [ ! -d /lib/modules/`uname -r`/build ] ; then
+ die "Kernel devel is not available for active kernel. It can be caused by recent kernel update. Please reboot and run $0 again."
+ fi
fi
-# download and compile DPDK, OVS and QEMU
-if [ -f ../src/Makefile ] ; then
- cd ../src
- make || die "Make failed"
- cd -
+if [ $# -eq 0 ]; then
+ echo "No parameters provided - continuing with SRC Download and Compile"
+ # download and compile DPDK, OVS and QEMU
+ if [ -f ../src/Makefile ] ; then
+ cd ../src
+ make || die "Make failed"
+ cd -
+ else
+ die "Make failed; No Makefile"
+ fi
else
- die "Make failed; No Makefile"
+ echo "Downloading and compiling only T-Rex"
+ if [ -f ../src/trex/Makefile ]; then
+ cd ../src/trex/
+ make || die "Make failed"
+ cd -
+ else
+ die "Make failed; No Makefile"
+ fi
fi
diff --git a/systems/centos/build_base_machine.sh b/systems/centos/build_base_machine.sh
index 95f9e211..0e1ed830 100755
--- a/systems/centos/build_base_machine.sh
+++ b/systems/centos/build_base_machine.sh
@@ -76,8 +76,8 @@ sudo yum -y install centos-release-scl-rh
# install python34 packages and git-review tool
yum -y install $(echo "
-rh-python34
-rh-python34-python-tkinter
+rh-python36
+rh-python36-python-tkinter
git-review
" | grep -v ^#)
# prevent ovs vanilla from building from source due to kernel incompatibilities
diff --git a/systems/centos/prepare_python_env.sh b/systems/centos/prepare_python_env.sh
index 108ba1f6..4f5c0065 100755
--- a/systems/centos/prepare_python_env.sh
+++ b/systems/centos/prepare_python_env.sh
@@ -21,8 +21,8 @@ if [ -d "$VSPERFENV_DIR" ] ; then
exit
fi
-scl enable rh-python34 "
-virtualenv "$VSPERFENV_DIR" --python /opt/rh/rh-python34/root/usr/bin/python3
+scl enable rh-python36 "
+virtualenv "$VSPERFENV_DIR" --python /opt/rh/rh-python36/root/usr/bin/python3
source "$VSPERFENV_DIR"/bin/activate
pip install -r ../requirements.txt
"
diff --git a/systems/debian/build_base_machine.sh b/systems/debian/build_base_machine.sh
new file mode 100755
index 00000000..cc3f1eb8
--- /dev/null
+++ b/systems/debian/build_base_machine.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+#
+# Build a base machine for Debian style distro
+#
+# Copyright 2020 OPNFV
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Contributors:
+# Sridhar K. N. Rao Spirent Communications
+
+# This is meant to be used only for Containerized VSPERF.
+
+# Synchronize package index files
+apt-get -y update
+apt-get -y install curl
+apt-get -y install git
+apt-get -y install wget
+apt-get -y install python3-venv
+
+# Make and Compilers
+apt-get -y install make
+apt-get -y install automake
+apt-get -y install gcc
+apt-get -y install g++
+apt-get -y install libssl1.1
+apt-get -y install libxml2
+apt-get -y install zlib1g-dev
+apt-get -y install scapy
diff --git a/systems/debian/prepare_python_env.sh b/systems/debian/prepare_python_env.sh
new file mode 100755
index 00000000..7c3b530b
--- /dev/null
+++ b/systems/debian/prepare_python_env.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+#
+# Prepare Python environment for vsperf execution on Debian systems
+#
+# Copyright 2020 OPNFV, Spirent Communications
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [ -d "$VSPERFENV_DIR" ] ; then
+ echo "Directory $VSPERFENV_DIR already exists. Skipping python virtualenv creation."
+ exit
+fi
+
+# enable virtual environment in a subshell
+
+(python3 -m venv "$VSPERFENV_DIR"
+source "$VSPERFENV_DIR"/bin/activate
+pip install -r ../requirements.txt)
diff --git a/systems/rhel/7.2/build_base_machine.sh b/systems/rhel/7.2/build_base_machine.sh
index 198f39d7..c0f367ab 100755
--- a/systems/rhel/7.2/build_base_machine.sh
+++ b/systems/rhel/7.2/build_base_machine.sh
@@ -93,14 +93,14 @@ enabled=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-SCLo
EOT
-# install python34 packages and git-review tool
+# install python36 packages and git-review tool
yum -y install $(echo "
-rh-python34
-rh-python34-python-tkinter
+rh-python36
+rh-python36-python-tkinter
" | grep -v ^#)
-# cleanup python 34 repo file
-rm -f /etc/yum.repos.d/python34.repo
+# cleanup python 36 repo file
+rm -f /etc/yum.repos.d/python36.repo
# Create hugepage dirs
mkdir -p /dev/hugepages
diff --git a/systems/rhel/7.2/prepare_python_env.sh b/systems/rhel/7.2/prepare_python_env.sh
index 047d6961..b7506568 100755
--- a/systems/rhel/7.2/prepare_python_env.sh
+++ b/systems/rhel/7.2/prepare_python_env.sh
@@ -21,8 +21,8 @@ if [ -d "$VSPERFENV_DIR" ] ; then
exit
fi
-scl enable rh-python34 "
-virtualenv "$VSPERFENV_DIR" --python /opt/rh/rh-python34/root/usr/bin/python3
+scl enable rh-python36 "
+virtualenv "$VSPERFENV_DIR" --python /opt/rh/rh-python36/root/usr/bin/python3
source "$VSPERFENV_DIR"/bin/activate
pip install -r ../requirements.txt
"
diff --git a/systems/rhel/7.3/build_base_machine.sh b/systems/rhel/7.3/build_base_machine.sh
index ae527214..42c36e4c 100755
--- a/systems/rhel/7.3/build_base_machine.sh
+++ b/systems/rhel/7.3/build_base_machine.sh
@@ -93,14 +93,14 @@ enabled=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-SCLo
EOT
-# install python34 packages and git-review tool
+# install python36 packages and git-review tool
yum -y install $(echo "
-rh-python34
-rh-python34-python-tkinter
+rh-python36
+rh-python36-python-tkinter
" | grep -v ^#)
-# cleanup python 34 repo file
-rm -f /etc/yum.repos.d/python34.repo
+# cleanup python 36 repo file
+rm -f /etc/yum.repos.d/python36.repo
# Create hugepage dirs
mkdir -p /dev/hugepages
diff --git a/systems/rhel/7.3/prepare_python_env.sh b/systems/rhel/7.3/prepare_python_env.sh
index 047d6961..b7506568 100755
--- a/systems/rhel/7.3/prepare_python_env.sh
+++ b/systems/rhel/7.3/prepare_python_env.sh
@@ -21,8 +21,8 @@ if [ -d "$VSPERFENV_DIR" ] ; then
exit
fi
-scl enable rh-python34 "
-virtualenv "$VSPERFENV_DIR" --python /opt/rh/rh-python34/root/usr/bin/python3
+scl enable rh-python36 "
+virtualenv "$VSPERFENV_DIR" --python /opt/rh/rh-python36/root/usr/bin/python3
source "$VSPERFENV_DIR"/bin/activate
pip install -r ../requirements.txt
"
diff --git a/systems/rhel/7.5/build_base_machine.sh b/systems/rhel/7.5/build_base_machine.sh
index 2073a38c..deb4e8a2 100755
--- a/systems/rhel/7.5/build_base_machine.sh
+++ b/systems/rhel/7.5/build_base_machine.sh
@@ -93,14 +93,14 @@ enabled=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-SCLo
EOT
-# install python34 packages and git-review tool
+# install python36 packages and git-review tool
yum -y install $(echo "
-rh-python34
-rh-python34-python-tkinter
+rh-python36
+rh-python36-python-tkinter
" | grep -v ^#)
-# cleanup python 34 repo file
-rm -f /etc/yum.repos.d/python34.repo
+# cleanup python 36 repo file
+rm -f /etc/yum.repos.d/python36.repo
# Create hugepage dirs
mkdir -p /dev/hugepages
diff --git a/systems/rhel/7.5/prepare_python_env.sh b/systems/rhel/7.5/prepare_python_env.sh
index 047d6961..b7506568 100755
--- a/systems/rhel/7.5/prepare_python_env.sh
+++ b/systems/rhel/7.5/prepare_python_env.sh
@@ -21,8 +21,8 @@ if [ -d "$VSPERFENV_DIR" ] ; then
exit
fi
-scl enable rh-python34 "
-virtualenv "$VSPERFENV_DIR" --python /opt/rh/rh-python34/root/usr/bin/python3
+scl enable rh-python36 "
+virtualenv "$VSPERFENV_DIR" --python /opt/rh/rh-python36/root/usr/bin/python3
source "$VSPERFENV_DIR"/bin/activate
pip install -r ../requirements.txt
"
diff --git a/systems/ubuntu/14.04/build_base_machine.sh b/systems/ubuntu/14.04/build_base_machine.sh
index a0e6895c..5501cab2 100755
--- a/systems/ubuntu/14.04/build_base_machine.sh
+++ b/systems/ubuntu/14.04/build_base_machine.sh
@@ -75,7 +75,7 @@ python3-setuptools
python3-dbus
python3-dev
python3-tk
-libpython3.4
+libpython3.6
python3-reportlab
# libs
diff --git a/testcases/__init__.py b/testcases/__init__.py
index 0b6b77e4..736be883 100644
--- a/testcases/__init__.py
+++ b/testcases/__init__.py
@@ -17,3 +17,4 @@
from testcases.testcase import (TestCase)
from testcases.performance import (PerformanceTestCase)
from testcases.integration import (IntegrationTestCase)
+from testcases.k8s_performance import (K8sPerformanceTestCase)
diff --git a/testcases/k8s_performance.py b/testcases/k8s_performance.py
new file mode 100644
index 00000000..3c31430c
--- /dev/null
+++ b/testcases/k8s_performance.py
@@ -0,0 +1,39 @@
+# Copyright 2015-2017 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PerformanceTestCase class
+"""
+
+import logging
+
+from testcases.testcase import TestCase
+#from tools.report import report
+
+class K8sPerformanceTestCase(TestCase):
+ """K8sPerformanceTestCase class
+
+ In this basic form runs RFC2544 throughput test
+ """
+ def __init__(self, cfg):
+ """ Testcase initialization
+ """
+ self._type = 'k8s_performance'
+ super().__init__(cfg)
+ self._logger = logging.getLogger(__name__)
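+ # mark this testcase as k8s, so that TestCase drives pod
+ # setup/teardown and connection wiring instead of VNF flows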
+ self._k8s = True
+
+ def run_report(self):
+ pass
+ #super().run_report()
+ #if self._tc_results:
+ # report.generate(self)
diff --git a/testcases/testcase.py b/testcases/testcase.py
index a30558ff..51d212b4 100644
--- a/testcases/testcase.py
+++ b/testcases/testcase.py
@@ -73,6 +73,8 @@ class TestCase(object):
self._hugepages_mounted = False
self._traffic_ctl = None
self._vnf_ctl = None
+ self._pod_ctl = None
+ self._pod_list = None
self._vswitch_ctl = None
self._collector = None
self._loadgen = None
@@ -81,6 +83,7 @@ class TestCase(object):
self._settings_paths_modified = False
self._testcast_run_time = None
self._versions = []
+ self._k8s = False
# initialization of step driven specific members
self._step_check = False # by default don't check result for step driven testcases
self._step_vnf_list = {}
@@ -216,6 +219,12 @@ class TestCase(object):
self._vnf_list = self._vnf_ctl.get_vnfs()
+ self._pod_ctl = component_factory.create_pod(
+ self.deployment,
+ loader.get_pod_class())
+
+ self._pod_list = self._pod_ctl.get_pods()
+
# verify enough hugepages are free to run the testcase
if not self._check_for_enough_hugepages():
raise RuntimeError('Not enough hugepages free to run test.')
@@ -281,6 +290,10 @@ class TestCase(object):
# Stop all VNFs started by TestSteps in case that something went wrong
self.step_stop_vnfs()
+ if self._k8s:
+ self._pod_ctl.stop()
+
+
# Cleanup any LLC-allocations
if S.getValue('LLC_ALLOCATION'):
self._rmd.cleanup_llc_allocation()
@@ -350,15 +363,18 @@ class TestCase(object):
"""Run the test
All setup and teardown through controllers is included.
+
"""
# prepare test execution environment
self.run_initialize()
try:
with self._vswitch_ctl:
- with self._vnf_ctl, self._collector, self._loadgen:
- if not self._vswitch_none:
+ with self._vnf_ctl, self._pod_ctl, self._collector, self._loadgen:
+ if not self._vswitch_none and not self._k8s:
self._add_flows()
+ if self._k8s:
+ self._add_connections()
self._versions += self._vswitch_ctl.get_vswitch().get_version()
@@ -595,6 +611,43 @@ class TestCase(object):
return list(result.keys())
+ def _add_connections(self):
+ """
+ Add connections for Kubernetes Usecases
+ """
+ logging.info("Kubernetes: Adding Connections")
+ vswitch = self._vswitch_ctl.get_vswitch()
+ bridge = S.getValue('VSWITCH_BRIDGE_NAME')
+ if S.getValue('K8S') and 'sriov' not in S.getValue('PLUGIN'):
+ if 'Ovs' in S.getValue('VSWITCH'):
+ # Add OVS Flows
+ logging.info("Kubernetes: Adding OVS Connections")
+ flow = {'table':'0', 'in_port':'1',
+ 'idle_timeout':'0', 'actions': ['output:3']}
+ vswitch.add_flow(bridge, flow)
+ flow = {'table':'0', 'in_port':'3',
+ 'idle_timeout':'0', 'actions': ['output:1']}
+ vswitch.add_flow(bridge, flow)
+ flow = {'table':'0', 'in_port':'2',
+ 'idle_timeout':'0', 'actions': ['output:4']}
+ vswitch.add_flow(bridge, flow)
+ flow = {'table':'0', 'in_port':'4',
+ 'idle_timeout':'0', 'actions': ['output:2']}
+ vswitch.add_flow(bridge, flow)
+ elif 'vpp' in S.getValue('VSWITCH'):
+ phy_ports = vswitch.get_ports()
+ virt_port0 = 'memif1/0'
+ virt_port1 = 'memif2/0'
+ vswitch.add_connection(bridge, phy_ports[0],
+ virt_port0, None)
+ vswitch.add_connection(bridge, virt_port0,
+ phy_ports[0], None)
+ vswitch.add_connection(bridge, phy_ports[1],
+ virt_port1, None)
+ vswitch.add_connection(bridge, virt_port1,
+ phy_ports[1], None)
+
+
def _add_flows(self):
"""Add flows to the vswitch
"""
diff --git a/tools/collectors/cadvisor/__init__.py b/tools/collectors/cadvisor/__init__.py
new file mode 100755
index 00000000..235ab875
--- /dev/null
+++ b/tools/collectors/cadvisor/__init__.py
@@ -0,0 +1,17 @@
+# Copyright 2020 University Of Delhi.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Wrapper for cAdvisor as a collector
+"""
diff --git a/tools/collectors/cadvisor/cadvisor.py b/tools/collectors/cadvisor/cadvisor.py
new file mode 100644
index 00000000..de48cecd
--- /dev/null
+++ b/tools/collectors/cadvisor/cadvisor.py
@@ -0,0 +1,218 @@
+# Copyright 2020 University Of Delhi.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Collects container metrics from cAdvisor.
+Sends metrics to influxDB and also stores results locally.
+"""
+
+import subprocess
+import logging
+import os
+from collections import OrderedDict
+
+from tools.collectors.collector import collector
+from tools import tasks
+from conf import settings
+
+
+
+# inherit from collector.Icollector.
+class Cadvisor(collector.ICollector):
+ """A collector of container metrics based on cAdvisor
+
+ It starts cadvisor and collects metrics.
+ """
+
+ def __init__(self, results_dir, test_name):
+ """
+ Initialize collection of statistics
+ """
+ self._logger = logging.getLogger(__name__)
+ self.resultsdir = results_dir
+ self.testname = test_name
+ self._pid = 0
+ self._results = OrderedDict()
+ self._log = os.path.join(results_dir,
+ settings.getValue('LOG_FILE_CADVISOR') +
+ '_' + test_name + '.log')
+ self._logfile = 0
+
+
+ def start(self):
+ """
+ Starts collection of statistics by cAdvisor and stores them in:
+ 1. A file in the directory with test results
+ 2. The InfluxDB result container
+ """
+
+ # CMD options for cAdvisor
+ cmd = ['sudo', '/opt/cadvisor/cadvisor',
+ '-storage_driver='+settings.getValue('CADVISOR_STORAGE_DRIVER'),
+ '-storage_driver_host='+settings.getValue('CADVISOR_STORAGE_HOST'),
+ '-storage_driver_db='+settings.getValue('CADVISOR_DRIVER_DB'),
+ '-housekeeping_interval=0.5s',
+ '-storage_driver_buffer_duration=1s'
+ ]
+
+ self._logfile = open(self._log, 'a')
+
+ self._pid = subprocess.Popen(map(os.path.expanduser, cmd), stdout=self._logfile, bufsize=0)
+ self._logger.info('Starting cAdvisor')
+
+
+
+ def stop(self):
+ """
+ Stops collection of metrics by cAdvisor and stores statistic
+ summary for each monitored container into self._results dictionary
+ """
+ try:
+ subprocess.check_output(["pidof", "cadvisor"])
+ tasks.run_task(['sudo', 'pkill', '--signal', '2', 'cadvisor'],
+ self._logger, 'Stopping cAdvisor', True)
+ except subprocess.CalledProcessError:
+ self._logger.error('Failed to stop cAdvisor; the process may not exist')
+
+
+ self._logfile.close()
+ self._logger.info('cAdvisor log available at %s', self._log)
+
+ containers = settings.getValue('CADVISOR_CONTAINERS')
+ self._results = cadvisor_log_result(self._log, containers)
+
+
+ def get_results(self):
+ """Returns collected statistics.
+ """
+ return self._results
+
+ def print_results(self):
+ """Logs collected statistics.
+ """
+ for cnt in self._results:
+ logging.info("Container: %s", cnt)
+ for (key, value) in self._results[cnt].items():
+
+ postfix = ''
+
+ if key == 'cpu_cumulative_usage':
+ key = 'CPU_usage'
+ value = round(float(value) / 1000000000, 4)
+ postfix = '%'
+
+ if key in ['memory_usage', 'memory_working_set']:
+ value = round(float(value) / 1024 / 1024, 4)
+ postfix = 'MB'
+
+ if key in ['rx_bytes', 'tx_bytes']:
+ value = round(float(value) / 1024 / 1024, 4)
+ postfix = 'mBps'
+
+ logging.info(" Statistic: %s Value: %s %s",
+ str(key), str(value), postfix)
+
+
+def cadvisor_log_result(filename, containers):
+ """
+ Processes cAdvisor logfile and returns average results
+
+ :param filename: Name of cadvisor logfile
+ :param containers: List of container names
+
+ :returns: Result as average stats of Containers
+ """
+ result = OrderedDict()
+ previous = OrderedDict()
+ logfile = open(filename, 'r')
+ with logfile:
+ # for every line
+ for _, line in enumerate(logfile):
+ # skip lines having root '/' metrics
+ if line[0:7] == 'cName=/':
+ continue
+
+ # parse line into OrderedDict
+ tmp_res = parse_line(line)
+
+ cnt = tmp_res['cName']
+
+ # skip if cnt is not in container list
+ if cnt not in containers:
+ continue
+
+ # add metrics to result
+ if cnt not in result:
+ result[cnt] = tmp_res
+ previous[cnt] = tmp_res
+ result[cnt]['count'] = 1
+ else:
+ for field in tmp_res:
+
+ if field in ['rx_errors', 'tx_errors', 'memory_usage', 'memory_working_set']:
+ val = float(tmp_res[field])
+ elif field in ['cpu_cumulative_usage', 'rx_bytes', 'tx_bytes']:
+ val = float(tmp_res[field]) - float(previous[cnt][field])
+ else:
+ # discard remaining fields
+ try:
+ result[cnt].pop(field)
+ except KeyError:
+ continue
+ continue
+
+ result[cnt][field] = float(result[cnt][field]) + val
+
+ result[cnt]['count'] += 1
+ previous[cnt] = tmp_res
+
+ # calculate average results for containers
+ result = calculate_average(result)
+ return result
+
+
+def calculate_average(results):
+ """
+ Calculates average for container stats
+ """
+ for cnt in results:
+ for field in results[cnt]:
+ if field != 'count':
+ val = float(results[cnt][field])/results[cnt]['count']
+ results[cnt][field] = '{0:.2f}'.format(val)
+
+ results[cnt].pop('count')
+ #sort results
+ results[cnt] = OrderedDict(sorted(results[cnt].items()))
+
+ return results
+
+
+def parse_line(line):
+ """
+ Reads single line from cAdvisor logfile
+
+ :param line: single line as str
+
+ :returns: OrderedDict of line read
+ """
+ tmp_res = OrderedDict()
+ # split line into array of "key=value" metrics
+ metrics = line.split()
+ for metric in metrics:
+ key, value = metric.split('=')
+ tmp_res[key] = value
+
+ return tmp_res
diff --git a/tools/collectors/multicmd/__init__.py b/tools/collectors/multicmd/__init__.py
new file mode 100755
index 00000000..2ae2340f
--- /dev/null
+++ b/tools/collectors/multicmd/__init__.py
@@ -0,0 +1,17 @@
+# Copyright 2019 Spirent Communications.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Wrapper for multi-commands as a collector
+"""
diff --git a/tools/collectors/multicmd/multicmd.py b/tools/collectors/multicmd/multicmd.py
new file mode 100644
index 00000000..275a0693
--- /dev/null
+++ b/tools/collectors/multicmd/multicmd.py
@@ -0,0 +1,138 @@
+# Copyright 2019 Spirent Communications.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Collects information using various command line tools.
+"""
+
+import glob
+import logging
+import os
+from collections import OrderedDict
+from tools import tasks
+from tools.collectors.collector import collector
+from conf import settings
+
+class MultiCmd(collector.ICollector):
+ """ Multiple command-line controllers
+ collectd, prox, crond, filebeat
+ """
+ def __init__(self, results_dir, test_name):
+ """
+        Initialize the collectors
+ """
+ self.prox_home = settings.getValue('MC_PROX_HOME')
+ self.collectd_cmd = settings.getValue('MC_COLLECTD_CMD')
+ self.collectd_csv = settings.getValue('MC_COLLECTD_CSV')
+ self.prox_out = settings.getValue('MC_PROX_OUT')
+ self.prox_cmd = settings.getValue('MC_PROX_CMD')
+ self.cron_out = settings.getValue('MC_CRON_OUT')
+ self.logger = logging.getLogger(__name__)
+ self.results_dir = results_dir
+ self.collectd_pid = 0
+ self.prox_pid = 0
+ self.cleanup_collectd_metrics()
+        self.logger.debug('Multicmd data for %s', test_name)
+ # There should not be a file by name stop in prox_home folder
+ # Else Prox will start and stop immediately. This is a Hack to
+ # control prox-runrapid, which by default runs for specified duration.
+ filename = os.path.join(self.prox_home, 'stop')
+ if os.path.exists(filename):
+ tasks.run_task(['sudo', 'rm', filename],
+ self.logger, 'deleting stop')
+ self.results = OrderedDict()
+
+ def cleanup_collectd_metrics(self):
+ """
+        Clean up old or archived metrics
+ """
+ for name in glob.glob(os.path.join(self.collectd_csv, '*')):
+ tasks.run_task(['sudo', 'rm', '-rf', name], self.logger,
+ 'Cleaning up Metrics', True)
+
+ def start(self):
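+        """
+        Start all commands: collectd, PROX, crond and filebeat
+        """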
+ # Command-1: Start Collectd
+ self.collectd_pid = tasks.run_background_task(
+ ['sudo', self.collectd_cmd],
+            self.logger, 'Starting Collectd')
+
+ # Command-2: Start PROX
+ working_dir = os.getcwd()
+ if os.path.exists(self.prox_home):
+ os.chdir(self.prox_home)
+ self.prox_pid = tasks.run_background_task(['sudo', self.prox_cmd,
+ '--test', 'irq',
+ '--env', 'irq'],
+ self.logger,
+ 'Start PROX')
+ os.chdir(working_dir)
+ # Command-3: Start CROND
+ tasks.run_task(['sudo', 'systemctl', 'start', 'crond'],
+                       self.logger, 'Starting CROND', True)
+
+ # command-4: BEATS
+ tasks.run_task(['sudo', 'systemctl', 'start', 'filebeat'],
+ self.logger, 'Starting BEATS', True)
+
+ def stop(self):
+ """
+ Stop All commands
+ """
+ # Command-1: COLLECTD
+ tasks.terminate_task_subtree(self.collectd_pid, logger=self.logger)
+ tasks.run_task(['sudo', 'pkill', '--signal', '2', 'collectd'],
+ self.logger, 'Stopping Collectd', True)
+
+ # Backup the collectd-metrics for this test into a results folder
+ tasks.run_task(['sudo', 'cp', '-r', self.collectd_csv,
+ self.results_dir], self.logger,
+ 'Copying Collectd Results File', True)
+ self.cleanup_collectd_metrics()
+
+ # Command-2: PROX
+ filename = os.path.join(self.prox_home, 'stop')
+ if os.path.exists(self.prox_home):
+ tasks.run_task(['sudo', 'touch', filename],
+ self.logger, 'Stopping PROX', True)
+
+ outfile = os.path.join(self.prox_home, self.prox_out)
+ if os.path.exists(outfile):
+ tasks.run_task(['sudo', 'mv', outfile, self.results_dir],
+ self.logger, 'Moving PROX-OUT file', True)
+
+ # Command-3: CROND
+ tasks.run_task(['sudo', 'systemctl', 'stop', 'crond'],
+ self.logger, 'Stopping CROND', True)
+ if os.path.exists(self.cron_out):
+ tasks.run_task(['sudo', 'mv', self.cron_out, self.results_dir],
+ self.logger, 'Move Cron Logs', True)
+
+ # Command-4: BEATS
+ tasks.run_task(['sudo', 'systemctl', 'stop', 'filebeat'],
+ self.logger, 'Stopping BEATS', True)
+
+ def get_results(self):
+ """
+ Return results
+ """
+ return self.results
+
+ def print_results(self):
+ """
+ Print results
+ """
+ logging.info("Multicmd Output is not collected by VSPERF")
+ logging.info("Please refer to corresponding command's output")
diff --git a/tools/confgenwizard/__init__.py b/tools/confgenwizard/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tools/confgenwizard/__init__.py
diff --git a/tools/confgenwizard/nicinfo.py b/tools/confgenwizard/nicinfo.py
new file mode 100644
index 00000000..631b92c5
--- /dev/null
+++ b/tools/confgenwizard/nicinfo.py
@@ -0,0 +1,236 @@
+# Copyright 2019-2020 Spirent Communications.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Retrieve information from remote host.
+In this file, we retrieve only NIC PCI details
+"""
+
+from __future__ import print_function
+import getpass
+import sys
+import subprocess
+import os
+from os.path import exists
+from stat import S_ISDIR
+import paramiko
+
+# The PCI device class for ETHERNET devices
+ETHERNET_CLASS = "0200"
+LSPCI_PATH = '/usr/bin/lspci'
+RECV_BYTES = 4096
+ADVANCED = True
+
+
+#pylint: disable=too-many-instance-attributes
+class RemoteInfo(object):
+ """
+ Class to extract information from a remote system
+ """
+
+ def __init__(self, host, username, password):
+ """
+ Perform Initialization
+ """
+ # Dict of ethernet devices present. Dictionary indexed by PCI address.
+ # Each device within this is itself a dictionary of device properties
+ self.nic_devices = {}
+        if host == 'local':
+            self.local = True
+        else:
+            self.local = False
+            # Assuming port 22 for SSH; only remote hosts need a session.
+            self.port = 22
+            self.hostname = host
+            self.password = password
+            self.username = username
+            self.client = paramiko.Transport((self.hostname, self.port))
+            self.client.connect(username=self.username,
+                                password=self.password)
+            self.session = self.client.open_channel(kind='session')
+            self.session.get_pty()
+            self.sftp = paramiko.SFTPClient.from_transport(self.client)
+
+ def sftp_exists(self, path):
+ """
+        Check if a remote file exists
+ """
+ try:
+ self.sftp.stat(path)
+ return True
+ except IOError:
+ return False
+
+ def sft_listdir(self, path):
+ """
+        List regular files at a path on the remote host
+ """
+ files = []
+ for fil in self.sftp.listdir_attr(path):
+ if not S_ISDIR(fil.st_mode):
+ files.append(fil.filename)
+ return files
+
+ def is_connected(self):
+ """
+ Check if session is connected.
+ """
+ return self.client.is_active()
+
+ def new_channel(self):
+ """
+        For every command a new session is set up
+ """
+ if not self.is_connected():
+ self.client = paramiko.Transport((self.hostname, self.port))
+ self.client.connect(username=self.username,
+ password=self.password)
+ self.session = self.client.open_channel(kind='session')
+
+    # This is roughly compatible with the check_output function from the
+    # subprocess module, working both locally and over the SSH session.
+ def check_output(self, args, stderr=None):
+ '''
+ Run a command and capture its output
+ '''
+ stdout_data = []
+ stderr_data = []
+ if self.local:
+ return subprocess.Popen(args, stdout=subprocess.PIPE,
+ stderr=stderr,
+ universal_newlines=True).communicate()[0]
+ else:
+ self.new_channel()
+ separator = ' '
+ command = separator.join(args)
+ # self.session.get_pty()
+ self.session.exec_command(command)
+ while True:
+ if self.session.recv_ready():
+ stdout_data.append(self.session.recv(RECV_BYTES))
+ if self.session.recv_stderr_ready():
+ stderr_data.append(self.session.recv_stderr(RECV_BYTES))
+ if self.session.exit_status_ready():
+ break
+ if stdout_data:
+ return b"".join(stdout_data)
+ return b"".join(stderr_data)
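+        # note: the local branch returns str (universal_newlines=True) while
+        # the remote branch returns bytes; callers decode based on self.local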
+
+ def get_pci_details(self, dev_id):
+ '''
+ This function gets additional details for a PCI device
+ '''
+ device = {}
+
+ extra_info = self.check_output([LSPCI_PATH,
+ "-vmmks", dev_id]).splitlines()
+
+ # parse lspci details
+ for line in extra_info:
+ if not line:
+ continue
+ if self.local:
+ name, value = line.split("\t", 1)
+ else:
+ name, value = line.decode().split("\t", 1)
+ name = name.strip(":") + "_str"
+ device[name] = value
+ # check for a unix interface name
+ sys_path = "/sys/bus/pci/devices/%s/net/" % dev_id
+ device["Interface"] = ""
+ if self.local:
+ if exists(sys_path):
+ device["Interface"] = ",".join(os.listdir(sys_path))
+ else:
+ if self.sftp_exists(sys_path):
+ device["Interface"] = ",".join(self.sft_listdir(sys_path))
+
+ # check if a port is used for ssh connection
+ device["Ssh_if"] = False
+ device["Active"] = ""
+
+ return device
+
+ def get_nic_details(self):
+ '''
+ This function populates the "devices" dictionary. The keys used are
+ the pci addresses (domain:bus:slot.func). The values are themselves
+ dictionaries - one for each NIC.
+ '''
+ devinfos = []
+ # first loop through and read details for all devices
+ # request machine readable format, with numeric IDs
+ dev = {}
+ dev_lines = self.check_output([LSPCI_PATH, "-Dvmmn"]).splitlines()
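+        # lspci -vmm output is one "Tag:\tvalue" pair per line, with a blank
+        # line terminating each device record (handled by the branch below)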
+ for dev_line in dev_lines:
+ if not dev_line:
+ if dev["Class"] == ETHERNET_CLASS:
+ # convert device and vendor ids to numbers, then add to
+ # global
+ dev["Vendor"] = int(dev["Vendor"], 16)
+ dev["Device"] = int(dev["Device"], 16)
+ self.nic_devices[dev["Slot"]] = dict(
+ dev) # use dict to make copy of dev
+ else:
+ if self.local:
+ name, value = dev_line.split('\t', 1)
+ else:
+ name, value = dev_line.decode().split("\t", 1)
+ dev[name.rstrip(":")] = value
+
+ # based on the basic info, get extended text details
+ for dev in self.nic_devices:
+ # get additional info and add it to existing data
+ if ADVANCED:
+ self.nic_devices[dev].update(self.get_pci_details(dev).items())
+ devinfos.append(self.nic_devices[dev])
+ return devinfos
+
+ def dev_id_from_dev_name(self, dev_name):
+ '''
+ Take a device "name" - a string passed in by user to identify a NIC
+        device, and determine the device id - i.e. the domain:bus:slot.func -
+        for it, which can then be used to index into the devices array
+ '''
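+        # e.g. (illustrative) "05:00.0" resolves to "0000:05:00.0", and an
+        # interface name such as "eth1" resolves to that NIC's PCI slot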
+ # check if it's already a suitable index
+ if dev_name in self.nic_devices:
+ return dev_name
+ # check if it's an index just missing the domain part
+ elif "0000:" + dev_name in self.nic_devices:
+ return "0000:" + dev_name
+ else:
+ # check if it's an interface name, e.g. eth1
+ for dev in self.nic_devices:
+ if dev_name in self.nic_devices[dev]["Interface"].split(","):
+ return self.nic_devices[dev]["Slot"]
+ # if nothing else matches - error
+ print("Unknown device: %s. "
+ "Please specify device in \"bus:slot.func\" format" % dev_name)
+ sys.exit(1)
+
+
+def main():
+ '''program main function'''
+ host = input("Enter Host IP: ")
+ username = input("Enter User Name: ")
+    pwd = getpass.getpass("Enter Password: ")  # do not echo the password
+ rhi = RemoteInfo(host, username, pwd)
+ dev_list = rhi.get_nic_details()
+ for dev in dev_list:
+ print(dev["Slot"])
+
+
+if __name__ == "__main__":
+ main()
diff --git a/tools/confgenwizard/vsperfwiz.py b/tools/confgenwizard/vsperfwiz.py
new file mode 100644
index 00000000..48a2d504
--- /dev/null
+++ b/tools/confgenwizard/vsperfwiz.py
@@ -0,0 +1,736 @@
+# Copyright 2019-2020 Spirent Communications.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Tool to create configuration file for VSPERF
+"""
+
+from __future__ import print_function
+import signal
+import sys
+from pypsi import wizard as wiz
+from pypsi.shell import Shell
+import nicinfo
+
+
+#pylint: disable=too-many-instance-attributes
+class VsperfWizard(object):
+ """
+ Class to create wizards
+ """
+
+ def __init__(self):
+ """
+ Perform Initialization.
+ """
+ self.shell = Shell()
+ self.vpp_values = {}
+ self.dut_values = {}
+ self.main_values = {}
+ self.guest_values = {}
+ self.ovs_values = {}
+ self.ixnet_values = {}
+ self.stc_values = {}
+ self.trex_values = {}
+ self.traffic_values = {}
+ self.wiz_dut = None
+ self.wiz_ixnet = None
+ self.wiz_stc = None
+ self.wiz_ovs = None
+ self.wiz_traffic = None
+ self.wiz_main = None
+ self.wiz_guest = None
+ self.wiz_trex = None
+ self.wiz_vpp = None
+ self.rhi = None
+ self.devices = ''
+ self.devs = {}
+
+
+
+######## Support Functions ############################
+ def get_nicpcis(self):
+ """
+ Get NIC information from Remote Host
+ """
+ self.rhi = nicinfo.RemoteInfo(self.dut_values['dutip'],
+ self.dut_values['dutuname'],
+ self.dut_values['dutpwd'])
+ dev_list = self.rhi.get_nic_details()
+ index = 0
+ for dev in dev_list:
+ self.devices += str("(" + str(index) + ")" + " "
+ + str(dev["Slot"]) + ', ')
+ self.devs[str(index)] = str(dev["Slot"])
+ index = index + 1
+
+ def get_nics_string(self):
+ """
+        Create the WHITELIST_NICS string in the format the configuration expects
+ """
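+        # e.g. (illustrative) input "0,1" with self.devs of
+        # {'0': '0000:05:00.0', '1': '0000:05:00.1'} yields
+        # "'0000:05:00.0' ,'0000:05:00.1' "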
+ indexes = self.main_values['nics'].split(',')
+ wlns = ''
+ for index in indexes:
+ wlns += "'" + self.devs[index] + "' ,"
+ print(wlns)
+ return wlns.rstrip(',')
+
+
+############# All the Wizards ##################################
+
+ def dut_wizard(self):
+ """
+ Wizard to collect DUT information
+ """
+ self.wiz_dut = wiz.PromptWizard(
+ name="VSPERF DUT Info Collection",
+ description="This collects DUT info",
+ steps=(
+ # The list of input prompts to ask the user.
+ wiz.WizardStep(
+ # ID where the value will be stored
+ id="dutip",
+ # Display name
+ name="Enter the IP address of the DUT [local]",
+ # Help message
+ help="IP address of the DUT host",
+ # List of validators to run on the input
+ validators=(wiz.required_validator)
+ ),
+ wiz.WizardStep(
+ # ID where the value will be stored
+ id="dutuname",
+ # Display name
+ name="Enter the username to connect to DUT",
+ # Help message
+ help="Username for DUT host",
+ # List of validators to run on the input
+ validators=(wiz.required_validator)
+ ),
+ wiz.WizardStep(
+ # ID where the value will be stored
+ id="dutpwd",
+ # Display name
+ name="Enter the Password to connect to DUT",
+ # Help message
+ help="Password for the DUT host",
+ # List of validators to run on the input
+ validators=(wiz.required_validator)
+ ),
+ )
+ )
+
+ def main_wizard(self):
+ """
+ The Main Wizard
+ """
+ # First get the nics.
+ self.get_nicpcis()
+ self.wiz_main = wiz.PromptWizard(
+ name="VSPERF Common Configuration",
+ description="This configuration covers Basic inputs",
+ steps=(
+ # The list of input prompts to ask the user.
+ wiz.WizardStep(
+ # ID where the value will be stored
+ id="vswitch",
+ # Display name
+ name="VSwitch to use? - OVS or VPP?",
+ # Help message
+ help=" Enter the vswitch to use - either OVS or VPP",
+ # List of validators to run on the input
+ default='OVS'
+ ),
+ wiz.WizardStep(
+ id='nics',
+ name="NICs to Whitelist: " + self.devices,
+ help="Enter the list (separated by comma) of PCI-IDs",
+ validators=(wiz.required_validator),
+ ),
+ wiz.WizardStep(
+ id='tgen',
+                    name=("What trafficgen to use: [TestCenter," +
+                          " IxNet, Moongen, Trex]?"),
+ help=("Enter the trafficgen to use -" +
+ " TestCenter, IxNet, Moongen, Trex"),
+ validators=(wiz.required_validator),
+ default="Trex"
+ ),
+ wiz.WizardStep(
+ id='guest',
+ name=("Is Scenario either PVP or PVVP?"),
+                    help=("This is to capture guest configuration"),
+ validators=(wiz.required_validator),
+ default="YES"
+ )
+ )
+ )
+
+ def traffic_wizard(self):
+ """
+        Wizard to collect Traffic Info.
+ """
+ self.wiz_traffic = wiz.PromptWizard(
+ name="Traffic Configuration",
+            description="This configuration covers Traffic specific inputs",
+ steps=(
+ wiz.WizardStep(
+ id='pktsizes',
+ name='Enter the Packet Sizes - comma separated',
+ help="Allowed values: (64,128,256,512,1024,1280,1518)",
+ validators=(wiz.required_validator)
+ ),
+ wiz.WizardStep(
+ id='duration',
+ name='Enter the Duration (in secs) for the traffic',
+ help="Enter for how long each iteration should be",
+ default='60',
+ ),
+ # wiz.WizardStep(
+ # id='multistream',
+ # name='Multistream preferred?',
+ # help="Multistream preference - Yes or No",
+ # default='No',
+ # validators=(wiz.required_validator)
+ #),
+ wiz.WizardStep(
+ id='count',
+ name='Number of flows?',
+                    help="Enter the number of flows (2 to 1,000,000)",
+ default='2',
+ # validators=(wiz.required_validator)
+ ),
+ )
+ )
+
+ def ovs_wizard(self):
+ """
+ Wizard to collect OVS Information
+ """
+ self.wiz_ovs = wiz.PromptWizard(
+ name="Vswitch Configuration",
+ description="Specific configurations of the virtual-Switch",
+ steps=(
+ wiz.WizardStep(
+ id='type',
+ name='OVS Type? [Vanilla or DPDK]',
+ help='Enter either Vanilla or DPDK',
+ default='Vanilla',
+ ),
+ wiz.WizardStep(
+ id='mask',
+ name='Enter the CPU Mask for OVS to use',
+ help='Mask for OVS PMDs',
+ default='30',
+ ),
+ )
+ )
+
+ def vpp_wizard(self):
+ """
+ Wizard to collect VPP configuration
+ """
+ self.wiz_vpp = wiz.PromptWizard(
+ name="Vswitch Configuration",
+ description="Specific configurations of the virtual-Switch",
+ steps=(
+ wiz.WizardStep(
+ id='mode',
+ name='L2 Connection mode xconnect|bridge|l2patch to use?',
+ help='Select the l2 connection mode',
+ default='xconnect',
+ ),
+ )
+ )
+
+ def trex_wizard(self):
+ """
+ Wizard to collect Trex configuration
+ """
+ self.wiz_trex = wiz.PromptWizard(
+ name="Trex Traffic Generator Configuration",
+ description="Specific configurations of Trex TGen",
+ steps=(
+ wiz.WizardStep(
+ id='hostip',
+ name='What is IP address of the T-Rex Host?',
+ help='Enter the IP address of host where Trex is running',
+ validators=(wiz.required_validator)
+ ),
+ wiz.WizardStep(
+ id='user',
+                    name='What is the Username of the T-Rex Host?',
+ help='Enter the Username of host where Trex is running',
+ default='root',
+ ),
+ wiz.WizardStep(
+ id='bdir',
+                    name='What is the directory where the T-Rex binary resides?',
+ help='Enter the Location where Trex Binary is',
+ default='/root/trex_2.37/scripts/',
+ ),
+ wiz.WizardStep(
+ id='pci1',
+ name='What is PCI address of the port-1?',
+ help='Enter the PCI address of Data port 1',
+ validators=(wiz.required_validator)
+ ),
+ wiz.WizardStep(
+ id='pci2',
+ name='What is PCI address of the port-2?',
+ help='Enter the PCI address of Data port 2',
+ validators=(wiz.required_validator)
+ ),
+ wiz.WizardStep(
+ id='rate',
+ name='What is Line rate (in Gbps) of the ports?',
+ help='Enter the linerate of the ports',
+ default='10',
+ ),
+ wiz.WizardStep(
+ id='prom',
+ name='T-Rex Promiscuous enabled?',
+ help='Do you want to enable the Promiscuous mode?',
+ default='False',
+ ),
+ wiz.WizardStep(
+ id='lat',
+                    name='What is the Trex Latency PPS?',
+ help='Enter the Latency value in PPS',
+ default='1000',
+ ),
+ wiz.WizardStep(
+ id='bslv',
+ name='Do you want Binary Loss Verification Enabled?',
+ help='Enter True if you want it to be enabled.',
+ default='True',
+ ),
+ wiz.WizardStep(
+ id='maxrep',
+                    name='If Loss Verification is enabled, what is the max repetition?',
+                    help='If BSLV is enabled, what is the max repetition value?',
+ default='2',
+ ),
+ )
+ )
+
+ def stc_wizard(self):
+ """
+ Wizard to collect STC configuration
+ """
+ self.wiz_stc = wiz.PromptWizard(
+ name="Spirent STC Traffic Generator Configuration",
+ description="Specific configurations of Spirent-STC TGen",
+ steps=(
+ wiz.WizardStep(
+ id='lab',
+ name='Lab Server IP?',
+ help='Enter the IP of Lab Server',
+ default='10.10.120.244',
+ ),
+ wiz.WizardStep(
+ id='lisc',
+ name='License Server IP?',
+ help='Enter the IP of the License Server',
+ default='10.10.120.246',
+ ),
+ wiz.WizardStep(
+ id='eaddr',
+ name='East Port Chassis Address?',
+ help='IP address of the East-Port',
+ default='10.10.120.245',
+ ),
+ wiz.WizardStep(
+ id='eslot',
+ name='East Port Slot Number',
+ help='Slot Number of the East Port',
+ default='1',
+ ),
+ wiz.WizardStep(
+ id='eport',
+ name='Port Number of the East-Port',
+ help='Port Number for the East Port',
+ default='1',
+ ),
+ wiz.WizardStep(
+ id='eint',
+ name='East port Interface Address',
+ help='IP to use for East Port?',
+ default='192.85.1.3',
+ ),
+ wiz.WizardStep(
+ id='egw',
+ name='Gateway Address for East Port',
+ help='IP of the East-Port Gateway',
+ default='192.85.1.103',
+ ),
+ wiz.WizardStep(
+ id='waddr',
+ name='West Port Chassis Address?',
+ help='IP address of the West-Port',
+ default='10.10.120.245',
+ ),
+ wiz.WizardStep(
+ id='wslot',
+ name='West Port Slot Number',
+ help='Slot Number of the West Port',
+ default='1',
+ ),
+ wiz.WizardStep(
+ id='wport',
+ name='Port Number of the West-Port',
+ help='Port Number for the West Port',
+ default='2',
+ ),
+ wiz.WizardStep(
+ id='wint',
+ name='West port Interface Address',
+ help='IP to use for West Port?',
+ default='192.85.1.103',
+ ),
+ wiz.WizardStep(
+ id='wgw',
+ name='Gateway Address for West Port',
+ help='IP of the West-Port Gateway',
+ default='192.85.1.3',
+ ),
+ wiz.WizardStep(
+ id='script',
+ name='Name of the Script to use for RFC2544 Tests?',
+ help='Script Name to use for RFC 2544 Tests.',
+ default='testcenter-rfc2544-rest.py',
+ ),
+ )
+ )
+
+ def ixnet_wizard(self):
+ """
+ Wizard to collect ixnet configuration
+ """
+ self.wiz_ixnet = wiz.PromptWizard(
+ name="Ixia IxNet Traffic Generator Configuration",
+ description="Specific configurations of Ixia-Ixnet TGen",
+ steps=(
+ wiz.WizardStep(
+ id='card',
+ name='Card Number?',
+ help='Chassis Card Number',
+ default='1',
+ ),
+ wiz.WizardStep(
+ id='port1',
+ name='Port-1 Number?',
+ help='Chassis Port-1 Number',
+ default='5',
+ ),
+ wiz.WizardStep(
+ id='port2',
+ name='Port-2 Number?',
+ help='Chassis Port-2 Number',
+ default='6',
+ ),
+ wiz.WizardStep(
+ id='libp1',
+ name='IXIA Library path?',
+ help='Library path of Ixia',
+ default='/opt/ixnet/ixos-api/8.01.0.2/lib/ixTcl1.0',
+ ),
+ wiz.WizardStep(
+ id='libp2',
+ name='IXNET Library Path',
+ help='Library Path for the IXNET',
+ default='/opt/ixnet/ixnetwork/8.01.1029.6/lib/IxTclNetwork',
+ ),
+ wiz.WizardStep(
+ id='host',
+                    name='IP of the Chassis?',
+ help='Chassis IP',
+ default='10.10.50.6',
+ ),
+ wiz.WizardStep(
+ id='machine',
+ name='IP of the API Server?',
+ help='API Server IP ',
+ default='10.10.120.6',
+ ),
+ wiz.WizardStep(
+ id='port',
+ name='Port of the API Server?',
+ help='API Server Port',
+ default='9127',
+ ),
+ wiz.WizardStep(
+ id='user',
+ name='Username for the API server?',
+ help='Username to use to connect to API Server',
+ default='vsperf_sandbox',
+ ),
+ wiz.WizardStep(
+ id='tdir',
+ name='Path for Results Directory on API Server',
+ help='Results Path on API Server',
+ default='c:/ixia_results/vsperf_sandbox',
+ ),
+ wiz.WizardStep(
+ id='rdir',
+ name='Path for Results directory on DUT',
+ help='DUT Results Path',
+ default='/mnt/ixia_results/vsperf_sandbox',
+ ),
+ )
+ )
+
+ def guest_wizard(self):
+ """
+ Wizard to collect guest configuration
+ """
+ self.wiz_guest = wiz.PromptWizard(
+ name="Guest Configuration for PVP and PVVP Scenarios",
+ description="Guest configurations",
+ steps=(
+ wiz.WizardStep(
+ id='image',
+                    name='Enter the Path for the image',
+ help='Complete path where image resides',
+ default='/home/opnfv/vloop-vnf-ubuntu-14.04_20160823.qcow2',
+ ),
+ wiz.WizardStep(
+ id='mode',
+ name='Enter the forwarding mode to use',
+ help='one of io|mac|mac_retry|macswap|flowgen|rxonly|....',
+ default='io',
+ ),
+ wiz.WizardStep(
+ id='smp',
+ name='Number of SMP to use?',
+ help='While Spawning the guest, how many SMPs to use?',
+ default='2',
+ ),
+ wiz.WizardStep(
+ id='cores',
+ name="Guest Core binding. For 2 cores a & b: ['a', 'b']",
+ help='Enter the cores to use in the specified format',
+ default="['8', '9']",
+ ),
+ )
+ )
+
+############### All the Run Operations ######################
+
+ def run_dutwiz(self):
+ """
+ Run the DUT wizard
+ """
+ self.dut_wizard()
+ self.dut_values = self.wiz_dut.run(self.shell)
+
+ def run_mainwiz(self):
+ """
+ Run the Main wizard
+ """
+ self.main_wizard()
+ self.main_values = self.wiz_main.run(self.shell)
+ print(self.main_values['nics'])
+
+ def run_vswitchwiz(self):
+ """
+ Run the vSwitch wizard
+ """
+ if self.main_values['vswitch'] == "OVS":
+ self.ovs_wizard()
+ self.ovs_values = self.wiz_ovs.run(self.shell)
+ elif self.main_values['vswitch'] == 'VPP':
+ self.vpp_wizard()
+ self.vpp_values = self.wiz_vpp.run(self.shell)
+
+ def run_trafficwiz(self):
+ """
+ Run the Traffic wizard
+ """
+ self.traffic_wizard()
+ self.traffic_values = self.wiz_traffic.run(self.shell)
+
+ def run_tgenwiz(self):
+ """
+ Run the Tgen wizard
+ """
+ if self.main_values['tgen'] == "Trex":
+ self.trex_wizard()
+ self.trex_values = self.wiz_trex.run(self.shell)
+ elif self.main_values['tgen'] == "TestCenter":
+ self.stc_wizard()
+ self.stc_values = self.wiz_stc.run(self.shell)
+ elif self.main_values['tgen'] == 'IxNet':
+ self.ixnet_wizard()
+ self.ixnet_values = self.wiz_ixnet.run(self.shell)
+
+ def run_guestwiz(self):
+ """
+ Run the Guest wizard
+ """
+ if self.main_values['guest'] == 'YES':
+ self.guest_wizard()
+ self.guest_values = self.wiz_guest.run(self.shell)
+
+################ Prepare Configuration File ##################
+ #pylint: disable=too-many-statements
+ def prepare_conffile(self):
+ """
+ Create the Configuration file that can be used with VSPERF
+ """
+ with open("./vsperf.conf", 'w+') as ofile:
+ ofile.write("#### This file is Automatically Created ####\n\n")
+ if self.main_values['vswitch'] == "OVS":
+ if self.ovs_values['type'] == "Vanilla":
+ ofile.write("VSWITCH = 'OvsVanilla'\n")
+ else:
+ ofile.write("VSWITCH = 'OvsDpdkVhost'\n")
+ ofile.write("VSWITCH_PMD_CPU_MASK = '" +
+ self.ovs_values['mask'] + "'\n")
+ else:
+ ofile.write("VSWITCH = 'VppDpdkVhost'\n")
+ ofile.write("VSWITCH_VPP_L2_CONNECT_MODE = '" +
+ self.vpp_values['mode'] + "'\n")
+ nics = self.get_nics_string()
+ wln = "WHITELIST_NICS = [" + nics + "]" + "\n"
+ ofile.write(wln)
+ ofile.write("RTE_TARGET = 'x86_64-native-linuxapp-gcc'")
+ ofile.write("\n")
+ ofile.write("TRAFFICGEN = " + "'" + self.main_values['tgen'] + "'")
+ ofile.write("\n")
+ ofile.write("VSWITCH_BRIDGE_NAME = 'vsperf-br0'")
+ ofile.write("\n")
+ ofile.write("TRAFFICGEN_DURATION = " +
+ self.traffic_values['duration'] + "\n")
+ ofile.write("TRAFFICGEN_LOSSRATE = 0" + "\n")
+ ofile.write("TRAFFICGEN_PKT_SIZES = (" +
+ self.traffic_values['pktsizes'] +
+ ")" + "\n")
+ if self.main_values['tgen'] == "Trex":
+ ofile.write("TRAFFICGEN_TREX_HOST_IP_ADDR = '" +
+ self.trex_values['hostip'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_TREX_USER = '" +
+ self.trex_values['user'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_TREX_BASE_DIR = '" +
+ self.trex_values['bdir'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_TREX_LINE_SPEED_GBPS = '" +
+ self.trex_values['rate'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_TREX_PORT1 = '" +
+ self.trex_values['pci1'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_TREX_PORT2 = '" +
+ self.trex_values['pci2'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_TREX_PROMISCUOUS = " +
+ self.trex_values['prom'] + "\n")
+ ofile.write("TRAFFICGEN_TREX_LATENCY_PPS = " +
+ self.trex_values['lat'] + "\n")
+                ofile.write("TRAFFICGEN_TREX_RFC2544_BINARY_SEARCH_LOSS_VERIFICATION = " +
+                            self.trex_values['bslv'] + "\n")
+ ofile.write("TRAFFICGEN_TREX_MAX_REPEAT = " +
+ self.trex_values['maxrep'] + "\n")
+ elif self.main_values['tgen'] == "TestCenter":
+ ofile.write("TRAFFICGEN_STC_LAB_SERVER_ADDR = '" +
+ self.stc_values['lab'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_STC_LICENSE_SERVER_ADDR = '" +
+ self.stc_values['lisc'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_STC_EAST_CHASSIS_ADDR = '" +
+ self.stc_values['eaddr'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_STC_EAST_SLOT_NUM = '" +
+ self.stc_values['eslot'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_STC_EAST_PORT_NUM = '" +
+ self.stc_values['eport'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_STC_EAST_INTF_ADDR = '" +
+ self.stc_values['eint'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_STC_EAST_INTF_GATEWAY_ADDR = '" +
+ self.stc_values['egw'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_STC_WEST_CHASSIS_ADDR = '" +
+ self.stc_values['waddr'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_STC_WEST_SLOT_NUM = '" +
+ self.stc_values['wslot'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_STC_WEST_PORT_NUM = '" +
+ self.stc_values['wport'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_STC_WEST_INTF_ADDR = '" +
+ self.stc_values['wint'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_STC_WEST_INTF_GATEWAY_ADDR = '" +
+ self.stc_values['wgw'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_STC_RFC2544_TPUT_TEST_FILE_NAME = '" +
+ self.stc_values['script'] + "'" + "\n")
+ elif self.main_values['tgen'] == 'IxNet':
+ print("IXIA Trafficgen")
+ # Ixia/IxNet configuration
+ ofile.write("TRAFFICGEN_IXIA_CARD = '" +
+ self.ixnet_values['card'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_IXIA_PORT1 = '" +
+ self.ixnet_values['port1'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_IXIA_PORT2 = '" +
+ self.ixnet_values['port2'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_IXIA_LIB_PATH = '" +
+ self.ixnet_values['libp1'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_IXNET_LIB_PATH = '" +
+ self.ixnet_values['libp2'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_IXIA_HOST = '" +
+ self.ixnet_values['host'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_IXNET_MACHINE = '" +
+ self.ixnet_values['machine'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_IXNET_PORT = '" +
+ self.ixnet_values['port'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_IXNET_USER = '" +
+ self.ixnet_values['user'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_IXNET_TESTER_RESULT_DIR = '" +
+ self.ixnet_values['tdir'] + "'" + "\n")
+ ofile.write("TRAFFICGEN_IXNET_DUT_RESULT_DIR = '" +
+ self.ixnet_values['rdir'] + "'" + "\n")
+ if self.main_values['guest'] == 'YES':
+ ofile.write("GUEST_IMAGE = ['" +
+ self.guest_values['image'] + "']" + "\n")
+ ofile.write("GUEST_TESTPMD_FWD_MODE = ['" +
+ self.guest_values['mode'] + "']" + "\n")
+ ofile.write("GUEST_SMP = ['" +
+ self.guest_values['smp'] + "']" + "\n")
+ ofile.write("GUEST_CORE_BINDING = [" +
+ self.guest_values['cores'] + ",]" + "\n")
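+        # The generated vsperf.conf then holds lines such as (illustrative):
+        #   VSWITCH = 'OvsDpdkVhost'
+        #   WHITELIST_NICS = ['0000:05:00.0' ,'0000:05:00.1']
+        #   TRAFFICGEN = 'Trex'
+        #   TRAFFICGEN_PKT_SIZES = (64,128)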
+
+
+def signal_handler(signum, frame):
+ """
+ Signal Handler
+ """
+ print("\n You interrupted, No File will be generated!")
+ print(signum, frame)
+ sys.exit(0)
+
+
+def main():
+ """
+ The Main Function
+ """
+ try:
+ vwiz = VsperfWizard()
+ vwiz.run_dutwiz()
+ vwiz.run_mainwiz()
+ vwiz.run_vswitchwiz()
+ vwiz.run_trafficwiz()
+ vwiz.run_tgenwiz()
+ vwiz.run_guestwiz()
+ vwiz.prepare_conffile()
+ except (KeyboardInterrupt, MemoryError):
+        print("Some Error Occurred, No file will be generated!")
+
+    print("Thanks for using the VSPERF-WIZARD. Please look for the vsperf.conf " +
+          "file in the current folder")
+
+
+if __name__ == "__main__":
+ signal.signal(signal.SIGINT, signal_handler)
+ main()
diff --git a/tools/docker/client/__init__.py b/tools/docker/client/__init__.py
new file mode 100644
index 00000000..ad0ebec3
--- /dev/null
+++ b/tools/docker/client/__init__.py
@@ -0,0 +1 @@
+#### Empty
diff --git a/tools/docker/client/vsperf_client.py b/tools/docker/client/vsperf_client.py
new file mode 100644
index 00000000..2a3e509f
--- /dev/null
+++ b/tools/docker/client/vsperf_client.py
@@ -0,0 +1,771 @@
+"""Deploy : vsperf_deploy_client"""
+#pylint: disable=import-error
+
+import configparser
+import sys
+from pathlib import Path
+
+
+import grpc
+from proto import vsperf_pb2
+from proto import vsperf_pb2_grpc
+
+CHUNK_SIZE = 1024 * 1024 # 1MB
+
+
+HEADER = r"""
+ _ _ ___ ____ ____ ____ ____ ___ __ ____ ____ _ _ ____
+( \/ )/ __)( _ \( ___)( _ \( ___) / __)( ) (_ _)( ___)( \( )(_ _)
+ \ / \__ \ )___/ )__) ) / )__) ( (__ )(__ _)(_ )__) ) ( )(
+ \/ (___/(__) (____)(_)\_)(__) \___)(____)(____)(____)(_)\_) (__)
+"""
+
+COLORS = {
+ 'blue': '\033[94m',
+ 'pink': '\033[95m',
+ 'green': '\033[92m',
+}
+
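+# module-level connection flags, flipped to 1 once the DUT-host and
+# TGen-host connections have been established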
+DUT_CHECK = 0
+TGEN_CHECK = 0
+
+def colorize(string, color):
+ """Colorized HEADER"""
+ if color not in COLORS:
+ return string
+ return COLORS[color] + string + '\033[0m'
+
+
+class VsperfClient():
+ """
+    This class represents the VSPERF-client.
+ It talks to vsperf-docker to perform installation, configuration and
+ test-execution
+ """
+ # pylint: disable=R0904,no-else-break
+ # pylint: disable=W0603,invalid-name
+ # pylint: disable=R1710
+ def __init__(self):
+ """read vsperfclient.conf"""
+ self.cfp = 'vsperfclient.conf'
+ self.config = configparser.RawConfigParser()
+ self.config.read(self.cfp)
+ self.stub = None
+ self.dut_check = 0
+ self.tgen_check = 0
+
+ def get_mode(self):
+ """read the mode for the client"""
+ return self.config.get('Mode', 'mode')
+
+ def get_deploy_channel_info(self):
+ """get the channel data"""
+ return (self.config.get('DeployServer', 'ip'),
+ self.config.get('DeployServer', 'port'))
+
+ def get_test_channel_info(self):
+        """get the channel data for the test controller"""
+ return (self.config.get('TestServer', 'ip'),
+ self.config.get('TestServer', 'port'))
+
+ def create_stub(self, channel):
+ """create stub to talk to controller"""
+ self.stub = vsperf_pb2_grpc.ControllerStub(channel)
+
+ def host_connect(self):
+        """provide dut-host credentials to the controller"""
+ global DUT_CHECK
+ hostinfo = vsperf_pb2.HostInfo(ip=self.config.get('Host', 'ip'),
+ uname=self.config.get('Host', 'uname'),
+ pwd=self.config.get('Host', 'pwd'))
+ connect_reply = self.stub.HostConnect(hostinfo)
+ DUT_CHECK = 1
+ print(connect_reply.message)
+
+ def tgen_connect(self):
+ """provide tgen-host credential to controller"""
+ global TGEN_CHECK
+ tgeninfo = vsperf_pb2.HostInfo(ip=self.config.get('TGen', 'ip'),
+ uname=self.config.get('TGen', 'uname'),
+ pwd=self.config.get('TGen', 'pwd'))
+ connect_reply = self.stub.TGenHostConnect(tgeninfo)
+ TGEN_CHECK = 1
+ print(connect_reply.message)
+
+ def host_connect_both(self):
+        """provide dut-host credentials to the controller"""
+ global DUT_CHECK
+ hostinfo = vsperf_pb2.HostInfo(ip=self.config.get('Host', 'ip'),
+ uname=self.config.get('Host', 'uname'),
+ pwd=self.config.get('Host', 'pwd'))
+ connect_reply = self.stub.HostConnect(hostinfo)
+ client = VsperfClient()
+ client.automatically_test_dut_connect()
+ DUT_CHECK = 1
+ print(connect_reply.message)
+
+ def tgen_connect_both(self):
+ """provide tgen-host credential to controller"""
+ global TGEN_CHECK
+ tgeninfo = vsperf_pb2.HostInfo(ip=self.config.get('TGen', 'ip'),
+ uname=self.config.get('TGen', 'uname'),
+ pwd=self.config.get('TGen', 'pwd'))
+ connect_reply = self.stub.TGenHostConnect(tgeninfo)
+ TGEN_CHECK = 1
+ client = VsperfClient()
+ client.automatically_test_tgen_connect()
+ print(connect_reply.message)
+
+ @classmethod
+ def automatically_test_dut_connect(cls):
+        """handle the automatic test-controller connection for the DUT host"""
+ client = VsperfClient()
+ ip_add, port = client.get_test_channel_info()
+ channel = grpc.insecure_channel(ip_add + ':' + port)
+ client.create_stub(channel)
+ client.host_testcontrol_connect()
+
+ @classmethod
+ def automatically_test_tgen_connect(cls):
+        """handle the automatic test-controller connection for the TGen host"""
+ client = VsperfClient()
+ ip_add, port = client.get_test_channel_info()
+ channel = grpc.insecure_channel(ip_add + ':' + port)
+ client.create_stub(channel)
+ client.tgen_testcontrol_connect()
+
+ def exit_section(self):
+ """exit"""
+ @classmethod
+ def section_execute(cls, menuitems, client, ip_add, port):
+        """used to enter a sub-options menu"""
+ channel = grpc.insecure_channel(ip_add + ':' + port)
+
+ while True:
+ client.create_stub(channel)
+ while True:
+ # os.system('clear')
+ print(colorize(HEADER, 'blue'))
+ print(colorize('version 0.1\n', 'pink'))
+ for item in menuitems:
+ print(colorize("[" +
+ str(menuitems.index(item)) + "]", 'green') +
+ list(item.keys())[0])
+ choice = input(">> ")
+ try:
+ if int(choice) < 0:
+ raise ValueError
+ if (int(choice) >= 0) and (int(choice) < (len(menuitems) - 1)):
+ list(menuitems[int(choice)].values())[0]()
+ else:
+ break
+ except (ValueError, IndexError):
+ pass
+ break
+ @classmethod
+ def get_user_trex_conf_location(cls):
+ """Ask user for t-rex configuration location"""
+ while True:
+            filename_1 = str(input("Provide the correct location of your t-rex configuration " \
+                                   "file where trex_cfg.yaml exists\n" \
+ "***************** Make Sure You Choose Correct" \
+ " File for Upload*******************\n" \
+ "Provide location: \n"))
+ user_file = Path("{}".format(filename_1.strip()))
+ if user_file.is_file():
+ break
+ else:
+ print("**************File Does Not Exist*****************\n")
+ continue
+ return filename_1
+
+ def upload_tgen_config(self):
+ """t-rex config file as a chunk to controller"""
+ if TGEN_CHECK == 0:
+ return print("TGen-Host is not Connected [!]" \
+ "\nMake sure to establish connection with TGen-Host.")
+ default_location = self.config.get('ConfFile', 'tgenpath')
+ if not default_location:
+ filename = self.get_user_trex_conf_location()
+ else:
+ user_preference = str(input("Use location specified in vsperfclient.conf?[Y/N] :"))
+ while True:
+ if 'y' in user_preference.lower().strip():
+ filename = self.config.get('ConfFile', 'tgenpath')
+ user_file = Path("{}".format(filename.strip()))
+ if user_file.is_file():
+ break
+ else:
+ print("**************File Does Not Exist*****************\n")
+ user_preference = 'n'
+ continue
+ elif 'n' in user_preference.lower().strip():
+ filename = self.get_user_trex_conf_location()
+ break
+ else:
+ print("Invalid Input")
+ user_preference = str(input("Use location specified in vsperfclient.conf?" \
+ "[Y/N] : "))
+ continue
+ filename = filename.strip()
+ chunks = self.get_file_chunks_1(filename)
+ upload_status = self.stub.TGenUploadConfigFile(chunks)
+ print(upload_status.Message)
+
+ def vsperf_install(self):
+ """vsperf install on dut-host"""
+ hostinfo = vsperf_pb2.HostInfo(ip=self.config.get('Host', 'ip'),
+ uname=self.config.get('Host', 'uname'),
+ pwd=self.config.get('Host', 'pwd'))
+ install_reply = self.stub.VsperfInstall(hostinfo)
+ print(install_reply.message)
+
+ def collectd_install(self):
+ """collectd install on dut-host"""
+ hostinfo = vsperf_pb2.HostInfo(ip=self.config.get('Host', 'ip'),
+ uname=self.config.get('Host', 'uname'),
+ pwd=self.config.get('Host', 'pwd'))
+ install_reply = self.stub.CollectdInstall(hostinfo)
+ print(install_reply.message)
+
+ def tgen_install(self):
+ """install t-rex on Tgen host"""
+ tgeninfo = vsperf_pb2.HostInfo(ip=self.config.get('TGen', 'ip'),
+ uname=self.config.get('TGen', 'uname'),
+ pwd=self.config.get('TGen', 'pwd'))
+ install_reply = self.stub.TGenInstall(tgeninfo)
+ print(install_reply.message)
+
+ @classmethod
+ def get_user_conf_location(cls):
+ """get user input for test configuration file"""
+ while True:
+            filename_1 = str(input("Provide the correct location of your test configuration " \
+                                   "file where it exists\n" \
+ "***************** Make Sure You Choose Correct" \
+ " Test File for Upload*******************\n" \
+ "Provide location: \n"))
+ user_file = Path("{}".format(filename_1.strip()))
+ if user_file.is_file():
+ break
+ else:
+ print("**************File Does Not Exist*****************\n")
+ continue
+ return filename_1
+
+ def upload_config(self):
+ """transfer config file as a chunk to controller"""
+ if DUT_CHECK == 0:
+ return print("DUT-Host is not Connected [!]" \
+ "\nMake sure to establish connection with DUT-Host.")
+ default_location = self.config.get('ConfFile', 'path')
+ if not default_location:
+ filename = self.get_user_conf_location()
+ else:
+ user_preference = str(input("Use location specified in vsperfclient.conf?[Y/N] :"))
+ while True:
+ if 'y' in user_preference.lower().strip():
+ filename = self.config.get('ConfFile', 'path')
+ user_file = Path("{}".format(filename.strip()))
+ if user_file.is_file():
+ break
+ else:
+ print("**************File Does Not Exist*****************\n")
+ user_preference = 'n'
+ continue
+ elif 'n' in user_preference.lower().strip():
+ filename = self.get_user_conf_location()
+ break
+ else:
+ print("Invalid Input")
+ user_preference = str(input("Use location specified in vsperfclient.conf?" \
+ "[Y/N] : "))
+ continue
+ filename = filename.strip()
+ upload_param = self.get_file_chunks(filename)
+ upload_status = self.stub.UploadConfigFile(upload_param)
+ print(upload_status.Message)
+
+ def start_test(self):
+ """start test parameter, test config file and test name"""
+ test_control = vsperf_pb2.ControlVsperf(testtype=self.config.get('Testcase', 'test'), \
+ conffile=self.config.get('Testcase', 'conffile'))
+ control_reply = self.stub.StartTest(test_control)
+ print(control_reply.message)
+
+ def start_tgen(self):
+        """start the t-rex traffic generator on tgen-host"""
+ tgen_control = vsperf_pb2.ControlTGen(params=self.config.get('TGen', 'params'))
+ control_reply = self.stub.StartTGen(tgen_control)
+ print(control_reply.message)
+
+    @classmethod
+    def get_file_chunks(cls, filename):
+        """stream a file as chunks (tagged with its filename) to the controller"""
+        # derive the bare filename once, then yield CHUNK_SIZE pieces so the
+        # whole file is streamed instead of only its first chunk
+        test_filename = filename.split("/")[-1]
+        with open(filename, 'rb') as f_1:
+            while True:
+                piece = f_1.read(CHUNK_SIZE)
+                if not piece:
+                    return
+                yield vsperf_pb2.ConfFileTest(Content=piece, Filename=test_filename)
+
+ @classmethod
+ def get_file_chunks_1(cls, filename):
+ """Convert file into chunks"""
+ with open(filename, 'rb') as f:
+ while True:
+ piece = f.read(CHUNK_SIZE)
+ if len(piece) == 0:
+ return
+ yield vsperf_pb2.ConfFile(Content=piece)
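+    # used as a request stream, e.g.
+    # self.stub.TGenUploadConfigFile(self.get_file_chunks_1(filename))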
+
+
+ def test_status(self):
+ """check the test_status"""
+ test_check = vsperf_pb2.StatusQuery(
+ testtype=self.config.get('Testcase', 'test'))
+ check_result_reply = self.stub.TestStatus(test_check)
+ print(check_result_reply.message)
+
+ def vsperf_terminate(self):
+ """after running test terminate vsperf on dut host"""
+ hostinfo = vsperf_pb2.HostInfo(ip=self.config.get('Host', 'ip'),
+ uname=self.config.get('Host', 'uname'),
+ pwd=self.config.get('Host', 'pwd'))
+ termination_reply = self.stub.TerminateVsperf(hostinfo)
+ print(termination_reply.message)
+
+ def start_beats(self):
+ """start beats on dut-host before running test"""
+ hostinfo = vsperf_pb2.HostInfo(ip=self.config.get('Host', 'ip'),
+ uname=self.config.get('Host', 'uname'),
+ pwd=self.config.get('Host', 'pwd'))
+ status_reply = self.stub.StartBeats(hostinfo)
+ print(status_reply.message)
+
+ def remove_vsperf(self):
+ """remove vsperf from dut-host"""
+ hostinfo = vsperf_pb2.HostInfo(ip=self.config.get('Host', 'ip'),
+ uname=self.config.get('Host', 'uname'),
+ pwd=self.config.get('Host', 'pwd'))
+ status_reply = self.stub.RemoveVsperf(hostinfo)
+ print(status_reply.message)
+
+ def remove_result_folder(self):
+        """remove the result folder from dut-host"""
+ hostinfo = vsperf_pb2.HostInfo(ip=self.config.get('Host', 'ip'),
+ uname=self.config.get('Host', 'uname'),
+ pwd=self.config.get('Host', 'pwd'))
+ status_reply = self.stub.RemoveResultFolder(hostinfo)
+ print(status_reply.message)
+
+ def remove_config_files(self):
+ """remove all config files"""
+ hostinfo = vsperf_pb2.HostInfo(ip=self.config.get('Host', 'ip'),
+ uname=self.config.get('Host', 'uname'),
+ pwd=self.config.get('Host', 'pwd'))
+ status_reply = self.stub.RemoveUploadedConfig(hostinfo)
+ print(status_reply.message)
+
+ def remove_collectd(self):
+ """remove collectd from dut-host"""
+ hostinfo = vsperf_pb2.HostInfo(ip=self.config.get('Host', 'ip'),
+ uname=self.config.get('Host', 'uname'),
+ pwd=self.config.get('Host', 'pwd'))
+ status_reply = self.stub.RemoveCollectd(hostinfo)
+ print(status_reply.message)
+
+ def remove_everything(self):
+ """remove everything from dut host"""
+ hostinfo = vsperf_pb2.HostInfo(ip=self.config.get('Host', 'ip'),
+ uname=self.config.get('Host', 'uname'),
+ pwd=self.config.get('Host', 'pwd'))
+ status_reply = self.stub.RemoveEverything(hostinfo)
+ print(status_reply.message)
+
+ def sanity_nic_check(self):
+ """nic is available on tgen host check"""
+ tgeninfo = vsperf_pb2.HostInfo(ip=self.config.get('TGen', 'ip'),
+ uname=self.config.get('TGen', 'uname'),
+ pwd=self.config.get('TGen', 'pwd'))
+ status_reply = self.stub.SanityNICCheck(tgeninfo)
+ print(status_reply.message)
+
+ def sanity_collectd_check(self):
+        """check collectd is running properly"""
+ hostinfo = vsperf_pb2.HostInfo(ip=self.config.get('Host', 'ip'),
+ uname=self.config.get('Host', 'uname'),
+ pwd=self.config.get('Host', 'pwd'))
+ status_reply = self.stub.SanityCollectdCheck(hostinfo)
+ print(status_reply.message)
+
+ def cpu_allocation_check(self):
+ """check cpu allocation"""
+ hostinfo = vsperf_pb2.HostInfo(ip=self.config.get('Host', 'ip'),
+ uname=self.config.get('Host', 'uname'),
+ pwd=self.config.get('Host', 'pwd'))
+ status_reply = self.stub.SanityCPUAllocationCheck(hostinfo)
+ print(status_reply.message)
+
+ def sanity_vnf_path(self):
+ """vnf path available on dut host"""
+ hostinfo = vsperf_pb2.HostInfo(ip=self.config.get('Host', 'ip'),
+ uname=self.config.get('Host', 'uname'),
+ pwd=self.config.get('Host', 'pwd'))
+ status_reply = self.stub.SanityVNFpath(hostinfo)
+ print(status_reply.message)
+
+ def sanity_vsperf_check(self):
+ """check vsperf correctly installed"""
+ hostinfo = vsperf_pb2.HostInfo(ip=self.config.get('Host', 'ip'),
+ uname=self.config.get('Host', 'uname'),
+ pwd=self.config.get('Host', 'pwd'))
+ status_reply = self.stub.SanityVSPERFCheck(hostinfo)
+ print(status_reply.message)
+
+ def sanity_dut_tgen_conn_check(self):
+ """check the connection between dut-host and tgen-host"""
+ hostinfo = vsperf_pb2.HostInfo(ip=self.config.get('Host', 'ip'),
+ uname=self.config.get('Host', 'uname'),
+ pwd=self.config.get('Host', 'pwd'))
+ status_reply = self.stub.SanityTgenConnDUTCheck(hostinfo)
+ print(status_reply.message)
+
+ def dut_test_availability(self):
+        """check whether dut-host is free for testing"""
+ hostinfo = vsperf_pb2.HostInfo(ip=self.config.get('Host', 'ip'),
+ uname=self.config.get('Host', 'uname'),
+ pwd=self.config.get('Host', 'pwd'))
+ status_reply = self.stub.DUTvsperfTestAvailability(hostinfo)
+ print(status_reply.message)
+
+ def get_test_conf_from_dut(self):
+ """get the vsperf test config file from dut host for user to check"""
+ hostinfo = vsperf_pb2.HostInfo(ip=self.config.get('Host', 'ip'),
+ uname=self.config.get('Host', 'uname'),
+ pwd=self.config.get('Host', 'pwd'))
+ status_reply = self.stub.GetVSPERFConffromDUT(hostinfo)
+ print(status_reply.message)
+
+ def dut_hugepage_config(self):
+ """setup hugepages on dut-host"""
+ configparam = vsperf_pb2.HugepConf(hpmax=self.config.get('HugepageConfig', 'HpMax'), \
+ hprequested=self.config.get('HugepageConfig',\
+ 'HpRequested'))
+ config_status_reply = self.stub.DutHugepageConfig(configparam)
+ print(config_status_reply.message)
+ @classmethod
+ def get_user_collectd_conf_location(cls):
+ """get collectd configuration file location from user"""
+ while True:
+            filename_1 = str(input("Provide the correct location of your collectd configuration " \
+                                   "file where collectd.conf exists\n" \
+ "***************** Make Sure You Choose Correct" \
+ " File for Upload*******************\n" \
+ "Provide location: \n"))
+ user_file = Path("{}".format(filename_1.strip()))
+ if user_file.is_file():
+ break
+ else:
+ print("**************File Does Not Exist*****************\n")
+ continue
+ return filename_1
+ def host_testcontrol_connect(self):
+        """provide dut-host credentials to the test controller"""
+ global DUT_CHECK
+ hostinfo = vsperf_pb2.HostInfo(ip=self.config.get('Host', 'ip'),
+ uname=self.config.get('Host', 'uname'),
+ pwd=self.config.get('Host', 'pwd'))
+ self.stub.HostConnect(hostinfo)
+
+ def tgen_testcontrol_connect(self):
+ """provide tgen-host credential to test controller"""
+ global TGEN_CHECK
+ tgeninfo = vsperf_pb2.HostInfo(ip=self.config.get('TGen', 'ip'),
+ uname=self.config.get('TGen', 'uname'),
+ pwd=self.config.get('TGen', 'pwd'))
+ self.stub.TGenHostConnect(tgeninfo)
+
+ def upload_collectd_config(self):
+ """collectd config file chunks forwarded to controller"""
+ if DUT_CHECK == 0:
+ return print("DUT-Host is not Connected [!]" \
+ "\nMake sure to establish connection with DUT-Host.")
+ default_location = self.config.get('ConfFile', 'collectdpath')
+ if not default_location:
+ filename = self.get_user_collectd_conf_location()
+ else:
+ user_preference = str(input("Use location specified in vsperfclient.conf?[Y/N] :"))
+ while True:
+ if 'y' in user_preference.lower().strip():
+ filename = self.config.get('ConfFile', 'collectdpath')
+ user_file = Path("{}".format(filename.strip()))
+ if user_file.is_file():
+ break
+ else:
+ print("**************File Does Not Exist*****************\n")
+ user_preference = 'n'
+ continue
+ elif 'n' in user_preference.lower().strip():
+ filename = self.get_user_collectd_conf_location()
+ break
+ else:
+ print("Invalid Input")
+ user_preference = str(input("Use location specified in vsperfclient.conf?" \
+ "[Y/N] : "))
+ continue
+ filename = filename.strip()
+ chunks = self.get_file_chunks_1(filename)
+ upload_status = self.stub.CollectdUploadConfig(chunks)
+ print(upload_status.Message)
+
+ def dut_check_dependecies(self):
+        """check dependencies on dut-host"""
+ hostinfo = vsperf_pb2.HostInfo(ip=self.config.get('Host', 'ip'),
+ uname=self.config.get('Host', 'uname'),
+ pwd=self.config.get('Host', 'pwd'))
+ check_reply = self.stub.CheckDependecies(hostinfo)
+ print(check_reply.message)
+
+ @classmethod
+ def establish_connection_both(cls):
+ """
+        Establish connections for vsperf to both the deploy server \
+        and the testcontrol server
+ """
+ client = VsperfClient()
+ ip_add, port = client.get_deploy_channel_info()
+ print("Establish connection for vsperf")
+ menuitems_connection = [
+ {"Connect to DUT Host": client.host_connect_both},
+ {"Connect to TGen Host": client.tgen_connect_both},
+ {"Return to Previous Menu": client.exit_section}
+ ]
+ client.section_execute(menuitems_connection, client, ip_add, port)
+ @classmethod
+ def establish_connection_deploy(cls):
+ """
+        Establish a connection for vsperf using the deploy server
+ """
+ client = VsperfClient()
+ ip_add, port = client.get_deploy_channel_info()
+ print("Establish connection for vsperf")
+ menuitems_connection = [
+ {"Connect to DUT Host": client.host_connect},
+ {"Connect to TGen Host": client.tgen_connect},
+ {"Return to Previous Menu": client.exit_section}
+ ]
+ client.section_execute(menuitems_connection, client, ip_add, port)
+ @classmethod
+ def establish_connection_test(cls):
+ """
+        Establish a connection for vsperf using the testcontrol server
+ """
+ client = VsperfClient()
+ ip_add, port = client.get_test_channel_info()
+ print("Establish connection for vsperf")
+ menuitems_connection = [
+ {"Connect to DUT Host": client.host_connect},
+ {"Connect to TGen Host": client.tgen_connect},
+ {"Return to Previous Menu": client.exit_section}
+ ]
+ client.section_execute(menuitems_connection, client, ip_add, port)
+ @classmethod
+ def vsperf_setup(cls):
+ """setup sub-options"""
+ client = VsperfClient()
+ ip_add, port = client.get_deploy_channel_info()
+ print("Prerequisites Installation for VSPERF")
+ menuitems_setup = [
+ {"Install VSPERF": client.vsperf_install},
+ {"Install TGen ": client.tgen_install},
+ {"Install Collectd": client.collectd_install},
+ {"Return to Previous Menu": client.exit_section}
+ ]
+ client.section_execute(menuitems_setup, client, ip_add, port)
+ @classmethod
+ def upload_config_files(cls):
+ """all the upload sub-options"""
+ client = VsperfClient()
+ ip_add, port = client.get_deploy_channel_info()
+ menuitems_setup = [
+ {"Upload TGen Configuration File": client.upload_tgen_config},
+ {"Upload Collectd Configuration File": client.upload_collectd_config},
+ {"Return to Previous Menu": client.exit_section}
+ ]
+ client.section_execute(menuitems_setup, client, ip_add, port)
+ @classmethod
+ def manage_sysparam_config(cls):
+ """manage system parameter on dut host before run test"""
+ client = VsperfClient()
+ ip_add, port = client.get_deploy_channel_info()
+ menuitems_setup = [
+ {"DUT-Host hugepages configuration": client.dut_hugepage_config},
+ {"Check VSPERF Dependencies on DUT-Host": client.dut_check_dependecies},
+ {"Return to Previous Menu": client.exit_section}
+ ]
+ client.section_execute(menuitems_setup, client, ip_add, port)
+
+ @classmethod
+ def test_status_check(cls):
+ """after running test , test status related sub-options"""
+ client = VsperfClient()
+ ip_add, port = client.get_test_channel_info()
+ menuitems_setup = [
+ {"Test status": client.test_status},
+ {"Get Test Configuration file from DUT-host": client.get_test_conf_from_dut},
+ {"Return to Previous Menu": client.exit_section}
+ ]
+ client.section_execute(menuitems_setup, client, ip_add, port)
+
+ @classmethod
+ def sanity_check_options(cls):
+ """all sanity check sub-options"""
+ client = VsperfClient()
+ ip_add, port = client.get_test_channel_info()
+ menuitems_setup = [
+ {"Check installed VSPERF": client.sanity_vsperf_check},
+ {"Check Test Config's VNF path is available on DUT-Host": client.sanity_vnf_path},
+ {"Check NIC PCIs is available on Traffic Generator": client.sanity_nic_check},
+ {"Check CPU allocation on DUT-Host": client.cpu_allocation_check},
+ {"Check installed Collectd": client.sanity_collectd_check},
+ {"Check Connection between DUT-Host and Traffic Generator Host":
+ client.sanity_dut_tgen_conn_check},
+ {"Return to Previous Menu": client.exit_section}
+ ]
+ client.section_execute(menuitems_setup, client, ip_add, port)
+
+ @classmethod
+ def run_test(cls):
+ """run test sub-options"""
+        print("**Before running tests, we highly recommend performing the sanity checks first.")
+ client = VsperfClient()
+ ip_add, port = client.get_test_channel_info()
+ menuitems_setup = [
+ {"Upload Test Configuration File": client.upload_config},
+ {"Perform Sanity Checks before running tests": client.sanity_check_options},
+ {"Check if DUT-HOST is available": client.dut_test_availability},
+ {"Start TGen ": client.start_tgen},
+ {"Start Beats": client.start_beats},
+ {"Start Test": client.start_test},
+ {"Return to Previous Menu": client.exit_section}
+ ]
+ client.section_execute(menuitems_setup, client, ip_add, port)
+
+ @classmethod
+ def clean_up(cls):
+ """clean-up sub-options"""
+ print(
+ "*******************************************************************\n\n\
+ IF you are performing Test on IntelPOD 12 - Node 4, Be careful during removal\n\n\
+ *******************************************************************")
+ client = VsperfClient()
+ ip_add, port = client.get_test_channel_info()
+ menuitems_setup = [
+ {"Remove VSPERF": client.remove_vsperf},
+ {"Terminate VSPERF": client.vsperf_terminate},
+ {"Remove Results from DUT-Host": client.remove_result_folder},
+ {"Remove Uploaded Configuration File": client.remove_config_files},
+ {"Remove Collectd": client.remove_collectd},
+ {"Remove Everything": client.remove_everything},
+ {"Return to Previous Menu": client.exit_section}
+
+ ]
+ client.section_execute(menuitems_setup, client, ip_add, port)
+
+def run():
+ """It will run the actul primary options"""
+ client = VsperfClient()
+ client_mode = client.get_mode()
+ print(client_mode)
+ if "deploy" in client_mode.lower().strip():
+ menuitems = [
+ {"Establish Connections": client.establish_connection_deploy},
+ {"Installation": client.vsperf_setup},
+ {"Upload Configuration Files": client.upload_config_files},
+ {"Manage DUT-System Configuration": client.manage_sysparam_config},
+ {"Exit": sys.exit}
+ ]
+ #ip_add, port = client.get_channel_info()
+ #channel = grpc.insecure_channel(ip_add + ':' + port)
+ while True:
+ # os.system('clear')
+ print(colorize(HEADER, 'blue'))
+ print(colorize('version 0.1\n', 'pink'))
+ for item in menuitems:
+ print(colorize("[" +
+ str(menuitems.index(item)) + "]", 'green') +
+ list(item.keys())[0])
+ choice = input(">> ")
+ try:
+ if int(choice) < 0:
+ raise ValueError
+ list(menuitems[int(choice)].values())[0]()
+ except (ValueError, IndexError):
+ pass
+
+ elif "test" in client_mode.lower().strip():
+ menuitems = [
+ {"Establish Connections": client.establish_connection_test},
+ {"Run Test": client.run_test},
+ {"Test Status": client.test_status_check},
+ {"Clean-Up": client.clean_up},
+ {"Exit": sys.exit}
+ ]
+ #ip_add, port = client.get_channel_info()
+ #channel = grpc.insecure_channel(ip_add + ':' + port)
+ while True:
+ # os.system('clear')
+ print(colorize(HEADER, 'blue'))
+ print(colorize('version 0.1\n', 'pink'))
+ for item in menuitems:
+ print(colorize("[" +
+ str(menuitems.index(item)) + "]", 'green') +
+ list(item.keys())[0])
+ choice = input(">> ")
+ try:
+ if int(choice) < 0:
+ raise ValueError
+ list(menuitems[int(choice)].values())[0]()
+ except (ValueError, IndexError):
+ pass
+
+ elif "together" in client_mode.lower().strip():
+ menuitems = [
+ {"Establish Connections": client.establish_connection_both},
+ {"Installation": client.vsperf_setup},
+ {"Upload Configuration Files": client.upload_config_files},
+ {"Manage DUT-System Configuration": client.manage_sysparam_config},
+ {"Run Test": client.run_test},
+ {"Test Status": client.test_status_check},
+ {"Clean-Up": client.clean_up},
+ {"Exit": sys.exit}
+ ]
+ #ip_add, port = client.get_channel_info()
+ #channel = grpc.insecure_channel(ip_add + ':' + port)
+ while True:
+ # os.system('clear')
+ print(colorize(HEADER, 'blue'))
+ print(colorize('version 0.1\n', 'pink'))
+ for item in menuitems:
+ print(colorize("[" +
+ str(menuitems.index(item)) + "]", 'green') +
+ list(item.keys())[0])
+ choice = input(">> ")
+ try:
+ if int(choice) < 0:
+ raise ValueError
+ list(menuitems[int(choice)].values())[0]()
+ except (ValueError, IndexError):
+ pass
+
+ else:
+ print("You have not defined client mode in vsperfclient.conf [!]")
+
+
+if __name__ == '__main__':
+ run()
diff --git a/tools/docker/client/vsperfclient.conf b/tools/docker/client/vsperfclient.conf
new file mode 100644
index 00000000..12a657d7
--- /dev/null
+++ b/tools/docker/client/vsperfclient.conf
@@ -0,0 +1,39 @@
+[DeployServer]
+ip = 127.0.0.1
+port = 50051
+
+[TestServer]
+ip = 127.0.0.1
+port = 50052
+
+[Mode]
+#Deploy: To perform only the vsperf set-up
+#Test: To perform only the test
+#Together: To perform both the set-up and the test
+#Assign any value from the above options according to your requirement
+mode = Together
+
+[Host]
+ip = 10.10.120.24
+uname = opnfv
+pwd = opnfv
+
+[TGen]
+ip = 10.10.120.25
+uname = root
+pwd = P@ssw0rd
+params = -i --no-scapy-server --nc --no-watchdog
+
+[HugepageConfig]
+HpMax = 8192
+HpRequested = 1024
+
+#provide appropriate location for configuration files
+[ConfFile]
+path =
+tgenpath =
+collectdpath =
+
+[Testcase]
+test = phy2phy_tput
+conffile = vsperf.conf
diff --git a/tools/docker/deployment/auto/controller/Dockerfile b/tools/docker/deployment/auto/controller/Dockerfile
new file mode 100644
index 00000000..e849d8f2
--- /dev/null
+++ b/tools/docker/deployment/auto/controller/Dockerfile
@@ -0,0 +1,23 @@
+FROM python:3.6
+LABEL maintainer="sridhar.rao@spirent.com"
+
+ENV GRPC_PYTHON_VERSION 1.4.0
+RUN apt-get update && apt-get -y install python3-pip && apt-get -y install openssh-server
+RUN pip3 install grpcio==${GRPC_PYTHON_VERSION} grpcio-tools==${GRPC_PYTHON_VERSION}
+RUN pip3 install paramiko
+RUN pip3 install chainmap
+RUN pip3 install oslo.utils
+RUN pip3 install scp
+
+WORKDIR /usr/src/app
+
+COPY ./vsperf ./vsperf
+
+VOLUME ["/usr/src/app/vsperf"]
+
+EXPOSE 50051
+
+CMD ["python3", "./vsperf/vsperf_controller.py"]
+
+#CMD tail -f /dev/null
+
diff --git a/tools/docker/deployment/auto/controller/list.env b/tools/docker/deployment/auto/controller/list.env
new file mode 100644
index 00000000..ab4404b7
--- /dev/null
+++ b/tools/docker/deployment/auto/controller/list.env
@@ -0,0 +1,14 @@
+DUT_IP_ADDRESS=10.10.120.24
+DUT_USERNAME=opnfv
+DUT_PASSWORD=opnfv
+
+TGEN_IP_ADDRESS=10.10.120.25
+TGEN_USERNAME=root
+TGEN_PASSWORD=P@ssw0rd
+TGEN_PARAMS= -i --no-scapy-server --nc --no-watchdog
+
+HUGEPAGE_MAX=8192
+HUGEPAGE_REQUESTED=1024
+
+SANITY_CHECK=NO
+
diff --git a/tools/docker/deployment/auto/controller/vsperf/__init__.py b/tools/docker/deployment/auto/controller/vsperf/__init__.py
new file mode 100644
index 00000000..ad0ebec3
--- /dev/null
+++ b/tools/docker/deployment/auto/controller/vsperf/__init__.py
@@ -0,0 +1 @@
+#### Empty
diff --git a/tools/docker/deployment/auto/controller/vsperf/collectd.conf b/tools/docker/deployment/auto/controller/vsperf/collectd.conf
new file mode 100644
index 00000000..9cefc8c5
--- /dev/null
+++ b/tools/docker/deployment/auto/controller/vsperf/collectd.conf
@@ -0,0 +1,49 @@
+Hostname "pod12-node4"
+Interval 1
+LoadPlugin intel_rdt
+LoadPlugin processes
+LoadPlugin interface
+LoadPlugin network
+LoadPlugin ovs_stats
+LoadPlugin cpu
+LoadPlugin memory
+LoadPlugin csv
+#LoadPlugin dpdkstat
+##############################################################################
+# Plugin configuration #
+##############################################################################
+<Plugin processes>
+ ProcessMatch "ovs-vswitchd" "ovs-vswitchd"
+ ProcessMatch "ovsdb-server" "ovsdb-server"
+ ProcessMatch "collectd" "collectd"
+</Plugin>
+<Plugin network>
+ Server "10.10.120.22" "25826"
+</Plugin>
+
+<Plugin ovs_stats>
+ Port "6640"
+ Address "127.0.0.1"
+ Socket "/usr/local/var/run/openvswitch/db.sock"
+ Bridges "vsperf-br0"
+</Plugin>
+
+<Plugin "intel_rdt">
+ Cores "2" "4-5" "6-7" "8" "9" "22" "23" "24" "25" "26" "27"
+</Plugin>
+
+<Plugin csv>
+ DataDir "/tmp/csv"
+ StoreRates false
+</Plugin>
+
+#<Plugin dpdkstat>
+# <EAL>
+# Coremask "0x1"
+# MemoryChannels "4"
+# FilePrefix "rte"
+# </EAL>
+# SharedMemObj "dpdk_collectd_stats_0"
+# EnabledPortMask 0xffff
+#</Plugin>
+
diff --git a/tools/docker/deployment/auto/controller/vsperf/trex_cfg.yaml b/tools/docker/deployment/auto/controller/vsperf/trex_cfg.yaml
new file mode 100644
index 00000000..8bb8e341
--- /dev/null
+++ b/tools/docker/deployment/auto/controller/vsperf/trex_cfg.yaml
@@ -0,0 +1,20 @@
+- port_limit : 2
+ version : 2
+ interfaces : ["81:00.0", "81:00.1"] # list of the interfaces to bind # node 4
+ port_bandwidth_gb : 10 #10G nics
+ port_info : # set eth mac addr
+ - dest_mac : "3c:fd:fe:b4:41:09" # port 0
+ src_mac : "3c:fd:fe:b4:41:08"
+ - dest_mac : "3c:fd:fe:b4:41:08" # port 1
+ src_mac : "3c:fd:fe:b4:41:09"
+ platform :
+ master_thread_id : 17
+ latency_thread_id : 16
+ dual_if :
+ - socket : 1
+ threads : [22,23,24,25,26,27]
+ - socket : 0
+ threads : [10,11,12,13,14,15]
diff --git a/tools/docker/deployment/auto/controller/vsperf/vsperf_controller.py b/tools/docker/deployment/auto/controller/vsperf/vsperf_controller.py
new file mode 100644
index 00000000..b6865272
--- /dev/null
+++ b/tools/docker/deployment/auto/controller/vsperf/vsperf_controller.py
@@ -0,0 +1,392 @@
+# Copyright 2018-19 Spirent Communications.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+VSPERF_deploy_auto
+"""
+# pylint: disable=W0603
+
+import os
+import sys
+from utils import ssh
+
+_ONE_DAY_IN_SECONDS = 60 * 60 * 24
+
+DUT_IP = os.getenv('DUT_IP_ADDRESS')
+DUT_USER = os.getenv('DUT_USERNAME')
+DUT_PWD = os.getenv('DUT_PASSWORD')
+
+TGEN_IP = os.getenv('TGEN_IP_ADDRESS')
+TGEN_USER = os.getenv('TGEN_USERNAME')
+TGEN_PWD = os.getenv('TGEN_PASSWORD')
+TGEN_PARAM = os.getenv('TGEN_PARAMS')
+
+HPMAX = int(os.getenv('HUGEPAGE_MAX'))
+HPREQUESTED = int(os.getenv('HUGEPAGE_REQUESTED'))
+
+SANITY = str(os.getenv('SANITY_CHECK'))
+
+DUT_CLIENT = None
+TGEN_CLIENT = None
+
+
+def host_connect():
+ """
+ Handle host connectivity to DUT
+ """
+ global DUT_CLIENT
+ DUT_CLIENT = ssh.SSH(host=DUT_IP, user=DUT_USER, password=DUT_PWD)
+ print("DUT-Host Successfully Connected .........................................[OK] \n ")
+
+def tgen_connect():
+ """
+ Handle Tgen Connection to Trex
+ """
+ global TGEN_CLIENT
+ TGEN_CLIENT = ssh.SSH(host=TGEN_IP, user=TGEN_USER, password=TGEN_PWD)
+ print("Traffic Generator Successfully Connected ...............................[OK] \n ")
+
+
+def vsperf_install():
+ """
+ Perform actual installation
+ """
+ vsperf_check_command = "source ~/vsperfenv/bin/activate ; "
+ vsperf_check_command += "cd vswitchperf && ./vsperf --help"
+ vsperf_check_cmd_result = str(DUT_CLIENT.execute(vsperf_check_command)[1])
+ vsperf_verify_list = [
+ 'usage',
+ 'positional arguments',
+ 'optional arguments',
+ 'test selection options',
+ 'test behavior options']
+ for idx, i in enumerate(vsperf_verify_list, start=1):
+ if str(i) in vsperf_check_cmd_result:
+ if idx < 5:
+ continue
+ elif idx == 5:
+ print(
+ "VSPERF is Already Installed on DUT-Host..........................."\
+ ".......[OK]\n")
+ else:
+ download_cmd = "git clone https://gerrit.opnfv.org/gerrit/vswitchperf"
+ DUT_CLIENT.run(download_cmd)
+ install_cmd = "cd vswitchperf/systems ; "
+ install_cmd += "echo '{}' | sudo -S ./build_base_machine.sh ".\
+ format(DUT_PWD)
+ DUT_CLIENT.run(install_cmd)
+ print(
+ "Vsperf Installed on DUT-Host ....................................[OK]\n")
+ break
+
+
+def tgen_install():
+ """
+ Install T-rex traffic gen on TGen
+ """
+ kill_cmd = "pkill -f ./t-rex"
+ TGEN_CLIENT.send_command(kill_cmd)
+ tgen_start_check = "cd trex/scripts && ./t-rex-64 -f cap2/dns.yaml -d 100 -m 1 --nc"
+ tgen_start_cmd_result = int(TGEN_CLIENT.execute(tgen_start_check)[0])
+ if tgen_start_cmd_result == 0:
+ print(
+ "The Host has T-rex Installed....................................[OK]\n")
+ else:
+ download_cmd = "git clone https://github.com/cisco-system-traffic-generator/trex-core trex"
+ TGEN_CLIENT.run(download_cmd)
+ install_cmd = "cd trex-core/linux_dpdk ; ./b configure ; ./b build"
+ TGEN_CLIENT.run(install_cmd)
+ print(
+ "The Host has now T-rex Installed...........................[OK]\n")
+
+def upload_tgen_config_file():
+ """
+ Upload Tgen Config File on T-rex
+ """
+ localpath = '/usr/src/app/vsperf/trex_cfg.yaml'
+ if not os.path.exists(localpath):
+ print("TGEN config File does not exist................[Failed]")
+ return
+ remotepath = '~/trex_cfg.yaml'
+ check_trex_config_cmd = "echo {} | sudo -S find /etc -maxdepth 1 -name '{}'".format(
+ TGEN_PWD, remotepath[2:])
+ check_test_result = str(TGEN_CLIENT.execute(check_trex_config_cmd)[1])
+ if remotepath[2:] in check_test_result:
+ DUT_CLIENT.run("rm -f /etc/{}".format(remotepath[2:]))
+ TGEN_CLIENT.put_file(localpath, remotepath)
+ TGEN_CLIENT.run(
+ "echo {} | sudo -S mv ~/{} /etc/".format(TGEN_PWD, remotepath[2:]), pty=True)
+ print(
+ "T-rex Configuration File Uploaded on TGen-Host...........................[OK]\n")
+
+
+def install_collectd():
+ """
+ Install collectd on the DUT-Host
+ """
+ check_collectd_config_cmd = "find /opt -maxdepth 1 -name 'collectd'"
+ check_test_result = str(DUT_CLIENT.execute(check_collectd_config_cmd)[1])
+ if "collectd" in check_test_result:
+ print(
+ 'Collectd is Already Installed on DUT-Host...............................[OK]\n')
+ else:
+ download_cmd = "git clone https://github.com/collectd/collectd.git"
+ DUT_CLIENT.run(download_cmd)
+ build_cmd = "cd collectd ; "
+ build_cmd += "./build.sh"
+ DUT_CLIENT.run(build_cmd)
+ config_cmd = "cd collectd ; ./configure --enable-syslog --enable-logfile "
+ config_cmd += "--enable-hugepages --enable-debug ; "
+ DUT_CLIENT.run(config_cmd)
+ install_cmd = "cd collectd ; make ; "
+ install_cmd += "echo '{}' | sudo -S make install".format(DUT_PWD)
+ DUT_CLIENT.run(install_cmd, pty=True)
+ print(
+ 'Collectd Installed Successfully on DUT-Host.............................[OK]\n ')
+
+
+def collectd_upload_config():
+ """
+ Upload Configuration file of Collectd on DUT
+ """
+ localpath = '/usr/src/app/vsperf/collectd.conf'
+ if not os.path.exists(localpath):
+ print("Collectd config File does not exist.......................[Failed]")
+ return
+ remotepath = '~/collectd.conf'
+ collectd_config_cmd = "echo {} | sudo -S find /opt/collectd/etc -maxdepth 1 -name '{}'".\
+ format(DUT_PWD, remotepath[2:])
+ check_test_result = str(DUT_CLIENT.execute(collectd_config_cmd)[1])
+ if remotepath[2:] in check_test_result:
+ DUT_CLIENT.run(
+ "echo {} | sudo -S rm -f /opt/collectd/etc/{}".format(DUT_PWD, remotepath[2:]))
+ DUT_CLIENT.put_file(localpath, remotepath)
+ DUT_CLIENT.run("echo {} | sudo -S mv ~/{} /opt/collectd/etc/".\
+ format(DUT_PWD, remotepath[2:]), pty=True)
+ print(
+ "Collectd Configuration File Uploaded on DUT-Host.........................[OK]\n ")
+
+def start_tgen():
+ """
+ It will start the Traffic generator
+ """
+ kill_cmd = "pkill -f ./t-rex"
+ TGEN_CLIENT.send_command(kill_cmd)
+ run_cmd = "cd trex_2.37/scripts && "
+ run_cmd += "screen ./t-rex-64 "
+ run_cmd += TGEN_PARAM
+ TGEN_CLIENT.send_command(run_cmd)
+ print(
+ "T-Rex Successfully running...............................................[OK]\n")
+
+
+def dut_hugepage_config():
+ """
+ Configure the DUT system hugepage parameter from client
+ """
+ if not HPMAX or not HPREQUESTED:
+ print("HPMAX and HPREQUESTED not defined ...................[Failed]")
+ return
+ hugepage_cmd = "echo '{}' | sudo -S mkdir -p /mnt/huge ; ".format(
+ DUT_PWD)
+ hugepage_cmd += "echo '{}' | sudo -S mount -t hugetlbfs nodev /mnt/huge".format(
+ DUT_PWD)
+ DUT_CLIENT.run(hugepage_cmd, pty=True)
+ hp_nr_cmd = "cat /sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages"
+ hp_free_cmd = "cat /sys/devices/system/node/node0/hugepages/hugepages-2048kB/free_hugepages"
+ hp_nr = int(DUT_CLIENT.execute(hp_nr_cmd)[1])
+ hp_free = int(DUT_CLIENT.execute(hp_free_cmd)[1])
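+ # Sizing logic: if fewer hugepages are free than requested, grow the pool
+ # by the shortfall (requested - free), capped at HPMAX.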
+ if hp_free <= HPREQUESTED:
+ hp_nr_new = hp_nr + (HPREQUESTED - hp_free)
+ if hp_nr_new > HPMAX:
+ hp_nr_new = HPMAX
+
+ nr_hugepage_cmd = "echo '{}' | sudo -S bash -c \"echo 'vm.nr_hugepages={}' >> ".\
+ format(DUT_PWD, hp_nr_new)
+ nr_hugepage_cmd += "/etc/sysctl.conf\""
+ DUT_CLIENT.run(nr_hugepage_cmd, pty=True)
+
+ dict_cmd = "cat /sys/devices/system/node/node1/hugepages/hugepages-2048kB/nr_hugepages"
+ dict_check = int(DUT_CLIENT.execute(dict_cmd)[0])
+ if dict_check == 0:
+ node1_hugepage_cmd = "echo '{}' | sudo -s bash -c \"echo 0 > ".format(DUT_PWD)
+ node1_hugepage_cmd += "/sys/devices/system/node/node1/hugepages"
+ node1_hugepage_cmd += "/hugepages-2048kB/nr_hugepages\""
+ DUT_CLIENT.run(node1_hugepage_cmd, pty=True)
+ print("DUT-Host system configured with {} No of Hugepages.....................[OK] \n ".\
+ format(hp_nr_new))
+
+
+def sanity_nic_check():
+ """
+ Check whether the NIC PCI ids are correctly configured or not
+ """
+ trex_conf_path = "cat /etc/trex_cfg.yaml | grep interfaces"
+ trex_conf_read = TGEN_CLIENT.execute(trex_conf_path)[1]
+ nic_pid_ids_list = [trex_conf_read.split("\"")[1], trex_conf_read.split("\"")[3]]
+ trex_nic_pic_id_cmd = "lspci | egrep -i --color 'network|ethernet'"
+ trex_nic_pic_id = str(TGEN_CLIENT.execute(trex_nic_pic_id_cmd)[1]).split('\n')
+ acheck = 0
+ for k in trex_nic_pic_id:
+ for j in nic_pid_ids_list:
+ if j in k:
+ acheck += 1
+ if acheck == 2:
+ print("Both the NIC PCI Ids are Correctly"\
+ " configured on TGen-Host...............[OK]\n")
+ else:
+ print("You configured NIC PCI Ids Wrong in "\
+ "TGen-Host............................[OK]\n")
+
+
+def sanity_collectd_check():
+ """
+ Check and verify collectd is able to run and start properly
+ """
+ check_collectd_cmd = "find /opt -maxdepth 1 -name 'collectd'"
+ check_test_result = str(DUT_CLIENT.execute(check_collectd_cmd)[1])
+ if "collectd" in check_test_result:
+ check_collectd_run_cmd = "echo {} | sudo -S service collectd start".format(
+ DUT_PWD)
+ DUT_CLIENT.run(check_collectd_run_cmd, pty=True)
+ check_collectd_status_cmd = "ps aux | grep collectd"
+ check_collectd_status = str(
+ DUT_CLIENT.execute(check_collectd_status_cmd)[1])
+ if "/sbin/collectd" in check_collectd_status:
+ print(
+ "Collectd is working Fine ................................................[OK] \n ")
+ else:
+ print(
+ "Collectd Fail to Start, Install correctly before running Test....[Failed]\n ")
+ else:
+ print(
+ "Collectd is not installed yet........................................[Failed]\n")
+
+def sanity_vsperf_check():
+ """
+ We have to make sure that VSPERF is installed correctly
+ """
+ if not DUT_CLIENT:
+ print("The Client is disconnected................................[Failed]")
+ return
+ vsperf_check_cmd = "source ~/vsperfenv/bin/activate ; cd vswitchperf && ./vsperf --help"
+ vsperf_check_cmd_result = str(DUT_CLIENT.execute(vsperf_check_cmd)[1])
+ vsperf_verify_list = [
+ 'usage',
+ 'positional arguments',
+ 'optional arguments',
+ 'test selection options',
+ 'test behavior options']
+ for idx, i in enumerate(vsperf_verify_list, start=1):
+ if str(i) in vsperf_check_cmd_result:
+ if idx < 5:
+ continue
+ elif idx == 5:
+ print(
+ "VSPERF Installed Correctly and Working fine.........................."\
+ "....[OK]\n")
+ else:
+ print(
+ "VSPERF Does Not Installed Correctly , INSTALL IT AGAIN........[Critical]\n")
+ else:
+ print(
+ "VSPERF Does Not Installed Correctly , INSTALL IT AGAIN............[Critical]\n")
+ break
+
+
+def sanity_tgen_conn_dut_check():
+ """
+ We should confirm that the DUT can reach the Traffic Generator Host and that the connection works
+ """
+ if not DUT_CLIENT or not TGEN_CLIENT:
+ print("The Client is disconnected................................[Failed]")
+ return
+ tgen_connectivity_check_cmd = "ping {} -c 1".format(TGEN_IP)
+ tgen_connectivity_check_result = int(
+ DUT_CLIENT.execute(tgen_connectivity_check_cmd)[0])
+ if tgen_connectivity_check_result == 0:
+ print(
+ "DUT-Host is successfully reachable to Traffic Generator Host.............[OK]\n")
+ else:
+ print(
+ "DUT-host is unsuccessful to reach the Traffic Generator Host..............[Failed]")
+ print(
+ "Make sure to establish connection before running Test...............[Critical]\n")
+
+
+def sanity_tgen_check():
+ """
+ It will check whether T-rex is running properly or not
+ """
+ if not TGEN_CLIENT:
+ print("The Client is disconnected................................[Failed]")
+ return
+ tgen_start_cmd_check = "cd trex/scripts &&"
+ tgen_start_cmd_check += " ./t-rex-64 -f cap2/dns.yaml -d 100 -m 1 --nc"
+ tgen_start_cmd_result = int(TGEN_CLIENT.execute(tgen_start_cmd_check)[0])
+ if tgen_start_cmd_result == 0:
+ print(
+ "TGen-Host successfully running........................................[OK]\n")
+ else:
+ print("TGen-Host is unable to start t-rex ..................[Failed]")
+ print("Make sure you install t-rex correctly ...............[Critical]\n")
+
+
+def dut_vsperf_test_availability():
+ """
+ Before running a test we have to make sure that no other test is running
+ """
+ vsperf_ava_cmd = "ps -ef | grep -v grep | grep ./vsperf | awk '{print $2}'"
+ vsperf_ava_result = len(
+ (DUT_CLIENT.execute(vsperf_ava_cmd)[1]).split("\n"))
+ if vsperf_ava_result == 1:
+ print("DUT-Host is available for performing VSPERF Test\n\
+ You can perform Test!")
+ else:
+ print("DUT-Host is busy right now, Wait for some time\n\
+ Always Check availability before Running Test!\n")
+
+if DUT_IP:
+ host_connect()
+if not DUT_CLIENT:
+ print('Failed to connect to DUT ...............[Critical]')
+ sys.exit()
+else:
+ vsperf_install()
+ install_collectd()
+ collectd_upload_config()
+ dut_hugepage_config()
+ dut_vsperf_test_availability()
+if TGEN_IP:
+ tgen_connect()
+if not TGEN_CLIENT:
+ print('Failed to connect to TGEN_HOST.............[Critical]')
+ sys.exit()
+else:
+ tgen_install()
+ upload_tgen_config_file()
+ sanity_nic_check()
+ start_tgen()
+
+print("\n\nIF you are getting any Failed or Critical message!!!\n" \
+ "Please follow this steps:\n"
+ "1. Make necessory changes before running VSPERF TEST\n"\
+ "2. Re-Run the auto deployment container")
+
+if SANITY and 'yes' in SANITY.lower():
+ sanity_collectd_check()
+ sanity_vsperf_check()
+ sanity_tgen_check()
+ sanity_tgen_conn_dut_check()
diff --git a/tools/docker/deployment/auto/docker-compose.yml b/tools/docker/deployment/auto/docker-compose.yml
new file mode 100644
index 00000000..b5b808d2
--- /dev/null
+++ b/tools/docker/deployment/auto/docker-compose.yml
@@ -0,0 +1,22 @@
+version: '2'
+
+services:
+ deploy:
+ build:
+ context: ./controller
+ volumes:
+ - ./controller/vsperf:/vsperf
+ env_file:
+ - ./controller/list.env
+ ports:
+ - 50051
diff --git a/tools/docker/deployment/interactive/controller/Dockerfile b/tools/docker/deployment/interactive/controller/Dockerfile
new file mode 100644
index 00000000..3d9fca42
--- /dev/null
+++ b/tools/docker/deployment/interactive/controller/Dockerfile
@@ -0,0 +1,21 @@
+FROM python:3.6
+LABEL maintainer="sridhar.rao@spirent.com"
+
+ENV GRPC_PYTHON_VERSION 1.4.0
+RUN apt-get update && apt-get -y install python3-pip
+RUN pip3 install grpcio==${GRPC_PYTHON_VERSION} grpcio-tools==${GRPC_PYTHON_VERSION}
+RUN pip3 install paramiko
+RUN pip3 install chainmap
+RUN pip3 install oslo.utils
+RUN pip3 install scp
+
+WORKDIR /usr/src/app
+
+COPY ./vsperf ./vsperf
+
+VOLUME ["/usr/src/app/vsperf"]
+
+CMD ["python3", "./vsperf/vsperf_controller.py"]
+
diff --git a/tools/docker/deployment/interactive/controller/vsperf/__init__.py b/tools/docker/deployment/interactive/controller/vsperf/__init__.py
new file mode 100644
index 00000000..ad0ebec3
--- /dev/null
+++ b/tools/docker/deployment/interactive/controller/vsperf/__init__.py
@@ -0,0 +1 @@
+#### Empty
diff --git a/tools/docker/deployment/interactive/controller/vsperf/vsperf_controller.py b/tools/docker/deployment/interactive/controller/vsperf/vsperf_controller.py
new file mode 100644
index 00000000..b192c493
--- /dev/null
+++ b/tools/docker/deployment/interactive/controller/vsperf/vsperf_controller.py
@@ -0,0 +1,360 @@
+# Copyright 2018-19 Spirent Communications.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# pylint: disable=R0902
+# Sixteen instance attributes is reasonable here
+# pylint: disable=W0221
+"""
+VSPERF docker-controller.
+"""
+
+import io
+import time
+from concurrent import futures
+import grpc
+
+from proto import vsperf_pb2
+from proto import vsperf_pb2_grpc
+from utils import ssh
+
+_ONE_DAY_IN_SECONDS = 60 * 60 * 24
+
+
+# pylint: disable=too-few-public-methods,no-self-use
+class PseudoFile(io.RawIOBase):
+ """
+ Handle ssh command output.
+ """
+
+ def write(self, chunk):
+ """
+ Write to file
+ """
+ if "error" in chunk:
+ return
+ with open("./output.txt", "a") as fref:
+ fref.write(chunk)
+
+
+class VsperfController(vsperf_pb2_grpc.ControllerServicer):
+ """
+ Main Controller Class
+ """
+
+ def __init__(self):
+ """
+ Initialization
+ """
+ self.client = None
+ self.dut = None
+ self.dut_check = None
+ self.tgen_check = None
+ self.user = None
+ self.pwd = None
+ self.tgen_client = None
+ self.tgen = None
+ self.tgen_user = None
+ self.tgenpwd = None
+ self.tgen_conf = None
+ self.scenario = None
+ self.hpmax = None
+ self.hprequested = None
+ self.tgen_ip_address = None
+ self.trex_conf = None
+ # Default TGen is T-Rex
+ self.trex_conffile = "trex_cfg.yml"
+ self.collectd_conffile = "collectd.conf"
+
+ def setup(self):
+ """
+ Performs Setup of the client.
+ """
+ # Just connect to VM.
+ self.client = ssh.SSH(host=self.dut, user=self.user,
+ password=self.pwd)
+ self.client.wait()
+
+ def install_vsperf(self):
+ """
+ Perform actual installation
+ """
+ download_cmd = "git clone https://gerrit.opnfv.org/gerrit/vswitchperf"
+ self.client.run(download_cmd)
+ install_cmd = "cd vswitchperf/systems ; "
+ install_cmd += "echo '{}' | sudo -S ./build_base_machine.sh ".format(
+ self.pwd)
+ #install_cmd += "./build_base_machine.sh"
+ self.client.run(install_cmd)
+
+ def VsperfInstall(self, request, context):
+ """
+ Handle VSPERF install command from client
+ """
+ # print("Installing VSPERF")
+ if self.dut_check != 0:
+ return vsperf_pb2.StatusReply(message="DUT-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " DUT-Host.")
+ vsperf_check_cmd = "source ~/vsperfenv/bin/activate ; cd vswitchperf* && ./vsperf --help"
+ vsperf_check_cmd_result = str(self.client.execute(vsperf_check_cmd)[1])
+ vsperf_verify_list = [
+ 'usage',
+ 'positional arguments',
+ 'optional arguments',
+ 'test selection options',
+ 'test behavior options']
+ for idx, i in enumerate(vsperf_verify_list, start=1):
+ if str(i) in vsperf_check_cmd_result:
+ if idx < 5:
+ continue
+ elif idx == 5:
+ return vsperf_pb2.StatusReply(
+ message="VSPERF is Already Installed on DUT-Host")
+ self.install_vsperf()
+ return vsperf_pb2.StatusReply(message="VSPERF Successfully Installed DUT-Host")
+
+ def HostConnect(self, request, context):
+ """
+ Handle host connectivity command from client
+ """
+ self.dut = request.ip
+ self.user = request.uname
+ self.pwd = request.pwd
+ self.setup()
+ check_cmd = "ls -l"
+ self.dut_check = int(self.client.execute(check_cmd)[0])
+ return vsperf_pb2.StatusReply(message="Successfully Connected")
+
+ def save_chunks_to_file(self, chunks, filename):
+ """
+ Write the output to file
+ """
+ with open(filename, 'wb') as fref:
+ for chunk in chunks:
+ fref.write(chunk.Content)
+
+###### Traffic Generator Related functions ####
+ def TGenHostConnect(self, request, context):
+ """
+ Connect to TGen-Node
+ """
+ self.tgen = request.ip
+ self.tgen_user = request.uname
+ self.tgenpwd = request.pwd
+ self.tgen_setup()
+ check_tgen_cmd = "ls"
+ self.tgen_check = int(self.tgen_client.execute(check_tgen_cmd)[0])
+ return vsperf_pb2.StatusReply(message="Successfully Connected")
+
+ def tgen_setup(self):
+ """
+ Setup the T-Gen Client
+ """
+ # Just connect to VM.
+ self.tgen_client = ssh.SSH(host=self.tgen, user=self.tgen_user,
+ password=self.tgenpwd)
+ self.tgen_client.wait()
+
+ def TGenInstall(self, request, context):
+ """
+ Install Traffic generator on the node.
+ """
+ if self.tgen_check != 0:
+ return vsperf_pb2.StatusReply(message="TGen-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " TGen-Host.")
+ kill_cmd = "pkill -f t-rex"
+ self.tgen_client.send_command(kill_cmd)
+ tgen_start_cmd = "cd trex_2.37/scripts && ./t-rex-64 -f cap2/dns.yaml -d 100 -m 1 --nc"
+ tgen_start_cmd_result = int(self.tgen_client.execute(tgen_start_cmd)[0])
+ kill_cmd = "pkill -f t-rex"
+ self.tgen_client.send_command(kill_cmd)
+ if tgen_start_cmd_result == 0:
+ return vsperf_pb2.StatusReply(
+ message="Traffic Generetor has T-rex Installed")
+ download_cmd = "git clone https://github.com/cisco-system-traffic-generator/trex-core"
+ self.tgen_client.run(download_cmd)
+ install_cmd = "cd trex-core/linux_dpdk ; ./b configure ; ./b build"
+ self.tgen_client.run(install_cmd)
+ # before you set up your trex_cfg.yaml make sure to sanity-check the
+ # NIC PCIs and establish a route between your DUT and Test Device.
+ return vsperf_pb2.StatusReply(message="Traffic Generetor has now T-rex Installed")
+
+ def TGenUploadConfigFile(self, request, context):
+ """
+ Handle upload config-file command from client
+ """
+ if self.tgen_check != 0:
+ return vsperf_pb2.StatusReply(message="TGen-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " TGen-Host.")
+ filename = self.trex_conffile
+ self.save_chunks_to_file(request, filename)
+ check_trex_config_cmd = "echo {} | sudo -S find /etc -maxdepth 1 -name trex_cfg.yaml".\
+ format(self.tgenpwd)
+ check_test_result = str(
+ self.tgen_client.execute(check_trex_config_cmd)[1])
+ if "trex_cfg.yaml" in check_test_result:
+ self.tgen_client.run("rm -f /etc/trex_cfg.yaml")
+ self.upload_tgen_config()
+ self.tgen_client.run(
+ "echo {} | sudo -S mv ~/trex_cfg.yaml /etc/".format(self.tgenpwd), pty=True)
+ return vsperf_pb2.UploadStatus(Message="Successfully Uploaded",
+ Code=1)
+
+ def upload_tgen_config(self):
+ """
+ Perform file upload.
+ """
+ self.tgen_client.put_file(self.trex_conffile, '/root/trex_cfg.yaml')
+
+# Tool-Chain related Functions ####
+
+ def install_collectd(self):
+ """
+ Install collectd on the DUT-Host
+ """
+ check_collectd_config_cmd = "find /opt -maxdepth 1 -name 'collectd'"
+ check_test_result = str(
+ self.client.execute(check_collectd_config_cmd)[1])
+ if "collectd" in check_test_result:
+ pass
+ else:
+ download_cmd = "git clone https://github.com/collectd/collectd.git"
+ self.client.run(download_cmd)
+ build_cmd = "cd collectd ; "
+ build_cmd += "./build.sh"
+ self.client.run(build_cmd)
+ config_cmd = "cd collectd ; ./configure --enable-syslog "
+ config_cmd += "--enable-logfile --enable-hugepages --enable-debug ; "
+ self.client.run(config_cmd)
+ install_cmd = "cd collectd ; make ; "
+ install_cmd += "echo '{}' | sudo -S make install".format(self.pwd)
+ self.client.run(install_cmd, pty=True)
+
+ def CollectdInstall(self, request, context):
+ """
+ Install Collectd on DUT
+ """
+ if self.dut_check != 0:
+ return vsperf_pb2.StatusReply(message="DUT-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " DUT-Host.")
+ self.install_collectd()
+ return vsperf_pb2.StatusReply(
+ message="Collectd Successfully Installed on DUT-Host")
+
+ def upload_collectd_config(self):
+ """
+ Perform file upload.
+ """
+ self.client.put_file(self.collectd_conffile, '~/collectd.conf')
+ move_cmd = "echo '{}' | sudo -S mv ~/collectd.conf /opt/collectd/etc".format(
+ self.pwd)
+ self.client.run(move_cmd, pty=True)
+
+ def CollectdUploadConfig(self, request, context):
+ """
+ Upload collectd config-file on DUT
+ """
+ if self.dut_check != 0:
+ return vsperf_pb2.StatusReply(message="DUT-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " DUT-Host.")
+ filename = self.collectd_conffile
+ self.save_chunks_to_file(request, filename)
+ self.upload_collectd_config()
+ return vsperf_pb2.UploadStatus(
+ Message="Successfully Collectd Configuration Uploaded", Code=1)
+
+###System Configuration related functions###
+
+ def DutHugepageConfig(self, request, context):
+ """
+ Configure the DUT system hugepage parameter from client
+ """
+ if self.dut_check != 0:
+ return vsperf_pb2.StatusReply(message="DUT-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " DUT-Host.")
+ self.hpmax = int(request.hpmax)
+ self.hprequested = int(request.hprequested)
+ hugepage_cmd = "echo '{}' | sudo -S mkdir -p /mnt/huge ; ".format(
+ self.pwd)
+ hugepage_cmd += "echo '{}' | sudo -S mount -t hugetlbfs nodev /mnt/huge".format(
+ self.pwd)
+ self.client.run(hugepage_cmd, pty=True)
+ hp_nr_cmd = "cat /sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages"
+ hp_free_cmd = "cat /sys/devices/system/node/node0/hugepages/hugepages-2048kB/free_hugepages"
+ hp_nr = int(self.client.execute(hp_nr_cmd)[1])
+ hp_free = int(self.client.execute(hp_free_cmd)[1])
+ if hp_free <= self.hprequested:
+ hp_nr_new = hp_nr + (self.hprequested - hp_free)
+ if hp_nr_new > self.hpmax:
+ hp_nr_new = self.hpmax
+
+ nr_hugepage_cmd = "echo '{}' | sudo -S bash -c \"echo 'vm.nr_hugepages={}' >>".\
+ format(self.pwd, hp_nr_new)
+ nr_hugepage_cmd += " /etc/sysctl.conf\""
+ self.client.run(nr_hugepage_cmd, pty=True)
+
+ dict_cmd = "cat /sys/devices/system/node/node1/hugepages/hugepages-2048kB/nr_hugepages"
+ dict_check = int(self.client.execute(dict_cmd)[0])
+ if dict_check == 0:
+ node1_hugepage_cmd = "echo '{}' | sudo -s bash -c \"echo 0 >".format(self.pwd)
+ node1_hugepage_cmd += " /sys/devices/system/node/node1/"
+ node1_hugepage_cmd += "hugepages/hugepages-2048kB/nr_hugepages\""
+ return vsperf_pb2.StatusReply(
+ message="DUT-Host system configured with {} No of Hugepages".format(hp_nr_new))
+
+ def CheckDependecies(self, request, context):
+ """
+ Check and Install required packages on DUT
+ """
+ if self.dut_check != 0:
+ return vsperf_pb2.StatusReply(message="DUT-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " DUT-Host.")
+ packages = ['python34-tkinter', 'sysstat', 'bc']
+ for pkg in packages:
+ # pkg_check_cmd = "dpkg -s {}".format(pkg) for ubuntu
+ pkg_check_cmd = "rpm -q {}".format(pkg)
+ pkg_cmd_response = self.client.execute(pkg_check_cmd)[0]
+ if pkg_cmd_response == 1:
+ install_pkg_cmd = "echo '{}' | sudo -S yum install -y {}".format(
+ self.pwd, pkg)
+ #install_pkg_cmd = "echo '{}' | sudo -S apt-get install -y {}".format(self.pwd,pkg)
+ self.client.run(install_pkg_cmd, pty=True)
+
+ return vsperf_pb2.StatusReply(message="Python34-tkinter, sysstat and bc Packages"\
+ "are now Installed")
+
+def serve():
+ """
+ Start servicing the client
+ """
+ server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
+ vsperf_pb2_grpc.add_ControllerServicer_to_server(
+ VsperfController(), server)
+ server.add_insecure_port('[::]:50051')
+ server.start()
+ try:
+ while True:
+ time.sleep(_ONE_DAY_IN_SECONDS)
+ except (SystemExit, KeyboardInterrupt, MemoryError, RuntimeError):
+ server.stop(0)
+
+
+if __name__ == "__main__":
+ serve()
diff --git a/tools/docker/deployment/interactive/docker-compose.yml b/tools/docker/deployment/interactive/docker-compose.yml
new file mode 100644
index 00000000..cbf894c5
--- /dev/null
+++ b/tools/docker/deployment/interactive/docker-compose.yml
@@ -0,0 +1,21 @@
+version: '2'
+
+services:
+ deploy:
+ build:
+ context: ./controller
+ volumes:
+ - ./controller/vsperf:/vsperf
+ ports:
+ - 50051:50051
diff --git a/tools/docker/docs/architecture.txt b/tools/docker/docs/architecture.txt
new file mode 100644
index 00000000..a0b29e05
--- /dev/null
+++ b/tools/docker/docs/architecture.txt
@@ -0,0 +1,70 @@
+Architecture diagrams of the VSPERF-Containers.
+
+Figure-1: Deploy-Auto
+ ++++++++++++ +++++++++++++++++++++++++++
+ +Container + + +
+ + + + +
+ + + |--- + DUT - HOST +
+ + + | + +
+ + + | + +
+ + Deploy + | +++++++++++++++++++++++++++
+ + [AUTO] + --|
+ + + | +++++++++++++++++++++++++++
+ + + | + +
+ + + | + +
+ + + |--- + TGEN (HOST) +
+ + + + +
+ + + + +
+ ++++++++++++ +++++++++++++++++++++++++++
+
+Figure-2: Deploy-Interactive
+ ++++++++++++ ++++++++++++ +++++++++++++++++++++++++++
+ + + + Container+ + +
+ + + + + + +
+ + + + + |----+ DUT - HOST +
+ + + + + | + +
+ + + + + | + +
+ + CLIENT + + Deploy + | +++++++++++++++++++++++++++
+ + +<------>+ [INTER +---|
+ + + + ACTIVE]+ | +++++++++++++++++++++++++++
+ + + + + | + +
+ + + + + | + +
+ + + + + |----+ TGEN (HOST) +
+ + + + + + +
+ + + + + + +
+ ++++++++++++ ++++++++++++ +++++++++++++++++++++++++++
+
+Figure-3: TestControl Auto
+ ++++++++++++ +++++++++++++++++++++++++++ ++++++++++++
+ + Container+ + + +Container +
+ + + + + + +
+ + + |--- + DUT - HOST +----| + +
+ + + | + + | + +
+ + + | + + | + +
+ + Test + | +++++++++++++++++++++++++++ | + Results +
+ + Control +---| |---+ +
+ + [AUTO] + | +++++++++++++++++++++++++++ | + +
+ + + | + + | + +
+ + + | + + | + +
+ + + |--- + TGEN (HOST) +----| + +
+ + + + + + +
+ + + + + + +
+ ++++++++++++ +++++++++++++++++++++++++++ ++++++++++++
+
+Figure-4: TestControl Interactive
+ ++++++++++++ ++++++++++++ +++++++++++++++++++++++++++ ++++++++++++
+ + + +Container + + + + Container+
+ + + + + + + + +
+ + + + + |----+ DUT - HOST +----| + +
+ + + + + | + + | + +
+ + + + + | + + | + +
+ + CLIENT + + Test + | + + | + Results +
+ + + + Control + | +++++++++++++++++++++++++++ | + +
+ + +<------>+ [INTER +---| |---+ +
+ + + + ACTIVE]+ | +++++++++++++++++++++++++++ | + +
+ + + + + | + + | + +
+ + + + + | + + | + +
+ + + + + |----+ TGEN (HOST) +----| + +
+ + + + + + + + +
+ + + + + + + + +
+ ++++++++++++ ++++++++++++ +++++++++++++++++++++++++++ ++++++++++++
diff --git a/tools/docker/docs/client.rst b/tools/docker/docs/client.rst
new file mode 100644
index 00000000..1483ff40
--- /dev/null
+++ b/tools/docker/docs/client.rst
@@ -0,0 +1,99 @@
+VSPERF Client
+--------------
+The VSPERF client is a simple python application that can be used to work with the interactive deploy and testcontrol containers.
+
+============
+Description
+============
+
+The VSPERF client is used both to set up the DUT-Host and TGen-Host and to run multiple tests. The user can perform different operations by selecting the available options and their sub-options.
+
+The VSPERF client provides the following options to the user.
+
+* Establish Connections
+This option allows the user to initialize the connections.
+
+[0]Connect to DUT Host: It will establish a connection with the DUT-HOST. DUT-HOST refers to the system where the DUT - vswitch and VNFs - runs. The vsperf application also runs on the DUT-HOST.
+[1]Connect to Tgen Host: This option will establish a connection with the TGEN-HOST. TGEN-HOST refers to the system where the traffic generator runs. As of now, only T-Rex is supported for installation and configuration.
+
+* Installation
+After establishing the connections, the user can perform installations to set up the test environment. Under this, there are 3 options:
+
+[0]Install VSPERF : This option will first check whether vsperf is installed on the DUT-Host or not. If VSPERF is not installed, it will perform the VSPERF installation process on the DUT-Host.
+
+[1]Install TGen: This option will check whether t-rex is installed on the Tgen-Host or not. If t-rex is already installed, then it will also check whether it is working fine or not. If t-rex is not installed, then the configured version of t-rex will be installed.
+
+[2]Install Collectd: This option will install collectd on the DUT-Host.
+
+* Upload Configuration Files
+Once the installation process is completed, the user can upload configuration files. Two uploads are supported:
+
+[0]Upload TGen Configuration File: It will upload the trex_cfg.yaml configuration file to the Tgen-Host. [The user can either specify the path for trex_cfg.yaml in vsperfclient.conf or provide it at runtime.] This file will be used to run the T-rex traffic generator.
+
+[1]Upload Collectd Configuration File: This option is used to upload the collectd configuration file.
+
+* Manage DUT-System Configuration
+Following the upload of configuration files, the user can perform some basic configuration of the DUT-Host. The available options are:
+
+[0]DUT-Host hugepages configuration: This option allows the user to manage hugepages on the DUT-Host. [The user needs to provide values for HpMax and HpRequested in vsperfclient.conf.]
+
+[1]Check VSPERF dependencies: Using this option the user can check library dependencies on the DUT-Host.
+
+* Run Test
+Once the above steps are completed, the user can perform sanity checks and run the tests. The available options are:
+
+[0]Upload Test Configuration File : This option will upload the vsperf test configuration file.
+
+[1]Perform Sanity Checks before running tests : This option has certain sub-options; the user must perform all sanity checks before running a test. The user may not be able to start the VSPERF test until all sanity checks have passed. The sanity check option contains the following sub-options: (a) check VSPERF is installed correctly, (b) check if the VNF path is available on the DUT-Host, (c) check if the configured NIC-PCIs are available on the TGen and DUT hosts, (d) check if Collectd is installed correctly, (e) check if the connection between DUT-Host and TGen-Host is OK, (f) check that CPU allocation on the DUT-Host is done correctly.
+
+[2]Check if DUT-HOST is available : The user can check whether the DUT-Host is available for a test or not. If the DUT-Host is available for performing vsperf, the user can go ahead and start the test.
+
+[3]Start TGen : This option will start t-rex traffic generator for test.
+
+[4]Start Beats : This option will start beats on the DUT-Host.
+
+[5]Start Test : If all the sanity checks have passed and the traffic generator is running, then this option will start the vsperf test. Whatever test is defined in vsperfclient.conf will be performed (see the example below). Note: the user can also perform multiple tests.
+
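+For example, the test to run and its configuration file are taken from the
+[Testcase] section of vsperfclient.conf shown earlier in this patch::
+
+    [Testcase]
+    test = phy2phy_tput
+    conffile = vsperf.conf
+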
+* Test Status
+Once the user has started a test, they can check on its status. The following sub-options are available.
+
+[0]Test status : Check whether the test has completed successfully or failed. If the user is running multiple tests, they can identify the failed test-name using this option.
+
+[1]Get Test Configuration file from DUT-host: The user is also able to read the content of the test configuration file they uploaded.
+
+* Clean-Up
+When all tests are done, the user can perform cleanup of the systems, using the following sub-options:
+
+[0]Remove VSPERF: This option will completely remove the vsperfenv on the DUT-Host.
+
+[1]Terminate VSPERF: This option will keep the vsperfenv on the DUT-Host. If any vsperf-related processes are still running, this option will terminate them (ovs-vswitchd, ovsdb-server, vppctl, stress, qemu-system-x86_64).
+
+[2]Remove Results from DUT-Host : This option will remove all the test results located in the /tmp folder.
+
+[3]Remove Uploaded Configuration Files: This option will remove all uploaded test configuration files.
+
+[4]Remove Collectd: This option will uninstall collectd from the DUT-Host.
+
+[5]Remove Everything: This option will execute all the options listed above.
+
+=============================
+How To Use
+=============================
+
+Prerequisites before running vsperf client
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+1. The user must install grpcio, grpcio-tools and configparser in the python3 environment.
+
+2. The user has to prepare the client-configuration file by providing appropriate values.
+
+3. The user has to prepare the configuration files that will be uploaded to the DUT-Host or TGen-Host systems.
+
+4. The T-rex and collectd configuration files should be named trex_cfg.yaml and collectd.conf, respectively.
+
+5. Start the deployment-interactive container and testcontrol-interactive container, which will run the servers on ports 50051 and 50052, respectively.
+
+Run vsperf client
+^^^^^^^^^^^^^^^^^^^^^
+Locate vsperf_client.py and run it with python3.
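+
+A minimal invocation, assuming the client sources and vsperfclient.conf sit in
+the current directory, might look like::
+
+    pip3 install grpcio grpcio-tools configparser
+    python3 vsperf_client.py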
+
diff --git a/tools/docker/docs/test.rst b/tools/docker/docs/test.rst
new file mode 100644
index 00000000..d002ddbe
--- /dev/null
+++ b/tools/docker/docs/test.rst
@@ -0,0 +1,86 @@
+Before using the VSPERF client and VSPERF containers, the user must run the prepare.sh script, which will prepare the local environment.
+
+Locate vsperf-docker/prepare.sh and run::
+
+    bash prepare.sh
+
+VSPERF Containers
+------------------
+
+============
+deployment
+============
+Users have two choices for deployment, auto and interactive.
+
+1. auto
+^^^^^^^^^^^^^^^^^^^^^
+This auto deployment container will do everything related to the VSPERF set-up automatically. This includes installation of VSPERF, T-rex and collectd, uploading the collectd configuration file to the DUT-Host, uploading the t-rex configuration file and starting the t-rex traffic generator. Before installing vsperf and t-rex, the container will perform a verification process, which includes basic sanity checks such as checking for old installations, huge-page checks, necessary folders and software, etc. The user should modify the t-rex (trex_cfg.yaml) and collectd (collectd.conf) configuration files depending on their needs before running the containers.
+
+
+Pre-Deployment Configuration
+****************************
+The user has to provide the following in the list.env file:
+
+1. DUT-Host and TGen-Host related credentials and IP addresses
+2. Values for HUGEPAGE_MAX and HUGEPAGE_REQUESTED
+3. Option for sanity check - YES or NO
+
+Build
+******************
+Use the **docker-compose build** command to build the container.
+
+Run
+******************
+Run the container's service with the **docker-compose run deploy** command.
+
+
+2. interactive
+^^^^^^^^^^^^^^^^^^^^^
+The interactive container must be running before the vsperf client is used. It will start the server on port 50051, to which the vsperf client sends commands. The deployment interactive container handles all vsperf set-up related commands from the vsperf client and performs the corresponding operations.
+
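+For reference, the vsperf client talks to this server through gRPC stubs
+generated from vsperf.proto. A minimal sketch of such a call, assuming the
+stubs were generated with grpcio-tools and the server is reachable locally,
+might look like::
+
+    import grpc
+    from proto import vsperf_pb2
+    from proto import vsperf_pb2_grpc
+
+    # Connect to the deployment server and ask it to connect to the DUT-Host.
+    channel = grpc.insecure_channel('localhost:50051')
+    stub = vsperf_pb2_grpc.ControllerStub(channel)
+    reply = stub.HostConnect(vsperf_pb2.HostInfo(
+        ip='10.10.120.24', uname='opnfv', pwd='opnfv'))
+    print(reply.message)
+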
+
+Build
+******************
+Run the **docker-compose build** command to build the container.
+
+Run
+******************
+Run the container with the **docker-compose up deploy** command.
+Once the server is running, the user has to run the testcontrol interactive container; then the vsperf client can be run.
+
+
+===============
+testcontrol
+===============
+For testcontrol too, the user has two choices - auto and interactive.
+
+1. auto
+^^^^^^^^^^^^^^^^^^^^^
+This auto testcontrol container will perform the test automatically on the DUT-Host. This container also performs sanity checks automatically. The user is also able to get the test status for all tests. If any sanity check is not satisfied, the test will not run and the container is gracefully stopped. The user can modify the vsperf.conf file, which will be uploaded to the DUT-Host automatically by the container and used for performing the vsperf test.
+
+Pre-Deployment Configuration
+****************************
+
+1. The user has to provide all the DUT-Host credentials and the IP address of the TGen-Host in list.env.
+2. Provide names for VSPERF_TESTS and VSPERF_CONFFILE in list.env, as in the example below.
+3. Provide options for VSPERF_TRAFFICGEN_MODE and CLEAN_UP [YES or NO] in the list.env file.
+
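+For example, the test-related entries in list.env might look like this
+(illustrative values, reusing the test name and conffile shipped in
+vsperfclient.conf)::
+
+    VSPERF_TESTS=phy2phy_tput
+    VSPERF_CONFFILE=vsperf.conf
+    VSPERF_TRAFFICGEN_MODE=NO
+    CLEAN_UP=NO
+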
+Build
+******************
+Run the **docker-compose build** command to build the container.
+
+Run
+******************
+Run the container's service with the **docker-compose run testcontrol** command.
+The user can observe the results and perform another test by just changing the VSPERF_TESTS environment variable in the list.env file.
+
+
+2. interactive
+^^^^^^^^^^^^^^^^^^^^^
+This interactive testcontrol container must be running before the vsperf client is used. It will start the server on localhost port 50052 for the vsperf client. The testcontrol interactive container handles all the test-related commands from the vsperf client and performs the corresponding operations.
+
+Build
+******************
+Run the **docker-compose build** command to build the container.
+
+Run
+******************
+Run the container with the **docker-compose up testcontrol** command.
+After running this container, the user can use the vsperf client.
diff --git a/tools/docker/libs/proto/__init__.py b/tools/docker/libs/proto/__init__.py
new file mode 100644
index 00000000..ad0ebec3
--- /dev/null
+++ b/tools/docker/libs/proto/__init__.py
@@ -0,0 +1 @@
+#### Empty
diff --git a/tools/docker/libs/proto/vsperf.proto b/tools/docker/libs/proto/vsperf.proto
new file mode 100755
index 00000000..0fc45df3
--- /dev/null
+++ b/tools/docker/libs/proto/vsperf.proto
@@ -0,0 +1,109 @@
+// Copyright 2018-2019 .
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+
+syntax = "proto3";
+package vsperf;
+
+service Controller {
+ rpc HostConnect (HostInfo) returns (StatusReply) {}
+ rpc VsperfInstall (HostInfo) returns (StatusReply) {}
+ rpc TGenHostConnect (HostInfo) returns (StatusReply) {}
+ rpc TGenInstall (HostVerInfo) returns (StatusReply) {}
+ rpc TGenUploadConfigFile (stream ConfFile) returns (UploadStatus) {}
+ rpc CollectdInstall (HostInfo) returns (StatusReply) {}
+ rpc CollectdUploadConfig (stream ConfFile) returns (UploadStatus) {}
+ rpc DutHugepageConfig (HugepConf) returns (StatusReply) {}
+ rpc CheckDependecies (HostInfo) returns (StatusReply) {}
+ rpc UploadConfigFile (ConfFileTest) returns (UploadStatus) {}
+ rpc StartTest (ControlVsperf) returns (StatusReply) {}
+ rpc TestStatus (StatusQuery) returns (StatusReply) {}
+ rpc StartTGen (ControlTGen) returns (StatusReply) {}
+ rpc StartBeats (HostInfo) returns (StatusReply) {}
+ rpc RemoveVsperf (HostInfo) returns (StatusReply) {}
+ rpc RemoveResultFolder (HostInfo) returns (StatusReply) {}
+ rpc RemoveUploadedConfig (HostInfo) returns (StatusReply) {}
+ rpc RemoveCollectd (HostInfo) returns (StatusReply) {}
+ rpc RemoveEverything (HostInfo) returns (StatusReply) {}
+ rpc TerminateVsperf (HostInfo) returns (StatusReply) {}
+ rpc SanityNICCheck (HostInfo) returns (StatusReply) {}
+ rpc SanityCollectdCheck (HostInfo) returns (StatusReply) {}
+ rpc SanityVNFpath (HostInfo) returns (StatusReply) {}
+ rpc SanityVSPERFCheck (HostInfo) returns (StatusReply) {}
+ rpc SanityTgenConnDUTCheck (HostInfo) returns (StatusReply) {}
+ rpc SanityCPUAllocationCheck (HostInfo) returns (StatusReply) {}
+ rpc DUTvsperfTestAvailability (HostInfo) returns (StatusReply) {}
+ rpc GetVSPERFConffromDUT (HostInfo) returns (StatusReply) {}
+}
+
+message ControlVsperf {
+ string testtype = 1;
+ string conffile = 2;
+}
+
+message ControlTGen {
+ string params = 1;
+ string conffile = 2;
+}
+
+message LogDir {
+ string directory = 1;
+}
+
+message ConfFile {
+ bytes Content = 1;
+}
+
+message ConfFileTest {
+ string Content = 1;
+ string Filename = 2;
+}
+
+message HostInfo {
+ string ip = 1;
+ string uname = 2;
+ string pwd = 3;
+}
+
+message HugepConf {
+ string hpmax = 1;
+ string hprequested = 2;
+}
+
+message HostVerInfo {
+ string ip = 1;
+ string uname = 2;
+ string pwd = 3;
+ string version = 4;
+}
+
+message StatusQuery {
+ string testtype = 1;
+}
+
+message StatusReply {
+ string message = 1;
+}
+
+enum UploadStatusCode {
+ Unknown = 0;
+ Ok = 1;
+ Failed = 2;
+}
+
+message UploadStatus {
+ string Message = 1;
+ UploadStatusCode Code = 2;
+}
+
diff --git a/tools/docker/libs/utils/__init__.py b/tools/docker/libs/utils/__init__.py
new file mode 100644
index 00000000..ad0ebec3
--- /dev/null
+++ b/tools/docker/libs/utils/__init__.py
@@ -0,0 +1 @@
+#### Empty
diff --git a/tools/docker/libs/utils/exceptions.py b/tools/docker/libs/utils/exceptions.py
new file mode 100644
index 00000000..c4e0e097
--- /dev/null
+++ b/tools/docker/libs/utils/exceptions.py
@@ -0,0 +1,65 @@
+"""
+# Copyright (c) 2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+#pylint: disable=import-error
+from oslo_utils import excutils
+
+
+class VsperfCException(Exception):
+ """Base VSPERF-C Exception.
+
+ To correctly use this class, inherit from it and define
+ a 'message' property. That message will get printf'd
+ with the keyword arguments provided to the constructor.
+
+ Based on NeutronException class.
+ """
+ message = "An unknown exception occurred."
+
+ def __init__(self, **kwargs):
+ try:
+ super(VsperfCException, self).__init__(self.message % kwargs)
+ self.msg = self.message % kwargs
+ except Exception: # pylint: disable=broad-except
+ with excutils.save_and_reraise_exception() as ctxt:
+ if not self.use_fatal_exceptions():
+ ctxt.reraise = False
+ # at least get the core message out if something happened
+ super(VsperfCException, self).__init__(self.message)
+
+ def __str__(self):
+ return self.msg
+
+ def use_fatal_exceptions(self):
+ """Is the instance using fatal exceptions.
+
+ :returns: Always returns False.
+ """ #pylint: disable=no-self-use
+ return False
+
+
+class InvalidType(VsperfCException):
+ """Invalid type"""
+ message = 'Type "%(type_to_convert)s" is not valid'
+
+
+class SSHError(VsperfCException):
+ """ssh error"""
+ message = '%(error_msg)s'
+
+
+class SSHTimeout(SSHError):
+ """ssh timeout""" #pylint: disable=unnecessary-pass
+ pass
diff --git a/tools/docker/libs/utils/ssh.py b/tools/docker/libs/utils/ssh.py
new file mode 100644
index 00000000..a4df13b0
--- /dev/null
+++ b/tools/docker/libs/utils/ssh.py
@@ -0,0 +1,546 @@
+# Copyright 2013: Mirantis Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#pylint: disable=I,C,R,locally-disabled
+#pylint: disable=import-error,arguments-differ
+
+# this is a modified copy of rally/rally/common/sshutils.py
+
+"""High level ssh library.
+
+Usage examples:
+
+Execute command and get output:
+
+ ssh = sshclient.SSH("root", "example.com", port=33)
+ status, stdout, stderr = ssh.execute("ps ax")
+ if status:
+ raise Exception("Command failed with non-zero status.")
+ print(stdout.splitlines())
+
+Execute command with huge output:
+
+ class PseudoFile(io.RawIOBase):
+ def write(chunk):
+ if "error" in chunk:
+ email_admin(chunk)
+
+ ssh = SSH("root", "example.com")
+ with PseudoFile() as p:
+ ssh.run("tail -f /var/log/syslog", stdout=p, timeout=False)
+
+Execute local script on remote side:
+
+ ssh = sshclient.SSH("user", "example.com")
+
+ with open("~/myscript.sh", "r") as stdin_file:
+ status, out, err = ssh.execute('/bin/sh -s "arg1" "arg2"',
+ stdin=stdin_file)
+
+Upload file:
+
+ ssh = SSH("user", "example.com")
+ # use rb for binary files
+ with open("/store/file.gz", "rb") as stdin_file:
+ ssh.run("cat > ~/upload/file.gz", stdin=stdin_file)
+
+Eventlet:
+
+ eventlet.monkey_patch(select=True, time=True)
+ or
+ eventlet.monkey_patch()
+ or
+ sshclient = eventlet.import_patched("vsperf.ssh")
+
+"""
+from __future__ import print_function
+import io
+import logging
+import os
+import re
+import select
+import socket
+import time
+
+import paramiko
+from chainmap import ChainMap
+from oslo_utils import encodeutils
+from scp import SCPClient
+import six
+
+# When building container change this to
+import utils.exceptions as exceptions
+#else keep it as
+#import exceptions
+# When building container change this to
+from utils.utils import try_int, NON_NONE_DEFAULT, make_dict_from_map
+#else keep it as
+#from utils import try_int, NON_NONE_DEFAULT, make_dict_from_map
+
+
+def convert_key_to_str(key):
+ if not isinstance(key, (paramiko.RSAKey, paramiko.DSSKey)):
+ return key
+ k = io.StringIO()
+ key.write_private_key(k)
+ return k.getvalue()
+
+
+# class SSHError(Exception):
+# pass
+#
+#
+# class SSHTimeout(SSHError):
+# pass
+
+
+class SSH(object):
+ """Represent ssh connection."""
+ #pylint: disable=no-member
+
+ SSH_PORT = paramiko.config.SSH_PORT
+ DEFAULT_WAIT_TIMEOUT = 120
+
+ @staticmethod
+ def gen_keys(key_filename, bit_count=2048):
+ rsa_key = paramiko.RSAKey.generate(bits=bit_count, progress_func=None)
+ rsa_key.write_private_key_file(key_filename)
+ print("Writing %s ..." % key_filename)
+ with open('.'.join([key_filename, "pub"]), "w") as pubkey_file:
+ pubkey_file.write(rsa_key.get_name())
+ pubkey_file.write(' ')
+ pubkey_file.write(rsa_key.get_base64())
+ pubkey_file.write('\n')
+
+ @staticmethod
+ def get_class():
+ # must return static class name, anything else
+ # refers to the calling class
+ # i.e. the subclass, not the superclass
+ return SSH
+
+ @classmethod
+ def get_arg_key_map(cls):
+ return {
+ 'user': ('user', NON_NONE_DEFAULT),
+ 'host': ('ip', NON_NONE_DEFAULT),
+ 'port': ('ssh_port', cls.SSH_PORT),
+ 'pkey': ('pkey', None),
+ 'key_filename': ('key_filename', None),
+ 'password': ('password', None),
+ 'name': ('name', None),
+ }
+
+ def __init__(self, user, host, port=None, pkey=None,
+ key_filename=None, password=None, name=None):
+ """Initialize SSH client.
+
+ :param user: ssh username
+ :param host: hostname or ip address of remote ssh server
+ :param port: remote ssh port
+ :param pkey: RSA or DSS private key string or file object
+ :param key_filename: private key filename
+ :param password: password
+ """
+ self.name = name
+ if name:
+ self.log = logging.getLogger(__name__ + '.' + self.name)
+ else:
+ self.log = logging.getLogger(__name__)
+
+ self.wait_timeout = self.DEFAULT_WAIT_TIMEOUT
+ self.user = user
+ self.host = host
+ # everybody wants to debug this in the caller, do it here instead
+ self.log.debug("user:%s host:%s", user, host)
+
+ # we may get text port from YAML, convert to int
+ self.port = try_int(port, self.SSH_PORT)
+ self.pkey = self._get_pkey(pkey) if pkey else None
+ self.password = password
+ self.key_filename = key_filename
+ self._client = False
+        # paramiko loglevel debug will output ssh protocol debug
+ # we don't ever really want that unless we are debugging paramiko
+ # ssh issues
+ if os.environ.get("PARAMIKO_DEBUG", "").lower() == "true":
+ logging.getLogger("paramiko").setLevel(logging.DEBUG)
+ else:
+ logging.getLogger("paramiko").setLevel(logging.WARN)
+
+ @classmethod
+ def args_from_node(cls, node, overrides=None, defaults=None):
+ if overrides is None:
+ overrides = {}
+ if defaults is None:
+ defaults = {}
+
+ params = ChainMap(overrides, node, defaults)
+ return make_dict_from_map(params, cls.get_arg_key_map())
+
+ @classmethod
+ def from_node(cls, node, overrides=None, defaults=None):
+ return cls(**cls.args_from_node(node, overrides, defaults))
+
+ def _get_pkey(self, key):
+ if isinstance(key, six.string_types):
+ key = six.moves.StringIO(key)
+ errors = []
+ for key_class in (paramiko.rsakey.RSAKey, paramiko.dsskey.DSSKey):
+ try:
+ return key_class.from_private_key(key)
+ except paramiko.SSHException as e:
+ errors.append(e)
+ raise exceptions.SSHError(error_msg='Invalid pkey: %s' % errors)
+
+ @property
+ def is_connected(self):
+ return bool(self._client)
+
+ def _get_client(self):
+ if self.is_connected:
+ return self._client
+ try:
+ self._client = paramiko.SSHClient()
+ self._client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ self._client.connect(self.host, username=self.user,
+ port=self.port, pkey=self.pkey,
+ key_filename=self.key_filename,
+ password=self.password,
+ allow_agent=False, look_for_keys=False,
+ timeout=1)
+ return self._client
+ except Exception as e:
+ message = ("Exception %(exception_type)s was raised "
+ "during connect. Exception value is: %(exception)r" %
+ {"exception": e, "exception_type": type(e)})
+ self._client = False
+ raise exceptions.SSHError(error_msg=message)
+
+ def _make_dict(self):
+ return {
+ 'user': self.user,
+ 'host': self.host,
+ 'port': self.port,
+ 'pkey': self.pkey,
+ 'key_filename': self.key_filename,
+ 'password': self.password,
+ 'name': self.name,
+ }
+
+ def copy(self):
+ return self.get_class()(**self._make_dict())
+
+ def close(self):
+ if self._client:
+ self._client.close()
+ self._client = False
+
+ def run(self, cmd, stdin=None, stdout=None, stderr=None,
+ raise_on_error=True, timeout=3600,
+ keep_stdin_open=False, pty=False):
+ """Execute specified command on the server.
+
+ :param cmd: Command to be executed.
+ :type cmd: str
+ :param stdin: Open file or string to pass to stdin.
+ :param stdout: Open file to connect to stdout.
+ :param stderr: Open file to connect to stderr.
+        :param raise_on_error: If False then the exit code will be returned.
+                               If True then an exception will be raised on a
+                               non-zero exit code.
+ :param timeout: Timeout in seconds for command execution.
+ Default 1 hour. No timeout if set to 0.
+ :param keep_stdin_open: don't close stdin on empty reads
+ :type keep_stdin_open: bool
+ :param pty: Request a pseudo terminal for this connection.
+ This allows passing control characters.
+ Default False.
+ :type pty: bool
+ """
+
+ client = self._get_client()
+
+ if isinstance(stdin, six.string_types):
+ stdin = six.moves.StringIO(stdin)
+
+ return self._run(client, cmd, stdin=stdin, stdout=stdout,
+ stderr=stderr, raise_on_error=raise_on_error,
+ timeout=timeout,
+ keep_stdin_open=keep_stdin_open, pty=pty)
+
+ def _run(self, client, cmd, stdin=None, stdout=None, stderr=None,
+ raise_on_error=True, timeout=3600,
+ keep_stdin_open=False, pty=False):
+
+ transport = client.get_transport()
+ session = transport.open_session()
+ if pty:
+ session.get_pty()
+ session.exec_command(cmd)
+ start_time = time.time()
+
+ # encode on transmit, decode on receive
+ data_to_send = encodeutils.safe_encode("", incoming='utf-8')
+ stderr_data = None
+
+ # If we have data to be sent to stdin then `select' should also
+ # check for stdin availability.
+ if stdin and not stdin.closed:
+ writes = [session]
+ else:
+ writes = []
+
+ while True:
+            # Block until data can be read or written.
+ e = select.select([session], writes, [session], 1)[2]
+
+ if session.recv_ready():
+ data = encodeutils.safe_decode(session.recv(4096), 'utf-8')
+ self.log.debug("stdout: %r", data)
+ if stdout is not None:
+ stdout.write(data)
+ continue
+
+ if session.recv_stderr_ready():
+ stderr_data = encodeutils.safe_decode(
+ session.recv_stderr(4096), 'utf-8')
+ self.log.debug("stderr: %r", stderr_data)
+ if stderr is not None:
+ stderr.write(stderr_data)
+ continue
+
+ if session.send_ready():
+ if stdin is not None and not stdin.closed:
+ if not data_to_send:
+ stdin_txt = stdin.read(4096)
+ if stdin_txt is None:
+ stdin_txt = ''
+ data_to_send = encodeutils.safe_encode(
+ stdin_txt, incoming='utf-8')
+ if not data_to_send:
+ # we may need to keep stdin open
+ if not keep_stdin_open:
+ stdin.close()
+ session.shutdown_write()
+ writes = []
+ if data_to_send:
+ sent_bytes = session.send(data_to_send)
+ # LOG.debug("sent: %s" % data_to_send[:sent_bytes])
+ data_to_send = data_to_send[sent_bytes:]
+
+ if session.exit_status_ready():
+ break
+
+            if timeout and (time.time() - start_time) > timeout:
+ message = ('Timeout executing command %(cmd)s on host %(host)s'
+ % {"cmd": cmd, "host": self.host})
+ raise exceptions.SSHTimeout(error_msg=message)
+ if e:
+ raise exceptions.SSHError(error_msg='Socket error')
+
+ exit_status = session.recv_exit_status()
+ if exit_status != 0 and raise_on_error:
+ fmt = "Command '%(cmd)s' failed with exit_status %(status)d."
+ details = fmt % {"cmd": cmd, "status": exit_status}
+ if stderr_data:
+ details += " Last stderr data: '%s'." % stderr_data
+ raise exceptions.SSHError(error_msg=details)
+ return exit_status
+
+ def execute(self, cmd, stdin=None, timeout=3600, raise_on_error=False):
+ """Execute the specified command on the server.
+
+ :param cmd: (str) Command to be executed.
+ :param stdin: (StringIO) Open file to be sent on process stdin.
+ :param timeout: (int) Timeout for execution of the command.
+ :param raise_on_error: (bool) If True, then an SSHError will be raised
+ when non-zero exit code.
+
+ :returns: tuple (exit_status, stdout, stderr)
+ """
+ stdout = six.moves.StringIO()
+ stderr = six.moves.StringIO()
+
+ exit_status = self.run(cmd, stderr=stderr,
+ stdout=stdout, stdin=stdin,
+ timeout=timeout, raise_on_error=raise_on_error)
+ stdout.seek(0)
+ stderr.seek(0)
+ return exit_status, stdout.read(), stderr.read()
+
+ def wait(self, timeout=None, interval=1):
+ """Wait for the host will be available via ssh."""
+ if timeout is None:
+ timeout = self.wait_timeout
+
+ end_time = time.time() + timeout
+ while True:
+ try:
+ return self.execute("uname")
+ except (socket.error, exceptions.SSHError) as e:
+ self.log.debug("Ssh is still unavailable: %r", e)
+ time.sleep(interval)
+ if time.time() > end_time:
+ raise exceptions.SSHTimeout(
+ error_msg='Timeout waiting for "%s"' % self.host)
+
+ def put(self, files, remote_path=b'.', recursive=False):
+ client = self._get_client()
+
+ with SCPClient(client.get_transport()) as scp:
+ scp.put(files, remote_path, recursive)
+
+ def get(self, remote_path, local_path='/tmp/', recursive=True):
+ client = self._get_client()
+
+ with SCPClient(client.get_transport()) as scp:
+ scp.get(remote_path, local_path, recursive)
+
+ # keep shell running in the background, e.g. screen
+ def send_command(self, command):
+ client = self._get_client()
+ client.exec_command(command, get_pty=True)
+
+ def _put_file_sftp(self, localpath, remotepath, mode=None):
+ client = self._get_client()
+
+ with client.open_sftp() as sftp:
+ sftp.put(localpath, remotepath)
+ if mode is None:
+ mode = 0o777 & os.stat(localpath).st_mode
+ sftp.chmod(remotepath, mode)
+
+ TILDE_EXPANSIONS_RE = re.compile("(^~[^/]*/)?(.*)")
+
+ def _put_file_shell(self, localpath, remotepath, mode=None):
+        # quote to stop word splitting
+ tilde, remotepath = self.TILDE_EXPANSIONS_RE.match(remotepath).groups()
+ if not tilde:
+ tilde = ''
+ cmd = ['cat > %s"%s"' % (tilde, remotepath)]
+ if mode is not None:
+            # use -- so the mode is not parsed as an option
+ cmd.append('chmod -- 0%o %s"%s"' % (mode, tilde, remotepath))
+
+ with open(localpath, "rb") as localfile:
+ # only chmod on successful cat
+ self.run("&& ".join(cmd), stdin=localfile)
+
+ def put_file(self, localpath, remotepath, mode=None):
+ """Copy specified local file to the server.
+
+ :param localpath: Local filename.
+ :param remotepath: Remote filename.
+ :param mode: Permissions to set after upload
+ """
+ try:
+ self._put_file_sftp(localpath, remotepath, mode=mode)
+ except (paramiko.SSHException, socket.error):
+ self._put_file_shell(localpath, remotepath, mode=mode)
+
+ def put_file_obj(self, file_obj, remotepath, mode=None):
+ client = self._get_client()
+
+ with client.open_sftp() as sftp:
+ sftp.putfo(file_obj, remotepath)
+ if mode is not None:
+ sftp.chmod(remotepath, mode)
+
+ def get_file_obj(self, remotepath, file_obj):
+ client = self._get_client()
+
+ with client.open_sftp() as sftp:
+ sftp.getfo(remotepath, file_obj)
+
+
+class AutoConnectSSH(SSH):
+
+ @classmethod
+ def get_arg_key_map(cls):
+ arg_key_map = super(AutoConnectSSH, cls).get_arg_key_map()
+ arg_key_map['wait'] = ('wait', True)
+ return arg_key_map
+
+ # always wait or we will get OpenStack SSH errors
+ def __init__(self, user, host, port=None, pkey=None,
+ key_filename=None, password=None, name=None, wait=True):
+ super(AutoConnectSSH, self).__init__(user, host, port, pkey,
+ key_filename, password, name)
+ if wait and wait is not True:
+ self.wait_timeout = int(wait)
+
+ def _make_dict(self):
+ data = super(AutoConnectSSH, self)._make_dict()
+ data.update({
+ 'wait': self.wait_timeout
+ })
+ return data
+
+ def _connect(self):
+ if not self.is_connected:
+ interval = 1
+ timeout = self.wait_timeout
+
+ end_time = time.time() + timeout
+ while True:
+ try:
+ return self._get_client()
+ except (socket.error, exceptions.SSHError) as e:
+ self.log.debug("Ssh is still unavailable: %r", e)
+ time.sleep(interval)
+ if time.time() > end_time:
+ raise exceptions.SSHTimeout(
+ error_msg='Timeout waiting for "%s"' % self.host)
+
+ def drop_connection(self):
+ """ Don't close anything, just force creation of a new client """
+ self._client = False
+
+ def execute(self, cmd, stdin=None, timeout=3600, raise_on_error=False):
+ self._connect()
+ return super(AutoConnectSSH, self).execute(cmd, stdin, timeout,
+ raise_on_error)
+
+ def run(self, cmd, stdin=None, stdout=None, stderr=None,
+ raise_on_error=True, timeout=3600,
+ keep_stdin_open=False, pty=False):
+ self._connect()
+ return super(AutoConnectSSH, self).run(cmd, stdin, stdout,
+ stderr, raise_on_error,
+ timeout, keep_stdin_open, pty)
+
+ def put(self, files, remote_path=b'.', recursive=False):
+ self._connect()
+ return super(AutoConnectSSH, self).put(files, remote_path, recursive)
+
+ def put_file(self, local_path, remote_path, mode=None):
+ self._connect()
+ return super(AutoConnectSSH, self).put_file(local_path,
+ remote_path, mode)
+
+ def put_file_obj(self, file_obj, remote_path, mode=None):
+ self._connect()
+ return super(AutoConnectSSH, self).put_file_obj(file_obj,
+ remote_path, mode)
+
+ def get_file_obj(self, remote_path, file_obj):
+ self._connect()
+ return super(AutoConnectSSH, self).get_file_obj(remote_path, file_obj)
+
+ @staticmethod
+ def get_class():
+ # must return static class name,
+ # anything else refers to the calling class
+ # i.e. the subclass, not the superclass
+ return AutoConnectSSH
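
For orientation, here is a minimal usage sketch of the two classes defined above. It assumes the in-container import path (`utils.ssh`); the host, credentials, and file paths are placeholders, not values used anywhere in this change:

```python
# Minimal usage sketch for the SSH helpers above; host, user and key
# path are placeholders.
from utils.ssh import SSH, AutoConnectSSH

# Plain client: connects lazily on the first run()/execute() call.
ssh = SSH("opnfv", "10.10.120.24", key_filename="/home/opnfv/.ssh/id_rsa")
status, out, err = ssh.execute("uname -a")
print(status, out)

# AutoConnectSSH retries the connection for up to `wait` seconds,
# which helps when the target (e.g. a fresh VM) is still booting.
auto = AutoConnectSSH("opnfv", "10.10.120.24", password="opnfv", wait=60)
auto.put_file("/tmp/local.conf", "/tmp/remote.conf")
auto.close()
```

Note that the `get_class()` override is what makes `copy()` return an `AutoConnectSSH` instance rather than a plain `SSH`.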
diff --git a/tools/docker/libs/utils/utils.py b/tools/docker/libs/utils/utils.py
new file mode 100644
index 00000000..d945381e
--- /dev/null
+++ b/tools/docker/libs/utils/utils.py
@@ -0,0 +1,41 @@
+"""
+# Copyright 2013: Mirantis Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+
+
+NON_NONE_DEFAULT = object()
+
+
+def get_key_with_default(data, key, default=NON_NONE_DEFAULT):
+ """get default key"""
+ value = data.get(key, default)
+ if value is NON_NONE_DEFAULT:
+ raise KeyError(key)
+ return value
+
+
+def make_dict_from_map(data, key_map):
+ """mapping dict"""
+ return {dest_key: get_key_with_default(data, src_key, default)
+ for dest_key, (src_key, default) in key_map.items()}
+
+def try_int(s, *args):
+ """Convert to integer if possible."""
+ #pylint: disable=invalid-name
+ try:
+ return int(s)
+ except (TypeError, ValueError):
+ return args[0] if args else s
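
As a rough illustration of how these helpers fit together (the `node` dict and key map below are invented for the example; compare `SSH.get_arg_key_map()` in ssh.py above):

```python
# Sketch of how the helpers above cooperate; the node dict is made up.
from utils.utils import try_int, NON_NONE_DEFAULT, make_dict_from_map

node = {"user": "opnfv", "ip": "10.10.120.24", "ssh_port": "22"}
key_map = {
    "user": ("user", NON_NONE_DEFAULT),   # required, no default
    "host": ("ip", NON_NONE_DEFAULT),     # required, no default
    "port": ("ssh_port", 22),             # optional, defaults to 22
    "password": ("password", None),       # optional, defaults to None
}
args = make_dict_from_map(node, key_map)
args["port"] = try_int(args["port"], 22)  # "22" (e.g. from YAML) -> 22
print(args)
# {'user': 'opnfv', 'host': '10.10.120.24', 'port': 22, 'password': None}
```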
diff --git a/tools/docker/prepare.sh b/tools/docker/prepare.sh
new file mode 100755
index 00000000..7afdbd6b
--- /dev/null
+++ b/tools/docker/prepare.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+# This script is used to prepare the local host to use the vsperf client and containers.
+
+# First, change the permissions of the prepare.sh file
+chmod a+x prepare.sh
+
+# Install python3 on the local host
+sudo apt-get install python3
+
+#Install python3-pip
+sudo apt-get install python3-pip
+
+#Install grpcio, grpcio-tools and configparser
+pip3 install grpcio==1.4.0 grpcio-tools==1.4.0 configparser
+
+# Build .proto to create python library
+cd libs/proto && python3 -m grpc_tools.protoc -I./ --python_out=. --grpc_python_out=. vsperf.proto
+sed -i 's/import vsperf_pb2 as vsperf__pb2/from . import vsperf_pb2 as vsperf__pb2/g' vsperf_pb2_grpc.py
+cd ../..
+
+# Copy libs/proto and libs/utils into the deployment and testcontrol interactive containers at the appropriate locations.
+cp -r libs/proto deployment/interactive/controller/vsperf/proto
+cp -r libs/utils deployment/interactive/controller/vsperf/utils
+cp -r libs/proto testcontrol/interactive/controller/vsperf/proto
+cp -r libs/utils testcontrol/interactive/controller/vsperf/utils
+
+# Copy libs/utils into the deployment and testcontrol auto containers at the appropriate locations.
+cp -r libs/utils deployment/auto/controller/vsperf/utils
+cp -r libs/utils testcontrol/auto/controller/vsperf/utils
+
+#copy libs/proto into client
+cp -r libs/proto client/proto
diff --git a/tools/docker/results/README.md b/tools/docker/results/README.md
new file mode 100644
index 00000000..15d28b15
--- /dev/null
+++ b/tools/docker/results/README.md
@@ -0,0 +1,48 @@
+## Please set the mmap count limit (vm.max_map_count) to 262144 or more.
+
+There are two options. Either run this command:
+```sh
+sysctl -w vm.max_map_count=262144
+```
+or, to set it permanently, add this line to `/etc/sysctl.conf`:
+```sh
+vm.max_map_count = 262144
+```
+
+### Update the IP address
+You may want to change the IP address from 0.0.0.0 to the appropriate host IP in
+```sh
+docker-compose.yml
+```
+
+### Changes made to sebp/elk
+The vsperf/elk image is the same as sebp/elk with one minor change: the collectd codec is added to Logstash.
+In the Dockerfile of sebp/elk, under the Logstash configuration, the following lines are added:
+```sh
+ WORKDIR ${LOGSTASH_HOME}
+ RUN gosu logstash bin/logstash-plugin install logstash-codec-collectd
+ WORKDIR /
+
+```
+
+The resultsdb directory contains the source from the Dovetail/Dovetail-webportal project.
+Once the results container is deployed, run the python script as follows to ensure that results can be pushed and queried correctly.
+```sh
+python init_db.py host_ip_address testapi_port
+```
+For example, if the host on which the container is running is 10.10.120.22 and the container exposes port 8000, the command should be:
+```sh
+python init_db.py 10.10.120.22 8000
+```
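
As an optional sanity check, something like the following can confirm the TestAPI container is reachable before running `init_db.py`; the host and port are the deployment-specific values from the example above, and only the base URL is fetched:

```python
# Quick reachability check for the TestAPI container; host and port
# are deployment-specific placeholders.
import urllib.request

url = "http://10.10.120.22:8000/"
try:
    with urllib.request.urlopen(url, timeout=5) as resp:
        print("TestAPI reachable, HTTP status:", resp.status)
except OSError as exc:
    print("TestAPI not reachable:", exc)
```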
diff --git a/tools/docker/results/docker-compose.yml b/tools/docker/results/docker-compose.yml
new file mode 100644
index 00000000..87ba7fc0
--- /dev/null
+++ b/tools/docker/results/docker-compose.yml
@@ -0,0 +1,80 @@
+version: '3'
+volumes:
+ elk-data:
+ influx-data:
+ grafana-data:
+ mongo-data:
+ jupyter-data:
+ testapi-logs:
+services:
+ influxdb:
+ image: influxdb:latest
+ ports:
+ - "25826:25826/udp"
+ - "25826:25826"
+ - "8083:8083"
+ - "8086:8086"
+ expose:
+ - "25826"
+ - "8086"
+ - "8083"
+ volumes:
+ - influx-data:/var/lib/influxdb
+ grafana:
+ image: opnfv/barometer-grafana
+ volumes:
+ - grafana-data:/var/lib/grafana
+ - ./grafana/dashboards:/opt/grafana/dashboards
+ ports:
+ - "3000:3000"
+ elk:
+ image: vsperf/elk
+ ports:
+ - "5601:5601"
+ - "9200:9200"
+ - "5044:5044"
+ volumes:
+ - elk-data:/var/lib/elasticsearch
+ - ./logstash/pipeline/30-output.conf:/etc/logstash/conf.d/30-output.conf
+ - ./logstash/pipeline/02-beats-input.conf:/etc/logstash/conf.d/02-beats-input.conf
+ - ./logstash/pipeline/20-collectd-input.conf:/etc/logstash/conf.d/20-collectd-input.conf
+ environment:
+ - discovery.type=single-node
+ mongo:
+ image: mongo:3.2.1
+ ports:
+ - "27017:27017"
+ volumes:
+ - mongo-data:/data/db
+ container_name: opnfv-mongo
+ testapi:
+ image: opnfv/testapi:latest
+ container_name: opnfv-testapi
+ volumes:
+ - testapi-logs:/home/testapi/logs
+ environment:
+ - mongodb_url=mongodb://opnfv-mongo:27017/
+ - base_url=http://0.0.0.0:8000
+ ports:
+ - "8000:8000"
+ - "8001:8001"
+ links:
+ - mongo
+ jupyter:
+ build:
+ context: ./jupyter
+ ports:
+ - "8888:8888"
+ links:
+ - postgres
+ volumes:
+ - ./notebooks:/notebooks
+ - ./notebooks/testresult-analysis.ipynb:/notebooks/testresult-analysis.ipynb
+ - jupyter-data:/data
+ postgres:
+ image: postgres
+ restart: always
+ environment:
+ POSTGRES_USER: data
+ POSTGRES_PASSWORD: data
+ POSTGRES_DB: data
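
To see data flow end to end, a point can be written into the InfluxDB container started by this compose file by hand. This is only a sketch: the database name `monitor` is an assumption for illustration and must exist first (e.g. `CREATE DATABASE monitor`), while the measurement and tag names match the Grafana dashboards below:

```python
# Sketch: write one test point into InfluxDB using the 1.x HTTP
# line-protocol API. The database name "monitor" is a placeholder.
import urllib.request

line = 'cpu_usage_total,container_name=nginx value=123456789'
req = urllib.request.Request(
    "http://localhost:8086/write?db=monitor",
    data=line.encode("utf-8"),
    method="POST")
with urllib.request.urlopen(req, timeout=5) as resp:
    print("InfluxDB write HTTP status:", resp.status)  # 204 on success
```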
diff --git a/tools/docker/results/grafana/dashboards/container_metrics_dashboard.json b/tools/docker/results/grafana/dashboards/container_metrics_dashboard.json
new file mode 100644
index 00000000..ef0b32a1
--- /dev/null
+++ b/tools/docker/results/grafana/dashboards/container_metrics_dashboard.json
@@ -0,0 +1,1291 @@
+{
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": "-- Grafana --",
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "type": "dashboard"
+ }
+ ]
+ },
+ "editable": true,
+ "gnetId": null,
+ "graphTooltip": 0,
+ "hideControls": false,
+ "id": 3,
+ "links": [],
+ "refresh": "5s",
+ "rows": [
+ {
+ "collapse": false,
+ "height": 234,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "influxdb",
+ "description": "Total CPU usage of container",
+ "fill": 0,
+ "height": "",
+ "hideTimeOverride": false,
+ "id": 1,
+ "interval": "",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "hideEmpty": false,
+ "hideZero": false,
+ "max": true,
+ "min": true,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "minSpan": null,
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "repeat": null,
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "Total",
+ "dsType": "influxdb",
+ "groupBy": [],
+ "hide": false,
+ "measurement": "cpu_usage_total",
+ "orderByTime": "ASC",
+ "policy": "monitor",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "difference"
+ },
+ {
+ "params": [
+ " / 1000000000"
+ ],
+ "type": "math"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "container_name",
+ "operator": "=~",
+ "value": "/^$container$/"
+ }
+ ]
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": "1m",
+ "timeShift": null,
+ "title": "Total Usage",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "transparent": false,
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": null,
+ "format": "short",
+ "label": "Cores",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "decimals": null,
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ },
+ {
+ "aliasColors": {
+ "Core 0": "#0a50a1",
+ "Core 1": "#890f02",
+ "Core 2": "#f9934e",
+ "Core 3": "#3f6833"
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "influxdb",
+ "description": "CPU usage per core",
+ "fill": 0,
+ "height": "",
+ "hideTimeOverride": false,
+ "id": 2,
+ "interval": "",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "hideEmpty": false,
+ "hideZero": false,
+ "max": true,
+ "min": true,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "minSpan": null,
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "repeat": null,
+ "scopedVars": {
+ "core": {
+ "selected": false,
+ "text": "0",
+ "value": "0"
+ }
+ },
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "Core $tag_instance",
+ "dsType": "influxdb",
+ "groupBy": [
+ {
+ "params": [
+ "instance"
+ ],
+ "type": "tag"
+ }
+ ],
+ "measurement": "cpu_usage_per_cpu",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT difference(\"value\") / 1000000000 FROM \"monitor\".\"cpu_usage_per_cpu\" WHERE (\"container_name\" =~ /^$container$/) AND $timeFilter GROUP BY \"instance\"",
+ "rawQuery": false,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "difference"
+ },
+ {
+ "params": [
+ " / 1000000000"
+ ],
+ "type": "math"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "container_name",
+ "operator": "=~",
+ "value": "/^$container$/"
+ }
+ ]
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": "1m",
+ "timeShift": null,
+ "title": "Usage per Core",
+ "tooltip": {
+ "shared": false,
+ "sort": 1,
+ "value_type": "individual"
+ },
+ "transparent": false,
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": null,
+ "format": "short",
+ "label": "Cores",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "decimals": null,
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ },
+ {
+ "aliasColors": {
+ "Kernel": "#890f02",
+ "User": "#0a50a1"
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "influxdb",
+ "description": "CPU usage per User/Kernel",
+ "fill": 0,
+ "height": "",
+ "hideTimeOverride": false,
+ "id": 3,
+ "interval": "",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "hideEmpty": false,
+ "hideZero": false,
+ "max": true,
+ "min": true,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "minSpan": null,
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "User",
+ "dsType": "influxdb",
+ "groupBy": [],
+ "hide": false,
+ "measurement": "cpu_usage_user",
+ "orderByTime": "ASC",
+ "policy": "monitor",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "difference"
+ },
+ {
+ "params": [
+ " / 1000000000"
+ ],
+ "type": "math"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "container_name",
+ "operator": "=~",
+ "value": "/^$container$/"
+ }
+ ]
+ },
+ {
+ "alias": "Kernel",
+ "dsType": "influxdb",
+ "groupBy": [],
+ "measurement": "cpu_usage_system",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "difference"
+ },
+ {
+ "params": [
+ " / 1000000000"
+ ],
+ "type": "math"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "container_name",
+ "operator": "=~",
+ "value": "/^$container$/"
+ }
+ ]
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": "1m",
+ "timeShift": null,
+ "title": "Usage Breakdown",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "transparent": false,
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": null,
+ "format": "short",
+ "label": "Cores",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "decimals": null,
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "CPU",
+ "titleSize": "h3"
+ },
+ {
+ "collapse": false,
+ "height": 250,
+ "panels": [
+ {
+ "aliasColors": {
+ "Hot": "#890f02",
+ "Total": "#0a50a1"
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "description": "Memory Usage",
+ "fill": 1,
+ "height": "",
+ "id": 4,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "hideEmpty": false,
+ "hideZero": false,
+ "max": true,
+ "min": true,
+ "rightSide": true,
+ "show": true,
+ "sideWidth": 250,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "Total",
+ "dsType": "influxdb",
+ "groupBy": [],
+ "measurement": "memory_usage",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [
+ " / 1024/1024"
+ ],
+ "type": "math"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "container_name",
+ "operator": "=~",
+ "value": "/^$container$/"
+ }
+ ]
+ },
+ {
+ "alias": "Hot",
+ "dsType": "influxdb",
+ "groupBy": [],
+ "measurement": "memory_working_set",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [
+ " / 1024/1024"
+ ],
+ "type": "math"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "container_name",
+ "operator": "=~",
+ "value": "/^$container$/"
+ }
+ ]
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": "1m",
+ "timeShift": null,
+ "title": "Memory",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "decmbytes",
+ "label": "Megabytes",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Memory",
+ "titleSize": "h3"
+ },
+ {
+ "collapse": false,
+ "height": 250,
+ "panels": [
+ {
+ "aliasColors": {
+ "rx_bytes": "#890f02",
+ "tx_bytes": "#0a50a1"
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "description": "Rx/Tx",
+ "fill": 0,
+ "height": "",
+ "id": 5,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "rx_bytes",
+ "dsType": "influxdb",
+ "groupBy": [],
+ "measurement": "rx_bytes",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "difference"
+ },
+ {
+ "params": [
+ " / 1024"
+ ],
+ "type": "math"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "container_name",
+ "operator": "=~",
+ "value": "/^$container$/"
+ }
+ ]
+ },
+ {
+ "alias": "tx_bytes",
+ "dsType": "influxdb",
+ "groupBy": [],
+ "measurement": "tx_bytes",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "difference"
+ },
+ {
+ "params": [
+ " / 1024"
+ ],
+ "type": "math"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "container_name",
+ "operator": "=~",
+ "value": "/^$container$/"
+ }
+ ]
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": "1m",
+ "timeShift": null,
+ "title": "Throughput",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "KBs",
+ "label": "Kilobytes per second",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ },
+ {
+ "aliasColors": {
+ "rx_errors": "#890f02",
+ "tx_bytes": "#0a50a1",
+ "tx_errors": "#0a50a1"
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "fill": 0,
+ "height": "",
+ "id": 6,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "rx_errors",
+ "dsType": "influxdb",
+ "groupBy": [],
+ "measurement": "rx_errors",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "difference"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "container_name",
+ "operator": "=~",
+ "value": "/^$container$/"
+ }
+ ]
+ },
+ {
+ "alias": "tx_errors",
+ "dsType": "influxdb",
+ "groupBy": [],
+ "measurement": "tx_errors",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "difference"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "container_name",
+ "operator": "=~",
+ "value": "/^$container$/"
+ }
+ ]
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": "1m",
+ "timeShift": null,
+ "title": "Errors",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": "Errors per second",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Network",
+ "titleSize": "h3"
+ },
+ {
+ "collapse": false,
+ "height": 30,
+ "panels": [
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "#299c46",
+ "rgba(237, 129, 40, 0.89)",
+ "#d44a3a"
+ ],
+ "datasource": null,
+ "decimals": 2,
+ "format": "decbytes",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "200px",
+ "hideTimeOverride": false,
+ "id": 7,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 6,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": true
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "dsType": "influxdb",
+ "groupBy": [],
+ "measurement": "fs_usage",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "container_name",
+ "operator": "=~",
+ "value": "/^$container$/"
+ }
+ ]
+ }
+ ],
+ "thresholds": "",
+ "timeFrom": "1m",
+ "title": "Storage usage",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "avg"
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Storage",
+ "titleSize": "h3"
+ },
+ {
+ "collapse": false,
+ "height": 250,
+ "panels": [
+ {
+ "columns": [],
+ "datasource": "influxdb",
+ "description": "Runtime table",
+ "fontSize": "80%",
+ "height": "",
+ "id": 8,
+ "links": [],
+ "pageSize": 5,
+ "scroll": false,
+ "showHeader": true,
+ "sort": {
+ "col": 0,
+ "desc": true
+ },
+ "span": 12,
+ "styles": [
+ {
+ "alias": "Time",
+ "dateFormat": "MM/DD/YY h:mm:ss a",
+ "pattern": "Time",
+ "type": "date"
+ },
+ {
+ "alias": "",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "decimals": 2,
+ "pattern": "/.*/",
+ "thresholds": [],
+ "type": "number",
+ "unit": "short"
+ }
+ ],
+ "targets": [
+ {
+ "alias": "$col",
+ "dsType": "influxdb",
+ "groupBy": [],
+ "limit": "5",
+ "measurement": "runtime",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "Alloc"
+ ],
+ "type": "field"
+ }
+ ],
+ [
+ {
+ "params": [
+ "Frees"
+ ],
+ "type": "field"
+ }
+ ],
+ [
+ {
+ "params": [
+ "HeapAlloc"
+ ],
+ "type": "field"
+ }
+ ],
+ [
+ {
+ "params": [
+ "HeapIdle"
+ ],
+ "type": "field"
+ }
+ ],
+ [
+ {
+ "params": [
+ "HeapObjects"
+ ],
+ "type": "field"
+ }
+ ],
+ [
+ {
+ "params": [
+ "HeapReleased"
+ ],
+ "type": "field"
+ }
+ ],
+ [
+ {
+ "params": [
+ "HeapSys"
+ ],
+ "type": "field"
+ }
+ ],
+ [
+ {
+ "params": [
+ "Lookups"
+ ],
+ "type": "field"
+ }
+ ],
+ [
+ {
+ "params": [
+ "Mallocs"
+ ],
+ "type": "field"
+ }
+ ],
+ [
+ {
+ "params": [
+ "NumGC"
+ ],
+ "type": "field"
+ }
+ ],
+ [
+ {
+ "params": [
+ "NumGoroutine"
+ ],
+ "type": "field"
+ }
+ ],
+ [
+ {
+ "params": [
+ "PauseTotalNs"
+ ],
+ "type": "field"
+ }
+ ],
+ [
+ {
+ "params": [
+ "Sys"
+ ],
+ "type": "field"
+ }
+ ],
+ [
+ {
+ "params": [
+ "TotalAlloc"
+ ],
+ "type": "field"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "timeFrom": "1m",
+ "title": "Runtime Metrics",
+ "transform": "timeseries_to_columns",
+ "type": "table"
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Dashboard Row",
+ "titleSize": "h6"
+ }
+ ],
+ "schemaVersion": 14,
+ "style": "dark",
+ "tags": [
+ "vsperf",
+ "container"
+ ],
+ "templating": {
+ "list": [
+ {
+ "allValue": null,
+ "current": {
+ "selected": false,
+ "tags": [],
+ "text": "nginx",
+ "value": "nginx"
+ },
+ "datasource": "influxdb",
+ "hide": 0,
+ "includeAll": false,
+ "label": null,
+ "multi": false,
+ "name": "container",
+ "options": [],
+ "query": "show tag values with key = container_name",
+ "refresh": 1,
+ "regex": "[a-zA-Z0-9_/]*",
+ "sort": 0,
+ "tagValuesQuery": "",
+ "tags": [],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ },
+ {
+ "allValue": null,
+ "current": {
+ "tags": [],
+ "text": "All",
+ "value": "$__all"
+ },
+ "datasource": "influxdb",
+ "hide": 0,
+ "includeAll": true,
+ "label": null,
+ "multi": false,
+ "name": "core",
+ "options": [],
+ "query": "show tag values with key = instance",
+ "refresh": 1,
+ "regex": "",
+ "sort": 3,
+ "tagValuesQuery": "",
+ "tags": [],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ }
+ ]
+ },
+ "time": {
+ "from": "now-5m",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "5s",
+ "10s",
+ "30s",
+ "1m",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "",
+ "title": "Container Metrics",
+ "version": 12
+} \ No newline at end of file
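
A note on the math in the CPU panels above: counters such as `cpu_usage_total` are cumulative nanoseconds of CPU time, so the `difference` / `1000000000` transform in the queries converts each interval to core-seconds; with a 1 s collection interval that value reads directly as cores. A small sketch with made-up samples:

```python
# The CPU panels take difference(value) / 1e9 on cumulative counters
# (nanoseconds of CPU time). Sample (timestamp, value) pairs are made up.
samples = [(0, 4_000_000_000), (10, 19_000_000_000), (20, 42_000_000_000)]
for (t0, v0), (t1, v1) in zip(samples, samples[1:]):
    core_seconds = (v1 - v0) / 1e9       # what the dashboard plots per interval
    avg_cores = core_seconds / (t1 - t0) # normalised by the 10 s interval
    print(f"{t0:>2}s-{t1:>2}s: {core_seconds:.1f} core-seconds, {avg_cores:.2f} cores avg")
```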
diff --git a/tools/docker/results/jupyter/Dockerfile b/tools/docker/results/jupyter/Dockerfile
new file mode 100644
index 00000000..94f9bd36
--- /dev/null
+++ b/tools/docker/results/jupyter/Dockerfile
@@ -0,0 +1,16 @@
+FROM jupyter/scipy-notebook
+
+RUN python --version
+
+RUN conda install --quiet --yes -c \
+ conda-forge osmnx dask
+
+RUN pip install -U graphviz paramiko
+
+RUN echo "c.NotebookApp.token=''" >> $HOME/.jupyter/jupyter_notebook_config.py
+
+VOLUME /notebooks
+VOLUME /data
+
+RUN mkdir /data/results
+WORKDIR /notebooks
diff --git a/tools/docker/results/logstash/pipeline/02-beats-input.conf b/tools/docker/results/logstash/pipeline/02-beats-input.conf
new file mode 100644
index 00000000..a00d3f5b
--- /dev/null
+++ b/tools/docker/results/logstash/pipeline/02-beats-input.conf
@@ -0,0 +1,6 @@
+input {
+ beats {
+ port => 5044
+ ssl => false
+ }
+}
diff --git a/tools/docker/results/logstash/pipeline/20-collectd-input.conf b/tools/docker/results/logstash/pipeline/20-collectd-input.conf
new file mode 100644
index 00000000..990903f9
--- /dev/null
+++ b/tools/docker/results/logstash/pipeline/20-collectd-input.conf
@@ -0,0 +1,14 @@
+input {
+ udp {
+ port => 25826
+ buffer_size => 1452
+ type => collectd
+ codec => collectd { }
+ }
+}
+
+filter {
+ mutate {
+ remove_field => [ "host" ]
+ }
+}
diff --git a/tools/docker/results/logstash/pipeline/30-output.conf b/tools/docker/results/logstash/pipeline/30-output.conf
new file mode 100644
index 00000000..0e3161a8
--- /dev/null
+++ b/tools/docker/results/logstash/pipeline/30-output.conf
@@ -0,0 +1,7 @@
+output {
+ elasticsearch {
+ hosts => "http://localhost:9200"
+ manage_template => false
+ codec => collectd { }
+ }
+}
diff --git a/tools/docker/results/notebooks/testresult-analysis.ipynb b/tools/docker/results/notebooks/testresult-analysis.ipynb
new file mode 100644
index 00000000..6ce58dd8
--- /dev/null
+++ b/tools/docker/results/notebooks/testresult-analysis.ipynb
@@ -0,0 +1,783 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "hide_input": true
+ },
+ "source": [
+ "# OPNFV VSPERF\n",
+ "# Beyond Performance Metrics: Towards Causation Analysis"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### sridhar.rao@spirent.com and acm@research.att.com"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Import packages\n",
+ "import numpy as np\n",
+ "import pandas as pd\n",
+ "import matplotlib.pyplot as plt\n",
+ "import seaborn as sns\n",
+ "from graphviz import Digraph\n",
+ "import collections\n",
+ "import glob\n",
+ "import os"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Get the results to analyze: \n",
+ "Getting Latest one, if ``directory_to_download`` is empty"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "hide_input": true
+ },
+ "outputs": [],
+ "source": [
+ "import paramiko\n",
+ "import tarfile\n",
+ "import os\n",
+ "from stat import S_ISDIR\n",
+ "RECV_BYTES = 4096\n",
+ "hostname = '10.10.120.24'\n",
+ "port = 22\n",
+ "uname='opnfv'\n",
+ "pwd='opnfv' \n",
+ "stdout_data = []\n",
+ "stderr_data = []\n",
+ "client = paramiko.Transport((hostname, port))\n",
+ "client.connect(username=uname, password=pwd)\n",
+ "session = client.open_channel(kind='session')\n",
+ "directory_to_download = ''\n",
+ "\n",
+ "session.exec_command('ls /tmp | grep results')\n",
+ "if not directory_to_download:\n",
+ " while True:\n",
+ " if session.recv_ready():\n",
+ " stdout_data.append(session.recv(RECV_BYTES))\n",
+ " if session.recv_stderr_ready():\n",
+ " stderr_data.append(session.recv_stderr(RECV_BYTES))\n",
+ " if session.exit_status_ready():\n",
+ " break\n",
+ " if stdout_data:\n",
+ " line = stdout_data[0]\n",
+ " filenames = line.decode(\"utf-8\").rstrip('\\n').split('\\n')\n",
+ " filenames = sorted(filenames)\n",
+ " latest = filenames[-1]\n",
+ " directory_to_download = os.path.join('/tmp', latest).replace(\"\\\\\",\"/\")\n",
+ " print(directory_to_download)\n",
+ "stdout_data = []\n",
+ "stderr_data = []\n",
+ "if directory_to_download:\n",
+ " # zip the collectd results to make the download faster\n",
+ " zip_command = 'sudo -S tar -czvf '+ directory_to_download + '/collectd.tar.gz -C ' + '/tmp/csv .'\n",
+ " session = client.open_channel(kind='session')\n",
+ " session.get_pty()\n",
+ " session.exec_command(zip_command)\n",
+ " while True:\n",
+ " if session.recv_ready():\n",
+ " stdout_data.append(session.recv(RECV_BYTES))\n",
+ " if session.recv_stderr_ready():\n",
+ " stderr_data.append(session.recv_stderr(RECV_BYTES))\n",
+ " if session.exit_status_ready():\n",
+ " break\n",
+ " if stderr_data:\n",
+ " print(stderr_data[0])\n",
+ " if stdout_data:\n",
+ " print(stdout_data[0])\n",
+ "\n",
+ " # Begin the actual downlaod\n",
+ " sftp = paramiko.SFTPClient.from_transport(client)\n",
+ " def sftp_walk(remotepath):\n",
+ " path=remotepath\n",
+ " files=[]\n",
+ " folders=[]\n",
+ " for f in sftp.listdir_attr(remotepath):\n",
+ " if S_ISDIR(f.st_mode):\n",
+ " folders.append(f.filename)\n",
+ " else:\n",
+ " files.append(f.filename)\n",
+ " if files:\n",
+ " yield path, files\n",
+ " # Filewise download happens here\n",
+ " for path,files in sftp_walk(directory_to_download):\n",
+ " for file in files:\n",
+ " remote = os.path.join(path,file).replace(\"\\\\\",\"/\")\n",
+ " local = os.path.join('/data/results', file).replace(\"\\/\",\"/\")\n",
+ " sftp.get(remote, local)\n",
+ "# Untar the collectd results if we got it.\n",
+ "path = os.path.join('/data/results', 'collectd.tar.gz')\n",
+ "if os.path.exists(path):\n",
+ " tar = tarfile.open(path)\n",
+ " tar.extractall()\n",
+ " tar.close()\n",
+ "# Ready to work with downloaded data, close the session and client.\n",
+ "session.close()\n",
+ "client.close()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "hide_input": true
+ },
+ "outputs": [],
+ "source": [
+ "strings = ('* OS:', '* Kernel Version:', '* Board:', '* CPU:', '* CPU cores:',\n",
+ " '* Memory:', '* Virtual Switch Set-up:',\n",
+ " '* Traffic Generator:','* vSwitch:', '* DPDK Version:', '* VNF:')\n",
+ "filename = os.path.basename(glob.glob('/data/results/result*.rst')[0])\n",
+ "info_dict = {}\n",
+ "with open(os.path.join('/data/results', filename), 'r') as file:\n",
+ " for line in file:\n",
+ " if any(s in line for s in strings):\n",
+ " info_dict[line.split(':', 1)[0]] = line.split(':', 1)[1].rstrip()\n",
+ "df = pd.DataFrame.from_dict(info_dict, orient='index', columns=['Value'])\n",
+ "df"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Understand the configuration used for the test."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "filename = os.path.basename(glob.glob('/data/results/vsperf*.conf')[0])\n",
+ "file = os.path.join('/data/results', filename)\n",
+ "with open(file, 'r') as f:\n",
+ " for line in f:\n",
+ " if line.startswith('TRAFFICGEN_DURATION'):\n",
+ " value = line.split('=')[1]\n",
+ " value = value.rstrip()\n",
+ " value = value.lstrip()\n",
+ " traffic_duration = int(value)\n",
+ " print(traffic_duration)\n",
+ " elif line.startswith('VSWITCH_PMD_CPU_MASK'):\n",
+ " value = line.split('=')[1]\n",
+ " value = value.rstrip()\n",
+ " pmd_cores_mask = value.lstrip()\n",
+ " print(pmd_cores_mask)\n",
+ " elif line.startswith('GUEST_CORE_BINDING'):\n",
+ " value = line.split('=')[1]\n",
+ " value = value.rstrip()\n",
+ " value = value.lstrip()\n",
+ " guest_cores = value[1:-2]\n",
+ " print(guest_cores)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## OVS-Ports and Cores"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "hide_input": true
+ },
+ "outputs": [],
+ "source": [
+ "import collections\n",
+ "portcores = collections.OrderedDict()\n",
+ "chunks = []\n",
+ "current_chunk = []\n",
+ "file = os.path.join('/data/results', 'ovs-cores.log')\n",
+ "with open(file, 'r') as f:\n",
+ " for line in f:\n",
+ " if line.startswith('pmd') and current_chunk:\n",
+ " # if line starts with token and the current chunk is not empty\n",
+ " chunks.append(current_chunk[:]) # add not empty chunk to chunks\n",
+ " current_chunk = [] # make current chunk blank\n",
+ " # just append a line to the current chunk on each iteration\n",
+ " if \"port:\" in line or 'pmd' in line:\n",
+ " current_chunk.append(line)\n",
+ " chunks.append(current_chunk) # append the last chunk outside the loop\n",
+ "\n",
+ "core_ids = []\n",
+ "for ch in chunks:\n",
+ " port_id = ''\n",
+ " core_id = ''\n",
+ " for line in ch:\n",
+ " if 'pmd' in line:\n",
+ " core_id = line.split()[-1][:-1]\n",
+ " if core_id not in core_ids:\n",
+ " core_ids.append(core_id)\n",
+ " elif 'port:' in line:\n",
+ " port_id = line.split()[1]\n",
+ " if port_id and core_id:\n",
+ " if port_id not in portcores:\n",
+ " portcores[port_id] = core_id\n",
+ "\n",
+ "# import graphviz\n",
+ "from graphviz import Digraph\n",
+ "ps = Digraph(name='ovs-ports-cores', node_attr={'shape': 'box'}, edge_attr={'arrowhead':\"none\"})\n",
+ "with ps.subgraph(name=\"cluster_0\") as c:\n",
+ " c.node_attr.update(style='filled', color='green')\n",
+ " c.node('t0', 'TGen-Port-0')\n",
+ " c.node('t1', 'TGen-Port-1')\n",
+ " c.attr(label='TGEN')\n",
+ " c.attr(color='blue')\n",
+ "with ps.subgraph(name=\"cluster_1\") as c:\n",
+ " c.node_attr.update(style='filled', color='yellow')\n",
+ " c.node('v0', 'VNF-Port-0')\n",
+ " c.node('v1', 'VNF-Port-1')\n",
+ " c.attr(label='VNF')\n",
+ " c.attr(color='blue')\n",
+ " \n",
+ "with ps.subgraph(name='cluster_2') as c: \n",
+ " c.attr(label='OVS-DPDK')\n",
+ " c.attr(color='blue')\n",
+ " count = 0\n",
+ " for port, core in portcores.items():\n",
+ " id = 'o'+str(count)\n",
+ " c.node(id, port+'\\nCore-ID:'+ core)\n",
+ " count += 1\n",
+ " num = port[-1]\n",
+ " if 'dpdkvhost' in port:\n",
+ " ps.edge(id, 'v'+num)\n",
+ " else:\n",
+ " ps.edge(id, 't'+num)\n",
+ "\n",
+ "ps"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Dropped Packets"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "hide_input": true
+ },
+ "outputs": [],
+ "source": [
+ "portcores = collections.OrderedDict()\n",
+ "chunks = []\n",
+ "current_chunk = []\n",
+ "file = os.path.join('/data/results', 'ovs-cores.log')\n",
+ "with open(file, 'r') as f:\n",
+ " for line in f:\n",
+ " if line.startswith('pmd') and current_chunk:\n",
+ " # if line starts with token and the current chunk is not empty\n",
+ " chunks.append(current_chunk[:]) # add not empty chunk to chunks\n",
+ " current_chunk = [] # make current chunk blank\n",
+ " # just append a line to the current chunk on each iteration\n",
+ " if \"port:\" in line or 'pmd' in line:\n",
+ " current_chunk.append(line)\n",
+ " chunks.append(current_chunk) # append the last chunk outside the loop\n",
+ "\n",
+ "core_ids = []\n",
+ "for ch in chunks:\n",
+ " port_id = ''\n",
+ " core_id = ''\n",
+ " for line in ch:\n",
+ " if 'pmd' in line:\n",
+ " core_id = line.split()[-1][:-1]\n",
+ " if core_id not in core_ids:\n",
+ " core_ids.append(core_id)\n",
+ " elif 'port:' in line:\n",
+ " port_id = line.split()[1]\n",
+ " if port_id and core_id:\n",
+ " if port_id not in portcores:\n",
+ " portcores[port_id] = core_id\n",
+ "\n",
+ "ps = Digraph(name='ovs-dropped', node_attr={'shape': 'box'}, edge_attr={'arrowhead':\"none\"})\n",
+ "\n",
+ "def get_dropped(port_id):\n",
+ " # port_id = 'dpdk0'\n",
+ " if glob.glob('./pod12-node4/*'+port_id):\n",
+ " dirname = os.path.basename(glob.glob('./pod12-node4/*'+port_id)[0])\n",
+ " if dirname:\n",
+ " if glob.glob('./pod12-node4/'+dirname+ '/*dropped*'):\n",
+ " filename = os.path.basename(glob.glob('./pod12-node4/'+dirname+ '/*dropped*')[0])\n",
+ " if filename:\n",
+ " with open(os.path.join('./pod12-node4', dirname, filename), 'r') as f:\n",
+ " line = f.readlines()[-1]\n",
+ " fields = line.split(',')\n",
+ " return fields[1], fields[2]\n",
+ " return 'NA','NA'\n",
+ "\n",
+ "with ps.subgraph(name=\"cluster_0\") as c:\n",
+ " c.node_attr.update(style='filled', color='pink')\n",
+ " c.attr(label='OVS-DPDK')\n",
+ " c.attr(color='blue')\n",
+ " count = 0\n",
+ " for port, core in portcores.items():\n",
+ " id = 'o'+str(count)\n",
+ " rx,tx = get_dropped(port)\n",
+ " c.node(id, port+'\\nRX-Dropped:'+ rx + '\\nTX-Dropped:' + tx)\n",
+ " count += 1\n",
+ " num = port[-1]\n",
+ "ps"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Plotting Live Results - T-Rex"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "hide_input": true
+ },
+ "outputs": [],
+ "source": [
+ "lines_seen = set() # holds lines already seen\n",
+ "outfile = open('./counts.dat', \"w\")\n",
+ "file = os.path.join('/data/results', 'trex-liveresults-counts.dat')\n",
+ "for line in open(file, \"r\"):\n",
+ " if line not in lines_seen: # not a duplicate\n",
+ " outfile.write(line)\n",
+ " lines_seen.add(line)\n",
+ "outfile.close()\n",
+ "tdf = pd.read_csv('./counts.dat')\n",
+ "print(tdf.columns)\n",
+ "ax = tdf.loc[(tdf.rx_port == 1)].plot(y='rx_pkts')\n",
+ "def highlight(indices,ax):\n",
+ " i=0\n",
+ " while i<len(indices):\n",
+ " ax.axvspan(indices[i][0], indices[i][1], facecolor='RED', edgecolor='BLUE', alpha=.2)\n",
+ " i+=1\n",
+ "\n",
+ "ind = 0\n",
+ "indv = tdf.ts[0]\n",
+ "ax.set_xlabel(\"Index\")\n",
+ "ax.set_ylabel('Count')\n",
+ "for i in range(len(tdf.ts)):\n",
+ " if tdf.ts[i] - indv > int(traffic_duration):\n",
+ " highlight([(ind, i)], ax)\n",
+ " ind = i\n",
+ " indv = tdf.ts[i]\n",
+ "highlight([(ind,i)], ax)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## IRQ Latency Histogram"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "hide_input": true
+ },
+ "outputs": [],
+ "source": [
+ "file = os.path.join('/data/results', 'RUNirq.irq.log')\n",
+ "tdf = pd.read_csv(file)\n",
+ "tdf.columns\n",
+ "exclude = [' <1', ' < 5', ' < 10',' < 50', ' < 100', ' < 500', ' < 1000']\n",
+ "ax = tdf.loc[:, tdf.columns.difference(exclude)].plot(x=' number', xticks=tdf[' number'], figsize=(20,10))\n",
+ "ax.set_xlabel('Core #')\n",
+ "ax.set_ylabel('Count')\n",
+ "#tdf.plot(x='number')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Sample Collectd Metric Display - L3 Cache Occupancy in Bytes"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import math\n",
+ "def cpumask2coreids(mask):\n",
+ " intmask = int(mask, 16)\n",
+ " i = 1\n",
+ " coreids = []\n",
+ " while (i < intmask):\n",
+ " if (i & intmask):\n",
+ " coreids.append(str(math.frexp(i)[-1]-1))\n",
+ " i = i << 1\n",
+ " return (coreids)\n",
+ "\n",
+ "vswitch_cpus = \"['2']\"\n",
+ "ps = Digraph(name='cpu-map', node_attr={'shape': 'box'}, edge_attr={'arrowhead':\"none\"})\n",
+ "with ps.subgraph(name=\"cluster_0\") as c:\n",
+ " c.node_attr.update(style='filled', color='pink')\n",
+ " c.attr(label='CPU-MAPPINGS')\n",
+ " c.attr(color='blue')\n",
+ " c.node('vscpus', 'vSwitch: \\n' + vswitch_cpus)\n",
+ " # vnf_cpus = cpumask2coreids(guest_cores)\n",
+ " c.node('vncpus', 'VNF: \\n' + guest_cores)\n",
+ " pmd_cpus = cpumask2coreids(pmd_cores_mask[1:-1])\n",
+ " c.node('pmcpus', 'PMDs: \\n' + str(pmd_cpus))\n",
+ "\n",
+ "ps"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "hide_input": true
+ },
+ "outputs": [],
+ "source": [
+ "# Path where collectd results are stored.\n",
+ "mypath = \"./pod12-node4\"\n",
+ "file_count = 0\n",
+ "cpu_names = []\n",
+ "for level1 in os.listdir(mypath):\n",
+ " if \"intel_rdt\" in level1:\n",
+ " l2path = os.path.join(mypath, level1)\n",
+ " for level2 in os.listdir(l2path):\n",
+ " if \"bytes\" in level2:\n",
+ " l3path = os.path.join(l2path, level2)\n",
+ " if file_count == 0:\n",
+ " file_count += 1\n",
+ " df = pd.read_csv(l3path)\n",
+ " nn = 'cpu-'+ level1[len('intel_rdt-'):]\n",
+ " # nn = 'cpu-'+ level1.split('-')[1]\n",
+ " cpu_names.append(nn)\n",
+ " # print(nn)\n",
+ " df.rename(columns={'value': nn}, inplace=True)\n",
+ " else:\n",
+ " file_count += 1\n",
+ " tdf = pd.read_csv(l3path)\n",
+ " nn = 'cpu-'+ level1[len('intel_rdt-'):]\n",
+ " cpu_names.append(nn)\n",
+ " tdf.rename(columns={'value': nn}, inplace=True)\n",
+ " df[nn] = tdf[nn] \n",
+ "\n",
+ "ax = df.plot(x='epoch', y=cpu_names)\n",
+ "ax.set_ylabel(\"MBytes\")\n",
+ "ax.set_xlabel('Time')\n",
+ "\n",
+ "\n",
+ " \n",
+ "# df = pd.read_csv()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Events "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "hide_input": true
+ },
+ "outputs": [],
+ "source": [
+ "from datetime import datetime\n",
+ "filename = os.path.basename(glob.glob('/data/results/vsperf-overall*.log')[0])\n",
+ "logfile = os.path.join('/data/results', filename)\n",
+ "linecnt = 0\n",
+ "times = {}\n",
+ "with open(logfile) as f:\n",
+ " for line in f:\n",
+ " line = line.strip('\\n')\n",
+ " if linecnt == 0:\n",
+ " times['Start-Test'] = line.split(\" : \")[0]\n",
+ " linecnt += 1\n",
+ " if 'Binding NICs' in line:\n",
+ " times['Binding-NICs'] = line.split(\" : \")[0]\n",
+ " if 'Starting traffic at' in line:\n",
+ " sline = line.split(\" : \")[1]\n",
+ " time = line.split(\" : \")[0]\n",
+ " speed = sline.split('at',1)[1]\n",
+ " times[speed] = time \n",
+ " elif 'Starting vswitchd' in line:\n",
+ " times['vSwitch-Start'] = line.split(\" : \")[0]\n",
+ " elif 'Starting ovs-vswitchd' in line:\n",
+ " times['ovsvswitch-start'] = line.split(\" : \")[0]\n",
+ " elif 'Adding Ports' in line:\n",
+ " times['Ports-Added'] = line.split(\" : \")[0]\n",
+ " elif 'Flows Added' in line:\n",
+ " times['Flows-Added'] = line.split(\" : \")[0]\n",
+ " elif 'send_traffic with' in line:\n",
+ " times['Traffic Start'] = line.split(\" : \")[0]\n",
+ " elif 'l2 framesize 1280' in line:\n",
+ " times['Traffic-Start-1280'] = line.split(\" : \")[0]\n",
+ " elif 'Starting qemu' in line:\n",
+ " times['VNF-Start'] = line.split(\" : \")[0]\n",
+ " elif 'l2 framesize 64' in line:\n",
+ " times['Traffic-Start-64'] = line.split(\" : \")[0]\n",
+ " elif 'l2 framesize 128' in line:\n",
+ " times['Traffic-Start-128'] = line.split(\" : \")[0]\n",
+ " elif 'l2 framesize 256' in line:\n",
+ " times['Traffic-Start-256'] = line.split(\" : \")[0]\n",
+ " elif 'l2 framesize 512' in line:\n",
+ " times['Traffic-Start-512'] = line.split(\" : \")[0]\n",
+ " elif 'l2 framesize 1024' in line:\n",
+ " times['Traffic-Start-1024'] = line.split(\" : \")[0]\n",
+ " elif 'l2 framesize 1518' in line:\n",
+ " times['Traffic-Start-1518'] = line.split(\" : \")[0]\n",
+ " elif 'dump flows' in line:\n",
+ " times['Traffic-End'] = line.split(\" : \")[0]\n",
+ " elif 'Wait for QEMU' in line:\n",
+ " times['VNF-Stop'] = line.split(\" : \")[0]\n",
+ " elif 'delete flow' in line:\n",
+ " times['flow-removed'] = line.split(\" : \")[0]\n",
+ " elif 'delete port' in line:\n",
+ " times['port-removed'] = line.split(\" : \")[0]\n",
+ " elif 'Killing ovs-vswitchd' in line:\n",
+ " times['vSwitch-Stop'] = line.split(\" : \")[0]\n",
+ "\n",
+ "times['Test-Stop'] = line.split(\" : \")[0]\n",
+ "#print(times)\n",
+ "ddf = pd.DataFrame.from_dict(times, orient='index', columns=['timestamp'])\n",
+ "names = ddf.index.values\n",
+ "dates = ddf['timestamp'].tolist()\n",
+ "datefmt=\"%Y-%m-%d %H:%M:%S,%f\"\n",
+ "dates = [datetime.strptime(ii, datefmt) for ii in dates]\n",
+ "# print(names)\n",
+ "# print(dates)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "hide_input": true
+ },
+ "outputs": [],
+ "source": [
+ "import matplotlib.dates as mdates\n",
+ "from matplotlib import ticker\n",
+ "\n",
+ "levels = np.array([-5, 5, -3, 3, -1, 1])\n",
+ "fig, ax = plt.subplots(figsize=(40, 5))\n",
+ "\n",
+ "# Create the base line\n",
+ "start = min(dates)\n",
+ "stop = max(dates)\n",
+ "ax.plot((start, stop), (0, 0), 'k', alpha=.5)\n",
+ "\n",
+ "pos_list = np.arange(len(dates))\n",
+ "\n",
+ "# Iterate through releases annotating each one\n",
+ "for ii, (iname, idate) in enumerate(zip(names, dates)):\n",
+ " level = levels[ii % 6]\n",
+ " vert = 'top' if level < 0 else 'bottom'\n",
+ " ax.scatter(idate, 0, s=100, facecolor='w', edgecolor='k', zorder=9999)\n",
+ " # Plot a line up to the text\n",
+ " ax.plot((idate, idate), (0, level), c='r', alpha=.7)\n",
+ " # Give the text a faint background and align it properly\n",
+ " ax.text(idate, level, iname,\n",
+ " horizontalalignment='right', verticalalignment=vert, fontsize=14,\n",
+ " backgroundcolor=(1., 1., 1., .3))\n",
+ "ax.set(title=\"VSPERF Main Events\")\n",
+ "# Set the xticks formatting\n",
+ "ax.get_xaxis().set_major_locator(mdates.SecondLocator(interval=30))\n",
+ "ax.get_xaxis().set_major_formatter(mdates.DateFormatter(\"%M %S\"))\n",
+ "fig.autofmt_xdate()\n",
+ "plt.setp((ax.get_yticklabels() + ax.get_yticklines() +\n",
+ " list(ax.spines.values())), visible=False)\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Current and old."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Current Result"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "hide_input": true
+ },
+ "outputs": [],
+ "source": [
+ "import glob\n",
+ "filename = os.path.basename(glob.glob('/data/results/result*.csv')[0])\n",
+ "filename\n",
+ "tdf = pd.read_csv(os.path.join('/data/results', filename))\n",
+ "pkts = ['tx_frames', 'rx_frames']\n",
+ "fps = ['tx_rate_fps', 'throughput_rx_fps']\n",
+ "mbps = ['tx_rate_mbps', 'throughput_rx_mbps']\n",
+ "pcents = ['tx_rate_percent', 'throughput_rx_percent', 'frame_loss_percent']\n",
+ "fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(14, 12))\n",
+ "tdf.plot.bar(y= pkts,ax=axes[0,0])\n",
+ "tdf.plot.bar(y= fps,ax=axes[0,1])\n",
+ "tdf.plot.bar(y= mbps,ax=axes[1,0])\n",
+ "tdf.plot.bar(y= pcents,ax=axes[1,1])\n",
+ "current_pkt_size = str(tdf['packet_size'].iloc[-1])\n",
+ "current_rx_fps = str(tdf['throughput_rx_fps'].iloc[-1])\n",
+ "print(current_rx_fps)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## How Current Result compares to Previous ones?"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "hide_input": true
+ },
+ "outputs": [],
+ "source": [
+ "import urllib\n",
+ "import json\n",
+ "import requests\n",
+ "#json_data = requests.get('http://testresults.opnfv.org/test/api/v1/results?project=vsperf').json()\n",
+ "json_data = requests.get('http://10.10.120.22:8000/api/v1/results?project=vsperf').json()\n",
+ "res = json_data['results']\n",
+ "df1 = pd.DataFrame(res)\n",
+ "sort_by_date = df1.sort_values('start_date')\n",
+ "details = df1['details'].apply(pd.Series)\n",
+ "details[current_pkt_size] = pd.to_numeric(pd.Series(details[current_pkt_size]))\n",
+ "# details.plot.bar(y = current_pkt_size)\n",
+ "details_cur_pkt = details[[current_pkt_size]].copy()\n",
+ "details_cur_pkt.loc[-1]= float(current_rx_fps)\n",
+ "details_cur_pkt.index = details_cur_pkt.index + 1 # shifting index\n",
+ "details_cur_pkt.sort_index(inplace=True) \n",
+ "ax = details_cur_pkt.plot.bar()\n",
+ "ax.set_ylabel(\"Frames per sec\")\n",
+ "ax.set_xlabel(\"Run Number\")\n",
+ "def highlight(indices,ax):\n",
+ " i=0\n",
+ " while i<len(indices):\n",
+ " ax.axvspan(indices[i]-0.5, indices[i]+0.5, facecolor='RED', edgecolor='none', alpha=.2)\n",
+ " i+=1\n",
+ "highlight([0], ax)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Heatmaps"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "hide_input": true
+ },
+ "outputs": [],
+ "source": [
+ "array_of_dfs = []\n",
+ "for dirs in glob.glob('./pod12-node4/ovs_stats-vsperf*'):\n",
+ " dirname = os.path.basename(dirs)\n",
+ " if dirname:\n",
+ " port = dirname.split('.')[1]\n",
+ " if glob.glob('./pod12-node4/'+dirname+ '/*dropped*'):\n",
+ " full_path = glob.glob('./pod12-node4/'+dirname+ '/*dropped*')[0]\n",
+ " filename = os.path.basename(full_path)\n",
+ " if filename:\n",
+ " df = pd.read_csv(full_path)\n",
+ " df.rename(index=str, columns={\"rx\": port+\"-rx\" , \"tx\": port+\"-tx\"}, inplace=True)\n",
+ " df = df.drop(columns=['epoch'])\n",
+ " array_of_dfs.append(df)\n",
+ "master_df = pd.concat(array_of_dfs, axis=1, sort=True)\n",
+ "master_df.columns\n",
+ "\n",
+ "# get the correlation coefficient between the different columns\n",
+ "corr = master_df.iloc[:, 0:].corr()\n",
+ "arr_corr = corr.values\n",
+ "# mask out the top triangle\n",
+ "arr_corr[np.triu_indices_from(arr_corr)] = np.nan\n",
+ "fig, ax = plt.subplots(figsize=(18, 12))\n",
+ "sns.set(font_scale=3.0)\n",
+ "hm = sns.heatmap(arr_corr, cbar=True, vmin=-0.5, vmax=0.5,\n",
+ " fmt='.2f', annot_kws={'size': 20}, annot=True, \n",
+ " square=True, cmap=plt.cm.Reds)\n",
+ "ticks = np.arange(corr.shape[0]) + 0.5\n",
+ "ax.set_xticks(ticks)\n",
+ "ax.set_xticklabels(corr.columns, rotation=90, fontsize=20)\n",
+ "ax.set_yticks(ticks)\n",
+ "ax.set_yticklabels(corr.index, rotation=360, fontsize=20)\n",
+ "\n",
+ "ax.set_title('Heatmap')\n",
+ "plt.tight_layout()\n",
+ "plt.show()"
+ ]
+ }
+ ],
+ "metadata": {
+ "author": {
+ "@type": "Person",
+ "name": "Sridhar K. N. Rao",
+ "worksFor": {
+ "@type": "Organization",
+ "name": "Spirent Communications"
+ }
+ },
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.7.1"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/tools/docker/results/resultsdb/cases.json b/tools/docker/results/resultsdb/cases.json
new file mode 100644
index 00000000..e7576dbf
--- /dev/null
+++ b/tools/docker/results/resultsdb/cases.json
@@ -0,0 +1 @@
+{"testcases": [{"project_name": "vsperf", "run": null, "description": "LTD.Throughput.RFC2544.PacketLossRatio for DPDK Ovs", "ci_loop": null, "tags": null, "url": "http://tput_ovsdpdk", "_id": "565feb6b514bc5087f3cfe2e", "catalog_description": "Packet Loss Ratio for DPDK OVS (RFC2544)", "creation_date": "2015-12-03 07:12:43.925943", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "tput_ovsdpdk"}, {"project_name": "vsperf", "run": null, "description": "LTD.Throughput.RFC2544.PacketLossRatio for Vanilla Ovs", "ci_loop": null, "tags": null, "url": "http://tput_ovs", "_id": "566005d9514bc5087f3cfe30", "catalog_description": "Packet Loss Ratio for Vanilla Ovs (RFC2544)", "creation_date": "2015-12-03 09:05:29.686136", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "tput_ovs"}, {"project_name": "vsperf", "run": null, "description": "LTD.Throughput.RFC2544.PacketLossRatio for Vanilla Ovs", "ci_loop": null, "tags": null, "url": "http://tput_ovs", "_id": "566005ed514bc5087f3cfe31", "catalog_description": "Packet Loss Ratio for Vanilla Ovs (RFC2544)", "creation_date": "2015-12-03 09:05:49.363961", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "tput_ovs"}, {"project_name": "vsperf", "run": null, "description": "LTD.Throughput.RFC2544.BackToBackFrames for DPDK Ovs", "ci_loop": null, "tags": null, "url": "http://b2b_ovsdpdk", "_id": "566006c8514bc5087f3cfe32", "catalog_description": "Back To Back Frames for DPDK Ovs (RFC2544)", "creation_date": "2015-12-03 09:09:28.927130", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "b2b_ovsdpdk"}, {"project_name": "vsperf", "run": null, "description": "LTD.Throughput.RFC2544.BackToBackFrames for Vanilla Ovs", "ci_loop": null, "tags": null, "url": "http://b2b_ovs", "_id": "5660071e514bc5087f3cfe33", "catalog_description": "Back To Back Frames for Vanilla Ovs (RFC2544)", "creation_date": "2015-12-03 09:10:54.473180", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "b2b_ovs"}, {"project_name": "vsperf", "run": null, "description": "LTD.Throughput.RFC2544.PacketLossRatioFrameModification for DPDK Ovs", "ci_loop": null, "tags": null, "url": "http://tput_mod_vlan_ovsdpdk", "_id": "566007a9514bc5087f3cfe34", "catalog_description": "Packet Loss Ratio Frame Modification for DPDK Ovs (RFC2544)", "creation_date": "2015-12-03 09:13:13.600168", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "tput_mod_vlan_ovsdpdk"}, {"project_name": "vsperf", "run": null, "description": "LTD.Throughput.RFC2544.PacketLossRatioFrameModification for Vanilla Ovs", "ci_loop": null, "tags": null, "url": "http://tput_mod_vlan_ovs", "_id": "566007ec514bc5087f3cfe35", "catalog_description": "Packet Loss Ratio Frame Modification for Vanilla Ovs (RFC2544)", "creation_date": "2015-12-03 09:14:20.594501", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "tput_mod_vlan_ovs"}, {"project_name": "vsperf", "run": null, "description": "LTD.Scalability.RFC2544.0PacketLoss for DPDK Ovs", "ci_loop": null, "tags": null, "url": "http://scalability_ovsdpdk", 
"_id": "56600870514bc5087f3cfe36", "catalog_description": ".Scalability Packet Loss for DPDK Ovs", "creation_date": "2015-12-03 09:16:32.491960", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "scalability_ovsdpdk"}, {"project_name": "vsperf", "run": null, "description": "LTD.Scalability.RFC2544.0PacketLoss for Vanilla Ovs", "ci_loop": null, "tags": null, "url": "http://scalability_ovs", "_id": "566008b3514bc5087f3cfe37", "catalog_description": "Scalability Packet Loss for Vanilla Ovs (RFC2544)", "creation_date": "2015-12-03 09:17:39.501079", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "scalability_ovs"}, {"project_name": "vsperf", "run": null, "description": "PVP LTD.Throughput.RFC2544.PacketLossRatio for DPDK User Ovs", "ci_loop": null, "tags": null, "url": "http://pvp_tput_ovsdpdkuser", "_id": "5660095a514bc5087f3cfe38", "catalog_description": "PVP Packet Loss Ratio for DPDK User Ovs", "creation_date": "2015-12-03 09:20:26.244843", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "pvp_tput_ovsdpdkuser"}, {"project_name": "vsperf", "run": null, "description": "PVP LTD.Throughput.RFC2544.PacketLossRatio for Vanilla Ovs", "ci_loop": null, "tags": null, "url": "http://pvp_tput_ovsvirtio", "_id": "566009ae514bc5087f3cfe39", "catalog_description": "PVP Packet Loss Ratio for Vanilla Ovs", "creation_date": "2015-12-03 09:21:50.251212", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "pvp_tput_ovsvirtio"}, {"project_name": "vsperf", "run": null, "description": "PVP LTD.Throughput.RFC2544.BackToBackFrames for DPDK User Ovs", "ci_loop": null, "tags": null, "url": "http://pvp_b2b_ovsdpdkuser", "_id": "56600a1a514bc5087f3cfe3a", "catalog_description": "PVP Back To Back Frames for DPDK User Ovs", "creation_date": "2015-12-03 09:23:38.269821", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "pvp_b2b_ovsdpdkuser"}, {"project_name": "vsperf", "run": null, "description": "PVP LTD.Throughput.RFC2544.BackToBackFrames for Vanilla Ovs", "ci_loop": null, "tags": null, "url": "http://pvp_b2b_ovsvirtio", "_id": "56600a5f514bc5087f3cfe3b", "catalog_description": "PVP Back To Back Frames for Vanilla Ovs", "creation_date": "2015-12-03 09:24:47.990062", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "pvp_b2b_ovsvirtio"}, {"project_name": "vsperf", "run": null, "description": "PVVP LTD.Throughput.RFC2544.PacketLossRatio for DPDK User Ovs", "ci_loop": null, "tags": null, "url": "http://pvvp_tput_ovsdpdkuser", "_id": "56600ab3514bc5087f3cfe3c", "catalog_description": "PVVP Packet Loss Ratio for DPDK User Ovs", "creation_date": "2015-12-03 09:26:11.657515", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "pvvp_tput_ovsdpdkuser"}, {"project_name": "vsperf", "run": null, "description": "PVVP LTD.Throughput.RFC2544.PacketLossRatio for Vanilla Ovs", "ci_loop": null, "tags": null, "url": "http://pvvp_tput_ovsvirtio", "_id": "56600ae9514bc5087f3cfe3d", "catalog_description": "PVVP Packet Loss Ratio for Vanilla Ovs", "creation_date": "2015-12-03 09:27:05.466374", 
"domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "pvvp_tput_ovsvirtio"}, {"project_name": "vsperf", "run": null, "description": "PVVP LTD.Throughput.RFC2544.BackToBackFrames for DPDK User Ovs", "ci_loop": null, "tags": null, "url": "http://pvvp_b2b_ovsdpdkuser", "_id": "56600b2a514bc5087f3cfe3e", "catalog_description": "PVVP Back To Back Frames for DPDK User Ovs", "creation_date": "2015-12-03 09:28:10.150217", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "pvvp_b2b_ovsdpdkuser"}, {"project_name": "vsperf", "run": null, "description": "PVVP LTD.Throughput.RFC2544.BackToBackFrames for Vanilla Ovs", "ci_loop": null, "tags": null, "url": "http://pvvp_b2b_ovsvirtio", "_id": "56600b4f514bc5087f3cfe3f", "catalog_description": "PVVP Back To Back Frames for Vanilla Ovs", "creation_date": "2015-12-03 09:28:47.108529", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": null, "blocking": null, "name": "pvvp_b2b_ovsvirtio"}, {"project_name": "vsperf", "run": "", "description": "", "ci_loop": "", "tags": "ovs,dpdk", "url": "", "_id": "591e8a8f41b755000a68c831", "catalog_description": "Phy2Phy Continuous Stream DPDK", "creation_date": "2017-05-19 06:02:55.177254", "domains": "compute", "dependencies": "", "version": ">euphrates", "criteria": "", "tier": "performance", "trust": null, "blocking": "", "name": "cont_ovsdpdk"}, {"project_name": "vsperf", "run": "", "description": "", "ci_loop": "", "tags": "ovs", "url": "", "_id": "5980d1b073ce050010c339ca", "catalog_description": "Phy2Phy Continuous Stream", "creation_date": "2017-08-01 19:08:32.518983", "domains": "compute", "dependencies": "", "version": "euphrates", "criteria": "", "tier": null, "trust": null, "blocking": "", "name": "cont_ovs"}, {"project_name": "vsperf", "run": null, "description": "LTD.Throughput.RFC2544.PacketLossRatio for DPDK Ovs", "ci_loop": null, "tags": null, "url": "http://tput_ovsdpdk", "_id": "565feb6b514bc5087f3cfe2e", "catalog_description": "Packet Loss Ratio for DPDK OVS (RFC2544)", "creation_date": "2015-12-03 07:12:43.925943", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": "Silver", "blocking": null, "name": "phy2phy_tput_ovsdpdkvhost"}, {"project_name": "vsperf", "run": null, "description": "LTD.Throughput.RFC2544.PacketLossRatio for Vanilla Ovs", "ci_loop": null, "tags": null, "url": "http://tput_ovs", "_id": "566005d9514bc5087f3cfe30", "catalog_description": "Packet Loss Ratio for Vanilla Ovs (RFC2544)", "creation_date": "2015-12-03 09:05:29.686136", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": "Silver", "blocking": null, "name": "phy2phy_tput_ovsvanilla"}, {"project_name": "vsperf", "run": null, "description": "LTD.Throughput.RFC2544.BackToBackFrames for DPDK Ovs", "ci_loop": null, "tags": null, "url": "http://b2b_ovsdpdk", "_id": "566006c8514bc5087f3cfe32", "catalog_description": "Back To Back Frames for DPDK Ovs (RFC2544)", "creation_date": "2015-12-03 09:09:28.927130", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": "Silver", "blocking": null, "name": "back2back_ovsdpdkvhost"}, {"project_name": "vsperf", "run": null, "description": "LTD.Throughput.RFC2544.BackToBackFrames for Vanilla Ovs", "ci_loop": null, "tags": null, "url": "http://b2b_ovs", "_id": 
"5660071e514bc5087f3cfe33", "catalog_description": "Back To Back Frames for Vanilla Ovs (RFC2544)", "creation_date": "2015-12-03 09:10:54.473180", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": "Silver", "blocking": null, "name": "back2back_ovsvanilla"}, {"project_name": "vsperf", "run": null, "description": "LTD.Throughput.RFC2544.PacketLossRatioFrameModification for DPDK Ovs", "ci_loop": null, "tags": null, "url": "http://tput_mod_vlan_ovsdpdk", "_id": "566007a9514bc5087f3cfe34", "catalog_description": "Packet Loss Ratio Frame Modification for DPDK Ovs (RFC2544)", "creation_date": "2015-12-03 09:13:13.600168", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": "Silver", "blocking": null, "name": "phy2phy_tput_mod_vlan_ovsdpdkvhost"}, {"project_name": "vsperf", "run": null, "description": "LTD.Throughput.RFC2544.PacketLossRatioFrameModification for Vanilla Ovs", "ci_loop": null, "tags": null, "url": "http://tput_mod_vlan_ovs", "_id": "566007ec514bc5087f3cfe35", "catalog_description": "Packet Loss Ratio Frame Modification for Vanilla Ovs (RFC2544)", "creation_date": "2015-12-03 09:14:20.594501", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": "Silver", "blocking": null, "name": "phy2phy_tput_mod_vlan_ovsvanilla"}, {"project_name": "vsperf", "run": null, "description": "LTD.Scalability.RFC2544.0PacketLoss for DPDK Ovs", "ci_loop": null, "tags": null, "url": "http://scalability_ovsdpdk", "_id": "56600870514bc5087f3cfe36", "catalog_description": ".Scalability Packet Loss for DPDK Ovs", "creation_date": "2015-12-03 09:16:32.491960", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": "Silver", "blocking": null, "name": "phy2phy_scalability_ovsdpdkvhost"}, {"project_name": "vsperf", "run": null, "description": "LTD.Scalability.RFC2544.0PacketLoss for Vanilla Ovs", "ci_loop": null, "tags": null, "url": "http://scalability_ovs", "_id": "566008b3514bc5087f3cfe37", "catalog_description": "Scalability Packet Loss for Vanilla Ovs (RFC2544)", "creation_date": "2015-12-03 09:17:39.501079", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": "Silver", "blocking": null, "name": "phy2phy_scalability_ovsvanilla"}, {"project_name": "vsperf", "run": null, "description": "PVP LTD.Throughput.RFC2544.PacketLossRatio for DPDK User Ovs", "ci_loop": null, "tags": null, "url": "http://pvp_tput_ovsdpdkuser", "_id": "5660095a514bc5087f3cfe38", "catalog_description": "PVP Packet Loss Ratio for DPDK User Ovs", "creation_date": "2015-12-03 09:20:26.244843", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": "Silver", "blocking": null, "name": "pvp_tput_ovsdpdkvhost"}, {"project_name": "vsperf", "run": null, "description": "PVP LTD.Throughput.RFC2544.PacketLossRatio for Vanilla Ovs", "ci_loop": null, "tags": null, "url": "http://pvp_tput_ovsvirtio", "_id": "566009ae514bc5087f3cfe39", "catalog_description": "PVP Packet Loss Ratio for Vanilla Ovs", "creation_date": "2015-12-03 09:21:50.251212", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": "Silver", "blocking": null, "name": "pvp_tput_ovsvanilla"}, {"project_name": "vsperf", "run": null, "description": "PVP LTD.Throughput.RFC2544.BackToBackFrames for DPDK User Ovs", "ci_loop": null, "tags": null, "url": "http://pvp_b2b_ovsdpdkuser", "_id": 
"56600a1a514bc5087f3cfe3a", "catalog_description": "PVP Back To Back Frames for DPDK User Ovs", "creation_date": "2015-12-03 09:23:38.269821", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": "Silver", "blocking": null, "name": "pvp_back2back_ovsdpdkvhost"}, {"project_name": "vsperf", "run": null, "description": "PVP LTD.Throughput.RFC2544.BackToBackFrames for Vanilla Ovs", "ci_loop": null, "tags": null, "url": "http://pvp_b2b_ovsvirtio", "_id": "56600a5f514bc5087f3cfe3b", "catalog_description": "PVP Back To Back Frames for Vanilla Ovs", "creation_date": "2015-12-03 09:24:47.990062", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": "Silver", "blocking": null, "name": "pvp_back2back_ovsvanilla"}, {"project_name": "vsperf", "run": null, "description": "PVVP LTD.Throughput.RFC2544.PacketLossRatio for DPDK User Ovs", "ci_loop": null, "tags": null, "url": "http://pvvp_tput_ovsdpdkuser", "_id": "56600ab3514bc5087f3cfe3c", "catalog_description": "PVVP Packet Loss Ratio for DPDK User Ovs", "creation_date": "2015-12-03 09:26:11.657515", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": "Silver", "blocking": null, "name": "pvvp_tput_ovsdpdkvhost"}, {"project_name": "vsperf", "run": null, "description": "PVVP LTD.Throughput.RFC2544.PacketLossRatio for Vanilla Ovs", "ci_loop": null, "tags": null, "url": "http://pvvp_tput_ovsvirtio", "_id": "56600ae9514bc5087f3cfe3d", "catalog_description": "PVVP Packet Loss Ratio for Vanilla Ovs", "creation_date": "2015-12-03 09:27:05.466374", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": "Silver", "blocking": null, "name": "pvvp_tput_ovsvanilla"}, {"project_name": "vsperf", "run": null, "description": "PVVP LTD.Throughput.RFC2544.BackToBackFrames for DPDK User Ovs", "ci_loop": null, "tags": null, "url": "http://pvvp_b2b_ovsdpdkuser", "_id": "56600b2a514bc5087f3cfe3e", "catalog_description": "PVVP Back To Back Frames for DPDK User Ovs", "creation_date": "2015-12-03 09:28:10.150217", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": "Silver", "blocking": null, "name": "pvvp_back2back_ovsdpdkvhost"}, {"project_name": "vsperf", "run": null, "description": "PVVP LTD.Throughput.RFC2544.BackToBackFrames for Vanilla Ovs", "ci_loop": null, "tags": null, "url": "http://pvvp_b2b_ovsvirtio", "_id": "56600b4f514bc5087f3cfe3f", "catalog_description": "PVVP Back To Back Frames for Vanilla Ovs", "creation_date": "2015-12-03 09:28:47.108529", "domains": null, "dependencies": null, "version": null, "criteria": null, "tier": null, "trust": "Silver", "blocking": null, "name": "pvvp_back2back_ovsvanilla"}, {"project_name": "vsperf", "run": "", "description": "", "ci_loop": "", "tags": "ovs,dpdk", "url": "", "_id": "591e8a8f41b755000a68c831", "catalog_description": "Phy2Phy Continuous Stream DPDK", "creation_date": "2017-05-19 06:02:55.177254", "domains": "compute", "dependencies": "", "version": ">euphrates", "criteria": "", "tier": "performance", "trust": null, "blocking": "", "name": "phy2phy_cont_ovsdpdkvhost"}, {"project_name": "vsperf", "run": "", "description": "", "ci_loop": "", "tags": "ovs", "url": "", "_id": "5980d1b073ce050010c339ca", "catalog_description": "Phy2Phy Continuous Stream", "creation_date": "2017-08-01 19:08:32.518983", "domains": "compute", "dependencies": "", "version": "euphrates", "criteria": "", "tier": null, "trust": null, 
"blocking": "", "name": "phy2phy_cont_ovsvanilla"}, {"project_name": "vsperf", "run": "", "description": "", "ci_loop": "", "tags": "", "url": "", "_id": "59a48f18dc5815000e54a624", "catalog_description": "LTD.Throughput.RFC2544.PacketLossRatio VPP DPDK", "creation_date": "2017-08-28 21:46:00.448859", "domains": "compute", "dependencies": "", "version": "euphrates", "criteria": "", "tier": null, "trust": null, "blocking": "", "name": "phy2phy_tput_vpp_vppdpdkvhost"}, {"project_name": "vsperf", "run": "", "description": "", "ci_loop": "", "tags": "", "url": "", "_id": "59a493e7dc5815000e54a62e", "catalog_description": "LTD.Throughput.RFC2544.BackToBackFrames VPP DPDK", "creation_date": "2017-08-28 22:06:31.415776", "domains": "compute", "dependencies": "", "version": "euphrates", "criteria": "", "tier": null, "trust": null, "blocking": "", "name": "phy2phy_back2back_vpp_vppdpdkvhost"}, {"project_name": "vsperf", "run": "", "description": "", "ci_loop": "", "tags": "", "url": "", "_id": "59a4946ddc5815000e54a630", "catalog_description": "LTD.Throughput.RFC2544.PacketLossRatio VPP DPDK", "creation_date": "2017-08-28 22:08:45.830223", "domains": "compute", "dependencies": "", "version": "euphrates", "criteria": "", "tier": null, "trust": null, "blocking": "", "name": "pvp_tput_vpp_vppdpdkvhost"}, {"project_name": "vsperf", "run": "", "description": "", "ci_loop": "", "tags": "", "url": "", "_id": "59a494cbdc5815000e54a632", "catalog_description": "LTD.Throughput.RFC2544.BackToBackFrames VPP DPDK", "creation_date": "2017-08-28 22:10:19.882545", "domains": "compute", "dependencies": "", "version": "euphrates", "criteria": "", "tier": null, "trust": null, "blocking": "", "name": "pvp_back2back_vpp_vppdpdkvhost"}, {"project_name": "vsperf", "run": "", "description": "", "ci_loop": "", "tags": "", "url": "", "_id": "59a495cfdc5815000e54a635", "catalog_description": "LTD.Throughput.RFC2544.PacketLossRatio VPP DPDK", "creation_date": "2017-08-28 22:14:39.603143", "domains": "compute", "dependencies": "", "version": "euphrates", "criteria": "", "tier": null, "trust": null, "blocking": "", "name": "pvvp_tput_vpp_vppdpdkvhost"}, {"project_name": "vsperf", "run": "", "description": "", "ci_loop": "", "tags": "", "url": "", "_id": "59a4964edc5815000e54a637", "catalog_description": "LTD.Throughput.RFC2544.BackToBackFrames VPP DPDK", "creation_date": "2017-08-28 22:16:46.066477", "domains": "compute", "dependencies": "", "version": "euphrates", "criteria": "", "tier": null, "trust": null, "blocking": "", "name": "pvvp_back2back_vpp_vppdpdkvhost"}]}
diff --git a/tools/docker/results/resultsdb/init_db.py b/tools/docker/results/resultsdb/init_db.py
new file mode 100644
index 00000000..40bb4ee2
--- /dev/null
+++ b/tools/docker/results/resultsdb/init_db.py
@@ -0,0 +1,110 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+"""
+Preload the results database with testcases.
+"""
+
+from __future__ import print_function
+import json
+import sys
+import requests
+
+DB_HOST_IP = sys.argv[1]
+TESTAPI_PORT = sys.argv[2]
+
+TARGET_URL = 'http://{}:{}/api/v1'.format(DB_HOST_IP, TESTAPI_PORT)
+
+
+def get(url):
+ """
+ Get the http response.
+ """
+ return requests.get(url).json()
+
+
+def post(url, data):
+ """
+ Post HTTP request.
+ """
+ headers = {'Content-Type': 'application/json'}
+ res = requests.post(url, data=json.dumps(data), headers=headers)
+ print(res.text)
+
+
+def pod():
+ """
+ Register the PODs with the TestAPI.
+ """
+ target = '{}/pods'.format(TARGET_URL)
+
+ with open('pods.json', 'r') as podref:
+ pods = json.load(podref)
+ for apod in pods:
+ post(target, apod)
+
+ add_pod('master', 'metal')
+ add_pod('virtual_136_2', 'virtual')
+
+
+def project():
+ """
+ Register the projects.
+ """
+ target = '{}/projects'.format(TARGET_URL)
+ with open('projects.json', 'r') as projref:
+ projects = json.load(projref)
+ for proj in projects:
+ post(target, proj)
+
+
+def cases():
+ """
+ Register the test cases.
+ """
+ with open('cases.json', 'r') as caseref:
+ for line in caseref:
+ subcases = json.loads(line)
+ for cas in subcases["testcases"]:
+ target = '{}/projects/{}/cases'.format(TARGET_URL,
+ cas['project_name'])
+ post(target, cas)
+ add_case("functest", "tempest_custom")
+
+
+def add_pod(name, mode):
+ """
+ Add the Pods.
+ """
+ data = {
+ "role": "",
+ "name": name,
+ "details": '',
+ "mode": mode,
+ "creation_date": "2017-2-23 11:23:03.765581"
+ }
+ pod_url = '{}/pods'.format(TARGET_URL)
+ post(pod_url, data)
+
+
+def add_case(projectname, casename):
+ """
+ Add a testcase
+ """
+ data = {
+ "project_name": projectname,
+ "name": casename,
+ }
+ case_url = '{}/projects/{}/cases'.format(TARGET_URL, projectname)
+ post(case_url, data)
+
+
+if __name__ == '__main__':
+ pod()
+ project()
+ cases()
diff --git a/tools/docker/results/resultsdb/pods.json b/tools/docker/results/resultsdb/pods.json
new file mode 100644
index 00000000..3cd1dadb
--- /dev/null
+++ b/tools/docker/results/resultsdb/pods.json
@@ -0,0 +1,382 @@
+[
+ {
+ "name": "lf-pod2",
+ "creation_date": "2015-01-01 08:00:00.476549",
+ "role": "production-ci",
+ "mode": "metal",
+ "_id": "5617f98e514bc5355b51f6b5",
+ "details": ""
+ },
+ {
+ "name": "lf-pod1",
+ "creation_date": "2015-01-01 08:00:00.476549",
+ "role": "production-ci",
+ "mode": "metal",
+ "_id": "5617fa5a514bc5355b51f6b6",
+ "details": ""
+ },
+ {
+ "name": "orange-pod2",
+ "creation_date": "2015-10-27 15:27:30.312012",
+ "role": "",
+ "mode": "metal",
+ "_id": "562f97e2514bc5174d053d38",
+ "details": "https://wiki.opnfv.org/opnfv-orange"
+ },
+ {
+ "name": "unknown-pod",
+ "creation_date": "2015-11-30 08:55:02.550465",
+ "role": "",
+ "mode": "undefined",
+ "_id": "565c0ee6514bc5087f2ddcf7",
+ "details": null
+ },
+ {
+ "name": "huawei-pod1",
+ "creation_date": "",
+ "role": "",
+ "mode": "metal",
+ "_id": "566fea58514bc5068a345d4b",
+ "details": ""
+ },
+ {
+ "name": "intel-pod5",
+ "creation_date": "2015-12-15 10:24:53.476549",
+ "role": "",
+ "mode": "metal",
+ "_id": "566fea75514bc5068a345d4c",
+ "details": null
+ },
+ {
+ "name": "intel-pod3",
+ "creation_date": "2015-12-21 17:38:31.435593",
+ "role": "",
+ "mode": "metal",
+ "_id": "56783917514bc5068a345d97",
+ "details": null
+ },
+ {
+ "name": "ericsson-pod1",
+ "creation_date": "2015-12-22 07:21:03.765581",
+ "role": "",
+ "mode": "metal",
+ "_id": "5678f9df514bc5068a345d98",
+ "details": null
+ },
+ {
+ "name": "ericsson-pod2",
+ "creation_date": "2015-12-22 07:21:18.173966",
+ "role": "",
+ "mode": "metal",
+ "_id": "5678f9ee514bc5068a345d99",
+ "details": null
+ },
+ {
+ "name": "dell-us-testing-bm-1",
+ "creation_date": "2016-01-08 12:41:54.097114",
+ "role": "",
+ "mode": "metal",
+ "_id": "568fae92514bc5068a60e7d2",
+ "details": null
+ },
+ {
+ "name": "dell-us-deploying-bm-3",
+ "creation_date": "2016-01-08 14:13:16.740415",
+ "role": "",
+ "mode": null,
+ "_id": "568fc3fc514bc5068a60e7d4",
+ "details": null
+ },
+ {
+ "name": "dell-us-deploying-bm-2",
+ "creation_date": "2016-01-08 14:15:54.037500",
+ "role": "",
+ "mode": null,
+ "_id": "568fc49a514bc5068a60e7d5",
+ "details": null
+ },
+ {
+ "name": "dell-us-deploying-bm3",
+ "creation_date": "2016-01-15 12:14:20.956198",
+ "role": "",
+ "mode": "metal",
+ "_id": "5698e29c514bc56e65a47bc8",
+ "details": null
+ },
+ {
+ "name": "intel-pod6",
+ "creation_date": "2016-01-22 13:32:18.767326",
+ "role": "",
+ "mode": "metal",
+ "_id": "56a22f62514bc541f885b2c0",
+ "details": null
+ },
+ {
+ "name": "huawei-virtual2",
+ "creation_date": "",
+ "role": "",
+ "mode": "virtual",
+ "_id": "56a9d7ac851d7e6a0f74930d",
+ "details": ""
+ },
+ {
+ "name": "huawei-virtual1",
+ "creation_date": "",
+ "role": "",
+ "mode": "virtual",
+ "_id": "56a9f411851d7e6a0f749313",
+ "details": ""
+ },
+ {
+ "name": "huawei-virtual3",
+ "creation_date": "",
+ "role": "",
+ "mode": "virtual",
+ "_id": "56e67ba6851d7e4b188676bc",
+ "details": ""
+ },
+ {
+ "name": "huawei-virtual4",
+ "creation_date": "",
+ "role": "",
+ "mode": "virtual",
+ "_id": "56e67bb6851d7e4b188676bd",
+ "details": ""
+ },
+ {
+ "name": "intel-pod8",
+ "creation_date": "2016-03-14 08:52:47.576623",
+ "role": "",
+ "mode": "metal",
+ "_id": "56e67bdf851d7e4b188676be",
+ "details": null
+ },
+ {
+ "name": "intel-pod7",
+ "creation_date": "2016-03-14 08:53:00.757525",
+ "role": "",
+ "mode": "metal",
+ "_id": "56e67bec851d7e4b188676c0",
+ "details": null
+ },
+ {
+ "name": "huawei-pod2",
+ "creation_date": "",
+ "role": "",
+ "mode": "metal",
+ "_id": "56e67c35851d7e4b188676c1",
+ "details": ""
+ },
+ {
+ "name": "ericsson-virtual1",
+ "creation_date": "2016-03-14 08:58:06.432105",
+ "role": "",
+ "mode": "virtual",
+ "_id": "56e67d1e851d7e4b188676c2",
+ "details": null
+ },
+ {
+ "name": "arm-pod1",
+ "creation_date": "2016-05-05 09:18:54.879497",
+ "role": "",
+ "mode": "metal",
+ "_id": "572b0ffe9377c51472b7878f",
+ "details": null
+ },
+ {
+ "name": "zte-pod1",
+ "creation_date": "2016-05-12 03:36:56.091397",
+ "role": "",
+ "mode": "metal",
+ "_id": "5733fa589377c548e8df3834",
+ "details": null
+ },
+ {
+ "name": "intel-virtual1",
+ "creation_date": "2016-08-23 17:22:30.901081",
+ "role": null,
+ "mode": "virtual",
+ "_id": "57bc86561d2c6e000ab19d93",
+ "details": null
+ },
+ {
+ "name": "intel-virtual2",
+ "creation_date": "2016-08-23 17:24:23.143681",
+ "role": null,
+ "mode": "virtual",
+ "_id": "57bc86c71d2c6e000ab19d94",
+ "details": null
+ },
+ {
+ "name": "zte-pod2",
+ "creation_date": "2016-09-06 09:49:20.228736",
+ "role": "",
+ "mode": "metal",
+ "_id": "57ce91201d2c6e000ab1c261",
+ "details": ""
+ },
+ {
+ "name": "zte-pod3",
+ "creation_date": "2016-09-06 09:49:26.019816",
+ "role": "",
+ "mode": "metal",
+ "_id": "57ce91261d2c6e000ab1c263",
+ "details": ""
+ },
+ {
+ "name": "arm-pod3",
+ "creation_date": "2016-09-12 09:47:50.791351",
+ "role": "",
+ "mode": "metal",
+ "_id": "57d679c61d2c6e000ab1d6bd",
+ "details": "ARM POD3"
+ },
+ {
+ "name": "cisco-pod1",
+ "creation_date": "2016-09-13 13:01:21.906958",
+ "role": "Community lab",
+ "mode": "metal",
+ "_id": "57d7f8a11d2c6e000ab1db88",
+ "details": "not yet declared in CI but needed to validate vpp scenarios for Colorado"
+ },
+ {
+ "name": "ool-virtual1",
+ "creation_date": "2016-09-19 12:43:50.313032",
+ "role": "",
+ "mode": "virtual",
+ "_id": "57dfdd861d2c6e000ab1f37b",
+ "details": "Okinawa lab"
+ },
+ {
+ "name": "ericsson-pod3",
+ "creation_date": "2016-09-26 09:45:40.565795",
+ "role": "",
+ "mode": "metal",
+ "_id": "57e8ee441d2c6e000ab20fa9",
+ "details": ""
+ },
+ {
+ "name": "ericsson-pod4",
+ "creation_date": "2016-09-26 09:45:48.980198",
+ "role": "",
+ "mode": "metal",
+ "_id": "57e8ee4c1d2c6e000ab20faa",
+ "details": ""
+ },
+ {
+ "name": "ericsson-virtual2",
+ "creation_date": "2016-09-26 09:46:05.508776",
+ "role": "",
+ "mode": "virtual",
+ "_id": "57e8ee5d1d2c6e000ab20fac",
+ "details": ""
+ },
+ {
+ "name": "ericsson-virtual3",
+ "creation_date": "2016-09-26 09:46:10.244443",
+ "role": "",
+ "mode": "virtual",
+ "_id": "57e8ee621d2c6e000ab20fad",
+ "details": ""
+ },
+ {
+ "name": "ericsson-virtual4",
+ "creation_date": "2016-09-26 09:46:14.734383",
+ "role": "",
+ "mode": "virtual",
+ "_id": "57e8ee661d2c6e000ab20fae",
+ "details": ""
+ },
+ {
+ "name": "ericsson-virtual5",
+ "creation_date": "2016-09-26 09:46:19.477110",
+ "role": "",
+ "mode": "virtual",
+ "_id": "57e8ee6b1d2c6e000ab20faf",
+ "details": ""
+ },
+ {
+ "name": "intel-pod9",
+ "creation_date": "2016-11-23 14:07:35.963037",
+ "role": "",
+ "mode": "metal",
+ "_id": "5835a2a71d2c6e000ab2bb4b",
+ "details": "https://wiki.opnfv.org/display/pharos/Intel+Pod9"
+ },
+ {
+ "name": "huawei-pod3",
+ "creation_date": "2017-01-17 13:36:03.908341",
+ "role": "production-ci",
+ "mode": "metal",
+ "_id": "587e1dc38cf551000c780eda",
+ "details": ""
+ },
+ {
+ "name": "huawei-pod4",
+ "creation_date": "2017-01-17 13:36:10.759860",
+ "role": "production-ci",
+ "mode": "metal",
+ "_id": "587e1dca8cf551000c780edb",
+ "details": ""
+ },
+ {
+ "name": "huawei-pod5",
+ "creation_date": "2017-01-17 13:36:15.447849",
+ "role": "production-ci",
+ "mode": "metal",
+ "_id": "587e1dcf8cf551000c780edc",
+ "details": ""
+ },
+ {
+ "name": "huawei-pod6",
+ "creation_date": "2017-01-18 10:53:10.586724",
+ "role": "production-ci",
+ "mode": "metal",
+ "_id": "587f49168cf551000c780f5e",
+ "details": ""
+ },
+ {
+ "name": "huawei-pod7",
+ "creation_date": "2017-01-18 10:53:15.373953",
+ "role": "production-ci",
+ "mode": "metal",
+ "_id": "587f491b8cf551000c780f5f",
+ "details": ""
+ },
+ {
+ "name": "huawei-pod12",
+ "creation_date": "2017-02-09 07:22:46.425836",
+ "role": "production-ci",
+ "mode": "metal",
+ "_id": "589c18c68cf551000c7820e8",
+ "details": ""
+ },
+ {
+ "name": "intel-pod12",
+ "creation_date": "2017-05-17 14:11:18.852731",
+ "role": "production-ci",
+ "details": "performance",
+ "mode": "metal",
+ "_id": "591c5a06ee2e3f000a50f0b4"
+ },
+ {
+ "name": "cisco-vina-pod10",
+ "creation_date": "2017-05-29 09:13:20.818497",
+ "role": "production-ci",
+ "mode": "metal",
+ "_id": "592be63078a2ad000ae6aad7",
+ "details": ""
+ },
+ {
+ "name": "zte-virtual1",
+ "creation_date": "2017-05-30 14:11:04.264967",
+ "role": "",
+ "mode": "baremetal",
+ "_id": "592d7d7878a2ad000ae6ac49",
+ "details": ""
+ }
+] \ No newline at end of file
diff --git a/tools/docker/results/resultsdb/projects.json b/tools/docker/results/resultsdb/projects.json
new file mode 100644
index 00000000..81c3d77f
--- /dev/null
+++ b/tools/docker/results/resultsdb/projects.json
@@ -0,0 +1,8 @@
+[
+ {
+ "_id": "5641e12d514bc5174df3d77e",
+ "description": "OPNFV vsperf project",
+ "name": "vsperf",
+ "creation_date": "2015-11-10 12:21:01.464979"
+ }
+]
diff --git a/tools/docker/testcontrol/auto/controller/Dockerfile b/tools/docker/testcontrol/auto/controller/Dockerfile
new file mode 100644
index 00000000..4fbf7294
--- /dev/null
+++ b/tools/docker/testcontrol/auto/controller/Dockerfile
@@ -0,0 +1,23 @@
+FROM python:3.6
+LABEL maintainer="sridhar.rao@spirent.com"
+
+ENV GRPC_PYTHON_VERSION 1.4.0
+RUN apt-get update && apt-get -y install python3-pip && apt-get -y install openssh-server
+RUN pip3 install grpcio==${GRPC_PYTHON_VERSION} grpcio-tools==${GRPC_PYTHON_VERSION}
+RUN pip3 install paramiko
+RUN pip3 install chainmap
+RUN pip3 install oslo.utils
+RUN pip3 install scp
+
+WORKDIR /usr/src/app
+
+COPY ./vsperf ./vsperf
+
+VOLUME ["/usr/src/app/vsperf"]
+
+EXPOSE 50052
+
+CMD ["python3", "./vsperf/vsperf_controller.py"]
+
+#CMD tail -f /dev/null
+
diff --git a/tools/docker/testcontrol/auto/controller/list.env b/tools/docker/testcontrol/auto/controller/list.env
new file mode 100644
index 00000000..2883021b
--- /dev/null
+++ b/tools/docker/testcontrol/auto/controller/list.env
@@ -0,0 +1,13 @@
+DUT_IP_ADDRESS=10.10.120.24
+DUT_USERNAME=opnfv
+DUT_PASSWORD=opnfv
+
+TGEN_IP_ADDRESS=10.10.120.25
+
+VSPERF_TESTS=phy2phy_tput,pvp_tput
+VSPERF_CONFFILE=vsperf.conf
+
+VSPERF_TRAFFICGEN_MODE=NO
+
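+# Optional: start collectd/filebeat on the DUT before the test run
+# (read by vsperf_controller.py; treated as NO when unset).
+START_COLLECTD=NO
+START_BEATS=NO
+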
+CLEAN_UP=NO
+
diff --git a/tools/docker/testcontrol/auto/controller/vsperf/__init__.py b/tools/docker/testcontrol/auto/controller/vsperf/__init__.py
new file mode 100644
index 00000000..ad0ebec3
--- /dev/null
+++ b/tools/docker/testcontrol/auto/controller/vsperf/__init__.py
@@ -0,0 +1 @@
+#### Empty
diff --git a/tools/docker/testcontrol/auto/controller/vsperf/vsperf.conf b/tools/docker/testcontrol/auto/controller/vsperf/vsperf.conf
new file mode 100644
index 00000000..50d40f49
--- /dev/null
+++ b/tools/docker/testcontrol/auto/controller/vsperf/vsperf.conf
@@ -0,0 +1,21 @@
+VSWITCH_BRIDGE_NAME = 'vsperf-br0'
+WHITELIST_NICS = ['02:00.0', '02:00.1']
+TRAFFICGEN = 'Trex'
+TRAFFICGEN_TREX_HOST_IP_ADDR = '10.10.120.25'
+TRAFFICGEN_TREX_USER = 'root'
+TRAFFICGEN_TREX_BASE_DIR = '/root/trex_2.37/scripts/'
+TRAFFICGEN_TREX_LINE_SPEED_GBPS = '10'
+TRAFFICGEN_TREX_PORT1 = '0000:81:00.0'
+TRAFFICGEN_TREX_PORT2 = '0000:81:00.1'
+TRAFFICGEN_TREX_PROMISCUOUS = False
+TRAFFICGEN_DURATION=1
+TRAFFICGEN_LOSSRATE=0
+TRAFFICGEN_RFC2544_TESTS=10
+#TRAFFICGEN_PKT_SIZES=(64,128,256,512,1024,1280,1518)
+TRAFFICGEN_PKT_SIZES=(64,)
+GUEST_TESTPMD_FWD_MODE = ['io']
+GUEST_IMAGE = ['/home/opnfv/vnfs/vloop-vnf-ubuntu-18.04_20180920.qcow2']
+TRAFFICGEN_TREX_LATENCY_PPS = 1000
+TRAFFICGEN_TREX_RFC2544_BINARY_SEARCH_LOSS_VERIFICATION = True
+TRAFFICGEN_TREX_RFC2544_MAX_REPEAT = 2
+
diff --git a/tools/docker/testcontrol/auto/controller/vsperf/vsperf_controller.py b/tools/docker/testcontrol/auto/controller/vsperf/vsperf_controller.py
new file mode 100644
index 00000000..1b088fea
--- /dev/null
+++ b/tools/docker/testcontrol/auto/controller/vsperf/vsperf_controller.py
@@ -0,0 +1,469 @@
+# Copyright 2018-19 Spirent Communications.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+VSPERF-controller
+"""
+
+# Fetch the controller's environment variables. Configure or modify
+# the list.env file to set them.
+
+#pylint: disable=global-statement,no-else-continue
+#pylint: disable=too-many-branches
+
+import os
+import sys
+import time
+import math
+import ast
+from utils import ssh
+
+_ONE_DAY_IN_SECONDS = 60 * 60 * 24
+TIMER = float()
+
+
+DUT_IP = os.getenv('DUT_IP_ADDRESS')
+DUT_USER = os.getenv('DUT_USERNAME')
+DUT_PWD = os.getenv('DUT_PASSWORD')
+
+TGEN_IP = os.getenv('TGEN_IP_ADDRESS')
+
+VSPERF_TEST = os.getenv('VSPERF_TESTS')
+VSPERF_CONF = os.getenv('VSPERF_CONFFILE')
+VSPERF_TRAFFICGEN_MODE = str(os.getenv('VSPERF_TRAFFICGEN_MODE'))
+
+# Default to 'no' so a missing variable does not break the .lower() checks below.
+START_COLLECTD = os.getenv('START_COLLECTD', 'no')
+START_BEATS = os.getenv('START_BEATS', 'no')
+CLEAN_UP = os.getenv('CLEAN_UP', 'no')
+
+DUT_CLIENT = None
+TGEN_CLIENT = None
+SANITY_CHECK_DONE_LIST = list()
+
+
+def host_connect():
+ """
+ Handle host connectivity to DUT
+ """
+ global DUT_CLIENT
+ DUT_CLIENT = ssh.SSH(host=DUT_IP, user=DUT_USER, password=DUT_PWD)
+ print("DUT Successfully Connected ..............................................[OK] \n ")
+
+def upload_test_config_file():
+ """
+ Upload the test configuration file to the DUT.
+ """
+ localpath = '/usr/src/app/vsperf/vsperf.conf'
+ if not os.path.exists(localpath):
+ print("VSPERF Test config File does not exists.......................[Failed]")
+ return
+ remotepath = '~/vsperf.conf'
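+ # remotepath[2:] strips the leading '~/' so find matches the bare filename.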
+ check_test_config_cmd = "find ~/ -maxdepth 1 -name '{}'".format(
+ remotepath[2:])
+ check_test_result = str(DUT_CLIENT.execute(check_test_config_cmd)[1])
+ if remotepath[2:] in check_test_result:
+ DUT_CLIENT.run("rm -f {}".format(remotepath[2:]))
+ DUT_CLIENT.put_file(localpath, remotepath)
+ check_test_config_cmd_1= "find ~/ -maxdepth 1 -name '{}'".format(
+ remotepath[2:])
+ check_test_result_1= str(DUT_CLIENT.execute(check_test_config_cmd)[1])
+ if remotepath[2:] in check_test_result_1:
+ print(
+ "Test Configuration File Uploaded on DUT-Host.............................[OK] \n ")
+ else:
+ print("VSPERF Test config file upload failed.....................................[Critical]")
+
+def start_beats():
+ """
+ Start Filebeat on the DUT.
+ """
+ run_cmd = "echo '{}' | sudo -S service filebeat start".format(DUT_PWD)
+ DUT_CLIENT.run(run_cmd, pty=True)
+ print(
+ "Beats are started on DUT-Host............................................[OK] \n")
+
+def start_collectd():
+ """
+ Start collectd on the DUT.
+ """
+ run_cmd = "echo '{}' | sudo -S service collectd start".format(DUT_PWD)
+ DUT_CLIENT.run(run_cmd, pty=True)
+ print(
+ "Collectd is started on DUT-Host............................................[OK] \n")
+
+def run_vsperf_test():
+ """
+ Run the requested VSPERF tests on the DUT.
+ """
+ global TIMER
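+ # Clear stale hugepage files (/mnt/huge) left over from a previous run.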
+ rmv_cmd = "cd /mnt/huge && echo {} | sudo -S rm -rf *".format(DUT_PWD)
+ DUT_CLIENT.run(rmv_cmd, pty=True)
+ cmd = "source ~/vsperfenv/bin/activate ; "
+ #cmd = "scl enable python33 bash ; "
+ cmd += "cd vswitchperf && "
+ cmd += "./vsperf "
+ if VSPERF_CONF:
+ cmd += "--conf-file ~/vsperf.conf "
+ if "yes" in VSPERF_TRAFFICGEN_MODE.lower():
+ cmd += "--mode trafficgen"
+ vsperf_test_list = VSPERF_TEST.split(",")
+ print(vsperf_test_list)
+ for test in vsperf_test_list:
+ atest = cmd
+ atest += test
+ DUT_CLIENT.run(atest, pty=True)
+ print(
+ "Test Successfully running................................................[OK]\n ")
+
+
+def test_status():
+ """
+ Check the test status after the tests have run.
+ """
+ testtype_list = VSPERF_TEST.split(",")
+ num_test = len(testtype_list)
+ test_success = []
+ test_failed = []
+ testtype_list_len = len(testtype_list)
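+ # A test counts as successful when its result directory contains all three
+ # report files: .csv, .md and .rst.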
+ for test in testtype_list:
+ passed_minutes = 5
+ latest_result_cmd = "find /tmp -mindepth 1 -type d -cmin -{} -printf '%f'".format(
+ passed_minutes)
+ test_result_dir = str(
+ (DUT_CLIENT.execute(latest_result_cmd)[1]).split('find')[0])
+ test_date_cmd = "date +%F"
+ test_date = str(DUT_CLIENT.execute(test_date_cmd)[1]).replace("\n", "")
+ if test_date in test_result_dir:
+ testcase_check_cmd = "cd /tmp && cd `ls -t | grep results | head"
+ testcase_check_cmd += " -{} | tail -1` && find . -maxdepth 1 -name '*{}*'".\
+ format(testtype_list_len, test)
+ testcase_check_output = str(
+ DUT_CLIENT.execute(testcase_check_cmd)[1]).split('\n', 2)
+ check = 0
+ for i in testcase_check_output:
+ if (".csv" in i) or (".md" in i) or (".rst" in i):
+ check += 1
+ if check == 3:
+ test_success.append(test)
+ else:
+ test_failed.append(test)
+ testtype_list_len -= 1
+ if num_test == len(test_success):
+ print("All Test Successfully Completed on DUT-Host Results... [OK]")
+ elif not test_success:
+ print("All Test Failed on DUT-Host \nResults... [Failed]")
+ else:
+ print(
+ "Only {} Test failed Results ... [Failed]\n"\
+ "All other Test Successfully Completed on DUT-Host Results... [OK] ".\
+ format(test_failed))
+
+
+def vsperf_remove():
+ """
+ Actual removal of the VSPERF
+ """
+ vsperf_rm_cmd = "echo '{}' | sudo -S rm -r ~/vswitchperf".format(DUT_PWD)
+ DUT_CLIENT.run(vsperf_rm_cmd)
+ vsperfenv_rm_cmd = "echo '{}' | sudo -S rm -r -f ~/vsperfenv".\
+ format(DUT_PWD)
+ DUT_CLIENT.run(vsperfenv_rm_cmd)
+
+
+def remove_uploaded_config():
+ """
+ Remove all the uploaded configuration files
+ """
+ vconfig_rm_cmd = "rm ~/vsperf.conf"
+ DUT_CLIENT.run(vconfig_rm_cmd)
+ cdconfig_rm_cmd = "echo '{}' | sudo -S rm /opt/collectd/etc/collectd.conf".\
+ format(DUT_PWD)
+ DUT_CLIENT.run(cdconfig_rm_cmd)
+
+
+def result_folders_remove():
+ """
+ Remove result folder on DUT
+ """
+ remove_cmd = "rm -r /tmp/*results*"
+ DUT_CLIENT.run(remove_cmd)
+
+
+def collectd_remove():
+ """
+ Remove collectd from DUT
+ """
+ collectd_dwn_rm_cmd = "echo '{}' | sudo -S rm -r -f ~/collectd".format(
+ DUT_PWD)
+ DUT_CLIENT.run(collectd_dwn_rm_cmd)
+ collectd_rm_cmd = "echo '{}' | sudo -S rm -r -f /opt/collectd".format(
+ DUT_PWD)
+ DUT_CLIENT.run(collectd_rm_cmd)
+
+
+def terminate_vsperf():
+ """
+ Terminate the VSPERF and kill processes
+ """
+ stress_kill_cmd = "echo '{}' | sudo -S pkill stress &> /dev/null".format(
+ DUT_PWD)
+ python3_kill_cmd = "echo '{}' | sudo -S pkill python3 &> /dev/null".format(
+ DUT_PWD)
+ qemu_kill_cmd = "echo '{}' | sudo -S killall -9 qemu-system-x86_64 &> /dev/null".format(
+ DUT_PWD)
+ DUT_CLIENT.run(stress_kill_cmd)
+ DUT_CLIENT.run(python3_kill_cmd)
+ DUT_CLIENT.run(qemu_kill_cmd)
+
+ # sometimes qemu refuses to terminate, so wait a bit and kill it again
+ qemu_check_cmd = "pgrep qemu-system-x86_64"
+ qemu_cmd_response = DUT_CLIENT.execute(qemu_check_cmd)[1]
+
+ if qemu_cmd_response != '':
+ time.sleep(5)
+ DUT_CLIENT.run(qemu_kill_cmd)
+ time.sleep(5)
+
+ ovs_kill_cmd = "echo '{}' | sudo pkill ovs-vswitchd &> /dev/null".format(
+ DUT_PWD)
+ ovsdb_kill_cmd = "echo '{}' | sudo pkill ovsdb-server &> /dev/null".format(
+ DUT_PWD)
+ vppctl_kill_cmd = "echo '{}' | sudo pkill vppctl &> /dev/null".format(
+ DUT_PWD)
+ vpp_kill_cmd = "echo '{}' | sudo pkill vpp &> /dev/null".format(DUT_PWD)
+ vpp_cmd = "echo '{}' | sudo pkill -9 vpp &> /dev/null".format(DUT_PWD)
+
+ DUT_CLIENT.run(ovs_kill_cmd)
+ time.sleep(1)
+ DUT_CLIENT.run(ovsdb_kill_cmd)
+ time.sleep(1)
+ DUT_CLIENT.run(vppctl_kill_cmd)
+ time.sleep(1)
+ DUT_CLIENT.run(vpp_kill_cmd)
+ time.sleep(1)
+ DUT_CLIENT.run(vpp_cmd)
+ time.sleep(1)
+
+ print(
+ "All the VSPERF related process terminated successfully..............[OK]")
+
+
+def sanity_collectd_check():
+ """
+ Check and verify collectd is able to run and start properly
+ """
+ global SANITY_CHECK_DONE_LIST
+ check_collectd_cmd = "find /opt -maxdepth 1 -name 'collectd'"
+ check_test_result = str(DUT_CLIENT.execute(check_collectd_cmd)[1])
+ if "collectd" in check_test_result:
+ check_collectd_run_cmd = "echo {} | sudo -S service collectd start".format(
+ DUT_PWD)
+ DUT_CLIENT.run(check_collectd_run_cmd, pty=True)
+ check_collectd_status_cmd = "ps aux | grep collectd"
+ check_collectd_status = str(
+ DUT_CLIENT.execute(check_collectd_status_cmd)[1])
+ if "/sbin/collectd" in check_collectd_status:
+ SANITY_CHECK_DONE_LIST.append(int(1))
+ print(
+ "Collectd is working Fine ................................................[OK] \n ")
+ else:
+ print(
+ "Collectd Fail to Start, Install correctly before running Test....[Failed]\n ")
+ else:
+ print(
+ "Collectd is not installed yet........................................[Failed]\n")
+
+
+def sanity_vnf_path():
+ """
+ Check if VNF image is available on the configured path in Test Config File
+ """
+ # fetch the VNF path we placed in vsperf.conf file
+ global SANITY_CHECK_DONE_LIST
+ vsperf_conf_path = open('/usr/src/app/vsperf/vsperf.conf')
+ vsperf_conf_read = vsperf_conf_path.readlines()
+ for i in vsperf_conf_read:
+ if 'GUEST_IMAGE' in i:
+ vnf_image_path = i.split("'")[1]
+ vnf_path_check_cmd = "find {}".format(vnf_image_path)
+ vnf_path_check_result = str(
+ DUT_CLIENT.execute(vnf_path_check_cmd)[1])
+ if vnf_image_path in vnf_path_check_result:
+ SANITY_CHECK_DONE_LIST.append(int(2))
+ print(
+ "Test Configratuion file has Correct VNF path information on DUT-Host.." \
+ "...[OK]\n ")
+ else:
+ print(
+ "Test Configuration file has incorrect VNF path information......" \
+ "....[FAILED]\n")
+
+def sanity_vsperf_check():
+ """
+ We have to make sure that VSPERF is installed correctly
+ """
+ global SANITY_CHECK_DONE_LIST
+ vsperf_check_command = "source ~/vsperfenv/bin/activate ; cd vswitchperf* && ./vsperf --help"
+ vsperf_check_cmd_result = str(DUT_CLIENT.execute(vsperf_check_command)[1])
+ vsperf_verify_list = [
+ 'usage',
+ 'positional arguments',
+ 'optional arguments',
+ 'test selection options',
+ 'test behavior options']
+ for idx, i in enumerate(vsperf_verify_list, start=1):
+ if str(i) in vsperf_check_cmd_result:
+ if idx < 5:
+ continue
+ elif idx == 5:
+ SANITY_CHECK_DONE_LIST.append(int(3))
+ print("VSPERF Installed Correctly and Working fine......................." \
+ ".......[OK]\n")
+ else:
+ print(
+ "VSPERF DID Not Installed Correctly , INSTALL IT AGAIN...........[Critical]\n")
+ else:
+ print(
+ "VSPERF DID Not Installed Correctly , INSTALL IT AGAIN................[Critical]\n")
+ break
+
+def variable_from_test_config(aparameter):
+ """This function can be use to read any configuration paramter from vsperf.conf"""
+ read_cmd = 'cat ~/vsperf.conf | grep "{}"'.format(aparameter)
+ read_cmd_output = str(DUT_CLIENT.execute(read_cmd)[1])
+ print(read_cmd_output)
+ if not read_cmd_output or '#' in read_cmd_output:
+ return 0
+ return read_cmd_output.split("=")[1].strip()
+
+def cpumask2coreids(mask):
+ """conver mask to coreids"""
+ intmask = int(mask, 16)
+ i = 1
+ coreids = []
+ while i < intmask:
+ if i & intmask:
+ coreids.append(str(math.frexp(i)[1]-1))
+ i = i << 1
+ return coreids
+
+def sanity_cpu_allocation_check():
+ """It will check the cpu allocation before run test"""
+ global SANITY_CHECK_DONE_LIST
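+ # Compare the PMD core mask against the vhost CPU map; whichever of the two
+ # is missing from the test config falls back to the VSPERF default settings.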
+ read_setting_cmd = "source vsperfenv/bin/activate ; cd vswitchperf* && "
+ read_setting_cmd += './vsperf --list-settings'
+ default_vsperf_settings = ast.literal_eval(str(DUT_CLIENT.execute(read_setting_cmd)[1]))
+ default_cpu_map = default_vsperf_settings["VSWITCH_VHOST_CPU_MAP"]
+ default_vswitch_pmd_cpu_mask = str(default_vsperf_settings["VSWITCH_PMD_CPU_MASK"])
+ default_vswitch_vhost_cpu_map = [str(x) for x in default_cpu_map]
+ vswitch_pmd_cpu_mask = variable_from_test_config("VSWITCH_PMD_CPU_MASK")
+ vswitch_cpu_map = (variable_from_test_config("VSWITCH_VHOST_CPU_MAP"))
+ vswitch_vhost_cpu_map = 0
+ if vswitch_cpu_map != 0:
+ vswitch_vhost_cpu_map = [str(x) for x in ast.literal_eval(vswitch_cpu_map)]
+
+ if vswitch_pmd_cpu_mask == 0 and vswitch_vhost_cpu_map == 0:
+ print("CPU allocation Check Done,"\
+ "\nNo vswitch_pmd_cpu_mask or vswitch_vhost_cpu_map assign in test config file\n" \
+ "Using Default Settings ..................................................[OK]\n")
+ elif vswitch_pmd_cpu_mask != 0 and vswitch_vhost_cpu_map == 0:
+ core_id = cpumask2coreids(vswitch_pmd_cpu_mask)
+ print(core_id)
+ if len(default_vswitch_vhost_cpu_map) >= len(core_id):
+ if all(elem in default_vswitch_vhost_cpu_map for elem in core_id):
+ print("CPU allocation properly done on DUT-Host.................[OK]\n")
+ else:
+ print("CPU allocation not done properly on DUT-Host............[Failed]\n")
+ else:
+ print("CPU allocation not done properly on DUT-Host............[Failed]\n")
+ elif vswitch_pmd_cpu_mask == 0 and vswitch_vhost_cpu_map != 0:
+ core_id_1 = cpumask2coreids(default_vswitch_pmd_cpu_mask)
+ print(core_id_1)
+ if len(vswitch_vhost_cpu_map) >= len(core_id_1):
+ if all(elem in vswitch_vhost_cpu_map for elem in core_id_1):
+ print("CPU allocation properly done on DUT-Host.................[OK]\n")
+ else:
+ print("CPU allocation not done properly on DUT-Host............[Failed]\n")
+ else:
+ print("CPU allocation not done properly on DUT-Host............[Failed]\n")
+ else:
+ core_id_2 = cpumask2coreids(vswitch_pmd_cpu_mask)
+ print(core_id_2)
+ if len(vswitch_vhost_cpu_map) >= len(core_id_2):
+ if all(elem in vswitch_vhost_cpu_map for elem in core_id_2):
+ print("CPU allocation properly done on DUT-Host.................[OK]\n")
+ else:
+ print("CPU allocation not done properly on DUT-Host............[Failed]\n")
+ else:
+ print("CPU allocation not done properly on DUT-Host............[Failed]\n")
+
+
+
+def sanity_dut_conn_tgen_check():
+ """
+ Confirm that the DUT can reach the traffic generator host.
+ """
+ global SANITY_CHECK_DONE_LIST
+ tgen_connectivity_check_cmd = "ping {} -c 1".format(TGEN_IP)
+ tgen_connectivity_check_result = int(DUT_CLIENT.execute(tgen_connectivity_check_cmd)[0])
+ if tgen_connectivity_check_result == 0:
+ SANITY_CHECK_DONE_LIST.append(int(5))
+ print(
+ "DUT-Host is successfully reachable to Traffic Generator Host.............[OK]\n")
+ else:
+ print(
+ "DUT-host is unsuccessful to reach the Traffic Generator Host.............[Failed]")
+ print(
+ "Make sure to establish connection before running Test...............[Critical]\n")
+
+if DUT_IP:
+ host_connect()
+if not DUT_CLIENT:
+ print('Failed to connect to DUT ...............[Critical]')
+ sys.exit()
+else:
+ upload_test_config_file()
+ sanity_vnf_path()
+ sanity_cpu_allocation_check()
+ sanity_collectd_check()
+ sanity_vsperf_check()
+ sanity_dut_conn_tgen_check()
+ if "yes" in START_COLLECTD.lower():
+ start_collectd()
+ if "yes" in START_BEATS.lower():
+ start_beats()
+
+if 'v' in VSPERF_TEST:
+ if len(SANITY_CHECK_DONE_LIST) != 4:
+ print("Certain Sanity Checks Failed\n" \
+ "You can make changes based on the outputs and run" \
+ "the testcontrol auto container again")
+ else:
+ run_vsperf_test()
+ test_status()
+else:
+ if len(SANITY_CHECK_DONE_LIST) != 3:
+ print("Certain Sanity Checks Failed\n" \
+ "You can make changes based on the outputs and run" \
+ "the testcontrol auto container again")
+ else:
+ run_vsperf_test()
+ test_status()
+
+
+if "yes" in CLEAN_UP.lower():
+ vsperf_remove()
+ remove_uploaded_config()
+ result_folders_remove()
+ collectd_remove()
+ terminate_vsperf()
diff --git a/tools/docker/testcontrol/auto/docker-compose.yml b/tools/docker/testcontrol/auto/docker-compose.yml
new file mode 100644
index 00000000..50c528a6
--- /dev/null
+++ b/tools/docker/testcontrol/auto/docker-compose.yml
@@ -0,0 +1,22 @@
+version: '2'
+
+services:
+ testcontrol:
+ build:
+ context: ./controller
+ volumes:
+ - ./controller/vsperf:/vsperf
+ env_file:
+ - ./controller/list.env
+ ports:
+ - 50052
+
+
+
+
+
+
+
+
+
+
diff --git a/tools/docker/testcontrol/interactive/controller/Dockerfile b/tools/docker/testcontrol/interactive/controller/Dockerfile
new file mode 100644
index 00000000..16cf59fd
--- /dev/null
+++ b/tools/docker/testcontrol/interactive/controller/Dockerfile
@@ -0,0 +1,22 @@
+FROM python:3.6
+LABEL maintainer="sridhar.rao@spirent.com"
+
+ENV GRPC_PYTHON_VERSION 1.4.0
+RUN apt-get update && apt-get -y install python3-pip
+RUN pip3 install grpcio==${GRPC_PYTHON_VERSION} grpcio-tools==${GRPC_PYTHON_VERSION}
+RUN pip3 install paramiko
+RUN pip3 install chainmap
+RUN pip3 install oslo.utils
+RUN pip3 install scp
+
+WORKDIR /usr/src/app
+
+COPY ./vsperf ./vsperf
+
+VOLUME ["/usr/src/app/vsperf"]
+
+EXPOSE 50052
+
+CMD ["python3", "./vsperf/vsperf_controller.py"]
+
+#CMD tail -f /dev/null
diff --git a/tools/docker/testcontrol/interactive/controller/vsperf/__init__.py b/tools/docker/testcontrol/interactive/controller/vsperf/__init__.py
new file mode 100644
index 00000000..ad0ebec3
--- /dev/null
+++ b/tools/docker/testcontrol/interactive/controller/vsperf/__init__.py
@@ -0,0 +1 @@
+#### Empty
diff --git a/tools/docker/testcontrol/interactive/controller/vsperf/output.txt b/tools/docker/testcontrol/interactive/controller/vsperf/output.txt
new file mode 100644
index 00000000..912c877b
--- /dev/null
+++ b/tools/docker/testcontrol/interactive/controller/vsperf/output.txt
@@ -0,0 +1 @@
+[INFO ] 2019-08-27 18:09:46,085 : (root) - Overall test report written to "/tmp/results_2019-08-27_18-08-53/OvsDpdkVhost_test_report.rst"
diff --git a/tools/docker/testcontrol/interactive/controller/vsperf/vsperf_controller.py b/tools/docker/testcontrol/interactive/controller/vsperf/vsperf_controller.py
new file mode 100644
index 00000000..d1c3838d
--- /dev/null
+++ b/tools/docker/testcontrol/interactive/controller/vsperf/vsperf_controller.py
@@ -0,0 +1,706 @@
+# Copyright 2018-19 Spirent Communications.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# pylint: disable=R0904
+# pylint: disable=R0902
+# twenty-two public methods / instance attributes are reasonable in this script
+
+"""
+VSPERF docker-controller.
+"""
+
+import io
+import time
+import ast
+import math
+
+from concurrent import futures
+
+import grpc
+from proto import vsperf_pb2
+from proto import vsperf_pb2_grpc
+from utils import ssh
+
+_ONE_DAY_IN_SECONDS = 60 * 60 * 24
+
+
+# pylint: disable=too-few-public-methods,no-self-use
+class PseudoFile(io.RawIOBase):
+ """
+ Handle ssh command output.
+ """
+
+ def write(self, chunk):
+ """
+ Write to file
+ """
+ if "error" in chunk:
+ return
+ with open("./output.txt", "w") as fref:
+ fref.write(chunk)
+
+
+class VsperfController(vsperf_pb2_grpc.ControllerServicer):
+ """
+ Main Controller Class
+ """
+
+ def __init__(self):
+ """
+ Initialization
+ """
+ self.client = None
+ self.dut_check = None
+ self.dut = None
+ self.user = None
+ self.pwd = None
+ self.vsperf_conf = None
+ self.tgen_client = None
+ self.tgen_check = None
+ self.tgen = None
+ self.tgen_user = None
+ self.tgenpwd = None
+ self.tgen_conf = None
+ self.scenario = None
+ self.testcase = None
+ self.tgen_ip_address = None
+ self.testtype = None
+ self.trex_conf = None
+ self.trex_params = None
+ self.conffile = None
+ self.tests_run_check = None
+ self.tgen_start_check = None
+ # Default TGen is T-Rex
+ self.trex_conffile = "trex_cfg.yml"
+ self.collectd_conffile = "collectd.conf"
+ self.test_upload_check = 0
+ self.sanity_check_done_list = list()
+
+ def setup(self):
+ """
+ Performs Setup of the client.
+ """
+ # Just connect to VM.
+ self.client = ssh.SSH(host=self.dut, user=self.user,
+ password=self.pwd)
+ self.client.wait()
+
+ def upload_config(self):
+ """
+ Perform file upload.
+ """
+ # self.client._put_file_shell(self.conffile, '~/vsperf.conf')
+ self.client.put_file(self.conffile, '~/{}'.format(self.conffile))
+ print("No")
+
+ def run_test(self):
+ """
+ Run test
+ """
+ # Hugepages are sometimes left stored in /mnt/huge; removing these
+ # stored hugepages is necessary to free them up before the test
+ rmv_cmd = "cd /mnt/huge && echo {} | sudo -S rm -rf *".format(self.pwd)
+ self.client.run(rmv_cmd, pty=True)
+ cmd = "source ~/vsperfenv/bin/activate ; "
+ #cmd = "scl enable python33 bash ; "
+ cmd += "cd vswitchperf* && "
+ cmd += "./vsperf "
+ if self.vsperf_conf:
+ cmd += "--conf-file ~/{} ".format(self.conffile)
+ # cmd += self.conffile
+ cmd += self.scenario
+ with PseudoFile() as pref:
+ self.client.run(cmd, stdout=pref, pty=True, timeout=0)
+
+ def TestStatus(self, request, context):
+ """
+ Check the test status after the test has been performed
+ """
+ if self.dut_check != 0:
+ return vsperf_pb2.StatusReply(message="DUT-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " DUT-Host.")
+ if self.tests_run_check != 1:
+ return vsperf_pb2.StatusReply(message="No test have ran yet. [!]")
+ testtype_list = request.testtype.split(",")
+ test_success = []
+ test_failed = []
+ testtype_list_len = len(testtype_list)
+ for test in testtype_list:
+ #latest_result_cmd = "find /tmp -mindepth 1 -type d -cmin -5 -printf '%f'"
+ test_result_dir = str((self.client.\
+ execute("find /tmp -mindepth 1 -type d -cmin -5 -printf '%f'")[1]).\
+ split('find')[0])
+ #test_date_cmd = "date +%F"
+ test_date = str(self.client.execute("date +%F")[1]).replace("\n", "")
+ if test_date in test_result_dir:
+ testcase_check_cmd = "cd /tmp && cd `ls -t | grep results | head -{} | tail -1`".\
+ format(testtype_list_len)
+ testcase_check_cmd += " && find . -maxdepth 1 -name '*{}*'".\
+ format(test)
+ testcase_check_output = str(self.client.execute(testcase_check_cmd)[1]).\
+ split('\n', 2)
+ check = 0
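+ # A completed test case leaves .csv, .md and .rst reports in the
+ # results directory; all three must be present for a pass.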
+ for i in testcase_check_output:
+ if (".csv" in i) or (".md" in i) or (".rst" in i):
+ check += 1
+ if check == 3:
+ test_success.append(test)
+ else:
+ test_failed.append(test)
+ testtype_list_len -= 1
+ if len(testtype_list) == len(test_success):
+ return vsperf_pb2.StatusReply(message="All Test Successfully Completed on DUT-Host" \
+ "\nResults... [OK]")
+ if not test_success:
+ return vsperf_pb2.StatusReply(
+ message="All Test Failed on DUT-Host \nResults... [Failed]")
+ return vsperf_pb2.StatusReply(message="Only {} Test failed Results ... [Failed]\n"\
+ "All other Test Successfully Completed on DUT-Host Results... [OK] ".\
+ format(test_failed))
+
+ def HostConnect(self, request, context):
+ """
+ Handle host connectivity command from client
+ """
+ self.dut = request.ip
+ self.user = request.uname
+ self.pwd = request.pwd
+ self.setup()
+ check_cmd = "ls -l"
+ self.dut_check = int(self.client.execute(check_cmd)[0])
+ return vsperf_pb2.StatusReply(message="Successfully Connected")
+
+ def save_chunks_to_file(self, chunks, filename):
+ """
+ Write the output to file
+ """
+ with open(filename, 'w+') as fref:
+ fref.write(chunks)
+
+ def UploadConfigFile(self, request, context):
+ """
+ Handle upload config-file command from client
+ """
+ if self.dut_check != 0:
+ return vsperf_pb2.StatusReply(message="DUT-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " DUT-Host.")
+ chunks = request.Content
+ filename = request.Filename
+ self.conffile = filename
+ self.save_chunks_to_file(chunks, filename)
+ # Check whether the configuration file already exists on the DUT-Host;
+ # if so, remove it first and then upload the new file.
+ check_test_config_cmd = "find ~/ -maxdepth 1 -name {}".format(filename)
+ check_test_result = str(self.client.execute(check_test_config_cmd)[1])
+ if "{}".format(filename) in check_test_result:
+ self.client.run("rm -f {}".format(filename))
+ self.upload_config()
+ self.test_upload_check = 1
+ print("Hello")
+ return vsperf_pb2.UploadStatus(Message="Successfully Uploaded", Code=1)
+
+ def StartTest(self, request, context):
+ """
+ Handle start-test command from client
+ """
+ if self.dut_check != 0:
+ return vsperf_pb2.StatusReply(message="DUT-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " DUT-Host.")
+ sanity_dict = {1:"Check installed VSPERF",
+ 2:"Check Test Config's VNF path is available on DUT-Host",
+ 3:"Check NIC PCIs is available on Traffic Generator",
+ 4:"Check CPU allocation on DUT-Host",
+ 5:"Check installed Collectd",
+ 6:"Check Connection between DUT-Host and Traffic Generator Host"}
+ sanity_dict_option_list = list(sanity_dict.keys())
+ remaining_sanity = [item for item in sanity_dict_option_list if item not in \
+ self.sanity_check_done_list]
+ if remaining_sanity:
+ sanity_return_msg = ""
+ for i_sanity in remaining_sanity:
+ sanity_return_msg += sanity_dict[i_sanity] + "\n"
+ return vsperf_pb2.StatusReply(message="The following sanity checks are either not"\
+ " performed yet or Does not satisfy test requirements" \
+ "\n{}".format(sanity_return_msg))
+ if self.test_upload_check == 0:
+ return vsperf_pb2.StatusReply(message="Test File is not uploaded yet [!] " \
+ "\nUpload Test Configuration File.")
+ if self.tgen_start_check != 1:
+ return vsperf_pb2.StatusReply(message="Traffic Generator has not started yet [!]")
+ self.vsperf_conf = request.conffile
+ self.testtype = request.testtype
+ testtype_list = self.testtype.split(",")
+ self.tests_run_check = 1
+ for test in testtype_list:
+ self.scenario = test
+ self.run_test()
+ return vsperf_pb2.StatusReply(message="Test Successfully Completed")
+
+###### Traffic Generator Related functions ####
+ def TGenHostConnect(self, request, context):
+ """
+ Connect to TGen-Node
+ """
+ self.tgen = request.ip
+ self.tgen_user = request.uname
+ self.tgenpwd = request.pwd
+ self.tgen_setup()
+ check_tgen_cmd = "ls"
+ self.tgen_check = int(self.tgen_client.execute(check_tgen_cmd)[0])
+ return vsperf_pb2.StatusReply(message="Successfully Connected")
+
+ def tgen_setup(self):
+ """
+ Setup the T-Gen Client
+ """
+ # Just connect to VM.
+ self.tgen_client = ssh.SSH(host=self.tgen, user=self.tgen_user,
+ password=self.tgenpwd)
+ self.tgen_client.wait()
+
+ def StartBeats(self, request, context):
+ """
+ Start fileBeats on DUT
+ """
+ if self.dut_check != 0:
+ return vsperf_pb2.StatusReply(message="DUT-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " DUT-Host.")
+ run_cmd = "echo '{}' | sudo -S service filebeat start".format(self.pwd)
+ #run_cmd = "sudo service filebeat start"
+ self.client.run(run_cmd, pty=True)
+ return vsperf_pb2.StatusReply(message="Beats are started on DUT-Host")
+
+ def DUTvsperfTestAvailability(self, request, context):
+ """
+ Before running a test, make sure no other test is running
+ """
+ if self.dut_check != 0:
+ return vsperf_pb2.StatusReply(message="DUT-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " DUT-Host.")
+ vsperf_ava_cmd = "ps -ef | grep -v grep | grep ./vsperf | awk '{print $2}'"
+ vsperf_ava_result = len((self.client.execute(vsperf_ava_cmd)[1]).split("\n"))
+ if vsperf_ava_result == 1:
+ return vsperf_pb2.StatusReply(message="DUT-Host is available for performing" \
+ " VSPERF Test\nYou can perform Test!")
+ return vsperf_pb2.StatusReply(message="DUT-Host is busy right now, Wait for some time\n\
+ Always Check availability before Running Test!")
+
+
+###Clean-UP process related functions####
+
+
+ def vsperf_remove(self):
+ """
+ Actual removal of the VSPERF
+ """
+ vsperf_rm_cmd = "echo '{}' | sudo -S rm -r ~/vswitchperf".format(
+ self.pwd)
+ self.client.run(vsperf_rm_cmd, pty=True)
+ vsperfenv_rm_cmd = "echo '{}' | sudo -S rm -r -f ~/vsperfenv".format(
+ self.pwd)
+ self.client.run(vsperfenv_rm_cmd, pty=True)
+
+ def remove_uploaded_config(self):
+ """
+ Remove all the uploaded test configuration file
+ """
+ vconfig_rm_cmd = "rm ~/{}".format(self.conffile)
+ self.client.run(vconfig_rm_cmd, pty=True)
+
+ def result_folder_remove(self):
+ """
+ Remove result folder on DUT
+ """
+ remove_cmd = "rm -r /tmp/*results*"
+ self.client.run(remove_cmd, pty=True)
+
+ def collectd_remove(self):
+ """
+ Remove collectd from DUT
+ """
+ collectd_dwn_rm_cmd = "echo '{}' | sudo -S rm -r -f ~/collectd".format(
+ self.pwd)
+ self.client.run(collectd_dwn_rm_cmd, pty=True)
+ collectd_rm_cmd = "echo '{}' | sudo -S rm -r -f /opt/collectd".format(
+ self.pwd)
+ self.client.run(collectd_rm_cmd, pty=True)
+
+ def RemoveVsperf(self, request, context):
+ """
+ Handle VSPERF removal command from client
+ """
+ if self.dut_check != 0:
+ return vsperf_pb2.StatusReply(message="DUT-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " DUT-Host.")
+ self.vsperf_remove()
+ return vsperf_pb2.StatusReply(message="Successfully VSPERF Removed")
+
+ def TerminateVsperf(self, request, context):
+ """
+ Terminate the VSPERF and kill processes
+ """
+ if self.dut_check != 0:
+ return vsperf_pb2.StatusReply(message="DUT-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " DUT-Host.")
+ stress_kill_cmd = "pkill stress"
+ python3_kill_cmd = "pkill python3"
+ qemu_kill_cmd = "killall -9 qemu-system-x86_64"
+ self.client.send_command(stress_kill_cmd)
+ self.client.send_command(python3_kill_cmd)
+ self.client.send_command(qemu_kill_cmd)
+
+ # sometimes qemu refuses to terminate, so wait a bit and kill it again
+ qemu_check_cmd = "pgrep qemu-system-x86_64"
+ qemu_cmd_response = self.client.execute(qemu_check_cmd)[1]
+
+ if qemu_cmd_response != '':
+ time.sleep(5)
+ self.client.send_command(qemu_kill_cmd)
+ time.sleep(5)
+
+ ovs_kill_cmd = "pkill ovs-vswitchd"
+ ovsdb_kill_cmd = "pkill ovsdb-server"
+ vppctl_kill_cmd = "pkill vppctl"
+ vpp_kill_cmd = "pkill vpp"
+ vpp_cmd = "pkill -9".format(self.pwd)
+
+ self.client.send_command(ovs_kill_cmd)
+ time.sleep(1)
+ self.client.send_command(ovsdb_kill_cmd)
+ time.sleep(1)
+ self.client.send_command(vppctl_kill_cmd)
+ time.sleep(1)
+ self.client.send_command(vpp_kill_cmd)
+ time.sleep(1)
+ self.client.send_command(vpp_cmd)
+ time.sleep(1)
+
+ return vsperf_pb2.StatusReply(
+ message="All the VSPERF related process terminated successfully")
+
+ def RemoveResultFolder(self, request, context):
+ """
+ Handle result folder removal command from client
+ """
+ if self.dut_check != 0:
+ return vsperf_pb2.StatusReply(message="DUT-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " DUT-Host.")
+ self.result_folder_remove()
+ return vsperf_pb2.StatusReply(
+ message="Successfully VSPERF Results Removed")
+
+ def RemoveUploadedConfig(self, request, context):
+ """
+ Handle all configuration file removal command from client
+ """
+ if self.dut_check != 0:
+ return vsperf_pb2.StatusReply(message="DUT-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " DUT-Host.")
+ if self.tgen_check != 0:
+ return vsperf_pb2.StatusReply(message="TGen-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " TGen-Host.")
+ if self.test_upload_check == 0:
+ return vsperf_pb2.StatusReply(message="Test File is not uploaded yet [!] " \
+ "\nUpload Test Configuration File.")
+ self.remove_uploaded_config()
+ return vsperf_pb2.StatusReply(
+ message="Successfully All Uploaded Config Files Removed")
+
+ def RemoveCollectd(self, request, context):
+ """
+ Handle collectd removal command from client
+ """
+ if self.dut_check != 0:
+ return vsperf_pb2.StatusReply(message="DUT-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " DUT-Host.")
+ self.collectd_remove()
+ return vsperf_pb2.StatusReply(
+ message="Successfully Collectd Removed From DUT-Host")
+
+ def RemoveEverything(self, request, context):
+ """
+ Handle the command from the client to remove everything from the DUT
+ """
+ if self.dut_check != 0:
+ return vsperf_pb2.StatusReply(message="DUT-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " DUT-Host.")
+ if self.tgen_check != 0:
+ return vsperf_pb2.StatusReply(message="TGen-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " TGen-Host.")
+ self.vsperf_remove()
+ self.result_folder_remove()
+ self.remove_uploaded_config()
+ self.collectd_remove()
+ return vsperf_pb2.StatusReply(
+ message="Successfully Everything Removed From DUT-Host")
+
+ def StartTGen(self, request, context):
+ """
+ Handle start-Tgen command from client
+ """
+ if self.tgen_check != 0:
+ return vsperf_pb2.StatusReply(message="TGen-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " TGen-Host.")
+ self.trex_params = request.params
+ run_cmd = "cd trex_2.37/scripts ; "
+ run_cmd += "./t-rex-64 "
+ run_cmd += self.trex_params
+ self.tgen_client.send_command(run_cmd)
+ self.tgen_start_check = 1
+ return vsperf_pb2.StatusReply(message="T-Rex Successfully running...")
+
+ def SanityCollectdCheck(self, request, context):
+ """
+ Check and verify collectd is able to run and start properly
+ """
+ if self.dut_check != 0:
+ return vsperf_pb2.StatusReply(message="DUT-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " DUT-Host.")
+ check_collectd_cmd = "find /opt -maxdepth 1 -name 'collectd'"
+ check_test_result = str(self.client.execute(check_collectd_cmd)[1])
+ if "collectd" in check_test_result:
+ check_collectd_run_cmd = "echo {} | sudo -S service collectd start".format(self.pwd)
+ self.client.run(check_collectd_run_cmd, pty=True)
+ check_collectd_status_cmd = "ps aux | grep collectd"
+ check_collectd_status = str(self.client.execute(check_collectd_status_cmd)[1])
+ if "/sbin/collectd" in check_collectd_status:
+ self.sanity_check_done_list.append(int(5))
+ return vsperf_pb2.StatusReply(message="Collectd is working Fine")
+ return vsperf_pb2.StatusReply(message="Collectd Fail to Start, \
+ Install correctly before running Test")
+ return vsperf_pb2.StatusReply(message="Collectd is not installed yet.")
+
+ def SanityVNFpath(self, request, context):
+ """
+ Check whether the VNF image is available at the path mentioned in the test config file
+ """
+ # fetch the VNF path we placed in vsperf.conf file
+ if self.dut_check != 0:
+ return vsperf_pb2.StatusReply(message="DUT-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " DUT-Host.")
+ if self.test_upload_check == 0:
+ return vsperf_pb2.StatusReply(message="Test File is not uploaded yet [!] " \
+ "\nUpload Test Configuration File.")
+ vsperf_conf_path = 'cat ~/{} | grep "GUEST_IMAGE"'.format(self.conffile)
+ vsperf_conf_read = self.client.execute(vsperf_conf_path)[1]
+ vnf_image_path = vsperf_conf_read.split("'")[1]
+ vnf_path_check_cmd = "find {}".format(vnf_image_path)
+ vfn_path_check_result = str(self.client.execute(vnf_path_check_cmd)[1])
+ if vnf_image_path in vfn_path_check_result:
+ self.sanity_check_done_list.append(int(2))
+ return vsperf_pb2.StatusReply(message="Test Configratuion file has Correct "\
+ "VNF path information on DUT-Host.....[OK]")
+ return vsperf_pb2.StatusReply(message='Test Configuration file has wrongly placed VNF '\
+ 'path information \n'\
+ 'VNF is not available on DUT-Host................................[Failed]\n ')
+
+ def SanityVSPERFCheck(self, request, context):
+ """
+ Make sure that VSPERF is installed correctly
+ """
+ if self.dut_check != 0:
+ return vsperf_pb2.StatusReply(message="DUT-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " DUT-Host.")
+ vsperf_check_command = "source ~/vsperfenv/bin/activate ; cd vswitchperf* && "
+ vsperf_check_command += "./vsperf --help"
+ vsperf_check_cmd_result = str(self.client.execute(vsperf_check_command)[1])
+ vsperf_verify_list = [
+ 'usage',
+ 'positional arguments',
+ 'optional arguments',
+ 'test selection options',
+ 'test behavior options']
+ for idx, i in enumerate(vsperf_verify_list, start=1):
+ if str(i) in vsperf_check_cmd_result:
+ if idx < 5:
+ continue
+ elif idx == 5:
+ self.sanity_check_done_list.append(int(1))
+ return vsperf_pb2.StatusReply(
+ message="VSPERF Installed Correctly and Working fine")
+ return vsperf_pb2.StatusReply(message="VSPERF Does Not Installed Correctly ," \
+ "INSTALL IT AGAIN..............[Critical]")
+ return vsperf_pb2.StatusReply(message="VSPERF Does Not Installed Correctly ," \
+ "INSTALL IT AGAIN..............[Critical]")
+
+ def SanityNICCheck(self, request, context):
+ """
+ Check whether the NIC PCI IDs are correctly configured
+ """
+ if self.tgen_check != 0:
+ return vsperf_pb2.StatusReply(message="TGen-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " TGen-Host.")
+ trex_conf_path = "cat /etc/trex_cfg.yaml | grep interfaces"
+ trex_conf_read = self.tgen_client.execute(trex_conf_path)[1]
+ nic_pid_ids_list = [trex_conf_read.split("\"")[1], trex_conf_read.split("\"")[3]]
+ trex_nic_pic_id_cmd = "lspci | egrep -i --color 'network|ethernet'"
+ trex_nic_pic_id = str(self.tgen_client.execute(trex_nic_pic_id_cmd)[1]).split('\n')
+ acheck = 0
+ for k in trex_nic_pic_id:
+ for j in nic_pid_ids_list:
+ if j in k:
+ acheck += 1
+ else:
+ pass
+ if acheck == 2:
+ self.sanity_check_done_list.append(int(3))
+ return vsperf_pb2.StatusReply(message="Both the NIC PCI Ids are Correctly "\
+ "configured on TGen-Host..............")
+ return vsperf_pb2.StatusReply(message="You configured NIC PCI Ids Wrong in "\
+ "TGen-Host............................[OK]\n")
+
+ def SanityTgenConnDUTCheck(self, request, context):
+ """
+ Confirm that the DUT-Host can reach the Traffic Generator Host
+ """
+ if self.dut_check != 0:
+ return vsperf_pb2.StatusReply(message="DUT-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " DUT-Host.")
+ self.tgen_ip_address = request.ip
+ tgen_connectivity_check_cmd = "ping {} -c 1".format(
+ self.tgen_ip_address)
+ tgen_connectivity_check_result = int(
+ self.client.execute(tgen_connectivity_check_cmd)[0])
+ if tgen_connectivity_check_result == 0:
+ self.sanity_check_done_list.append(int(6))
+ return vsperf_pb2.StatusReply(
+ message="DUT-Host is successfully reachable to Traffic Generator......")
+ return vsperf_pb2.StatusReply(message="DUT-Host is unsuccessful to reach the \
+ Traffic Generator \nMake sure to establish connection \
+ between DUT-Host and TGen-Host before running Test\
+ ............... ")
+
+ def variable_from_test_config(self, aparameter):
+ """This function can be use to read any configuration paramter from vsperf.conf"""
+ read_cmd = 'cat ~/{} | grep "{}"'.format(aparameter, self.conffile)
+ read_cmd_output = str(self.client.execute(read_cmd)[1])
+ print(read_cmd_output)
+ if not read_cmd_output or '#' in read_cmd_output:
+ return 0
+ return read_cmd_output.split("=")[1].strip()
+
+ def cpumask2coreids(self, mask):
+ """conver mask to coreids"""
+ intmask = int(mask, 16)
+ i = 1
+ coreids = []
+ while i <= intmask:
+ if i & intmask:
+ coreids.append(str(math.frexp(i)[1]-1))
+ i = i << 1
+ return coreids
+
+ def cpu_allocation_check(self, list1, list2):
+ """compare to cpu_map list"""
+ if len(list1) >= len(list2):
+ if all(elem in list1 for elem in list2):
+ self.sanity_check_done_list.append(int(4))
+ return vsperf_pb2.StatusReply(message="CPU allocation properly done on" \
+ " DUT-Host.................[OK]")
+ return vsperf_pb2.StatusReply(message="CPU allocation not done properly on " \
+ "DUT-Host............[Failed]")
+ return vsperf_pb2.StatusReply(message="CPU allocation not done properly on" \
+ " DUT-Host............[Failed]")
+
+ def SanityCPUAllocationCheck(self, request, context):
+ """
+ Check the CPU allocation on the DUT-Host
+ """
+ if self.dut_check != 0:
+ return vsperf_pb2.StatusReply(message="DUT-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " DUT-Host.")
+ if self.test_upload_check == 0:
+ return vsperf_pb2.StatusReply(message="Test File is not uploaded yet [!] " \
+ "\nUpload Test Configuration File.")
+ read_setting_cmd = "source vsperfenv/bin/activate ; cd vswitchperf* && "
+ read_setting_cmd += './vsperf --list-settings'
+ default_vsperf_settings = ast.literal_eval(str(self.client.execute(read_setting_cmd)[1]))
+ default_cpu_map = default_vsperf_settings["VSWITCH_VHOST_CPU_MAP"]
+ default_vswitch_pmd_cpu_mask = str(default_vsperf_settings["VSWITCH_PMD_CPU_MASK"])
+ default_vswitch_vhost_cpu_map = [str(x) for x in default_cpu_map]
+ vswitch_pmd_cpu_mask = self.variable_from_test_config("VSWITCH_PMD_CPU_MASK")
+ vswitch_cpu_map = (self.variable_from_test_config("VSWITCH_VHOST_CPU_MAP"))
+ vswitch_vhost_cpu_map = 0
+
+ if vswitch_cpu_map != 0:
+ vswitch_vhost_cpu_map = [str(x) for x in ast.literal_eval(vswitch_cpu_map)]
+
+ if vswitch_pmd_cpu_mask == 0 and vswitch_vhost_cpu_map == 0:
+ self.sanity_check_done_list.append(int(4))
+ return vsperf_pb2.StatusReply(message="CPU allocation Check Done,"\
+ "\nNo vswitch_pmd_cpu_mask or vswitch_vhost_cpu_map assign in test " \
+ "configuration file.\nUsing Default Settings..[OK]\n")
+ if vswitch_pmd_cpu_mask != 0 and vswitch_vhost_cpu_map == 0:
+ core_id = self.cpumask2coreids(vswitch_pmd_cpu_mask)
+ return self.cpu_allocation_check(default_vswitch_vhost_cpu_map, core_id)
+ if vswitch_pmd_cpu_mask == 0 and vswitch_vhost_cpu_map != 0:
+ core_id_1 = self.cpumask2coreids(default_vswitch_pmd_cpu_mask)
+ return self.cpu_allocation_check(vswitch_vhost_cpu_map, core_id_1)
+ core_id_2 = self.cpumask2coreids(vswitch_pmd_cpu_mask)
+ return self.cpu_allocation_check(vswitch_vhost_cpu_map, core_id_2)
+
+ def GetVSPERFConffromDUT(self, request, context):
+ """
+ This will extract the vsperf test configuration from DUT-Host
+ """
+ if self.dut_check != 0:
+ return vsperf_pb2.StatusReply(message="DUT-Host is not Connected [!]" \
+ "\nMake sure to establish connection with" \
+ " DUT-Host.")
+ if self.test_upload_check == 0:
+ return vsperf_pb2.StatusReply(message="Test File is not uploaded yet [!] " \
+ "\nUpload Test Configuration File.")
+ read_cmd = "cat ~/{}".format(self.conffile)
+ read_cmd_output = str(self.client.execute(read_cmd)[1])
+ return vsperf_pb2.StatusReply(message="{}".format(read_cmd_output))
+
+
+def serve():
+ """
+ Start servicing the client
+ """
+ server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
+ vsperf_pb2_grpc.add_ControllerServicer_to_server(
+ VsperfController(), server)
+ server.add_insecure_port('[::]:50052')
+ server.start()
+ try:
+ while True:
+ time.sleep(_ONE_DAY_IN_SECONDS)
+ except (SystemExit, KeyboardInterrupt, MemoryError, RuntimeError):
+ server.stop(0)
+
+
+if __name__ == "__main__":
+ serve()
diff --git a/tools/docker/testcontrol/interactive/docker-compose.yml b/tools/docker/testcontrol/interactive/docker-compose.yml
new file mode 100644
index 00000000..431de124
--- /dev/null
+++ b/tools/docker/testcontrol/interactive/docker-compose.yml
@@ -0,0 +1,20 @@
+version: '2'
+
+services:
+ testcontrol:
+ build:
+ context: ./controller
+ volumes:
+ - ./controller/vsperf:/vsperf
+ ports:
+ - 50052:50052
+
+
+
+
+
+
+
+
+
+
diff --git a/tools/docker/vsperf/Dockerfile b/tools/docker/vsperf/Dockerfile
new file mode 100644
index 00000000..effce15b
--- /dev/null
+++ b/tools/docker/vsperf/Dockerfile
@@ -0,0 +1,37 @@
+# To Build
+# docker build --rm -t vsperf .
+
+# -------- Builder stage.
+FROM python:3.6.10-slim-buster
+MAINTAINER Sridhar Rao <sridhar.rao@spirent.com>
+
+# Create a directory
+RUN mkdir /home/opnfv
+#
+# Update and Install required packages
+#
+RUN apt-get -y update
+RUN apt-get -y install git iputils-ping openssh-client tk
+
+#
+# Get vswitchperf
+#
+RUN cd /home/opnfv && \
+ git clone https://gerrit.opnfv.org/gerrit/vswitchperf
+
+#
+# Remove unnecessary python packages.
+#
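+# Each sed below comments out the matching requirement; the final sed
+# re-appends pyzmq at the end of the file.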
+RUN cd /home/opnfv/vswitchperf && \
+ sed -e '/numpy/ s/^#*/#\ /' -i requirements.txt && \
+ sed -e '/matplotlib/ s/^#*/#\ /' -i requirements.txt && \
+ sed -e '/pycrypto/ s/^#*/#\ /' -i requirements.txt && \
+ sed -e '/pypsi/ s/^#*/#\ /' -i requirements.txt && \
+ sed -e '/paramiko/ s/^#*/#\ /' -i requirements.txt && \
+ sed -e '/pyzmq/ s/^#*/#\ /' -i requirements.txt && \
+ sed -e "\$apyzmq" -i requirements.txt
+
+#
+# Build VSPERF
+#
+RUN cd /home/opnfv/vswitchperf/systems && ./build_base_machine.sh --trafficgen
diff --git a/tools/k8s/cluster-deployment/k8scluster/.ansible-lint b/tools/k8s/cluster-deployment/k8scluster/.ansible-lint
new file mode 100644
index 00000000..036ecf52
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/.ansible-lint
@@ -0,0 +1,3 @@
+skip_list:
+ - '306'
+ - '301' \ No newline at end of file
diff --git a/tools/k8s/cluster-deployment/k8scluster/README.md b/tools/k8s/cluster-deployment/k8scluster/README.md
new file mode 100644
index 00000000..78fdbd03
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/README.md
@@ -0,0 +1,60 @@
+# OPNFV - k8s cluster setup
+
+This project sets up and programmatically deploys a Kubernetes cluster on CentOS 7 machines with the help of kubeadm. It uses Ansible and requires very little manual intervention.
+
+## Getting Started
+The following steps describe the minimum required to successfully run this script.
+
+
+### Prerequisites
+
+Kubernetes and Ansible should be installed on the master node, and the Docker and kubelet services should be running on the master and worker nodes.
+
+
+### Setup
+To configure the cluster, an inventory file must be provided. The inventory file (e.g., `hosts`) has the following structure:
+
+```
+[master]
+master ansible_host={enter-master-ip} ansible_connection=ssh ansible_ssh_user={insert-user} ansible_ssh_pass={insert-password} ansible_ssh_common_args='-o StrictHostKeyChecking=no'
+
+[workers]
+worker ansible_host={enter-worker-ip} ansible_connection=ssh ansible_ssh_user={insert-user} ansible_ssh_pass={insert-password} ansible_ssh_common_args='-o StrictHostKeyChecking=no'
+
+```
+In this configuration file, fill in the connection details for each node. In case more nodes are needed within the cluster, add lines to the workers group within the `hosts` file as necessary, as shown below.
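+
+For example, a second worker entry (with a hypothetical IP placeholder) would look like:
+
+```
+worker2 ansible_host={enter-worker2-ip} ansible_connection=ssh ansible_ssh_user={insert-user} ansible_ssh_pass={insert-password} ansible_ssh_common_args='-o StrictHostKeyChecking=no'
+```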
+
+
+### Usage
+In order to use the script, download or clone [this repository](https://gerrit.opnfv.org/gerrit/vswitchperf) to the root of what will be the master node.
+
+Navigate to its contents and execute the following command as a regular user (this prevents errors during configuration and deployment) on whichever machine you wish to use as the master node (this host will be the one running kubectl):
+
+```
+ansible-playbook k8sclustermanagement.yml -i hosts --tags "deploy"
+
+```
+You can verify the installation by running:
+```
+kubectl get nodes
+```
+and verifying the readiness of the nodes. More information can be obtained with `kubectl describe nodes` if needed.
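+
+The output should look roughly like this (illustrative only; node names, ages and versions depend on your environment):
+
+```
+NAME     STATUS   ROLES    AGE   VERSION
+master   Ready    master   10m   v1.18.2
+worker   Ready    <none>   8m    v1.18.2
+```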
+
+
+To clear the cluster, execute the following command:
+
+```
+ansible-playbook k8sclustermanagement.yml -i hosts --tags "clear"
+```
+
+To deploy only the CNI plugins:
+
+```
+ansible-playbook k8sclustermanagement.yml -i hosts --tags "cni"
+```
+
+
+
+### Debugging
+
+If a step goes wrong during the installation, Ansible displays an error message; there are also log files to inspect when the failure is Kubernetes-related. On the master node, `log_init.txt` holds the relevant logs; on worker nodes, the relevant file is `node_joined.txt`.
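+
+A quick way to inspect these logs (a minimal sketch, assuming both files sit in the login user's home directory, as with the default `token_file` setting):
+
+```
+# On the master node:
+tail -n 50 ~/log_init.txt
+# On a worker node:
+tail -n 50 ~/node_joined.txt
+```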
diff --git a/tools/k8s/cluster-deployment/k8scluster/ansible.cfg b/tools/k8s/cluster-deployment/k8scluster/ansible.cfg
new file mode 100644
index 00000000..0cbe08f3
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/ansible.cfg
@@ -0,0 +1,9 @@
+[defaults]
+interpreter_python=/usr/bin/python3
+
+# enable logging
+log_path = ./cluster-deployment.log
+
+[ssh_connection]
+pipelining = True
+
diff --git a/tools/k8s/cluster-deployment/k8scluster/hosts b/tools/k8s/cluster-deployment/k8scluster/hosts
new file mode 100644
index 00000000..dd928a8e
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/hosts
@@ -0,0 +1,5 @@
+[master]
+master ansible_host=10.10.120.22 ansible_connection=ssh ansible_ssh_user=ENTER_USER ansible_ssh_pass=ENTER_PASS ansible_ssh_common_args='-o StrictHostKeyChecking=no'
+
+[workers]
+worker ansible_host=10.10.120.21 ansible_connection=ssh ansible_ssh_user=ENTER_USER ansible_ssh_pass=ENTER_PASS ansible_ssh_common_args='-o StrictHostKeyChecking=no'
diff --git a/tools/k8s/cluster-deployment/k8scluster/k8sclustermanagement.yml b/tools/k8s/cluster-deployment/k8scluster/k8sclustermanagement.yml
new file mode 100644
index 00000000..5430bed5
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/k8sclustermanagement.yml
@@ -0,0 +1,4 @@
+---
+- hosts: all
+ roles:
+ - clustermanager \ No newline at end of file
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/defaults/main.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/defaults/main.yml
new file mode 100644
index 00000000..15f1f186
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/defaults/main.yml
@@ -0,0 +1,28 @@
+#Edit these values only as needed for your environment.
+#Enter your master node's advertise IP address and the CIDR range for the pods.
+kube_ad_addr: "{{ ansible_host }}"
+kube_cidr_v: 10.244.0.0/16
+
+###################################################################################
+# Don't edit the values below; they are mandatory for configuring the Kubernetes cluster
+#packages:
+#- docker
+#- kubeadm
+#- kubectl
+
+#services:
+#- docker
+#- kubelet
+#- firewalld
+
+#ports:
+#- "6443/tcp"
+#- "10250/tcp"
+
+token_file: $HOME/log_init.txt
+###################################################################################
+# Don't edit the values above; they are mandatory for configuring the Kubernetes cluster
+
+
+
+PIP_executable_version: pip3.6 \ No newline at end of file
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/configMap-sriov-device-plugin.yaml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/configMap-sriov-device-plugin.yaml
new file mode 100644
index 00000000..4efeac61
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/configMap-sriov-device-plugin.yaml
@@ -0,0 +1,20 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: sriovdp-config
+ namespace: kube-system
+data:
+ config.json: |
+ {
+ "resourceList": [{
+ "resourceName": "intel_sriov_dpdk_a",
+ "selectors": {
+ "vendors": ["8086"],
+ "devices": ["10ed"],
+ "drivers": ["ixgbevf"],
+ "pfNames": ["eno3"]
+ }
+ }
+ ]
+ }
+
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/kube-flannel-daemonset.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/kube-flannel-daemonset.yml
new file mode 100644
index 00000000..00110ad6
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/kube-flannel-daemonset.yml
@@ -0,0 +1,606 @@
+#
+# cloned from https://github.com/coreos/flannel/blob/v0.12.0/Documentation/kube-flannel.yml
+#
+---
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+ name: psp.flannel.unprivileged
+ annotations:
+ seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
+ seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
+ apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
+ apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
+spec:
+ privileged: false
+ volumes:
+ - configMap
+ - secret
+ - emptyDir
+ - hostPath
+ allowedHostPaths:
+ - pathPrefix: "/etc/cni/net.d"
+ - pathPrefix: "/etc/kube-flannel"
+ - pathPrefix: "/run/flannel"
+ readOnlyRootFilesystem: false
+ # Users and groups
+ runAsUser:
+ rule: RunAsAny
+ supplementalGroups:
+ rule: RunAsAny
+ fsGroup:
+ rule: RunAsAny
+ # Privilege Escalation
+ allowPrivilegeEscalation: false
+ defaultAllowPrivilegeEscalation: false
+ # Capabilities
+ allowedCapabilities: ['NET_ADMIN']
+ defaultAddCapabilities: []
+ requiredDropCapabilities: []
+ # Host namespaces
+ hostPID: false
+ hostIPC: false
+ hostNetwork: true
+ hostPorts:
+ - min: 0
+ max: 65535
+ # SELinux
+ seLinux:
+ # SELinux is unused in CaaSP
+ rule: 'RunAsAny'
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: flannel
+rules:
+ - apiGroups: ['extensions']
+ resources: ['podsecuritypolicies']
+ verbs: ['use']
+ resourceNames: ['psp.flannel.unprivileged']
+ - apiGroups:
+ - ""
+ resources:
+ - pods
+ verbs:
+ - get
+ - apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - nodes/status
+ verbs:
+ - patch
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: flannel
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: flannel
+subjects:
+- kind: ServiceAccount
+ name: flannel
+ namespace: kube-system
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: flannel
+ namespace: kube-system
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: kube-flannel-cfg
+ namespace: kube-system
+ labels:
+ tier: node
+ app: flannel
+data:
+ cni-conf.json: |
+ {
+ "name": "cbr0",
+ "cniVersion": "0.3.1",
+ "plugins": [
+ {
+ "type": "flannel",
+ "delegate": {
+ "hairpinMode": true,
+ "isDefaultGateway": true
+ }
+ },
+ {
+ "type": "portmap",
+ "capabilities": {
+ "portMappings": true
+ }
+ }
+ ]
+ }
+ net-conf.json: |
+ {
+ "Network": "10.244.0.0/16",
+ "Backend": {
+ "Type": "vxlan"
+ }
+ }
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: kube-flannel-ds-amd64
+ namespace: kube-system
+ labels:
+ tier: node
+ app: flannel
+spec:
+ selector:
+ matchLabels:
+ app: flannel
+ template:
+ metadata:
+ labels:
+ tier: node
+ app: flannel
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: beta.kubernetes.io/os
+ operator: In
+ values:
+ - linux
+ - key: beta.kubernetes.io/arch
+ operator: In
+ values:
+ - amd64
+ hostNetwork: true
+ tolerations:
+ - operator: Exists
+ effect: NoSchedule
+ serviceAccountName: flannel
+ initContainers:
+ - name: install-cni
+ image: quay.io/coreos/flannel:v0.12.0-amd64
+ command:
+ - cp
+ args:
+ - -f
+ - /etc/kube-flannel/cni-conf.json
+ - /etc/cni/net.d/10-flannel.conflist
+ volumeMounts:
+ - name: cni
+ mountPath: /etc/cni/net.d
+ - name: flannel-cfg
+ mountPath: /etc/kube-flannel/
+ containers:
+ - name: kube-flannel
+ image: quay.io/coreos/flannel:v0.12.0-amd64
+ command:
+ - /opt/bin/flanneld
+ args:
+ - --ip-masq
+ - --kube-subnet-mgr
+ resources:
+ requests:
+ cpu: "100m"
+ memory: "50Mi"
+ limits:
+ cpu: "100m"
+ memory: "50Mi"
+ securityContext:
+ privileged: false
+ capabilities:
+ add: ["NET_ADMIN"]
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ volumeMounts:
+ - name: run
+ mountPath: /run/flannel
+ - name: flannel-cfg
+ mountPath: /etc/kube-flannel/
+ volumes:
+ - name: run
+ hostPath:
+ path: /run/flannel
+ - name: cni
+ hostPath:
+ path: /etc/cni/net.d
+ - name: flannel-cfg
+ configMap:
+ name: kube-flannel-cfg
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: kube-flannel-ds-arm64
+ namespace: kube-system
+ labels:
+ tier: node
+ app: flannel
+spec:
+ selector:
+ matchLabels:
+ app: flannel
+ template:
+ metadata:
+ labels:
+ tier: node
+ app: flannel
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: beta.kubernetes.io/os
+ operator: In
+ values:
+ - linux
+ - key: beta.kubernetes.io/arch
+ operator: In
+ values:
+ - arm64
+ hostNetwork: true
+ tolerations:
+ - operator: Exists
+ effect: NoSchedule
+ serviceAccountName: flannel
+ initContainers:
+ - name: install-cni
+ image: quay.io/coreos/flannel:v0.12.0-arm64
+ command:
+ - cp
+ args:
+ - -f
+ - /etc/kube-flannel/cni-conf.json
+ - /etc/cni/net.d/10-flannel.conflist
+ volumeMounts:
+ - name: cni
+ mountPath: /etc/cni/net.d
+ - name: flannel-cfg
+ mountPath: /etc/kube-flannel/
+ containers:
+ - name: kube-flannel
+ image: quay.io/coreos/flannel:v0.12.0-arm64
+ command:
+ - /opt/bin/flanneld
+ args:
+ - --ip-masq
+ - --kube-subnet-mgr
+ resources:
+ requests:
+ cpu: "100m"
+ memory: "50Mi"
+ limits:
+ cpu: "100m"
+ memory: "50Mi"
+ securityContext:
+ privileged: false
+ capabilities:
+ add: ["NET_ADMIN"]
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ volumeMounts:
+ - name: run
+ mountPath: /run/flannel
+ - name: flannel-cfg
+ mountPath: /etc/kube-flannel/
+ volumes:
+ - name: run
+ hostPath:
+ path: /run/flannel
+ - name: cni
+ hostPath:
+ path: /etc/cni/net.d
+ - name: flannel-cfg
+ configMap:
+ name: kube-flannel-cfg
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: kube-flannel-ds-arm
+ namespace: kube-system
+ labels:
+ tier: node
+ app: flannel
+spec:
+ selector:
+ matchLabels:
+ app: flannel
+ template:
+ metadata:
+ labels:
+ tier: node
+ app: flannel
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: beta.kubernetes.io/os
+ operator: In
+ values:
+ - linux
+ - key: beta.kubernetes.io/arch
+ operator: In
+ values:
+ - arm
+ hostNetwork: true
+ tolerations:
+ - operator: Exists
+ effect: NoSchedule
+ serviceAccountName: flannel
+ initContainers:
+ - name: install-cni
+ image: quay.io/coreos/flannel:v0.12.0-arm
+ command:
+ - cp
+ args:
+ - -f
+ - /etc/kube-flannel/cni-conf.json
+ - /etc/cni/net.d/10-flannel.conflist
+ volumeMounts:
+ - name: cni
+ mountPath: /etc/cni/net.d
+ - name: flannel-cfg
+ mountPath: /etc/kube-flannel/
+ containers:
+ - name: kube-flannel
+ image: quay.io/coreos/flannel:v0.12.0-arm
+ command:
+ - /opt/bin/flanneld
+ args:
+ - --ip-masq
+ - --kube-subnet-mgr
+ resources:
+ requests:
+ cpu: "100m"
+ memory: "50Mi"
+ limits:
+ cpu: "100m"
+ memory: "50Mi"
+ securityContext:
+ privileged: false
+ capabilities:
+ add: ["NET_ADMIN"]
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ volumeMounts:
+ - name: run
+ mountPath: /run/flannel
+ - name: flannel-cfg
+ mountPath: /etc/kube-flannel/
+ volumes:
+ - name: run
+ hostPath:
+ path: /run/flannel
+ - name: cni
+ hostPath:
+ path: /etc/cni/net.d
+ - name: flannel-cfg
+ configMap:
+ name: kube-flannel-cfg
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: kube-flannel-ds-ppc64le
+ namespace: kube-system
+ labels:
+ tier: node
+ app: flannel
+spec:
+ selector:
+ matchLabels:
+ app: flannel
+ template:
+ metadata:
+ labels:
+ tier: node
+ app: flannel
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: beta.kubernetes.io/os
+ operator: In
+ values:
+ - linux
+ - key: beta.kubernetes.io/arch
+ operator: In
+ values:
+ - ppc64le
+ hostNetwork: true
+ tolerations:
+ - operator: Exists
+ effect: NoSchedule
+ serviceAccountName: flannel
+ initContainers:
+ - name: install-cni
+ image: quay.io/coreos/flannel:v0.12.0-ppc64le
+ command:
+ - cp
+ args:
+ - -f
+ - /etc/kube-flannel/cni-conf.json
+ - /etc/cni/net.d/10-flannel.conflist
+ volumeMounts:
+ - name: cni
+ mountPath: /etc/cni/net.d
+ - name: flannel-cfg
+ mountPath: /etc/kube-flannel/
+ containers:
+ - name: kube-flannel
+ image: quay.io/coreos/flannel:v0.12.0-ppc64le
+ command:
+ - /opt/bin/flanneld
+ args:
+ - --ip-masq
+ - --kube-subnet-mgr
+ resources:
+ requests:
+ cpu: "100m"
+ memory: "50Mi"
+ limits:
+ cpu: "100m"
+ memory: "50Mi"
+ securityContext:
+ privileged: false
+ capabilities:
+ add: ["NET_ADMIN"]
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ volumeMounts:
+ - name: run
+ mountPath: /run/flannel
+ - name: flannel-cfg
+ mountPath: /etc/kube-flannel/
+ volumes:
+ - name: run
+ hostPath:
+ path: /run/flannel
+ - name: cni
+ hostPath:
+ path: /etc/cni/net.d
+ - name: flannel-cfg
+ configMap:
+ name: kube-flannel-cfg
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: kube-flannel-ds-s390x
+ namespace: kube-system
+ labels:
+ tier: node
+ app: flannel
+spec:
+ selector:
+ matchLabels:
+ app: flannel
+ template:
+ metadata:
+ labels:
+ tier: node
+ app: flannel
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: beta.kubernetes.io/os
+ operator: In
+ values:
+ - linux
+ - key: beta.kubernetes.io/arch
+ operator: In
+ values:
+ - s390x
+ hostNetwork: true
+ tolerations:
+ - operator: Exists
+ effect: NoSchedule
+ serviceAccountName: flannel
+ initContainers:
+ - name: install-cni
+ image: quay.io/coreos/flannel:v0.12.0-s390x
+ command:
+ - cp
+ args:
+ - -f
+ - /etc/kube-flannel/cni-conf.json
+ - /etc/cni/net.d/10-flannel.conflist
+ volumeMounts:
+ - name: cni
+ mountPath: /etc/cni/net.d
+ - name: flannel-cfg
+ mountPath: /etc/kube-flannel/
+ containers:
+ - name: kube-flannel
+ image: quay.io/coreos/flannel:v0.12.0-s390x
+ command:
+ - /opt/bin/flanneld
+ args:
+ - --ip-masq
+ - --kube-subnet-mgr
+ resources:
+ requests:
+ cpu: "100m"
+ memory: "50Mi"
+ limits:
+ cpu: "100m"
+ memory: "50Mi"
+ securityContext:
+ privileged: false
+ capabilities:
+ add: ["NET_ADMIN"]
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ volumeMounts:
+ - name: run
+ mountPath: /run/flannel
+ - name: flannel-cfg
+ mountPath: /etc/kube-flannel/
+ volumes:
+ - name: run
+ hostPath:
+ path: /run/flannel
+ - name: cni
+ hostPath:
+ path: /etc/cni/net.d
+ - name: flannel-cfg
+ configMap:
+ name: kube-flannel-cfg
+
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/multus-daemonset.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/multus-daemonset.yml
new file mode 100644
index 00000000..97990192
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/multus-daemonset.yml
@@ -0,0 +1,251 @@
+#
+# https://github.com/intel/multus-cni/blob/v3.4.1/images/multus-daemonset.yml
+#
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: network-attachment-definitions.k8s.cni.cncf.io
+spec:
+ group: k8s.cni.cncf.io
+ scope: Namespaced
+ names:
+ plural: network-attachment-definitions
+ singular: network-attachment-definition
+ kind: NetworkAttachmentDefinition
+ shortNames:
+ - net-attach-def
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ schema:
+ openAPIV3Schema:
+ type: object
+ properties:
+ spec:
+ type: object
+ properties:
+ config:
+ type: string
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: multus
+rules:
+ - apiGroups: ["k8s.cni.cncf.io"]
+ resources:
+ - '*'
+ verbs:
+ - '*'
+ - apiGroups:
+ - ""
+ resources:
+ - pods
+ - pods/status
+ verbs:
+ - get
+ - update
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: multus
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: multus
+subjects:
+- kind: ServiceAccount
+ name: multus
+ namespace: kube-system
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: multus
+ namespace: kube-system
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: multus-cni-config
+ namespace: kube-system
+ labels:
+ tier: node
+ app: multus
+data:
+ # NOTE: If you'd prefer to manually apply a configuration file, you may create one here.
+ # In the case you'd like to customize the Multus installation, you should change the arguments to the Multus pod
+ # change the "args" line below from
+ # - "--multus-conf-file=auto"
+ # to:
+ # "--multus-conf-file=/tmp/multus-conf/70-multus.conf"
+ # Additionally -- you should ensure that the name "70-multus.conf" is the alphabetically first name in the
+ # /etc/cni/net.d/ directory on each node, otherwise, it will not be used by the Kubelet.
+ cni-conf.json: |
+ {
+ "name": "multus-cni-network",
+ "type": "multus",
+ "capabilities": {
+ "portMappings": true
+ },
+ "delegates": [
+ {
+ "cniVersion": "0.3.1",
+ "name": "default-cni-network",
+ "plugins": [
+ {
+ "type": "flannel",
+ "name": "flannel.1",
+ "delegate": {
+ "isDefaultGateway": true,
+ "hairpinMode": true
+ }
+ },
+ {
+ "type": "portmap",
+ "capabilities": {
+ "portMappings": true
+ }
+ }
+ ]
+ }
+ ],
+ "kubeconfig": "/etc/cni/net.d/multus.d/multus.kubeconfig"
+ }
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: kube-multus-ds-amd64
+ namespace: kube-system
+ labels:
+ tier: node
+ app: multus
+ name: multus
+spec:
+ selector:
+ matchLabels:
+ name: multus
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ tier: node
+ app: multus
+ name: multus
+ spec:
+ hostNetwork: true
+ nodeSelector:
+ kubernetes.io/arch: amd64
+ tolerations:
+ - operator: Exists
+ effect: NoSchedule
+ serviceAccountName: multus
+ containers:
+ - name: kube-multus
+ image: nfvpe/multus:v3.4
+ command: ["/entrypoint.sh"]
+ args:
+ - "--multus-conf-file=auto"
+ - "--cni-version=0.3.1"
+ resources:
+ requests:
+ cpu: "100m"
+ memory: "50Mi"
+ limits:
+ cpu: "100m"
+ memory: "50Mi"
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - name: cni
+ mountPath: /host/etc/cni/net.d
+ - name: cnibin
+ mountPath: /host/opt/cni/bin
+ - name: multus-cfg
+ mountPath: /tmp/multus-conf
+ volumes:
+ - name: cni
+ hostPath:
+ path: /etc/cni/net.d
+ - name: cnibin
+ hostPath:
+ path: /opt/cni/bin
+ - name: multus-cfg
+ configMap:
+ name: multus-cni-config
+ items:
+ - key: cni-conf.json
+ path: 70-multus.conf
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: kube-multus-ds-ppc64le
+ namespace: kube-system
+ labels:
+ tier: node
+ app: multus
+ name: multus
+spec:
+ selector:
+ matchLabels:
+ name: multus
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ tier: node
+ app: multus
+ name: multus
+ spec:
+ hostNetwork: true
+ nodeSelector:
+ kubernetes.io/arch: ppc64le
+ tolerations:
+ - operator: Exists
+ effect: NoSchedule
+ serviceAccountName: multus
+ containers:
+ - name: kube-multus
+ # ppc64le support requires multus:latest for now. support 3.3 or later.
+ image: nfvpe/multus:latest-ppc64le
+ command: ["/entrypoint.sh"]
+ args:
+ - "--multus-conf-file=auto"
+ - "--cni-version=0.3.1"
+ resources:
+ requests:
+ cpu: "100m"
+ memory: "90Mi"
+ limits:
+ cpu: "100m"
+ memory: "90Mi"
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - name: cni
+ mountPath: /host/etc/cni/net.d
+ - name: cnibin
+ mountPath: /host/opt/cni/bin
+ - name: multus-cfg
+ mountPath: /tmp/multus-conf
+ volumes:
+ - name: cni
+ hostPath:
+ path: /etc/cni/net.d
+ - name: cnibin
+ hostPath:
+ path: /opt/cni/bin
+ - name: multus-cfg
+ configMap:
+ name: multus-cni-config
+ items:
+ - key: cni-conf.json
+ path: 70-multus.conf
+
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/ovs-daemonset.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/ovs-daemonset.yml
new file mode 100644
index 00000000..8a854c06
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/ovs-daemonset.yml
@@ -0,0 +1,101 @@
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: ovs-cni-marker-cr
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - nodes
+ - nodes/status
+ verbs:
+ - get
+ - update
+ - patch
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: ovs-cni-marker-crb
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ovs-cni-marker-cr
+subjects:
+- kind: ServiceAccount
+ name: ovs-cni-marker
+ namespace: kube-system
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: ovs-cni-marker
+ namespace: kube-system
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: ovs-cni-amd64
+ namespace: kube-system
+ labels:
+ tier: node
+ app: ovs-cni
+spec:
+ selector:
+ matchLabels:
+ app: ovs-cni
+ template:
+ metadata:
+ labels:
+ tier: node
+ app: ovs-cni
+ spec:
+ serviceAccountName: ovs-cni-marker
+ hostNetwork: true
+ nodeSelector:
+ beta.kubernetes.io/arch: amd64
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ effect: NoSchedule
+ containers:
+ - name: ovs-cni-plugin
+ image: quay.io/kubevirt/ovs-cni-plugin:latest
+ imagePullPolicy: IfNotPresent
+ resources:
+ requests:
+ cpu: "100m"
+ memory: "50Mi"
+ limits:
+ cpu: "100m"
+ memory: "50Mi"
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - name: cnibin
+ mountPath: /host/opt/cni/bin
+ - name: ovs-cni-marker
+ image: quay.io/kubevirt/ovs-cni-marker:latest
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ privileged: true
+ args:
+ - -node-name
+ - $(NODE_NAME)
+ - -ovs-socket
+ - /host/var/run/openvswitch/db.sock
+ volumeMounts:
+ - name: ovs-var-run
+ mountPath: /host/var/run/openvswitch
+ env:
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ volumes:
+ - name: cnibin
+ hostPath:
+ path: /opt/cni/bin
+ - name: ovs-var-run
+ hostPath:
+ path: /var/run/openvswitch
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/sriov-cni-daemonset.yaml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/sriov-cni-daemonset.yaml
new file mode 100644
index 00000000..6a28c146
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/sriov-cni-daemonset.yaml
@@ -0,0 +1,47 @@
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: kube-sriov-cni-ds-amd64
+ namespace: kube-system
+ labels:
+ tier: node
+ app: sriov-cni
+spec:
+ selector:
+ matchLabels:
+ name: sriov-cni
+ template:
+ metadata:
+ labels:
+ name: sriov-cni
+ tier: node
+ app: sriov-cni
+ spec:
+ hostNetwork: true
+ nodeSelector:
+ beta.kubernetes.io/arch: amd64
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ effect: NoSchedule
+ containers:
+ - name: kube-sriov-cni
+ image: nfvpe/sriov-cni
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ privileged: true
+ resources:
+ requests:
+ cpu: "100m"
+ memory: "50Mi"
+ limits:
+ cpu: "100m"
+ memory: "50Mi"
+ volumeMounts:
+ - name: cnibin
+ mountPath: /host/opt/cni/bin
+ volumes:
+ - name: cnibin
+ hostPath:
+ path: /opt/cni/bin
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/sriov-device-plugin-daemonset.yaml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/sriov-device-plugin-daemonset.yaml
new file mode 100644
index 00000000..9168b98c
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/sriov-device-plugin-daemonset.yaml
@@ -0,0 +1,127 @@
+---
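+# SR-IOV network device plugin for amd64 and ppc64le nodes; resource pools
+# are read from the sriovdp-config ConfigMap (config.json), which the
+# clustermanager role creates from configMap-sriov-device-plugin.yaml.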
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: sriov-device-plugin
+ namespace: kube-system
+
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: kube-sriov-device-plugin-amd64
+ namespace: kube-system
+ labels:
+ tier: node
+ app: sriovdp
+spec:
+ selector:
+ matchLabels:
+ name: sriov-device-plugin
+ template:
+ metadata:
+ labels:
+ name: sriov-device-plugin
+ tier: node
+ app: sriovdp
+ spec:
+ hostNetwork: true
+ hostPID: true
+ nodeSelector:
+ beta.kubernetes.io/arch: amd64
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ effect: NoSchedule
+ serviceAccountName: sriov-device-plugin
+ containers:
+ - name: kube-sriovdp
+ image: nfvpe/sriov-device-plugin
+ imagePullPolicy: IfNotPresent
+ args:
+ - --log-dir=sriovdp
+ - --log-level=10
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - name: devicesock
+ mountPath: /var/lib/kubelet/
+ readOnly: false
+ - name: log
+ mountPath: /var/log
+ - name: config-volume
+ mountPath: /etc/pcidp
+ volumes:
+ - name: devicesock
+ hostPath:
+ path: /var/lib/kubelet/
+ - name: log
+ hostPath:
+ path: /var/log
+ - name: config-volume
+ configMap:
+ name: sriovdp-config
+ items:
+ - key: config.json
+ path: config.json
+
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: kube-sriov-device-plugin-ppc64le
+ namespace: kube-system
+ labels:
+ tier: node
+ app: sriovdp
+spec:
+ selector:
+ matchLabels:
+ name: sriov-device-plugin
+ template:
+ metadata:
+ labels:
+ name: sriov-device-plugin
+ tier: node
+ app: sriovdp
+ spec:
+ hostNetwork: true
+ hostPID: true
+ nodeSelector:
+ beta.kubernetes.io/arch: ppc64le
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ effect: NoSchedule
+ serviceAccountName: sriov-device-plugin
+ containers:
+ - name: kube-sriovdp
+ image: nfvpe/sriov-device-plugin:ppc64le
+ imagePullPolicy: IfNotPresent
+ args:
+ - --log-dir=sriovdp
+ - --log-level=10
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - name: devicesock
+ mountPath: /var/lib/kubelet/
+ readOnly: false
+ - name: log
+ mountPath: /var/log
+ - name: config-volume
+ mountPath: /etc/pcidp
+ volumes:
+ - name: devicesock
+ hostPath:
+ path: /var/lib/kubelet/
+ - name: log
+ hostPath:
+ path: /var/log
+ - name: config-volume
+ configMap:
+ name: sriovdp-config
+ items:
+ - key: config.json
+ path: config.json
+
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/userspace-daemonset.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/userspace-daemonset.yml
new file mode 100644
index 00000000..74bb520c
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/files/userspace-daemonset.yml
@@ -0,0 +1,46 @@
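+# Installs the userspace CNI binary into /opt/cni/bin on amd64 nodes; the
+# userspace CNI provides the vhost-user interfaces (e.g. OVS-DPDK) used by
+# the K8s performance testcases.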
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: userspace-cni-amd64
+ namespace: kube-system
+ labels:
+ tier: node
+ app: userspace-cni
+spec:
+ selector:
+ matchLabels:
+ app: userspace-cni
+ template:
+ metadata:
+ labels:
+ tier: node
+ app: userspace-cni
+ spec:
+ hostNetwork: true
+ nodeSelector:
+ beta.kubernetes.io/arch: amd64
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ effect: NoSchedule
+ containers:
+ - name: userspace-cni-plugin
+ image: parthyadav/userspace-cni:latest
+ imagePullPolicy: IfNotPresent
+ resources:
+ requests:
+ cpu: "100m"
+ memory: "50Mi"
+ limits:
+ cpu: "100m"
+ memory: "50Mi"
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - name: cnibin
+ mountPath: /host/opt/cni/bin
+ volumes:
+ - name: cnibin
+ hostPath:
+ path: /opt/cni/bin
+
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-flannel.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-flannel.yml
new file mode 100644
index 00000000..9d0ffda4
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-flannel.yml
@@ -0,0 +1,8 @@
+---
+
+- name: Delete Kube-flannel
+ k8s:
+ state: absent
+ definition: "{{ lookup('file', 'kube-flannel-daemonset.yml') }}"
+
+
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-k8s-master.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-k8s-master.yml
new file mode 100644
index 00000000..f797ddb6
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-k8s-master.yml
@@ -0,0 +1,22 @@
+---
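+# Removes the master from the cluster: drain and delete the node, reset
+# kubeadm, and clean up the CNI config, kubeconfig and saved join-token log.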
+- name: Drain master node
+ command: kubectl drain {{ ansible_hostname }} --delete-local-data --force --ignore-daemonsets
+
+- name: Delete master node
+ command: kubectl delete node {{ ansible_hostname }}
+
+- name: Kubeadm reset (master)
+ shell: yes y | sudo kubeadm reset
+
+- name: Delete /etc/cni/net.d/ (master)
+ command: sudo rm -rf /etc/cni/net.d/
+
+- name: Delete $HOME/.kube/
+ file:
+ path: $HOME/.kube/
+ state: absent
+
+- name: Delete init log file
+ file:
+ path: "{{ token_file }}"
+ state: absent
\ No newline at end of file
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-k8s-workers-drain.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-k8s-workers-drain.yml
new file mode 100644
index 00000000..46ae50ec
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-k8s-workers-drain.yml
@@ -0,0 +1,8 @@
+---
+- name: Drain worker node
+ delegate_to: "{{ groups['master'][0] }}"
+ command: kubectl drain {{ ansible_hostname }} --delete-local-data --force --ignore-daemonsets
+
+- name: Delete worker node
+ delegate_to: "{{ groups['master'][0] }}"
+ command: kubectl delete node {{ ansible_hostname }}
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-k8s-workers-reset.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-k8s-workers-reset.yml
new file mode 100644
index 00000000..62a8c01f
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-k8s-workers-reset.yml
@@ -0,0 +1,11 @@
+---
+- name: Kubeadm reset (worker)
+ shell: yes y | sudo kubeadm reset
+
+- name: Delete /etc/cni/net.d/ (worker)
+ command: sudo rm -rf /etc/cni/net.d/
+
+- name: Remove node_joined.txt
+ file:
+ path: $HOME/node_joined.txt
+ state: absent
\ No newline at end of file
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-kubevirt-ovs.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-kubevirt-ovs.yml
new file mode 100644
index 00000000..30740a44
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-kubevirt-ovs.yml
@@ -0,0 +1,8 @@
+---
+
+- name: Delete ovs-cni-plugin
+ k8s:
+ state: absent
+ definition: "{{ lookup('file', 'ovs-daemonset.yml') }}"
+
+
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-multus.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-multus.yml
new file mode 100644
index 00000000..44eabbd1
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-multus.yml
@@ -0,0 +1,8 @@
+---
+
+- name: Delete Multus
+ k8s:
+ state: absent
+ definition: "{{ lookup('file', 'multus-daemonset.yml') }}"
+
+
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-sriov.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-sriov.yml
new file mode 100644
index 00000000..6d725ce8
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-sriov.yml
@@ -0,0 +1,30 @@
+---
+
+- name: Delete SRIOV CNI Daemonset
+ k8s:
+ state: absent
+ apply: yes
+ definition: "{{ lookup('file', 'sriov-cni-daemonset.yaml') }}"
+
+- name: Delete SRIOV Device Plugin
+ k8s:
+ state: absent
+ apply: yes
+ definition: "{{ lookup('file', 'sriov-device-plugin-daemonset.yaml') }}"
+
+- name: Delete SRIOV Device Plugin Config
+ k8s:
+ state: absent
+ apply: yes
+ definition: "{{ lookup('file', 'configMap-sriov-device-plugin.yaml') }}"
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-userspace.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-userspace.yml
new file mode 100644
index 00000000..72b3d869
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/clear-userspace.yml
@@ -0,0 +1,8 @@
+---
+
+- name: Delete userspace-cni plugin
+ k8s:
+ state: absent
+ definition: "{{ lookup('file', 'userspace-daemonset.yml') }}"
+
+
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/cni-pre-deploy.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/cni-pre-deploy.yml
new file mode 100644
index 00000000..b2f280ef
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/cni-pre-deploy.yml
@@ -0,0 +1,17 @@
+---
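+# The openshift package supplies the Python client required by the Ansible
+# k8s module; stale CNI configs are removed so the plugins deployed next
+# start from a clean /etc/cni/net.d.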
+- name: Install openshift python package
+ pip:
+ name: openshift
+ executable: "{{ PIP_executable_version }}"
+ when: inventory_hostname in groups['master']
+ become: yes
+
+- name: Check whether /etc/cni/net.d/ exists
+ stat:
+ path: /etc/cni/net.d
+ register: files_to_delete
+
+- name: Delete /etc/cni/net.d/
+ become: yes
+ command: sudo rm -r /etc/cni/net.d/
+ when: files_to_delete.stat.exists and files_to_delete.stat.isdir
\ No newline at end of file
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/configure_master_node.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/configure_master_node.yml
new file mode 100644
index 00000000..4980e17e
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/configure_master_node.yml
@@ -0,0 +1,14 @@
+---
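+# kubeadm init output, including the generated join command, is saved to
+# token_file for later reference.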
+- name: Pulling images required for setting up a Kubernetes cluster
+ become: yes
+ command: kubeadm config images pull
+
+- name: Initializing Kubernetes cluster
+ become: yes
+ command: kubeadm init --apiserver-advertise-address={{ kube_ad_addr }} --pod-network-cidr={{ kube_cidr_v }}
+ register: output
+
+- name: Storing logs and the generated join token for later use
+ copy:
+ content: "{{ output.stdout }}"
+ dest: "{{ token_file }}" \ No newline at end of file
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-flannel.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-flannel.yml
new file mode 100644
index 00000000..367d682f
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-flannel.yml
@@ -0,0 +1,11 @@
+---
+
+- name: Clean flannel
+ import_tasks: clear-flannel.yml
+
+- name: Deploy Kube-flannel
+ k8s:
+ state: present
+ definition: "{{ lookup('file', 'kube-flannel-daemonset.yml') }}"
+ wait: yes
+
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-kubevirt-ovs.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-kubevirt-ovs.yml
new file mode 100644
index 00000000..9913cae4
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-kubevirt-ovs.yml
@@ -0,0 +1,12 @@
+---
+
+- name: Clean kubevirt-ovs
+ include_tasks: clear-kubevirt-ovs.yml
+
+- name: Deploy ovs-cni-plugin
+ k8s:
+ state: present
+ apply: yes
+ definition: "{{ lookup('file', 'ovs-daemonset.yml') }}"
+
+
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-multus.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-multus.yml
new file mode 100644
index 00000000..6fb77e42
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-multus.yml
@@ -0,0 +1,10 @@
+---
+
+- name: Clear Multus
+ include_tasks: clear-multus.yml
+
+- name: Deploy Multus
+ k8s:
+ state: present
+ definition: "{{ lookup('file', 'multus-daemonset.yml') }}"
+
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-sriov.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-sriov.yml
new file mode 100644
index 00000000..aaff5cf0
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-sriov.yml
@@ -0,0 +1,26 @@
+---
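+# Ordering matters here: the device plugin config map is applied first (with
+# wait enabled) so the plugin daemonset finds its config.json on startup.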
+
+- name: clean sriov
+ include_tasks: clear-sriov.yml
+
+- name: Deploy SRIOV Device Plugin Config
+ k8s:
+ state: present
+ apply: yes
+ definition: "{{ lookup('file', 'configMap-sriov-device-plugin.yaml') }}"
+ wait: yes
+
+- name: Deploy SRIOV Device Plugin
+ k8s:
+ state: present
+ apply: yes
+ definition: "{{ lookup('file', 'sriov-device-plugin-daemonset.yaml') }}"
+
+- name: Deploy SRIOV CNI
+ k8s:
+ state: present
+ apply: yes
+ definition: "{{ lookup('file', 'sriov-cni-daemonset.yaml') }}"
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-userspace.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-userspace.yml
new file mode 100644
index 00000000..32e3b9b1
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/deploy-userspace.yml
@@ -0,0 +1,13 @@
+---
+
+- name: Clean userspace-cni
+ include_tasks: clear-userspace.yml
+
+- name: Deploy userspace-cni plugin
+ k8s:
+ state: present
+ apply: yes
+ definition: "{{ lookup('file', 'userspace-daemonset.yml') }}"
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/foldersettings.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/foldersettings.yml
new file mode 100644
index 00000000..1a8c1879
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/foldersettings.yml
@@ -0,0 +1,10 @@
+---
+- name: .kube directory creation in $HOME/
+ file:
+ path: $HOME/.kube
+ state: directory
+
+- name: Copying required files
+ shell: |
+ sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
+ sudo chown $(id -u):$(id -g) $HOME/.kube/config
\ No newline at end of file
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/main.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/main.yml
new file mode 100644
index 00000000..28c3f501
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/main.yml
@@ -0,0 +1,83 @@
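+# Task flow: master init, worker join, then CNI plugin deployment. Run with
+# --tags deploy for a full install, --tags cni to redeploy the plugins, or
+# --tags clear to tear the cluster down.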
+- name: include master tasks
+ import_tasks: configure_master_node.yml
+ when: inventory_hostname in groups['master']
+ tags: deploy
+
+- name: include folder settings for kube config
+ import_tasks: foldersettings.yml
+ when: inventory_hostname in groups['master']
+ tags: deploy
+
+- name: include join worker tasks
+ import_tasks: workers.yml
+ when: inventory_hostname in groups['workers']
+ tags: deploy, join
+
+- name: cni pre-deploy
+ import_tasks: cni-pre-deploy.yml
+ tags: deploy, cni
+
+- name: deploy flannel
+ import_tasks: deploy-flannel.yml
+ when: inventory_hostname in groups['master']
+ tags: deploy, cni
+
+- name: clear flannel
+ import_tasks: clear-flannel.yml
+ when: inventory_hostname in groups['master']
+ tags: clear
+
+- name: deploy multus
+ import_tasks: deploy-multus.yml
+ when: inventory_hostname in groups['master']
+ tags: deploy, cni
+
+- name: clear multus
+ import_tasks: clear-multus.yml
+ when: inventory_hostname in groups['master']
+ tags: clear
+
+- name: deploy kubevirt-ovs
+ import_tasks: deploy-kubevirt-ovs.yml
+ when: inventory_hostname in groups['master']
+ tags: deploy, cni
+
+- name: clear kubevirt-ovs
+ import_tasks: clear-kubevirt-ovs.yml
+ when: inventory_hostname in groups['master']
+ tags: clear
+
+- name: deploy sriov
+ import_tasks: deploy-sriov.yml
+ when: inventory_hostname in groups['master']
+ tags: deploy, cni
+
+- name: clear sriov
+ import_tasks: clear-sriov.yml
+ when: inventory_hostname in groups['master']
+ tags: clear
+
+- name: deploy userspace
+ import_tasks: deploy-userspace.yml
+ when: inventory_hostname in groups['master']
+ tags: deploy, cni
+
+- name: clear userspace
+ import_tasks: clear-userspace.yml
+ when: inventory_hostname in groups['master']
+ tags: clear
+
+- name: drain and delete workers from master
+ import_tasks: clear-k8s-workers-drain.yml
+ when: inventory_hostname in groups['workers']
+ tags: clear
+
+- name: reset workers
+ import_tasks: clear-k8s-workers-reset.yml
+ when: inventory_hostname in groups['workers']
+ tags: clear
+
+- name: clear master
+ import_tasks: clear-k8s-master.yml
+ when: inventory_hostname in groups['master']
+ tags: clear
\ No newline at end of file
diff --git a/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/workers.yml b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/workers.yml
new file mode 100644
index 00000000..a0a815c4
--- /dev/null
+++ b/tools/k8s/cluster-deployment/k8scluster/roles/clustermanager/tasks/workers.yml
@@ -0,0 +1,15 @@
+---
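+# Joins a worker only when it is not already registered on the master; a
+# fresh join command is generated with kubeadm token create.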
+- name: check whether node is already in cluster
+ delegate_to: "{{ groups.master[0] }}"
+ command: "kubectl get nodes -n kube-system -o name"
+ register: get_node_register
+ changed_when: false
+
+- name: get join command
+ delegate_to: "{{ groups.master[0] }}"
+ command: kubeadm token create --print-join-command
+ register: join_command_raw
+
+- name: join cluster
+ shell: "sudo {{ join_command_raw.stdout_lines[0] }} --ignore-preflight-errors=all > $HOME/node_joined.txt"
+ when: ( 'node/' + ansible_hostname ) not in get_node_register.stdout_lines
diff --git a/tools/lma/ansible-client/ansible.cfg b/tools/lma/ansible-client/ansible.cfg
new file mode 100644
index 00000000..307ef457
--- /dev/null
+++ b/tools/lma/ansible-client/ansible.cfg
@@ -0,0 +1,17 @@
+[defaults]
+inventory = ./hosts
+host_key_checking = false
+
+# additional path to search for roles in
+roles_path = roles
+
+# enable logging
+log_path = ./ansible.log
+
+[privilege_escalation]
+become=True
+become_method=sudo
+become_user=root
+
+[ssh_connection]
+pipelining = True
diff --git a/tools/lma/ansible-client/hosts b/tools/lma/ansible-client/hosts
new file mode 100644
index 00000000..eba586ce
--- /dev/null
+++ b/tools/lma/ansible-client/hosts
@@ -0,0 +1,2 @@
+[all]
+127.0.0.1 ansible_connection=local
diff --git a/tools/lma/ansible-client/playbooks/clean.yaml b/tools/lma/ansible-client/playbooks/clean.yaml
new file mode 100644
index 00000000..4f77b062
--- /dev/null
+++ b/tools/lma/ansible-client/playbooks/clean.yaml
@@ -0,0 +1,25 @@
+# Copyright 2020 Adarsh yadav, Aditya Srivastava
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#clean td-agent
+- name: clean td-agent
+ hosts: all
+ roles:
+ - clean-td-agent
+
+#clean collectd
+- name: clean collectd
+ hosts: all
+ roles:
+ - clean-collectd
diff --git a/tools/lma/ansible-client/playbooks/setup.yaml b/tools/lma/ansible-client/playbooks/setup.yaml
new file mode 100644
index 00000000..c79ee347
--- /dev/null
+++ b/tools/lma/ansible-client/playbooks/setup.yaml
@@ -0,0 +1,28 @@
+# Copyright 2020 Adarsh yadav, Aditya Srivastava
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#setup td-agent
+- name: setup td-agent
+ hosts: all
+ roles:
+ - td-agent
+
+- name: setup collectd
+ hosts: all
+ vars_prompt:
+ - name: host_name
+ prompt: "Enter host_name for collectd configuration"
+ private: no
+ roles:
+ - collectd
diff --git a/tools/lma/ansible-client/roles/clean-collectd/tasks/main.yml b/tools/lma/ansible-client/roles/clean-collectd/tasks/main.yml
new file mode 100644
index 00000000..97100cad
--- /dev/null
+++ b/tools/lma/ansible-client/roles/clean-collectd/tasks/main.yml
@@ -0,0 +1,44 @@
+# Copyright 2020 Aditya Srivastava
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+- name: Check and install dependencies
+ yum:
+ name: docker
+ state: present
+
+- name: Install python sdk
+ yum:
+ name: python-docker-py
+ state: present
+
+- name: Stopping collectd container
+ docker_container:
+ name: collectd
+ state: stopped
+
+- name: Removing collectd container
+ docker_container:
+ name: collectd
+ state: absent
+
+ # Removes the image (not recommended)
+ # - name: Remove image
+ # docker_image:
+ # state: absent
+ # name: opnfv/barometer-collectd
+ # tag: latest
diff --git a/tools/lma/ansible-client/roles/clean-td-agent/tasks/main.yml b/tools/lma/ansible-client/roles/clean-td-agent/tasks/main.yml
new file mode 100644
index 00000000..7c59c698
--- /dev/null
+++ b/tools/lma/ansible-client/roles/clean-td-agent/tasks/main.yml
@@ -0,0 +1,28 @@
+# Copyright 2020 Adarsh yadav, Aditya Srivastava
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+#TD-agent uninstallation
+- name: TD-agent Uninstallation
+ yum:
+ name: td-agent
+ state: absent
+
+- name: removing folder
+ file:
+ path: "{{ item }}"
+ state: absent
+ with_items:
+ - /etc/td-agent/
+ - /var/log/td-agent/
diff --git a/tools/lma/ansible-client/roles/collectd/files/collectd.conf.j2 b/tools/lma/ansible-client/roles/collectd/files/collectd.conf.j2
new file mode 100644
index 00000000..ba953e3a
--- /dev/null
+++ b/tools/lma/ansible-client/roles/collectd/files/collectd.conf.j2
@@ -0,0 +1,44 @@
+Hostname "{{ host_name }}"
+Interval 10
+LoadPlugin intel_rdt
+LoadPlugin processes
+LoadPlugin interface
+LoadPlugin network
+LoadPlugin ovs_stats
+LoadPlugin cpu
+LoadPlugin memory
+#LoadPlugin csv
+#LoadPlugin write_http
+#LoadPlugin dpdkstat
+##############################################################################
+# Plugin configuration #
+##############################################################################
+<Plugin processes>
+ ProcessMatch "ovs-vswitchd" "ovs-vswitchd"
+ ProcessMatch "ovsdb-server" "ovsdb-server"
+ ProcessMatch "collectd" "collectd"
+</Plugin>
+
+<Plugin cpu>
+ ReportByCpu true
+ ReportByState true
+ ValuesPercentage true
+ ReportNumCpu true
+ ReportGuestState false
+ SubtractGuestState false
+</Plugin>
+
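+# Forward metrics to the collectd listener on the LMA server node;
+# 10.10.120.211:30826 is specific to this testbed and should be adjusted to
+# the monitoring cluster's address and NodePort.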
+<Plugin network>
+ Server "10.10.120.211" "30826"
+</Plugin>
+
+<Plugin ovs_stats>
+ Port "6640"
+ Address "127.0.0.1"
+ Socket "/usr/local/var/run/openvswitch/db.sock"
+ Bridges "vsperf-br0"
+</Plugin>
+
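+# Core groups monitored by intel_rdt; adjust to the PMD/VNF core layout of
+# the system under test.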
+<Plugin "intel_rdt">
+ Cores "2" "4-5" "6-7" "8" "9" "22" "23" "24" "25" "26" "27"
+</Plugin>
diff --git a/tools/lma/ansible-client/roles/collectd/tasks/main.yml b/tools/lma/ansible-client/roles/collectd/tasks/main.yml
new file mode 100644
index 00000000..0befb22b
--- /dev/null
+++ b/tools/lma/ansible-client/roles/collectd/tasks/main.yml
@@ -0,0 +1,60 @@
+# Copyright 2020 Aditya Srivastava
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+
+# Dependency check
+- name: Check and install dependencies
+ yum:
+ name: ['docker', 'python-docker-py']
+ state: present
+
+- name: Install pip
+ yum:
+ name: python-pip
+ state: present
+
+- name: install docker-py
+ pip: name=docker-py
+
+- name: Cloning barometer
+ git:
+ repo: https://gerrit.opnfv.org/gerrit/barometer
+ dest: /tmp/barometer
+
+- name: Create Folder
+ file:
+ path: /tmp/barometer/docker/src/collectd_sample_configs
+ state: directory
+
+# Build collectd
+- name: Download and build image
+ command: chdir=/tmp/ {{ item }}
+ become: true
+ with_items:
+ - docker build -t opnfv/barometer-collectd -f barometer/docker/barometer-collectd/Dockerfile barometer/docker/barometer-collectd
+
+# Configuring collectd
+- name: Ensure collectd is configured
+ template:
+ src: ../files/collectd.conf.j2
+ dest: /tmp/barometer/docker/src/collectd_sample_configs/collectd.conf
+
+# Running Collectd container #####################
+- name: Running collectd
+ command: chdir=/tmp/ {{ item }}
+ become: true
+ with_items:
+ - docker run -tid --name collectd --net=host -v /tmp/barometer/docker/src/collectd_sample_configs:/opt/collectd/etc/collectd.conf.d -v /var/run:/var/run -v /tmp:/tmp --privileged opnfv/barometer-collectd /run_collectd.sh
+ - docker ps
diff --git a/tools/lma/ansible-client/roles/td-agent/files/td-agent.conf b/tools/lma/ansible-client/roles/td-agent/files/td-agent.conf
new file mode 100644
index 00000000..9d656e65
--- /dev/null
+++ b/tools/lma/ansible-client/roles/td-agent/files/td-agent.conf
@@ -0,0 +1,63 @@
+# Copyright 2020 Adarsh yadav, Aditya Srivastava
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
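+# Tail VSPERF result files and pod logs, tag them log.test, and forward them
+# to the fluentd aggregator on the LMA server (10.10.120.211:32224).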
+<source>
+ @type tail
+ path /tmp/result*/*.log, /tmp/result*/*.dat, /tmp/result*/*.csv, /tmp/result*/stc-liveresults.dat.*, /var/log/userspace*.log, /var/log/sriovdp/*.log.*, /var/log/pods/**/*.log
+ path_key log_path
+# read_from_head true
+
+ <parse>
+ @type regexp
+ expression ^(?<msg>.*)$
+ </parse>
+
+ tag log.test
+</source>
+
+<filter log.test>
+ @type record_transformer
+ enable_ruby
+ <record>
+ host "#{Socket.gethostname}"
+ </record>
+</filter>
+
+
+<filter log.test>
+ @type parser
+ key_name log_path
+ reserve_data true
+ <parse>
+ @type regexp
+ expression /.*\/(?<file>.*)/
+ </parse>
+</filter>
+
+<match log.test>
+ @type copy
+
+ <store>
+ @type forward
+ send_timeout 10s
+ <server>
+ host 10.10.120.211
+ port 32224
+ </server>
+ </store>
+
+ <store>
+ @type stdout
+ </store>
+</match>
\ No newline at end of file
diff --git a/tools/lma/ansible-client/roles/td-agent/tasks/main.yml b/tools/lma/ansible-client/roles/td-agent/tasks/main.yml
new file mode 100644
index 00000000..c7f50765
--- /dev/null
+++ b/tools/lma/ansible-client/roles/td-agent/tasks/main.yml
@@ -0,0 +1,30 @@
+# Copyright 2020 Adarsh yadav, Aditya Srivastava
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+#TD-agent setup
+- name: TD-agent installation
+ shell: curl -L https://toolbelt.treasuredata.com/sh/install-redhat-td-agent4.sh | sh
+
+#replace the config file
+- name: Replace the default td-agent config file
+ copy:
+ src: ../files/td-agent.conf
+ dest: /etc/td-agent/td-agent.conf
+
+#start the service
+- name: Starting and Enabling the TD-agent services
+ service:
+ name: td-agent
+ state: started
+ enabled: yes
diff --git a/tools/lma/ansible-server/ansible.cfg b/tools/lma/ansible-server/ansible.cfg
new file mode 100644
index 00000000..307ef457
--- /dev/null
+++ b/tools/lma/ansible-server/ansible.cfg
@@ -0,0 +1,17 @@
+[defaults]
+inventory = ./hosts
+host_key_checking = false
+
+# additional path to search for roles in
+roles_path = roles
+
+# enable logging
+log_path = ./ansible.log
+
+[privilege_escalation]
+become=True
+become_method=sudo
+become_user=root
+
+[ssh_connection]
+pipelining = True
diff --git a/tools/lma/ansible-server/group_vars/all.yml b/tools/lma/ansible-server/group_vars/all.yml
new file mode 100644
index 00000000..b0725ff5
--- /dev/null
+++ b/tools/lma/ansible-server/group_vars/all.yml
@@ -0,0 +1,27 @@
+# Copyright 2020 Adarsh yadav, Aditya Srivastava
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#apiserver advertise address
+ad_addr: 10.10.120.211
+
+#pod network cidr
+pod_cidr: 192.168.0.0/16
+
+#token generated by master
+token_file: join_token
+
+#hostnames of the cluster nodes
+vm3: 'vm3'
+vm2: 'vm2'
+vm1: 'vm1'
diff --git a/tools/lma/ansible-server/hosts b/tools/lma/ansible-server/hosts
new file mode 100644
index 00000000..0a13d754
--- /dev/null
+++ b/tools/lma/ansible-server/hosts
@@ -0,0 +1,12 @@
+[all]
+10.10.120.211 ansible_connection=ssh ansible_ssh_user=root ansible_sudo_pass=P@ssw0rd ansible_ssh_pass=P@ssw0rd
+10.10.120.203 ansible_connection=ssh ansible_ssh_user=root ansible_sudo_pass=P@ssw0rd ansible_ssh_pass=P@ssw0rd
+10.10.120.204 ansible_connection=ssh ansible_ssh_user=root ansible_sudo_pass=P@ssw0rd ansible_ssh_pass=P@ssw0rd
+
+
+[master]
+10.10.120.211 ansible_connection=ssh ansible_ssh_user=root ansible_sudo_pass=P@ssw0rd ansible_ssh_pass=P@ssw0rd
+
+[worker-nodes]
+10.10.120.203 ansible_connection=ssh ansible_ssh_user=root ansible_sudo_pass=P@ssw0rd ansible_ssh_pass=P@ssw0rd
+10.10.120.204 ansible_connection=ssh ansible_ssh_user=root ansible_sudo_pass=P@ssw0rd ansible_ssh_pass=P@ssw0rd
\ No newline at end of file
diff --git a/tools/lma/ansible-server/playbooks/clean.yaml b/tools/lma/ansible-server/playbooks/clean.yaml
new file mode 100644
index 00000000..b4da66da
--- /dev/null
+++ b/tools/lma/ansible-server/playbooks/clean.yaml
@@ -0,0 +1,52 @@
+# Copyright 2020 Adarsh yadav, Aditya Srivastava
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# clean monitoring
+- name: Clean PAG setup
+ hosts: master
+ roles:
+ - clean-monitoring
+
+#clean logging
+- name: Clean EFK setup
+ hosts: master
+ roles:
+ - clean-logging
+
+#IF KUBELET IS RUNNING THEN RUN THIS
+#clean k8s cluster
+- name: Clean k8s cluster
+ hosts: master
+ roles:
+ - clean-k8s-cluster
+
+#reset worker-nodes
+- name: Reset worker-nodes
+ hosts: worker-nodes
+ roles:
+ - clean-k8s-worker-reset
+
+#uninstall pre-requisites for k8s
+- name: uninstall pre-requisites for k8s
+ hosts: all
+ roles:
+ - clean-k8s-pre
+
+#*************************************************************************************************************
+#THIS WILL DELETE DATA OF ELASTICSEARCH
+#*************************************************************************************************************
+# - name: Clean nfs server
+# hosts: all
+# roles:
+# - clean-nfs
diff --git a/tools/lma/ansible-server/playbooks/setup.yaml b/tools/lma/ansible-server/playbooks/setup.yaml
new file mode 100644
index 00000000..1f5ed1f5
--- /dev/null
+++ b/tools/lma/ansible-server/playbooks/setup.yaml
@@ -0,0 +1,44 @@
+# Copyright 2020 Adarsh yadav, Aditya Srivastava
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#Pre-requisites for k8s and NFS server
+- name: Pre-requisites for k8s in all the nodes
+ hosts: all
+ roles:
+ - k8s-pre
+ - nfs
+
+#master setup for k8s
+- name: master setup for k8s
+ hosts: master
+ roles:
+ - k8s-master
+
+#worker setup for k8s
+- name: worker setup for k8s
+ hosts: worker-nodes
+ roles:
+ - k8s-worker
+
+#EFK setup in k8s
+- name: EFK setup in k8s
+ hosts: master
+ roles:
+ - logging
+
+#PAG setup in k8s
+- name: PAG setup in k8s
+ hosts: master
+ roles:
+ - monitoring
diff --git a/tools/lma/ansible-server/roles/clean-k8s-cluster/tasks/main.yml b/tools/lma/ansible-server/roles/clean-k8s-cluster/tasks/main.yml
new file mode 100644
index 00000000..83ac086d
--- /dev/null
+++ b/tools/lma/ansible-server/roles/clean-k8s-cluster/tasks/main.yml
@@ -0,0 +1,34 @@
+# Copyright 2020 Adarsh yadav, Aditya Srivastava
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+#check kubelet is running or not
+- name: check for kubelet
+ shell: "systemctl status kubelet"
+ register: _svc_kubelet
+ failed_when: _svc_kubelet.rc != 0 and ("could not be found" not in _svc_kubelet.stderr)
+
+#IF KUBELET IS RUNNING, THEN
+#reset k8s
+- name: reset k8s
+ shell: |
+ kubectl drain {{vm3}} --delete-local-data --force --ignore-daemonsets
+ kubectl drain {{vm2}} --delete-local-data --force --ignore-daemonsets
+ kubectl drain {{vm1}} --delete-local-data --force --ignore-daemonsets
+ kubectl delete node {{vm3}}
+ kubectl delete node {{vm2}}
+ kubectl delete node {{vm1}}
+ sudo kubeadm reset -f
+ sudo rm $HOME/.kube/config
+ when: "_svc_kubelet.rc == 0"
+
diff --git a/tools/lma/ansible-server/roles/clean-k8s-pre/tasks/main.yml b/tools/lma/ansible-server/roles/clean-k8s-pre/tasks/main.yml
new file mode 100644
index 00000000..6d12bd5f
--- /dev/null
+++ b/tools/lma/ansible-server/roles/clean-k8s-pre/tasks/main.yml
@@ -0,0 +1,65 @@
+# Copyright 2020 Adarsh yadav, Aditya Srivastava
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+
+#Uninstalling K8s
+- name: Uninstalling K8s
+ yum:
+ name: ['kubeadm', 'kubectl', 'kubelet', 'docker-ce']
+ state: absent
+
+#Enabling Swap
+- name: Enabling Swap on all nodes
+ shell: swapon -a
+ ignore_errors: yes
+
+#Uncommenting Swap entries
+- name: Uncommenting Swap entries in /etc/fstab
+ replace:
+ path: /etc/fstab
+ regexp: '^# /(.*swap.*)'
+ replace: '/\1'
+
+
+#Starting firewalld
+- name: 'Starting firewall'
+ service:
+ name: firewalld
+ state: started
+ enabled: yes
+
+# Enabling SELinux
+- name: Enabling SELinux on all nodes
+ shell: |
+ setenforce 1
+ sudo sed -i 's/^SELINUX=permissive$/SELINUX=enforcing/' /etc/selinux/config
+
+#removing Docker repo
+- name: removing Docker repo
+ command: yum-config-manager --disable docker-ce-stable
+
+#removing K8s repo
+- name: removing repository details in Kubernetes repo file.
+ blockinfile:
+ path: /etc/yum.repos.d/kubernetes.repo
+ state: absent
+ block: |
+ [kubernetes]
+ name=Kubernetes
+ baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
+ enabled=1
+ gpgcheck=1
+ repo_gpgcheck=1
+ gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
+ https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
diff --git a/tools/lma/ansible-server/roles/clean-k8s-worker-reset/tasks/main.yml b/tools/lma/ansible-server/roles/clean-k8s-worker-reset/tasks/main.yml
new file mode 100644
index 00000000..3ba9c9ea
--- /dev/null
+++ b/tools/lma/ansible-server/roles/clean-k8s-worker-reset/tasks/main.yml
@@ -0,0 +1,26 @@
+# Copyright 2020 Adarsh yadav, Aditya Srivastava
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+#check kubelet is running or not
+- name: check for kubelet
+ shell: "systemctl status kubelet"
+ register: _svc_kubelet
+ failed_when: _svc_kubelet.rc != 0 and ("could not be found" not in _svc_kubelet.stderr)
+
+#IF KUBELET IS RUNNING, THEN
+#reset k8s
+- name: reset k8s
+ command: kubeadm reset -f
+ when: "_svc_kubelet.rc == 0"
+
diff --git a/tools/lma/ansible-server/roles/clean-logging/tasks/main.yml b/tools/lma/ansible-server/roles/clean-logging/tasks/main.yml
new file mode 100644
index 00000000..259065ed
--- /dev/null
+++ b/tools/lma/ansible-server/roles/clean-logging/tasks/main.yml
@@ -0,0 +1,193 @@
+# Copyright 2020 Adarsh yadav
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+#Deleting EFK setup from k8s cluster
+
+#check kubelet is running or not
+- name: check for kubelet
+ shell: "systemctl status kubelet"
+ register: _svc_kubelet
+ failed_when: _svc_kubelet.rc != 0 and ("could not be found" not in _svc_kubelet.stderr)
+
+#***********************************************************************************************************
+#copy all yaml to /tmp/files/
+#***********************************************************************************************************
+- name: copy all yaml to /tmp/files/
+ copy:
+ src: ../../logging/files/
+ dest: /tmp/files/
+
+#***********************************************************************************************************
+#IF KUBELET IS RUNNING, THEN
+#Stop elastalert
+#***********************************************************************************************************
+- name: Delete elastalert config configmap
+ k8s:
+ state: absent
+ src: /tmp/files/elastalert/ealert-conf-cm.yaml
+ namespace: logging
+ when: "_svc_kubelet.rc == 0"
+
+- name: Delete elastalert key configmap
+ k8s:
+ state: absent
+ src: /tmp/files/elastalert/ealert-key-cm.yaml
+ namespace: logging
+ when: "_svc_kubelet.rc == 0"
+
+- name: Delete elastalert rule configmap
+ k8s:
+ state: absent
+ src: /tmp/files/elastalert/ealert-rule-cm.yaml
+ namespace: logging
+ when: "_svc_kubelet.rc == 0"
+
+- name: Delete elastalert pod
+ k8s:
+ state: absent
+ src: /tmp/files/elastalert/elastalert.yaml
+ namespace: logging
+ when: "_svc_kubelet.rc == 0"
+
+#***********************************************************************************************************
+#IF KUBELET IS RUNNING, THEN
+#Stop fluentd
+#***********************************************************************************************************
+
+- name: Delete fluentd service
+ k8s:
+ state: absent
+ src: /tmp/files/fluentd/fluent-service.yaml
+ namespace: logging
+ when: "_svc_kubelet.rc == 0"
+
+- name: Delete fluentd configmap
+ k8s:
+ state: absent
+ src: /tmp/files/fluentd/fluent-cm.yaml
+ namespace: logging
+ when: "_svc_kubelet.rc == 0"
+
+- name: Delete fluentd pod
+ k8s:
+ state: absent
+ src: /tmp/files/fluentd/fluent.yaml
+ namespace: logging
+ when: "_svc_kubelet.rc == 0"
+
+#***********************************************************************************************************
+#IF KUBELET IS RUNNING, THEN
+#Stop nginx
+#***********************************************************************************************************
+- name: Delete nginx service
+ k8s:
+ state: absent
+ src: /tmp/files/nginx/nginx-service.yaml
+ namespace: logging
+ when: "_svc_kubelet.rc == 0"
+
+- name: Delete nginx configmap
+ k8s:
+ state: absent
+ src: /tmp/files/nginx/nginx-conf-cm.yaml
+ namespace: logging
+ when: "_svc_kubelet.rc == 0"
+
+- name: Delete nginx key configmap
+ k8s:
+ state: absent
+ src: /tmp/files/nginx/nginx-key-cm.yaml
+ namespace: logging
+ when: "_svc_kubelet.rc == 0"
+
+- name: Delete nginx pod
+ k8s:
+ state: absent
+ src: /tmp/files/nginx/nginx.yaml
+ namespace: logging
+ when: "_svc_kubelet.rc == 0"
+
+#***********************************************************************************************************
+#IF KUBELET IS RUNNING, THEN
+#Stop Kibana
+#***********************************************************************************************************
+- name: Stopping Kibana
+ k8s:
+ state: absent
+ src: /tmp/files/kibana/kibana.yaml
+ namespace: logging
+ ignore_errors: yes
+ when: "_svc_kubelet.rc == 0"
+
+#***********************************************************************************************************
+#IF KUBELET IS RUNNING, THEN
+#Stop Elasticsearch
+#***********************************************************************************************************
+- name: Stopping Elasticsearch
+ k8s:
+ state: absent
+ src: /tmp/files/elasticsearch/elasticsearch.yaml
+ namespace: logging
+ ignore_errors: yes
+ when: "_svc_kubelet.rc == 0"
+
+#***********************************************************************************************************
+#IF KUBELET IS RUNNING, THEN
+#Stop Elasticsearch operator
+#***********************************************************************************************************
+- name: Stopping Elasticsearch operator
+ shell: kubectl delete -f https://download.elastic.co/downloads/eck/1.2.0/all-in-one.yaml
+ ignore_errors: yes
+ when: "_svc_kubelet.rc == 0"
+
+#***********************************************************************************************************
+#IF KUBELET IS RUNNING, THEN
+#Delete Persistent Volume
+#***********************************************************************************************************
+- name: Deleting Persistent Volume
+ k8s:
+ state: absent
+ src: /tmp/files/persistentVolume.yaml
+ namespace: logging
+ when: "_svc_kubelet.rc == 0"
+
+#***********************************************************************************************************
+#IF KUBELET IS RUNNING, THEN
+#Delete Storage Class
+#***********************************************************************************************************
+- name: Deleting Storage Class
+ k8s:
+ state: absent
+ src: /tmp/files/storageClass.yaml
+ namespace: logging
+ when: "_svc_kubelet.rc == 0"
+
+#***********************************************************************************************************
+#IF KUBELET IS RUNNING, THEN
+#Delete Namespace
+#***********************************************************************************************************
+- name: Deleting Namespace
+ k8s:
+ state: absent
+ src: /tmp/files/namespace.yaml
+ namespace: logging
+ when: "_svc_kubelet.rc == 0"
+
+#***********************************************************************************************************
+#removing /tmp/files
+#***********************************************************************************************************
+- name: Removing /tmp/files
+ file:
+ path: "/tmp/files"
+ state: absent
diff --git a/tools/lma/ansible-server/roles/clean-monitoring/tasks/main.yml b/tools/lma/ansible-server/roles/clean-monitoring/tasks/main.yml
new file mode 100644
index 00000000..49943ec0
--- /dev/null
+++ b/tools/lma/ansible-server/roles/clean-monitoring/tasks/main.yml
@@ -0,0 +1,48 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+#Deleting PAG setup from k8s cluster
+
+#check kubelet is running or not
+- name: check for kubelet
+ shell: "systemctl status kubelet"
+ register: _svc_kubelet
+ failed_when: _svc_kubelet.rc != 0 and ("could not be found" not in _svc_kubelet.stderr)
+
+#***********************************************************************************************************
+#copy yaml to /tmp/files/
+#***********************************************************************************************************
+- name: copy namespace yaml to /tmp/files/
+ copy:
+ src: ../../monitoring/files/monitoring-namespace.yaml
+ dest: /tmp/monitoring-namespace.yaml
+
+#***********************************************************************************************************
+#Deleting Namespace
+#***********************************************************************************************************
+- name: Deleting Namespace
+ k8s:
+ state: absent
+ src: /tmp/monitoring-namespace.yaml
+ namespace: monitoring
+ when: "_svc_kubelet.rc == 0"
+
+#***********************************************************************************************************
+#removing /tmp/files
+#***********************************************************************************************************
+- name: Removing /tmp/monitoring-namespace.yaml
+ file:
+ path: "/tmp/monitoring-namespace.yaml"
+ state: absent
diff --git a/tools/lma/ansible-server/roles/clean-nfs/tasks/main.yml b/tools/lma/ansible-server/roles/clean-nfs/tasks/main.yml
new file mode 100644
index 00000000..157db849
--- /dev/null
+++ b/tools/lma/ansible-server/roles/clean-nfs/tasks/main.yml
@@ -0,0 +1,44 @@
+# Copyright 2020 Adarsh yadav, Aditya Srivastava
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+#Edit /etc/exports
+- name: Edit /etc/exports file for NFS
+ lineinfile:
+ path: /etc/exports
+ line: "{{item.line}}"
+ state: absent
+ with_items:
+ - {line: "/srv/nfs/master *(rw,sync,no_root_squash,no_subtree_check)"}
+ - {line: "/srv/nfs/data *(rw,sync,no_root_squash,no_subtree_check)"}
+ - {line: "/usr/share/monitoring_data/grafana *(rw,sync,no_root_squash,no_subtree_check)"}
+
+#uninstall NFS server
+- name: Uninstalling NFS server utils
+ yum:
+ name: nfs-utils
+ state: absent
+
+#remove Elasticsearch data
+- name: Removing Directory for elasticsearch
+ file:
+ path: "/srv/nfs/{{item}}"
+ state: absent
+ with_items:
+ - ['data', 'master']
+
+#remove Grafana data
+- name: Removing Directory for grafana
+ file:
+ path: "/usr/share/monitoring_data/grafana"
+ state: absent
diff --git a/tools/lma/ansible-server/roles/k8s-master/tasks/main.yml b/tools/lma/ansible-server/roles/k8s-master/tasks/main.yml
new file mode 100644
index 00000000..edc8f10b
--- /dev/null
+++ b/tools/lma/ansible-server/roles/k8s-master/tasks/main.yml
@@ -0,0 +1,49 @@
+# Copyright 2020 Adarsh yadav, Aditya Srivastava
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+#pull k8s images
+- name: Pulling images required for setting up a Kubernetes cluster
+ shell: kubeadm config images pull
+
+#reset k8s
+- name: Resetting kubeadm
+ shell: kubeadm reset -f
+
+#init k8s
+- name: Initializing Kubernetes cluster
+ shell: kubeadm init --apiserver-advertise-address {{ad_addr}} --pod-network-cidr={{pod_cidr}}
+
+#Copying required files
+- name: Copying required files
+ shell: |
+ mkdir -p $HOME/.kube
+ sudo cp -f /etc/kubernetes/admin.conf $HOME/.kube/config
+ sudo chown $(id -u):$(id -g) $HOME/.kube/config
+
+#get token
+- name: Storing the join command for later use
+ shell: kubeadm token create --print-join-command
+ register: token
+
+#save token to join worker
+- name: Storing token for worker
+ local_action: copy content={{ token.stdout }} dest={{ token_file }}
+
+#install calico
+- name: Install Network Add-on
+ command: kubectl apply -f https://docs.projectcalico.org/v3.11/manifests/calico.yaml
+
+#Remove the master taint so pods can be scheduled on the master too
+- name: Untaint master
+ command: kubectl taint nodes --all node-role.kubernetes.io/master-
diff --git a/tools/lma/ansible-server/roles/k8s-pre/tasks/main.yml b/tools/lma/ansible-server/roles/k8s-pre/tasks/main.yml
new file mode 100644
index 00000000..95526a28
--- /dev/null
+++ b/tools/lma/ansible-server/roles/k8s-pre/tasks/main.yml
@@ -0,0 +1,72 @@
+# Copyright 2020 Adarsh yadav, Aditya Srivastava
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+
+#Disabling Swap
+- name: Disabling Swap on all nodes
+ shell: swapoff -a
+
+#Commenting Swap entries
+- name: Commenting Swap entries in /etc/fstab
+ replace:
+ path: /etc/fstab
+ regexp: '(^/.*swap*)'
+ replace: '# \1'
+
+#Stopping firewalld
+- name: 'Stopping firewall'
+ service:
+ name: firewalld
+ state: stopped
+ enabled: no
+
+#Disabling SELinux
+- name: Disabling SELinux on all nodes
+ shell: |
+ setenforce 0
+ sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
+
+#Adding Docker repo
+- name: Adding Docker repository
+ shell: yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
+
+#Adding K8s repo
+- name: Adding repository details in Kubernetes repo file.
+ blockinfile:
+ path: /etc/yum.repos.d/kubernetes.repo
+ block: |
+ [kubernetes]
+ name=Kubernetes
+ baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
+ enabled=1
+ gpgcheck=1
+ repo_gpgcheck=1
+ gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
+ https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
+
+#installing K8s and Docker
+- name: Installing K8s
+ yum:
+ name: ['kubeadm', 'kubectl', 'kubelet', 'docker-ce']
+ state: present
+
+#Starting docker and kubelet services
+- name: Starting and Enabling the required services
+ service:
+ name: "{{ item }}"
+ state: started
+ enabled: yes
+ with_items:
+ - docker
+ - kubelet
diff --git a/tools/lma/ansible-server/roles/k8s-worker/tasks/main.yml b/tools/lma/ansible-server/roles/k8s-worker/tasks/main.yml
new file mode 100644
index 00000000..89d2b373
--- /dev/null
+++ b/tools/lma/ansible-server/roles/k8s-worker/tasks/main.yml
@@ -0,0 +1,24 @@
+# Copyright 2020 Adarsh yadav, Aditya Srivastava
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+#Worker
+
+- name: Copying token to worker nodes
+ copy: src={{ token_file }} dest=join_token
+
+- name: Joining worker nodes with kubernetes master
+ shell: |
+ kubeadm reset -f
+ cat join_token | tail -1 > out.sh
+ sh out.sh
diff --git a/tools/lma/ansible-server/roles/logging/files/elastalert/ealert-conf-cm.yaml b/tools/lma/ansible-server/roles/logging/files/elastalert/ealert-conf-cm.yaml
new file mode 100644
index 00000000..a320ef75
--- /dev/null
+++ b/tools/lma/ansible-server/roles/logging/files/elastalert/ealert-conf-cm.yaml
@@ -0,0 +1,48 @@
+# Copyright 2020 Adarsh yadav
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: elastalert-config
+data:
+ elastalert.yaml: |
+ # This is the folder that contains the rule yaml files
+ # Any .yaml file will be loaded as a rule
+ rules_folder: rules
+ # How often ElastAlert will query Elasticsearch
+ # The unit can be anything from weeks to seconds
+ run_every:
+ minutes: 1
+ # ElastAlert will buffer results from the most recent
+ # period of time, in case some log sources are not in real time
+ buffer_time:
+ minutes: 15
+
+ scan_subdirectories: false
+
+ # The Elasticsearch hostname for metadata writeback
+ # Note that every rule can have its own Elasticsearch host
+ es_host: logging-es-http
+ es_port: 9200
+ es_username: ${ES_USERNAME}
+ es_password: ${ES_PASSWORD}
+ es_conn_timeout: 120
+ verify_certs: False
+ use_ssl: True
+ client_cert: '/opt/elastalert/key/elastalert.pem'
+ client_key: '/opt/elastalert/key/elastalert.key'
+ writeback_index: elastalert_status
+ writeback_alias: elastalert_alerts
+ alert_time_limit:
+ days: 2
diff --git a/tools/lma/ansible-server/roles/logging/files/elastalert/ealert-key-cm.yaml b/tools/lma/ansible-server/roles/logging/files/elastalert/ealert-key-cm.yaml
new file mode 100644
index 00000000..0c606a9c
--- /dev/null
+++ b/tools/lma/ansible-server/roles/logging/files/elastalert/ealert-key-cm.yaml
@@ -0,0 +1,68 @@
+# Copyright 2020 Adarsh yadav
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: elastalert-key
+data:
+ elastalert.key: |
+ -----BEGIN PRIVATE KEY-----
+ MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQC0uQ+B0gy3VB4w
+ 5CeWOx575lqSUuYvrGW3ILpV1gmj0ZZCMZUGvt4UvaCEaNPIAqNaHPmaslQqJb5C
+ PJH9pMN7vUVp3DACzmYrS4HdROHamn5gjebXs4hq43heLaIB1Kb+4F+7sEY88irK
+ xOevadcN35y5ld7lVUGRsj6JYcweaAeh/YZ/HaBT5RfdGF+x07NDus+mFqT8j3PD
+ rs2+JtEvEoWtjcxwFgloc9GkHsWZoV1AQHgyAWjmDXZtZeV0HQSkl7hWFG9vxTni
+ DvdrdhX0g+D+u8jWnlR4Za4jd64KbTp9C9trSHyMSRIvN5obm/H8O5MQ+sZ+NQ0X
+ PdK92MjbAgMBAAECggEASbRPxrpLxVjhFz91haeGvzErLxHwHvFIam9Gj0tDkzQe
+ +9AM3ztohzzvAhFejevFgzLd+WFRQf8yoQDi6XcQ4p5GeO38Bqj2siGRTRSSp/zq
+ HabBxqbJtA4hQQeLUwPPN5N6d6lke+an3RqBAuE/e8D+whGFXjJvE2SGbLEd9if2
+ uzHj37sPsVi8kRvgZBDOozmt7YFzQVO/1V+4Lw6nz48M3t+hOHaUXY0Yd8nsk5A6
+ kgoDQ4CGUHjtWfSrccZrYNk51Zows9/sX8axfJ94wKJSImWJcuW9PXIQhzT4exnH
+ sPOwY6Noy3nXRk9gcchT60fKpp+tsJZk3ezkwSpgwQKBgQDvsaYcbnIVdFZpaNKF
+ Tmt/w60CmfGeNozRygfi84ot7edUf93cB6WSKChcAE8fbq9Ji5USPNtfbnZfFXsI
+ IyTr2KHW3RkHuDEyu+Lan9JuReEH3QOG83vvN/oYA3J3hqUTCjEGkPjqnoFtdk8L
+ f7WH1jZvXYEMo0C48SXo+yGohQKBgQDBBGkzL928j1QB9NfiNFk70EalDsF8Im2W
+ n8bQ54KYspUybKD/Hmw0jIV7kdu2vhgGC4RYkn9c5qATtulbYJUgUBelaSi0vhXT
+ gfAuO+JIIZ50P+mkkxH/KIUyu1xWUB2jtMulqLLomdoBvfp/u51qCY6fT3WMCB+R
+ ouWLr2oZ3wKBgQCAuas4AaiLFRuDKKRGq0LYLsIvb3VvPmSKFjH+FETVPbrKipEf
+ pYup3p8uKYxUmSDSIoBAdyZpLe2sSuD0Ecu2TXU86yiSGL1zPawrNUHRrv2XN365
+ bvHUGv/Y/aDvyAPHIeYKXLkRZ2ai3rK8vi1Dcitxy4mOu+36ZKezY4tD8QKBgQCd
+ hakJUj4nPd20fwqUnF5a1z5gRGuZkEtZiunp4ZaOYegrL8YwjraGKExjrYTfXcIj
+ ZNDMrDpvKfRoQnWt0mPB7DtwDiNfZmZPqBLI2Kxya6VygBqA6lncoEgcQBY6hsW5
+ rbopZ0UjWTQ3CcFe71GnkUcpMuLetl51L7kgR7dShwKBgQC+vqjhe/h081JGLTo1
+ tKeRUCaDA/V3VHjFKgM5g+S3/KzgU/EaB1rq3Qja1quGv0zHveca3zibdNQi1ENm
+ KSutWh2zQXzzvmycPmVcthhOxaKzRXDjG0mXiA0bnSgK3F2o9t4196RYhIiiSvAH
+ shVjZMTK04h8ciTLIqK/GtZr+g==
+ -----END PRIVATE KEY-----
+ elastalert.pem: |
+ -----BEGIN CERTIFICATE-----
+ MIIDVzCCAj+gAwIBAgIJAORgkR7Y0Nk9MA0GCSqGSIb3DQEBCwUAMEIxCzAJBgNV
+ BAYTAlhYMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQg
+ Q29tcGFueSBMdGQwHhcNMjAwNjI4MTM1NjAwWhcNMjEwNjI4MTM1NjAwWjBCMQsw
+ CQYDVQQGEwJYWDEVMBMGA1UEBwwMRGVmYXVsdCBDaXR5MRwwGgYDVQQKDBNEZWZh
+ dWx0IENvbXBhbnkgTHRkMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA
+ tLkPgdIMt1QeMOQnljsee+ZaklLmL6xltyC6VdYJo9GWQjGVBr7eFL2ghGjTyAKj
+ Whz5mrJUKiW+QjyR/aTDe71FadwwAs5mK0uB3UTh2pp+YI3m17OIauN4Xi2iAdSm
+ /uBfu7BGPPIqysTnr2nXDd+cuZXe5VVBkbI+iWHMHmgHof2Gfx2gU+UX3RhfsdOz
+ Q7rPphak/I9zw67NvibRLxKFrY3McBYJaHPRpB7FmaFdQEB4MgFo5g12bWXldB0E
+ pJe4VhRvb8U54g73a3YV9IPg/rvI1p5UeGWuI3euCm06fQvba0h8jEkSLzeaG5vx
+ /DuTEPrGfjUNFz3SvdjI2wIDAQABo1AwTjAdBgNVHQ4EFgQUFAvjohHTavHmbRbj
+ Yq2h3cq7UMEwHwYDVR0jBBgwFoAUFAvjohHTavHmbRbjYq2h3cq7UMEwDAYDVR0T
+ BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAB9oDASl4OfF/D49i3KtVzjzge4up
+ WssBPYKVwASh3cXfLLe3NdY9ihdCXFd/8Rus0hBGaRPIyR06sZoHRDEfJ2xrRD6g
+ pr4iHRfaoEWqols7+iW0cgQehvw5efEpFL1vg9zK9kOwruS4ZUhDrak6GcO/O8Jh
+ 6lSGmidHSHrQmfqFeTotaezwylV/uHvRZHPvk2JhQfC+vFjn5/iN/0wCeQCwYvOC
+ rePq2ZFdYg/0bS9BYwKsT2w1Z/AU/wIMLmbNB1af+fTBBEQlxb4rAeDb+J9EoSQ5
+ MVP7jm3BVnHQCs6CA4LV4yRQNF2K6GkWem1oUg/H3S2SG8TAUlKpX/1XRw==
+ -----END CERTIFICATE-----
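The committed key/certificate pair is a self-signed sample (valid until 2021-06-28 per the certificate). One way to regenerate your own pair before deploying, assuming openssl is available:

openssl req -x509 -newkey rsa:2048 -nodes -days 365 \
  -keyout elastalert.key -out elastalert.pem \
  -subj "/C=XX/L=Default City/O=Default Company Ltd"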
diff --git a/tools/lma/ansible-server/roles/logging/files/elastalert/ealert-rule-cm.yaml b/tools/lma/ansible-server/roles/logging/files/elastalert/ealert-rule-cm.yaml
new file mode 100644
index 00000000..af28b6f6
--- /dev/null
+++ b/tools/lma/ansible-server/roles/logging/files/elastalert/ealert-rule-cm.yaml
@@ -0,0 +1,132 @@
+# Copyright 2020 Adarsh yadav
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: elastalert-rule
+data:
+ rule-node4-vswitch.yaml: |
+ name: vswitch-3-sec-node4
+ type: any
+ index: node4*
+ filter:
+ - range:
+ time_vswitchd:
+ gt: 3 #Greater than
+
+ realert:
+ minutes: 0
+
+ alert: post
+ http_post_url: "http://10.10.120.211:31000/alerts"
+ http_post_static_payload:
+ type: threshold
+ label: vswitchd start time > 3 sec
+ http_post_payload:
+ index: _index
+ log: msg
+ log_path: log_path
+ time_vswitchd: time_vswitchd
+ num_hits: num_hits
+ num_matches: num_matches
+
+ rule-node1-vswitch.yaml: |
+ name: vswitch-3-sec-node1
+ type: any
+ index: node1*
+ filter:
+ - range:
+ time_vswitchd:
+ gt: 3 #Greater than
+
+ realert:
+ minutes: 0
+
+ alert: post
+ http_post_url: "http://10.10.120.211:31000/alerts"
+ http_post_static_payload:
+ type: threshold
+ label: vswitchd start time > 3 sec
+ http_post_payload:
+ index: _index
+ log: msg
+ log_path: log_path
+ time_vswitchd: time_vswitchd
+ num_hits: num_hits
+ num_matches: num_matches
+
+ rule-node4-blacklist.yaml: |
+ name: error-finder-node4
+ type: blacklist
+ compare_key: alert
+ index: node4*
+ blacklist:
+ - "Failed to run test"
+ - "Failed to execute in '30' seconds"
+ - "('Result', 'Failed')"
+ - "could not open socket: connection refused"
+ - "Input/output error"
+ - "dpdk|ERR|EAL: Error - exiting with code: 1"
+ - "Failed to execute in '30' seconds"
+ - "dpdk|ERR|EAL: Driver cannot attach the device"
+ - "dpdk|EMER|Cannot create lock on"
+ - "device not found"
+
+ realert:
+ minutes: 0
+
+ alert: post
+ http_post_url: "http://10.10.120.211:31000/alerts"
+ http_post_static_payload:
+ type: pattern-match
+ label: failed
+ http_post_payload:
+ index: _index
+ log: msg
+ log_path: log_path
+ reason: alert
+ num_hits: num_hits
+ num_matches: num_matches
+
+ rule-node1-blacklist.yaml: |
+ name: error-finder-node1
+ type: blacklist
+ compare_key: alert
+ index: node1*
+ blacklist:
+ - "Failed to run test"
+ - "Failed to execute in '30' seconds"
+ - "('Result', 'Failed')"
+ - "could not open socket: connection refused"
+ - "Input/output error"
+ - "dpdk|ERR|EAL: Error - exiting with code: 1"
+ - "Failed to execute in '30' seconds"
+ - "dpdk|ERR|EAL: Driver cannot attach the device"
+ - "dpdk|EMER|Cannot create lock on"
+ - "device not found"
+
+ realert:
+ minutes: 0
+
+ alert: post
+ http_post_url: "http://10.10.120.211:31000/alerts"
+ http_post_static_payload:
+ type: pattern-match
+ label: failed
+ http_post_payload:
+ index: _index
+ log: msg
+ log_path: log_path
+ reason: alert
+ num_hits: num_hits
+ num_matches: num_matches
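Rules can be dry-run against the cluster before being mounted; a sketch, assuming the elastalert package is installed locally and the rule files are extracted from this ConfigMap:

# counts matches over the last 24h without sending any alert
elastalert-test-rule --config elastalert.yaml rules/rule-node4-blacklist.yaml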
diff --git a/tools/lma/ansible-server/roles/logging/files/elastalert/elastalert.yaml b/tools/lma/ansible-server/roles/logging/files/elastalert/elastalert.yaml
new file mode 100644
index 00000000..9e32e2b7
--- /dev/null
+++ b/tools/lma/ansible-server/roles/logging/files/elastalert/elastalert.yaml
@@ -0,0 +1,76 @@
+# Copyright 2020 Adarsh yadav
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: elastalert
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ run: elastalert
+ template:
+ metadata:
+ labels:
+ run: elastalert
+ spec:
+ volumes:
+ - name: econfig
+ configMap:
+ name: elastalert-config
+ items:
+ - key: elastalert.yaml
+ path: elastalert.yaml
+ - name: erule
+ configMap:
+ name: elastalert-rule
+ items:
+ - key: rule-node4-vswitch.yaml
+ path: rule-node4-vswitch.yaml
+ - key: rule-node4-blacklist.yaml
+ path: rule-node4-blacklist.yaml
+ - key: rule-node1-blacklist.yaml
+ path: rule-node1-blacklist.yaml
+ - name: ekey
+ configMap:
+ name: elastalert-key
+ items:
+ - key: elastalert.key
+ path: elastalert.key
+ - key: elastalert.pem
+ path: elastalert.pem
+ initContainers:
+ - name: init-myservice
+ image: busybox:1.28
+ command: ['sh', '-c', 'until nslookup logging-es-http; do echo "waiting for myservice"; sleep 2; done;']
+ containers:
+ - name: elastalert
+ image: adi0509/elastalert:latest
+ env:
+ - name: ES_USERNAME
+ value: "elastic"
+ - name: ES_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: logging-es-elastic-user
+ key: elastic
+ command: [ "sh", "-c"]
+ args: ["elastalert-create-index --config /opt/elastalert/elastalert.yaml; python -m elastalert.elastalert --config /opt/elastalert/elastalert.yaml"]
+ volumeMounts:
+ - mountPath: /opt/elastalert/
+ name: econfig
+ - mountPath: /opt/elastalert/rules/
+ name: erule
+ - mountPath: /opt/elastalert/key
+ name: ekey
diff --git a/tools/lma/ansible-server/roles/logging/files/elasticsearch/elasticsearch.yaml b/tools/lma/ansible-server/roles/logging/files/elasticsearch/elasticsearch.yaml
new file mode 100644
index 00000000..5b0a8476
--- /dev/null
+++ b/tools/lma/ansible-server/roles/logging/files/elasticsearch/elasticsearch.yaml
@@ -0,0 +1,231 @@
+# Copyright 2020 Adarsh yadav
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+apiVersion: elasticsearch.k8s.elastic.co/v1
+kind: Elasticsearch
+metadata:
+ name: logging
+spec:
+ version: 7.8.0
+ http:
+ service:
+ spec:
+ type: NodePort
+ ports:
+ - name: https
+ nodePort: 31111
+ port: 9200
+ protocol: TCP
+ targetPort: 9200
+ auth:
+ fileRealm:
+ - secretName: custom-user
+ nodeSets:
+ - name: vm1-master
+ count: 1
+ config:
+ node.master: true
+ node.data: false
+ node.attr.zone: vm1
+ cluster.routing.allocation.awareness.attributes: zone
+ volumeClaimTemplates:
+ - metadata:
+ name: elasticsearch-data
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 5Gi
+ storageClassName: log-vm1-master
+ podTemplate:
+ spec:
+ initContainers:
+ - name: sysctl
+ securityContext:
+ privileged: true
+ command: ['sh', '-c', 'sysctl -w vm.max_map_count=262144']
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: kubernetes.io/hostname
+ operator: In
+ values:
+ - vm1
+ - name: vm1-data
+ count: 1
+ config:
+ node.master: false
+ node.data: true
+ node.attr.zone: vm1
+ cluster.routing.allocation.awareness.attributes: zone
+ volumeClaimTemplates:
+ - metadata:
+ name: elasticsearch-data
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 5Gi
+ storageClassName: log-vm1-data
+ podTemplate:
+ spec:
+ initContainers:
+ - name: sysctl
+ securityContext:
+ privileged: true
+ command: ['sh', '-c', 'sysctl -w vm.max_map_count=262144']
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: kubernetes.io/hostname
+ operator: In
+ values:
+ - vm1
+ - name: vm2-master
+ count: 1
+ config:
+ node.master: true
+ node.data: false
+ node.attr.zone: vm2
+ cluster.routing.allocation.awareness.attributes: zone
+ volumeClaimTemplates:
+ - metadata:
+ name: elasticsearch-data
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 5Gi
+ storageClassName: log-vm2-master
+ podTemplate:
+ spec:
+ initContainers:
+ - name: sysctl
+ securityContext:
+ privileged: true
+ command: ['sh', '-c', 'sysctl -w vm.max_map_count=262144']
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: kubernetes.io/hostname
+ operator: In
+ values:
+ - vm2
+ - name: vm2-data
+ count: 1
+ config:
+ node.master: false
+ node.data: true
+ node.attr.zone: vm2
+ cluster.routing.allocation.awareness.attributes: zone
+ volumeClaimTemplates:
+ - metadata:
+ name: elasticsearch-data
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 5Gi
+ storageClassName: log-vm2-data
+ podTemplate:
+ spec:
+ initContainers:
+ - name: sysctl
+ securityContext:
+ privileged: true
+ command: ['sh', '-c', 'sysctl -w vm.max_map_count=262144']
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: kubernetes.io/hostname
+ operator: In
+ values:
+ - vm2
+ - name: vm3-master
+ count: 1
+ config:
+ node.master: true
+ node.data: false
+ node.attr.zone: vm3
+ cluster.routing.allocation.awareness.attributes: zone
+ volumeClaimTemplates:
+ - metadata:
+ name: elasticsearch-data
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 5Gi
+ storageClassName: log-vm3-master
+ podTemplate:
+ spec:
+ initContainers:
+ - name: sysctl
+ securityContext:
+ privileged: true
+ command: ['sh', '-c', 'sysctl -w vm.max_map_count=262144']
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: kubernetes.io/hostname
+ operator: In
+ values:
+ - vm3
+ - name: vm3-data
+ count: 1
+ config:
+ node.master: false
+ node.data: true
+ node.attr.zone: vm3
+ cluster.routing.allocation.awareness.attributes: zone
+ volumeClaimTemplates:
+ - metadata:
+ name: elasticsearch-data
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 5Gi
+ storageClassName: log-vm3-data
+ podTemplate:
+ spec:
+ initContainers:
+ - name: sysctl
+ securityContext:
+ privileged: true
+ command: ['sh', '-c', 'sysctl -w vm.max_map_count=262144']
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: kubernetes.io/hostname
+ operator: In
+ values:
+ - vm3
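Once ECK reconciles this resource, cluster health can be checked from the control host (a sketch, assuming kubectl access and the NodePort above; logging-es-elastic-user is the secret ECK creates for the built-in elastic user, and <node-ip> is a placeholder):

kubectl -n logging get elasticsearch logging          # HEALTH should reach green
PW=$(kubectl -n logging get secret logging-es-elastic-user \
  -o go-template='{{.data.elastic | base64decode}}')
curl -sk -u "elastic:$PW" "https://<node-ip>:31111/_cat/nodes?v"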
diff --git a/tools/lma/ansible-server/roles/logging/files/elasticsearch/user-secret.yaml b/tools/lma/ansible-server/roles/logging/files/elasticsearch/user-secret.yaml
new file mode 100644
index 00000000..3e71fe92
--- /dev/null
+++ b/tools/lma/ansible-server/roles/logging/files/elasticsearch/user-secret.yaml
@@ -0,0 +1,23 @@
+# Copyright 2020 Adarsh yadav
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+kind: Secret
+apiVersion: v1
+metadata:
+ name: custom-user
+stringData:
+ users: |-
+ elasticsearch:$2a$10$DzOu7/.Vo2FBDYworbUZe.LNL9tCUl18kpVZ6C/mvkKcXRzYrpmJu
+ users_roles: |-
+ kibana_admin:elasticsearch
+ superuser:elasticsearch
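Each users entry is username:bcrypt-hash. One hedged way to produce a hash for a different password, assuming httpd-tools (htpasswd) is installed:

# prints elasticsearch:$2y$10$... ; paste the hash part into the users field
htpasswd -nbBC 10 elasticsearch 'your-password'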
diff --git a/tools/lma/ansible-server/roles/logging/files/fluentd/fluent-cm.yaml b/tools/lma/ansible-server/roles/logging/files/fluentd/fluent-cm.yaml
new file mode 100644
index 00000000..36ff80d6
--- /dev/null
+++ b/tools/lma/ansible-server/roles/logging/files/fluentd/fluent-cm.yaml
@@ -0,0 +1,525 @@
+# Copyright 2020 Adarsh yadav
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: fluentd-config
+data:
+ index_template.json: |
+ {
+ "index_patterns": [
+ "node*"
+ ],
+ "settings": {
+ "index.lifecycle.name": "delete_policy",
+ "number_of_replicas": 1
+ }
+ }
+ fluent.conf: |
+ <source>
+ @type forward
+ port 24224
+ bind 0.0.0.0
+ tag log
+ </source>
+
+ #tag the .dat file
+ <match log>
+ @type rewrite_tag_filter
+ #Trex data
+ <rule>
+ key log_path
+ pattern /\/tmp\/result.*\/.*counts.dat/
+ tag countdat.${tag}
+ </rule>
+ <rule>
+ key log_path
+ pattern /\/tmp\/result.*\/.*errors.dat/
+ tag errordat.${tag}
+ </rule>
+ #Spirent data
+ <rule>
+ key log_path
+ pattern /\/tmp\/result.*\/stc-liveresults.dat.tx/
+ tag stcdattx.${tag}
+ </rule>
+ <rule>
+ key log_path
+ pattern /\/tmp\/result.*\/stc-liveresults.dat.rx/
+ tag stcdatrx.${tag}
+ </rule>
+ #Ixia data
+ <rule>
+ key log_path
+ pattern /\/tmp\/result.*\/.*Statistics.csv/
+ tag ixia.${tag}
+ </rule>
+ #log files
+ <rule>
+ key log_path
+ pattern /vsperf-overall/
+ tag vsperf.${tag}
+ </rule>
+ <rule>
+ key log_path
+ pattern /vswitchd/
+ tag vswitchd.${tag}
+ </rule>
+ <rule>
+ key log_path
+ pattern /\/var\/log\/userspace/
+ tag userspace.${tag}
+ </rule>
+ <rule>
+ key log_path
+ pattern /\/var\/log\/sriovdp/
+ tag sriovdp.${tag}
+ </rule>
+ <rule>
+ key log_path
+ pattern /\/var\/log\/pods/
+ tag pods.${tag}
+ </rule>
+ </match>
+
+ #to find error
+ @include error.conf
+
+ #to parse time-series data
+ @include time-series.conf
+
+ #to calculate time analysis
+ @include time-analysis.conf
+
+ #give tag 'node1' if host is worker and tag 'node4' if host is pod12-node4
+ <match **.log>
+ @type rewrite_tag_filter
+ <rule>
+ key host
+ pattern /pod12-node4/
+ tag node4
+ </rule>
+ <rule>
+ key host
+ pattern /worker/
+ tag node1
+ </rule>
+ </match>
+
+
+ <filter node1>
+ @type elasticsearch_genid
+ hash_id_key _hash1
+ </filter>
+
+ #send the node1 log to node1 index in elasticsearch
+ <match node1>
+ @type copy
+ <store>
+ @type elasticsearch
+ host logging-es-http
+ port 9200
+ scheme https
+ ssl_verify false
+ user "#{ENV['FLUENT_ELASTICSEARCH_USER']}"
+ password "#{ENV['FLUENT_ELASTICSEARCH_PASSWORD']}"
+ logstash_format true
+ logstash_prefix node1
+ logstash_dateformat %Y%m%d
+ flush_interval 1s
+ id_key _hash1
+ remove_keys _hash1
+
+ enable_ilm true
+ application_name ${tag}
+ index_date_pattern ""
+ ilm_policy_id delete_policy
+ template_name delpol-test
+ template_file /fluentd/etc/index_template.json
+ ilm_policy {
+ "policy": {
+ "phases": {
+ "delete": {
+ "min_age": "3m",
+ "actions": {
+ "delete": {}
+ }
+ }
+ }
+ }
+ }
+ </store>
+ <store>
+ @type stdout
+ </store>
+ </match>
+
+ <filter node4>
+ @type elasticsearch_genid
+ hash_id_key _hash4
+ </filter>
+
+ #send the node4 log to node4 index in elasticsearch
+ <match node4>
+ @type copy
+ <store>
+ @type elasticsearch
+ host logging-es-http
+ port 9200
+ scheme https
+ ssl_verify false
+ user "#{ENV['FLUENT_ELASTICSEARCH_USER']}"
+ password "#{ENV['FLUENT_ELASTICSEARCH_PASSWORD']}"
+ logstash_format true
+ logstash_prefix node4
+ logstash_dateformat %Y%m%d
+ flush_interval 1s
+ id_key _hash4
+ remove_keys _hash4
+
+ enable_ilm true
+ application_name ${tag}
+ index_date_pattern ""
+ ilm_policy_id delete_policy
+ template_name delpol-test
+ template_file /fluentd/etc/index_template.json
+ ilm_policy {
+ "policy": {
+ "phases": {
+ "delete": {
+ "min_age": "3m",
+ "actions": {
+ "delete": {}
+ }
+ }
+ }
+ }
+ }
+ </store>
+ <store>
+ @type stdout
+ </store>
+ </match>
+ error.conf: |
+ <filter vsperf.log>
+ @type parser
+ reserve_data true
+ key_name msg
+ emit_invalid_record_to_error false
+ <parse>
+ @type regexp
+ expression /(?<alert_time>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3}).*(?<alert>Failed to run test|Failed to execute in '30' seconds|\('Result', 'Failed'\)|could not open socket: connection refused|Input\/output error)/
+ </parse>
+ </filter>
+
+ <filter vswitchd.log>
+ @type parser
+ reserve_data true
+ key_name msg
+ emit_invalid_record_to_error false
+ <parse>
+ @type regexp
+ expression /(?<alert_time>\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z).*(?<alert>dpdk\|ERR\|EAL: Error - exiting with code: 1|Failed to execute in '30' seconds|dpdk\|ERR\|EAL: Driver cannot attach the device|dpdk\|EMER\|Cannot create lock on)/
+ </parse>
+ </filter>
+ <filter vswitchd.log>
+ @type parser
+ reserve_data true
+ key_name msg
+ emit_invalid_record_to_error false
+ <parse>
+ @type regexp
+ expression /(?<alert_time>\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z).*dpdk\|ERR\|VHOST_CONFIG:.*(?<alert>device not found)/
+ </parse>
+ </filter>
+ time-series.conf: |
+ #parse *counts.dat
+ <filter countdat.log>
+ @type parser
+ key_name msg
+ reserve_data true
+ emit_invalid_record_to_error false
+ <parse>
+ @type regexp
+ expression /^(?<ts>[\.\d]*),(?<rx_port>\d*),(?<tx_port>\d*),(?<rx_pkts>[\.\d]*),(?<tx_pkts>[\.\d]*),(?<rx_pps>[\.\d]*),(?<tx_pps>[\.\d]*),(?<rx_bps_num>[\.\d]*),(?<rx_bps_den>[\.\d]*),(?<tx_bps_num>[\.\d]*),(?<tx_bps_den>[\.\d]*)$/
+ types rx_port:integer,tx_port:integer,rx_pkts:float,tx_pkts:float,rx_pps:float,tx_pps:float,rx_bps_num:float,rx_bps_den:float,tx_bps_num:float,tx_bps_den:float
+ </parse>
+ </filter>
+
+ #parse *errors.dat
+ <filter errordat.log>
+ @type parser
+ key_name msg
+ reserve_data true
+ emit_invalid_record_to_error false
+ <parse>
+ @type regexp
+ expression /^(?<ts>[\.\d]*),(?<dropped>[\.\d]*),(?<ooo>[\.\d]*),(?<dup>[\.\d]*),(?<seq_too_high>[\.\d]*),(?<seq_too_low>[\.\d]*)$/
+ types ts:integer,dropped:integer,ooo:integer,dup:integer,seq_too_high:integer,seq_too_low:integer
+ </parse>
+ </filter>
+
+ #parse stc-liveresults.dat.tx
+ <filter stcdattx.log>
+ @type parser
+ key_name msg
+ reserve_data true
+ emit_invalid_record_to_error false
+ <parse>
+ @type regexp
+ expression /^(?<ts>[\.\d]*),(?<StrId>[\.\d]*),(?<BlkId>[\.\d]*),(?<FrCnt>[\.\d]*),(?<FrRate>[\.\d]*),(?<ERxFrCnt>[\.\d]*),(?<OctCnt>[\.\d]*),(?<OctRate>[\.\d]*),(?<bitCnt>[\.\d]*),(?<bitRate>[\.\d]*)$/
+ types ts:integer,StrId:integer,BlkId:integer,FrCnt:integer,FrRate:integer,ERxFrCnt:integer,OctCnt:integer,OctRate:integer,bitCnt:integer,bitRate:integer
+ </parse>
+ </filter>
+
+ #parse stc-liveresults.dat.rx
+ <filter stcdatrx.log>
+ @type parser
+ key_name msg
+ reserve_data true
+ emit_invalid_record_to_error false
+ <parse>
+ @type regexp
+ expression /^(?<ts>[\.\d]*),(.*, |)(?<RxPrt>.*),(?<DrpFrCnt>[\.\d]*),(?<SeqRnLen>[\.\d]*),(?<AvgLat>.*),(?<DrpFrRate>[\.\d]*),(?<FrCnt>[\.\d]*),(?<FrRate>[\.\d]*),(?<MaxLat>[\.\d]*),(?<MinLat>[\.\d]*),(?<OctCnt>[\.\d]*),(?<OctRate>[\.\d]*)$/
+ types ts:integer,DrpFrCnt:integer,SeqRnLen:integer,FrCnt:integer,FrRate:integer,MaxLat:integer,MinLat:integer,OctCnt:integer,OctRate:integer
+ </parse>
+ </filter>
+ time-analysis.conf: |
+ # Durations computed from vsperf log lines:
+ # 1. Test Duration - from the first line to the last line
+ # 2. Setup Duration - from "Creating result directory" to "Class found"
+ # 3. Traffic Duration - from "Starting traffic at 0.1 Gbps speed" to "Traffic Results"
+ # 4. Iteration Duration - e.g. from "Starting traffic at 10.0 Gbps" to "Starting traffic at 5.0 Gbps speed"
+ # 5. Reporting Duration - from "Traffic Results" to "Write results to file"
+ # 6. Vswitchd Start Duration - from "Starting vswitchd..." to "send_traffic with"
+
+ <match vsperf.log>
+ @type rewrite_tag_filter
+ <rule>
+ key msg
+ pattern /Creating result directory:/
+ tag firstline.${tag}
+ </rule>
+ <rule>
+ key msg
+ pattern /Write results to file/
+ tag lastline.${tag}
+ </rule>
+
+ <rule>
+ key msg
+ pattern /Class found/
+ tag setupend.${tag}
+ </rule>
+ <rule>
+ key msg
+ pattern /Starting traffic at 0.1 Gbps speed/
+ tag trafficstart.${tag}
+ </rule>
+ <rule>
+ key msg
+ pattern /Traffic Results/
+ tag trafficend.${tag}
+ </rule>
+ <rule>
+ key msg
+ pattern /Starting traffic at 10.0 Gbps/
+ tag iterationstart.${tag}
+ </rule>
+ <rule>
+ key msg
+ pattern /Starting traffic at 5.0 Gbps speed/
+ tag iterationend.${tag}
+ </rule>
+ <rule>
+ key msg
+ pattern /Starting vswitchd/
+ tag vswitchstart.${tag}
+ </rule>
+ <rule>
+ key msg
+ pattern /send_traffic/
+ tag vswitch.${tag}
+ </rule>
+ <rule>
+ key msg
+ pattern ^.*$
+ tag logs.${tag}
+ </rule>
+ </match>
+
+ #############################################################################################
+ #save the starting log and append that log in ending log
+ #############################################################################################
+ <filter firstline.**>
+ @type record_transformer
+ enable_ruby true
+ <record>
+ msg ${$vswitch_start="";$reportstart="";$firstline="";$traffic_start="";$iteration_start="";$firstline = record["msg"];return record["msg"];}
+ </record>
+ </filter>
+ <filter lastline.**>
+ @type record_transformer
+ enable_ruby true
+ <record>
+ newmsg ${record["msg"]+" | "+$firstline + " | "+ $reportstart}
+ </record>
+ </filter>
+
+ <filter setupend.**>
+ @type record_transformer
+ enable_ruby true
+ <record>
+ newmsg ${record["msg"]+" "+$firstline}
+ </record>
+ </filter>
+
+ <filter trafficstart.**>
+ @type record_transformer
+ enable_ruby true
+ <record>
+ msg ${if $traffic_start.eql?("");$traffic_start=record["msg"];end;return record["msg"];}
+ </record>
+ </filter>
+ <filter trafficend.**>
+ @type record_transformer
+ enable_ruby true
+ <record>
+ newmsg ${if $reportstart.eql?("");$reportstart=record["msg"];end;return record["msg"]+" "+$traffic_start;}
+ </record>
+ </filter>
+
+ <filter iterationstart.**>
+ @type record_transformer
+ enable_ruby true
+ <record>
+ msg ${if $iteration_start.eql?("");$iteration_start=record["msg"];end;return record["msg"];}
+ </record>
+ </filter>
+ <filter iterationend.**>
+ @type record_transformer
+ enable_ruby true
+ <record>
+ newmsg ${record["msg"]+" "+$iteration_start}
+ </record>
+ </filter>
+
+ <filter vswitchstart.**>
+ @type record_transformer
+ enable_ruby true
+ <record>
+ msg ${$vswitch_start=record["msg"];return record["msg"];}
+ </record>
+ </filter>
+ <filter vswitch.**>
+ @type record_transformer
+ enable_ruby true
+ <record>
+ newmsg ${record["msg"]+" "+$vswitch_start}
+ </record>
+ </filter>
+ #############################################################################################
+ #parse time from the log
+ #############################################################################################
+ <filter setupend.**>
+ @type parser
+ key_name newmsg
+ reserve_data true
+ remove_key_name_field true
+ <parse>
+ @type regexp
+ expression /^(?<setupend>.*) : Class found: Trex. (?<setupstart>.*) : .*$/
+ </parse>
+ </filter>
+ <filter iterationend.**>
+ @type parser
+ key_name newmsg
+ reserve_data true
+ remove_key_name_field true
+ <parse>
+ @type regexp
+ expression /^(?<iterationend>.*) : Starting traffic at 5.0 Gbps speed (?<iterationstart>.*) : Starting traffic at 10.0 Gbps speed$/
+ </parse>
+ </filter>
+ <filter vswitch.**>
+ @type parser
+ key_name newmsg
+ reserve_data true
+ remove_key_name_field true
+ <parse>
+ @type regexp
+ expression /^(?<vswitch>.*) : send_traffic with <.*> (?<vswitchstart>.*) : Starting vswitchd...$/
+ </parse>
+ </filter>
+ <filter trafficend.**>
+ @type parser
+ key_name newmsg
+ reserve_data true
+ remove_key_name_field true
+ <parse>
+ @type regexp
+ expression /^(?<trafficend>.*) : Traffic Results: (?<trafficstart>.*) : Starting traffic at 0.1 Gbps speed/
+ </parse>
+ </filter>
+ <filter lastline.**>
+ @type parser
+ key_name newmsg
+ reserve_data true
+ remove_key_name_field true
+ <parse>
+ @type regexp
+ expression /^(?<lastline>.*) : Write results to file: .* \| (?<firstline>.*) : Creating result directory: .* \| (?<reportstart>.*) : Traffic Results:$/
+ </parse>
+ </filter>
+ #############################################################################################
+ #calculate time
+ #############################################################################################
+ <filter setupend.**>
+ @type record_transformer
+ enable_ruby
+ <record>
+ setup_duration ${ require 'time';Time.parse(record["setupend"])-Time.parse(record["setupstart"]); }
+ </record>
+ </filter>
+ <filter iterationend.**>
+ @type record_transformer
+ enable_ruby
+ <record>
+ iteration_duration ${ require 'time';Time.parse(record["iterationend"])-Time.parse(record["iterationstart"]); }
+ </record>
+ </filter>
+ <filter vswitch.**>
+ @type record_transformer
+ enable_ruby
+ <record>
+ vswitch_duration ${ require 'time';Time.parse(record["vswitch"])-Time.parse(record["vswitchstart"]); }
+ </record>
+ </filter>
+ <filter trafficend.**>
+ @type record_transformer
+ enable_ruby
+ <record>
+ traffic_duration ${ require 'time';Time.parse(record["trafficend"])-Time.parse(record["trafficstart"]); }
+ </record>
+ </filter>
+ <filter lastline.**>
+ @type record_transformer
+ enable_ruby
+ <record>
+ test_duration ${ require 'time';Time.parse(record["lastline"])-Time.parse(record["firstline"]); }
+ </record>
+ <record>
+ report_duration ${ require 'time';Time.parse(record["lastline"])-Time.parse(record["reportstart"]); }
+ </record>
+ </filter>
+ #############################################################################################
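The parse expressions above are easy to get wrong; they can be smoke-tested outside fluentd. A sketch using GNU grep's PCRE mode with a made-up counts.dat line following the field order ts,rx_port,tx_port,rx_pkts,tx_pkts,rx_pps,tx_pps,rx_bps_num,rx_bps_den,tx_bps_num,tx_bps_den:

echo '1593500000.5,0,1,1000,1000,10.5,10.5,8000,1,8000,1' | \
  grep -P '^[\.\d]*,\d*,\d*,[\.\d]*,[\.\d]*,[\.\d]*,[\.\d]*,[\.\d]*,[\.\d]*,[\.\d]*,[\.\d]*$' \
  && echo "countdat regexp matches"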
diff --git a/tools/lma/ansible-server/roles/logging/files/fluentd/fluent-service.yaml b/tools/lma/ansible-server/roles/logging/files/fluentd/fluent-service.yaml
new file mode 100644
index 00000000..9a43b82f
--- /dev/null
+++ b/tools/lma/ansible-server/roles/logging/files/fluentd/fluent-service.yaml
@@ -0,0 +1,34 @@
+# Copyright 2020 Adarsh yadav
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+apiVersion: v1
+kind: Service
+metadata:
+ name: fluentd
+ labels:
+ run: fluentd
+spec:
+ type: NodePort
+ ports:
+ - name: tcp
+ port: 32224
+ targetPort: 24224
+ protocol: TCP
+ nodePort: 32224
+ - name: udp
+ port: 32224
+ targetPort: 24224
+ protocol: UDP
+ nodePort: 32224
+ selector:
+ run: fluentd
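With the service up, a record can be injected from outside the cluster to exercise the whole pipeline (a sketch, assuming the fluentd gem's fluent-cat utility; <node-ip> is a placeholder for any cluster node):

echo '{"msg":"hello","log_path":"/var/log/vswitchd/ovs-vswitchd.log","host":"pod12-node4"}' | \
  fluent-cat log --host <node-ip> --port 32224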
diff --git a/tools/lma/ansible-server/roles/logging/files/fluentd/fluent.yaml b/tools/lma/ansible-server/roles/logging/files/fluentd/fluent.yaml
new file mode 100644
index 00000000..3830f682
--- /dev/null
+++ b/tools/lma/ansible-server/roles/logging/files/fluentd/fluent.yaml
@@ -0,0 +1,65 @@
+# Copyright 2020 Adarsh yadav
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: fluentd
+spec:
+ replicas: 2
+ selector:
+ matchLabels:
+ run: fluentd
+ template:
+ metadata:
+ labels:
+ run: fluentd
+ spec:
+ volumes:
+ - name: fconfig
+ configMap:
+ name: fluentd-config
+ items:
+ - key: fluent.conf
+ path: fluent.conf
+ - key: error.conf
+ path: error.conf
+ - key: time-series.conf
+ path: time-series.conf
+ - key: time-analysis.conf
+ path: time-analysis.conf
+ - key: index_template.json
+ path: index_template.json
+ initContainers:
+ - name: init-myservice
+ image: busybox:1.28
+ command: ['sh', '-c', 'until nslookup logging-es-http; do echo "waiting for myservice"; sleep 2; done;']
+ containers:
+ - name: fluentd
+ image: adi0509/fluentd:latest
+ env:
+ - name: FLUENT_ELASTICSEARCH_USER
+ value: "elastic"
+ - name: FLUENT_ELASTICSEARCH_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: logging-es-elastic-user
+ key: elastic
+ ports:
+ - containerPort: 24224
+ protocol: TCP
+ - containerPort: 24224
+ protocol: UDP
+ volumeMounts:
+ - name: fconfig
+ mountPath: /fluentd/etc/
diff --git a/tools/lma/ansible-server/roles/logging/files/kibana/kibana.yaml b/tools/lma/ansible-server/roles/logging/files/kibana/kibana.yaml
new file mode 100644
index 00000000..5ec6937e
--- /dev/null
+++ b/tools/lma/ansible-server/roles/logging/files/kibana/kibana.yaml
@@ -0,0 +1,23 @@
+# Copyright 2020 Adarsh yadav
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+apiVersion: kibana.k8s.elastic.co/v1
+kind: Kibana
+metadata:
+ name: logging
+spec:
+ version: 7.8.0
+ count: 1
+ elasticsearchRef:
+ name: logging
+ namespace: logging
diff --git a/tools/lma/ansible-server/roles/logging/files/namespace.yaml b/tools/lma/ansible-server/roles/logging/files/namespace.yaml
new file mode 100644
index 00000000..6964af5c
--- /dev/null
+++ b/tools/lma/ansible-server/roles/logging/files/namespace.yaml
@@ -0,0 +1,17 @@
+# Copyright 2020 Adarsh yadav
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: logging
diff --git a/tools/lma/ansible-server/roles/logging/files/nginx/nginx-conf-cm.yaml b/tools/lma/ansible-server/roles/logging/files/nginx/nginx-conf-cm.yaml
new file mode 100644
index 00000000..f5a11e80
--- /dev/null
+++ b/tools/lma/ansible-server/roles/logging/files/nginx/nginx-conf-cm.yaml
@@ -0,0 +1,36 @@
+# Copyright 2020 Adarsh yadav
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: nginx-config
+data:
+ default.conf: |
+ server {
+ listen 80 ssl;
+ ssl_certificate /etc/ssl/certs/kibana-access.pem;
+ ssl_certificate_key /etc/ssl/private/kibana-access.key;
+
+ location / {
+ proxy_pass https://logging-kb-http:5601;
+ proxy_http_version 1.1;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection 'upgrade';
+ proxy_set_header Host $host;
+ proxy_cache_bypass $http_upgrade;
+ proxy_read_timeout 300s;
+ proxy_connect_timeout 75s;
+ }
+ }
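Since the container terminates TLS on port 80 and the nginx Service defined later exposes it on NodePort 32000, the proxy can be checked with (self-signed cert, hence -k; <node-ip> is a placeholder):

curl -kI https://<node-ip>:32000/    # expect Kibana's response via the proxy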
diff --git a/tools/lma/ansible-server/roles/logging/files/nginx/nginx-key-cm.yaml b/tools/lma/ansible-server/roles/logging/files/nginx/nginx-key-cm.yaml
new file mode 100644
index 00000000..93d7d6ec
--- /dev/null
+++ b/tools/lma/ansible-server/roles/logging/files/nginx/nginx-key-cm.yaml
@@ -0,0 +1,68 @@
+# Copyright 2020 Adarsh yadav
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: nginx-key
+data:
+ kibana-access.key: |
+ -----BEGIN PRIVATE KEY-----
+ MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDI92SBrcEdzxqS
+ rt883bVcj4F4RuKtm+AjjOEhbFUd3XOp5Wa5NzyYQSTP9ZJHG0dYiTAKOJBVcgbc
+ YRMNUAzHAIskf1q2/SvwyLNRMJLkBD5HHjbnEcuKQm/+nPdzkfvo2zfRNTDPKS83
+ HqFQ779hT8ZLkSzoPuR0QD17ZNWYVMZv/r9wqnjX8U/k5AjrJOIxuaO9nGAgv2Pu
+ Qm6wuU8UBEaMRgPVHQ3ztflQr9QPr/S6HU0cl4Gu+Nwid6iC1RVYxANNq7E7wRvq
+ GMKRS5cA9Nlnu/b7IEI4LSx5yeTSDzwmZKTNnUWi2cpqk30M4G4cUokoz9bP+62I
+ YWEh3B8HAgMBAAECggEBAI1luzqepTSzBhBUp88sczGX6tFUlqLt/Ism0TPyBAVK
+ TdopBNima6T4mM0VDIGpSM6bX8ihObRU0Uz3pC8GtqbB1CSu0oXTpbn5jGlAkumJ
+ rsPdF2YHGD3ENwZfLKANA8A3lZNGKHxpjsXqcDgBJ5dxSKTclUsnDRhaJqgOL1bI
+ d9QCXdA1vbpxHDJWSo73E7omv3AyHi3HxMWU4gzyerUFSMFGqm0W5dPeeresNE3a
+ bv9/46YdykufuRuJZqsUDLCgUUcJPhbE5iOrB4iv8oaDqT0onxwzRQTSgidPxbp2
+ EmjVHpFCACltOKSqELM4+PQFCk8xUBya8HWD5UHrVDkCgYEA4y3WwmhtLUT/g3G3
+ cowvmxjgPl6xqkqTA7Xcdc3sk+6/jS1kayT5TL1qfpd1QL/K617jva9mfSMZ8ei9
+ Y7M/2QkSb0uHKulGR0+if+7sT0L8OYO/OE7c+HTZmZK4hD1CCJN2M34D9Qo2fzQ6
+ 4v+AO1wGiAtiNev0YIBKYNSco+sCgYEA4nY8m93XuC19z991sFRvE0UBeKcN2esg
+ TwY9UuYHJ56s+6UozkUgZArwYFW8LWFeIjkrrKELBNDsmJtTZ006TyUWxY/ccdjV
+ fJZTLV3niv6IQzy74aOmXV2vtNjxyBlllT9mvig6T0t0TvAtolsuSVHBL09zxcy4
+ wN4pGIfqllUCgYBYLq/hMKXIX7MK87YwqYfFHWfV7e3q2x2r4AjeVXuShKcoBsmm
+ 6Wg3yIKw9tuVsZzzthaSx6XxxxFIHH5/V9Hdzi6wstGZ74jPH3NFU5m4vpinPqOY
+ GMyfSMQ6X4BuHFUofQzxueWRVVCIGd8Nw/2jjPogDsMliRyH5OR6J61R1wKBgEa6
+ 8SEpf7fJlZL4UzS4mlylX9lEK+JVOqkT5NFggPmR6KtMIVuTYZN9iyg7fuOZlqIP
+ wyFOxzdA3bSoRrtr9ntDtUINNaflNoCMHvx7aNcTupFthazqxQpCOZ+9Zn691+lu
+ fPOFcvjTM0d4YnhkDCfgPfs90IYF8+phOOqtgMplAoGBAI+mcaUH7ADYxlONCi1E
+ gNHRvHJRBdQBaydKUfPxbe3vS5QJb8Gb5RU46vDl3w+YHUVwUi+Hj68zuKExXxhD
+ 9CGTAQIejtHWScZ1Djl3bcvNa/czHyuNVsGwvJ3fy1JzpxRmUUMPSdJ90A1n57Tk
+ LFEmZhwaj7YF869wfKngQ57d
+ -----END PRIVATE KEY-----
+ kibana-access.pem: |
+ -----BEGIN CERTIFICATE-----
+ MIIDVzCCAj+gAwIBAgIJAIQzf1mxHsvgMA0GCSqGSIb3DQEBCwUAMEIxCzAJBgNV
+ BAYTAlhYMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQg
+ Q29tcGFueSBMdGQwHhcNMjAwNjI1MTY1NzQ3WhcNMjEwNjI1MTY1NzQ3WjBCMQsw
+ CQYDVQQGEwJYWDEVMBMGA1UEBwwMRGVmYXVsdCBDaXR5MRwwGgYDVQQKDBNEZWZh
+ dWx0IENvbXBhbnkgTHRkMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA
+ yPdkga3BHc8akq7fPN21XI+BeEbirZvgI4zhIWxVHd1zqeVmuTc8mEEkz/WSRxtH
+ WIkwCjiQVXIG3GETDVAMxwCLJH9atv0r8MizUTCS5AQ+Rx425xHLikJv/pz3c5H7
+ 6Ns30TUwzykvNx6hUO+/YU/GS5Es6D7kdEA9e2TVmFTGb/6/cKp41/FP5OQI6yTi
+ MbmjvZxgIL9j7kJusLlPFARGjEYD1R0N87X5UK/UD6/0uh1NHJeBrvjcIneogtUV
+ WMQDTauxO8Eb6hjCkUuXAPTZZ7v2+yBCOC0secnk0g88JmSkzZ1FotnKapN9DOBu
+ HFKJKM/Wz/utiGFhIdwfBwIDAQABo1AwTjAdBgNVHQ4EFgQUrz/R+M2XkTTfjrau
+ VVBW6+pdatgwHwYDVR0jBBgwFoAUrz/R+M2XkTTfjrauVVBW6+pdatgwDAYDVR0T
+ BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAyIhJLwg9oTil0Rb1zbYQb0Mr0UYz
+ rlS4f8QkxygkGLAZ8q9VkR+NpKfqhYDSHofGg5Yg5/p54NRJh5M4ASuM7N9AK0LH
+ KbCvS+YRNWhmo+7H7zjDNkV8FbzG41nkt9jQjaKFF7GdKr4HkWvupMX6PwsAZ0jI
+ b2Y6QzFQP9wF0QoBHrK42u3eWbfYv2IIDd6xsV90ilKRDtKkCiI4dyKGK46YDyZB
+ 3eqJ08Pm67HDbxQLydRXkNJvd33PASRgE/VOh44n3xWG+Gu4IMz7EO/4monyuv1Q
+ V2v1A9NV+ZnAq4PT7WJY7fWYavDUr+kwxMAGNQkG/Cg3X4FYrRwrq6gk7Q==
+ -----END CERTIFICATE-----
diff --git a/tools/lma/ansible-server/roles/logging/files/nginx/nginx-service.yaml b/tools/lma/ansible-server/roles/logging/files/nginx/nginx-service.yaml
new file mode 100644
index 00000000..8aea53dd
--- /dev/null
+++ b/tools/lma/ansible-server/roles/logging/files/nginx/nginx-service.yaml
@@ -0,0 +1,28 @@
+# Copyright 2020 Adarsh yadav
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+apiVersion: v1
+kind: Service
+metadata:
+ name: nginx
+ labels:
+ run: nginx
+spec:
+ type: NodePort
+ ports:
+ - port: 8000
+ targetPort: 80
+ protocol: TCP
+ nodePort: 32000
+ selector:
+ run: nginx
diff --git a/tools/lma/ansible-server/roles/logging/files/nginx/nginx.yaml b/tools/lma/ansible-server/roles/logging/files/nginx/nginx.yaml
new file mode 100644
index 00000000..fdf5c835
--- /dev/null
+++ b/tools/lma/ansible-server/roles/logging/files/nginx/nginx.yaml
@@ -0,0 +1,58 @@
+# Copyright 2020 Adarsh yadav
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: nginx
+spec:
+ replicas: 2
+ selector:
+ matchLabels:
+ run: nginx
+ template:
+ metadata:
+ labels:
+ run: nginx
+ spec:
+ volumes:
+ - name: nconfig
+ configMap:
+ name: nginx-config
+ items:
+ - key: default.conf
+ path: default.conf
+ - name: nkey
+ configMap:
+ name: nginx-key
+ items:
+ - key: kibana-access.key
+ path: kibana-access.key
+ - key: kibana-access.pem
+ path: kibana-access.pem
+ initContainers:
+ - name: init-myservice
+ image: busybox:1.28
+ command: ['sh', '-c', 'until nslookup logging-kb-http; do echo "waiting for myservice"; sleep 2; done;']
+ containers:
+ - name: nginx
+ image: nginx
+ volumeMounts:
+ - mountPath: /etc/nginx/conf.d/
+ name: nconfig
+ - mountPath: /etc/ssl/certs/
+ name: nkey
+ - mountPath: /etc/ssl/private/
+ name: nkey
+ ports:
+ - containerPort: 80
diff --git a/tools/lma/ansible-server/roles/logging/files/persistentVolume.yaml b/tools/lma/ansible-server/roles/logging/files/persistentVolume.yaml
new file mode 100644
index 00000000..c1a96077
--- /dev/null
+++ b/tools/lma/ansible-server/roles/logging/files/persistentVolume.yaml
@@ -0,0 +1,105 @@
+# Copyright 2020 Adarsh yadav
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: pv-master-vm1
+spec:
+ capacity:
+ storage: 5Gi
+ accessModes:
+ - ReadWriteOnce
+ persistentVolumeReclaimPolicy: Retain
+ storageClassName: log-vm1-master
+ nfs:
+ server: 10.10.120.211
+ path: "/srv/nfs/master"
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: pv-data-vm1
+spec:
+ capacity:
+ storage: 5Gi
+ accessModes:
+ - ReadWriteOnce
+ persistentVolumeReclaimPolicy: Retain
+ storageClassName: log-vm1-data
+ nfs:
+ server: 10.10.120.211
+ path: "/srv/nfs/data"
+
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: pv-master-vm2
+spec:
+ capacity:
+ storage: 5Gi
+ accessModes:
+ - ReadWriteOnce
+ persistentVolumeReclaimPolicy: Retain
+ storageClassName: log-vm2-master
+ nfs:
+ server: 10.10.120.203
+ path: "/srv/nfs/master"
+
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: pv-data-vm2
+spec:
+ capacity:
+ storage: 5Gi
+ accessModes:
+ - ReadWriteOnce
+ persistentVolumeReclaimPolicy: Retain
+ storageClassName: log-vm2-data
+ nfs:
+ server: 10.10.120.203
+ path: "/srv/nfs/data"
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: pv-master-vm3
+spec:
+ capacity:
+ storage: 5Gi
+ accessModes:
+ - ReadWriteOnce
+ persistentVolumeReclaimPolicy: Retain
+ storageClassName: log-vm3-master
+ nfs:
+ server: 10.10.120.204
+ path: "/srv/nfs/master"
+
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: pv-data-vm3
+spec:
+ capacity:
+ storage: 5Gi
+ accessModes:
+ - ReadWriteOnce
+ persistentVolumeReclaimPolicy: Retain
+ storageClassName: log-vm3-data
+ nfs:
+ server: 10.10.120.204
+ path: "/srv/nfs/data"
diff --git a/tools/lma/ansible-server/roles/logging/files/storageClass.yaml b/tools/lma/ansible-server/roles/logging/files/storageClass.yaml
new file mode 100644
index 00000000..a2f1e3aa
--- /dev/null
+++ b/tools/lma/ansible-server/roles/logging/files/storageClass.yaml
@@ -0,0 +1,73 @@
+# Copyright 2020 Adarsh yadav
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#storage class for VM1 master
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+ name: log-vm1-master
+reclaimPolicy: Retain
+provisioner: kubernetes.io/no-provisioner
+volumeBindingMode: Immediate
+allowVolumeExpansion: true
+---
+#storage class for VM1 data
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+ name: log-vm1-data
+reclaimPolicy: Retain
+provisioner: kubernetes.io/no-provisioner
+volumeBindingMode: Immediate
+allowVolumeExpansion: true
+---
+#storage class for VM2 master
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+ name: log-vm2-master
+reclaimPolicy: Retain
+provisioner: kubernetes.io/no-provisioner
+volumeBindingMode: Immediate
+allowVolumeExpansion: true
+---
+#storage class for VM2 data
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+ name: log-vm2-data
+reclaimPolicy: Retain
+provisioner: kubernetes.io/no-provisioner
+volumeBindingMode: Immediate
+allowVolumeExpansion: true
+---
+#storage class for VM3 master
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+ name: log-vm3-master
+reclaimPolicy: Retain
+provisioner: kubernetes.io/no-provisioner
+volumeBindingMode: Immediate
+allowVolumeExpansion: true
+---
+#storage class for VM3 data
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+ name: log-vm3-data
+reclaimPolicy: Retain
+provisioner: kubernetes.io/no-provisioner
+volumeBindingMode: Immediate
+allowVolumeExpansion: true
diff --git a/tools/lma/ansible-server/roles/logging/tasks/main.yml b/tools/lma/ansible-server/roles/logging/tasks/main.yml
new file mode 100644
index 00000000..dcbf4d4d
--- /dev/null
+++ b/tools/lma/ansible-server/roles/logging/tasks/main.yml
@@ -0,0 +1,165 @@
+# Copyright 2020 Adarsh yadav
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+#EFK setup in k8s cluster
+
+#***********************************************************************************************************
+#copy all yaml to /tmp/files/
+#***********************************************************************************************************
+- name: copy all yaml to /tmp/files/
+ copy:
+ src: ../files/
+ dest: /tmp/files/
+
+#***********************************************************************************************************
+#Creating Namespace
+#***********************************************************************************************************
+- name: Creating Namespace
+ k8s:
+ state: present
+ src: /tmp/files/namespace.yaml
+ namespace: logging
+
+#***********************************************************************************************************
+#creating Storage Class
+#***********************************************************************************************************
+- name: Creating Storage Class
+ k8s:
+ state: present
+ src: /tmp/files/storageClass.yaml
+ namespace: logging
+
+#***********************************************************************************************************
+#creating Persistent Volume
+#***********************************************************************************************************
+- name: Creating Persistent Volume
+ k8s:
+ state: present
+ src: /tmp/files/persistentVolume.yaml
+ namespace: logging
+
+#***********************************************************************************************************
+#Adding Elasticsearch user secret
+#***********************************************************************************************************
+- name: Adding Elasticsearch user secret
+ k8s:
+ state: present
+ src: /tmp/files/elasticsearch/user-secret.yaml
+ namespace: logging
+
+#***********************************************************************************************************
+#Starting Elasticsearch operator
+#***********************************************************************************************************
+- name: Starting Elasticsearch operator
+ shell: kubectl apply -f https://download.elastic.co/downloads/eck/1.2.0/all-in-one.yaml
+ ignore_errors: yes
+
+#***********************************************************************************************************
+#Starting Elasticsearch
+#***********************************************************************************************************
+- name: Starting Elasticsearch
+ k8s:
+ state: present
+ src: /tmp/files/elasticsearch/elasticsearch.yaml
+ namespace: logging
+
+#***********************************************************************************************************
+#Starting Kibana
+#***********************************************************************************************************
+- name: Starting Kibana
+ k8s:
+ state: present
+ src: /tmp/files/kibana/kibana.yaml
+ namespace: logging
+
+#***********************************************************************************************************
+#Starting nginx
+#***********************************************************************************************************
+- name: creating nginx configmap
+ k8s:
+ state: present
+ src: /tmp/files/nginx/nginx-conf-cm.yaml
+ namespace: logging
+
+- name: creating nginx key configmap
+ k8s:
+ state: present
+ src: /tmp/files/nginx/nginx-key-cm.yaml
+ namespace: logging
+
+- name: creating nginx pod
+ k8s:
+ state: present
+ src: /tmp/files/nginx/nginx.yaml
+ namespace: logging
+
+- name: creating nginx service
+ k8s:
+ state: present
+ src: /tmp/files/nginx/nginx-service.yaml
+ namespace: logging
+#***********************************************************************************************************
+#Starting fluentd
+#***********************************************************************************************************
+- name: creating fluentd configmap
+ k8s:
+ state: present
+ src: /tmp/files/fluentd/fluent-cm.yaml
+ namespace: logging
+
+- name: creating fluentd pod
+ k8s:
+ state: present
+ src: /tmp/files/fluentd/fluent.yaml
+ namespace: logging
+
+- name: creating fluentd service
+ k8s:
+ state: present
+ src: /tmp/files/fluentd/fluent-service.yaml
+ namespace: logging
+#***********************************************************************************************************
+#Starting elastalert
+#***********************************************************************************************************
+- name: creating elastalert config configmap
+ k8s:
+ state: present
+ src: /tmp/files/elastalert/ealert-conf-cm.yaml
+ namespace: logging
+
+- name: creating elastalert key configmap
+ k8s:
+ state: present
+ src: /tmp/files/elastalert/ealert-key-cm.yaml
+ namespace: logging
+
+- name: creating elastalert rule configmap
+ k8s:
+ state: present
+ src: /tmp/files/elastalert/ealert-rule-cm.yaml
+ namespace: logging
+
+- name: creating elastalert pod
+ k8s:
+ state: present
+ src: /tmp/files/elastalert/elastalert.yaml
+ namespace: logging
+
+#***********************************************************************************************************
+#removing /tmp/files
+#***********************************************************************************************************
+- name: Removing /tmp/files
+ file:
+ path: "/tmp/files"
+ state: absent
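After the role finishes, the whole stack can be checked in one place (a sketch; the first rollout may take several minutes while images pull and Elasticsearch forms a cluster):

kubectl -n logging get elasticsearch,kibana,pods,svc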
diff --git a/tools/lma/ansible-server/roles/monitoring/files/alertmanager/alertmanager-config.yaml b/tools/lma/ansible-server/roles/monitoring/files/alertmanager/alertmanager-config.yaml
new file mode 100644
index 00000000..7b9abc47
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/alertmanager/alertmanager-config.yaml
@@ -0,0 +1,37 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: alertmanager-config
+ namespace: monitoring
+data:
+ config.yml: |-
+ global:
+ route:
+ receiver: "webhook"
+ group_by: ['alertname', 'priority']
+ group_wait: 1s
+ group_interval: 5s
+ repeat_interval: 5s
+ routes:
+ - match:
+ severity: critical
+
+ receivers:
+ - name: "webhook"
+ webhook_configs:
+ - url: 'http://10.10.120.20/alertmanager'
+ send_resolved: true
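
The very short group/repeat intervals above are tuned for quick feedback rather than production use, and the webhook receiver address (10.10.120.20) is deployment-specific. The embedded configuration can be validated before the ConfigMap is applied; a sketch, assuming amtool is installed locally and the config.yml block has been extracted to a file:

    # Syntax-check the Alertmanager configuration
    amtool check-config ./config.yml
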
diff --git a/tools/lma/ansible-server/roles/monitoring/files/alertmanager/alertmanager-deployment.yaml b/tools/lma/ansible-server/roles/monitoring/files/alertmanager/alertmanager-deployment.yaml
new file mode 100644
index 00000000..f1c3d78e
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/alertmanager/alertmanager-deployment.yaml
@@ -0,0 +1,62 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app: alertmanager
+ adi10hero.monitoring: alertmanager
+ name: alertmanager
+ namespace: monitoring
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: alertmanager
+ adi10hero.monitoring: alertmanager
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: alertmanager
+ labels:
+ app: alertmanager
+ adi10hero.monitoring: alertmanager
+ spec:
+ containers:
+ - name: alertmanager
+ image: prom/alertmanager
+ args:
+ - --config.file=/etc/alertmanager/config.yml
+ - --storage.path=/alertmanager
+ - --cluster.peer=alertmanager1:6783
+ - --cluster.listen-address=0.0.0.0:6783
+ ports:
+ - containerPort: 9093
+ - containerPort: 6783
+ securityContext:
+ runAsUser: 0
+ volumeMounts:
+ - name: config-volume
+ mountPath: /etc/alertmanager
+ - name: alertmanager
+ mountPath: /alertmanager
+ restartPolicy: Always
+ volumes:
+ - name: config-volume
+ configMap:
+ name: alertmanager-config
+ - name: alertmanager
+ emptyDir: {}
diff --git a/tools/lma/ansible-server/roles/monitoring/files/alertmanager/alertmanager-service.yaml b/tools/lma/ansible-server/roles/monitoring/files/alertmanager/alertmanager-service.yaml
new file mode 100644
index 00000000..c67517d3
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/alertmanager/alertmanager-service.yaml
@@ -0,0 +1,41 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ adi10hero.monitoring: alertmanager
+ app: alertmanager
+ name: alertmanager
+ namespace: monitoring
+ annotations:
+ prometheus.io/scrape: 'true'
+ prometheus.io/path: /
+ prometheus.io/port: '8080'
+
+spec:
+ selector:
+ app: alertmanager
+ adi10hero.monitoring: alertmanager
+ type: NodePort
+ ports:
+ - name: "9093"
+ port: 9093
+ targetPort: 9093
+ nodePort: 30930
+ - name: "6783"
+ port: 6783
+ targetPort: 6783
+ nodePort: 30679
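
The two alertmanager Deployments peer with each other over port 6783 (--cluster.peer) to form an HA pair, and this Service exposes the web UI/API on NodePort 30930. A basic liveness check, assuming a cluster node reachable at <node-ip>:

    curl http://<node-ip>:30930/-/healthy
    # Cluster membership is visible in the status API
    curl http://<node-ip>:30930/api/v2/status
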
diff --git a/tools/lma/ansible-server/roles/monitoring/files/alertmanager/alertmanager1-deployment.yaml b/tools/lma/ansible-server/roles/monitoring/files/alertmanager/alertmanager1-deployment.yaml
new file mode 100644
index 00000000..18b76456
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/alertmanager/alertmanager1-deployment.yaml
@@ -0,0 +1,62 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app: alertmanager1
+ adi10hero.monitoring: alertmanager1
+ name: alertmanager1
+ namespace: monitoring
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: alertmanager1
+ adi10hero.monitoring: alertmanager1
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: alertmanager1
+ labels:
+ app: alertmanager1
+ adi10hero.monitoring: alertmanager1
+ spec:
+ containers:
+ - name: alertmanager1
+ image: prom/alertmanager
+ args:
+ - --config.file=/etc/alertmanager/config.yml
+ - --storage.path=/alertmanager
+ - --cluster.peer=alertmanager:6783
+ - --cluster.listen-address=0.0.0.0:6783
+ ports:
+ - containerPort: 9093
+ - containerPort: 6783
+ securityContext:
+ runAsUser: 0
+ volumeMounts:
+ - name: config-volume
+ mountPath: /etc/alertmanager
+ - name: alertmanager
+ mountPath: /alertmanager
+ restartPolicy: Always
+ volumes:
+ - name: config-volume
+ configMap:
+ name: alertmanager-config
+ - name: alertmanager
+ emptyDir: {}
diff --git a/tools/lma/ansible-server/roles/monitoring/files/alertmanager/alertmanager1-service.yaml b/tools/lma/ansible-server/roles/monitoring/files/alertmanager/alertmanager1-service.yaml
new file mode 100644
index 00000000..66d0d2b1
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/alertmanager/alertmanager1-service.yaml
@@ -0,0 +1,42 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ adi10hero.monitoring: alertmanager1
+ app: alertmanager1
+ name: alertmanager1
+ namespace: monitoring
+ annotations:
+ prometheus.io/scrape: 'true'
+ prometheus.io/path: /
+ prometheus.io/port: '8080'
+
+spec:
+ selector:
+ app: alertmanager1
+ adi10hero.monitoring: alertmanager1
+ type: NodePort
+ ports:
+ - name: "9093"
+ port: 9093
+ targetPort: 9093
+ nodePort: 30931
+ - name: "6783"
+ port: 6783
+ targetPort: 6783
+ nodePort: 30678
+
diff --git a/tools/lma/ansible-server/roles/monitoring/files/cadvisor/cadvisor-deamonset.yaml b/tools/lma/ansible-server/roles/monitoring/files/cadvisor/cadvisor-deamonset.yaml
new file mode 100644
index 00000000..6a62985e
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/cadvisor/cadvisor-deamonset.yaml
@@ -0,0 +1,79 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: cadvisor
+ namespace: monitoring
+ labels:
+ adi10hero.monitoring: cadvisor
+ app: cadvisor
+spec:
+ selector:
+ matchLabels:
+ app: cadvisor
+ adi10hero.monitoring: cadvisor
+ template:
+ metadata:
+ name: cadvisor
+ labels:
+ adi10hero.monitoring: cadvisor
+ app: cadvisor
+ spec:
+ containers:
+ - image: gcr.io/google-containers/cadvisor
+ name: cadvisor
+ ports:
+ - containerPort: 8080
+ securityContext:
+ runAsUser: 0
+ volumeMounts:
+ - mountPath: /rootfs
+ name: cadvisor-hostpath0
+ readOnly: true
+ - mountPath: /var/run
+ name: cadvisor-hostpath1
+ - mountPath: /sys
+ name: cadvisor-hostpath2
+ readOnly: true
+ - mountPath: /sys/fs/cgroup
+ name: cadvisor-hostpath3
+ readOnly: true
+ - mountPath: /dev/disk
+ name: cadvisor-hostpath4
+ readOnly: true
+ - mountPath: /var/lib/docker
+ name: cadvisor-hostpath5
+ readOnly: true
+ restartPolicy: Always
+ volumes:
+ - hostPath:
+ path: /
+ name: cadvisor-hostpath0
+ - hostPath:
+ path: /var/run
+ name: cadvisor-hostpath1
+ - hostPath:
+ path: /sys
+ name: cadvisor-hostpath2
+ - hostPath:
+ path: /cgroup
+ name: cadvisor-hostpath3
+ - hostPath:
+ path: /dev/disk/
+ name: cadvisor-hostpath4
+ - hostPath:
+ path: /var/lib/docker/
+ name: cadvisor-hostpath5
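
One detail worth double-checking in this DaemonSet: the volume backing /sys/fs/cgroup is taken from /cgroup on the host, while on most modern distributions the cgroup hierarchy is mounted at /sys/fs/cgroup, so the hostPath may need adjusting if cAdvisor reports missing cgroup stats. To confirm the DaemonSet landed on every node:

    kubectl -n monitoring rollout status daemonset/cadvisor
    kubectl -n monitoring get pods -l app=cadvisor -o wide
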
diff --git a/tools/lma/ansible-server/roles/monitoring/files/cadvisor/cadvisor-service.yaml b/tools/lma/ansible-server/roles/monitoring/files/cadvisor/cadvisor-service.yaml
new file mode 100644
index 00000000..734240b8
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/cadvisor/cadvisor-service.yaml
@@ -0,0 +1,30 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: cadvisor
+ adi10hero.monitoring: cadvisor
+ name: cadvisor
+ namespace: monitoring
+spec:
+ ports:
+ - name: "8080"
+ port: 8080
+ targetPort: 8080
+ selector:
+ app: cadvisor
+ adi10hero.monitoring: cadvisor
diff --git a/tools/lma/ansible-server/roles/monitoring/files/collectd-exporter/collectd-exporter-deployment.yaml b/tools/lma/ansible-server/roles/monitoring/files/collectd-exporter/collectd-exporter-deployment.yaml
new file mode 100644
index 00000000..b6bfe0b6
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/collectd-exporter/collectd-exporter-deployment.yaml
@@ -0,0 +1,51 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: collectd-exporter
+ namespace: monitoring
+ labels:
+ app: collectd-exporter
+ adi10hero.monitoring: collectd-exporter
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: collectd-exporter
+ adi10hero.monitoring: collectd-exporter
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: collectd-exporter
+ labels:
+ app: collectd-exporter
+ adi10hero.monitoring: collectd-exporter
+ spec:
+ containers:
+ - args:
+ - --collectd.listen-address=0.0.0.0:25826
+ image: prom/collectd-exporter
+ name: collectd-exporter
+ ports:
+ - containerPort: 9103
+ - containerPort: 25826
+ protocol: UDP
+ securityContext:
+ runAsUser: 0
+ restartPolicy: Always
+ volumes: null
+
diff --git a/tools/lma/ansible-server/roles/monitoring/files/collectd-exporter/collectd-exporter-service.yaml b/tools/lma/ansible-server/roles/monitoring/files/collectd-exporter/collectd-exporter-service.yaml
new file mode 100644
index 00000000..5609d04a
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/collectd-exporter/collectd-exporter-service.yaml
@@ -0,0 +1,35 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: Service
+metadata:
+ name: collectd-exporter
+ namespace: monitoring
+ labels:
+ app: collectd-exporter
+ adi10hero.monitoring: collectd-exporter
+spec:
+ ports:
+ - name: "9103"
+ port: 9103
+ nodePort: 30103
+ - name: "25826"
+ port: 25826
+ protocol: UDP
+ nodePort: 30826
+ selector:
+ app: collectd-exporter
+ adi10hero.monitoring: collectd-exporter
+ type: NodePort
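
collectd agents on the monitored hosts push metrics to the exporter over collectd's binary network protocol on UDP NodePort 30826, and Prometheus scrapes the translated metrics from port 9103. A minimal client-side snippet, assuming a cluster node IP of 10.10.120.20 (adjust to your environment):

    cat <<'EOF' >> /etc/collectd/collectd.conf
    LoadPlugin network
    <Plugin network>
      Server "10.10.120.20" "30826"
    </Plugin>
    EOF
    systemctl restart collectd
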
diff --git a/tools/lma/ansible-server/roles/monitoring/files/grafana/grafana-datasource-config.yaml b/tools/lma/ansible-server/roles/monitoring/files/grafana/grafana-datasource-config.yaml
new file mode 100644
index 00000000..e2b8c9fa
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/grafana/grafana-datasource-config.yaml
@@ -0,0 +1,35 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: grafana-datasources
+ namespace: monitoring
+data:
+ prometheus.yaml: |-
+ {
+ "apiVersion": 1,
+ "datasources": [
+ {
+ "access":"proxy",
+ "editable": true,
+ "name": "prometheus",
+ "orgId": 1,
+ "type": "prometheus",
+ "url": "http://prometheus-main:9090",
+ "version": 1
+ }
+ ]
+ }
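
This ConfigMap is mounted into Grafana's provisioning directory (see the Deployment below), so the Prometheus datasource is registered automatically at startup. Once Grafana is up, the result can be verified through its HTTP API; a sketch, assuming the NodePort service defined later and the default admin credentials:

    curl -s -u admin:admin http://<node-ip>:30000/api/datasources
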
diff --git a/tools/lma/ansible-server/roles/monitoring/files/grafana/grafana-deployment.yaml b/tools/lma/ansible-server/roles/monitoring/files/grafana/grafana-deployment.yaml
new file mode 100644
index 00000000..afb00948
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/grafana/grafana-deployment.yaml
@@ -0,0 +1,68 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ adi10hero.monitoring: grafana
+ app: grafana
+ name: grafana
+ namespace: monitoring
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ adi10hero.monitoring: grafana
+ app: grafana
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: grafana
+ labels:
+ adi10hero.monitoring: grafana
+ app: grafana
+ spec:
+ containers:
+ - name: grafana
+ image: grafana/grafana
+ ports:
+ - containerPort: 3000
+ env:
+ - name: GF_SECURITY_ADMIN_PASSWORD
+ value: admin
+ - name: GF_SECURITY_ADMIN_USER
+ value: admin
+ - name: GF_SERVER_DOMAIN
+ value: 10.10.120.20
+ - name: GF_SERVER_ROOT_URL
+ value: "%(protocol)s://%(domain)s:/metrics"
+ securityContext:
+ runAsUser: 0
+ volumeMounts:
+ - mountPath: /var/lib/grafana
+ name: grafana-storage
+ - mountPath: /etc/grafana/provisioning/datasources
+ name: grafana-datasources
+ readOnly: false
+ restartPolicy: Always
+ volumes:
+ - name: grafana-storage
+ persistentVolumeClaim:
+ claimName: grafana-pvc
+ - name: grafana-datasources
+ configMap:
+ defaultMode: 420
+ name: grafana-datasources
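
The admin credentials are injected here as plain environment variables; for anything beyond a lab setup they are better sourced from a Secret. A possible starting point (the secret name and keys are illustrative, and the env section above would need matching valueFrom/secretKeyRef entries):

    kubectl -n monitoring create secret generic grafana-admin \
      --from-literal=admin-user=admin \
      --from-literal=admin-password='<strong-password>'
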
diff --git a/tools/lma/ansible-server/roles/monitoring/files/grafana/grafana-pv.yaml b/tools/lma/ansible-server/roles/monitoring/files/grafana/grafana-pv.yaml
new file mode 100644
index 00000000..06bcc31b
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/grafana/grafana-pv.yaml
@@ -0,0 +1,31 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: grafana-pv
+ namespace: monitoring
+ labels:
+ app: grafana-pv
+ adi10hero.monitoring: grafana-pv
+spec:
+ storageClassName: monitoring
+ capacity:
+ storage: 5Gi
+ accessModes:
+ - ReadWriteMany
+ nfs:
+ server: 10.10.120.211
+ path: "/usr/share/monitoring_data/grafana"
diff --git a/tools/lma/ansible-server/roles/monitoring/files/grafana/grafana-pvc.yaml b/tools/lma/ansible-server/roles/monitoring/files/grafana/grafana-pvc.yaml
new file mode 100644
index 00000000..2c2955c8
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/grafana/grafana-pvc.yaml
@@ -0,0 +1,33 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: grafana-pvc
+ namespace: monitoring
+ labels:
+ app: grafana-pvc
+ adi10hero.monitoring: grafana-pvc
+spec:
+ accessModes:
+ - ReadWriteMany
+ storageClassName: monitoring
+ resources:
+ requests:
+ storage: 4Gi
+ selector:
+ matchLabels:
+ app: grafana-pv
+ adi10hero.monitoring: grafana-pv
diff --git a/tools/lma/ansible-server/roles/monitoring/files/grafana/grafana-service.yaml b/tools/lma/ansible-server/roles/monitoring/files/grafana/grafana-service.yaml
new file mode 100644
index 00000000..d1c9c9cc
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/grafana/grafana-service.yaml
@@ -0,0 +1,36 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: Service
+metadata:
+ name: grafana
+ namespace: monitoring
+ labels:
+ app: grafana
+ adi10hero.monitoring: grafana
+ annotations:
+ prometheus.io/scrape: 'true'
+ prometheus.io/port: '3000'
+spec:
+ selector:
+ app: grafana
+ adi10hero.monitoring: grafana
+ type: NodePort
+ ports:
+ - name: "3000"
+ port: 3000
+ targetPort: 3000
+ nodePort: 30000
+
diff --git a/tools/lma/ansible-server/roles/monitoring/files/kube-state-metrics/kube-state-metrics-deployment.yaml b/tools/lma/ansible-server/roles/monitoring/files/kube-state-metrics/kube-state-metrics-deployment.yaml
new file mode 100644
index 00000000..af3c5469
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/kube-state-metrics/kube-state-metrics-deployment.yaml
@@ -0,0 +1,36 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: kube-state-metrics
+ namespace: kube-system
+spec:
+ selector:
+ matchLabels:
+ app: kube-state-metrics
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ app: kube-state-metrics
+ spec:
+ #serviceAccountName: prometheus
+ containers:
+ - name: kube-state-metrics
+ image: quay.io/coreos/kube-state-metrics:v1.2.0
+ ports:
+ - containerPort: 8080
+ name: monitoring
diff --git a/tools/lma/ansible-server/roles/monitoring/files/kube-state-metrics/kube-state-metrics-service.yaml b/tools/lma/ansible-server/roles/monitoring/files/kube-state-metrics/kube-state-metrics-service.yaml
new file mode 100644
index 00000000..8d294391
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/kube-state-metrics/kube-state-metrics-service.yaml
@@ -0,0 +1,26 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+kind: Service
+apiVersion: v1
+metadata:
+ name: kube-state-metrics
+ namespace: kube-system
+spec:
+ selector:
+ app: kube-state-metrics
+ ports:
+ - protocol: TCP
+ port: 8080
+ targetPort: 8080
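
kube-state-metrics runs in kube-system and is scraped via the cluster-internal DNS name referenced in the Prometheus configuration further down. To eyeball the metrics it exposes, assuming kubectl port-forwarding is available:

    kubectl -n kube-system port-forward svc/kube-state-metrics 8080:8080 &
    curl -s http://localhost:8080/metrics | head
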
diff --git a/tools/lma/ansible-server/roles/monitoring/files/monitoring-namespace.yaml b/tools/lma/ansible-server/roles/monitoring/files/monitoring-namespace.yaml
new file mode 100644
index 00000000..f1c9b889
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/monitoring-namespace.yaml
@@ -0,0 +1,18 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: monitoring
diff --git a/tools/lma/ansible-server/roles/monitoring/files/node-exporter/nodeexporter-daemonset.yaml b/tools/lma/ansible-server/roles/monitoring/files/node-exporter/nodeexporter-daemonset.yaml
new file mode 100644
index 00000000..9334b2f4
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/node-exporter/nodeexporter-daemonset.yaml
@@ -0,0 +1,80 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: node-exporter-daemonset
+ namespace: monitoring
+ labels:
+ app: node-exporter
+ adi10hero.monitoring: node-exporter
+spec:
+ selector:
+ matchLabels:
+ app: node-exporter
+ adi10hero.monitoring: node-exporter
+ template:
+ metadata:
+ labels:
+ app: node-exporter
+ adi10hero.monitoring: node-exporter
+ annotations:
+ prometheus.io/scrape: "true"
+ prometheus.io/port: "9100"
+ spec:
+ hostPID: true
+ hostIPC: true
+ hostNetwork: true
+ containers:
+ - ports:
+ - containerPort: 9100
+ protocol: TCP
+ resources:
+ requests:
+ cpu: 0.15
+ securityContext:
+ runAsUser: 0
+ privileged: true
+ image: prom/node-exporter:v0.15.2
+ args:
+ - --path.procfs
+ - /host/proc
+ - --path.sysfs
+ - /host/sys
+ - --collector.filesystem.ignored-mount-points
+ - '"^/(sys|proc|dev|host|etc)($|/)"'
+ name: node-exporter
+ volumeMounts:
+ - name: dev
+ mountPath: /host/dev
+ - name: proc
+ mountPath: /host/proc
+ - name: sys
+ mountPath: /host/sys
+ - name: rootfs
+ mountPath: /rootfs
+ volumes:
+ - name: proc
+ hostPath:
+ path: /proc
+ - name: dev
+ hostPath:
+ path: /dev
+ - name: sys
+ hostPath:
+ path: /sys
+ - name: rootfs
+ hostPath:
+ path: /
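
Because the DaemonSet runs with hostNetwork: true, every node exposes node-exporter directly on port 9100. A spot check against any node, assuming it is reachable at <node-ip>:

    curl -s http://<node-ip>:9100/metrics | grep '^node_load1'
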
diff --git a/tools/lma/ansible-server/roles/monitoring/files/node-exporter/nodeexporter-service.yaml b/tools/lma/ansible-server/roles/monitoring/files/node-exporter/nodeexporter-service.yaml
new file mode 100644
index 00000000..dd0aea4d
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/node-exporter/nodeexporter-service.yaml
@@ -0,0 +1,33 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ adi10hero.monitoring: node-exporter
+ app: node-exporter
+ name: node-exporter
+ namespace: monitoring
+ annotations:
+ prometheus.io/scrape: "true"
+ prometheus.io/port: "9100"
+spec:
+ ports:
+ - name: "node-exporter"
+ port: 9100
+ targetPort: 9100
+ selector:
+ adi10hero.monitoring: node-exporter
+ app: node-exporter
diff --git a/tools/lma/ansible-server/roles/monitoring/files/prometheus/main-prometheus-service.yaml b/tools/lma/ansible-server/roles/monitoring/files/prometheus/main-prometheus-service.yaml
new file mode 100644
index 00000000..58b220a8
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/prometheus/main-prometheus-service.yaml
@@ -0,0 +1,35 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ adi10hero.monitoring: prometheus-main
+ app: prometheus-main
+ name: prometheus-main
+ namespace: monitoring
+ annotations:
+ prometheus.io/scrape: 'true'
+ prometheus.io/port: '9090'
+spec:
+ type: NodePort
+ ports:
+ - name: prometheus-main
+ protocol: TCP
+ port: 9090
+ nodePort: 30902
+ selector:
+ adi10hero.monitoring: prometheus1
+ app: prometheus
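
Note that this "main" Service selects pods labelled adi10hero.monitoring: prometheus1, i.e. it is evidently meant to front a second Prometheus instance defined alongside the prometheus Deployment below, mirroring the alertmanager/alertmanager1 pair. Once a matching pod exists, the NodePort can be health-checked:

    curl http://<node-ip>:30902/-/healthy
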
diff --git a/tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus-config.yaml b/tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus-config.yaml
new file mode 100644
index 00000000..917f978f
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus-config.yaml
@@ -0,0 +1,609 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: prometheus-config
+ namespace: monitoring
+data:
+ alert.rules: |-
+ groups:
+ - name: targets
+ rules:
+ - alert: MonitorServiceDown
+ expr: up == 0
+ for: 30s
+ labels:
+ severity: critical
+ annotations:
+ summary: "Monitor service non-operational"
+ description: "Service {{ $labels.instance }} is down."
+ - alert: HighCpuLoad
+ expr: node_load1 > 1.9
+ for: 15s
+ labels:
+ severity: critical
+ annotations:
+ summary: "Service under high load"
+ description: "Docker host is under high load, the avg load 1m is at {{ $value}}. Reported by instance {{ $labels.instance }} of job {{ $labels.job }}."
+
+ - name: host and hardware
+ rules:
+ - alert: HostHighCpuLoad
+ expr: 100 - (avg by(instance) (irate(node_cpu_seconds_total{mode="idle"}[5m])) * 100) > 80
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Host high CPU load (instance {{ $labels.instance }})"
+ description: "CPU load is > 80%\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: HostSwapIsFillingUp
+ expr: (1 - (node_memory_SwapFree_bytes / node_memory_SwapTotal_bytes)) * 100 > 80
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Host swap is filling up (instance {{ $labels.instance }})"
+ description: "Swap is filling up (>80%)\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: HighMemoryLoad
+ expr: (sum(node_memory_MemTotal_bytes) - sum(node_memory_MemFree_bytes + node_memory_Buffers_bytes + node_memory_Cached_bytes) ) / sum(node_memory_MemTotal_bytes) * 100 > 85
+ for: 30s
+ labels:
+ severity: warning
+ annotations:
+ summary: "Server memory is almost full"
+ description: "Docker host memory usage is {{ humanize $value}}%. Reported by instance {{ $labels.instance }} of job {{ $labels.job }}."
+
+ - alert: HighStorageLoad
+ expr: (node_filesystem_size_bytes{fstype="aufs"} - node_filesystem_free_bytes{fstype="aufs"}) / node_filesystem_size_bytes{fstype="aufs"} * 100 > 85
+ for: 30s
+ labels:
+ severity: warning
+ annotations:
+ summary: "Server storage is almost full"
+ description: "Docker host storage usage is {{ humanize $value}}%. Reported by instance {{ $labels.instance }} of job {{ $labels.job }}."
+
+ - alert: HostNetworkTransmitErrors
+ expr: increase(node_network_transmit_errs_total[5m]) > 0
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Host Network Transmit Errors (instance {{ $labels.instance }})"
+ description: "{{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf \"%.0f\" $value }} transmit errors in the last five minutes.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: HostOutOfMemory
+ expr: node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes * 100 < 10
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Host out of memory (instance {{ $labels.instance }})"
+ description: "Node memory is filling up (< 10% left)\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: HostMemoryUnderMemoryPressure
+ expr: rate(node_vmstat_pgmajfault[1m]) > 1000
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Host memory under memory pressure (instance {{ $labels.instance }})"
+ description: "The node is under heavy memory pressure. High rate of major page faults\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: HostUnusualNetworkThroughputIn
+ expr: sum by (instance) (irate(node_network_receive_bytes_total[2m])) / 1024 / 1024 > 100
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Host unusual network throughput in (instance {{ $labels.instance }})"
+ description: "Host network interfaces are probably receiving too much data (> 100 MB/s)\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: HostUnusualNetworkThroughputOut
+ expr: sum by (instance) (irate(node_network_transmit_bytes_total[2m])) / 1024 / 1024 > 100
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Host unusual network throughput out (instance {{ $labels.instance }})"
+ description: "Host network interfaces are probably sending too much data (> 100 MB/s)\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: HostUnusualDiskRateRead
+ expr: sum by (instance) (irate(node_disk_read_bytes_total[2m])) / 1024 / 1024 > 50
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Host unusual disk read rate (instance {{ $labels.instance }})"
+ description: "Disk is probably reading too much data (> 50 MB/s)\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: HostUnusualDiskRateWrite
+ expr: sum by (instance) (irate(node_disk_written_bytes_total[2m])) / 1024 / 1024 > 50
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Host unusual disk write rate (instance {{ $labels.instance }})"
+ description: "Disk is probably writing too much data (> 50 MB/s)\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: HostOutOfDiskSpace
+ expr: (node_filesystem_avail_bytes{mountpoint="/rootfs"} * 100) / node_filesystem_size_bytes{mountpoint="/rootfs"} < 10
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Host out of disk space (instance {{ $labels.instance }})"
+ description: "Disk is almost full (< 10% left)\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: HostDiskWillFillIn4Hours
+ expr: predict_linear(node_filesystem_free_bytes{fstype!~"tmpfs"}[1h], 4 * 3600) < 0
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Host disk will fill in 4 hours (instance {{ $labels.instance }})"
+ description: "Disk will fill in 4 hours at current write rate\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: HostPhysicalComponentTooHot
+ expr: node_hwmon_temp_celsius > 75
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Host physical component too hot (instance {{ $labels.instance }})"
+ description: "Physical hardware component too hot\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: HostNodeOvertemperatureAlarm
+ expr: node_hwmon_temp_alarm == 1
+ for: 5m
+ labels:
+ severity: critical
+ annotations:
+ summary: "Host node overtemperature alarm (instance {{ $labels.instance }})"
+ description: "Physical node temperature alarm triggered\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: HostKernelVersionDeviations
+ expr: count(sum(label_replace(node_uname_info, "kernel", "$1", "release", "([0-9]+.[0-9]+.[0-9]+).*")) by (kernel)) > 1
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Host kernel version deviations (instance {{ $labels.instance }})"
+ description: "Different kernel versions are running\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: HostOomKillDetected
+ expr: increase(node_vmstat_oom_kill[5m]) > 0
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Host OOM kill detected (instance {{ $labels.instance }})"
+ description: "OOM kill detected\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: HostEdacCorrectableErrorsDetected
+ expr: increase(node_edac_correctable_errors_total[5m]) > 0
+ for: 5m
+ labels:
+ severity: info
+ annotations:
+ summary: "Host EDAC Correctable Errors detected (instance {{ $labels.instance }})"
+ description: "{{ $labels.instance }} has had {{ printf \"%.0f\" $value }} correctable memory errors reported by EDAC in the last 5 minutes.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: HostEdacUncorrectableErrorsDetected
+ expr: node_edac_uncorrectable_errors_total > 0
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Host EDAC Uncorrectable Errors detected (instance {{ $labels.instance }})"
+ description: "{{ $labels.instance }} has had {{ printf \"%.0f\" $value }} uncorrectable memory errors reported by EDAC in the last 5 minutes.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: HostNetworkReceiveErrors
+ expr: increase(node_network_receive_errs_total[5m]) > 0
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Host Network Receive Errors (instance {{ $labels.instance }})"
+ description: "{{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf \"%.0f\" $value }} receive errors in the last five minutes.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: HostNetworkTransmitErrors
+ expr: increase(node_network_transmit_errs_total[5m]) > 0
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Host Network Transmit Errors (instance {{ $labels.instance }})"
+ description: "{{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf \"%.0f\" $value }} transmit errors in the last five minutes.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - name: container
+ rules:
+ - alert: ContainerKilled
+ expr: time() - container_last_seen > 60
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Container killed (instance {{ $labels.instance }})"
+ description: "A container has disappeared\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: ContainerCpuUsage
+ expr: sum by(instance, name) (rate(container_cpu_usage_seconds_total[3m]) * 100 > 80)
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Container CPU usage (instance {{ $labels.instance }})"
+ description: "Container CPU usage is above 80%\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: ContainerMemoryUsage
+ expr: (sum(container_memory_usage_bytes) BY (instance, name) / sum(container_spec_memory_limit_bytes > 0) BY (instance, name) * 100) > 125
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Container Memory usage (instance {{ $labels.instance }})"
+ description: "Container Memory usage is above 80%\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: ContainerVolumeUsage
+ expr: (1 - (sum(container_fs_inodes_free) BY (instance) / sum(container_fs_inodes_total) BY (instance)) * 100) > 80
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Container Volume usage (instance {{ $labels.instance }})"
+ description: "Container Volume usage is above 80%\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: ContainerVolumeIoUsage
+ expr: (sum(container_fs_io_current) BY (instance, name) * 100) > 80
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Container Volume IO usage (instance {{ $labels.instance }})"
+ description: "Container Volume IO usage is above 80%\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: ContainerHighThrottleRate
+ expr: rate(container_cpu_cfs_throttled_seconds_total[3m]) > 1
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Container high throttle rate (instance {{ $labels.instance }})"
+ description: "Container is being throttled\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - name: kubernetes
+ rules:
+ - alert: KubernetesNodeReady
+ expr: kube_node_status_condition{condition="Ready",status="true"} == 0
+ for: 5m
+ labels:
+ severity: critical
+ annotations:
+ summary: "Kubernetes Node ready (instance {{ $labels.instance }})"
+ description: "Node {{ $labels.node }} has been unready for a long time\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesMemoryPressure
+ expr: kube_node_status_condition{condition="MemoryPressure",status="true"} == 1
+ for: 5m
+ labels:
+ severity: critical
+ annotations:
+ summary: "Kubernetes memory pressure (instance {{ $labels.instance }})"
+ description: "{{ $labels.node }} has MemoryPressure condition\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesDiskPressure
+ expr: kube_node_status_condition{condition="DiskPressure",status="true"} == 1
+ for: 5m
+ labels:
+ severity: critical
+ annotations:
+ summary: "Kubernetes disk pressure (instance {{ $labels.instance }})"
+ description: "{{ $labels.node }} has DiskPressure condition\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesOutOfDisk
+ expr: kube_node_status_condition{condition="OutOfDisk",status="true"} == 1
+ for: 5m
+ labels:
+ severity: critical
+ annotations:
+ summary: "Kubernetes out of disk (instance {{ $labels.instance }})"
+ description: "{{ $labels.node }} has OutOfDisk condition\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesJobFailed
+ expr: kube_job_status_failed > 0
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Kubernetes Job failed (instance {{ $labels.instance }})"
+ description: "Job {{$labels.namespace}}/{{$labels.exported_job}} failed to complete\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesCronjobSuspended
+ expr: kube_cronjob_spec_suspend != 0
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Kubernetes CronJob suspended (instance {{ $labels.instance }})"
+ description: "CronJob {{ $labels.namespace }}/{{ $labels.cronjob }} is suspended\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesPersistentvolumeclaimPending
+ expr: kube_persistentvolumeclaim_status_phase{phase="Pending"} == 1
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Kubernetes PersistentVolumeClaim pending (instance {{ $labels.instance }})"
+ description: "PersistentVolumeClaim {{ $labels.namespace }}/{{ $labels.persistentvolumeclaim }} is pending\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesVolumeOutOfDiskSpace
+ expr: kubelet_volume_stats_available_bytes / kubelet_volume_stats_capacity_bytes * 100 < 10
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Kubernetes Volume out of disk space (instance {{ $labels.instance }})"
+ description: "Volume is almost full (< 10% left)\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesVolumeFullInFourDays
+ expr: predict_linear(kubelet_volume_stats_available_bytes[6h], 4 * 24 * 3600) < 0
+ for: 5m
+ labels:
+ severity: critical
+ annotations:
+ summary: "Kubernetes Volume full in four days (instance {{ $labels.instance }})"
+ description: "{{ $labels.namespace }}/{{ $labels.persistentvolumeclaim }} is expected to fill up within four days. Currently {{ $value | humanize }}% is available.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesPersistentvolumeError
+ expr: kube_persistentvolume_status_phase{phase=~"Failed|Pending",job="kube-state-metrics"} > 0
+ for: 5m
+ labels:
+ severity: critical
+ annotations:
+ summary: "Kubernetes PersistentVolume error (instance {{ $labels.instance }})"
+ description: "Persistent volume is in bad state\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesStatefulsetDown
+ expr: (kube_statefulset_status_replicas_ready / kube_statefulset_status_replicas_current) != 1
+ for: 5m
+ labels:
+ severity: critical
+ annotations:
+ summary: "Kubernetes StatefulSet down (instance {{ $labels.instance }})"
+ description: "A StatefulSet went down\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesHpaScalingAbility
+ expr: kube_hpa_status_condition{condition="false", status="AbleToScale"} == 1
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Kubernetes HPA scaling ability (instance {{ $labels.instance }})"
+ description: "Pod is unable to scale\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesHpaMetricAvailability
+ expr: kube_hpa_status_condition{condition="false", status="ScalingActive"} == 1
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Kubernetes HPA metric availability (instance {{ $labels.instance }})"
+ description: "HPA is not able to colelct metrics\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesHpaScaleCapability
+ expr: kube_hpa_status_desired_replicas >= kube_hpa_spec_max_replicas
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Kubernetes HPA scale capability (instance {{ $labels.instance }})"
+ description: "The maximum number of desired Pods has been hit\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesPodNotHealthy
+ expr: min_over_time(sum by (namespace, pod) (kube_pod_status_phase{phase=~"Pending|Unknown|Failed"})[1h:]) > 0
+ for: 5m
+ labels:
+ severity: critical
+ annotations:
+ summary: "Kubernetes Pod not healthy (instance {{ $labels.instance }})"
+ description: "Pod has been in a non-ready state for longer than an hour.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesPodCrashLooping
+ expr: rate(kube_pod_container_status_restarts_total[15m]) * 60 * 5 > 5
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Kubernetes pod crash looping (instance {{ $labels.instance }})"
+ description: "Pod {{ $labels.pod }} is crash looping\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesReplicasetMismatch
+ expr: kube_replicaset_spec_replicas != kube_replicaset_status_ready_replicas
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Kubernetes ReplicaSet mismatch (instance {{ $labels.instance }})"
+ description: "ReplicaSet Replicas mismatch\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesDeploymentReplicasMismatch
+ expr: kube_deployment_spec_replicas != kube_deployment_status_replicas_available
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Kubernetes Deployment replicas mismatch (instance {{ $labels.instance }})"
+ description: "Deployment Replicas mismatch\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesStatefulsetReplicasMismatch
+ expr: kube_statefulset_status_replicas_ready != kube_statefulset_status_replicas
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Kubernetes StatefulSet replicas mismatch (instance {{ $labels.instance }})"
+ description: "A StatefulSet has not matched the expected number of replicas for longer than 15 minutes.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesDeploymentGenerationMismatch
+ expr: kube_deployment_status_observed_generation != kube_deployment_metadata_generation
+ for: 5m
+ labels:
+ severity: critical
+ annotations:
+ summary: "Kubernetes Deployment generation mismatch (instance {{ $labels.instance }})"
+ description: "A Deployment has failed but has not been rolled back.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesStatefulsetGenerationMismatch
+ expr: kube_statefulset_status_observed_generation != kube_statefulset_metadata_generation
+ for: 5m
+ labels:
+ severity: critical
+ annotations:
+ summary: "Kubernetes StatefulSet generation mismatch (instance {{ $labels.instance }})"
+ description: "A StatefulSet has failed but has not been rolled back.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesStatefulsetUpdateNotRolledOut
+ expr: max without (revision) (kube_statefulset_status_current_revision unless kube_statefulset_status_update_revision) * (kube_statefulset_replicas != kube_statefulset_status_replicas_updated)
+ for: 5m
+ labels:
+ severity: critical
+ annotations:
+ summary: "Kubernetes StatefulSet update not rolled out (instance {{ $labels.instance }})"
+ description: "StatefulSet update has not been rolled out.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesDaemonsetRolloutStuck
+ expr: kube_daemonset_status_number_ready / kube_daemonset_status_desired_number_scheduled * 100 < 100 or kube_daemonset_status_desired_number_scheduled - kube_daemonset_status_current_number_scheduled > 0
+ for: 5m
+ labels:
+ severity: critical
+ annotations:
+ summary: "Kubernetes DaemonSet rollout stuck (instance {{ $labels.instance }})"
+ description: "Some Pods of DaemonSet are not scheduled or not ready\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesDaemonsetMisscheduled
+ expr: kube_daemonset_status_number_misscheduled > 0
+ for: 5m
+ labels:
+ severity: critical
+ annotations:
+ summary: "Kubernetes DaemonSet misscheduled (instance {{ $labels.instance }})"
+ description: "Some DaemonSet Pods are running where they are not supposed to run\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesCronjobTooLong
+ expr: time() - kube_cronjob_next_schedule_time > 3600
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Kubernetes CronJob too long (instance {{ $labels.instance }})"
+ description: "CronJob {{ $labels.namespace }}/{{ $labels.cronjob }} is taking more than 1h to complete.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesJobCompletion
+ expr: kube_job_spec_completions - kube_job_status_succeeded > 0 or kube_job_status_failed > 0
+ for: 5m
+ labels:
+ severity: critical
+ annotations:
+ summary: "Kubernetes job completion (instance {{ $labels.instance }})"
+ description: "Kubernetes Job failed to complete\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesApiServerErrors
+ expr: sum(rate(apiserver_request_count{job="apiserver",code=~"^(?:5..)$"}[2m])) / sum(rate(apiserver_request_count{job="apiserver"}[2m])) * 100 > 3
+ for: 5m
+ labels:
+ severity: critical
+ annotations:
+ summary: "Kubernetes API server errors (instance {{ $labels.instance }})"
+ description: "Kubernetes API server is experiencing high error rate\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesApiClientErrors
+ expr: (sum(rate(rest_client_requests_total{code=~"(4|5).."}[2m])) by (instance, job) / sum(rate(rest_client_requests_total[2m])) by (instance, job)) * 100 > 1
+ for: 5m
+ labels:
+ severity: critical
+ annotations:
+ summary: "Kubernetes API client errors (instance {{ $labels.instance }})"
+ description: "Kubernetes API client is experiencing high error rate\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesClientCertificateExpiresNextWeek
+ expr: apiserver_client_certificate_expiration_seconds_count{job="apiserver"} > 0 and histogram_quantile(0.01, sum by (job, le) (rate(apiserver_client_certificate_expiration_seconds_bucket{job="apiserver"}[5m]))) < 7*24*60*60
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Kubernetes client certificate expires next week (instance {{ $labels.instance }})"
+ description: "A client certificate used to authenticate to the apiserver is expiring next week.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesClientCertificateExpiresSoon
+ expr: apiserver_client_certificate_expiration_seconds_count{job="apiserver"} > 0 and histogram_quantile(0.01, sum by (job, le) (rate(apiserver_client_certificate_expiration_seconds_bucket{job="apiserver"}[5m]))) < 24*60*60
+ for: 5m
+ labels:
+ severity: critical
+ annotations:
+ summary: "Kubernetes client certificate expires soon (instance {{ $labels.instance }})"
+ description: "A client certificate used to authenticate to the apiserver is expiring in less than 24.0 hours.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+ - alert: KubernetesApiServerLatency
+ expr: histogram_quantile(0.99, sum(apiserver_request_latencies_bucket{verb!~"CONNECT|WATCHLIST|WATCH|PROXY"}) WITHOUT (instance, resource)) / 1e+06 > 1
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: "Kubernetes API server latency (instance {{ $labels.instance }})"
+ description: "Kubernetes API server has a 99th percentile latency of {{ $value }} seconds for {{ $labels.verb }} {{ $labels.resource }}.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
+
+
+ prometheus.yml: |-
+ global:
+ scrape_interval: 15s
+ evaluation_interval: 15s
+
+ rule_files:
+ - "/etc/prometheus/alert.rules"
+
+ scrape_configs:
+ - job_name: 'collectd-exporter'
+ scrape_interval: 5s
+ static_configs:
+ - targets: ['collectd-exporter:9103']
+
+ - job_name: 'cadvisor'
+ scrape_interval: 5s
+ static_configs:
+ - targets: ['cadvisor:8080']
+
+ - job_name: 'node-exporter'
+ scrape_interval: 5s
+ static_configs:
+ - targets: ['node-exporter:9100']
+
+ - job_name: 'prometheus'
+ scrape_interval: 10s
+ static_configs:
+ - targets: ['localhost:9090']
+
+ - job_name: 'kube-state-metrics'
+ scrape_interval: 10s
+ static_configs:
+ - targets: ['kube-state-metrics.kube-system.svc.cluster.local:8080']
+
+ alerting:
+ alertmanagers:
+ - scheme: http
+ static_configs:
+ - targets: ['alertmanager:9093', 'alertmanager1:9093']
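
Both the scrape configuration and the alert.rules file above can be validated with promtool before (or after) rolling the ConfigMap out; a sketch, assuming the blocks have been extracted to local files or that the promtool bundled in the prom/prometheus image is used in-cluster:

    promtool check rules ./alert.rules
    # 'promtool check config' also resolves rule_files, so run it where
    # /etc/prometheus/alert.rules exists (e.g. inside the prometheus pod)
    kubectl -n monitoring exec deploy/prometheus-deployment -- \
      promtool check config /etc/prometheus/prometheus.yml
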
diff --git a/tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus-deployment.yaml b/tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus-deployment.yaml
new file mode 100644
index 00000000..5b98b154
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus-deployment.yaml
@@ -0,0 +1,73 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: prometheus-deployment
+ namespace: monitoring
+ labels:
+ app: prometheus
+ adi10hero.monitoring: prometheus
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ adi10hero.monitoring: prometheus
+ app: prometheus
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ labels:
+ adi10hero.monitoring: prometheus
+ app: prometheus
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: kubernetes.io/hostname
+ operator: In
+ values:
+ - vm2
+ containers:
+ - name: prometheus
+ image: prom/prometheus
+ args:
+ - --config.file=/etc/prometheus/prometheus.yml
+ - --storage.tsdb.path=/prometheus
+ - --storage.tsdb.retention.size=3GB
+ - --storage.tsdb.retention.time=30d
+ - --web.console.libraries=/etc/prometheus/console_libraries
+ - --web.console.templates=/etc/prometheus/consoles
+ ports:
+ - containerPort: 9090
+ securityContext:
+ runAsUser: 0
+ volumeMounts:
+ - name: prometheus-config-volume
+ mountPath: /etc/prometheus/
+ - name: prometheus-storage-volume
+ mountPath: /prometheus/
+ restartPolicy: Always
+ volumes:
+ - name: prometheus-config-volume
+ configMap:
+ defaultMode: 420
+ name: prometheus-config
+ - name: prometheus-storage-volume
+ persistentVolumeClaim:
+ claimName: prometheus-pvc
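
The nodeAffinity block pins Prometheus to the node named vm2, which matches the hostPath PV defined next; the hostname is environment-specific. Confirm a node carries that label before deploying:

    kubectl get nodes -l kubernetes.io/hostname=vm2
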
diff --git a/tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus-pv.yaml b/tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus-pv.yaml
new file mode 100644
index 00000000..f10cd073
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus-pv.yaml
@@ -0,0 +1,30 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: prometheus-pv
+ namespace: monitoring
+ labels:
+ app: prometheus-pv
+ adi10hero.monitoring: prometheus-pv
+spec:
+ storageClassName: monitoring
+ capacity:
+ storage: 6Gi
+ accessModes:
+ - ReadWriteMany
+ hostPath:
+ path: "/usr/share/monitoring_data/prometheus"
diff --git a/tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus-pvc.yaml b/tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus-pvc.yaml
new file mode 100644
index 00000000..812fcc73
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus-pvc.yaml
@@ -0,0 +1,33 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: prometheus-pvc
+ namespace: monitoring
+ labels:
+ app: prometheus-pvc
+ adi10hero.monitoring: prometheus-pvc
+spec:
+ accessModes:
+ - ReadWriteMany
+ storageClassName: monitoring
+ resources:
+ requests:
+ storage: 3Gi
+ selector:
+ matchLabels:
+ app: prometheus-pv
+ adi10hero.monitoring: prometheus-pv
diff --git a/tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus-service.yaml b/tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus-service.yaml
new file mode 100644
index 00000000..5be76d3e
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus-service.yaml
@@ -0,0 +1,34 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ adi10hero.monitoring: prometheus
+ app: prometheus
+ name: prometheus
+ namespace: monitoring
+ annotations:
+ prometheus.io/scrape: 'true'
+ prometheus.io/port: '9090'
+spec:
+ type: NodePort
+ ports:
+ - name: prometheus
+ protocol: TCP
+ port: 9090
+ nodePort: 30900
+ selector:
+ adi10hero.monitoring: prometheus
diff --git a/tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus1-deployment.yaml b/tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus1-deployment.yaml
new file mode 100644
index 00000000..149bea84
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus1-deployment.yaml
@@ -0,0 +1,73 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: prometheus1-deployment
+ namespace: monitoring
+ labels:
+ app: prometheus1
+ adi10hero.monitoring: prometheus1
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ adi10hero.monitoring: prometheus1
+ app: prometheus1
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ labels:
+ adi10hero.monitoring: prometheus1
+ app: prometheus1
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: kubernetes.io/hostname
+ operator: In
+ values:
+ - vm3
+ containers:
+ - name: prometheus
+ image: prom/prometheus
+ args:
+ - --config.file=/etc/prometheus/prometheus.yml
+ - --storage.tsdb.path=/prometheus
+ - --storage.tsdb.retention.size=3GB
+ - --storage.tsdb.retention.time=30d
+ - --web.console.libraries=/etc/prometheus/console_libraries
+ - --web.console.templates=/etc/prometheus/consoles
+ ports:
+ - containerPort: 9090
+ securityContext:
+ runAsUser: 0
+ volumeMounts:
+ - name: prometheus-config-volume
+ mountPath: /etc/prometheus/
+ - name: prometheus-storage-volume
+ mountPath: /prometheus/
+ restartPolicy: Always
+ volumes:
+ - name: prometheus-config-volume
+ configMap:
+ defaultMode: 420
+ name: prometheus-config
+ - name: prometheus-storage-volume
+ persistentVolumeClaim:
+ claimName: prometheus-pvc
diff --git a/tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus1-service.yaml b/tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus1-service.yaml
new file mode 100644
index 00000000..439deec1
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/files/prometheus/prometheus1-service.yaml
@@ -0,0 +1,35 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ adi10hero.monitoring: prometheus1
+ app: prometheus1
+ name: prometheus1
+ namespace: monitoring
+ annotations:
+ prometheus.io/scrape: 'true'
+ prometheus.io/port: '9090'
+spec:
+ type: NodePort
+ ports:
+ - name: prometheus1
+ protocol: TCP
+ port: 9090
+ nodePort: 30901
+ selector:
+ adi10hero.monitoring: prometheus1
+ app: prometheus1
diff --git a/tools/lma/ansible-server/roles/monitoring/tasks/main.yml b/tools/lma/ansible-server/roles/monitoring/tasks/main.yml
new file mode 100644
index 00000000..cd4e6aca
--- /dev/null
+++ b/tools/lma/ansible-server/roles/monitoring/tasks/main.yml
@@ -0,0 +1,273 @@
+# Copyright 2020 Aditya Srivastava.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+#PAG (Prometheus, Alertmanager, Grafana) setup in k8s cluster
+
+#***********************************************************************************************************
+#copy all yaml to /tmp/files/
+#***********************************************************************************************************
+- name: copy all yaml to /tmp/files/
+ copy:
+ src: ../files/
+ dest: /tmp/files/
+
+#***********************************************************************************************************
+#Creating Namespace
+#***********************************************************************************************************
+- name: Creating Monitoring Namespace
+ k8s:
+ state: present
+ src: /tmp/files/monitoring-namespace.yaml
+ namespace: monitoring
+
+#***********************************************************************************************************
+#creating Persistent Volume
+#***********************************************************************************************************
+- name: creating Persistent Volume for Prometheus
+ k8s:
+ state: present
+ src: /tmp/files/prometheus/prometheus-pv.yaml
+ namespace: monitoring
+
+#***********************************************************************************************************
+#creating Persistent Volume
+#***********************************************************************************************************
+- name: creating Persistent Volume for Grafana
+ k8s:
+ state: present
+ src: /tmp/files/grafana/grafana-pv.yaml
+ namespace: monitoring
+
+#***********************************************************************************************************
+#creating Persistent Volume Claim
+#***********************************************************************************************************
+- name: creating Persistent Volume Claim for Prometheus
+ k8s:
+ state: present
+ src: /tmp/files/prometheus/prometheus-pvc.yaml
+ namespace: monitoring
+
+#***********************************************************************************************************
+#creating Persistent Volume Claim
+#***********************************************************************************************************
+- name: creating Persistent Volume Claim for Grafana
+ k8s:
+ state: present
+ src: /tmp/files/grafana/grafana-pvc.yaml
+ namespace: monitoring
+
+#***********************************************************************************************************
+#Making the cAdvisor daemonset
+#***********************************************************************************************************
+- name: Creating cAdvisor daemonset
+ k8s:
+ state: present
+ src: /tmp/files/cadvisor/cadvisor-deamonset.yaml
+ namespace: monitoring
+
+#***********************************************************************************************************
+#Starting the CAdvisor service
+#***********************************************************************************************************
+- name: Starting cAdvisor service
+ k8s:
+ state: present
+ src: /tmp/files/cadvisor/cadvisor-service.yaml
+ namespace: monitoring
+
+#***********************************************************************************************************
+#Deploying and Starting the kube-state-metrics service
+#***********************************************************************************************************
+- name: Deploying kube-state-metrics
+ k8s:
+ state: present
+ src: /tmp/files/kube-state-metrics/kube-state-metrics-deployment.yaml
+ namespace: kube-system
+
+- name: Starting kube-state-metrics service
+ k8s:
+ state: present
+ src: /tmp/files/kube-state-metrics/kube-state-metrics-service.yaml
+ namespace: kube-system
+
+#***********************************************************************************************************
+#Making the NodeExporter daemonset
+#***********************************************************************************************************
+- name: Creating NodeExporter daemonset
+ k8s:
+ state: present
+ src: /tmp/files/node-exporter/nodeexporter-daemonset.yaml
+ namespace: monitoring
+
+#***********************************************************************************************************
+#Starting the NodeExporter service
+#***********************************************************************************************************
+- name: Starting NodeExporter service
+ k8s:
+ state: present
+ src: /tmp/files/node-exporter/nodeexporter-service.yaml
+ namespace: monitoring
+
+#***********************************************************************************************************
+#Making the collectd-exporter deployment
+#***********************************************************************************************************
+- name: Creating collectd-exporter deployment
+ k8s:
+ state: present
+ src: /tmp/files/collectd-exporter/collectd-exporter-deployment.yaml
+ namespace: monitoring
+
+#***********************************************************************************************************
+#Making the collectd-exporter service
+#***********************************************************************************************************
+- name: Creating collectd-exporter service
+ k8s:
+ state: present
+ src: /tmp/files/collectd-exporter/collectd-exporter-service.yaml
+ namespace: monitoring
+
+#***********************************************************************************************************
+#Webhook goes here
+#***********************************************************************************************************
+
+#***********************************************************************************************************
+#Making the config file for Alertmanagers
+#***********************************************************************************************************
+- name: Creating config map for Alertmanagers
+ k8s:
+ state: present
+ src: /tmp/files/alertmanager/alertmanager-config.yaml
+ namespace: monitoring
+
+# - name: Creating config map for Alertmanagers
+# k8s:
+# state: present
+# src: /tmp/files/alertmanager1-config.yaml
+# namespace: monitoring
+
+#***********************************************************************************************************
+#Making the 1st alertmanager deployment
+#***********************************************************************************************************
+- name: Creating 1st alertmanager deployment
+ k8s:
+ state: present
+ src: /tmp/files/alertmanager/alertmanager-deployment.yaml
+ namespace: monitoring
+
+#***********************************************************************************************************
+#Making the 1st alertmanager service
+#***********************************************************************************************************
+- name: Creating 1st alertmanager service
+ k8s:
+ state: present
+ src: /tmp/files/alertmanager/alertmanager-service.yaml
+ namespace: monitoring
+
+#***********************************************************************************************************
+#Making the 2nd alertmanager deployment
+#***********************************************************************************************************
+- name: Creating 2nd alertmanager deployment
+ k8s:
+ state: present
+ src: /tmp/files/alertmanager/alertmanager1-deployment.yaml
+ namespace: monitoring
+
+#***********************************************************************************************************
+#Making the 2nd alertmanager service
+#***********************************************************************************************************
+- name: Creating 2nd alertmanager service
+ k8s:
+ state: present
+ src: /tmp/files/alertmanager/alertmanager1-service.yaml
+ namespace: monitoring
+
+#***********************************************************************************************************
+#Making the config file for Prometheus
+#***********************************************************************************************************
+- name: Creating 1st Prometheus Config
+ k8s:
+ state: present
+ src: /tmp/files/prometheus/prometheus-config.yaml
+ namespace: monitoring
+
+# - name: Creating 2nd Prometheus Config
+# k8s:
+# state: present
+# src: /tmp/files/prometheus1-config.yaml
+# namespace: monitoring
+
+#***********************************************************************************************************
+#Starting Prometheus
+#***********************************************************************************************************
+- name: Starting Prometheus 1
+ k8s:
+ state: present
+ src: /tmp/files/prometheus/prometheus-deployment.yaml
+ namespace: monitoring
+
+- name: Starting Prometheus 2
+ k8s:
+ state: present
+ src: /tmp/files/prometheus/prometheus1-deployment.yaml
+ namespace: monitoring
+
+#***********************************************************************************************************
+#Starting Prometheus Service
+#***********************************************************************************************************
+- name: Starting Prometheus 1 Service
+ k8s:
+ state: present
+ src: /tmp/files/prometheus/prometheus-service.yaml
+ namespace: monitoring
+
+- name: Starting Prometheus 2 Service
+ k8s:
+ state: present
+ src: /tmp/files/prometheus/prometheus1-service.yaml
+ namespace: monitoring
+
+- name: Starting Main Prometheus Service
+ k8s:
+ state: present
+ src: /tmp/files/prometheus/main-prometheus-service.yaml
+ namespace: monitoring
+
+#***********************************************************************************************************
+#Starting Grafana
+#***********************************************************************************************************
+- name: Creating Grafana Datasource Config
+ k8s:
+ state: present
+ src: /tmp/files/grafana/grafana-datasource-config.yaml
+ namespace: monitoring
+
+- name: Starting Grafana
+ k8s:
+ state: present
+ src: /tmp/files/grafana/grafana-deployment.yaml
+ namespace: monitoring
+
+- name: Starting Grafana Service
+ k8s:
+ state: present
+ src: /tmp/files/grafana/grafana-service.yaml
+ namespace: monitoring
+
+#***********************************************************************************************************
+#removing /tmp/files
+#***********************************************************************************************************
+- name: Removing /tmp/files
+ file:
+ path: "/tmp/files"
+ state: absent
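+
+#***********************************************************************************************************
+#Usage note (assumption, not part of this role): apply it from the repo's
+#playbook, e.g. `ansible-playbook -i <inventory> <site.yml>`; the k8s module
+#used above requires the `openshift` Python client on the Ansible controller.
+#***********************************************************************************************************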
diff --git a/tools/lma/ansible-server/roles/nfs/tasks/main.yml b/tools/lma/ansible-server/roles/nfs/tasks/main.yml
new file mode 100644
index 00000000..2380ea74
--- /dev/null
+++ b/tools/lma/ansible-server/roles/nfs/tasks/main.yml
@@ -0,0 +1,42 @@
+# Copyright 2020 Adarsh yadav, Aditya Srivastava
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+#create dirs under /srv/nfs
+- name: Create Directory for elasticsearch
+ file: path="/srv/nfs/{{item}}" state=directory
+ with_items:
+ - ['data', 'master']
+
+- name: Create Directory for grafana
+ file: path="/usr/share/monitoring_data/grafana" state=directory
+
+#installing NFS
+- name: Installing NFS server utils
+ yum:
+ name: nfs-utils
+ state: present
+
+#update /etc/exports file
+- name: Edit /etc/exports file for NFS
+ lineinfile: path=/etc/exports line="{{item.line}}"
+ with_items:
+ - {line: "/srv/nfs/master *(rw,sync,no_root_squash,no_subtree_check)"}
+ - {line: "/srv/nfs/data *(rw,sync,no_root_squash,no_subtree_check)"}
+ - {line: "/usr/share/monitoring_data/grafana *(rw,sync,no_root_squash,no_subtree_check)"}
+
+#starting NFS service
+- name: 'starting NFS service'
+ service:
+ name: nfs
+ state: restarted
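+
+#Note (assumption): on newer CentOS/RHEL hosts the unit is named "nfs-server";
+#restarting it re-reads /etc/exports, equivalent to running `exportfs -ra`.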
diff --git a/tools/lma/jupyter-notebooks/Causation-Analysis.ipynb b/tools/lma/jupyter-notebooks/Causation-Analysis.ipynb
new file mode 100644
index 00000000..d2e7886a
--- /dev/null
+++ b/tools/lma/jupyter-notebooks/Causation-Analysis.ipynb
@@ -0,0 +1,784 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Logs and Metrics Analysis Notebook\n",
+ "\n",
+ "#### Used to capture anomalies in the logs and analyse / visualize the metrics in the vicinity of that time\n",
+ "\n",
+ "##### Contributors:\n",
+ "\n",
+ "- Adarsh Yadav <adiyadav0509@gmail.com> \n",
+ " \n",
+ " Log Analysis and Anomaly Finding\n",
+ " \n",
+ "\n",
+ "\n",
+ "\n",
+ "- Aditya Srivastava <adityasrivastava301199@gmail.com>\n",
+ " \n",
+ " Metrics Analysis and Visualization"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Metrics Analysis and Visualization"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import pandas as pd\n",
+ "import matplotlib.pyplot as plt\n",
+ "import matplotlib.dates as mdates\n",
+ "import numpy as np\n",
+ "\n",
+ "import datetime\n",
+ "import time\n",
+ "import requests\n",
+ "\n",
+ "from pprint import pprint\n",
+ "import json\n",
+ "from datetime import datetime, timedelta\n",
+ "\n",
+ "from elasticsearch import Elasticsearch\n",
+ "from elasticsearch_dsl import Search\n",
+ "from elasticsearch.connection import create_ssl_context\n",
+ "import ssl\n",
+ "import urllib3"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "PROMETHEUS = 'http://10.10.120.211:30902/' #do not change, unless sure"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Helper Functions"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#function to make DF out of query json\n",
+ "\n",
+ "def convert_to_df(res_json):\n",
+ "\n",
+ " data_list = res_json['data']['result']\n",
+ " res_df = pd.DataFrame()\n",
+ " if not data_list:\n",
+ " return res_df\n",
+ "\n",
+ " # making colums\n",
+ " headers = data_list[0]\n",
+ " for data in data_list:\n",
+ " metrics = data['metric']\n",
+ " for metric in metrics.keys():\n",
+ " res_df[metric] = np.nan\n",
+ " res_df['value'] = 0\n",
+ " \n",
+ " # filling the df\n",
+ " for data in data_list:\n",
+ " metrics = data['metric']\n",
+ " metrics['value'] = data['value'][-1]\n",
+ " res_df = res_df.append(metrics, ignore_index=True) \n",
+ "\n",
+ " return res_df\n",
+ "\n",
+ "def convert_to_df_range(res_json):\n",
+ "\n",
+ " data_list = res_json['data']['result']\n",
+ " res_df = pd.DataFrame()\n",
+ " if not data_list:\n",
+ " return res_df\n",
+ "\n",
+ " # filling the df\n",
+ " for data in data_list:\n",
+ " metrics = data['metric']\n",
+ " values = np.array(data['values'])\n",
+ " for time, value in values:\n",
+ " metrics['timestamp'] = time\n",
+ " metrics['value'] = value\n",
+ " res_df = res_df.append(metrics, ignore_index=True) \n",
+ "\n",
+ " return res_df\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# functions to query\n",
+ "\n",
+ "def convert_to_timestamp(s):\n",
+ " return time.mktime(datetime.strptime(s, \"%Y-%m-%d %H:%M:%S\").timetuple())\n",
+ "\n",
+ "def query_current(params={}):\n",
+ " # input: params\n",
+ " # type: dict\n",
+ " # Example: {'query': 'container_cpu_user_seconds_total'}\n",
+ " \n",
+ " # Output: dict, loaded json response of the query\n",
+ "\n",
+ " res = requests.get(PROMETHEUS + '/api/v1/query', \n",
+ " params=params)\n",
+ " return json.loads(res.text)\n",
+ "\n",
+ "\n",
+ "def query_range(start, end, params={}, steps = '30s'):\n",
+ " # input: params\n",
+ " # type: dict\n",
+ " # Example: {'query': 'container_cpu_user_seconds_total'}\n",
+ " \n",
+ " # Output: dict, loaded json response of the query\n",
+ " params[\"start\"] = convert_to_timestamp(start)\n",
+ " params[\"end\"] = convert_to_timestamp(end)\n",
+ " params[\"step\"] = steps\n",
+ "\n",
+ " # print(params)\n",
+ "\n",
+ " res = requests.get(PROMETHEUS + '/api/v1/query_range', \n",
+ " params=params,\n",
+ " )\n",
+ "\n",
+ " return json.loads(res.text)\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ " "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Analysis Function"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### CPU"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# CPU Unused Cores\n",
+ "def unused_cores(start=None, end=None, node=None, steps='15s', csv=None, verbose=False):\n",
+ " \n",
+ " if csv is not None:\n",
+ " df = pd.read_csv(csv)\n",
+ " return df\n",
+ " else:\n",
+ " if start is None or end is None or node is None:\n",
+ " return \"Start, end and Node name required when fetching from prometheus\"\n",
+ " \n",
+ " params = {'query' : \"collectd_cpu_percent{exported_instance='\" + node + \"'}\"}\n",
+ "\n",
+ " target_cpu_usage_range = query_range(start, end, params, steps)\n",
+ " df = convert_to_df_range(target_cpu_usage_range)\n",
+ "\n",
+ " df = df.drop(['__name__', 'instance', 'job'], axis = 1)\n",
+ " groups = df.groupby(['cpu'])\n",
+ " if verbose: print(\"Unused Cores :\")\n",
+ " unused_cores = []\n",
+ " for key, item in groups:\n",
+ " curr_df = item\n",
+ " idle_row = curr_df.loc[curr_df['type'] == 'idle']\n",
+ " if idle_row['value'].iloc[0] == '100':\n",
+ " if verbose: print(\"Core: \",key)\n",
+ " unused_cores.append(int(key))\n",
+ "\n",
+ " print(\"Number of unused cores: \", len(unused_cores))\n",
+ " return unused_cores\n",
+ "\n",
+ "\n",
+ "#CPU fully used cores\n",
+ "def fully_used_cores(start=None, end=None, node=None, steps='15s', csv=None, verbose=False):\n",
+ " \n",
+ " if csv is not None:\n",
+ " df = pd.read_csv(csv)\n",
+ " return df\n",
+ " else:\n",
+ " if start is None or end is None or node is None:\n",
+ " return \"Start, end and Node name required when fetching from prometheus\"\n",
+ " \n",
+ " params = {'query' : \"collectd_cpu_percent{exported_instance='\" + node + \"'}\"}\n",
+ "\n",
+ " target_cpu_usage_range = query_range(start, end, params, steps)\n",
+ " df = convert_to_df_range(target_cpu_usage_range)\n",
+ "\n",
+ " df = df.drop(['__name__', 'instance', 'job'], axis = 1)\n",
+ " groups = df.groupby(['cpu'])\n",
+ " if verbose: print(\"Fully Used Cores :\")\n",
+ " fully_used_cores = []\n",
+ " for key, item in groups:\n",
+ " curr_df = item\n",
+ " idle_row = curr_df.loc[curr_df['type'] == 'idle']\n",
+ " if idle_row['value'].iloc[0] == '0':\n",
+ " if verbose: print(\"Core: \",key)\n",
+ " fully_used_cores.append(int(key))\n",
+ " print(\"Number of fully used cores: \", len(fully_used_cores))\n",
+ " return fully_used_cores\n",
+ "\n",
+ "\n",
+ "# CPU used cores plots\n",
+ "def plot_used_cores(start=None, end=None, node=None, steps='15s', csv=None, verbose=False):\n",
+ " \n",
+ " if csv is not None:\n",
+ " df = pd.read_csv(csv)\n",
+ " return df\n",
+ " else:\n",
+ " if start is None or end is None or node is None:\n",
+ " return \"Start, end and Node name required when fetching from prometheus\"\n",
+ "\n",
+ " params = {'query' : \"collectd_cpu_percent{exported_instance='\" + node + \"'}\"}\n",
+ "\n",
+ " target_cpu_usage_range = query_range(start, end, params, steps)\n",
+ " df = convert_to_df_range(target_cpu_usage_range)\n",
+ " \n",
+ " df = df.drop(['__name__', 'instance', 'job'], axis = 1)\n",
+ " groups = df.groupby(['cpu'])\n",
+ " used_cores = []\n",
+ "\n",
+ " for key, item in groups:\n",
+ " curr_df = item\n",
+ " user_row = curr_df.loc[curr_df['type'] == 'user']\n",
+ " sys_row = curr_df.loc[curr_df['type'] == 'system']\n",
+ "\n",
+ "\n",
+ " if np.any(sys_row != '0') or np.any(user_row != '0'):\n",
+ " used_cores.append(key)\n",
+ " type_grps = curr_df.groupby('type')\n",
+ " fig = plt.figure(figsize=(24,6), facecolor='oldlace', edgecolor='red')\n",
+ "\n",
+ " for type_key, new_item in type_grps:\n",
+ "\n",
+ " if type_key == 'system':\n",
+ " ax1 = fig.add_subplot(131)\n",
+ " ax1.title.set_text(type_key)\n",
+ " ax1.plot(new_item['timestamp'], new_item['value'])\n",
+ " elif type_key == 'user':\n",
+ " ax2 = fig.add_subplot(132)\n",
+ " ax2.title.set_text(type_key)\n",
+ " ax2.plot(new_item['timestamp'], new_item['value'])\n",
+ " elif type_key == 'wait':\n",
+ " ax3 = fig.add_subplot(133)\n",
+ " ax3.title.set_text(type_key)\n",
+ " ax3.plot(new_item['timestamp'], new_item['value'])\n",
+ "\n",
+ " plt.suptitle('Used CPU Core {}'.format(key), fontsize=14)\n",
+ " plt.show()\n",
+ " print(\"Number of used cores: \", len(used_cores))\n",
+ " return used_cores"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### Interface"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Interface Dropped (both type 1 and 2, i.e rx and tx)\n",
+ "#TODO: Change this to separate functions later\n",
+ "def interface_dropped(start=None, end=None, node=None, steps='15s', csv=None, verbose=False):\n",
+ " \n",
+ " if csv is not None:\n",
+ " df = pd.read_csv(csv)\n",
+ " df_0 = df #TODO: Change this\n",
+ " df_1 = df #TODO: Change this\n",
+ " else:\n",
+ " if start is None or end is None or node is None:\n",
+ " return \"Start, end and Node name required when fetching from prometheus\"\n",
+ " \n",
+ " params = {'query' : \"collectd_interface_if_dropped_0_total{exported_instance='\" + node + \"'}\"}\n",
+ "\n",
+ " interface_dropped_0 = query_range(start, end, params, steps)\n",
+ " df_0 = convert_to_df_range(interface_dropped_0)\n",
+ " \n",
+ " params = {'query' : \"collectd_interface_if_dropped_1_total{exported_instance='\" + node + \"'}\"}\n",
+ " interface_dropped_1 = query_range(start, end, params, steps)\n",
+ " df_1 = convert_to_df_range(interface_dropped_1)\n",
+ "\n",
+ " \n",
+ " #df_0 : interfaces_dropped_0_df\n",
+ " df_0 = df_0.drop(['__name__', 'instance', 'job'], axis = 1)\n",
+ "\n",
+ " #df_1 : interfaces_dropped_1_df\n",
+ " df_1 = df_1.drop(['__name__', 'instance', 'job'], axis = 1)\n",
+ "\n",
+ " groups_0 = df_0.groupby(['interface'])\n",
+ " groups_1 = df_1.groupby(['interface'])\n",
+ "\n",
+ " groups = [groups_0, groups_1]\n",
+ " dropped_interfaces= []\n",
+ " drop_type = 0\n",
+ " color = ['oldlace', 'mistyrose']\n",
+ " plot_iter = 111\n",
+ " for group in groups:\n",
+ " dropped = []\n",
+ "\n",
+ " for key, item in group:\n",
+ " curr_df = item\n",
+ " if np.any(curr_df['value'] == '1'):\n",
+ " dropped_row = curr_df.loc[curr_df['value'] == '1']\n",
+ " dropped.append([key, dropped_row['timestamp'].iloc[0]])\n",
+ " fig = plt.figure(figsize=(24,6), facecolor=color[drop_type], edgecolor='red')\n",
+ " ax = fig.add_subplot(plot_iter)\n",
+ " ax.title.set_text(\"Interface: {}\".format(key))\n",
+ " ax.plot(item['timestamp'], item['value'])\n",
+ " dropped_interfaces.append(dropped)\n",
+ " plt.suptitle('Interfaces Drop type {}'.format(drop_type), fontsize=14)\n",
+ " plt.show()\n",
+ " drop_type += 1\n",
+ " return dropped_interfaces\n",
+ "\n",
+ "\n",
+ "# Interface Errors (both type 1 and 2, i.e rx and tx)\n",
+ "#TODO: Change this to separate functions later\n",
+ "def interface_errors(start=None, end=None, node=None, steps='15s', csv=None, verbose=False):\n",
+ " \n",
+ " if csv is not None:\n",
+ " df = pd.read_csv(csv)\n",
+ " df_0 = df #TODO: Change this\n",
+ " df_1 = df #TODO: Change this\n",
+ " else:\n",
+ " if start is None or end is None or node is None:\n",
+ " return \"Start, end and Node name required when fetching from prometheus\"\n",
+ " \n",
+ " params = {'query' : \"collectd_interface_if_errors_0_total{exported_instance='\" + node + \"'}\"}\n",
+ " interfaces_errors_0 = query_range(start, end, params, steps)\n",
+ " df_0 = convert_to_df_range(interfaces_errors_0)\n",
+ " \n",
+ " params = {'query' : \"collectd_interface_if_errors_1_total{exported_instance='\" + node + \"'}\"}\n",
+ " interface_errors_1 = query_range(start, end, params, steps)\n",
+ " df_1 = convert_to_df_range(interface_errors_1)\n",
+ "\n",
+ " \n",
+ " #df_0 : interfaces_errors_0_df\n",
+ " df_0 = df_0.drop(['__name__', 'instance', 'job'], axis = 1)\n",
+ "\n",
+ " #df_1 : interfaces_dropped_1_df\n",
+ " df_1 = df_1.drop(['__name__', 'instance', 'job'], axis = 1)\n",
+ "\n",
+ " groups_0 = df_0.groupby(['interface'])\n",
+ " groups_1 = df_1.groupby(['interface'])\n",
+ "\n",
+ " groups = [groups_0, groups_1]\n",
+ " err_interfaces= []\n",
+ " err_type = 0\n",
+ " color = ['oldlace', 'mistyrose']\n",
+ " for group in groups:\n",
+ " errors = []\n",
+ "\n",
+ " for key, item in group:\n",
+ " curr_df = item\n",
+ "\n",
+ " if np.any(curr_df['value'] == '1'):\n",
+ " err_row = curr_df.loc[curr_df['value'] == '1']\n",
+ " erros.append([key, err_row['timestamp'].iloc[0]])\n",
+ "\n",
+ " fig = plt.figure(figsize=(24,6), facecolor=color[err_type], edgecolor='red')\n",
+ " ax = fig.add_subplot(111)\n",
+ " ax.title.set_text(\"Interface: {}\".format(key))\n",
+ " ax.plot(item['timestamp'], item['value'])\n",
+ "\n",
+ " err_interfaces.append(errors)\n",
+ " plt.suptitle('Interfaces Error type {}'.format(err_type), fontsize=14)\n",
+ " plt.show()\n",
+ " err_type += 1\n",
+ "\n",
+ " return err_interfaces"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### RDT "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# L3 cache bytes\n",
+ "def plot_rdt_bytes(start=None, end=None, node=None, steps='15s', csv=None, verbose=False):\n",
+ " \n",
+ " if csv is not None:\n",
+ " df = pd.read_csv(csv)\n",
+ " else:\n",
+ " if start is None or end is None or node is None:\n",
+ " return \"Start, end and Node name required when fetching from prometheus\"\n",
+ "\n",
+ " params = {'query' : \"collectd_intel_rdt_bytes{exported_instance='\" + node + \"'}\"}\n",
+ " intel_rdt_bytes = query_range(start, end, params, steps)\n",
+ " df = convert_to_df_range(intel_rdt_bytes)\n",
+ "\n",
+ " df = df.drop(['__name__', 'instance', 'job'], axis = 1)\n",
+ " groups = df.groupby(['intel_rdt'])\n",
+ " for key, item in groups:\n",
+ " curr_df = item\n",
+ " fig = plt.figure(figsize=(24,6), facecolor='oldlace', edgecolor='red')\n",
+ " ax1 = fig.add_subplot(111)\n",
+ " ax1.title.set_text(\"Intel RDT Number: {}\".format(key))\n",
+ " ax1.plot(item['timestamp'], item['value'])\n",
+ " plt.show()\n",
+ " return\n",
+ "\n",
+ "\n",
+ "# L3 IPC values\n",
+ "def plot_rdt_ipc(start=None, end=None, node=None, steps='15s', csv=None, verbose=False):\n",
+ " \n",
+ " if csv is not None:\n",
+ " df = pd.read_csv(csv)\n",
+ " else:\n",
+ " if start is None or end is None or node is None:\n",
+ " return \"Start, end and Node name required when fetching from prometheus\"\n",
+ " \n",
+ " params = {'query' : \"collectd_intel_rdt_ipc{exported_instance='\" + node + \"'}\"}\n",
+ " intel_rdt_ipc = query_range(start, end, params, steps)\n",
+ " df = convert_to_df_range(intel_rdt_ipc)\n",
+ "\n",
+ " df = df.drop(['__name__', 'instance', 'job'], axis = 1)\n",
+ " groups = df.groupby(['intel_rdt'])\n",
+ " for key, item in groups:\n",
+ " curr_df = item\n",
+ " fig = plt.figure(figsize=(24,6), facecolor='oldlace', edgecolor='red')\n",
+ " ax1 = fig.add_subplot(111)\n",
+ " ax1.title.set_text(\"Intel RDT Number: {}, IPC value\".format(key))\n",
+ " ax1.plot(item['timestamp'], item['value'])\n",
+ " plt.show()\n",
+ " return\n",
+ "\n",
+ "\n",
+ "# memeory bandwidtdh\n",
+ "def get_rdt_memory_bandwidth(start=None, end=None, node=None, steps='15s', csv=None, verbose=False):\n",
+ " \n",
+ " if csv is not None:\n",
+ " df = pd.read_csv(csv)\n",
+ " else:\n",
+ "\n",
+ " if start is None or end is None or node is None:\n",
+ " return \"Start, end and Node name required when fetching from prometheus\"\n",
+ " \n",
+ " params = {'query' : \"collectd_intel_rdt_memory_bandwidth_total{exported_instance='\" + node + \"'}\"}\n",
+ " intel_rdt_mem_bw = query_range(start, end, params, steps)\n",
+ " df = convert_to_df_range(intel_rdt_mem_bw)\n",
+ "\n",
+ " df = df.drop(['__name__', 'instance', 'job'], axis = 1)\n",
+ " \n",
+ " return df"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### Memory"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "scrolled": true
+ },
+ "outputs": [],
+ "source": [
+ "def get_memory_usage(start=None, end=None, node=None, steps='15s', csv=None, verbose=False):\n",
+ " \n",
+ " if csv is not None:\n",
+ " df = pd.read_csv(csv)\n",
+ " else:\n",
+ " if start is None or end is None or node is None:\n",
+ " return \"Start, end and Node name required when fetching from prometheus\"\n",
+ " \n",
+ " params = {'query' : \"collectd_memory{exported_instance='\" + node + \"'} / (1024*1024*1024) \"} \n",
+ " target_memory_usage_range = query_range(start, end, params, steps)\n",
+ " df = convert_to_df_range(target_memory_usage_range)\n",
+ " \n",
+ " df = df.drop(['instance', 'job'], axis = 1)\n",
+ " groups = df.groupby(['memory'])\n",
+ " for key, item in groups:\n",
+ " curr_df = item\n",
+ " fig = plt.figure(figsize=(24,6), facecolor='oldlace', edgecolor='red')\n",
+ " ax1 = fig.add_subplot(111)\n",
+ " ax1.title.set_text(\"Memory Type: {}\".format(key))\n",
+ " ax1.plot(item['timestamp'], item['value'])\n",
+ " plt.show()\n",
+ " return df"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Testing Zone"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "get_memory_usage('2020-08-03 08:00:12', '2020-08-03 08:01:12', 'pod12-node4')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def analyse(timestamp, node):\n",
+ " ts = datetime.strptime(timestamp.split(',')[0], \"%Y-%m-%d %H:%M:%S\")\n",
+ " start = ts - timedelta(seconds=10)\n",
+ " end = ts + timedelta(seconds=10)\n",
+ " \n",
+ " start = str(start)\n",
+ " end = str(end)\n",
+ " steps = '5s'\n",
+ "\n",
+ " print(\"Starting Analysis from\",start,\"to\",end,'\\n\\n')\n",
+ "\n",
+ " if \"node4\" in node:\n",
+ " node = 'pod12-node4'\n",
+ "\n",
+ " #cpu analysis\n",
+ " print(\"=====CPU ANALYSIS=====\\n\")\n",
+ " unused = unused_cores(start, end, node, steps)\n",
+ " print(\"Unused Cores:\", unused)\n",
+ " fully_used = fully_used_cores(start, end, node, steps)\n",
+ " print(\"Fully Used Cores:\", fully_used)\n",
+ " print(\"Plotting used cores:\")\n",
+ " used_cores = plot_used_cores(start, end, node, steps)\n",
+ " \n",
+ " #interface analysis\n",
+ " print(\"=====Interfaces Dropped / Errors=====\\n\")\n",
+ " dropped_interfaces = interface_dropped(start, end, node, steps)\n",
+ " err_interfaces = interface_errors(start, end, node, steps)\n",
+ " \n",
+ " #RDT Analysis\n",
+ " print(\"=====RDT Analysis=====\\n\")\n",
+ " plot_rdt_bytes(start, end, node, steps)\n",
+ " plot_rdt_ipc(start, end, node, steps)\n",
+ " mem_bandwidht = get_rdt_memory_bandwidth(start, end, node, steps)\n",
+ " \n",
+ " #Memory Analysis:\n",
+ " print(\"=====Memory Analysis=====\\n\")\n",
+ " mem = get_memory_usage(start, end, node, steps)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Usage / Examples\n",
+ "\n",
+ "\n",
+ "##### CPU \n",
+ "\n",
+ "- For calling cpu unsued cores\n",
+ "\n",
+ "```py\n",
+ "# Fetching from prometheus\n",
+ "cores = unused_cores('2020-07-31 08:00:12', '2020-07-31 08:01:12', 'pod12-node4')\n",
+ "\n",
+ "```\n",
+ "\n",
+ "- For finding fully used cores\n",
+ "\n",
+ "```py\n",
+ "# Fetching from prometheus\n",
+ "fully_used = fully_used_cores('2020-07-31 08:00:12', '2020-07-31 08:01:12', 'pod12-node4')\n",
+ "\n",
+ "```\n",
+ "\n",
+ "- Similarly for plotting used cores\n",
+ "\n",
+ "```py\n",
+ "# Fetching\n",
+ "plot_used_cores('2020-07-31 08:00:12', '2020-07-31 08:01:12', 'pod12-node4')\n",
+ "\n",
+ "#csv\n",
+ "# use Analysis-Monitoring-Local Notebook for correct analysis \n",
+ "plot_used_cores(csv='metrics_data/cpu-0/cpu-user-2020-06-02')\n",
+ "\n",
+ "```\n",
+ "\n",
+ "\n",
+ "##### Interface\n",
+ "\n",
+ "- Interface Dropped \n",
+ "\n",
+ "```py\n",
+ "# Fetching from prom\n",
+ "dropped_interfaces = interface_dropped('2020-07-31 08:00:12', '2020-07-31 08:01:12', 'pod12-node4')\n",
+ "\n",
+ "```\n",
+ "\n",
+ "- Interface Errors\n",
+ "\n",
+ "```py\n",
+ "# Fetching from prom\n",
+ "interface_errors('2020-07-31 08:00:12', '2020-07-31 08:01:12', 'pod12-node4')\n",
+ "```\n",
+ "\n",
+ "##### RDT\n",
+ "\n",
+ "- Plot bytes\n",
+ "\n",
+ "```py\n",
+ "# fetch\n",
+ "plot_rdt_bytes('2020-07-31 08:00:12', '2020-07-31 08:01:12','pod12-node4')\n",
+ "```\n",
+ "\n",
+ "- Plot ipc values\n",
+ "\n",
+ "```py\n",
+ "#fetch\n",
+ "plot_rdt_ipc('2020-07-31 08:00:12', '2020-07-31 08:01:12', 'pod12-node4')\n",
+ "```\n",
+ "\n",
+ "- Memory bandwidth\n",
+ "\n",
+ "```py\n",
+ "#fetch\n",
+ "get_rdt_memory_bandwidth('2020-07-31 08:00:12', '2020-07-31 08:01:12', 'pod12-node4')\n",
+ "```\n",
+ "\n",
+ "##### Memory\n",
+ "\n",
+ "- Memory usage\n",
+ "\n",
+ "```py\n",
+ "get_memory_usage('2020-08-03 08:00:12', '2020-08-03 08:01:12', 'pod12-node4')\n",
+ "```\n",
+ "\n",
+ "##### Analyse everything\n",
+ "\n",
+ "```py\n",
+ "# example alert_time: 2020-08-03 08:00:12\n",
+ "# example index: 'pod12-node4'\n",
+ "analyse(alert_time,index)\n",
+ "```"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### Checking Anomaly in logs"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#Give file name\n",
+ "foldername = \"results_2020-08-07_03-39-57\"\n",
+ "#Give index name - \"node1*\" or \"node4*\"\n",
+ "index = \"node4*\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "scrolled": true
+ },
+ "outputs": [],
+ "source": [
+ "ssl_context = create_ssl_context()\n",
+ "ssl_context.check_hostname = False\n",
+ "ssl_context.verify_mode = ssl.CERT_NONE\n",
+ "urllib3.disable_warnings()\n",
+ "client = Elasticsearch(['https://elasticsearch:password123@10.10.120.211:31111'],verify_certs=False,ssl_context=ssl_context)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "vsperf = \"vsperf-overall_\"+ foldername[8:] +\".log\"\n",
+ "s = Search(index=index).using(client).query(\"exists\", field=\"alert\").query(\"match_phrase\", log_path=vsperf)\n",
+ "for hits in s.scan():\n",
+ " alert_time = hits.alert_time\n",
+ "\n",
+ "print(alert_time)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "analyse(alert_time,index)"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.6.8"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
diff --git a/tools/lma/logs/dockerfile/elastalert/Dockerfile b/tools/lma/logs/dockerfile/elastalert/Dockerfile
new file mode 100644
index 00000000..3304ad17
--- /dev/null
+++ b/tools/lma/logs/dockerfile/elastalert/Dockerfile
@@ -0,0 +1,23 @@
+# Copyright 2020 Adarsh yadav, Aditya Srivastava
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM python:alpine
+RUN apk --update upgrade && \
+ apk add gcc libffi-dev musl-dev python3-dev openssl-dev tzdata libmagic && \
+ rm -rf /var/cache/apk/*
+RUN pip install elastalert &&\
+ apk del gcc libffi-dev musl-dev python3-dev openssl-dev
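+
+# Build sketch (hypothetical tag; elastalert still expects a config file and
+# rules to be mounted at runtime):
+#   docker build -t vsperf/elastalert .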
+RUN mkdir -p /opt/elastalert && \
+ mkdir -p /opt/elastalert/rules
+WORKDIR /opt/elastalert
\ No newline at end of file
diff --git a/tools/lma/logs/dockerfile/fluentd/Dockerfile b/tools/lma/logs/dockerfile/fluentd/Dockerfile
new file mode 100644
index 00000000..19dea0f8
--- /dev/null
+++ b/tools/lma/logs/dockerfile/fluentd/Dockerfile
@@ -0,0 +1,23 @@
+# Copyright 2020 Adarsh yadav, Aditya Srivastava
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM fluent/fluentd:v1.11.0-debian-1.0
+USER root
+RUN gem sources --add https://rubygems.org/
+RUN apt-get update \
+ && gem install fluent-plugin-elasticsearch \
+ && gem install elasticsearch-xpack\
+ && gem install fluent-plugin-rewrite-tag-filter\
+ && gem install fluent-plugin-dio
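+
+# Build sketch (hypothetical tag): docker build -t vsperf/fluentd-es .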
+USER fluent
\ No newline at end of file
diff --git a/tools/lma/logs/jupyter-notebooks/Trend-Analysis.ipynb b/tools/lma/logs/jupyter-notebooks/Trend-Analysis.ipynb
new file mode 100644
index 00000000..1bc770a1
--- /dev/null
+++ b/tools/lma/logs/jupyter-notebooks/Trend-Analysis.ipynb
@@ -0,0 +1,308 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Trend Analysis\n",
+ "##### Contributor:\n",
+ "\n",
+ "- Adarsh Yadav <adiyadav0509@gmail.com> "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import pandas as pd\n",
+ "import matplotlib.pyplot as plt\n",
+ "import seaborn as sns\n",
+ "import matplotlib.dates as mdates\n",
+ "import numpy as np\n",
+ "import io \n",
+ "\n",
+ "from elasticsearch import Elasticsearch\n",
+ "from elasticsearch_dsl import Search\n",
+ "from elasticsearch.connection import create_ssl_context\n",
+ "import csv\n",
+ "import ssl\n",
+ "import urllib3\n",
+ "import os"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Enter foldername and index"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#Give folder name\n",
+ "# foldername = \"results_2020-06-12_06-47-56\"\n",
+ "foldername = \"result-test1\"\n",
+ "#Give index name - \"node1*\" or \"node4*\"\n",
+ "index = \"node4*\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "ssl_context = create_ssl_context()\n",
+ "ssl_context.check_hostname = False\n",
+ "ssl_context.verify_mode = ssl.CERT_NONE\n",
+ "urllib3.disable_warnings()\n",
+ "client = Elasticsearch(['https://elasticsearch:password123@10.10.120.211:31111'],verify_certs=False,ssl_context=ssl_context)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Trex"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "filename = \"/tmp/\"+foldername+\"/trex-liveresults-counts.dat\"\n",
+ "s = Search(index=index).using(client).query(\"exists\", field=\"ts\").query(\"match_phrase\", log_path=filename)\n",
+ "\n",
+ "trex = pd.DataFrame()\n",
+ "trex_data = dict()\n",
+ "for hits in s.scan():\n",
+ " trex_data['ts'] = hits.ts\n",
+ " trex_data['rx_pkts'] = hits.rx_pkts\n",
+ " trex_data['rx_port'] = hits.rx_port\n",
+ " trex_data['tx_port'] = hits.tx_port\n",
+ " trex = trex.append(trex_data, ignore_index=True)\n",
+ "if not trex.empty:\n",
+ " #convert 'ts' to datetime\n",
+ " trex['ts'] = pd.to_datetime(trex['ts'],unit='s')\n",
+ " trex_grp = trex.groupby('rx_port')\n",
+ " trex_rx_0 = trex_grp.get_group(0.0) \n",
+ " trex_rx_1 = trex_grp.get_group(1.0) \n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "if not trex.empty:\n",
+ " fig, ax = plt.subplots(2,figsize=(16, 10))\n",
+ " ax[0].plot(trex_rx_0['ts'],\n",
+ " trex_rx_0['rx_pkts'],\n",
+ " 'tab:orange')\n",
+ " ax[0].title.set_text(\"At rx_port=0 & tx_port=1\")\n",
+ " ax[0].set(xlabel=\"timestamp\")\n",
+ " ax[0].set(ylabel=\"rx_pkts\")\n",
+ "\n",
+ " ax[1].plot(trex_rx_1['ts'],\n",
+ " trex_rx_1['rx_pkts'],\n",
+ " 'tab:green')\n",
+ " ax[1].title.set_text(\"At rx_port=1 & tx_port=0\")\n",
+ " ax[1].set(xlabel=\"timestamp\")\n",
+ " ax[1].set(ylabel=\"rx_pkts\")\n",
+ "\n",
+ " #change date format\n",
+ " myFmt = mdates.DateFormatter('%Y-%m-%d %H:%M:%S')\n",
+ " for i in range(2):\n",
+ " ax[i].xaxis.set_major_formatter(myFmt) \n",
+ " plt.show()\n",
+ "else:\n",
+ " print(\"No data Found\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Spirent"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "filename = \"/tmp/\"+foldername+\"/stc-liveresults.dat.rx\"\n",
+ "s = Search(index=index).using(client).query(\"exists\", field=\"ts\").query(\"match_phrase\", log_path=filename)\n",
+ "\n",
+ "spirent = pd.DataFrame()\n",
+ "spirent_data = dict()\n",
+ "for hits in s.scan():\n",
+ " spirent_data['ts'] = hits.ts\n",
+ " spirent_data['RxPrt'] = hits.RxPrt\n",
+ " spirent_data['FrCnt'] = hits.FrCnt\n",
+ " spirent = spirent.append(spirent_data, ignore_index=True)\n",
+ "if not spirent.empty:\n",
+ " #convert 'ts' to datetime\n",
+ " spirent['ts'] = pd.to_datetime(spirent['ts'],unit='s')\n",
+ " spirent_grp = spirent.groupby('RxPrt')\n",
+ " spirent_rx_1 = spirent_grp.get_group('Port //1/1') \n",
+ " spirent_rx_2 = spirent_grp.get_group('Port //1/2') "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "if not spirent.empty:\n",
+ " fig, ax = plt.subplots(2,figsize=(16, 10))\n",
+ " ax[0].plot(spirent_rx_1['ts'],\n",
+ " spirent_rx_1['FrCnt'],\n",
+ " 'tab:orange')\n",
+ " ax[0].title.set_text(\"At RxPrt=//1/1\")\n",
+ " ax[0].set(xlabel=\"timestamp\")\n",
+ " ax[0].set(ylabel=\"FrCnt\")\n",
+ "\n",
+ " ax[1].plot(spirent_rx_2['ts'],\n",
+ " spirent_rx_2['FrCnt'],\n",
+ " 'tab:green')\n",
+ " ax[1].title.set_text(\"At RxPrt=//1/2\")\n",
+ " ax[1].set(xlabel=\"timestamp\")\n",
+ " ax[1].set(ylabel=\"FrCnt\")\n",
+ "\n",
+ " #change date format\n",
+ " myFmt = mdates.DateFormatter('%Y-%m-%d %H:%M:%S')\n",
+ " for i in range(2):\n",
+ " ax[i].xaxis.set_major_formatter(myFmt) \n",
+ " plt.show()\n",
+ "else:\n",
+ " print(\"No data Found\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Ixia"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "filename = \"/tmp/\"+foldername+\"/Traffic Item Statistics.csv\"\n",
+ "s = Search(index=index).using(client).query(\"exists\", field=\"msg\").query(\"match_phrase\", log_path=filename)\n",
+ "\n",
+ "for hits in s.scan():\n",
+ " with open('./ixia-traffic.csv', 'a+') as f:\n",
+ " f.write(hits.msg+\"\\n\")\n",
+ " \n",
+ "ixia = pd.DataFrame()\n",
+ "if os.path.exists('./ixia-traffic.csv'):\n",
+ " ixia = pd.read_csv('./ixia-traffic.csv')\n",
+ " os.remove(f.name)\n",
+ " f.close()\n",
+ "if not ixia.empty:\n",
+ " ixia = ixia[['~ElapsedTime','Traffic Item 1:Frames Delta','Traffic Item 1:Loss %']].astype(float)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "if not ixia.empty:\n",
+ " fig, ax = plt.subplots(2,figsize=(16, 10))\n",
+ " ax[0].plot(ixia['~ElapsedTime'],\n",
+ " ixia['Traffic Item 1:Frames Delta'],\n",
+ " 'tab:orange')\n",
+ " ax[0].set(xlabel=\"Elapsed Time\")\n",
+ " ax[0].set(ylabel=\"Frames Delta\")\n",
+ "\n",
+ " ax[1].plot(ixia['~ElapsedTime'],\n",
+ " ixia['Traffic Item 1:Loss %'],\n",
+ " 'tab:green')\n",
+ " ax[1].set(xlabel=\"Elapsed Time\")\n",
+ " ax[1].set(ylabel=\"Loss %\")\n",
+ "\n",
+ " plt.show()\n",
+ "else:\n",
+ " print(\"No data Found\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Time Analysis"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "filename = \"/tmp/\"+foldername+\"/\"\n",
+ "s = Search(index=index).using(client).query(\"exists\", field=\"setup_duration\").query(\"match_phrase\", log_path=filename)\n",
+ "for hits in s.scan():\n",
+ " print(\"Setup duration: \", hits.setup_duration,\"s\")\n",
+ "\n",
+ "s = Search(index=index).using(client).query(\"exists\", field=\"iteration_duration\").query(\"match_phrase\", log_path=filename)\n",
+ "for hits in s.scan():\n",
+ " print(\"Iteration duration: \", hits.iteration_duration,\"s\")\n",
+ "\n",
+ "s = Search(index=index).using(client).query(\"exists\", field=\"traffic_duration\").query(\"match_phrase\", log_path=filename)\n",
+ "for hits in s.scan():\n",
+ " print(\"Traffic duration: \", hits.traffic_duration,\"s\")\n",
+ "\n",
+ "s = Search(index=index).using(client).query(\"exists\", field=\"test_duration\").query(\"match_phrase\", log_path=filename)\n",
+ "for hits in s.scan():\n",
+ " print(\"Test duration: \", hits.test_duration,\"s\")\n",
+ "\n",
+ "s = Search(index=index).using(client).query(\"exists\", field=\"report_duration\").query(\"match_phrase\", log_path=filename)\n",
+ "for hits in s.scan():\n",
+ " print(\"Report duration: \", hits.report_duration,\"s\")\n",
+ " \n",
+ "s = Search(index=index).using(client).query(\"exists\", field=\"vswitch_duration\").query(\"match_phrase\", log_path=filename)\n",
+ "for hits in s.scan():\n",
+ " print(\"Vswitch starting duration: \", hits.vswitch_duration,\"s\")"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.6.8"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
diff --git a/tools/lma/metrics/dashboard/cpu_usage_using.json b/tools/lma/metrics/dashboard/cpu_usage_using.json
new file mode 100644
index 00000000..85f7f122
--- /dev/null
+++ b/tools/lma/metrics/dashboard/cpu_usage_using.json
@@ -0,0 +1,750 @@
+{
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": "prometheus",
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "limit": 100,
+ "name": "Monitoring",
+ "showIn": 0,
+ "type": "dashboard"
+ }
+ ]
+ },
+ "editable": true,
+ "gnetId": null,
+ "graphTooltip": 0,
+ "id": 4,
+ "iteration": 1596637894836,
+ "links": [],
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "grid": {},
+ "gridPos": {
+ "h": 7,
+ "w": 24,
+ "x": 0,
+ "y": 0
+ },
+ "hiddenSeries": false,
+ "id": 3,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": false,
+ "hideZero": true,
+ "max": true,
+ "min": true,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pluginVersion": "7.1.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "collectd_cpu_percent{exported_instance='$host'}",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "CPU Usage",
+ "tooltip": {
+ "msResolution": true,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 24,
+ "x": 0,
+ "y": 7
+ },
+ "hiddenSeries": false,
+ "id": 4,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pluginVersion": "7.1.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "collectd_cpu_percent{cpu='$core', exported_instance='$host'}",
+ "interval": "",
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "CPU utilization per core",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 24,
+ "x": 0,
+ "y": 14
+ },
+ "hiddenSeries": false,
+ "id": 5,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pluginVersion": "7.1.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "collectd_cpu_percent{cpu='$core',exported_instance='$host'}",
+ "interval": "",
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "CPU Usage per core",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ }
+ ],
+ "refresh": "10s",
+ "schemaVersion": 26,
+ "style": "dark",
+ "tags": [
+ "monitoring"
+ ],
+ "templating": {
+ "list": [
+ {
+ "current": {
+ "selected": true,
+ "text": "prometheus",
+ "value": "prometheus"
+ },
+ "hide": 0,
+ "includeAll": false,
+ "label": null,
+ "multi": false,
+ "name": "datasource",
+ "options": [],
+ "query": "prometheus",
+ "queryValue": "",
+ "refresh": 1,
+ "regex": "",
+ "skipUrlSync": false,
+ "type": "datasource"
+ },
+ {
+ "allValue": null,
+ "current": {
+ "selected": false,
+ "text": "pod12-node4",
+ "value": "pod12-node4"
+ },
+ "hide": 0,
+ "includeAll": false,
+ "label": null,
+ "multi": false,
+ "name": "host",
+ "options": [
+ {
+ "selected": true,
+ "text": "pod12-node4",
+ "value": "pod12-node4"
+ }
+ ],
+ "query": "pod12-node4,",
+ "queryValue": "",
+ "skipUrlSync": false,
+ "type": "custom"
+ },
+ {
+ "allValue": null,
+ "current": {
+ "selected": true,
+ "text": "0",
+ "value": "0"
+ },
+ "hide": 0,
+ "includeAll": true,
+ "label": null,
+ "multi": false,
+ "name": "core",
+ "options": [
+ {
+ "selected": false,
+ "text": "All",
+ "value": "$__all"
+ },
+ {
+ "selected": true,
+ "text": "0",
+ "value": "0"
+ },
+ {
+ "selected": false,
+ "text": "1",
+ "value": "1"
+ },
+ {
+ "selected": false,
+ "text": "2",
+ "value": "2"
+ },
+ {
+ "selected": false,
+ "text": "3",
+ "value": "3"
+ },
+ {
+ "selected": false,
+ "text": "4",
+ "value": "4"
+ },
+ {
+ "selected": false,
+ "text": "5",
+ "value": "5"
+ },
+ {
+ "selected": false,
+ "text": "6",
+ "value": "6"
+ },
+ {
+ "selected": false,
+ "text": "7",
+ "value": "7"
+ },
+ {
+ "selected": false,
+ "text": "8",
+ "value": "8"
+ },
+ {
+ "selected": false,
+ "text": "9",
+ "value": "9"
+ },
+ {
+ "selected": false,
+ "text": "10",
+ "value": "10"
+ },
+ {
+ "selected": false,
+ "text": "11",
+ "value": "11"
+ },
+ {
+ "selected": false,
+ "text": "12",
+ "value": "12"
+ },
+ {
+ "selected": false,
+ "text": "13",
+ "value": "13"
+ },
+ {
+ "selected": false,
+ "text": "14",
+ "value": "14"
+ },
+ {
+ "selected": false,
+ "text": "15",
+ "value": "15"
+ },
+ {
+ "selected": false,
+ "text": "16",
+ "value": "16"
+ },
+ {
+ "selected": false,
+ "text": "17",
+ "value": "17"
+ },
+ {
+ "selected": false,
+ "text": "18",
+ "value": "18"
+ },
+ {
+ "selected": false,
+ "text": "19",
+ "value": "19"
+ },
+ {
+ "selected": false,
+ "text": "20",
+ "value": "20"
+ },
+ {
+ "selected": false,
+ "text": "21",
+ "value": "21"
+ },
+ {
+ "selected": false,
+ "text": "22",
+ "value": "22"
+ },
+ {
+ "selected": false,
+ "text": "23",
+ "value": "23"
+ },
+ {
+ "selected": false,
+ "text": "24",
+ "value": "24"
+ },
+ {
+ "selected": false,
+ "text": "25",
+ "value": "25"
+ },
+ {
+ "selected": false,
+ "text": "26",
+ "value": "26"
+ },
+ {
+ "selected": false,
+ "text": "27",
+ "value": "27"
+ },
+ {
+ "selected": false,
+ "text": "28",
+ "value": "28"
+ },
+ {
+ "selected": false,
+ "text": "29",
+ "value": "29"
+ },
+ {
+ "selected": false,
+ "text": "30",
+ "value": "30"
+ },
+ {
+ "selected": false,
+ "text": "31",
+ "value": "31"
+ },
+ {
+ "selected": false,
+ "text": "32",
+ "value": "32"
+ },
+ {
+ "selected": false,
+ "text": "33",
+ "value": "33"
+ },
+ {
+ "selected": false,
+ "text": "34",
+ "value": "34"
+ },
+ {
+ "selected": false,
+ "text": "35",
+ "value": "35"
+ },
+ {
+ "selected": false,
+ "text": "36",
+ "value": "36"
+ },
+ {
+ "selected": false,
+ "text": "37",
+ "value": "37"
+ },
+ {
+ "selected": false,
+ "text": "38",
+ "value": "38"
+ },
+ {
+ "selected": false,
+ "text": "39",
+ "value": "39"
+ },
+ {
+ "selected": false,
+ "text": "40",
+ "value": "40"
+ },
+ {
+ "selected": false,
+ "text": "41",
+ "value": "41"
+ },
+ {
+ "selected": false,
+ "text": "42",
+ "value": "42"
+ },
+ {
+ "selected": false,
+ "text": "43",
+ "value": "43"
+ },
+ {
+ "selected": false,
+ "text": "44",
+ "value": "44"
+ },
+ {
+ "selected": false,
+ "text": "45",
+ "value": "45"
+ },
+ {
+ "selected": false,
+ "text": "46",
+ "value": "46"
+ },
+ {
+ "selected": false,
+ "text": "47",
+ "value": "47"
+ },
+ {
+ "selected": false,
+ "text": "48",
+ "value": "48"
+ },
+ {
+ "selected": false,
+ "text": "49",
+ "value": "49"
+ },
+ {
+ "selected": false,
+ "text": "50",
+ "value": "50"
+ },
+ {
+ "selected": false,
+ "text": "51",
+ "value": "51"
+ },
+ {
+ "selected": false,
+ "text": "52",
+ "value": "52"
+ },
+ {
+ "selected": false,
+ "text": "53",
+ "value": "53"
+ },
+ {
+ "selected": false,
+ "text": "54",
+ "value": "54"
+ },
+ {
+ "selected": false,
+ "text": "55",
+ "value": "55"
+ },
+ {
+ "selected": false,
+ "text": "56",
+ "value": "56"
+ },
+ {
+ "selected": false,
+ "text": "57",
+ "value": "57"
+ },
+ {
+ "selected": false,
+ "text": "58",
+ "value": "58"
+ },
+ {
+ "selected": false,
+ "text": "59",
+ "value": "59"
+ },
+ {
+ "selected": false,
+ "text": "60",
+ "value": "60"
+ },
+ {
+ "selected": false,
+ "text": "61",
+ "value": "61"
+ },
+ {
+ "selected": false,
+ "text": "62",
+ "value": "62"
+ },
+ {
+ "selected": false,
+ "text": "63",
+ "value": "63"
+ },
+ {
+ "selected": false,
+ "text": "64",
+ "value": "64"
+ }
+ ],
+ "query": "0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64",
+ "queryValue": "",
+ "skipUrlSync": false,
+ "type": "custom"
+ }
+ ]
+ },
+ "time": {
+ "from": "now-5m",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "10s",
+ "30s",
+ "1m",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "browser",
+ "title": "CPU Usage",
+ "uid": "XeDwSiSGk",
+ "version": 13
+} \ No newline at end of file
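All three panels in this dashboard are built on the `collectd_cpu_percent` metric, with Grafana substituting the `$host` and `$core` template variables (note the second and third panels chart essentially the same per-core expression under different titles). The same series can be inspected outside Grafana through the Prometheus HTTP API; a minimal sketch, with the server URL as a placeholder:

```py
import requests

PROMETHEUS = "http://localhost:9090"  # placeholder; point at your Prometheus

# Instant query equivalent to the first panel, with $host substituted
query = "collectd_cpu_percent{exported_instance='pod12-node4'}"
resp = requests.get(PROMETHEUS + "/api/v1/query", params={"query": query})
for series in resp.json()["data"]["result"]:
    ts, value = series["value"]
    print(series["metric"].get("cpu"), series["metric"].get("type"), value)
```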
diff --git a/tools/lma/metrics/dashboard/memory_using.json b/tools/lma/metrics/dashboard/memory_using.json
new file mode 100644
index 00000000..3b92d8f5
--- /dev/null
+++ b/tools/lma/metrics/dashboard/memory_using.json
@@ -0,0 +1,337 @@
+{
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": "prometheus",
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "limit": 100,
+ "name": "Monitoring",
+ "showIn": 0,
+ "type": "dashboard"
+ }
+ ]
+ },
+ "editable": true,
+ "gnetId": null,
+ "graphTooltip": 0,
+ "id": 6,
+ "iteration": 1597616052316,
+ "links": [],
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 15,
+ "w": 24,
+ "x": 0,
+ "y": 0
+ },
+ "hiddenSeries": false,
+ "id": 1,
+ "interval": "1s",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": true,
+ "show": false,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pluginVersion": "7.1.3",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(collectd_memory{exported_instance='$host', memory='$type'}[$range])",
+ "interval": "",
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Bytes",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ }
+ ],
+ "refresh": "30s",
+ "schemaVersion": 26,
+ "style": "dark",
+ "tags": [
+ "monitoring"
+ ],
+ "templating": {
+ "list": [
+ {
+ "current": {
+ "selected": false,
+ "text": "prometheus",
+ "value": "prometheus"
+ },
+ "hide": 0,
+ "includeAll": false,
+ "label": null,
+ "multi": false,
+ "name": "datasource",
+ "options": [],
+ "query": "prometheus",
+ "queryValue": "",
+ "refresh": 1,
+ "regex": "",
+ "skipUrlSync": false,
+ "type": "datasource"
+ },
+ {
+ "allValue": null,
+ "current": {
+ "selected": false,
+ "text": "pod12-node4",
+ "value": "pod12-node4"
+ },
+ "hide": 0,
+ "includeAll": false,
+ "label": null,
+ "multi": false,
+ "name": "host",
+ "options": [
+ {
+ "selected": true,
+ "text": "pod12-node4",
+ "value": "pod12-node4"
+ }
+ ],
+ "query": "pod12-node4,",
+ "queryValue": "",
+ "skipUrlSync": false,
+ "type": "custom"
+ },
+ {
+ "auto": false,
+ "auto_count": 30,
+ "auto_min": "10s",
+ "current": {
+ "selected": false,
+ "text": "30s",
+ "value": "30s"
+ },
+ "hide": 0,
+ "label": null,
+ "name": "range",
+ "options": [
+ {
+ "selected": true,
+ "text": "30s",
+ "value": "30s"
+ },
+ {
+ "selected": false,
+ "text": "1m",
+ "value": "1m"
+ },
+ {
+ "selected": false,
+ "text": "5m",
+ "value": "5m"
+ },
+ {
+ "selected": false,
+ "text": "10m",
+ "value": "10m"
+ },
+ {
+ "selected": false,
+ "text": "30m",
+ "value": "30m"
+ },
+ {
+ "selected": false,
+ "text": "1h",
+ "value": "1h"
+ },
+ {
+ "selected": false,
+ "text": "6h",
+ "value": "6h"
+ },
+ {
+ "selected": false,
+ "text": "12h",
+ "value": "12h"
+ },
+ {
+ "selected": false,
+ "text": "1d",
+ "value": "1d"
+ },
+ {
+ "selected": false,
+ "text": "7d",
+ "value": "7d"
+ },
+ {
+ "selected": false,
+ "text": "14d",
+ "value": "14d"
+ },
+ {
+ "selected": false,
+ "text": "30d",
+ "value": "30d"
+ }
+ ],
+ "query": "30s,1m,5m,10m,30m,1h,6h,12h,1d,7d,14d,30d",
+ "queryValue": "",
+ "refresh": 2,
+ "skipUrlSync": false,
+ "type": "interval"
+ },
+ {
+ "allValue": null,
+ "current": {
+ "selected": true,
+ "text": "used",
+ "value": "used"
+ },
+ "hide": 0,
+ "includeAll": false,
+ "label": null,
+ "multi": false,
+ "name": "type",
+ "options": [
+ {
+ "selected": false,
+ "text": "buffered",
+ "value": "buffered"
+ },
+ {
+ "selected": false,
+ "text": "cached",
+ "value": "cached"
+ },
+ {
+ "selected": false,
+ "text": "free",
+ "value": "free"
+ },
+ {
+ "selected": false,
+ "text": "slab_recl",
+ "value": "slab_recl"
+ },
+ {
+ "selected": false,
+ "text": "slab_unrecl",
+ "value": "slab_unrecl"
+ },
+ {
+ "selected": true,
+ "text": "used",
+ "value": "used"
+ }
+ ],
+ "query": "buffered,cached,free,slab_recl,slab_unrecl,used",
+ "queryValue": "",
+ "skipUrlSync": false,
+ "type": "custom"
+ }
+ ]
+ },
+ "time": {
+ "from": "now-5m",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "10s",
+ "30s",
+ "1m",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "browser",
+ "title": "Memory",
+ "uid": "kuro-mem",
+ "version": 4
+} \ No newline at end of file
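Note that `collectd_memory` is a gauge, so the panel's `rate(collectd_memory{...}[$range])` expression plots the per-second change of the selected memory `$type` rather than its absolute size; dropping the `rate()` wrapper charts raw bytes. A range-query sketch for the raw gauge (placeholder URL and example timestamps):

```py
import requests

PROMETHEUS = "http://localhost:9090"  # placeholder
params = {
    "query": "collectd_memory{exported_instance='pod12-node4', memory='used'}",
    "start": "2020-07-31T08:00:00Z",  # example window
    "end": "2020-07-31T08:05:00Z",
    "step": "30s",
}
resp = requests.get(PROMETHEUS + "/api/v1/query_range", params=params)
for series in resp.json()["data"]["result"]:
    for ts, value in series["values"]:
        print(ts, value)
```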
diff --git a/tools/lma/metrics/dashboard/ovs_stats_using.json b/tools/lma/metrics/dashboard/ovs_stats_using.json
new file mode 100644
index 00000000..1e679fbe
--- /dev/null
+++ b/tools/lma/metrics/dashboard/ovs_stats_using.json
@@ -0,0 +1,854 @@
+{
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": "prometheus",
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "limit": 100,
+ "name": "Monitoring",
+ "showIn": 0,
+ "type": "dashboard"
+ }
+ ]
+ },
+ "editable": true,
+ "gnetId": null,
+ "graphTooltip": 0,
+ "id": 6,
+ "iteration": 1596643135141,
+ "links": [],
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 6,
+ "w": 24,
+ "x": 0,
+ "y": 0
+ },
+ "hiddenSeries": false,
+ "id": 1,
+ "interval": "1s",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pluginVersion": "7.1.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(collectd_ovs_stats_if_rx_octets_total{exported_instance='$host'}[$__interval])",
+ "interval": "",
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Average RX values",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 6,
+ "w": 24,
+ "x": 0,
+ "y": 6
+ },
+ "hiddenSeries": false,
+ "id": 2,
+ "interval": "1s",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pluginVersion": "7.1.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(collectd_ovs_stats_if_tx_octets_total{exported_instance='$host'}[$__interval])",
+ "interval": "",
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Average TX values",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 24,
+ "x": 0,
+ "y": 12
+ },
+ "hiddenSeries": false,
+ "id": 3,
+ "interval": "1s",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pluginVersion": "7.1.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(collectd_ovs_stats_if_collisions_total{exported_instance='$host'}[$range])",
+ "interval": "",
+ "legendFormat": "",
+ "refId": "A"
+ },
+ {
+ "expr": "rate(collectd_ovs_stats_if_dropped_0_total{exported_instance='$host'}[$range])",
+ "interval": "",
+ "legendFormat": "",
+ "refId": "B"
+ },
+ {
+ "expr": "rate(collectd_ovs_stats_if_dropped_1_total{exported_instance='$host'}[$range])",
+ "interval": "",
+ "legendFormat": "",
+ "refId": "C"
+ },
+ {
+ "expr": "rate(collectd_ovs_stats_if_errors_0_total{exported_instance='$host'}[$range])",
+ "interval": "",
+ "legendFormat": "",
+ "refId": "D"
+ },
+ {
+ "expr": "rate(collectd_ovs_stats_if_errors_1_total{exported_instance='$host'}[$range])",
+ "interval": "",
+ "legendFormat": "",
+ "refId": "E"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Average Collisions, Drops and Error values",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ }
+ ],
+ "refresh": "30s",
+ "schemaVersion": 26,
+ "style": "dark",
+ "tags": [
+ "monitoring"
+ ],
+ "templating": {
+ "list": [
+ {
+ "current": {
+ "selected": false,
+ "text": "prometheus",
+ "value": "prometheus"
+ },
+ "hide": 0,
+ "includeAll": false,
+ "label": null,
+ "multi": false,
+ "name": "datasource",
+ "options": [],
+ "query": "prometheus",
+ "queryValue": "",
+ "refresh": 1,
+ "regex": "",
+ "skipUrlSync": false,
+ "type": "datasource"
+ },
+ {
+ "allValue": null,
+ "current": {
+ "selected": false,
+ "text": "pod12-node4",
+ "value": "pod12-node4"
+ },
+ "hide": 0,
+ "includeAll": false,
+ "label": null,
+ "multi": false,
+ "name": "host",
+ "options": [
+ {
+ "selected": true,
+ "text": "pod12-node4",
+ "value": "pod12-node4"
+ }
+ ],
+ "query": "pod12-node4,",
+ "queryValue": "",
+ "skipUrlSync": false,
+ "type": "custom"
+ },
+ {
+ "allValue": null,
+ "current": {
+ "selected": true,
+ "text": "0",
+ "value": "0"
+ },
+ "hide": 0,
+ "includeAll": true,
+ "label": null,
+ "multi": false,
+ "name": "core",
+ "options": [
+ {
+ "selected": false,
+ "text": "All",
+ "value": "$__all"
+ },
+ {
+ "selected": true,
+ "text": "0",
+ "value": "0"
+ },
+ {
+ "selected": false,
+ "text": "1",
+ "value": "1"
+ },
+ {
+ "selected": false,
+ "text": "2",
+ "value": "2"
+ },
+ {
+ "selected": false,
+ "text": "3",
+ "value": "3"
+ },
+ {
+ "selected": false,
+ "text": "4",
+ "value": "4"
+ },
+ {
+ "selected": false,
+ "text": "5",
+ "value": "5"
+ },
+ {
+ "selected": false,
+ "text": "6",
+ "value": "6"
+ },
+ {
+ "selected": false,
+ "text": "7",
+ "value": "7"
+ },
+ {
+ "selected": false,
+ "text": "8",
+ "value": "8"
+ },
+ {
+ "selected": false,
+ "text": "9",
+ "value": "9"
+ },
+ {
+ "selected": false,
+ "text": "10",
+ "value": "10"
+ },
+ {
+ "selected": false,
+ "text": "11",
+ "value": "11"
+ },
+ {
+ "selected": false,
+ "text": "12",
+ "value": "12"
+ },
+ {
+ "selected": false,
+ "text": "13",
+ "value": "13"
+ },
+ {
+ "selected": false,
+ "text": "14",
+ "value": "14"
+ },
+ {
+ "selected": false,
+ "text": "15",
+ "value": "15"
+ },
+ {
+ "selected": false,
+ "text": "16",
+ "value": "16"
+ },
+ {
+ "selected": false,
+ "text": "17",
+ "value": "17"
+ },
+ {
+ "selected": false,
+ "text": "18",
+ "value": "18"
+ },
+ {
+ "selected": false,
+ "text": "19",
+ "value": "19"
+ },
+ {
+ "selected": false,
+ "text": "20",
+ "value": "20"
+ },
+ {
+ "selected": false,
+ "text": "21",
+ "value": "21"
+ },
+ {
+ "selected": false,
+ "text": "22",
+ "value": "22"
+ },
+ {
+ "selected": false,
+ "text": "23",
+ "value": "23"
+ },
+ {
+ "selected": false,
+ "text": "24",
+ "value": "24"
+ },
+ {
+ "selected": false,
+ "text": "25",
+ "value": "25"
+ },
+ {
+ "selected": false,
+ "text": "26",
+ "value": "26"
+ },
+ {
+ "selected": false,
+ "text": "27",
+ "value": "27"
+ },
+ {
+ "selected": false,
+ "text": "28",
+ "value": "28"
+ },
+ {
+ "selected": false,
+ "text": "29",
+ "value": "29"
+ },
+ {
+ "selected": false,
+ "text": "30",
+ "value": "30"
+ },
+ {
+ "selected": false,
+ "text": "31",
+ "value": "31"
+ },
+ {
+ "selected": false,
+ "text": "32",
+ "value": "32"
+ },
+ {
+ "selected": false,
+ "text": "33",
+ "value": "33"
+ },
+ {
+ "selected": false,
+ "text": "34",
+ "value": "34"
+ },
+ {
+ "selected": false,
+ "text": "35",
+ "value": "35"
+ },
+ {
+ "selected": false,
+ "text": "36",
+ "value": "36"
+ },
+ {
+ "selected": false,
+ "text": "37",
+ "value": "37"
+ },
+ {
+ "selected": false,
+ "text": "38",
+ "value": "38"
+ },
+ {
+ "selected": false,
+ "text": "39",
+ "value": "39"
+ },
+ {
+ "selected": false,
+ "text": "40",
+ "value": "40"
+ },
+ {
+ "selected": false,
+ "text": "41",
+ "value": "41"
+ },
+ {
+ "selected": false,
+ "text": "42",
+ "value": "42"
+ },
+ {
+ "selected": false,
+ "text": "43",
+ "value": "43"
+ },
+ {
+ "selected": false,
+ "text": "44",
+ "value": "44"
+ },
+ {
+ "selected": false,
+ "text": "45",
+ "value": "45"
+ },
+ {
+ "selected": false,
+ "text": "46",
+ "value": "46"
+ },
+ {
+ "selected": false,
+ "text": "47",
+ "value": "47"
+ },
+ {
+ "selected": false,
+ "text": "48",
+ "value": "48"
+ },
+ {
+ "selected": false,
+ "text": "49",
+ "value": "49"
+ },
+ {
+ "selected": false,
+ "text": "50",
+ "value": "50"
+ },
+ {
+ "selected": false,
+ "text": "51",
+ "value": "51"
+ },
+ {
+ "selected": false,
+ "text": "52",
+ "value": "52"
+ },
+ {
+ "selected": false,
+ "text": "53",
+ "value": "53"
+ },
+ {
+ "selected": false,
+ "text": "54",
+ "value": "54"
+ },
+ {
+ "selected": false,
+ "text": "55",
+ "value": "55"
+ },
+ {
+ "selected": false,
+ "text": "56",
+ "value": "56"
+ },
+ {
+ "selected": false,
+ "text": "57",
+ "value": "57"
+ },
+ {
+ "selected": false,
+ "text": "58",
+ "value": "58"
+ },
+ {
+ "selected": false,
+ "text": "59",
+ "value": "59"
+ },
+ {
+ "selected": false,
+ "text": "60",
+ "value": "60"
+ },
+ {
+ "selected": false,
+ "text": "61",
+ "value": "61"
+ },
+ {
+ "selected": false,
+ "text": "62",
+ "value": "62"
+ },
+ {
+ "selected": false,
+ "text": "63",
+ "value": "63"
+ },
+ {
+ "selected": false,
+ "text": "64",
+ "value": "64"
+ }
+ ],
+ "query": "0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64",
+ "queryValue": "",
+ "skipUrlSync": false,
+ "type": "custom"
+ },
+ {
+ "auto": false,
+ "auto_count": 30,
+ "auto_min": "10s",
+ "current": {
+ "selected": false,
+ "text": "30s",
+ "value": "30s"
+ },
+ "hide": 0,
+ "label": null,
+ "name": "range",
+ "options": [
+ {
+ "selected": true,
+ "text": "30s",
+ "value": "30s"
+ },
+ {
+ "selected": false,
+ "text": "1m",
+ "value": "1m"
+ },
+ {
+ "selected": false,
+ "text": "5m",
+ "value": "5m"
+ },
+ {
+ "selected": false,
+ "text": "10m",
+ "value": "10m"
+ },
+ {
+ "selected": false,
+ "text": "30m",
+ "value": "30m"
+ },
+ {
+ "selected": false,
+ "text": "1h",
+ "value": "1h"
+ },
+ {
+ "selected": false,
+ "text": "6h",
+ "value": "6h"
+ },
+ {
+ "selected": false,
+ "text": "12h",
+ "value": "12h"
+ },
+ {
+ "selected": false,
+ "text": "1d",
+ "value": "1d"
+ },
+ {
+ "selected": false,
+ "text": "7d",
+ "value": "7d"
+ },
+ {
+ "selected": false,
+ "text": "14d",
+ "value": "14d"
+ },
+ {
+ "selected": false,
+ "text": "30d",
+ "value": "30d"
+ }
+ ],
+ "query": "30s,1m,5m,10m,30m,1h,6h,12h,1d,7d,14d,30d",
+ "queryValue": "",
+ "refresh": 2,
+ "skipUrlSync": false,
+ "type": "interval"
+ }
+ ]
+ },
+ "time": {
+ "from": "now-5m",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "10s",
+ "30s",
+ "1m",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "browser",
+ "title": "OVS Stats",
+ "uid": "K1N5ciIGz",
+ "version": 7
+ } \ No newline at end of file
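The OVS interface metrics end in `_total` and are monotonically increasing counters, so `rate()` is the right construction here (the first two panels window over Grafana's built-in `$__interval`, the drop/error panel over the dashboard's own `$range` variable; the `core` template variable defined above is not referenced by any panel). A sketch converting the RX octet rate into bits per second, again with a placeholder server URL:

```py
import requests

PROMETHEUS = "http://localhost:9090"  # placeholder
query = ("rate(collectd_ovs_stats_if_rx_octets_total"
         "{exported_instance='pod12-node4'}[1m]) * 8")
resp = requests.get(PROMETHEUS + "/api/v1/query", params={"query": query})
for series in resp.json()["data"]["result"]:
    print(series["metric"], series["value"][1], "bit/s")
```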
diff --git a/tools/lma/metrics/dashboard/rdt_using.json b/tools/lma/metrics/dashboard/rdt_using.json
new file mode 100644
index 00000000..a0ce7987
--- /dev/null
+++ b/tools/lma/metrics/dashboard/rdt_using.json
@@ -0,0 +1,833 @@
+{
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": "prometheus",
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "limit": 100,
+ "name": "Monitoring",
+ "showIn": 0,
+ "type": "dashboard"
+ }
+ ]
+ },
+ "editable": true,
+ "gnetId": null,
+ "graphTooltip": 0,
+ "id": 7,
+ "iteration": 1597615840124,
+ "links": [],
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 6,
+ "w": 24,
+ "x": 0,
+ "y": 0
+ },
+ "hiddenSeries": false,
+ "id": 1,
+ "interval": "1s",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": true,
+ "show": false,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pluginVersion": "7.1.3",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(collectd_intel_rdt_bytes{exported_instance='$host', intel_rdt='$intel_rdt'}[$range])",
+ "interval": "",
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "RDT Bytes",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 6,
+ "w": 24,
+ "x": 0,
+ "y": 6
+ },
+ "hiddenSeries": false,
+ "id": 2,
+ "interval": "1s",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": true,
+ "show": false,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pluginVersion": "7.1.3",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(collectd_intel_rdt_ipc{exported_instance='$host', intel_rdt='$intel_rdt'}[$range])",
+ "interval": "",
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "IPC values",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 24,
+ "x": 0,
+ "y": 12
+ },
+ "hiddenSeries": false,
+ "id": 3,
+ "interval": "1s",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": true,
+ "show": false,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pluginVersion": "7.1.3",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(collectd_intel_rdt_memory_bandwidth_total{exported_instance='$host', type='local'}[$range])",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "",
+ "refId": "A"
+ },
+ {
+ "expr": "rate(collectd_intel_rdt_memory_bandwidth_total{exported_instance='$host', type='remote'}[$range])",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "",
+ "refId": "B"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Memory Bandwidth Total",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ }
+ ],
+ "refresh": "30s",
+ "schemaVersion": 26,
+ "style": "dark",
+ "tags": [
+ "monitoring"
+ ],
+ "templating": {
+ "list": [
+ {
+ "current": {
+ "selected": false,
+ "text": "prometheus",
+ "value": "prometheus"
+ },
+ "hide": 0,
+ "includeAll": false,
+ "label": null,
+ "multi": false,
+ "name": "datasource",
+ "options": [],
+ "query": "prometheus",
+ "queryValue": "",
+ "refresh": 1,
+ "regex": "",
+ "skipUrlSync": false,
+ "type": "datasource"
+ },
+ {
+ "allValue": null,
+ "current": {
+ "selected": false,
+ "text": "pod12-node4",
+ "value": "pod12-node4"
+ },
+ "hide": 0,
+ "includeAll": false,
+ "label": null,
+ "multi": false,
+ "name": "host",
+ "options": [
+ {
+ "selected": true,
+ "text": "pod12-node4",
+ "value": "pod12-node4"
+ }
+ ],
+ "query": "pod12-node4,",
+ "queryValue": "",
+ "skipUrlSync": false,
+ "type": "custom"
+ },
+ {
+ "auto": false,
+ "auto_count": 30,
+ "auto_min": "10s",
+ "current": {
+ "selected": false,
+ "text": "30s",
+ "value": "30s"
+ },
+ "hide": 0,
+ "label": null,
+ "name": "range",
+ "options": [
+ {
+ "selected": true,
+ "text": "30s",
+ "value": "30s"
+ },
+ {
+ "selected": false,
+ "text": "1m",
+ "value": "1m"
+ },
+ {
+ "selected": false,
+ "text": "5m",
+ "value": "5m"
+ },
+ {
+ "selected": false,
+ "text": "10m",
+ "value": "10m"
+ },
+ {
+ "selected": false,
+ "text": "30m",
+ "value": "30m"
+ },
+ {
+ "selected": false,
+ "text": "1h",
+ "value": "1h"
+ },
+ {
+ "selected": false,
+ "text": "6h",
+ "value": "6h"
+ },
+ {
+ "selected": false,
+ "text": "12h",
+ "value": "12h"
+ },
+ {
+ "selected": false,
+ "text": "1d",
+ "value": "1d"
+ },
+ {
+ "selected": false,
+ "text": "7d",
+ "value": "7d"
+ },
+ {
+ "selected": false,
+ "text": "14d",
+ "value": "14d"
+ },
+ {
+ "selected": false,
+ "text": "30d",
+ "value": "30d"
+ }
+ ],
+ "query": "30s,1m,5m,10m,30m,1h,6h,12h,1d,7d,14d,30d",
+ "queryValue": "",
+ "refresh": 2,
+ "skipUrlSync": false,
+ "type": "interval"
+ },
+ {
+ "allValue": null,
+ "current": {
+ "selected": true,
+ "text": "2",
+ "value": "2"
+ },
+ "hide": 0,
+ "includeAll": false,
+ "label": null,
+ "multi": false,
+ "name": "intel_rdt",
+ "options": [
+ {
+ "selected": false,
+ "text": "0",
+ "value": "0"
+ },
+ {
+ "selected": false,
+ "text": "1",
+ "value": "1"
+ },
+ {
+ "selected": true,
+ "text": "2",
+ "value": "2"
+ },
+ {
+ "selected": false,
+ "text": "3",
+ "value": "3"
+ },
+ {
+ "selected": false,
+ "text": "4",
+ "value": "4"
+ },
+ {
+ "selected": false,
+ "text": "5",
+ "value": "5"
+ },
+ {
+ "selected": false,
+ "text": "6",
+ "value": "6"
+ },
+ {
+ "selected": false,
+ "text": "7",
+ "value": "7"
+ },
+ {
+ "selected": false,
+ "text": "8",
+ "value": "8"
+ },
+ {
+ "selected": false,
+ "text": "9",
+ "value": "9"
+ },
+ {
+ "selected": false,
+ "text": "10",
+ "value": "10"
+ },
+ {
+ "selected": false,
+ "text": "11",
+ "value": "11"
+ },
+ {
+ "selected": false,
+ "text": "12",
+ "value": "12"
+ },
+ {
+ "selected": false,
+ "text": "13",
+ "value": "13"
+ },
+ {
+ "selected": false,
+ "text": "14",
+ "value": "14"
+ },
+ {
+ "selected": false,
+ "text": "15",
+ "value": "15"
+ },
+ {
+ "selected": false,
+ "text": "16",
+ "value": "16"
+ },
+ {
+ "selected": false,
+ "text": "17",
+ "value": "17"
+ },
+ {
+ "selected": false,
+ "text": "18",
+ "value": "18"
+ },
+ {
+ "selected": false,
+ "text": "19",
+ "value": "19"
+ },
+ {
+ "selected": false,
+ "text": "20",
+ "value": "20"
+ },
+ {
+ "selected": false,
+ "text": "21",
+ "value": "21"
+ },
+ {
+ "selected": false,
+ "text": "22",
+ "value": "22"
+ },
+ {
+ "selected": false,
+ "text": "23",
+ "value": "23"
+ },
+ {
+ "selected": false,
+ "text": "24",
+ "value": "24"
+ },
+ {
+ "selected": false,
+ "text": "25",
+ "value": "25"
+ },
+ {
+ "selected": false,
+ "text": "26",
+ "value": "26"
+ },
+ {
+ "selected": false,
+ "text": "27",
+ "value": "27"
+ },
+ {
+ "selected": false,
+ "text": "28",
+ "value": "28"
+ },
+ {
+ "selected": false,
+ "text": "29",
+ "value": "29"
+ },
+ {
+ "selected": false,
+ "text": "30",
+ "value": "30"
+ },
+ {
+ "selected": false,
+ "text": "31",
+ "value": "31"
+ },
+ {
+ "selected": false,
+ "text": "32",
+ "value": "32"
+ },
+ {
+ "selected": false,
+ "text": "33",
+ "value": "33"
+ },
+ {
+ "selected": false,
+ "text": "34",
+ "value": "34"
+ },
+ {
+ "selected": false,
+ "text": "35",
+ "value": "35"
+ },
+ {
+ "selected": false,
+ "text": "36",
+ "value": "36"
+ },
+ {
+ "selected": false,
+ "text": "37",
+ "value": "37"
+ },
+ {
+ "selected": false,
+ "text": "38",
+ "value": "38"
+ },
+ {
+ "selected": false,
+ "text": "39",
+ "value": "39"
+ },
+ {
+ "selected": false,
+ "text": "40",
+ "value": "40"
+ },
+ {
+ "selected": false,
+ "text": "41",
+ "value": "41"
+ },
+ {
+ "selected": false,
+ "text": "42",
+ "value": "42"
+ },
+ {
+ "selected": false,
+ "text": "43",
+ "value": "43"
+ },
+ {
+ "selected": false,
+ "text": "44",
+ "value": "44"
+ },
+ {
+ "selected": false,
+ "text": "45",
+ "value": "45"
+ },
+ {
+ "selected": false,
+ "text": "46",
+ "value": "46"
+ },
+ {
+ "selected": false,
+ "text": "47",
+ "value": "47"
+ },
+ {
+ "selected": false,
+ "text": "48",
+ "value": "48"
+ },
+ {
+ "selected": false,
+ "text": "49",
+ "value": "49"
+ },
+ {
+ "selected": false,
+ "text": "50",
+ "value": "50"
+ },
+ {
+ "selected": false,
+ "text": "51",
+ "value": "51"
+ },
+ {
+ "selected": false,
+ "text": "52",
+ "value": "52"
+ },
+ {
+ "selected": false,
+ "text": "53",
+ "value": "53"
+ },
+ {
+ "selected": false,
+ "text": "54",
+ "value": "54"
+ },
+ {
+ "selected": false,
+ "text": "55",
+ "value": "55"
+ },
+ {
+ "selected": false,
+ "text": "56",
+ "value": "56"
+ },
+ {
+ "selected": false,
+ "text": "57",
+ "value": "57"
+ },
+ {
+ "selected": false,
+ "text": "58",
+ "value": "58"
+ },
+ {
+ "selected": false,
+ "text": "59",
+ "value": "59"
+ },
+ {
+ "selected": false,
+ "text": "60",
+ "value": "60"
+ },
+ {
+ "selected": false,
+ "text": "61",
+ "value": "61"
+ },
+ {
+ "selected": false,
+ "text": "62",
+ "value": "62"
+ },
+ {
+ "selected": false,
+ "text": "63",
+ "value": "63"
+ },
+ {
+ "selected": false,
+ "text": "64",
+ "value": "64"
+ }
+ ],
+ "query": "0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64",
+ "queryValue": "",
+ "skipUrlSync": false,
+ "type": "custom"
+ }
+ ]
+ },
+ "time": {
+ "from": "now-5m",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "10s",
+ "30s",
+ "1m",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "browser",
+ "title": "RDT (L3 Cache)",
+ "uid": "kuro-rdt",
+ "version": 9
+} \ No newline at end of file
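As with memory, `collectd_intel_rdt_bytes` (L3 occupancy) and `collectd_intel_rdt_ipc` are exported without the `_total` counter suffix, i.e. as gauges, so the `rate()` wrappers in the first two panels chart their rate of change rather than the values themselves; only `collectd_intel_rdt_memory_bandwidth_total` is a counter for which `rate()` is strictly appropriate. A sketch aggregating local and remote bandwidth per RDT monitoring group (placeholder URL):

```py
import requests

PROMETHEUS = "http://localhost:9090"  # placeholder
# Total (local + remote) memory bandwidth per RDT monitoring group
query = ("sum by (intel_rdt) (rate(collectd_intel_rdt_memory_bandwidth_total"
         "{exported_instance='pod12-node4'}[30s]))")
resp = requests.get(PROMETHEUS + "/api/v1/query", params={"query": query})
for series in resp.json()["data"]["result"]:
    print("group", series["metric"]["intel_rdt"], series["value"][1], "B/s")
```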
diff --git a/tools/lma/metrics/jupyter-notebooks/Analysis-Monitoring-K8S.ipynb b/tools/lma/metrics/jupyter-notebooks/Analysis-Monitoring-K8S.ipynb
new file mode 100644
index 00000000..10c59d84
--- /dev/null
+++ b/tools/lma/metrics/jupyter-notebooks/Analysis-Monitoring-K8S.ipynb
@@ -0,0 +1,644 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Metrics Analysis Notebook (k8s)\n",
+ "\n",
+ "#### Used to analyse / visualize the metrics, data fetched from prometheus (monitoring cluster)\n",
+ "\n",
+ "### Contributor: Aditya Srivastava <adityasrivastava301199@gmail.com>\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import pandas as pd\n",
+ "import matplotlib.pyplot as plt\n",
+ "import matplotlib.dates as mdates\n",
+ "import numpy as np\n",
+ "\n",
+ "import datetime\n",
+ "import time\n",
+ "import requests\n",
+ "\n",
+ "from pprint import pprint\n",
+ "import json\n",
+ "from datetime import datetime"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "PROMETHEUS = 'http://10.10.120.211:30902/' #do not change, unless sure"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Helper Functions"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#function to make DF out of query json\n",
+ "\n",
+ "def convert_to_df(res_json):\n",
+ "\n",
+ " data_list = res_json['data']['result']\n",
+ " res_df = pd.DataFrame()\n",
+ " if not data_list:\n",
+ " return res_df\n",
+ "\n",
+ " # making colums\n",
+ " headers = data_list[0]\n",
+ " for data in data_list:\n",
+ " metrics = data['metric']\n",
+ " for metric in metrics.keys():\n",
+ " res_df[metric] = np.nan\n",
+ " res_df['value'] = 0\n",
+ " \n",
+ " # filling the df\n",
+ " for data in data_list:\n",
+ " metrics = data['metric']\n",
+ " metrics['value'] = data['value'][-1]\n",
+ " res_df = res_df.append(metrics, ignore_index=True) \n",
+ "\n",
+ " return res_df\n",
+ "\n",
+ "def convert_to_df_range(res_json):\n",
+ "\n",
+ " data_list = res_json['data']['result']\n",
+ " res_df = pd.DataFrame()\n",
+ " if not data_list:\n",
+ " return res_df\n",
+ "\n",
+ " # filling the df\n",
+ " for data in data_list:\n",
+ " metrics = data['metric']\n",
+ " values = np.array(data['values'])\n",
+ " for time, value in values:\n",
+ " metrics['timestamp'] = time\n",
+ " metrics['value'] = value\n",
+ " res_df = res_df.append(metrics, ignore_index=True) \n",
+ "\n",
+ " return res_df\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# functions to query\n",
+ "\n",
+ "def convert_to_timestamp(s):\n",
+ " return time.mktime(datetime.strptime(s, \"%Y-%m-%d %H:%M:%S\").timetuple())\n",
+ "\n",
+ "def query_current(params={}):\n",
+ " # input: params\n",
+ " # type: dict\n",
+ " # Example: {'query': 'container_cpu_user_seconds_total'}\n",
+ " \n",
+ " # Output: dict, loaded json response of the query\n",
+ "\n",
+ " res = requests.get(PROMETHEUS + '/api/v1/query', \n",
+ " params=params)\n",
+ " return json.loads(res.text)\n",
+ "\n",
+ "\n",
+ "def query_range(start, end, params={}, steps = '30s'):\n",
+ " # input: params\n",
+ " # type: dict\n",
+ " # Example: {'query': 'container_cpu_user_seconds_total'}\n",
+ " \n",
+ " # Output: dict, loaded json response of the query\n",
+ " params[\"start\"] = convert_to_timestamp(start)\n",
+ " params[\"end\"] = convert_to_timestamp(end)\n",
+ " params[\"step\"] = steps\n",
+ "\n",
+ " print(params)\n",
+ " \n",
+ " res = requests.get(PROMETHEUS + '/api/v1/query_range', \n",
+ " params=params,\n",
+ " )\n",
+ "\n",
+ " return json.loads(res.text)\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Analysis Function"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### CPU"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# CPU Unused Cores\n",
+ "def unused_cores(start=None, end=None, node=None, steps='15s', csv=None, verbose=False):\n",
+ " \n",
+ " if csv is not None:\n",
+ " df = pd.read_csv(csv)\n",
+ " return df\n",
+ " else:\n",
+ " if start is None or end is None or node is None:\n",
+ " return \"Start, end and Node name required when fetching from prometheus\"\n",
+ " \n",
+ " params = {'query' : \"collectd_cpu_percent{exported_instance='\" + node + \"'}\"}\n",
+ "\n",
+ " target_cpu_usage_range = query_range(start, end, params, steps)\n",
+ " df = convert_to_df_range(target_cpu_usage_range)\n",
+ "\n",
+ " df = df.drop(['__name__', 'instance', 'job'], axis = 1)\n",
+ " groups = df.groupby(['cpu'])\n",
+ " if verbose: print(\"Unused Cores :\")\n",
+ " unused_cores = []\n",
+ " for key, item in groups:\n",
+ " curr_df = item\n",
+ " idle_row = curr_df.loc[curr_df['type'] == 'idle']\n",
+ " if idle_row['value'].iloc[0] == '100':\n",
+ " if verbose: print(\"Core: \",key)\n",
+ " unused_cores.append(int(key))\n",
+ "\n",
+ " print(\"Number of unused cores: \", len(unused_cores))\n",
+ " return unused_cores\n",
+ "\n",
+ "\n",
+ "#CPU fully used cores\n",
+ "def fully_used_cores(start=None, end=None, node=None, steps='15s', csv=None, verbose=False):\n",
+ " \n",
+ " if csv is not None:\n",
+ " df = pd.read_csv(csv)\n",
+ " return df\n",
+ " else:\n",
+ " if start is None or end is None or node is None:\n",
+ " return \"Start, end and Node name required when fetching from prometheus\"\n",
+ " \n",
+ " params = {'query' : \"collectd_cpu_percent{exported_instance='\" + node + \"'}\"}\n",
+ "\n",
+ " target_cpu_usage_range = query_range(start, end, params, steps)\n",
+ " df = convert_to_df_range(target_cpu_usage_range)\n",
+ "\n",
+ " df = df.drop(['__name__', 'instance', 'job'], axis = 1)\n",
+ " groups = df.groupby(['cpu'])\n",
+ " if verbose: print(\"Fully Used Cores :\")\n",
+ " fully_used_cores = []\n",
+ " for key, item in groups:\n",
+ " curr_df = item\n",
+ " idle_row = curr_df.loc[curr_df['type'] == 'idle']\n",
+ " if idle_row['value'].iloc[0] == '0':\n",
+ " if verbose: print(\"Core: \",key)\n",
+ " fully_used_cores.append(int(key))\n",
+ " print(\"Number of fully used cores: \", len(fully_used_cores))\n",
+ " return fully_used_cores\n",
+ "\n",
+ "\n",
+ "# CPU used cores plots\n",
+ "def plot_used_cores(start=None, end=None, node=None, steps='15s', csv=None, verbose=False):\n",
+ " \n",
+ " if csv is not None:\n",
+ " df = pd.read_csv(csv)\n",
+ " \n",
+ " # \n",
+ " df['rate'] = df['value'].diff()\n",
+ "\n",
+ " fig = plt.figure(figsize=(24,6), facecolor='oldlace', edgecolor='red')\n",
+ " ax1 = fig.add_subplot(111)\n",
+ " ax1.title.set_text('CPU usage')\n",
+ " ax1.plot(df['epoch'], df['rate'])\n",
+ " return df\n",
+ " else:\n",
+ " if start is None or end is None or node is None:\n",
+ " return \"Start, end and Node name required when fetching from prometheus\"\n",
+ "\n",
+ " params = {'query' : \"collectd_cpu_percent{exported_instance='\" + node + \"'}\"}\n",
+ "\n",
+ " target_cpu_usage_range = query_range(start, end, params, steps)\n",
+ " df = convert_to_df_range(target_cpu_usage_range)\n",
+ " \n",
+ " df = df.drop(['__name__', 'instance', 'job'], axis = 1)\n",
+ " groups = df.groupby(['cpu'])\n",
+ " used_cores = []\n",
+ "\n",
+ " for key, item in groups:\n",
+ " curr_df = item\n",
+ " idle_row = curr_df.loc[curr_df['type'] == 'idle']\n",
+ "\n",
+ " if idle_row['value'].iloc[0] != '100':\n",
+ " used_cores.append(key)\n",
+ " type_grps = curr_df.groupby('type')\n",
+ " fig = plt.figure(figsize=(24,6), facecolor='oldlace', edgecolor='red')\n",
+ "\n",
+ " for type_key, new_item in type_grps:\n",
+ "\n",
+ " if type_key == 'system':\n",
+ " ax1 = fig.add_subplot(131)\n",
+ " ax1.title.set_text(type_key)\n",
+ " ax1.plot(new_item['timestamp'], new_item['value'])\n",
+ " elif type_key == 'user':\n",
+ " ax2 = fig.add_subplot(132)\n",
+ " ax2.title.set_text(type_key)\n",
+ " ax2.plot(new_item['timestamp'], new_item['value'])\n",
+ " elif type_key == 'wait':\n",
+ " ax3 = fig.add_subplot(133)\n",
+ " ax3.title.set_text(type_key)\n",
+ " ax3.plot(new_item['timestamp'], new_item['value'])\n",
+ "\n",
+ " plt.suptitle('Used CPU Core {}'.format(key), fontsize=14)\n",
+ " plt.show()\n",
+ " print(\"Number of used cores: \", len(used_cores))\n",
+ " return used_cores"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### Interface"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Interface Dropped (both type 1 and 2, i.e rx and tx)\n",
+ "#TODO: Change this to separate functions later\n",
+ "def interface_dropped(start=None, end=None, node=None, steps='15s', csv=None, verbose=False):\n",
+ " \n",
+ " if csv is not None:\n",
+ " df = pd.read_csv(csv)\n",
+ " df_0 = df #TODO: Change this\n",
+ " df_1 = df #TODO: Change this\n",
+ " else:\n",
+ " if start is None or end is None or node is None:\n",
+ " return \"Start, end and Node name required when fetching from prometheus\"\n",
+ " \n",
+ " params = {'query' : \"collectd_interface_if_dropped_0_total{exported_instance='\" + node + \"'}\"}\n",
+ "\n",
+ " interface_dropped_0 = query_range(start, end, params, steps)\n",
+ " df_0 = convert_to_df_range(interface_dropped_0)\n",
+ " \n",
+ " params = {'query' : \"collectd_interface_if_dropped_1_total{exported_instance='\" + node + \"'}\"}\n",
+ " interface_dropped_1 = query_range(start, end, params, steps)\n",
+ " df_1 = convert_to_df_range(interface_dropped_1)\n",
+ "\n",
+ " \n",
+ " #df_0 : interfaces_dropped_0_df\n",
+ " df_0 = df_0.drop(['__name__', 'instance', 'job'], axis = 1)\n",
+ "\n",
+ " #df_1 : interfaces_dropped_1_df\n",
+ " df_1 = df_1.drop(['__name__', 'instance', 'job'], axis = 1)\n",
+ "\n",
+ " groups_0 = df_0.groupby(['interface'])\n",
+ " groups_1 = df_1.groupby(['interface'])\n",
+ "\n",
+ " groups = [groups_0, groups_1]\n",
+ " dropped_interfaces= []\n",
+ " drop_type = 0\n",
+ " color = ['oldlace', 'mistyrose']\n",
+ " plot_iter = 111\n",
+ " for group in groups:\n",
+ " dropped = []\n",
+ "\n",
+ " for key, item in group:\n",
+ " curr_df = item\n",
+ " if np.any(curr_df['value'] == '1'):\n",
+ " dropped_row = curr_df.loc[curr_df['value'] == '1']\n",
+ " dropped.append([key, dropped_row['timestamp'].iloc[0]])\n",
+ " fig = plt.figure(figsize=(24,6), facecolor=color[drop_type], edgecolor='red')\n",
+ " ax = fig.add_subplot(plot_iter)\n",
+ " ax.title.set_text(\"Interface: {}\".format(key))\n",
+ " ax.plot(item['timestamp'], item['value'])\n",
+ " dropped_interfaces.append(dropped)\n",
+ " plt.suptitle('Interfaces Drop type {}'.format(drop_type), fontsize=14)\n",
+ " plt.show()\n",
+ " drop_type += 1\n",
+ " return dropped_interfaces\n",
+ "\n",
+ "\n",
+ "# Interface Errors (both type 1 and 2, i.e rx and tx)\n",
+ "#TODO: Change this to separate functions later\n",
+ "def interface_errors(start=None, end=None, node=None, steps='15s', csv=None, verbose=False):\n",
+ " \n",
+ " if csv is not None:\n",
+ " df = pd.read_csv(csv)\n",
+ " df_0 = df #TODO: Change this\n",
+ " df_1 = df #TODO: Change this\n",
+ " else:\n",
+ " if start is None or end is None or node is None:\n",
+ " return \"Start, end and Node name required when fetching from prometheus\"\n",
+ " \n",
+ " params = {'query' : \"collectd_interface_if_errors_0_total{exported_instance='\" + node + \"'}\"}\n",
+ " interfaces_errors_0 = query_range(start, end, params, steps)\n",
+ " df_0 = convert_to_df_range(interfaces_errors_0)\n",
+ " \n",
+ " params = {'query' : \"collectd_interface_if_errors_1_total{exported_instance='\" + node + \"'}\"}\n",
+ " interface_errors_1 = query_range(start, end, params, steps)\n",
+ " df_1 = convert_to_df_range(interface_errors_1)\n",
+ "\n",
+ " \n",
+ " #df_0 : interfaces_errors_0_df\n",
+ " df_0 = df_0.drop(['__name__', 'instance', 'job'], axis = 1)\n",
+ "\n",
+ " #df_1 : interfaces_dropped_1_df\n",
+ " df_1 = df_1.drop(['__name__', 'instance', 'job'], axis = 1)\n",
+ "\n",
+ " groups_0 = df_0.groupby(['interface'])\n",
+ " groups_1 = df_1.groupby(['interface'])\n",
+ "\n",
+ " groups = [groups_0, groups_1]\n",
+ " err_interfaces= []\n",
+ " err_type = 0\n",
+ " color = ['oldlace', 'mistyrose']\n",
+ " for group in groups:\n",
+ " errors = []\n",
+ "\n",
+ " for key, item in group:\n",
+ " curr_df = item\n",
+ "\n",
+ " if np.any(curr_df['value'] == '1'):\n",
+ " err_row = curr_df.loc[curr_df['value'] == '1']\n",
+ " erros.append([key, err_row['timestamp'].iloc[0]])\n",
+ "\n",
+ " fig = plt.figure(figsize=(24,6), facecolor=color[err_type], edgecolor='red')\n",
+ " ax = fig.add_subplot(111)\n",
+ " ax.title.set_text(\"Interface: {}\".format(key))\n",
+ " ax.plot(item['timestamp'], item['value'])\n",
+ "\n",
+ " err_interfaces.append(errors)\n",
+ " plt.suptitle('Interfaces Error type {}'.format(err_type), fontsize=14)\n",
+ " plt.show()\n",
+ " err_type += 1\n",
+ "\n",
+ " return err_interfaces"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### RDT "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# L3 cache bytes\n",
+ "def plot_rdt_bytes(start=None, end=None, node=None, steps='15s', csv=None, verbose=False):\n",
+ " \n",
+ " if csv is not None:\n",
+ " df = pd.read_csv(csv)\n",
+ " else:\n",
+ " if start is None or end is None or node is None:\n",
+ " return \"Start, end and Node name required when fetching from prometheus\"\n",
+ "\n",
+ " params = {'query' : \"collectd_intel_rdt_bytes{exported_instance='\" + node + \"'}\"}\n",
+ " intel_rdt_bytes = query_range(start, end, params, steps)\n",
+ " df = convert_to_df_range(intel_rdt_bytes)\n",
+ "\n",
+ " df = df.drop(['__name__', 'instance', 'job'], axis = 1)\n",
+ " groups = df.groupby(['intel_rdt'])\n",
+ " for key, item in groups:\n",
+ " curr_df = item\n",
+ " fig = plt.figure(figsize=(24,6), facecolor='oldlace', edgecolor='red')\n",
+ " ax1 = fig.add_subplot(111)\n",
+ " ax1.title.set_text(\"Intel RDT Number: {}\".format(key))\n",
+ " ax1.plot(item['timestamp'], item['value'])\n",
+ " plt.show()\n",
+ " return\n",
+ "\n",
+ "\n",
+ "# L3 IPC values\n",
+ "def plot_rdt_ipc(start=None, end=None, node=None, steps='15s', csv=None, verbose=False):\n",
+ " \n",
+ " if csv is not None:\n",
+ " df = pd.read_csv(csv)\n",
+ " else:\n",
+ " if start is None or end is None or node is None:\n",
+ " return \"Start, end and Node name required when fetching from prometheus\"\n",
+ " \n",
+ " params = {'query' : \"collectd_intel_rdt_ipc{exported_instance='\" + node + \"'}\"}\n",
+ " intel_rdt_ipc = query_range(start, end, params, steps)\n",
+ " df = convert_to_df_range(intel_rdt_ipc)\n",
+ "\n",
+ " df = df.drop(['__name__', 'instance', 'job'], axis = 1)\n",
+ " groups = df.groupby(['intel_rdt'])\n",
+ " for key, item in groups:\n",
+ " curr_df = item\n",
+ " fig = plt.figure(figsize=(24,6), facecolor='oldlace', edgecolor='red')\n",
+ " ax1 = fig.add_subplot(111)\n",
+ " ax1.title.set_text(\"Intel RDT Number: {}, IPC value\".format(key))\n",
+ " ax1.plot(item['timestamp'], item['value'])\n",
+ " plt.show()\n",
+ " return\n",
+ "\n",
+ "\n",
+ "# memeory bandwidtdh\n",
+ "def get_rdt_memory_bandwidth(start=None, end=None, node=None, steps='15s', csv=None, verbose=False):\n",
+ " \n",
+ " if csv is not None:\n",
+ " df = pd.read_csv(csv)\n",
+ " else:\n",
+ "\n",
+ " if start is None or end is None or node is None:\n",
+ " return \"Start, end and Node name required when fetching from prometheus\"\n",
+ " \n",
+ " params = {'query' : \"collectd_intel_rdt_memory_bandwidth_total{exported_instance='\" + node + \"'}\"}\n",
+ " intel_rdt_mem_bw = query_range(start, end, params, steps)\n",
+ " df = convert_to_df_range(intel_rdt_mem_bw)\n",
+ "\n",
+ " df = df.drop(['__name__', 'instance', 'job'], axis = 1)\n",
+ " \n",
+ " return df"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### Memory"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "scrolled": true
+ },
+ "outputs": [],
+ "source": [
+ "def get_memory_usage(start=None, end=None, node=None, steps='15s', csv=None, verbose=False):\n",
+ " \n",
+ " if csv is not None:\n",
+ " df = pd.read_csv(csv)\n",
+ " else:\n",
+ " if start is None or end is None or node is None:\n",
+ " return \"Start, end and Node name required when fetching from prometheus\"\n",
+ " \n",
+ " params = {'query' : \"collectd_memory{exported_instance='\" + node + \"'} / (1024*1024*1024) \"} \n",
+ " target_memory_usage_range = query_range(start, end, params, steps)\n",
+ " df = convert_to_df_range(target_memory_usage_range)\n",
+ "\n",
+ " df = df.drop(['instance', 'job'], axis = 1)\n",
+ " return df"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Testing Zone"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "scrolled": false
+ },
+ "outputs": [],
+ "source": [
+ "# prom fetch\n",
+ "cores = unused_cores('2020-07-31 08:00:12', '2020-07-31 08:01:12', 'pod12-node4')\n",
+ "print(cores)"
+ ]
+ },
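+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "A minimal sketch of the csv-based fallback (the path below is illustrative; see Usage / Examples):"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# csv fetch -- hypothetical export path, produced as in the Analysis-Monitoring-Local notebook\n",
+ "# plot_used_cores(csv='metrics_data/cpu-0/cpu-user-2020-06-02')"
+ ]
+ },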
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Usage / Examples\n",
+ "\n",
+ "\n",
+ "##### CPU \n",
+ "\n",
+ "- For calling cpu unsued cores\n",
+ "\n",
+ "```py\n",
+ "# Fetching from prometheus\n",
+ "cores = unused_cores('2020-07-31 08:00:12', '2020-07-31 08:01:12', 'pod12-node4')\n",
+ "\n",
+ "```\n",
+ "\n",
+ "- For finding fully used cores\n",
+ "\n",
+ "```py\n",
+ "# Fetching from prometheus\n",
+ "fully_used = fully_used_cores('2020-07-31 08:00:12', '2020-07-31 08:01:12', 'pod12-node4')\n",
+ "\n",
+ "```\n",
+ "\n",
+ "- Similarly for plotting used cores\n",
+ "\n",
+ "```py\n",
+ "# Fetching\n",
+ "plot_used_cores('2020-07-31 08:00:12', '2020-07-31 08:01:12', 'pod12-node4')\n",
+ "\n",
+ "#csv\n",
+ "# use Analysis-Monitoring-Local Notebook for correct analysis \n",
+ "plot_used_cores(csv='metrics_data/cpu-0/cpu-user-2020-06-02')\n",
+ "\n",
+ "```\n",
+ "\n",
+ "\n",
+ "##### Interface\n",
+ "\n",
+ "- Interface Dropped \n",
+ "\n",
+ "```py\n",
+ "# Fetching from prom\n",
+ "dropped_interfaces = interface_dropped('2020-07-31 08:00:12', '2020-07-31 08:01:12', 'pod12-node4')\n",
+ "\n",
+ "```\n",
+ "\n",
+ "- Interface Errors\n",
+ "\n",
+ "```py\n",
+ "# Fetching from prom\n",
+ "interface_errors('2020-07-31 08:00:12', '2020-07-31 08:01:12', 'pod12-node4')\n",
+ "```\n",
+ "\n",
+ "##### RDT\n",
+ "\n",
+ "- Plot bytes\n",
+ "\n",
+ "```py\n",
+ "# fetch\n",
+ "plot_rdt_bytes('2020-07-31 08:00:12', '2020-07-31 08:01:12','pod12-node4')\n",
+ "```\n",
+ "\n",
+ "- Plot ipc values\n",
+ "\n",
+ "```py\n",
+ "#fetch\n",
+ "plot_rdt_ipc('2020-07-31 08:00:12', '2020-07-31 08:01:12', 'pod12-node4')\n",
+ "```\n",
+ "\n",
+ "- Memory bandwidth\n",
+ "\n",
+ "```py\n",
+ "#fetch\n",
+ "get_rdt_memory_bandwidth('2020-07-31 08:00:12', '2020-07-31 08:01:12', 'pod12-node4')\n",
+ "```"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.6.8"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
diff --git a/tools/lma/metrics/jupyter-notebooks/Analysis-Monitoring-Local.ipynb b/tools/lma/metrics/jupyter-notebooks/Analysis-Monitoring-Local.ipynb
new file mode 100644
index 00000000..0385b6f9
--- /dev/null
+++ b/tools/lma/metrics/jupyter-notebooks/Analysis-Monitoring-Local.ipynb
@@ -0,0 +1,913 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Metrics Analysis Notebook (local)\n",
+ "\n",
+ "#### Used to analyse / visualize the metrics when uploaded via csv file\n",
+ "\n",
+ "### Contributor: Aditya Srivastava <adityasrivastava301199@gmail.com>\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from datetime import datetime\n",
+ "import json\n",
+ "import matplotlib.pyplot as plt\n",
+ "import matplotlib.dates as mdates\n",
+ "import numpy as np\n",
+ "import os\n",
+ "import pandas as pd\n",
+ "from pprint import pprint\n",
+ "import re\n",
+ "import requests\n",
+ "import time"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Helper Functions"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "DATETIME_FORMAT = \"%Y-%m-%d %H:%M:%S\"\n",
+ "\n",
+ "def convert_to_timestamp(s):\n",
+ " global DATETIME_FORMAT\n",
+ " return time.mktime(datetime.strptime(s, DATETIME_FORMAT).timetuple())\n",
+ "\n",
+ "def convert_to_time_string(epoch):\n",
+ " global DATETIME_FORMAT\n",
+ " t = datetime.fromtimestamp(float(epoch)/1000.)\n",
+ " return t.strftime(DATETIME_FORMAT)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Note: \n",
+ " \n",
+ "Path will be used as a parameter in almost every function\n",
+ "\n",
+ "path / rootdir / csv : (str) Path to the folder whose direct children are metric folders\n",
+ "\n",
+ "example: /path/to/folder\n",
+ "\n",
+ "When : \n",
+ "```sh\n",
+ "ls /path/to/folder\n",
+ "\n",
+ "# output should be directories such as\n",
+ "# cpu-0 cpu-1 cpu-2 ..........................\n",
+ "# processes-ovs-vswitchd ........processes-ovsdb-server\n",
+ "```"
+ ]
+ },
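+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "In code it is simply the string handed to each fetch/analysis function, e.g. (illustrative):\n",
+ "\n",
+ "```py\n",
+ "rootdir = 'metrics_data/'  # direct children: cpu-0, cpu-1, ..., interface-*, ovs_stats-*\n",
+ "```"
+ ]
+ },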
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Analysis Function"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### CPU"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "rootdir = 'metrics_data/'\n",
+ "\n",
+ "def fetch_cpu_data(rootdir):\n",
+ " df = pd.DataFrame()\n",
+ " reg_compile = re.compile(\"cpu-\\d{1,2}\")\n",
+ " for dirpath, dirnames, filenames in os.walk(rootdir):\n",
+ " dirname = dirpath.split(os.sep)[-1] \n",
+ " if reg_compile.match(dirname):\n",
+ " # read 3 files from this folder...\n",
+ " _df = pd.DataFrame()\n",
+ " for file in filenames:\n",
+ " if 'user' in file:\n",
+ " temp_df = pd.read_csv(dirpath + os.sep + file)\n",
+ " _df['user'] = temp_df['value']\n",
+ " _df['epoch'] = temp_df['epoch']\n",
+ "\n",
+ " if 'system' in file:\n",
+ " temp_df = pd.read_csv(dirpath + os.sep + file)\n",
+ " _df['system'] = temp_df['value']\n",
+ " _df['epoch'] = temp_df['epoch']\n",
+ "\n",
+ " if 'idle' in file:\n",
+ " temp_df = pd.read_csv(dirpath + os.sep + file)\n",
+ " _df['idle'] = temp_df['value']\n",
+ " _df['epoch'] = temp_df['epoch']\n",
+ "\n",
+ " _df['cpu'] = dirname.split('-')[-1]\n",
+ "\n",
+ " df = df.append(_df, ignore_index=True)\n",
+ "\n",
+ " total = df['user'] + df['system'] + df['idle']\n",
+ "\n",
+ " df['user_percentage'] = df['user']*100 / total\n",
+ " df['system_percentage'] = df['system']*100 / total\n",
+ " df['idle_percentage'] = df['idle']*100 / total\n",
+ " \n",
+ " return df\n"
+ ]
+ },
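+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# quick sanity check of the loader (assumes metrics_data/ exists beside this notebook)\n",
+ "fetch_cpu_data(rootdir).head()"
+ ]
+ },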
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# CPU Unused Cores\n",
+ "def unused_cores(rootdir, verbose=False):\n",
+ " \n",
+ " df = fetch_cpu_data(rootdir)\n",
+ " groups = df.groupby(['cpu'])\n",
+ " if verbose: print(\"Unused Cores :\")\n",
+ "\n",
+ " unused_cores = []\n",
+ " for key, item in groups:\n",
+ " curr_df = item\n",
+ " unused_cores.append(key)\n",
+ " idle_values = curr_df.loc[curr_df['idle_percentage'] < 99.999]\n",
+ " if np.any(idle_values):\n",
+ " unused_cores.pop(-1)\n",
+ "\n",
+ " unused_cores = set(unused_cores)\n",
+ " for key, item in groups:\n",
+ " if key not in unused_cores:\n",
+ " continue\n",
+ " fig = plt.figure(figsize=(24,6), facecolor='oldlace', edgecolor='red')\n",
+ "\n",
+ " ax1 = fig.add_subplot(131)\n",
+ " ax1.title.set_text(\"System\")\n",
+ " ax1.plot(item['epoch'], item['system_percentage'])\n",
+ " \n",
+ " ax2 = fig.add_subplot(132)\n",
+ " ax2.title.set_text(\"User\")\n",
+ " ax2.plot(item['epoch'], item['user_percentage'])\n",
+ " \n",
+ " ax3 = fig.add_subplot(133)\n",
+ " ax3.title.set_text(\"Idle\")\n",
+ " ax3.plot(item['epoch'], item['idle_percentage'])\n",
+ "\n",
+ " plt.suptitle('Used CPU Core {}'.format(key), fontsize=14)\n",
+ " plt.show()\n",
+ "\n",
+ " print(\"Number of unused cores: \", len(unused_cores))\n",
+ " return unused_cores\n",
+ "\n",
+ "\n",
+ "#CPU fully used cores\n",
+ "def fully_used_cores(rootdir, verbose=False):\n",
+ " \n",
+ "\n",
+ " df = fetch_cpu_data(rootdir)\n",
+ " groups = df.groupby(['cpu'])\n",
+ " if verbose: print(\"Fully Used Cores :\")\n",
+ "\n",
+ " fully_used_cores = []\n",
+ " for key, item in groups:\n",
+ " curr_df = item\n",
+ " idle_values = curr_df.loc[curr_df['idle_percentage'] <= 10]\n",
+ " if np.any(idle_values):\n",
+ " fully_used_cores.append(key)\n",
+ "\n",
+ " fully_used_cores = set(fully_used_cores)\n",
+ " for key, item in groups:\n",
+ " if key not in fully_used_cores:\n",
+ " continue\n",
+ " fig = plt.figure(figsize=(24,6), facecolor='oldlace', edgecolor='red')\n",
+ "\n",
+ " ax1 = fig.add_subplot(131)\n",
+ " ax1.title.set_text(\"System\")\n",
+ " ax1.plot(item['epoch'], item['system_percentage'])\n",
+ "\n",
+ " ax2 = fig.add_subplot(132)\n",
+ " ax2.title.set_text(\"User\")\n",
+ " ax2.plot(item['epoch'], item['user_percentage'])\n",
+ "\n",
+ " ax3 = fig.add_subplot(133)\n",
+ " ax3.title.set_text(\"Idle\")\n",
+ " ax3.plot(item['epoch'], item['idle_percentage'])\n",
+ "\n",
+ " plt.suptitle('Used CPU Core {}'.format(key), fontsize=14)\n",
+ " plt.show()\n",
+ "\n",
+ " print(\"Number of fully used cores: \", len(fully_used_cores))\n",
+ " return fully_used_cores\n",
+ "\n",
+ "\n",
+ "# CPU used cores plots\n",
+ "def used_cores(rootdir, verbose=False):\n",
+ "\n",
+ " df = fetch_cpu_data(rootdir)\n",
+ " groups = df.groupby(['cpu'])\n",
+ " if verbose: print(\"Used Cores :\")\n",
+ "\n",
+ " used_cores = []\n",
+ " for key, item in groups:\n",
+ " curr_df = item\n",
+ " idle_values = curr_df.loc[curr_df['idle_percentage'] < 99.999]\n",
+ " if np.any(idle_values):\n",
+ " used_cores.append(key)\n",
+ "\n",
+ " used_cores = set(used_cores)\n",
+ " for key, item in groups:\n",
+ " if key not in used_cores:\n",
+ " continue\n",
+ " fig = plt.figure(figsize=(24,6), facecolor='oldlace', edgecolor='red')\n",
+ "\n",
+ " ax1 = fig.add_subplot(131)\n",
+ " ax1.title.set_text(\"System\")\n",
+ " ax1.plot(item['epoch'], item['system_percentage'])\n",
+ "\n",
+ " ax2 = fig.add_subplot(132)\n",
+ " ax2.title.set_text(\"User\")\n",
+ " ax2.plot(item['epoch'], item['user_percentage'])\n",
+ "\n",
+ " ax3 = fig.add_subplot(133)\n",
+ " ax3.title.set_text(\"Idle\")\n",
+ " ax3.plot(item['epoch'], item['idle_percentage'])\n",
+ "\n",
+ " plt.suptitle('Used CPU Core {}'.format(key), fontsize=14)\n",
+ " plt.show()\n",
+ "\n",
+ " print(\"Number of used cores: \", len(used_cores))\n",
+ " return used_cores\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### Interface"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "rootdir = 'metrics_data/'\n",
+ "\n",
+ "def fetch_interfaces_data(rootdir):\n",
+ "\n",
+ " df = pd.DataFrame()\n",
+ " reg_compile = re.compile(\"interface-.*\")\n",
+ " for dirpath, dirnames, filenames in os.walk(rootdir):\n",
+ " dirname = dirpath.split(os.sep)[-1] \n",
+ " if reg_compile.match(dirname):\n",
+ " # read 3 files from this folder...\n",
+ " _df = pd.DataFrame()\n",
+ " for file in filenames:\n",
+ " if 'errors' in file:\n",
+ " temp_df = pd.read_csv(dirpath + os.sep + file)\n",
+ " _df['error_rx'] = temp_df['rx']\n",
+ " _df['error_tx'] = temp_df['tx']\n",
+ " _df['epoch'] = temp_df['epoch']\n",
+ "\n",
+ " if 'dropped' in file:\n",
+ " temp_df = pd.read_csv(dirpath + os.sep + file)\n",
+ " _df['dropped_rx'] = temp_df['rx']\n",
+ " _df['dropped_tx'] = temp_df['tx']\n",
+ " _df['epoch'] = temp_df['epoch']\n",
+ "\n",
+ " _df['interface'] = '-'.join(dirname.split('-')[1:])\n",
+ " df = df.append(_df, ignore_index=True)\n",
+ " return df\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Interface Dropped (both type 1 and 2, i.e rx and tx)\n",
+ "def interface_dropped(rootdir, verbose=False):\n",
+ " \n",
+ " df = fetch_interfaces_data(rootdir)\n",
+ " group = df.groupby(['interface'])\n",
+ " color = ['oldlace', 'mistyrose']\n",
+ "\n",
+ " dropped = {'rx':[], 'tx':[]}\n",
+ "\n",
+ " itr = 0\n",
+ " for key, item in group:\n",
+ " curr_df = item\n",
+ "\n",
+ " if np.any(curr_df['dropped_rx'] == 1):\n",
+ " dropped_rows = curr_df[curr_df['dropped_rx'] == 1]\n",
+ " dropped['rx'].append([key, dropped_row['epoch'].iloc[0]])\n",
+ " if np.any(curr_df['dropped_tx'] == 1):\n",
+ " dropped_rows = curr_df[curr_df['dropped_tx'] == 1]\n",
+ " dropped['tx'].append([key, dropped_row['epoch'].iloc[0]])\n",
+ "\n",
+ " fig = plt.figure(figsize=(24,6), facecolor=color[itr%2], edgecolor='red')\n",
+ " ax = fig.add_subplot(211)\n",
+ " ax.title.set_text(\"Interface: {} Dropped (rx)\".format(key))\n",
+ " ax.plot(item['epoch'], item['dropped_rx'])\n",
+ "\n",
+ " ax1 = fig.add_subplot(212)\n",
+ " ax1.title.set_text(\"Interface: {} Dropped (tx)\".format(key))\n",
+ " ax1.plot(item['epoch'], item['dropped_tx'])\n",
+ "\n",
+ " itr += 1\n",
+ "\n",
+ " plt.suptitle('Interface Dropped', fontsize=14)\n",
+ " plt.show()\n",
+ "\n",
+ " return dropped\n",
+ "\n",
+ "\n",
+ "# Interface Errors (both type 1 and 2, i.e rx and tx)\n",
+ "def interface_errors(rootdir, verbose=False):\n",
+ " \n",
+ " df = fetch_interfaces_data(rootdir)\n",
+ " group = df.groupby(['interface'])\n",
+ " color = ['oldlace', 'mistyrose']\n",
+ "\n",
+ " errors = {'rx':[], 'tx':[]}\n",
+ "\n",
+ " itr = 0\n",
+ " for key, item in group:\n",
+ " curr_df = item\n",
+ "\n",
+ " if np.any(curr_df['error_rx'] == 1):\n",
+ " err_rows = curr_df[curr_df['error_rx'] == 1]\n",
+ " errors['rx'].append([key, err_row['epoch'].iloc[0]])\n",
+ " if np.any(curr_df['error_tx'] == 1):\n",
+ " err_rows = curr_df[curr_df['error_tx'] == 1]\n",
+ " errors['tx'].append([key, err_row['epoch'].iloc[0]])\n",
+ "\n",
+ " fig = plt.figure(figsize=(24,6), facecolor=color[itr%2], edgecolor='red')\n",
+ " ax = fig.add_subplot(211)\n",
+ " ax.title.set_text(\"Interface: {} Errors (rx)\".format(key))\n",
+ " ax.plot(item['epoch'], item['error_rx'])\n",
+ "\n",
+ " ax1 = fig.add_subplot(212)\n",
+ " ax1.title.set_text(\"Interface: {} Errors (tx)\".format(key))\n",
+ " ax1.plot(item['epoch'], item['error_tx'])\n",
+ "\n",
+ " itr += 1\n",
+ "\n",
+ " plt.suptitle('Interface Erros', fontsize=14)\n",
+ " plt.show()\n",
+ "\n",
+ " return errors\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### OVS Stats (Non DPDK)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "rootdir = 'metrics_data/'\n",
+ "\n",
+ "def fetch_ovs_stats_data(rootdir):\n",
+ " df = pd.DataFrame()\n",
+ " reg_compile = re.compile(\"ovs_stats-.*\")\n",
+ " for dirpath, dirnames, filenames in os.walk(rootdir):\n",
+ " dirname = dirpath.split(os.sep)[-1] \n",
+ " if reg_compile.match(dirname):\n",
+ " if 'dpdk' in dirname:\n",
+ " continue #ignoring dpdk\n",
+ "\n",
+ " _df = pd.DataFrame()\n",
+ " for file in filenames:\n",
+ " if 'errors' in file:\n",
+ " col_name = '-'.join(file.split('_')[1:])\n",
+ " temp_df = pd.read_csv(dirpath + os.sep + file)\n",
+ "\n",
+ " _df['epoch'] = temp_df['epoch']\n",
+ " temp_df = temp_df.drop(['epoch'], axis=1)\n",
+ " new_cols = [i + '_' + col_name for i in temp_df.columns]\n",
+ " _df[new_cols] = temp_df\n",
+ "\n",
+ " if 'dropped' in file:\n",
+ " col_name = '-'.join(file.split('_')[1:])\n",
+ " temp_df = pd.read_csv(dirpath + os.sep + file)\n",
+ " _df['epoch'] = temp_df['epoch']\n",
+ " temp_df = temp_df.drop(['epoch'], axis=1)\n",
+ " new_cols = [i + '_' + col_name for i in temp_df.columns]\n",
+ " _df[new_cols] = temp_df \n",
+ " _df['interface'] = '-'.join(dirname.split('-')[1:])\n",
+ " df = df.append(_df, ignore_index=True)\n",
+ " return df\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def ovs_stats_dropped(rootdir, verbose=False):\n",
+ " \n",
+ " df = fetch_ovs_stats_data(rootdir)\n",
+ " group = df.groupby(['interface'])\n",
+ " color = ['oldlace', 'mistyrose']\n",
+ "\n",
+ " i = 0\n",
+ " for key, item in group:\n",
+ " curr_df = item\n",
+ " for col in curr_df:\n",
+ " if 'dropped' in col:\n",
+ " if item[col].isnull().all():\n",
+ " continue\n",
+ " fig = plt.figure(figsize=(24,6), facecolor=color[i%2], edgecolor='red')\n",
+ " plt.plot(item['epoch'], item[col])\n",
+ " plt.title(\"Interface: {} Dropped {}\".format(key, col))\n",
+ " i += 1\n",
+ " plt.show()\n",
+ " return\n",
+ "\n",
+ "\n",
+ "# Interface Errors (both type 1 and 2, i.e rx and tx)\n",
+ "def ovs_stats_errors(rootdir, verbose=False):\n",
+ "\n",
+ "\n",
+ " df = fetch_ovs_stats_data(rootdir)\n",
+ " group = df.groupby(['interface'])\n",
+ " color = ['oldlace', 'mistyrose']\n",
+ "\n",
+ " i = 0\n",
+ " for key, item in group:\n",
+ " curr_df = item\n",
+ " for col in curr_df:\n",
+ " if 'error' in col:\n",
+ " if item[col].isnull().all():\n",
+ " continue\n",
+ " fig = plt.figure(figsize=(24,6), facecolor=color[i%2], edgecolor='red')\n",
+ " plt.plot(item['epoch'], item[col])\n",
+ " plt.title(\"Interface: {} Errors {}\".format(key, col))\n",
+ " i += 1\n",
+ " plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### DPDK"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "rootdir = 'metrics_data/'\n",
+ "\n",
+ "def fetch_dpdk_data(rootdir):\n",
+ " df = pd.DataFrame()\n",
+ " reg_compile = re.compile(\".*dpdk.*\")\n",
+ " for dirpath, dirnames, filenames in os.walk(rootdir):\n",
+ " dirname = dirpath.split(os.sep)[-1] \n",
+ " if reg_compile.match(dirname):\n",
+ " _df = pd.DataFrame()\n",
+ " for file in filenames:\n",
+ " if 'errors' in file:\n",
+ " col_name = '-'.join(file.split('_')[1:])\n",
+ " temp_df = pd.read_csv(dirpath + os.sep + file)\n",
+ "\n",
+ " _df['epoch'] = temp_df['epoch']\n",
+ " temp_df = temp_df.drop(['epoch'], axis=1)\n",
+ " new_cols = [i + '_' + col_name for i in temp_df.columns]\n",
+ " _df[new_cols] = temp_df\n",
+ "\n",
+ " if 'dropped' in file:\n",
+ " col_name = '-'.join(file.split('_')[1:])\n",
+ " temp_df = pd.read_csv(dirpath + os.sep + file)\n",
+ " _df['epoch'] = temp_df['epoch']\n",
+ " temp_df = temp_df.drop(['epoch'], axis=1)\n",
+ " new_cols = [i + '_' + col_name for i in temp_df.columns]\n",
+ " _df[new_cols] = temp_df \n",
+ " _df['dpdk'] = '-'.join(dirname.split('-')[1:])\n",
+ " df = df.append(_df, ignore_index=True)\n",
+ " return df\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fetch_dpdk_data(rootdir)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def dpdk_dropped(rootdir, verbose=False):\n",
+ " \n",
+ " df = fetch_dpdk_data(rootdir)\n",
+ " group = df.groupby(['dpdk'])\n",
+ " color = ['oldlace', 'mistyrose']\n",
+ "\n",
+ " i = 0\n",
+ " for key, item in group:\n",
+ " curr_df = item\n",
+ " for col in curr_df:\n",
+ " if 'dropped' in col:\n",
+ " if item[col].isnull().all():\n",
+ " continue\n",
+ " fig = plt.figure(figsize=(24,6), facecolor=color[i%2], edgecolor='red')\n",
+ " plt.plot(item['epoch'], item[col])\n",
+ " plt.title(\"DpDK: {} Dropped {}\".format(key, col))\n",
+ " i += 1\n",
+ " plt.show()\n",
+ " return\n",
+ "\n",
+ "\n",
+ "# Interface Errors (both type 1 and 2, i.e rx and tx)\n",
+ "def dpdk_errors(rootdir, verbose=False):\n",
+ "\n",
+ "\n",
+ " df = fetch_dpdk_data(rootdir)\n",
+ " group = df.groupby(['dpdk'])\n",
+ " color = ['oldlace', 'mistyrose']\n",
+ "\n",
+ " i = 0\n",
+ " for key, item in group:\n",
+ " curr_df = item\n",
+ " for col in curr_df:\n",
+ " if 'error' in col:\n",
+ " if item[col].isnull().all():\n",
+ " continue\n",
+ " fig = plt.figure(figsize=(24,6), facecolor=color[i%2], edgecolor='red')\n",
+ " plt.plot(item['epoch'], item[col])\n",
+ " plt.title(\"DpDK: {} Errors {}\".format(key, col))\n",
+ " i += 1\n",
+ " plt.show()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "dpdk_dropped(rootdir)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### RDT (need to be testes)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "rootdir = 'metrics_data/'\n",
+ "\n",
+ "def fetch_rdt_data(rootdir):\n",
+ " df = pd.DataFrame()\n",
+ " reg_compile = re.compile(\".*rdt.*\")\n",
+ " for dirpath, dirnames, filenames in os.walk(rootdir):\n",
+ " dirname = dirpath.split(os.sep)[-1] \n",
+ " if reg_compile.match(dirname):\n",
+ " _df = pd.DataFrame()\n",
+ " for file in filenames:\n",
+ " if 'bytes' in file:\n",
+ " col_name = '-'.join(file.split('_')[1:])\n",
+ " temp_df = pd.read_csv(dirpath + os.sep + file)\n",
+ "\n",
+ " _df['epoch'] = temp_df['epoch']\n",
+ " temp_df = temp_df.drop(['epoch'], axis=1)\n",
+ " new_cols = [i + '_' + col_name for i in temp_df.columns]\n",
+ " _df[new_cols] = temp_df\n",
+ " \n",
+ " if 'bandwidth' in file:\n",
+ " col_name = '-'.join(file.split('_')[1:])\n",
+ " temp_df = pd.read_csv(dirpath + os.sep + file)\n",
+ "\n",
+ " _df['epoch'] = temp_df['epoch']\n",
+ " temp_df = temp_df.drop(['epoch'], axis=1)\n",
+ " new_cols = [i + '_' + col_name for i in temp_df.columns]\n",
+ " _df[new_cols] = temp_df\n",
+ "\n",
+ " if 'ipc' in file:\n",
+ " col_name = '-'.join(file.split('_')[1:])\n",
+ " temp_df = pd.read_csv(dirpath + os.sep + file)\n",
+ " _df['epoch'] = temp_df['epoch']\n",
+ " temp_df = temp_df.drop(['epoch'], axis=1)\n",
+ " new_cols = [i + '_' + col_name for i in temp_df.columns]\n",
+ " _df[new_cols] = temp_df \n",
+ " _df['intel_rdt'] = '-'.join(dirname.split('-')[1:])\n",
+ " df = df.append(_df, ignore_index=True)\n",
+ " return df\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# L3 cache bytes\n",
+ "def plot_rdt_bytes(start=None, end=None, node=None, steps='15s', csv=None, verbose=False):\n",
+ " \n",
+ " df = fetch_rdt_data(rootdir)\n",
+ " group = df.groupby(['intel_rdt'])\n",
+ " color = ['oldlace', 'mistyrose']\n",
+ "\n",
+ " i = 0\n",
+ " for key, item in group:\n",
+ " curr_df = item\n",
+ " for col in curr_df:\n",
+ " if 'bytes' in col:\n",
+ " if item[col].isnull().all():\n",
+ " continue\n",
+ " fig = plt.figure(figsize=(24,6), facecolor=color[i%2], edgecolor='red')\n",
+ " plt.plot(item['epoch'], item[col])\n",
+ " plt.title(\"RDT BYTES, RDT: {}\".format(key, col))\n",
+ " i += 1\n",
+ " plt.show()\n",
+ "\n",
+ "\n",
+ "# L3 IPC values\n",
+ "def plot_rdt_ipc(start=None, end=None, node=None, steps='15s', csv=None, verbose=False):\n",
+ " \n",
+ " \n",
+ " df = fetch_rdt_data(rootdir)\n",
+ " group = df.groupby(['intel_rdt'])\n",
+ " color = ['oldlace', 'mistyrose']\n",
+ "\n",
+ " i = 0\n",
+ " for key, item in group:\n",
+ " curr_df = item\n",
+ " for col in curr_df:\n",
+ " if 'ipc' in col:\n",
+ " if item[col].isnull().all():\n",
+ " continue\n",
+ " fig = plt.figure(figsize=(24,6), facecolor=color[i%2], edgecolor='red')\n",
+ " plt.plot(item['epoch'], item[col])\n",
+ " plt.title(\"RDT IPC, RDT: {}\".format(key, col))\n",
+ " i += 1\n",
+ " plt.show()\n",
+ "\n",
+ "\n",
+ "\n",
+ "# memeory bandwidtdh\n",
+ "def get_rdt_memory_bandwidth(start=None, end=None, node=None, steps='15s', csv=None, verbose=False):\n",
+ " \n",
+ " \n",
+ " df = fetch_rdt_data(rootdir)\n",
+ " group = df.groupby(['intel_rdt'])\n",
+ " color = ['oldlace', 'mistyrose']\n",
+ "\n",
+ " i = 0\n",
+ " for key, item in group:\n",
+ " curr_df = item\n",
+ " for col in curr_df:\n",
+ " if 'bandwidht' in col:\n",
+ " if item[col].isnull().all():\n",
+ " continue\n",
+ " fig = plt.figure(figsize=(24,6), facecolor=color[i%2], edgecolor='red')\n",
+ " plt.plot(item['epoch'], item[col])\n",
+ " plt.title(\"RDT Memory Bandwidht, RDT: {}\".format(key, col))\n",
+ " i += 1\n",
+ " plt.show()\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### Memory (following functions still need to written for csv)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "rootdir = 'metrics_data/'\n",
+ "\n",
+ "def fetch_memory_data(rootdir):\n",
+ " df = pd.DataFrame()\n",
+ " reg_compile = re.compile(\"memory\")\n",
+ " for dirpath, dirnames, filenames in os.walk(rootdir):\n",
+ " dirname = dirpath.split(os.sep)[-1] \n",
+ " if reg_compile.match(dirname):\n",
+ " print(dirname)\n",
+ " _df = pd.DataFrame()\n",
+ " for file in filenames: \n",
+ " col_name = file.split('-')[1]\n",
+ " temp_df = pd.read_csv(dirpath + os.sep + file)\n",
+ " _df['epoch'] = temp_df['epoch']\n",
+ " temp_df = temp_df.drop(['epoch'], axis=1)\n",
+ " new_cols = [col_name for i in temp_df.columns]\n",
+ " _df[new_cols] = temp_df\n",
+ " df = df.append(_df, ignore_index=True)\n",
+ " return df"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "scrolled": true
+ },
+ "outputs": [],
+ "source": [
+ "def get_memory_usage(rootdir, verbose=False):\n",
+ " df = fetch_memory_data(rootdir)\n",
+ " color = ['oldlace', 'mistyrose']\n",
+ " i = 0\n",
+ " for col in df:\n",
+ " if df[col].isnull().all():\n",
+ " continue\n",
+ " fig = plt.figure(figsize=(24,6), facecolor=color[i%2], edgecolor='red')\n",
+ " plt.plot(df['epoch'], df[col])\n",
+ " plt.title(\"{} Memory\".format(col))\n",
+ " i += 1\n",
+ " plt.show()\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ " "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Usage / Examples\n",
+ "\n",
+ "\n",
+ "##### CPU \n",
+ "\n",
+ "- For calling cpu unsued cores\n",
+ "\n",
+ "```py\n",
+ "cores = unused_cores(rootdir='metrics_data')\n",
+ "```\n",
+ "\n",
+ "- For finding fully used cores\n",
+ "\n",
+ "```py\n",
+ "fully_used = fully_used_cores('metrics_data')\n",
+ "```\n",
+ "\n",
+ "- Similarly for plotting used cores\n",
+ "\n",
+ "```py\n",
+ "plot_used_cores(csv='metrics_data')\n",
+ "```\n",
+ "\n",
+ "\n",
+ "##### Interface\n",
+ "\n",
+ "- Interface Dropped \n",
+ "\n",
+ "```py\n",
+ "# Using CSV\n",
+ "dropped_interfaces = interface_dropped('metrics_data')\n",
+ "```\n",
+ "\n",
+ "- Interface Errors\n",
+ "\n",
+ "```py\n",
+ "# Using CSV\n",
+ "interface_errors('metrics_data')\n",
+ "```\n",
+ "\n",
+ "##### OVS Stats\n",
+ "\n",
+ "- OVS Stats Dropped \n",
+ "\n",
+ "```py\n",
+ "# Using CSV\n",
+ "ovs_stats_dropped('metrics_data')\n",
+ "```\n",
+ "\n",
+ "- OVS Stats Errors\n",
+ "\n",
+ "```py\n",
+ "# Using CSV\n",
+ "ovs_stats_errors('metrics_data')\n",
+ "```\n",
+ "\n",
+ "##### DPDK \n",
+ "\n",
+ "- DPDK Dropped \n",
+ "\n",
+ "```py\n",
+ "# Using CSV\n",
+ "dpdk_dropped('metrics_data')\n",
+ "```\n",
+ "\n",
+ "- DPDK Errors\n",
+ "\n",
+ "```py\n",
+ "# Using CSV\n",
+ "dpdk_errors('metrics_data')\n",
+ "```\n",
+ "\n",
+ "\n",
+ "\n",
+ "##### RDT (Do not run yet)\n",
+ "\n",
+ "- Plot bytes\n",
+ "\n",
+ "```py\n",
+ "#csv\n",
+ "plot_rdt_bytes('metrics_data')\n",
+ "```\n",
+ "\n",
+ "- Plot ipc values\n",
+ "\n",
+ "```py\n",
+ "#csv\n",
+ "plot_rdt_ipc('metrics_data')\n",
+ "```\n",
+ "\n",
+ "- Memory bandwidth\n",
+ "\n",
+ "```py\n",
+ "#csv\n",
+ "get_rdt_memory_bandwidth('metrics_data')\n",
+ "```\n",
+ "\n",
+ "##### Memory\n",
+ "\n",
+ "```py\n",
+ "#csv\n",
+ "get_memory_usage('metrics_data')\n",
+ "```"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.6.8"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
diff --git a/tools/lma/yamllintrc b/tools/lma/yamllintrc
new file mode 100644
index 00000000..9714a565
--- /dev/null
+++ b/tools/lma/yamllintrc
@@ -0,0 +1,25 @@
+# Copyright 2020 Tieto
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+extends: relaxed
+
+rules:
+ empty-lines:
+ max-start: 1
+ max-end: 1
+ colons:
+ max-spaces-after: 1
+ max-spaces-before: 1
+ line-length:
+ max: 250
diff --git a/tools/load_gen/stressorvm/stressor_vm.py b/tools/load_gen/stressorvm/stressor_vm.py
index f4936743..82329d2b 100644
--- a/tools/load_gen/stressorvm/stressor_vm.py
+++ b/tools/load_gen/stressorvm/stressor_vm.py
@@ -16,8 +16,12 @@
Wrapper file to create and manage Stressor-VM as loadgen
"""
+import locale
import logging
import os
+import re
+import subprocess
+import time
from tools import tasks
from tools.load_gen.load_gen import ILoadGenerator
from conf import settings as S
@@ -90,8 +94,42 @@ class QemuVM(tasks.Process):
'Removing content of shared directory...', True)
self._running = False
+ def affinitize_nn(self):
+ """
+ Affinitize the SMP cores of a NN instance.
+ This function is the same as the one in vnfs/qemu/qemu.py
+
+ :returns: None
+ """
+ thread_id = (r'.* CPU #%d: .* thread_id=(\d+)')
+ cur_locale = locale.getdefaultlocale()[1]
+ proc = subprocess.Popen(
+ ('echo', 'info cpus'), stdout=subprocess.PIPE)
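+ # wait for the QEMU monitor socket to appear, then query the vCPU -> thread id mapping over it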
+ while not os.path.exists(self._monitor):
+ time.sleep(1)
+ output = subprocess.check_output(
+ ('sudo', 'socat', '-', 'UNIX-CONNECT:%s' % self._monitor),
+ stdin=proc.stdout)
+ proc.wait()
+
+ # calculate the number of CPUs specified by NN_SMP
+ cpu_nr = int(S.getValue('NN_SMP')[self._number])
+ # pin each NN's core to host core based on configured BINDING
+ for cpu in range(0, cpu_nr):
+ match = None
+ guest_thread_binding = S.getValue('NN_CORE_BINDING')[self._number]
+ for line in output.decode(cur_locale).split('\n'):
+ match = re.search(thread_id % cpu, line)
+ if match:
+ self._affinitize_pid(guest_thread_binding[cpu],
+ match.group(1))
+ break
+ if not match:
+ self._logger.error('Failed to affinitize guest core #%d. Could'
+ ' not parse tid.', cpu)
+
-# pylint: disable=super-init-not-called
+# pylint: disable=super-init-not-called,unused-argument
class StressorVM(ILoadGenerator):
"""
Wrapper Class for Load-Generation through stressor-vm
@@ -107,6 +145,7 @@ class StressorVM(ILoadGenerator):
"""
for nvm in self.qvm_list:
nvm.start()
+ nvm.affinitize_nn()
def kill(self, signal='-9', sleep=2):
"""
diff --git a/tools/md-testvnf/config.json b/tools/md-testvnf/config.json
new file mode 100644
index 00000000..fcfbf0cc
--- /dev/null
+++ b/tools/md-testvnf/config.json
@@ -0,0 +1,11 @@
+{
+ "identity_endpoint": "",
+ "username": "",
+ "password": "",
+ "networks" : "",
+ "source_image": "",
+ "flavor": "",
+ "domain_name": "",
+ "floating_ip_network" : "",
+
+ "ssh_path": ""
+}
diff --git a/tools/md-testvnf/http/ks.cfg b/tools/md-testvnf/http/ks.cfg
new file mode 100644
index 00000000..46aa3310
--- /dev/null
+++ b/tools/md-testvnf/http/ks.cfg
@@ -0,0 +1,88 @@
+install
+cdrom
+lang en_US.UTF-8
+keyboard us
+network --bootproto=dhcp
+rootpw centos
+firewall --disabled
+selinux --permissive
+timezone UTC
+unsupported_hardware
+bootloader --location=mbr
+text
+skipx
+zerombr
+clearpart --all --initlabel
+part / --fstype="ext4" --grow --size=100
+auth --enableshadow --passalgo=sha512 --kickstart
+firstboot --disabled
+reboot
+services --disabled kdump
+user --name=centos --plaintext --password centos
+url --url=http://centos.osuosl.org/7.8.2003/os/x86_64
+repo --name=updates --baseurl=http://centos.osuosl.org/7.8.2003/updates/x86_64
+
+%packages --nobase --ignoremissing
+openssh-clients
+sudo
+wget
+nfs-utils
+net-tools
+perl-libwww-perl
+bzip2
+vim
+rsync
+man
+man-pages
+parted
+-fprintd-pam
+-intltool
+
+# unnecessary firmware
+-aic94xx-firmware
+-atmel-firmware
+-b43-openfwwf
+-bfa-firmware
+-ipw2100-firmware
+-ipw2200-firmware
+-ivtv-firmware
+-iwl1000-firmware
+-iwl100-firmware
+-iwl105-firmware
+-iwl135-firmware
+-iwl2000-firmware
+-iwl2030-firmware
+-iwl3160-firmware
+-iwl3945-firmware
+-iwl4965-firmware
+-iwl5000-firmware
+-iwl5150-firmware
+-iwl6000-firmware
+-iwl6000g2a-firmware
+-iwl6000g2b-firmware
+-iwl6050-firmware
+-iwl7260-firmware
+-libertas-sd8686-firmware
+-libertas-sd8787-firmware
+-libertas-usb8388-firmware
+-ql2100-firmware
+-ql2200-firmware
+-ql23xx-firmware
+-ql2400-firmware
+-ql2500-firmware
+-rt61pci-firmware
+-rt73usb-firmware
+-xorg-x11-drv-ati-firmware
+-zd1211-firmware
+%end
+
+%post
+yum -y upgrade
+# update root certs
+wget https://raw.githubusercontent.com/bagder/curl/master/lib/mk-ca-bundle.pl
+perl mk-ca-bundle.pl /etc/pki/tls/certs/ca-bundle.crt
+rm certdata.txt mk-ca-bundle.pl
+# sudo
+echo "%centos ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers.d/centos
+sed -i "s/^.*requiretty/#Defaults requiretty/" /etc/sudoers
+%end \ No newline at end of file
diff --git a/tools/md-testvnf/playbook.yml b/tools/md-testvnf/playbook.yml
new file mode 100644
index 00000000..81a51f5e
--- /dev/null
+++ b/tools/md-testvnf/playbook.yml
@@ -0,0 +1,36 @@
+---
+- hosts: all
+ vars:
+ username: "testvnf"
+ password: "testvnf"
+ become: true
+ tasks:
+
+ - name: create a new user
+ user:
+ name: "{{ username }}"
+ state: present
+ groups: "wheel"
+ password: "{{ password | password_hash('sha512') }}"
+ comment: "user for ansible connection"
+
+ - lineinfile:
+ path: /etc/sudoers
+ state: present
+ regexp: '^%wheel'
+ line: '%wheel ALL=(ALL) NOPASSWD: ALL'
+ validate: 'visudo -cf %s'
+
+ - name: Create the temp file if it does not exist
+ file:
+ path: "temp"
+ state: touch
+
+
+ - name: install epel-release
+ package:
+ name: epel-release
+ state: present
+
+ - name: Execute the deployment script
+ command: /home/centos/deploycentostools.sh deploy \ No newline at end of file
diff --git a/tools/md-testvnf/scripts/ansible.sh b/tools/md-testvnf/scripts/ansible.sh
new file mode 100644
index 00000000..770e2483
--- /dev/null
+++ b/tools/md-testvnf/scripts/ansible.sh
@@ -0,0 +1,7 @@
+#!/bin/bash -eux
+yum -y update
+# Install EPEL repository.
+yum -y install epel-release
+
+# Install Ansible.
+yum -y install ansible \ No newline at end of file
diff --git a/tools/md-testvnf/scripts/deploycentostools.sh b/tools/md-testvnf/scripts/deploycentostools.sh
new file mode 100755
index 00000000..694b020c
--- /dev/null
+++ b/tools/md-testvnf/scripts/deploycentostools.sh
@@ -0,0 +1,364 @@
+#!/usr/bin/env bash
+##
+## Copyright (c) 2010-2020 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+# Directory for package build
+BUILD_DIR="/opt/rapid"
+TREX_DIR="/opt/trex"
+RAMSPEED_DIR="/opt/ramspeed"
+STRESSNG_DIR="/opt/stressng"
+UNIXBENCH_DIR="/opt/unixbench"
+DPDK_VERSION="20.05"
+PROX_COMMIT="80dfeb5c734cc4d681f467e853a541a8a91fe1cf"
+PROX_CHECKOUT="git checkout ${PROX_COMMIT}"
+## The next line overrides PROX_COMMIT and replaces the version with a very specific patch. It should stay commented out
+## if you want to use a committed version of PROX with the COMMIT ID specified above
+## As an example: Following line has the commit for testing IMIX, IPV6, ... It is the merge of all PROX commits on May 27th 2020
+#PROX_CHECKOUT="git fetch \"https://gerrit.opnfv.org/gerrit/samplevnf\" refs/changes/23/70223/1 && git checkout FETCH_HEAD"
+MULTI_BUFFER_LIB_VER="0.52"
+export RTE_SDK="${BUILD_DIR}/dpdk-${DPDK_VERSION}"
+export RTE_TARGET="x86_64-native-linuxapp-gcc"
+
+# By default, do not update OS
+OS_UPDATE="n"
+# By default, assuming that we are in the VM
+K8S_ENV="n"
+
+# If already running from root, no need for sudo
+SUDO=""
+[ $(id -u) -ne 0 ] && SUDO="sudo"
+
+function os_pkgs_install()
+{
+ ${SUDO} yum install -y deltarpm yum-utils
+
+ # NASM repository for AESNI MB library
+ #${SUDO} yum-config-manager --add-repo http://www.nasm.us/nasm.repo
+
+ [ "${OS_UPDATE}" == "y" ] && ${SUDO} yum update -y
+ ${SUDO} yum install -y git wget gcc unzip libpcap-devel ncurses-devel \
+ libedit-devel lua-devel kernel-devel iperf3 pciutils \
+ numactl-devel vim tuna openssl-devel wireshark \
+ make driverctl
+
+ ${SUDO} wget https://www.nasm.us/pub/nasm/releasebuilds/2.14.02/linux/nasm-2.14.02-0.fc27.x86_64.rpm
+ ${SUDO} rpm -ivh nasm-2.14.02-0.fc27.x86_64.rpm
+}
+
+function k8s_os_pkgs_runtime_install()
+{
+ [ "${OS_UPDATE}" == "y" ] && ${SUDO} yum update -y
+
+ # Install required dynamically linked libraries + required packages
+ ${SUDO} yum install -y numactl-libs libpcap openssh openssh-server \
+ openssh-clients sudo
+}
+
+function os_cfg()
+{
+ # huge pages to be used by DPDK
+ ${SUDO} sh -c '(echo "vm.nr_hugepages = 1024") > /etc/sysctl.conf'
+
+ ${SUDO} sh -c '(echo "options vfio enable_unsafe_noiommu_mode=1") > /etc/modprobe.d/vfio.conf'
+ ${SUDO} sh -c '(echo "vfio") > /etc/modules-load.d/vfio.conf'
+ ${SUDO} sh -c '(echo "vfio-pci") > /etc/modules-load.d/vfio.conf'
+ # Enabling tuned with the realtime-virtual-guest profile
+ pushd ${BUILD_DIR} > /dev/null 2>&1
+ wget http://linuxsoft.cern.ch/cern/centos/7/rt/x86_64/Packages/tuned-profiles-realtime-2.8.0-5.el7_4.2.noarch.rpm
+ wget http://linuxsoft.cern.ch/cern/centos/7/rt/x86_64/Packages/tuned-profiles-nfv-guest-2.8.0-5.el7_4.2.noarch.rpm
+ # Install with --nodeps. The latest CentOS cloud images come with a tuned version higher than 2.8. These 2 packages however
+ # do not depend on v2.8 and also work with tuned 2.9. Need to be careful in the future
+ ${SUDO} rpm -ivh ${BUILD_DIR}/tuned-profiles-realtime-2.8.0-5.el7_4.2.noarch.rpm --nodeps
+ ${SUDO} rpm -ivh ${BUILD_DIR}/tuned-profiles-nfv-guest-2.8.0-5.el7_4.2.noarch.rpm --nodeps
+ # Although we do not know how many cores the VM will have when being deployed for real testing, we already put a number for the
+ # isolated CPUs so we can start the realtime-virtual-guest profile. If we don't, that command will fail.
+ # When the VM will be instantiated, the check_kernel_params service will check for the real number of cores available to this VM
+ # and update the realtime-virtual-guest-variables.conf accordingly.
+ echo "isolated_cores=1-3" | ${SUDO} tee -a /etc/tuned/realtime-virtual-guest-variables.conf
+ ${SUDO} tuned-adm profile realtime-virtual-guest
+
+ # Install the check_tuned_params service to make sure that the grub cmd line has the right cpus in isolcpu. The actual number of cpu's
+ # assigned to this VM depends on the flavor used. We don't know at this time what that will be.
+ ${SUDO} chmod +x ${BUILD_DIR}/check_prox_system_setup.sh
+ ${SUDO} mv ${BUILD_DIR}/check_prox_system_setup.sh /usr/local/libexec/
+ ${SUDO} mv ${BUILD_DIR}/check-prox-system-setup.service /etc/systemd/system/
+ ${SUDO} systemctl daemon-reload
+ ${SUDO} systemctl enable check-prox-system-setup.service
+ popd > /dev/null 2>&1
+}
+
+function k8s_os_cfg()
+{
+ [ ! -f /etc/ssh/ssh_host_rsa_key ] && ssh-keygen -t rsa -f /etc/ssh/ssh_host_rsa_key -N ''
+ [ ! -f /etc/ssh/ssh_host_ecdsa_key ] && ssh-keygen -t ecdsa -f /etc/ssh/ssh_host_ecdsa_key -N ''
+ [ ! -f /etc/ssh/ssh_host_ed25519_key ] && ssh-keygen -t ed25519 -f /etc/ssh/ssh_host_ed25519_key -N ''
+
+ [ ! -d /var/run/sshd ] && mkdir -p /var/run/sshd
+
+ USER_NAME="centos"
+ USER_PWD="centos"
+
+ useradd -m -d /home/${USER_NAME} -s /bin/bash -U ${USER_NAME}
+ echo "${USER_NAME}:${USER_PWD}" | chpasswd
+ usermod -aG wheel ${USER_NAME}
+
+ echo "%wheel ALL=(ALL) NOPASSWD: ALL" > /etc/sudoers.d/wheelnopass
+}
+
+function mblib_install()
+{
+ export AESNI_MULTI_BUFFER_LIB_PATH="${BUILD_DIR}/intel-ipsec-mb-${MULTI_BUFFER_LIB_VER}"
+
+ # Downloading the Multi-buffer library. Note that the version to download is linked to the DPDK version being used
+ pushd ${BUILD_DIR} > /dev/null 2>&1
+ wget https://github.com/01org/intel-ipsec-mb/archive/v${MULTI_BUFFER_LIB_VER}.zip
+ unzip v${MULTI_BUFFER_LIB_VER}.zip
+ pushd ${AESNI_MULTI_BUFFER_LIB_PATH}
+ make -j`getconf _NPROCESSORS_ONLN`
+ ${SUDO} make install
+ popd > /dev/null 2>&1
+ popd > /dev/null 2>&1
+}
+
+function trex_install()
+{
+ pushd ${TREX_DIR} > /dev/null 2>&1
+ wget --no-cache https://trex-tgen.cisco.com/trex/release/latest
+ tar -xzvf latest
+ popd > /dev/null 2>&1
+}
+
+function unixbench_install()
+{
+ pushd ${UNIXBENCH_DIR} > /dev/null 2>&1
+ git clone https://github.com/kdlucas/byte-unixbench
+ popd > /dev/null 2>&1
+}
+
+function ramspeed_install()
+{
+ RAMSPEED_BUILD_DIR="${RAMSPEED_DIR}/ramspeed-smp"
+ pushd ${RAMSPEED_DIR} > /dev/null 2>&1
+ git clone https://github.com/cruvolo/ramspeed-smp
+ pushd ${RAMSPEED_BUILD_DIR} > /dev/null 2>&1
+ chmod 766 build.sh
+ source build.sh
+ popd > /dev/null 2>&1
+ popd > /dev/null 2>&1
+}
+
+function collectd_install()
+{
+ ${SUDO} yum -y install collectd
+}
+
+function fio_install()
+{
+ ${SUDO} yum -y install fio
+}
+
+function stressng_install()
+{
+ STRESSNG_BUILD_DIR="${STRESSNG_DIR}/stress-ng"
+ pushd ${STRESSNG_DIR} > /dev/null 2>&1
+ git clone https://github.com/ColinIanKing/stress-ng
+ ${SUDO} yum -y install libaio-devel libbsd-devel libcap-devel libattr-devel libgcrypt-devel
+ ${SUDO} yum -y install Judy-devel keyutils-libs-devel lksctp-tools-devel libatomic zlib-devel
+ pushd ${STRESSNG_BUILD_DIR} > /dev/null 2>&1
+ make clean
+ make
+ ${SUDO} make install
+ popd > /dev/null 2>&1
+ popd > /dev/null 2>&1
+}
+
+function dpdk_install()
+{
+ # Build DPDK for the latest kernel installed
+ LATEST_KERNEL_INSTALLED=`ls -v1 /lib/modules/ | tail -1`
+ export RTE_KERNELDIR="/lib/modules/${LATEST_KERNEL_INSTALLED}/build"
+
+ # Get and compile DPDK
+ pushd ${BUILD_DIR} > /dev/null 2>&1
+ wget http://fast.dpdk.org/rel/dpdk-${DPDK_VERSION}.tar.xz
+ tar -xf ./dpdk-${DPDK_VERSION}.tar.xz
+ popd > /dev/null 2>&1
+
+ ${SUDO} ln -s ${RTE_SDK} ${BUILD_DIR}/dpdk
+
+ pushd ${RTE_SDK} > /dev/null 2>&1
+ make config T=${RTE_TARGET}
+ # Starting from DPDK 20.05, the IGB_UIO driver is not compiled by default.
+ # Uncomment the sed command to enable the driver compilation
+ #${SUDO} sed -i 's/CONFIG_RTE_EAL_IGB_UIO=n/CONFIG_RTE_EAL_IGB_UIO=y/' ${RTE_SDK}/build/.config
+
+ # For Kubernetes environment we use host vfio module
+ if [ "${K8S_ENV}" == "y" ]; then
+ sed -i 's/CONFIG_RTE_EAL_IGB_UIO=y/CONFIG_RTE_EAL_IGB_UIO=n/g' ${RTE_SDK}/build/.config
+ sed -i 's/CONFIG_RTE_LIBRTE_KNI=y/CONFIG_RTE_LIBRTE_KNI=n/g' ${RTE_SDK}/build/.config
+ sed -i 's/CONFIG_RTE_KNI_KMOD=y/CONFIG_RTE_KNI_KMOD=n/g' ${RTE_SDK}/build/.config
+ fi
+
+ # Compile with MB library
+ sed -i '/CONFIG_RTE_LIBRTE_PMD_AESNI_MB=n/c\CONFIG_RTE_LIBRTE_PMD_AESNI_MB=y' ${RTE_SDK}/build/.config
+ make -j`getconf _NPROCESSORS_ONLN`
+ ln -s ${RTE_SDK}/build ${RTE_SDK}/${RTE_TARGET}
+ popd > /dev/null 2>&1
+}
+
+function prox_compile()
+{
+ # Compile PROX
+ pushd ${BUILD_DIR}/samplevnf/VNFs/DPPD-PROX
+ make -j`getconf _NPROCESSORS_ONLN`
+ ${SUDO} cp ${BUILD_DIR}/samplevnf/VNFs/DPPD-PROX/build/app/prox ${BUILD_DIR}/prox
+ popd > /dev/null 2>&1
+}
+
+function prox_install()
+{
+ # Clone and compile PROX
+ pushd ${BUILD_DIR} > /dev/null 2>&1
+ git clone https://git.opnfv.org/samplevnf
+ pushd ${BUILD_DIR}/samplevnf/VNFs/DPPD-PROX > /dev/null 2>&1
+ bash -c "${PROX_CHECKOUT}"
+ popd > /dev/null 2>&1
+ prox_compile
+ popd > /dev/null 2>&1
+}
+
+function port_info_build()
+{
+ [ ! -d ${BUILD_DIR}/port_info ] && echo "Skipping port_info compilation..." && return
+
+ pushd ${BUILD_DIR}/port_info > /dev/null 2>&1
+ make
+ ${SUDO} cp ${BUILD_DIR}/port_info/build/app/port_info_app ${BUILD_DIR}/port_info_app
+ popd > /dev/null 2>&1
+}
+
+function create_minimal_install()
+{
+ ldd ${BUILD_DIR}/prox | awk '{ if ($(NF-1) != "=>") print $(NF-1) }' >> ${BUILD_DIR}/list_of_install_components
+
+ echo "${BUILD_DIR}/prox" >> ${BUILD_DIR}/list_of_install_components
+ echo "${BUILD_DIR}/port_info_app" >> ${BUILD_DIR}/list_of_install_components
+
+ tar -czvhf ${BUILD_DIR}/install_components.tgz -T ${BUILD_DIR}/list_of_install_components
+}
+
+function cleanup()
+{
+ ${SUDO} yum autoremove -y
+ ${SUDO} yum clean all
+ ${SUDO} rm -rf /var/cache/yum
+}
+
+function k8s_runtime_image()
+{
+ k8s_os_pkgs_runtime_install
+ k8s_os_cfg
+ cleanup
+
+ pushd / > /dev/null 2>&1
+ tar -xvf ${BUILD_DIR}/install_components.tgz --skip-old-files
+ popd > /dev/null 2>&1
+
+ ldconfig
+
+ #rm -rf ${BUILD_DIR}/install_components.tgz
+}
+
+function print_usage()
+{
+ echo "Usage: ${0} [OPTIONS] [COMMAND]"
+ echo "Options:"
+ echo " -u, --update Full OS update"
+ echo " -k, --kubernetes Build for Kubernetes environment"
+ echo "Commands:"
+ echo " deploy Run through all deployment steps"
+ echo " compile PROX compile only"
+ echo " runtime_image Apply runtime configuration only"
+}
+
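+# Illustrative invocations:
+#   ./deploycentostools.sh -u deploy          # full OS update, then run all deployment steps
+#   ./deploycentostools.sh -k runtime_image   # runtime configuration for a Kubernetes image
+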
+COMMAND=""
+# Parse options and commands
+for opt in "$@"; do
+ case ${opt} in
+ -u|--update)
+ echo 'Full OS update will be done!'
+ OS_UPDATE="y"
+ ;;
+ -k|--kubernetes)
+ echo "Kubernetes environment is set!"
+ K8S_ENV="y"
+ ;;
+ compile)
+ COMMAND="compile"
+ ;;
+ runtime_image)
+ COMMAND="runtime_image"
+ ;;
+ deploy)
+ COMMAND="deploy"
+ ;;
+ *)
+ echo "Unknown option/command ${opt}"
+ print_usage
+ exit 1
+ ;;
+ esac
+done
+
+if [ "${COMMAND}" == "compile" ]; then
+ echo "PROX compile only..."
+ prox_compile
+elif [ "${COMMAND}" == "runtime_image" ]; then
+ echo "Runtime image intallation and configuration..."
+ k8s_runtime_image
+elif [ "${COMMAND}" == "deploy" ]; then
+ [ ! -d ${BUILD_DIR} ] && ${SUDO} mkdir -p ${BUILD_DIR}
+ ${SUDO} chmod 0777 ${BUILD_DIR}
+
+ os_pkgs_install
+
+ if [ "${K8S_ENV}" == "y" ]; then
+ k8s_os_cfg
+ else
+ os_cfg
+ fi
+
+ mblib_install
+ dpdk_install
+ prox_install
+ trex_install
+ collectd_install
+ stressng_install
+ fio_install
+ unixbench_install
+ ramspeed_install
+
+ if [ "${K8S_ENV}" == "y" ]; then
+ port_info_build
+ create_minimal_install
+ fi
+
+ cleanup
+else
+ print_usage
+fi
diff --git a/tools/md-testvnf/scripts/sshConfig.sh b/tools/md-testvnf/scripts/sshConfig.sh
new file mode 100644
index 00000000..b746cde6
--- /dev/null
+++ b/tools/md-testvnf/scripts/sshConfig.sh
@@ -0,0 +1,10 @@
+#!/bin/bash -eux
+sudo mv temp /home/testvnf/authorized_keys
+sudo mkdir /home/testvnf/.ssh
+sudo mv /home/testvnf/authorized_keys /home/testvnf/.ssh/
+sudo chmod 700 /home/testvnf/.ssh
+sudo chmod 600 /home/testvnf/.ssh/authorized_keys
+sudo chown testvnf /home/testvnf/.ssh
+sudo chown testvnf /home/testvnf/.ssh/authorized_keys
+# Add `sync` so Packer doesn't quit too early, before the large file is deleted.
+sync \ No newline at end of file
diff --git a/tools/md-testvnf/testVNF_image.json b/tools/md-testvnf/testVNF_image.json
new file mode 100644
index 00000000..2b27a28a
--- /dev/null
+++ b/tools/md-testvnf/testVNF_image.json
@@ -0,0 +1,72 @@
+{
+ "builders": [
+ {
+ "boot_command": [
+ "<tab> text biosdevname=0 net.ifnames=0 ",
+ "ks=http://{{ .HTTPIP }}:{{ .HTTPPort }}/ks.cfg<enter><wait>"
+ ],
+ "accelerator": "kvm",
+ "boot_wait": "10s",
+ "disk_size": 2048,
+ "disk_interface": "virtio-scsi",
+ "http_directory": "http",
+ "iso_checksum": "101bc813d2af9ccf534d112cbe8670e6d900425b297d1a4d2529c5ad5f226372",
+ "iso_checksum_type": "sha256",
+ "iso_url": "http://centos.osuosl.org/7.8.2003/isos/x86_64/CentOS-7-x86_64-NetInstall-2003.iso",
+ "output_directory": "image",
+ "qemuargs": [ [ "-m", "1024M" ]],
+ "shutdown_command": "echo 'centos'|sudo -S /sbin/halt -h -p",
+ "ssh_password": "centos",
+ "ssh_port": 22,
+ "ssh_username": "centos",
+ "ssh_wait_timeout": "10000s",
+ "type": "qemu",
+ "vm_name": "packer-centos-7-x86_64-openstack",
+ "vnc_bind_address": "0.0.0.0",
+ "Headless": "true"
+ },
+ {
+ "name": "openstack",
+ "type": "openstack",
+ "image_name": "testvnf_image",
+ "identity_endpoint": "{{user `identiy_endpoint`}}",
+ "username": "{{user `username`}}",
+ "password": "{{user `password`}}",
+ "ssh_username": "centos",
+ "networks" : "{{user `networks`}}",
+ "source_image": "{{user `source_image`}}",
+ "flavor": "{{user `flavor`}}",
+ "domain_name": "{{user `domain_name`}}",
+ "ssh_timeout": "15m",
+ "use_floating_ip": "true",
+ "floating_ip_network" : "{{user `floating_ip_network`}}"
+ }
+],
+ "provisioners": [
+ {
+ "type": "shell",
+ "execute_command": "echo testvnf | {{.Vars}} sudo -S -E bash '{{.Path}}'",
+ "script": "scripts/ansible.sh"
+ },
+ {
+ "type": "file",
+ "source": "scripts/deploycentostools.sh",
+ "destination": "deploycentostools.sh"
+ },
+ {
+ "type": "ansible-local",
+ "playbook_file": "playbook.yml"
+ },
+ {
+ "type": "file",
+ "source": "{{user `ssh_path`}}",
+ "destination": "temp"
+ },
+ {
+ "type": "shell",
+ "execute_command": "echo testvnf | {{.Vars}} sudo -S -E bash '{{.Path}}'",
+ "script": "scripts/sshConfig.sh"
+ }
+ ]
+}
+
diff --git a/tools/os_deploy_tgen/__init__.py b/tools/os_deploy_tgen/__init__.py
new file mode 100644
index 00000000..1b2d5ea6
--- /dev/null
+++ b/tools/os_deploy_tgen/__init__.py
@@ -0,0 +1,17 @@
+# Copyright 2020 Spirent Communications.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Package to deploy Traffic-generator in Openstack
+"""
diff --git a/tools/os_deploy_tgen/osclients/__init__.py b/tools/os_deploy_tgen/osclients/__init__.py
new file mode 100644
index 00000000..e73a36c9
--- /dev/null
+++ b/tools/os_deploy_tgen/osclients/__init__.py
@@ -0,0 +1,17 @@
+# Copyright 2020 Spirent Communications.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Openstack Client
+"""
diff --git a/tools/os_deploy_tgen/osclients/glance.py b/tools/os_deploy_tgen/osclients/glance.py
new file mode 100644
index 00000000..f59f0d8d
--- /dev/null
+++ b/tools/os_deploy_tgen/osclients/glance.py
@@ -0,0 +1,34 @@
+# Copyright (c) 2020 Mirantis Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Glance Client
+"""
+
+def get_image(glance_client, image_name):
+ """
+ Get the image by name
+ """
+ for image in glance_client.images.list():
+ if image.name == image_name:
+ return image
+ return None
+
+
+def get_supported_versions(glance_client):
+ """
+ Get Supported Version
+ """
+ return set(version['id'] for version in glance_client.versions.list())
diff --git a/tools/os_deploy_tgen/osclients/heat.py b/tools/os_deploy_tgen/osclients/heat.py
new file mode 100755
index 00000000..8681731b
--- /dev/null
+++ b/tools/os_deploy_tgen/osclients/heat.py
@@ -0,0 +1,156 @@
+# Copyright 2020 Mirantis Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Heat Client
+"""
+
+#import sys
+import time
+
+from heatclient import exc
+from oslo_log import log as logging
+from timeout_decorator import timeout
+
+LOG = logging.getLogger(__name__)
+
+
+def create_stack(heat_client, stack_name, template, parameters,
+ environment=None):
+ """
+ Create Stack
+ """
+ stack_params = {
+ 'stack_name': stack_name,
+ 'template': template,
+ 'parameters': parameters,
+ 'environment': environment,
+ }
+
+ stack = heat_client.stacks.create(**stack_params)['stack']
+ LOG.info('New stack: %s', stack)
+
+ wait_stack_completion(heat_client, stack['id'])
+
+ return stack['id']
+
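+# Typical flow (sketch; the stack name is illustrative):
+#   stack_id = create_stack(heat_client, 'tgen-stack', template, parameters)
+#   outputs = get_stack_outputs(heat_client, stack_id)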
+
+def get_stack_status(heat_client, stack_id):
+ """
+ Get Stack Status
+ """
+ # stack.get operation may take long time and run out of time. The reason
+ # is that it resolves all outputs which is done serially. On the other hand
+ # stack status can be retrieved from the list operation. Internally listing
+ # supports paging and every request should not take too long.
+ for stack in heat_client.stacks.list():
+ if stack.id == stack_id:
+ return stack.status, stack.stack_status_reason
+ else:
+ raise exc.HTTPNotFound(message='Stack %s is not found' % stack_id)
+
+def get_id_with_name(heat_client, stack_name):
+ """
+ Get Stack ID by name
+ """
+ # This method isn't really necessary since the Heat client accepts
+ # stack_id and stack_name interchangeably. This is provided more as a
+ # safety net to use ids which are guaranteed to be unique and provides
+ # the benefit of keeping the Shaker code consistent and more easily
+ # traceable.
+ stack = heat_client.stacks.get(stack_name)
+ return stack.id
+
+
+def wait_stack_completion(heat_client, stack_id):
+ """
+ Wait for Stack completion
+ """
+ reason = None
+ status = None
+
+ while True:
+ status, reason = get_stack_status(heat_client, stack_id)
+ LOG.debug('Stack status: %s', status)
+ if status not in ['IN_PROGRESS', '']:
+ break
+
+ time.sleep(5)
+
+ if status != 'COMPLETE':
+ resources = heat_client.resources.list(stack_id)
+ for res in resources:
+ if (res.resource_status != 'CREATE_COMPLETE' and
+ res.resource_status_reason):
+ LOG.error('Heat stack resource %(res)s of type %(type)s '
+ 'failed with %(reason)s',
+ dict(res=res.logical_resource_id,
+ type=res.resource_type,
+ reason=res.resource_status_reason))
+
+ raise exc.StackFailure(stack_id, status, reason)
+
+
+# set the timeout for this method so we don't get stuck polling indefinitely
+# waiting for a delete
+@timeout(600)
+def wait_stack_deletion(heat_client, stack_id):
+ """
+ Wait for stack deletion
+ """
+ try:
+ heat_client.stacks.delete(stack_id)
+ while True:
+ status, reason = get_stack_status(heat_client, stack_id)
+ LOG.debug('Stack status: %s Stack reason: %s', status, reason)
+ if status == 'FAILED':
+ raise exc.StackFailure('Failed to delete stack %s' % stack_id)
+
+ time.sleep(5)
+
+ except TimeoutError:
+ LOG.error('Timed out waiting for deletion of stack %s', stack_id)
+
+ except exc.HTTPNotFound:
+ # once the stack is gone we can assume it was successfully deleted
+ # clear the exception so it doesn't confuse the logs
+ #if sys.version_info < (3, 0):
+ # sys.exc_clear()
+ LOG.info('Stack %s was successfully deleted', stack_id)
+
+
+def get_stack_outputs(heat_client, stack_id):
+ """
+ Get Stack Output
+ """
+ # try to use optimized way to retrieve outputs, fallback otherwise
+ if hasattr(heat_client.stacks, 'output_list'):
+ try:
+ output_list = heat_client.stacks.output_list(stack_id)['outputs']
+
+ result = {}
+ for output in output_list:
+ output_key = output['output_key']
+ value = heat_client.stacks.output_show(stack_id, output_key)
+ result[output_key] = value['output']['output_value']
+
+ return result
+ except BaseException as err:
+ LOG.info('Cannot get output list, fallback to old way: %s', err)
+
+ outputs_list = heat_client.stacks.get(stack_id).to_dict()['outputs']
+ return dict((item['output_key'], item['output_value'])
+ for item in outputs_list)
diff --git a/tools/os_deploy_tgen/osclients/neutron.py b/tools/os_deploy_tgen/osclients/neutron.py
new file mode 100644
index 00000000..f75077dc
--- /dev/null
+++ b/tools/os_deploy_tgen/osclients/neutron.py
@@ -0,0 +1,34 @@
+# Copyright (c) 2015 Mirantis Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Neutron client
+"""
+
+from oslo_log import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+
+def choose_external_net(neutron_client):
+ """
+ Choose External Network
+ """
+ ext_nets = neutron_client.list_networks(
+ **{'router:external': True})['networks']
+ if not ext_nets:
+ raise Exception('No external networks found')
+ return ext_nets[0]['name']
diff --git a/tools/os_deploy_tgen/osclients/nova.py b/tools/os_deploy_tgen/osclients/nova.py
new file mode 100644
index 00000000..b2baa34f
--- /dev/null
+++ b/tools/os_deploy_tgen/osclients/nova.py
@@ -0,0 +1,213 @@
+# Copyright (c) 2020 Mirantis Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Nova Client
+"""
+
+import itertools
+import re
+import time
+
+from novaclient import client as nova_client_pkg
+from oslo_log import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+class ForbiddenException(nova_client_pkg.exceptions.Forbidden):
+ """
+ Custom exception raised when listing compute nodes is forbidden
+ """
+
+
+def get_available_compute_nodes(nova_client, flavor_name):
+ """
+ Return available compute nodes
+ """
+ try:
+ host_list = [dict(host=svc.host, zone=svc.zone)
+ for svc in
+ nova_client.services.list(binary='nova-compute')
+ if svc.state == 'up' and svc.status == 'enabled']
+
+ # If the flavor has aggregate_instance_extra_specs set then filter
+ # host_list to pick only the hosts matching the chosen flavor.
+ flavor = get_flavor(nova_client, flavor_name)
+
+ if flavor is not None:
+ extra_specs = flavor.get_keys()
+
+ for item in extra_specs:
+ if "aggregate_instance_extra_specs" in item:
+ LOG.debug('Flavor contains %s, using compute node '
+ 'filtering', extra_specs)
+
+ # getting the extra spec setting for the flavor in the
+ # standard format of extra_spec:value
+ extra_spec = item.split(":")[1]
+ extra_spec_value = extra_specs.get(item)
+
+ # create a set of aggregate hosts which match
+ agg_hosts = set(itertools.chain(
+ *[agg.hosts for agg in
+ nova_client.aggregates.list() if
+ agg.metadata.get(extra_spec) == extra_spec_value]))
+
+ # update list of available hosts with
+ # host_aggregate cross-check
+ host_list = [elem for elem in host_list if
+ elem['host'] in agg_hosts]
+
+ LOG.debug('Available compute nodes: %s', host_list)
+
+ return host_list
+
+ except nova_client_pkg.exceptions.Forbidden as error:
+ msg = 'Forbidden to get list of compute nodes'
+ raise ForbiddenException(msg) from error
+
+
+def does_flavor_exist(nova_client, flavor_name):
+ """
+ Check if flavor exists
+ """
+ for flavor in nova_client.flavors.list():
+ if flavor.name == flavor_name:
+ return True
+ return False
+
+
+def create_flavor(nova_client, **kwargs):
+ """
+ Create a flavor
+ """
+ try:
+ nova_client.flavors.create(**kwargs)
+ except nova_client_pkg.exceptions.Forbidden as error:
+ msg = 'Forbidden to create flavor'
+ raise ForbiddenException(msg) from error
+
+
+def get_server_ip(nova_client, server_name, ip_type):
+ """
+ Get an IP address of the server
+ """
+ server = nova_client.servers.find(name=server_name)
+ addresses = server.addresses
+ ips = [v['addr'] for v in itertools.chain(*addresses.values())
+ if v['OS-EXT-IPS:type'] == ip_type]
+ if not ips:
+ raise Exception('Could not get IP address of server: %s' % server_name)
+ if len(ips) > 1:
+ raise Exception('Server %s has more than one IP address: %s' %
+ (server_name, ips))
+ return ips[0]
+
+
+def get_server_host_id(nova_client, server_name):
+ """
+ Get the host id
+ """
+ server = nova_client.servers.find(name=server_name)
+ return server.hostId
+
+
+def check_server_console(nova_client, server_id, len_limit=100):
+ """
+ Check Server console
+ """
+ try:
+ console = (nova_client.servers.get(server_id)
+ .get_console_output(len_limit))
+ except nova_client_pkg.exceptions.ClientException as exc:
+ LOG.warning('Error retrieving console output: %s. Ignoring', exc)
+ return None
+
+ for line in console.splitlines():
+ if (re.search(r'\[critical\]', line, flags=re.IGNORECASE) or
+ re.search(r'Cloud-init.*Datasource DataSourceNone\.', line)):
+ message = ('Instance %(id)s has critical cloud-init error: '
+ '%(msg)s. Check metadata service availability' %
+ dict(id=server_id, msg=line))
+ LOG.error(message)
+ return message
+ if re.search(r'\[error', line, flags=re.IGNORECASE):
+ LOG.error('Error message in instance %(id)s console: %(msg)s',
+ dict(id=server_id, msg=line))
+ elif re.search(r'warn', line, flags=re.IGNORECASE):
+ LOG.info('Warning message in instance %(id)s console: %(msg)s',
+ dict(id=server_id, msg=line))
+
+ return None
+
+
+def _poll_for_status(nova_client, server_id, final_ok_states, poll_period=20,
+ status_field="status"):
+ """
+ Poll for status
+ """
+ LOG.debug('Poll instance %(id)s, waiting for any of statuses %(statuses)s',
+ dict(id=server_id, statuses=final_ok_states))
+ while True:
+ obj = nova_client.servers.get(server_id)
+
+ err_msg = check_server_console(nova_client, server_id)
+ if err_msg:
+ raise Exception('Critical error in instance %s console: %s' %
+ (server_id, err_msg))
+
+ status = getattr(obj, status_field)
+ if status:
+ status = status.lower()
+
+ LOG.debug('Instance %(id)s has status %(status)s',
+ dict(id=server_id, status=status))
+
+ if status in final_ok_states:
+ break
+ if status in ('error', 'paused'):
+ raise Exception(obj.fault['message'])
+
+ time.sleep(poll_period)
+
+
+def wait_server_shutdown(nova_client, server_id):
+ """
+ Wait for server shutdown
+ """
+ _poll_for_status(nova_client, server_id, ['shutoff'])
+
+
+def wait_server_snapshot(nova_client, server_id):
+ """
+ Wait for server snapshot completion
+ """
+ task_state_field = "OS-EXT-STS:task_state"
+ server = nova_client.servers.get(server_id)
+ if hasattr(server, task_state_field):
+ _poll_for_status(nova_client, server.id, [None, '-', ''],
+ status_field=task_state_field)
+
+
+def get_flavor(nova_client, flavor_name):
+ """
+ Get the flavor
+ """
+ for flavor in nova_client.flavors.list():
+ if flavor.name == flavor_name:
+ return flavor
+ return None
diff --git a/tools/os_deploy_tgen/osclients/openstack.py b/tools/os_deploy_tgen/osclients/openstack.py
new file mode 100644
index 00000000..58297e6c
--- /dev/null
+++ b/tools/os_deploy_tgen/osclients/openstack.py
@@ -0,0 +1,82 @@
+# Copyright (c) 2020 Mirantis Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+OpenStack Client - Main File
+"""
+
+import os_client_config
+from oslo_log import log as logging
+from oslo_utils import importutils
+
+LOG = logging.getLogger(__name__)
+
+
+class OpenStackClientException(Exception):
+ """
+ Custom Exception
+ """
+
+
+def init_profiling(os_profile):
+ """
+ Initialize Profiling
+ """
+ if os_profile:
+ osprofiler_profiler = importutils.try_import("osprofiler.profiler")
+
+ if osprofiler_profiler: # lib is present
+ osprofiler_profiler.init(os_profile)
+ trace_id = osprofiler_profiler.get().get_base_id()
+ LOG.info('Profiling is enabled, trace id: %s', trace_id)
+ else: # param is set, but lib is not present
+ LOG.warning('Profiling could not be enabled. To enable profiling '
+ 'please install "osprofiler" library')
+
+
+class OpenStackClient():
+ """
+ Client Class
+ """
+ def __init__(self, openstack_params):
+ """
+ Initialize
+ """
+ LOG.debug('Establishing connection to OpenStack')
+
+ init_profiling(openstack_params.get('os_profile'))
+
+ config = os_client_config.OpenStackConfig()
+ cloud_config = config.get_one_cloud(**openstack_params)
+ if openstack_params['os_insecure']:
+ cloud_config.config['verify'] = False
+ cloud_config.config['cacert'] = None
+ self.keystone_session = cloud_config.get_session()
+ self.nova = cloud_config.get_legacy_client('compute')
+ self.neutron = cloud_config.get_legacy_client('network')
+ self.glance = cloud_config.get_legacy_client('image')
+
+ # heat client wants endpoint to be always set
+ endpoint = cloud_config.get_session_endpoint('orchestration')
+ if not endpoint:
+ raise OpenStackClientException(
+ 'Endpoint for orchestration service is not found')
+ self.heat = cloud_config.get_legacy_client('orchestration',
+ endpoint=endpoint)
+
+ # Ping OpenStack
+ self.keystone_session.get_token()
+
+ LOG.info('Connection to OpenStack is initialized')
diff --git a/tools/os_deploy_tgen/osdt.py b/tools/os_deploy_tgen/osdt.py
new file mode 100644
index 00000000..0aad8597
--- /dev/null
+++ b/tools/os_deploy_tgen/osdt.py
@@ -0,0 +1,601 @@
+# Copyright 2020 Spirent Communications, Mirantis
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Code to deploy a traffic generator on OpenStack.
+This code is based on OpenStack Shaker.
+"""
+
+
+import collections
+import copy
+import functools
+import logging
+import os
+import random
+
+import jinja2
+
+from conf import settings as S
+
+from tools.os_deploy_tgen.utilities import utils
+from tools.os_deploy_tgen.osclients import heat
+from tools.os_deploy_tgen.osclients import neutron
+from tools.os_deploy_tgen.osclients import nova
+from tools.os_deploy_tgen.osclients import openstack
+
+LOG = logging.getLogger(__name__)
+_CURR_DIR = os.path.dirname(os.path.realpath(__file__))
+
+class DeploymentException(Exception):
+ """ Exception Handling """
+
+
+def prepare_for_cross_az(compute_nodes, zones):
+ """
+ Prepare compute nodes for deployment across two availability zones
+ """
+ if len(zones) != 2:
+ LOG.info('cross_az is specified, but len(zones) is not 2')
+ return compute_nodes
+
+ masters = []
+ slaves = []
+ for node in compute_nodes:
+ if node['zone'] == zones[0]:
+ masters.append(node)
+ else:
+ slaves.append(node)
+
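+ # interleave nodes from the two zones so each master/slave pair spans both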
+ res = []
+ for i in range(min(len(masters), len(slaves))):
+ res.append(masters[i])
+ res.append(slaves[i])
+
+ return res
+
+
+def generate_agents(compute_nodes, accommodation, unique):
+ """
+ Generate TestVNF Instances
+ """
+ LOG.debug('Compute nodes: %s', compute_nodes)
+ density = accommodation.get('density') or 1
+
+ zones = accommodation.get('zones')
+ if zones:
+ compute_nodes = [
+ c for c in compute_nodes if c['zone'] in zones or
+ ':'.join(filter(None, [c['zone'], c['host']])) in zones]
+ if 'cross_az' in accommodation:
+ compute_nodes = prepare_for_cross_az(compute_nodes, zones)
+
+ best_effort = accommodation.get('best_effort', False)
+ compute_nodes_requested = accommodation.get('compute_nodes')
+ if compute_nodes_requested:
+ if compute_nodes_requested > len(compute_nodes):
+ LOG.debug('Only %s compute nodes are available', len(compute_nodes))
+ if best_effort:
+ LOG.info('Allowing best_effort accommodation')
+ else:
+ raise DeploymentException(
+ 'Not enough compute nodes %(cn)s for requested '
+ 'instance accommodation %(acc)s' %
+ dict(cn=compute_nodes, acc=accommodation))
+ else:
+ compute_nodes = random.sample(compute_nodes,
+ compute_nodes_requested)
+
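+ # number of placement iterations: one per selected compute node,
+ # multiplied by density (instances per node)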
+ cn_count = len(compute_nodes)
+ iterations = cn_count * density
+ ite = 0
+ if 'single_room' in accommodation and 'pair' in accommodation:
+ # special case to allow pair, single_room on single compute node
+ if best_effort and iterations == 1:
+ LOG.info('Allowing best_effort accommodation: '
+ 'single_room, pair on one compute node')
+ else:
+ iterations //= 2
+ node_formula = lambda x: compute_nodes[x % cn_count]
+
+ agents = {}
+
+ for ite in range(iterations):
+ if 'pair' in accommodation:
+ master_id = '%s_master_%s' % (unique, ite)
+ slave_id = '%s_slave_%s' % (unique, ite)
+ master = dict(id=master_id, mode='master', slave_id=slave_id)
+ slave = dict(id=slave_id, mode='slave', master_id=master_id)
+
+ if 'single_room' in accommodation:
+ master_formula = lambda x: ite * 2
+ slave_formula = lambda x: ite * 2 + 1
+ elif 'double_room' in accommodation:
+ master_formula = lambda x: ite
+ slave_formula = lambda x: ite
+ else: # mixed_room
+ master_formula = lambda x: ite
+ slave_formula = lambda x: ite + 1
+
+ mas = node_formula(master_formula(ite))
+ master['node'], master['zone'] = mas['host'], mas['zone']
+ sla = node_formula(slave_formula(ite))
+ slave['node'], slave['zone'] = sla['host'], sla['zone']
+
+ agents[master['id']] = master
+ agents[slave['id']] = slave
+ else:
+ if 'single_room' in accommodation:
+ agent_id = '%s_agent_%s' % (unique, ite)
+ agents[agent_id] = dict(id=agent_id,
+ node=node_formula(ite)['host'],
+ zone=node_formula(ite)['zone'],
+ mode='alone')
+
+ if not agents:
+ raise DeploymentException('Not enough compute nodes %(cn)s for '
+ 'requested instance accommodation %(acc)s' %
+ dict(cn=compute_nodes, acc=accommodation))
+
+ # inject availability zone
+ for agent in agents.values():
+ avz = agent['zone']
+ if agent['node']:
+ avz += ':' + agent['node']
+ agent['availability_zone'] = avz
+
+ return agents
+
+
+def _get_stack_values(stack_outputs, vm_name, params):
+ """
+ Collect the output from Heat Stack Deployment
+ """
+ result = {}
+ for param in params:
+ out = stack_outputs.get(vm_name + '_' + param)
+ if out:
+ result[param] = out
+ return result
+
+
+def filter_agents(agents, stack_outputs, override=None):
+ """
+ Filter out instances that were not deployed
+ """
+ deployed_agents = {}
+
+ # first pass, ignore non-deployed
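+ # the stack outputs provide the data-plane IP ('ip'), the management or
+ # floating IP ('pip') and the data-port MAC address ('dmac')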
+ for agent in agents.values():
+ stack_values = _get_stack_values(stack_outputs, agent['id'], ['ip'])
+ new_stack_values = _get_stack_values(stack_outputs, agent['id'], ['pip'])
+ mac_values = _get_stack_values(stack_outputs, agent['id'], ['dmac'])
+
+ if override:
+ stack_values.update(override(agent))
+
+ if not stack_values.get('ip'):
+ LOG.info('Ignore non-deployed agent: %s', agent)
+ continue
+
+ if not new_stack_values.get('pip'):
+ LOG.info('Ignore non-deployed agent: %s', agent)
+ continue
+
+ if not mac_values.get('dmac'):
+ LOG.info('Ignore non-deployed agent: %s', agent)
+ continue
+
+ agent.update(stack_values)
+ agent.update(new_stack_values)
+
+ # workaround of Nova bug 1422686
+ if agent.get('mode') == 'slave' and not agent.get('ip'):
+ LOG.info('IP address is missing in agent: %s', agent)
+ continue
+
+ deployed_agents[agent['id']] = agent
+
+ # second pass, check pairs
+ result = {}
+ for agent in deployed_agents.values():
+ LOG.debug('Agent %s: mode=%s ip=%s pip=%s dmac=%s', agent['id'],
+ agent.get('mode'), agent.get('ip'), agent.get('pip'),
+ agent.get('dmac'))
+ if (agent.get('mode') == 'alone' or
+ (agent.get('mode') == 'master' and
+ agent.get('slave_id') in deployed_agents) or
+ (agent.get('mode') == 'slave' and
+ agent.get('master_id') in deployed_agents)):
+ result[agent['id']] = agent
+
+ return result
+
+
+def distribute_agents(agents, get_host_fn):
+ """
+ Distribute TestVNF Instances
+ """
+ result = {}
+
+ hosts = set()
+ buckets = collections.defaultdict(list)
+ for agent in agents.values():
+ agent_id = agent['id']
+ # we assume that the server name equals agent_id
+ host_id = get_host_fn(agent_id)
+
+ if host_id not in hosts:
+ hosts.add(host_id)
+ agent['node'] = host_id
+ buckets[agent['mode']].append(agent)
+ else:
+ LOG.info('Filter out agent %s, host %s is already occupied',
+ agent_id, host_id)
+
+ if buckets['alone']:
+ result = dict((a['id'], a) for a in buckets['alone'])
+ else:
+ for master, slave in zip(buckets['master'], buckets['slave']):
+ master['slave_id'] = slave['id']
+ slave['master_id'] = master['id']
+
+ result[master['id']] = master
+ result[slave['id']] = slave
+
+ return result
+
+
+def normalize_accommodation(accommodation):
+ """
+ Normalize the accommodation specification of the TestVNFs
+ """
+ result = {}
+
+ for stk in accommodation:
+ if isinstance(stk, dict):
+ result.update(stk)
+ else:
+ result[stk] = True
+
+ # override scenario's availability zone accommodation
+ if S.hasValue('SCENARIO_AVAILABILITY_ZONE'):
+ result['zones'] = S.getValue('SCENARIO_AVAILABILITY_ZONE')
+ # override scenario's compute_nodes accommodation
+ if S.hasValue('SCENARIO_COMPUTE_NODES'):
+ result['compute_nodes'] = S.getValue('SCENARIO_COMPUTE_NODES')
+
+ return result
+
+
+class Deployment():
+ """
+ Main Deployment Class
+ """
+ def __init__(self):
+ """
+ Initialize
+ """
+ self.openstack_client = None
+ self.stack_id = None
+ self.privileged_mode = True
+ self.flavor_name = None
+ self.image_name = None
+ self.stack_name = None
+ self.external_net = None
+ self.dns_nameservers = None
+ # The current run "owns" the support stacks; they are tracked
+ # so they can be deleted later.
+ self.support_stacks = []
+ self.trackstack = collections.namedtuple('TrackStack', 'name id')
+
+ def connect_to_openstack(self, openstack_params, flavor_name, image_name,
+ external_net, dns_nameservers):
+ """
+ Connect to Openstack
+ """
+ LOG.debug('Connecting to OpenStack')
+
+ self.openstack_client = openstack.OpenStackClient(openstack_params)
+
+ self.flavor_name = flavor_name
+ self.image_name = image_name
+
+ if S.hasValue('STACK_NAME'):
+ self.stack_name = S.getValue('STACK_NAME')
+ else:
+ self.stack_name = 'testvnf_%s' % utils.random_string()
+
+ self.dns_nameservers = dns_nameservers
+ # initializing self.external_net last so that other attributes don't
+ # remain uninitialized in case the user forgets to create an external network
+ self.external_net = (external_net or
+ neutron.choose_external_net(
+ self.openstack_client.neutron))
+
+ def _get_compute_nodes(self, accommodation):
+ """
+ Get available compute nodes
+ """
+ try:
+ comps = nova.get_available_compute_nodes(self.openstack_client.nova,
+ self.flavor_name)
+ LOG.debug('Available compute nodes: %s', comps)
+ return comps
+ except nova.ForbiddenException:
+ # user has no permissions to list compute nodes
+ LOG.info('OpenStack user does not have permission to list compute '
+ 'nodes - treating the user as non-admin')
+ self.privileged_mode = False
+ count = accommodation.get('compute_nodes')
+ if not count:
+ raise DeploymentException(
+ 'When run with non-admin user the scenario must specify '
+ 'number of compute nodes to use')
+
+ zones = accommodation.get('zones') or ['nova']
+ return [dict(host=None, zone=zones[n % len(zones)])
+ for n in range(count)]
+
+ def _deploy_from_hot(self, specification, base_dir=None):
+ """
+ Perform Heat stack deployment
+ """
+ accommodation = normalize_accommodation(
+ specification.get('accommodation') or
+ specification.get('vm_accommodation'))
+
+ agents = generate_agents(self._get_compute_nodes(accommodation),
+ accommodation, self.stack_name)
+
+ # render template by jinja
+ vars_values = {
+ 'agents': agents,
+ 'unique': self.stack_name,
+ }
+ heat_template = utils.read_file(specification['template'],
+ base_dir=base_dir)
+ compiled_template = jinja2.Template(heat_template)
+ rendered_template = compiled_template.render(vars_values)
+ LOG.info('Rendered template: %s', rendered_template)
+
+ # create stack by Heat
+ try:
+ merged_parameters = {
+ 'external_net': self.external_net,
+ 'image': self.image_name,
+ 'flavor': self.flavor_name,
+ 'dns_nameservers': self.dns_nameservers,
+ }
+ except AttributeError as err:
+ LOG.error('Failed to gather required parameters to create '
+ 'heat stack: %s', err)
+ raise
+
+ merged_parameters.update(specification.get('template_parameters', {}))
+ try:
+ self.stack_id = heat.create_stack(
+ self.openstack_client.heat, self.stack_name,
+ rendered_template, merged_parameters, None)
+ except heat.exc.StackFailure as err:
+ self.stack_id = err.args[0]
+ raise
+
+ # get info about deployed objects
+ outputs = heat.get_stack_outputs(self.openstack_client.heat,
+ self.stack_id)
+ override = self._get_override(specification.get('override'))
+
+ agents = filter_agents(agents, outputs, override)
+
+ if (not self.privileged_mode) and accommodation.get('density', 1) == 1:
+ get_host_fn = functools.partial(nova.get_server_host_id,
+ self.openstack_client.nova)
+ agents = distribute_agents(agents, get_host_fn)
+
+ return agents
+
+ def _get_override(self, override_spec):
+ """
+ Collect the overrides
+ """
+ def override_ip(agent, ip_type):
+ """
+ Override the IP
+ """
+ return dict(ip=nova.get_server_ip(
+ self.openstack_client.nova, agent['id'], ip_type))
+
+ if override_spec:
+ if override_spec.get('ip'):
+ return functools.partial(override_ip,
+ ip_type=override_spec.get('ip'))
+
+
+ def deploy(self, deployment, base_dir=None):
+ """
+ Perform Deployment
+ """
+ agents = {}
+
+ if not deployment:
+ # local mode, create fake agent
+ agents.update(dict(local=dict(id='local', mode='alone',
+ node='localhost')))
+
+ if deployment.get('template'):
+ if self.openstack_client:
+ # deploy topology specified by HOT
+ agents.update(self._deploy_from_hot(
+ deployment, base_dir=base_dir))
+ else:
+ raise DeploymentException(
+ 'OpenStack client is not initialized; '
+ 'template-based deployment is not possible.')
+
+ if not agents:
+ print("No VM Deployed - Deploy")
+ raise Exception('No agents deployed.')
+
+ if deployment.get('agents'):
+ # agents are specified statically
+ agents.update(dict((a['id'], a) for a in deployment.get('agents')))
+
+ return agents
+
+def read_scenario(scenario_name):
+ """
+ Collect all information about the scenario
+ """
+ scenario_file_name = scenario_name
+ LOG.debug('Scenario %s is resolved to %s', scenario_name,
+ scenario_file_name)
+
+ scenario = utils.read_yaml_file(scenario_file_name)
+
+ schema = utils.read_yaml_file(S.getValue('SCHEMA'))
+ utils.validate_yaml(scenario, schema)
+
+ scenario['title'] = scenario.get('title') or scenario_file_name
+ scenario['file_name'] = scenario_file_name
+
+ return scenario
+
+def _extend_agents(agents_map):
+ """
+ Add master/slave details to the deployed instances
+ """
+ extended_agents = {}
+ for agent in agents_map.values():
+ extended = copy.deepcopy(agent)
+ if agent.get('slave_id'):
+ extended['slave'] = copy.deepcopy(agents_map[agent['slave_id']])
+ if agent.get('master_id'):
+ extended['master'] = copy.deepcopy(agents_map[agent['master_id']])
+ extended_agents[agent['id']] = extended
+ return extended_agents
+
+def play_scenario(scenario):
+ """
+ Deploy a scenario
+ """
+ deployment = None
+ output = dict(scenarios={}, agents={})
+ output['scenarios'][scenario['title']] = scenario
+
+ try:
+ deployment = Deployment()
+
+ openstack_params = utils.pack_openstack_params()
+ try:
+ deployment.connect_to_openstack(
+ openstack_params, S.getValue('FLAVOR_NAME'),
+ S.getValue('IMAGE_NAME'), S.getValue('EXTERNAL_NET'),
+ S.getValue('DNS_NAMESERVERS'))
+ except BaseException as excep:
+ LOG.warning('Failed to connect to OpenStack: %s. Please '
+ 'verify parameters: %s', excep, openstack_params)
+
+ base_dir = os.path.dirname(scenario['file_name'])
+ scenario_deployment = scenario.get('deployment', {})
+ agents = deployment.deploy(scenario_deployment, base_dir=base_dir)
+
+ if not agents:
+ print("No VM Deployed - Play-Scenario")
+ raise Exception('No agents deployed.')
+
+ agents = _extend_agents(agents)
+ output['agents'] = agents
+ LOG.debug('Deployed agents: %s', agents)
+
+ except BaseException as excep:
+ if isinstance(excep, KeyboardInterrupt):
+ LOG.info('Caught SIGINT. Terminating')
+ else:
+ error_msg = 'Error while executing scenario: %s' % excep
+ LOG.exception(error_msg)
+ return output
+
+def act():
+ """
+ Kickstart the Scenario Deployment
+ """
+ for scenario_name in S.getValue('SCENARIOS'):
+ LOG.info('Play scenario: %s', scenario_name)
+ scenario = read_scenario(scenario_name)
+ play_output = play_scenario(scenario)
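+ # NOTE: only the first scenario in SCENARIOS is played; the loop
+ # returns on its first pass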
+ return play_output
+ return None
+
+def update_vsperf_configuration(agents):
+ """
+ Update the VSPERF configuration with the deployed chassis addresses.
+ """
+ tgen = S.getValue('TRAFFICGEN')
+ east_chassis_ip = agents[0]['public_ip']
+ # east_data_ip = agents[0]['private_ip']
+ if len(agents) == 2:
+ west_chassis_ip = agents[1]['public_ip']
+ # west_data_ip = agents[1]['private_ip']
+ else:
+ west_chassis_ip = east_chassis_ip
+ # west_data_ip = east_chassis_ip
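+ # push the deployed chassis management addresses into the active
+ # traffic generator settings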
+ if "TestCenter" in tgen:
+ S.setValue('TRAFFICGEN_STC_EAST_CHASSIS_ADDR', east_chassis_ip)
+ S.setValue('TRAFFICGEN_STC_WEST_CHASSIS_ADDR', west_chassis_ip)
+ if "Ix" in tgen:
+ S.setValue("TRAFFICGEN_EAST_IXIA_HOST", east_chassis_ip)
+ S.setValue("TRAFFICGEN_WEST_IXIA_HOST", west_chassis_ip)
+
+def deploy_testvnf():
+ """
+ Entry point: deploy the TestVNF and update the VSPERF configuration.
+ """
+ output = act()
+ list_of_agents = []
+ if output:
+ for name, agent in output['agents'].items():
+ list_of_agents.append({'name': name,
+ 'private_ip': agent['ip'],
+ 'public_ip': agent['pip'],
+ 'compute_node': agent['node']})
+ if list_of_agents:
+ update_vsperf_configuration(list_of_agents)
+ return True
+ return False
+
+if __name__ == "__main__":
+ deploy_testvnf()
diff --git a/tools/os_deploy_tgen/templates/hotfiles.md b/tools/os_deploy_tgen/templates/hotfiles.md
new file mode 100644
index 00000000..6e21157e
--- /dev/null
+++ b/tools/os_deploy_tgen/templates/hotfiles.md
@@ -0,0 +1,13 @@
+# How to use these HOT files
+
+These HOT files are referenced in the YAML scenario files.
+Please ensure you are using the correct HOT file.
+
+## L2 - No routers are set up - same subnet
+
+- l2fip.hot - Floating IP is configured. Use this if the OpenStack environment supports floating IPs.
+- l2up.hot - Use this if you want a username and password configured for the TestVNFs.
+- l2.hot - Use this if the two interfaces have fixed IPs from two different networks. This applies when the TestVNF has connectivity to a provider network.
+
+## L3 - Routers are set up - different subnets
+
+- l3.hot - Set up TestVNFs on two different subnets and connect them with a router.
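+
+For reference, a scenario YAML selects a HOT file through its
+`deployment.template` key. The snippet below mirrors the bundled
+l2_2c_2i.yaml scenario:
+
+```yaml
+title: OpenStack L2 Performance
+
+deployment:
+  template: l2fip.hot
+  accommodation: [pair, single_room, compute_nodes: 2]
+```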
diff --git a/tools/os_deploy_tgen/templates/l2.hot b/tools/os_deploy_tgen/templates/l2.hot
new file mode 100644
index 00000000..226e8433
--- /dev/null
+++ b/tools/os_deploy_tgen/templates/l2.hot
@@ -0,0 +1,89 @@
+heat_template_version: 2013-05-23
+
+description:
+ This Heat template creates a new Neutron network, a router to the external
+ network and plugs instances into this new network. All instances are located
+ in the same L2 domain.
+
+parameters:
+ image:
+ type: string
+ description: Name of image to use for servers
+ flavor:
+ type: string
+ description: Flavor to use for servers
+ external_net:
+ type: string
+ description: ID or name of external network
+# server_endpoint:
+# type: string
+# description: Server endpoint address
+ dns_nameservers:
+ type: comma_delimited_list
+ description: DNS nameservers for the subnet
+
+resources:
+ private_net:
+ type: OS::Neutron::Net
+ properties:
+ name: {{ unique }}_net
+
+ private_subnet:
+ type: OS::Neutron::Subnet
+ properties:
+ network_id: { get_resource: private_net }
+ cidr: 172.172.172.0/24
+ dns_nameservers: { get_param: dns_nameservers }
+
+ router:
+ type: OS::Neutron::Router
+ properties:
+ external_gateway_info:
+ network: { get_param: external_net }
+
+ router_interface:
+ type: OS::Neutron::RouterInterface
+ properties:
+ router_id: { get_resource: router }
+ subnet_id: { get_resource: private_subnet }
+
+{% for agent in agents.values() %}
+
+ {{ agent.id }}:
+ type: OS::Nova::Server
+ properties:
+ name: {{ agent.id }}
+ image: { get_param: image }
+ flavor: { get_param: flavor }
+ availability_zone: "{{ agent.availability_zone }}"
+ networks:
+ - port: { get_resource: {{ agent.id }}_port }
+ - port: { get_resource: {{ agent.id }}_mgmt_port }
+
+ {{ agent.id }}_port:
+ type: OS::Neutron::Port
+ properties:
+ network_id: { get_resource: private_net }
+ fixed_ips:
+ - subnet_id: { get_resource: private_subnet }
+
+ {{ agent.id }}_mgmt_port:
+ type: OS::Neutron::Port
+ properties:
+ network_id: { get_param: external_net }
+
+{% endfor %}
+
+outputs:
+{% for agent in agents.values() %}
+ {{ agent.id }}_instance_name:
+ value: { get_attr: [ {{ agent.id }}, instance_name ] }
+ {{ agent.id }}_ip:
+ value: { get_attr: [ {{ agent.id }}_port, fixed_ips, 0, ip_address ] }
+# value: { get_attr: [ {{ agent.id }}, networks, { get_attr: [private_net, name] }, 0 ] }
+ {{ agent.id }}_pip:
+ value: { get_attr: [ {{ agent.id }}_mgmt_port, fixed_ips, 0, ip_address ] }
+ {{ agent.id }}_dmac:
+ value: { get_attr: [ {{ agent.id }}_port, mac_address ] }
+
+{% endfor %}
diff --git a/tools/os_deploy_tgen/templates/l2_1c_1i.yaml b/tools/os_deploy_tgen/templates/l2_1c_1i.yaml
new file mode 100644
index 00000000..ec931107
--- /dev/null
+++ b/tools/os_deploy_tgen/templates/l2_1c_1i.yaml
@@ -0,0 +1,8 @@
+title: OpenStack L2 Performance
+
+description:
+ In this scenario tdep launches a single instance on a tenant network.
+
+deployment:
+ template: l2.hot
+ accommodation: [single_room, compute_nodes: 1]
diff --git a/tools/os_deploy_tgen/templates/l2_1c_2i.yaml b/tools/os_deploy_tgen/templates/l2_1c_2i.yaml
new file mode 100644
index 00000000..4241a80c
--- /dev/null
+++ b/tools/os_deploy_tgen/templates/l2_1c_2i.yaml
@@ -0,0 +1,10 @@
+title: OpenStack L2 Performance
+
+description:
+ In this scenario tdep launches 1 pair of instances in the same tenant
+ network. Both instances are hosted on the same compute node.
+ The traffic goes within the tenant network (L2 domain).
+
+deployment:
+ template: l2up.hot
+ accommodation: [pair, single_room, best_effort, compute_nodes: 1]
diff --git a/tools/os_deploy_tgen/templates/l2_2c_2i.yaml b/tools/os_deploy_tgen/templates/l2_2c_2i.yaml
new file mode 100644
index 00000000..b1f54f0a
--- /dev/null
+++ b/tools/os_deploy_tgen/templates/l2_2c_2i.yaml
@@ -0,0 +1,10 @@
+title: OpenStack L2 Performance
+
+description:
+ In this scenario tdep launches 1 pair of instances in the same tenant
+ network. Each instance is hosted on a separate compute node. The traffic goes
+ within the tenant network (L2 domain).
+
+deployment:
+ template: l2fip.hot
+ accommodation: [pair, single_room, compute_nodes: 2]
diff --git a/tools/os_deploy_tgen/templates/l2_old.hot b/tools/os_deploy_tgen/templates/l2_old.hot
new file mode 100644
index 00000000..d2553d76
--- /dev/null
+++ b/tools/os_deploy_tgen/templates/l2_old.hot
@@ -0,0 +1,93 @@
+heat_template_version: 2013-05-23
+
+description:
+ This Heat template creates a new Neutron network, a router to the external
+ network and plugs instances into this new network. All instances are located
+ in the same L2 domain.
+
+parameters:
+ image:
+ type: string
+ description: Name of image to use for servers
+ flavor:
+ type: string
+ description: Flavor to use for servers
+ external_net:
+ type: string
+ description: ID or name of external network
+# server_endpoint:
+# type: string
+# description: Server endpoint address
+ dns_nameservers:
+ type: comma_delimited_list
+ description: DNS nameservers for the subnet
+
+resources:
+ private_net:
+ type: OS::Neutron::Net
+ properties:
+ name: {{ unique }}_net
+
+ private_subnet:
+ type: OS::Neutron::Subnet
+ properties:
+ network_id: { get_resource: private_net }
+ cidr: 10.0.0.0/16
+ dns_nameservers: { get_param: dns_nameservers }
+
+ router:
+ type: OS::Neutron::Router
+ properties:
+ external_gateway_info:
+ network: { get_param: external_net }
+
+ router_interface:
+ type: OS::Neutron::RouterInterface
+ properties:
+ router_id: { get_resource: router }
+ subnet_id: { get_resource: private_subnet }
+
+ server_security_group:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ rules: [
+ {remote_ip_prefix: 0.0.0.0/0,
+ protocol: tcp,
+ port_range_min: 1,
+ port_range_max: 65535},
+ {remote_ip_prefix: 0.0.0.0/0,
+ protocol: udp,
+ port_range_min: 1,
+ port_range_max: 65535},
+ {remote_ip_prefix: 0.0.0.0/0,
+ protocol: icmp}]
+
+{% for agent in agents.values() %}
+
+ {{ agent.id }}:
+ type: OS::Nova::Server
+ properties:
+ name: {{ agent.id }}
+ image: { get_param: image }
+ flavor: { get_param: flavor }
+ availability_zone: "{{ agent.availability_zone }}"
+ networks:
+ - port: { get_resource: {{ agent.id }}_port }
+
+ {{ agent.id }}_port:
+ type: OS::Neutron::Port
+ properties:
+ network_id: { get_resource: private_net }
+ fixed_ips:
+ - subnet_id: { get_resource: private_subnet }
+ security_groups: [{ get_resource: server_security_group }]
+
+{% endfor %}
+
+outputs:
+{% for agent in agents.values() %}
+ {{ agent.id }}_instance_name:
+ value: { get_attr: [ {{ agent.id }}, instance_name ] }
+ {{ agent.id }}_ip:
+ value: { get_attr: [ {{ agent.id }}, networks, { get_attr: [private_net, name] }, 0 ] }
+{% endfor %}
diff --git a/tools/os_deploy_tgen/templates/l2fip.hot b/tools/os_deploy_tgen/templates/l2fip.hot
new file mode 100644
index 00000000..4d4b52f7
--- /dev/null
+++ b/tools/os_deploy_tgen/templates/l2fip.hot
@@ -0,0 +1,122 @@
+heat_template_version: 2013-05-23
+
+description:
+ This Heat template creates a new Neutron network, a router to the external
+ network and plugs instances into this new network. All instances are located
+ in the same L2 domain.
+
+parameters:
+ image:
+ type: string
+ description: Name of image to use for servers
+ flavor:
+ type: string
+ description: Flavor to use for servers
+ external_net:
+ type: string
+ description: ID or name of external network
+# server_endpoint:
+# type: string
+# description: Server endpoint address
+ dns_nameservers:
+ type: comma_delimited_list
+ description: DNS nameservers for the subnet
+
+resources:
+ user_config:
+ type: OS::Heat::CloudConfig
+ properties:
+ cloud_config:
+ spirent:
+ driver: "sockets"
+
+ private_net:
+ type: OS::Neutron::Net
+ properties:
+ name: {{ unique }}_net
+ port_security_enabled: false
+
+ private_subnet:
+ type: OS::Neutron::Subnet
+ properties:
+ network_id: { get_resource: private_net }
+ cidr: 172.172.172.0/24
+ dns_nameservers: { get_param: dns_nameservers }
+
+ private_datanet:
+ type: OS::Neutron::Net
+ properties:
+ name: {{ unique }}_datanet
+ port_security_enabled: false
+
+ private_datasubnet:
+ type: OS::Neutron::Subnet
+ properties:
+ network_id: { get_resource: private_datanet }
+ cidr: 172.172.168.0/24
+ dns_nameservers: { get_param: dns_nameservers }
+
+ router:
+ type: OS::Neutron::Router
+ properties:
+ external_gateway_info:
+ network: { get_param: external_net }
+
+ router_interface:
+ type: OS::Neutron::RouterInterface
+ properties:
+ router_id: { get_resource: router }
+ subnet_id: { get_resource: private_subnet }
+
+{% for agent in agents.values() %}
+
+ {{ agent.id }}:
+ type: OS::Nova::Server
+ properties:
+ name: {{ agent.id }}
+ image: { get_param: image }
+ flavor: { get_param: flavor }
+ availability_zone: "{{ agent.availability_zone }}"
+ networks:
+ - port: { get_resource: {{ agent.id }}_port }
+ - port: { get_resource: {{ agent.id }}_dataport }
+
+ {{ agent.id }}_port:
+ type: OS::Neutron::Port
+ properties:
+ network_id: { get_resource: private_net }
+ port_security_enabled: false
+ fixed_ips:
+ - subnet_id: { get_resource: private_subnet }
+
+ {{ agent.id }}_dataport:
+ type: OS::Neutron::Port
+ properties:
+ network_id: { get_resource: private_datanet }
+ port_security_enabled: false
+ fixed_ips:
+ - subnet_id: { get_resource: private_datasubnet }
+
+ {{ agent.id }}_fip_port:
+ type: OS::Neutron::FloatingIP
+ depends_on:
+ - router_interface
+ properties:
+ floating_network: { get_param: external_net }
+ port_id: { get_resource: {{ agent.id }}_port }
+
+
+{% endfor %}
+
+outputs:
+{% for agent in agents.values() %}
+ {{ agent.id }}_instance_name:
+ value: { get_attr: [ {{ agent.id }}, instance_name ] }
+ {{ agent.id }}_ip:
+ value: { get_attr: [ {{ agent.id }}_dataport, fixed_ips, 0, ip_address ] }
+ {{ agent.id }}_pip:
+ value: { get_attr: [ {{ agent.id }}_fip_port, floating_ip_address ] }
+ {{ agent.id }}_dmac:
+ value: { get_attr: [ {{ agent.id }}_dataport, mac_address ] }
+
+{% endfor %}
diff --git a/tools/os_deploy_tgen/templates/l2up.hot b/tools/os_deploy_tgen/templates/l2up.hot
new file mode 100644
index 00000000..58f25831
--- /dev/null
+++ b/tools/os_deploy_tgen/templates/l2up.hot
@@ -0,0 +1,126 @@
+heat_template_version: 2013-05-23
+
+description:
+ This Heat template creates a new Neutron network, a router to the external
+ network and plugs instances into this new network. All instances are located
+ in the same L2 domain.
+
+parameters:
+ image:
+ type: string
+ description: Name of image to use for servers
+ flavor:
+ type: string
+ description: Flavor to use for servers
+ external_net:
+ type: string
+ description: ID or name of external network
+# server_endpoint:
+# type: string
+# description: Server endpoint address
+ dns_nameservers:
+ type: comma_delimited_list
+ description: DNS nameservers for the subnet
+
+resources:
+ private_net:
+ type: OS::Neutron::Net
+ properties:
+ name: {{ unique }}_net
+
+ private_subnet:
+ type: OS::Neutron::Subnet
+ properties:
+ network_id: { get_resource: private_net }
+ cidr: 172.172.172.0/24
+ dns_nameservers: { get_param: dns_nameservers }
+
+ router:
+ type: OS::Neutron::Router
+ properties:
+ external_gateway_info:
+ network: { get_param: external_net }
+
+ router_interface:
+ type: OS::Neutron::RouterInterface
+ properties:
+ router_id: { get_resource: router }
+ subnet_id: { get_resource: private_subnet }
+
+ user_config:
+ type: OS::Heat::CloudConfig
+ properties:
+ cloud_config:
+ users:
+ - default
+ - name: test
+ groups: "users,root"
+ lock-passwd: false
+ passwd: 'test'
+ shell: "/bin/bash"
+ sudo: "ALL=(ALL) NOPASSWD:ALL"
+ ssh_pwauth: true
+ chpasswd:
+ list: |
+ test:test
+ expire: False
+
+ server_security_group:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ rules: [
+ {remote_ip_prefix: 0.0.0.0/0,
+ protocol: tcp,
+ port_range_min: 1,
+ port_range_max: 65535},
+ {remote_ip_prefix: 0.0.0.0/0,
+ protocol: udp,
+ port_range_min: 1,
+ port_range_max: 65535},
+ {remote_ip_prefix: 0.0.0.0/0,
+ protocol: icmp}]
+
+{% for agent in agents.values() %}
+
+ {{ agent.id }}:
+ type: OS::Nova::Server
+ properties:
+ name: {{ agent.id }}
+ image: { get_param: image }
+ flavor: { get_param: flavor }
+ availability_zone: "{{ agent.availability_zone }}"
+ networks:
+ - port: { get_resource: {{ agent.id }}_port }
+ - port: { get_resource: {{ agent.id }}_mgmt_port }
+ user_data: {get_resource: user_config}
+ user_data_format: RAW
+
+ {{ agent.id }}_port:
+ type: OS::Neutron::Port
+ properties:
+ network_id: { get_resource: private_net }
+ fixed_ips:
+ - subnet_id: { get_resource: private_subnet }
+ security_groups: [{ get_resource: server_security_group }]
+
+ {{ agent.id }}_mgmt_port:
+ type: OS::Neutron::Port
+ properties:
+ network_id: { get_param: external_net }
+ security_groups: [{ get_resource: server_security_group }]
+
+{% endfor %}
+
+outputs:
+{% for agent in agents.values() %}
+ {{ agent.id }}_instance_name:
+ value: { get_attr: [ {{ agent.id }}, instance_name ] }
+ {{ agent.id }}_ip:
+ value: { get_attr: [ {{ agent.id }}_port, fixed_ips, 0, ip_address ] }
+# value: { get_attr: [ {{ agent.id }}, networks, { get_attr: [private_net, name] }, 0 ] }
+ {{ agent.id }}_pip:
+ value: { get_attr: [ {{ agent.id }}_mgmt_port, fixed_ips, 0, ip_address ] }
+ {{ agent.id }}_dmac:
+ value: { get_attr: [ {{ agent.id }}_port, mac_address ] }
+
+{% endfor %}
diff --git a/tools/os_deploy_tgen/templates/l3.hot b/tools/os_deploy_tgen/templates/l3.hot
new file mode 100644
index 00000000..4a5ea02c
--- /dev/null
+++ b/tools/os_deploy_tgen/templates/l3.hot
@@ -0,0 +1,125 @@
+heat_template_version: 2013-05-23
+
+description:
+ This Heat template creates a pair of networks plugged into the same router.
+ Master instances and slave instances are connected into different networks.
+
+parameters:
+ image:
+ type: string
+ description: Name of image to use for servers
+ flavor:
+ type: string
+ description: Flavor to use for servers
+ external_net:
+ type: string
+ description: ID or name of external network for which floating IP addresses will be allocated
+# server_endpoint:
+# type: string
+# description: Server endpoint address
+ dns_nameservers:
+ type: comma_delimited_list
+ description: DNS nameservers for the subnets
+
+resources:
+ east_private_net:
+ type: OS::Neutron::Net
+ properties:
+ name: {{ unique }}_net_east
+
+ east_private_subnet:
+ type: OS::Neutron::Subnet
+ properties:
+ network_id: { get_resource: east_private_net }
+ cidr: 10.1.0.0/16
+ dns_nameservers: { get_param: dns_nameservers }
+
+ router:
+ type: OS::Neutron::Router
+ properties:
+ external_gateway_info:
+ network: { get_param: external_net }
+
+ router_interface:
+ type: OS::Neutron::RouterInterface
+ properties:
+ router_id: { get_resource: router }
+ subnet_id: { get_resource: east_private_subnet }
+
+ west_private_net:
+ type: OS::Neutron::Net
+ properties:
+ name: {{ unique }}_net_west
+
+ west_private_subnet:
+ type: OS::Neutron::Subnet
+ properties:
+ network_id: { get_resource: west_private_net }
+ cidr: 10.2.0.0/16
+ dns_nameservers: { get_param: dns_nameservers }
+
+ router_interface_2:
+ type: OS::Neutron::RouterInterface
+ properties:
+ router_id: { get_resource: router }
+ subnet_id: { get_resource: west_private_subnet }
+
+ server_security_group:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ rules: [
+ {remote_ip_prefix: 0.0.0.0/0,
+ protocol: tcp,
+ port_range_min: 1,
+ port_range_max: 65535},
+ {remote_ip_prefix: 0.0.0.0/0,
+ protocol: udp,
+ port_range_min: 1,
+ port_range_max: 65535},
+ {remote_ip_prefix: 0.0.0.0/0,
+ protocol: icmp}]
+
+{% for agent in agents.values() %}
+
+ {{ agent.id }}:
+ type: OS::Nova::Server
+ properties:
+ name: {{ agent.id }}
+ image: { get_param: image }
+ flavor: { get_param: flavor }
+ availability_zone: "{{ agent.availability_zone }}"
+ networks:
+ - port: { get_resource: {{ agent.id }}_port }
+
+{% if agent.mode == 'master' %}
+ {{ agent.id }}_port:
+ type: OS::Neutron::Port
+ properties:
+ network_id: { get_resource: east_private_net }
+ fixed_ips:
+ - subnet_id: { get_resource: east_private_subnet }
+ security_groups: [{ get_resource: server_security_group }]
+{% else %}
+ {{ agent.id }}_port:
+ type: OS::Neutron::Port
+ properties:
+ network_id: { get_resource: west_private_net }
+ fixed_ips:
+ - subnet_id: { get_resource: west_private_subnet }
+ security_groups: [{ get_resource: server_security_group }]
+{% endif %}
+
+{% endfor %}
+
+outputs:
+{% for agent in agents.values() %}
+ {{ agent.id }}_instance_name:
+ value: { get_attr: [ {{ agent.id }}, instance_name ] }
+{% if agent.mode == 'master' %}
+ {{ agent.id }}_ip:
+ value: { get_attr: [ {{ agent.id }}, networks, { get_attr: [east_private_net, name] }, 0 ] }
+{% else %}
+ {{ agent.id }}_ip:
+ value: { get_attr: [ {{ agent.id }}, networks, { get_attr: [west_private_net, name] }, 0 ] }
+{% endif %}
+{% endfor %}
diff --git a/tools/os_deploy_tgen/templates/l3_1c_2i.yaml b/tools/os_deploy_tgen/templates/l3_1c_2i.yaml
new file mode 100644
index 00000000..0908843c
--- /dev/null
+++ b/tools/os_deploy_tgen/templates/l3_1c_2i.yaml
@@ -0,0 +1,11 @@
+title: OpenStack L3 East-West Performance
+
+description:
+ In this scenario tdep launches 1 pair of instances, both instances on the
+ same compute node. Instances are connected to one of 2 tenant networks,
+ which are plugged into a single router. The traffic goes from one network
+ to the other (L3 east-west).
+
+deployment:
+ template: l3.hot
+ accommodation: [pair, single_room, best_effort, compute_nodes: 2]
diff --git a/tools/os_deploy_tgen/templates/l3_2c_2i.yaml b/tools/os_deploy_tgen/templates/l3_2c_2i.yaml
new file mode 100644
index 00000000..67aee170
--- /dev/null
+++ b/tools/os_deploy_tgen/templates/l3_2c_2i.yaml
@@ -0,0 +1,11 @@
+title: OpenStack L3 East-West Performance
+
+description:
+ In this scenario tdep launches 1 pair of instances, each instance on its own
+ compute node. Instances are connected to one of 2 tenant networks, which
+ are plugged into a single router. The traffic goes from one network to the
+ other (L3 east-west).
+
+deployment:
+ template: l3.hot
+ accommodation: [pair, single_room, compute_nodes: 2]
diff --git a/tools/os_deploy_tgen/templates/scenario.yaml b/tools/os_deploy_tgen/templates/scenario.yaml
new file mode 100644
index 00000000..c66ec734
--- /dev/null
+++ b/tools/os_deploy_tgen/templates/scenario.yaml
@@ -0,0 +1,44 @@
+name: tdep scenario schema
+type: map
+allowempty: True
+mapping:
+ title:
+ type: str
+ description:
+ type: str
+ deployment:
+ type: map
+ mapping:
+ support_templates:
+ type: seq
+ sequence:
+ - type: map
+ mapping:
+ name:
+ type: str
+ template:
+ type: str
+ env_file:
+ type: str
+ template:
+ type: str
+ env_file:
+ type: str
+ agents:
+ type: any
+ accommodation:
+ type: seq
+ matching: any
+ sequence:
+ - type: str
+ enum: [pair, alone, double_room, single_room, mixed_room, cross_az, best_effort]
+ - type: map
+ mapping:
+ density:
+ type: number
+ compute_nodes:
+ type: number
+ zones:
+ type: seq
+ sequence:
+ - type: str
diff --git a/tools/os_deploy_tgen/utilities/__init__.py b/tools/os_deploy_tgen/utilities/__init__.py
new file mode 100644
index 00000000..56f22a9e
--- /dev/null
+++ b/tools/os_deploy_tgen/utilities/__init__.py
@@ -0,0 +1,17 @@
+# Copyright 2020 Spirent Communications.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Utilities package
+"""
diff --git a/tools/os_deploy_tgen/utilities/utils.py b/tools/os_deploy_tgen/utilities/utils.py
new file mode 100644
index 00000000..5208fd2a
--- /dev/null
+++ b/tools/os_deploy_tgen/utilities/utils.py
@@ -0,0 +1,183 @@
+# Copyright 2020 Spirent Communications, Mirantis
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Utilities for deploying a traffic generator on OpenStack.
+This code is based on OpenStack Shaker.
+"""
+
+import logging
+import os
+import random
+import re
+import uuid
+
+import yaml
+from pykwalify import core as pykwalify_core
+from pykwalify import errors as pykwalify_errors
+
+from conf import settings as S
+
+LOG = logging.getLogger(__name__)
+
+def read_file(file_name, base_dir=''):
+ """
+ Read a file
+ """
+ full_path = os.path.normpath(os.path.join(base_dir, file_name))
+
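+ # fall back from base_dir to the tool root, then to its templates directory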
+ if not os.path.exists(full_path):
+ full_path = os.path.normpath(os.path.join('tools',
+ 'os_deploy_tgen',
+ file_name))
+ if not os.path.exists(full_path):
+ full_path = os.path.normpath(os.path.join('tools',
+ 'os_deploy_tgen',
+ 'templates',
+ file_name))
+ if not os.path.exists(full_path):
+ msg = ('File %s not found by absolute nor by relative path' %
+ file_name)
+ LOG.error(msg)
+ raise IOError(msg)
+
+ try:
+ with open(full_path) as fid:
+ return fid.read()
+ except IOError as exc:
+ LOG.error('Error reading file: %s', exc)
+ raise
+
+
+def write_file(data, file_name, base_dir=''):
+ """
+ Write to file
+ """
+ full_path = os.path.normpath(os.path.join(base_dir, file_name))
+ try:
+ with open(full_path, 'w') as fid:
+ return fid.write(data)
+ except IOError as err:
+ LOG.error('Error writing file: %s', err)
+ raise
+
+
+def read_yaml_file(file_name):
+ """
+ Read Yaml File
+ """
+ raw = read_file(file_name)
+ return read_yaml(raw)
+
+
+def read_yaml(raw):
+ """
+ Read YAML
+ """
+ try:
+ parsed = yaml.safe_load(raw)
+ return parsed
+ except Exception as error:
+ LOG.error('Failed to parse input %(yaml)s in YAML format: %(err)s',
+ dict(yaml=raw, err=error))
+ raise
+
+
+def split_address(address):
+ """
+ Split addresses
+ """
+ try:
+ host, port = address.split(':')
+ except ValueError:
+ LOG.error('Invalid address: %s, "host:port" expected', address)
+ raise
+ return host, port
+
+
+def random_string(length=6):
+ """
+ Generate Random String
+ """
+ return ''.join(random.sample('adefikmoprstuz', length))
+
+
+def make_record_id():
+ """
+ Create record-ID
+ """
+ return str(uuid.uuid4())
+
+def strict(strc):
+ """
+ Strict Check
+ """
+ return re.sub(r'[^\w\d]+', '_', re.sub(r'\(.+\)', '', strc)).lower()
+
+
+def validate_yaml(data, schema):
+ """
+ Validate Yaml
+ """
+ cor = pykwalify_core.Core(source_data=data, schema_data=schema)
+ try:
+ cor.validate(raise_exception=True)
+ except pykwalify_errors.SchemaError as err:
+ raise Exception('File does not conform to schema') from err
+
+
+def pack_openstack_params():
+ """
+ Pack OpenStack parameters
+ """
+ if not S.hasValue('OS_AUTH_URL'):
+ raise Exception(
+ 'OpenStack authentication endpoint is missing')
+
+ params = dict(auth=dict(username=S.getValue('OS_USERNAME'),
+ password=S.getValue('OS_PASSWORD'),
+ auth_url=S.getValue('OS_AUTH_URL')),
+ os_region_name=S.getValue('OS_REGION_NAME'),
+ os_cacert=S.getValue('OS_CA_CERT'),
+ os_insecure=S.getValue('OS_INSECURE'))
+
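+ # optional parameters are packed only when they are set in the configuration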
+ if S.hasValue('OS_PROJECT_NAME'):
+ value = S.getValue('OS_PROJECT_NAME')
+ params['auth']['project_name'] = value
+ if S.hasValue('OS_PROJECT_DOMAIN_NAME'):
+ value = S.getValue('OS_PROJECT_DOMAIN_NAME')
+ params['auth']['project_domain_name'] = value
+ if S.hasValue('OS_USER_DOMAIN_NAME'):
+ value = S.getValue('OS_USER_DOMAIN_NAME')
+ params['auth']['user_domain_name'] = value
+ if S.hasValue('OS_INTERFACE'):
+ value = S.getValue('OS_INTERFACE')
+ params['os_interface'] = value
+ if S.hasValue('OS_API_VERSION'):
+ value = S.getValue('OS_API_VERSION')
+ params['identity_api_version'] = value
+ if S.hasValue('OS_PROFILE'):
+ value = S.getValue('OS_PROFILE')
+ params['os_profile'] = value
+ return params
diff --git a/tools/pkt_gen/ixnet/ixnet.py b/tools/pkt_gen/ixnet/ixnet.py
index 87fb2c65..c7036606 100755
--- a/tools/pkt_gen/ixnet/ixnet.py
+++ b/tools/pkt_gen/ixnet/ixnet.py
@@ -83,6 +83,7 @@ import logging
import os
import re
import csv
+import random
from collections import OrderedDict
from tools.pkt_gen import trafficgen
@@ -129,7 +130,7 @@ def _build_set_cmds(values, prefix='dict set'):
if isinstance(value, list):
value = '{{{}}}'.format(' '.join(str(x) for x in value))
- yield ' '.join([prefix, 'set', key, value]).strip()
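+ # 'prefix' already contains the Tcl 'set' keyword, so it is not repeated here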
+ yield ' '.join([prefix, key, value]).strip()
continue
# tcl doesn't recognise the strings "True" or "False", only "1"
@@ -176,10 +177,9 @@ class IxNet(trafficgen.ITrafficGenerator):
:returns: Output of command, where applicable.
"""
self._logger.debug('%s%s', trafficgen.CMD_PREFIX, cmd)
-
output = self._tclsh.eval(cmd)
- return output.split()
+ return output
def configure(self):
"""Configure system for IxNetwork.
@@ -193,12 +193,16 @@ class IxNet(trafficgen.ITrafficGenerator):
'port': settings.getValue('TRAFFICGEN_IXNET_PORT'),
'user': settings.getValue('TRAFFICGEN_IXNET_USER'),
# IXIA chassis configuration
- 'chassis': settings.getValue('TRAFFICGEN_IXIA_HOST'),
- 'card': settings.getValue('TRAFFICGEN_IXIA_CARD'),
- 'port1': settings.getValue('TRAFFICGEN_IXIA_PORT1'),
- 'port2': settings.getValue('TRAFFICGEN_IXIA_PORT2'),
+ 'chassis_east': settings.getValue('TRAFFICGEN_EAST_IXIA_HOST'),
+ 'card_east': settings.getValue('TRAFFICGEN_EAST_IXIA_CARD'),
+ 'port_east': settings.getValue('TRAFFICGEN_EAST_IXIA_PORT'),
+ 'chassis_west': settings.getValue('TRAFFICGEN_WEST_IXIA_HOST'),
+ 'card_west': settings.getValue('TRAFFICGEN_WEST_IXIA_CARD'),
+ 'port_west': settings.getValue('TRAFFICGEN_WEST_IXIA_PORT'),
'output_dir':
settings.getValue('TRAFFICGEN_IXNET_TESTER_RESULT_DIR'),
+ 'frame_size_list':
+ settings.getValue('TRAFFICGEN_PKT_SIZES'),
}
self._logger.debug('IXIA configuration configuration : %s', self._cfg)
@@ -256,11 +260,12 @@ class IxNet(trafficgen.ITrafficGenerator):
'An error occured when connecting to IxNetwork machine...')
raise RuntimeError('Ixia failed to initialise.')
- self.run_tcl('startRfc2544Test $config $traffic')
+ results_path = self.run_tcl('startRfc2544Test $config $traffic')
if output:
self._logger.critical(
'Failed to start continuous traffic test')
raise RuntimeError('Continuous traffic test failed to start.')
+ return results_path
def stop_cont_traffic(self):
"""See ITrafficGenerator for description
@@ -271,9 +276,12 @@ class IxNet(trafficgen.ITrafficGenerator):
lossrate=0.0):
"""See ITrafficGenerator for description
"""
- self.start_rfc2544_throughput(traffic, tests, duration, lossrate)
-
- return self.wait_rfc2544_throughput()
+ results_file = self.start_rfc2544_throughput(traffic, tests, duration, lossrate)
+ run_result = self.wait_rfc2544_throughput()
+ dest_file_name = 'Traffic_Item_Statistics_' + str(random.randrange(1, 100)) + '.csv'
+ self.copy_results_file(results_file,
+ os.path.join(settings.getValue('RESULTS_PATH'), dest_file_name))
+ return run_result
def start_rfc2544_throughput(self, traffic=None, tests=1, duration=20,
lossrate=0.0):
@@ -313,12 +321,14 @@ class IxNet(trafficgen.ITrafficGenerator):
'An error occured when connecting to IxNetwork machine...')
raise RuntimeError('Ixia failed to initialise.')
- self.run_tcl('startRfc2544Test $config $traffic')
+ results_file = self.run_tcl('startRfc2544Test $config $traffic')
if output:
self._logger.critical(
'Failed to start RFC2544 test')
raise RuntimeError('RFC2544 test failed to start.')
+ return results_file
+
def wait_rfc2544_throughput(self):
"""See ITrafficGenerator for description
"""
@@ -397,12 +407,34 @@ class IxNet(trafficgen.ITrafficGenerator):
return results
output = self.run_tcl('waitForRfc2544Test')
-
- # the run_tcl function will return a list with one element. We extract
- # that one element (a string representation of an IXIA-specific Tcl
- # datatype), parse it to find the path of the results file then parse
- # the results file
+ # the run_tcl function returns a string representation of an IXIA-specific
+ # Tcl datatype; parse it to find the path of the results file, then parse
+ # the results file
- return parse_ixnet_rfc_results(parse_result_string(output[0]))
+ test_result = parse_ixnet_rfc_results(parse_result_string(output))
+ return test_result
+
+ def copy_results_file(self, source_file=None, dest_file=None):
+ """Copy a file from a source address to destination
+ """
+ dest_dict = {}
+ source_dict = {}
+ srcfile = ''
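+        # the source path may arrive whitespace-split into a list by the
+        # Tcl layer; rejoin it into a single string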
+ if isinstance(source_file, list):
+ for i in source_file:
+ srcfile = srcfile + ' ' + i
+ else:
+ srcfile = source_file
+
+ source = (srcfile.replace("\\", "/")).strip()
+ source_dict['source_file'] = {'source_file': '\"{}\"'.format(source)}
+ dest_dict['dest_file'] = {'dest_file': '{}'.format(dest_file)}
+ for cmd in _build_set_cmds(source_dict):
+ self.run_tcl(cmd)
+ for cmd in _build_set_cmds(dest_dict):
+ self.run_tcl(cmd)
+ self.run_tcl('copyFileResults $source_file $dest_file')
+ return dest_dict['dest_file']
def send_rfc2544_back2back(self, traffic=None, tests=1, duration=2,
lossrate=0.0):
@@ -411,9 +443,12 @@ class IxNet(trafficgen.ITrafficGenerator):
# NOTE 2 seconds is the recommended duration for a back 2 back
# test in RFC2544. 50 trials is the recommended number from the
# RFC also.
- self.start_rfc2544_back2back(traffic, tests, duration, lossrate)
-
- return self.wait_rfc2544_back2back()
+ b2b_results_file = self.start_rfc2544_back2back(traffic, tests, duration, lossrate)
+ b2b_run_result = self.wait_rfc2544_back2back()
+ dest_file_name = 'Traffic_Item_Statistics_' + str(random.randrange(1, 100)) + '.csv'
+ self.copy_results_file(b2b_results_file,
+ os.path.join(settings.getValue('RESULTS_PATH'), dest_file_name))
+ return b2b_run_result
def start_rfc2544_back2back(self, traffic=None, tests=1, duration=2,
lossrate=0.0):
@@ -453,15 +488,18 @@ class IxNet(trafficgen.ITrafficGenerator):
            'An error occurred when connecting to IxNetwork machine...')
raise RuntimeError('Ixia failed to initialise.')
- self.run_tcl('startRfc2544Test $config $traffic')
+ results_file = self.run_tcl('startRfc2544Test $config $traffic')
if output:
self._logger.critical(
'Failed to start RFC2544 test')
raise RuntimeError('RFC2544 test failed to start.')
+ return results_file
+
def wait_rfc2544_back2back(self):
"""Wait for results.
"""
+
def parse_result_string(results):
"""Get path to results file from output
@@ -487,7 +525,7 @@ class IxNet(trafficgen.ITrafficGenerator):
# transform path into something useful
path = result_path.group(1).replace('\\', '/')
- path = os.path.join(path, 'iteration.csv')
+ path = os.path.join(path, 'AggregateResults.csv')
path = path.replace(
settings.getValue('TRAFFICGEN_IXNET_TESTER_RESULT_DIR'),
settings.getValue('TRAFFICGEN_IXNET_DUT_RESULT_DIR'))
@@ -511,11 +549,11 @@ class IxNet(trafficgen.ITrafficGenerator):
for row in reader:
# if back2back count higher than previously found, store it
# Note: row[N] here refers to the Nth column of a row
- if float(row[14]) <= self._params['config']['lossrate']:
- if int(row[12]) > \
+ if float(row[10]) <= self._params['config']['lossrate']:
+ if int(float(row[8])) > \
int(results[ResultsConstants.B2B_FRAMES]):
- results[ResultsConstants.B2B_FRAMES] = int(row[12])
- results[ResultsConstants.B2B_FRAME_LOSS_PERCENT] = float(row[14])
+ results[ResultsConstants.B2B_FRAMES] = int(float(row[8]))
+ results[ResultsConstants.B2B_FRAME_LOSS_PERCENT] = float(row[10])
return results
@@ -526,7 +564,7 @@ class IxNet(trafficgen.ITrafficGenerator):
# datatype), parse it to find the path of the results file then parse
# the results file
- return parse_ixnet_rfc_results(parse_result_string(output[0]))
+ return parse_ixnet_rfc_results(parse_result_string(output))
def send_burst_traffic(self, traffic=None, duration=20):
return NotImplementedError('IxNet does not implement send_burst_traffic')
diff --git a/tools/pkt_gen/testcenter/testcenter-rfc2544-rest.py b/tools/pkt_gen/testcenter/testcenter-rfc2544-rest.py
index 1ed12968..8089ef42 100644
--- a/tools/pkt_gen/testcenter/testcenter-rfc2544-rest.py
+++ b/tools/pkt_gen/testcenter/testcenter-rfc2544-rest.py
@@ -22,12 +22,27 @@ TestCenter REST APIs. This test supports Python 3.4
'''
import argparse
+import collections
import logging
import os
import sqlite3
+import time
_LOGGER = logging.getLogger(__name__)
+GENOME_PKTSIZE_ENCODING = {"a": 64, "b": 128, "c": 256, "d": 512,
+ "e": 1024, "f": 1280, "g": 1518, "h": 2112}
+
+
+def genome2weights(sequence):
+ """ Convert genome sequence to packetsize weights"""
+ weights = collections.defaultdict(int)
+ for char in GENOME_PKTSIZE_ENCODING:
+ charcount = sequence.count(char)
+ if charcount:
+ weights[GENOME_PKTSIZE_ENCODING[char]] = charcount
+ return weights
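+# Example: genome2weights("aaabbc") yields {64: 3, 128: 2, 256: 1}, i.e. an
+# IMIX of three parts 64B, two parts 128B and one part 256B frames.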
+
def create_dir(path):
"""Create the directory as specified in path """
@@ -62,6 +77,46 @@ def write_query_results_to_csv(results_path, csv_results_file_prefix,
result_file.write(row.replace(" ", ",") + "\n")
+def write_headers(results_path, file_name, rx_tx):
+ """ Write headers for the live-results files """
+ filec = os.path.join(results_path, file_name + rx_tx)
+ with open(filec, "a") as result_file:
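+        # the column names abbreviate the STC result properties polled
+        # later, e.g. FrCnt=FrameCount, OctRate=OctetRate, AvgLat=AvgLatency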
+ if 'rx' in rx_tx:
+ result_file.write('Time,RxPrt,DrpFrCnt,SeqRnLen,AvgLat,' +
+ 'DrpFrRate,FrCnt,FrRate,MaxLat,MinLat,' +
+ 'OctCnt,OctRate\n')
+ else:
+ result_file.write('Time,StrId,BlkId,FrCnt,FrRate,ERxFrCnt,' +
+ 'OctCnt,OctRate,bitCnt,bitRate\n')
+
+
+def write_rx_live_results_to_file(results_path, file_name, results):
+ """ Write live results from the rx-ports"""
+ filec = os.path.join(results_path, file_name + ".rx")
+ with open(filec, "a") as result_file:
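+        # indices in the format string reorder the values so that RxPort
+        # lands in the RxPrt column written by write_headers()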
+ result_file.write('{0},{3},{1},{2},{4},{5},{6},{7},{8},{9},{10},{11}\n'
+ .format(time.time(), results['DroppedFrameCount'],
+ results['SeqRunLength'], results['RxPort'],
+ results['AvgLatency'],
+ results['DroppedFrameRate'],
+ results['FrameCount'], results['FrameRate'],
+ results['MaxLatency'], results['MinLatency'],
+ results['OctetCount'], results['OctetRate']))
+
+
+def write_tx_live_results_to_file(results_path, file_name, results):
+ """ Write live results from the tx-ports"""
+ filec = os.path.join(results_path, file_name + ".tx")
+ with open(filec, "a") as result_file:
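+        # BlockId ({9}) is written after StreamId to fill the BlkId column
+        # defined by write_headers()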
+ result_file.write('{0},{1},{9},{2},{3},{4},{5},{6},{7},{8}\n'
+ .format(time.time(), results['StreamId'],
+ results['FrameCount'], results['FrameRate'],
+ results['ExpectedRxFrameCount'],
+ results['OctetCount'], results['OctetRate'],
+ results['BitCount'], results['BitRate'],
+ results['BlockId']))
+
+
def positive_int(value):
""" Positive Integer type for Arguments """
ivalue = int(value)
@@ -291,6 +346,22 @@ def main():
action="store_true",
help="latency histogram is required in output?",
dest="latency_histogram")
+ optional_named.add_argument("--imix",
+ required=False,
+ default="",
+ help=("IMIX specification as genome"
+ "Encoding - RFC 6985"),
+ dest="imix")
+ optional_named.add_argument("--live_results",
+ required=False,
+ action="store_true",
+ help="Live Results required?",
+ dest="live_results")
+ optional_named.add_argument("--logfile",
+ required=False,
+ default="./traffic_gen.log",
+ help="Log file to log live results",
+ dest="logfile")
parser.add_argument("-v",
"--verbose",
required=False,
@@ -348,6 +419,10 @@ def main():
_LOGGER.debug("Creating project ...")
project = stc.get("System1", "children-Project")
+ # Configure the Result view
+ resultopts = stc.get('project1', 'children-resultoptions')
+ stc.config(resultopts, {'ResultViewMode': 'BASIC'})
+
# Configure any custom traffic parameters
if args.traffic_custom == "cont":
if args.verbose:
@@ -377,7 +452,7 @@ def main():
# Create the DeviceGenEthIIIfParams object
stc.create("DeviceGenEthIIIfParams",
under=east_device_gen_params,
- attributes={'UseDefaultPhyMac':True})
+ attributes={'UseDefaultPhyMac': True})
# Configuring Ipv4 interfaces
stc.create("DeviceGenIpv4IfParams",
@@ -400,7 +475,7 @@ def main():
# Create the DeviceGenEthIIIfParams object
stc.create("DeviceGenEthIIIfParams",
under=west_device_gen_params,
- attributes={'UseDefaultPhyMac':True})
+ attributes={'UseDefaultPhyMac': True})
# Configuring Ipv4 interfaces
stc.create("DeviceGenIpv4IfParams",
@@ -443,6 +518,19 @@ def main():
gBucketSizeList = stc.get(wLatHist, 'BucketSizeList')
# gLimitSizeList = stc.get(wLatHist, 'LimitList')
+ # IMIX configuration
+ fld = None
+ if args.imix:
+ args.frame_size_list = []
+ weights = genome2weights(args.imix)
+ fld = stc.create('FrameLengthDistribution', under=project)
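+        # drop the default length-distribution slots, then add one weighted
+        # slot per genome-derived frame size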
+ def_slots = stc.get(fld, "children-framelengthdistributionslot")
+ stc.perform("Delete", params={"ConfigList": def_slots})
+ for fsize in weights:
+ stc.create('framelengthdistributionslot', under=fld,
+ attributes={'FixedFrameLength': fsize,
+ 'Weight': weights[fsize]})
+
# Create the RFC 2544 'metric test
if args.metric == "throughput":
if args.verbose:
@@ -460,7 +548,8 @@ def main():
"RateUpperLimit": args.rate_upper_limit_pct,
"Resolution": args.resolution_pct,
"SearchMode": args.search_mode,
- "TrafficPattern": args.traffic_pattern})
+ "TrafficPattern": args.traffic_pattern,
+ "FrameSizeDistributionList": fld})
elif args.metric == "backtoback":
stc.perform("Rfc2544SetupBackToBackTestCommand",
params={"AcceptableFrameLoss":
@@ -520,20 +609,93 @@ def main():
_LOGGER.debug("Apply configuration...")
stc.apply()
+ # Register for the results
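+    # subscribe to per-stream RX/TX summary datasets so the counters can be
+    # polled live while the sequencer is running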
+ hResDataRx = stc.create('ResultDataSet', under='project1')
+ strmBlockList = stc.get('project1', 'children-streamblock')
+ stc.create('ResultQuery', under=hResDataRx, attributes={
+ 'ResultRootList': strmBlockList,
+ 'ConfigClassId': 'StreamBlock',
+ 'ResultClassId': 'RxStreamSummaryResults',
+ 'PropertyIdArray': "RxStreamSummaryResults.RxPort \
+ RxStreamSummaryResults.AvgLatency \
+ RxStreamSummaryResults.BitCount \
+ RxStreamSummaryResults.BitRate \
+ RxStreamSummaryResults.DroppedFrameCount\
+ RxStreamSummaryResults.DroppedFrameRate \
+ RxStreamSummaryResults.FrameCount \
+ RxStreamSummaryResults.FrameRate \
+ RxStreamSummaryResults.MaxLatency \
+ RxStreamSummaryResults.MinLatency \
+ RxStreamSummaryResults.OctetCount \
+ RxStreamSummaryResults.OctetRate \
+ RxStreamSummaryResults.SeqRunLength"})
+ hResDataTx = stc.create('ResultDataSet', under='project1')
+ strmBlockList = stc.get('project1', 'children-streamblock')
+ stc.create('ResultQuery', under=hResDataTx, attributes={
+ 'ResultRootList': strmBlockList,
+ 'ConfigClassId': 'StreamBlock',
+ 'ResultClassId': 'TxStreamResults',
+ 'PropertyIdArray': "TxStreamResults.BlockId \
+ TxStreamResults.BitCount \
+ TxStreamResults.BitRate \
+ TxStreamResults.FrameCount \
+ TxStreamResults.FrameRate \
+ TxStreamResults.OctetCount \
+ TxStreamResults.OctetRate"})
+ stc.perform('ResultDataSetSubscribe', params={'ResultDataSet': hResDataRx})
+ stc.perform('ResultDataSetSubscribe', params={'ResultDataSet': hResDataTx})
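+    # give the subscriptions a moment to populate before the first refresh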
+ time.sleep(3)
+ stc.perform('RefreshResultView', params={'ResultDataSet': hResDataTx})
+ hndListRx = stc.get(hResDataRx, 'ResultHandleList')
+ hndListTx = stc.get(hResDataTx, 'ResultHandleList')
+
if args.verbose:
_LOGGER.debug("Starting the sequencer...")
stc.perform("SequencerStart")
- # Wait for sequencer to finish
- _LOGGER.info(
- "Starting test... Please wait for the test to complete...")
- stc.wait_until_complete()
+ sequencer = stc.get("system1", "children-sequencer")
+ state = stc.get(sequencer, 'State')
+
+ # If Live-results are required, we don't wait for the test to complete
+ if args.live_results:
+ write_headers(args.vsperf_results_dir, args.logfile, '.rx')
+ write_headers(args.vsperf_results_dir, args.logfile, '.tx')
+ while state != 'IDLE':
+ state = stc.get(sequencer, 'State')
+ hndListTx = stc.get(hResDataTx, 'ResultHandleList')
+ if hndListTx:
+ handles = hndListTx.split(' ')
+ for handle in handles:
+ tx_values = stc.get(handle)
+ write_tx_live_results_to_file(args.vsperf_results_dir,
+ args.logfile,
+ tx_values)
+ if hndListRx:
+ handles = hndListRx.split(' ')
+ for handle in handles:
+ rx_values = stc.get(handle)
+ write_rx_live_results_to_file(args.vsperf_results_dir,
+ args.logfile,
+ rx_values)
+ time.sleep(1)
+ # Live results not needed, so just wait!
+ else:
+ # Wait for sequencer to finish
+ _LOGGER.info(
+ "Starting test... Please wait for the test to complete...")
+ stc.wait_until_complete()
+
_LOGGER.info("The test has completed... Saving results...")
# Determine what the results database filename is...
lab_server_resultsdb = stc.get(
"system1.project.TestResultSetting", "CurrentResultFileName")
+ if not lab_server_resultsdb or 'Results' not in lab_server_resultsdb:
+ _LOGGER.info("Failed to find results.")
+ stc.end_session()
+ return
+
if args.verbose:
_LOGGER.debug("The lab server results database is %s",
lab_server_resultsdb)
@@ -654,6 +816,7 @@ def main():
args.results_dir, args.csv_results_file_prefix, resultsdict)
except RuntimeError as e:
+ stc.end_session()
_LOGGER.error(e)
if args.verbose:
diff --git a/tools/pkt_gen/testcenter/testcenter.py b/tools/pkt_gen/testcenter/testcenter.py
index 7afa3d8d..a15c502c 100644
--- a/tools/pkt_gen/testcenter/testcenter.py
+++ b/tools/pkt_gen/testcenter/testcenter.py
@@ -171,6 +171,7 @@ class TestCenter(trafficgen.ITrafficGenerator):
Spirent TestCenter
"""
_logger = logging.getLogger(__name__)
+ _liveresults_file = settings.getValue("TRAFFICGEN_STC_LIVERESULTS_FILE")
def connect(self):
"""
@@ -332,11 +333,13 @@ class TestCenter(trafficgen.ITrafficGenerator):
return self.get_rfc2889_addr_learning_results(filec)
- def get_rfc2544_results(self, filename):
+ def get_rfc2544_results(self, filename, genome=None):
"""
Reads the CSV file and return the results
"""
result = {}
+ if not os.path.exists(filename):
+ return result
with open(filename, "r") as csvfile:
csvreader = csv.DictReader(csvfile)
for row in csvreader:
@@ -367,6 +370,10 @@ class TestCenter(trafficgen.ITrafficGenerator):
row["AverageLatency(us)"]) * 1000
result[ResultsConstants.FRAME_LOSS_PERCENT] = float(
row["PercentLoss"])
+ if genome:
+ result[ResultsConstants.IMIX_GENOME] = genome
+ result[ResultsConstants.IMIX_AVG_FRAMESIZE] = float(
+ row["AvgFrameSize"])
return result
def send_cont_traffic(self, traffic=None, duration=30):
@@ -426,6 +433,19 @@ class TestCenter(trafficgen.ITrafficGenerator):
if traffic['latency_histogram']['type'] == 'Default':
args.append("--latency_histogram")
+ genome = ''
+ if traffic and 'imix' in traffic:
+ if traffic['imix']['enabled']:
+ if traffic['imix']['type'] == 'genome':
+ genome = traffic['imix']['genome']
+ args.append('--imix')
+ args.append(genome)
+
+ if settings.getValue("TRAFFICGEN_STC_LIVE_RESULTS") == "True":
+ args.append('--live_results')
+ args.append('--logfile')
+ args.append(self._liveresults_file)
+
if settings.getValue("TRAFFICGEN_STC_VERBOSE") == "True":
args.append("--verbose")
verbose = True
@@ -440,7 +460,7 @@ class TestCenter(trafficgen.ITrafficGenerator):
if verbose:
self._logger.info("file: %s", filec)
- return self.get_rfc2544_results(filec)
+ return self.get_rfc2544_results(filec, genome)
def send_rfc2544_back2back(self, traffic=None, tests=1, duration=20,
lossrate=0.0):
diff --git a/tools/pkt_gen/trex/trex.py b/tools/pkt_gen/trex/trex_client.py
index 94b793d6..3d6836d8 100644
--- a/tools/pkt_gen/trex/trex.py
+++ b/tools/pkt_gen/trex/trex_client.py
@@ -26,7 +26,7 @@ import re
from collections import OrderedDict
# pylint: disable=unused-import
import netaddr
-import zmq
+#import zmq
from conf import settings
from conf import merge_spec
from core.results.results_constants import ResultsConstants
@@ -35,7 +35,7 @@ try:
# pylint: disable=wrong-import-position, import-error
sys.path.append(settings.getValue('PATHS')['trafficgen']['Trex']['src']['path'])
from trex_stl_lib.api import *
- from trex_stl_lib import trex_stl_exceptions
+ # from trex_stl_lib import trex_stl_exceptions
except ImportError:
# VSPERF performs detection of T-Rex api during testcase initialization. So if
    # T-Rex is requested and the API is not available, it will fail before this code
@@ -160,7 +160,7 @@ class Trex(ITrafficGenerator):
try:
self._stlclient = STLClient(username=self._trex_user, server=self._trex_host_ip_addr,
- verbose_level=0)
+ verbose_level='info')
self._stlclient.connect()
except STLError:
raise RuntimeError('T-Rex: Cannot connect to T-Rex server. Please check if it is '
@@ -351,6 +351,8 @@ class Trex(ITrafficGenerator):
return (stream_1, stream_2, stream_1_lat, stream_2_lat)
+
+ # pylint: disable=too-many-locals, too-many-statements
def generate_traffic(self, traffic, duration, disable_capture=False):
"""The method that generate a stream
"""
@@ -414,7 +416,70 @@ class Trex(ITrafficGenerator):
core_mask=self._stlclient.CORE_MASK_PIN)
except STLError:
self._stlclient.start(ports=my_ports, force=True, duration=duration, mult="{}gbps".format(gbps_speed))
- self._stlclient.wait_on_traffic(ports=my_ports)
+
+ if settings.getValue('TRAFFICGEN_TREX_LIVE_RESULTS'):
+ filec = os.path.join(settings.getValue('RESULTS_PATH'),
+ settings.getValue('TRAFFICGEN_TREX_LC_FILE'))
+ filee = os.path.join(settings.getValue('RESULTS_PATH'),
+ settings.getValue('TRAFFICGEN_TREX_LE_FILE'))
+ pgids = self._stlclient.get_active_pgids()
+ rx_port_0 = 1
+ tx_port_0 = 0
+ rx_port_1 = 0
+ tx_port_1 = 1
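+            # flow-stats group 0 is transmitted on port 0 and received on
+            # port 1; group 1 runs the reverse direction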
+ with open(filec, 'a') as fcp, open(filee, 'a') as fep:
+ fcp.write("ts,rx_port,tx_port,rx_pkts,tx_pkts,rx_pps,tx_pps,"+
+ "rx_bps_num,rx_bps_den,tx_bps_num,tx_bps_den\n")
+ fep.write('ts,dropped,ooo,dup,seq_too_high,seq_too_low\n')
+ while True:
+ tr_status = self._stlclient.is_traffic_active(ports=my_ports)
+ if not tr_status:
+ break
+ time.sleep(1)
+ stats = self._stlclient.get_pgid_stats(pgids['flow_stats'])
+ lat_stats = stats['latency'].get(0)
+ flow_stats_0 = stats['flow_stats'].get(0)
+ flow_stats_1 = stats['flow_stats'].get(1)
+ if flow_stats_0:
+ rx_pkts = flow_stats_0['rx_pkts'][rx_port_0]
+ tx_pkts = flow_stats_0['tx_pkts'][tx_port_0]
+ rx_pps = flow_stats_0['rx_pps'][rx_port_0]
+ tx_pps = flow_stats_0['tx_pps'][tx_port_0]
+ rx_bps = flow_stats_0['rx_bps'][rx_port_0]
+ tx_bps = flow_stats_0['tx_bps'][tx_port_0]
+ rx_bps_l1 = flow_stats_0['rx_bps_l1'][rx_port_0]
+ tx_bps_l1 = flow_stats_0['tx_bps_l1'][tx_port_0]
+ # https://github.com/cisco-system-traffic-generator/\
+ # trex-core/blob/master/scripts/automation/\
+ # trex_control_plane/interactive/trex/examples/\
+ # stl/stl_flow_latency_stats.py
+ fcp.write("{10},{8},{9},{0},{1},{2},{3},{4},{5},{6},{7}\n"
+ .format(rx_pkts, tx_pkts, rx_pps, tx_pps,
+ rx_bps, rx_bps_l1, tx_bps, tx_bps_l1,
+ rx_port_0, tx_port_0, time.time()))
+ if flow_stats_1:
+ rx_pkts = flow_stats_1['rx_pkts'][rx_port_1]
+ tx_pkts = flow_stats_1['tx_pkts'][tx_port_1]
+ rx_pps = flow_stats_1['rx_pps'][rx_port_1]
+ tx_pps = flow_stats_1['tx_pps'][tx_port_1]
+ rx_bps = flow_stats_1['rx_bps'][rx_port_1]
+ tx_bps = flow_stats_1['tx_bps'][tx_port_1]
+ rx_bps_l1 = flow_stats_1['rx_bps_l1'][rx_port_1]
+ tx_bps_l1 = flow_stats_1['tx_bps_l1'][tx_port_1]
+ fcp.write("{10},{8},{9},{0},{1},{2},{3},{4},{5},{6},{7}\n"
+ .format(rx_pkts, tx_pkts, rx_pps, tx_pps,
+ rx_bps, rx_bps_l1, tx_bps, tx_bps_l1,
+ rx_port_1, tx_port_1, time.time()))
+ if lat_stats:
+ drops = lat_stats['err_cntrs']['dropped']
+ ooo = lat_stats['err_cntrs']['out_of_order']
+ dup = lat_stats['err_cntrs']['dup']
+ sth = lat_stats['err_cntrs']['seq_too_high']
+ stl = lat_stats['err_cntrs']['seq_too_low']
+ fep.write('{5},{0},{1},{2},{3},{4}\n'
+ .format(drops, ooo, dup, sth, stl, time.time()))
+ else:
+ self._stlclient.wait_on_traffic(ports=my_ports)
stats = self._stlclient.get_stats(sync_now=True)
# export captured data into pcap file if possible
@@ -529,9 +594,14 @@ class Trex(ITrafficGenerator):
:return: passing stats as dictionary
"""
threshold = settings.getValue('TRAFFICGEN_TREX_RFC2544_TPUT_THRESHOLD')
+ max_repeat = settings.getValue('TRAFFICGEN_TREX_RFC2544_MAX_REPEAT')
+ loss_verification = settings.getValue('TRAFFICGEN_TREX_RFC2544_BINARY_SEARCH_LOSS_VERIFICATION')
+ if loss_verification:
+ self._logger.info("Running Binary Search with Loss Verification")
stats_ok = _EMPTY_STATS
new_params = copy.deepcopy(traffic)
iteration = 1
+ repeat = 0
left = boundaries['left']
right = boundaries['right']
center = boundaries['center']
@@ -555,11 +625,20 @@ class Trex(ITrafficGenerator):
if test_lossrate == 0.0 and new_params['frame_rate'] == traffic['frame_rate']:
return copy.deepcopy(stats)
elif test_lossrate > lossrate:
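+                # with loss verification, a failing rate is re-run up to
+                # max_repeat times before the loss is accepted as real and
+                # the upper bound is lowered; this filters transient drops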
+ if loss_verification:
+ if repeat < max_repeat:
+ repeat += 1
+ iteration += 1
+ continue
+ else:
+ repeat = 0
right = center
center = (left + right) / 2
new_params = copy.deepcopy(traffic)
new_params['frame_rate'] = center
else:
+ if loss_verification:
+ repeat = 0
stats_ok = copy.deepcopy(stats)
left = center
center = (left + right) / 2
diff --git a/vnfs/qemu/qemu.py b/vnfs/qemu/qemu.py
index d3e1b343..fb87ed27 100644
--- a/vnfs/qemu/qemu.py
+++ b/vnfs/qemu/qemu.py
@@ -46,12 +46,14 @@ class IVnfQemu(IVnf):
Initialisation function.
"""
super(IVnfQemu, self).__init__()
-
+ name, ext = os.path.splitext(S.getValue('LOG_FILE_QEMU'))
+ name = name + str(self._number)
+ rename_qemu = "{name}_{uid}{ex}".format(name=name,
+ uid=S.getValue('LOG_TIMESTAMP'),
+ ex=ext)
self._expect = S.getValue('GUEST_PROMPT_LOGIN')[self._number]
self._logger = logging.getLogger(__name__)
- self._logfile = os.path.join(
- S.getValue('LOG_DIR'),
- S.getValue('LOG_FILE_QEMU')) + str(self._number)
+ self._logfile = os.path.join(S.getValue('RESULTS_PATH'), rename_qemu)
self._timeout = S.getValue('GUEST_TIMEOUT')[self._number]
self._monitor = '%s/vm%dmonitor' % ('/tmp', self._number)
# read GUEST NICs configuration and use only defined NR of NICS
@@ -115,10 +117,13 @@ class IVnfQemu(IVnf):
self.GuestCommandFilter.prefix = self._log_prefix
logger = logging.getLogger()
+ name, ext = os.path.splitext(S.getValue('LOG_FILE_GUEST_CMDS'))
+ name = name + str(self._number)
+ rename_gcmd = "{name}_{uid}{ex}".format(name=name,
+ uid=S.getValue('LOG_TIMESTAMP'),
+ ex=ext)
cmd_logger = logging.FileHandler(
- filename=os.path.join(S.getValue('LOG_DIR'),
- S.getValue('LOG_FILE_GUEST_CMDS')) +
- str(self._number))
+ filename=os.path.join(S.getValue('RESULTS_PATH'), rename_gcmd))
cmd_logger.setLevel(logging.DEBUG)
cmd_logger.addFilter(self.GuestCommandFilter())
logger.addHandler(cmd_logger)
@@ -393,6 +398,8 @@ class IVnfQemu(IVnf):
self.execute_and_wait('./testpmd {}'.format(testpmd_params), 60, "Done")
self.execute_and_wait('set fwd ' + self._testpmd_fwd_mode, 20, 'testpmd>')
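+        # map per-queue statistics registers; entries follow testpmd's
+        # 'set stat_qmap (tx|rx) <port_id> <queue_id> <map_index>' syntax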
+ for entry in S.getValue('GUEST_QUEUE_STATS_MAPPING'):
+ self.execute_and_wait('set stat_qmap ' + entry, 2, 'testpmd>')
self.execute_and_wait('start', 20, 'testpmd>')
def _configure_l2fwd(self):
@@ -498,11 +505,16 @@ class IVnfQemu(IVnf):
pci_slots)
elif driver == 'igb_uio_from_src':
# build and insert igb_uio and rebind interfaces to it
- self.execute_and_wait('make RTE_OUTPUT=$RTE_SDK/$RTE_TARGET -C '
- '$RTE_SDK/lib/librte_eal/linuxapp/igb_uio')
+ # from DPDK 18.05 Linux kernel driver changed location
+ # also it is not possible to compile driver without
+ # passing EXTRA_CFLAGS
+ self.execute_and_wait("make RTE_OUTPUT=$RTE_SDK/{0} \
+ EXTRA_CFLAGS=\"-I$RTE_SDK/{1}/include\" \
+ -C $RTE_SDK/kernel/linux/igb_uio"\
+ .format(S.getValue('RTE_TARGET'), S.getValue('RTE_TARGET')))
self.execute_and_wait('modprobe uio')
- self.execute_and_wait('insmod %s/kmod/igb_uio.ko' %
- S.getValue('RTE_TARGET'))
+ self.execute_and_wait('insmod {}/kmod/igb_uio.ko'\
+ .format(S.getValue('RTE_TARGET')))
self.execute_and_wait('./*tools/dpdk*bind.py -b igb_uio ' + pci_slots)
else:
self._logger.error(
diff --git a/vsperf b/vsperf
index 5c1d88a1..773ad759 100755
--- a/vsperf
+++ b/vsperf
@@ -40,11 +40,13 @@ import core.component_factory as component_factory
from core.loader import Loader
from testcases import PerformanceTestCase
from testcases import IntegrationTestCase
+from testcases import K8sPerformanceTestCase
from tools import tasks
from tools import networkcard
from tools import functions
from tools.pkt_gen import trafficgen
from tools.opnfvdashboard import opnfvdashboard
+from tools.os_deploy_tgen import osdt
sys.dont_write_bytecode = True
VERBOSITY_LEVELS = {
@@ -68,6 +70,7 @@ _TEMPLATE_MATRIX = "Performance Matrix\n------------------\n\n"\
"currently run tests. The metric used for comparison is {}.\n\n{}\n\n"
_LOGGER = logging.getLogger()
+logging.getLogger('matplotlib').setLevel(logging.ERROR)
def parse_param_string(values):
"""
@@ -178,6 +181,8 @@ def parse_arguments():
help='list all system vnfs and exit')
parser.add_argument('--list-loadgens', action='store_true',
help='list all background load generators')
+ parser.add_argument('--list-pods', action='store_true',
+ help='list all system pods')
parser.add_argument('--list-settings', action='store_true',
help='list effective settings configuration and exit')
parser.add_argument('exact_test_name', nargs='*', help='Exact names of\
@@ -201,6 +206,8 @@ def parse_arguments():
group.add_argument('--verbosity', choices=list_logging_levels(),
help='debug level')
group.add_argument('--integration', action='store_true', help='execute integration tests')
+ group.add_argument('--k8s', action='store_true', help='execute Kubernetes tests')
+    group.add_argument('--openstack', action='store_true', help='Run VSPERF with OpenStack')
group.add_argument('--trafficgen', help='traffic generator to use')
group.add_argument('--vswitch', help='vswitch implementation to use')
group.add_argument('--fwdapp', help='packet forwarding application to use')
@@ -235,20 +242,29 @@ def parse_arguments():
def configure_logging(level):
"""Configure logging.
"""
- date = datetime.datetime.fromtimestamp(time.time())
- timestamp = date.strftime('%Y-%m-%d_%H-%M-%S')
- settings.setValue('LOG_TIMEMSTAMP', timestamp)
name, ext = os.path.splitext(settings.getValue('LOG_FILE_DEFAULT'))
- rename_default = "{name}_{uid}{ex}".format(name=name, uid=timestamp, ex=ext)
+ rename_default = "{name}_{uid}{ex}".format(name=name,
+ uid=settings.getValue(
+ 'LOG_TIMESTAMP'),
+ ex=ext)
log_file_default = os.path.join(
- settings.getValue('LOG_DIR'), rename_default)
+ settings.getValue('RESULTS_PATH'), rename_default)
+ name, ext = os.path.splitext(settings.getValue('LOG_FILE_HOST_CMDS'))
+ rename_hostcmd = "{name}_{uid}{ex}".format(name=name,
+ uid=settings.getValue(
+ 'LOG_TIMESTAMP'),
+ ex=ext)
log_file_host_cmds = os.path.join(
- settings.getValue('LOG_DIR'), settings.getValue('LOG_FILE_HOST_CMDS'))
+ settings.getValue('RESULTS_PATH'), rename_hostcmd)
+ name, ext = os.path.splitext(settings.getValue('LOG_FILE_TRAFFIC_GEN'))
+ rename_traffic = "{name}_{uid}{ex}".format(name=name,
+ uid=settings.getValue(
+ 'LOG_TIMESTAMP'),
+ ex=ext)
log_file_traffic_gen = os.path.join(
- settings.getValue('LOG_DIR'),
- settings.getValue('LOG_FILE_TRAFFIC_GEN'))
+ settings.getValue('RESULTS_PATH'), rename_traffic)
metrics_file = (settings.getValue('LOG_FILE_INFRA_METRICS_PFX') +
- timestamp + '.log')
+ settings.getValue('LOG_TIMESTAMP') + '.log')
log_file_infra_metrics = os.path.join(settings.getValue('LOG_DIR'),
metrics_file)
@@ -568,6 +584,10 @@ def handle_list_options(args):
print(Loader().get_loadgens_printable())
sys.exit(0)
+ if args['list_pods']:
+ print(Loader().get_pods_printable())
+ sys.exit(0)
+
if args['list_settings']:
print(str(settings))
sys.exit(0)
@@ -585,6 +605,8 @@ def list_testcases(args):
# configure tests
if args['integration']:
testcases = settings.getValue('INTEGRATION_TESTS')
+ elif args['k8s']:
+ testcases = settings.getValue('K8SPERFORMANCE_TESTS')
else:
testcases = settings.getValue('PERFORMANCE_TESTS')
@@ -663,9 +685,26 @@ def main():
settings.load_from_dir(os.path.join(_CURR_DIR, 'conf'))
- # Load non performance/integration tests
+ # define the timestamp to be used by logs and results
+ date = datetime.datetime.fromtimestamp(time.time())
+ timestamp = date.strftime('%Y-%m-%d_%H-%M-%S')
+ settings.setValue('LOG_TIMESTAMP', timestamp)
+
+ # generate results directory name
+    # integration tests use the vswitchd log in test step assertions; ensure
+    # the correct value is set before loading the integration test configuration
+ results_dir = "results_" + timestamp
+ results_path = os.path.join(settings.getValue('LOG_DIR'), results_dir)
+ settings.setValue('RESULTS_PATH', results_path)
+ # create results directory
+ if not os.path.exists(results_path):
+ os.makedirs(results_path)
+
+ # load non performance/integration tests
if args['integration']:
settings.load_from_dir(os.path.join(_CURR_DIR, 'conf/integration'))
+ if args['k8s']:
+ settings.load_from_dir(os.path.join(_CURR_DIR, 'conf/kubernetes'))
# load command line parameters first in case there are settings files
# to be used
@@ -683,6 +722,19 @@ def main():
settings.setValue('mode', args['mode'])
+ if args['k8s']:
+ settings.setValue('K8S', True)
+ else:
+ settings.setValue('K8S', False)
+
+ if args['openstack']:
+ result = osdt.deploy_testvnf()
+ if result:
+            _LOGGER.info('TestVNF successfully deployed on OpenStack')
+ settings.setValue('mode', 'trafficgen')
+ else:
+        _LOGGER.error('Failed to deploy TestVNF on OpenStack')
+ sys.exit(1)
# update paths to trafficgens if required
if settings.getValue('mode') == 'trafficgen':
functions.settings_update_paths()
@@ -692,6 +744,9 @@ def main():
configure_logging(settings.getValue('VERBOSITY'))
+    # report the results directory (created earlier) for CI build support
+    _LOGGER.info("Creating result directory: %s", results_path)
+
# check and fix locale
check_and_set_locale()
@@ -771,16 +826,7 @@ def main():
# for backward compatibility
settings.setValue('WHITELIST_NICS', list(nic['pci'] for nic in nic_list))
- # generate results directory name
- date = datetime.datetime.fromtimestamp(time.time())
- results_dir = "results_" + date.strftime('%Y-%m-%d_%H-%M-%S')
- results_path = os.path.join(settings.getValue('LOG_DIR'), results_dir)
- settings.setValue('RESULTS_PATH', results_path)
- # create results directory
- if not os.path.exists(results_path):
- _LOGGER.info("Creating result directory: %s", results_path)
- os.makedirs(results_path)
# pylint: disable=too-many-nested-blocks
if settings.getValue('mode') == 'trafficgen':
# execute only traffic generator
@@ -805,6 +851,8 @@ def main():
# configure tests
if args['integration']:
testcases = settings.getValue('INTEGRATION_TESTS')
+ elif args['k8s']:
+ testcases = settings.getValue('K8SPERFORMANCE_TESTS')
else:
testcases = settings.getValue('PERFORMANCE_TESTS')
@@ -847,6 +895,8 @@ def main():
if args['integration']:
test = IntegrationTestCase(cfg)
+ elif args['k8s']:
+ test = K8sPerformanceTestCase(cfg)
else:
test = PerformanceTestCase(cfg)
@@ -879,8 +929,9 @@ def main():
output=settings.getValue('XUNIT_DIR'), outsuffix="",
verbosity=0).run(suite)
- if args['opnfvpod']:
- pod_name = args['opnfvpod']
+ if args['opnfvpod'] or settings.getValue('OPNFVPOD'):
+ pod_name = (args['opnfvpod'] if args['opnfvpod'] else
+ settings.getValue('OPNFVPOD'))
installer_name = str(settings.getValue('OPNFV_INSTALLER')).lower()
opnfv_url = settings.getValue('OPNFV_URL')
pkg_list = settings.getValue('PACKAGE_LIST')
diff --git a/vswitches/ovs.py b/vswitches/ovs.py
index 6dbf0cf8..853bef85 100644
--- a/vswitches/ovs.py
+++ b/vswitches/ovs.py
@@ -47,8 +47,12 @@ class IVSwitchOvs(IVSwitch, tasks.Process):
"""See IVswitch for general description
"""
super().__init__()
- self._logfile = os.path.join(settings.getValue('LOG_DIR'),
- settings.getValue('LOG_FILE_VSWITCHD'))
+
+ name, ext = os.path.splitext(settings.getValue('LOG_FILE_VSWITCHD'))
+ rename_vswitchd = "{name}_{uid}{ex}".format(name=name,
+ uid=settings.getValue('LOG_TIMESTAMP'),
+ ex=ext)
+ self._logfile = os.path.join(settings.getValue('RESULTS_PATH'), rename_vswitchd)
self._ovsdb_pidfile_path = os.path.join(settings.getValue('TOOLS')['ovs_var_tmp'],
"ovsdb-server.pid")
self._vswitchd_pidfile_path = os.path.join(settings.getValue('TOOLS')['ovs_var_tmp'],
diff --git a/vswitches/vpp_dpdk_vhost.py b/vswitches/vpp_dpdk_vhost.py
index 5d676a01..f88ed95e 100644
--- a/vswitches/vpp_dpdk_vhost.py
+++ b/vswitches/vpp_dpdk_vhost.py
@@ -37,8 +37,12 @@ class VppDpdkVhost(IVSwitch, tasks.Process):
"""See IVswitch for general description
"""
super().__init__()
- self._logfile = os.path.join(S.getValue('LOG_DIR'),
- S.getValue('LOG_FILE_VPP'))
+ name, ext = os.path.splitext(S.getValue('LOG_FILE_VPP'))
+ rename_vpplf = "{name}_{uid}{ex}".format(name=name,
+ uid=S.getValue(
+ 'LOG_TIMESTAMP'),
+ ex=ext)
+ self._logfile = os.path.join(S.getValue('RESULTS_PATH'), rename_vpplf)
self._expect = r'vpp#'
self._cmd_template = ['sudo', '-E', S.getValue('TOOLS')['vpp']]
self._phy_ports = []
@@ -100,12 +104,20 @@ class VppDpdkVhost(IVSwitch, tasks.Process):
tmpif = iface.split()
if not tmpif:
continue
+ if 'Link' in iface or 'local' in iface:
+ continue
# get PCI address of given interface
output = self.run_vppctl(['show', 'hardware', tmpif[1], 'detail'])
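+        # the detail output contains a line containing 'pci:'; the PCI
+        # address is taken as its 7th whitespace-separated field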
-        match = re.search(r'pci address:\s*([\d:\.]+)', output[0])
+        lines = output[0].split('\n')
+        match = ''
+        for line in lines:
+            if "pci:" in line:
+                match = line.split(' ')[6]
if match:
# normalize PCI address, e.g. 0000:05:10.01 => 0000:05:10.1
-            tmp_pci = match.group(1).split('.')
+            tmp_pci = match.split('.')
tmp_pci[1] = str(int(tmp_pci[1]))
tmpif.append('.'.join(tmp_pci))
else:
@@ -450,4 +462,4 @@ class VppDpdkVhost(IVSwitch, tasks.Process):
def get_ports(self, switch_name):
"""See IVswitch for general description
"""
- raise NotImplementedError()
+ return self._phy_ports
diff --git a/xtesting/baremetal/Dockerfile b/xtesting/baremetal/Dockerfile
new file mode 100644
index 00000000..b78594b5
--- /dev/null
+++ b/xtesting/baremetal/Dockerfile
@@ -0,0 +1,36 @@
+# Copyright 2020 Spirent Communications.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM opnfv/xtesting
+LABEL maintainer="sridhar.rao@spirent.com"
+
+ADD . /src/
+RUN apk add --no-cache --update --virtual .build-deps python3 \
+ py3-pip py3-wheel git python3-dev linux-headers libffi-dev \
+ make openssl-dev gcc musl-dev && \
+ pip3 install --upgrade pip chainmap oslo.utils \
+ paramiko scp && \
+ git init /src && pip3 install /src
+
+ENV DUT_IP_ADDRESS=10.10.120.24
+ENV DUT_USERNAME=opnfv
+ENV DUT_PASSWORD=opnfv
+ENV VSPERF_TESTS=phy2phy_tput
+ENV VSPERF_CONFFILE=/vsperf.conf
+ENV RES_PATH=/tmp
+ENV VSPERF_TRAFFICGEN_MODE=NO
+
+COPY vsperf.conf /vsperf.conf
+COPY testcases.yaml /usr/lib/python3.8/site-packages/xtesting/ci/testcases.yaml
+CMD ["run_tests", "-t", "all"]
diff --git a/xtesting/baremetal/exceptions.py b/xtesting/baremetal/exceptions.py
new file mode 100644
index 00000000..c4e0e097
--- /dev/null
+++ b/xtesting/baremetal/exceptions.py
@@ -0,0 +1,65 @@
+"""
+# Copyright (c) 2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+#pylint: disable=import-error
+from oslo_utils import excutils
+
+
+class VsperfCException(Exception):
+ """Base VSPERF-C Exception.
+
+ To correctly use this class, inherit from it and define
+ a 'message' property. That message will get printf'd
+ with the keyword arguments provided to the constructor.
+
+ Based on NeutronException class.
+ """
+ message = "An unknown exception occurred."
+
+ def __init__(self, **kwargs):
+ try:
+ super(VsperfCException, self).__init__(self.message % kwargs)
+ self.msg = self.message % kwargs
+ except Exception: # pylint: disable=broad-except
+ with excutils.save_and_reraise_exception() as ctxt:
+ if not self.use_fatal_exceptions():
+ ctxt.reraise = False
+ # at least get the core message out if something happened
+ super(VsperfCException, self).__init__(self.message)
+
+ def __str__(self):
+ return self.msg
+
+ def use_fatal_exceptions(self):
+ """Is the instance using fatal exceptions.
+
+ :returns: Always returns False.
+ """ #pylint: disable=no-self-use
+ return False
+
+
+class InvalidType(VsperfCException):
+ """Invalid type"""
+ message = 'Type "%(type_to_convert)s" is not valid'
+
+
+class SSHError(VsperfCException):
+ """ssh error"""
+ message = '%(error_msg)s'
+
+
+class SSHTimeout(SSHError):
+    """ssh timeout"""
diff --git a/xtesting/baremetal/requirements.txt b/xtesting/baremetal/requirements.txt
new file mode 100644
index 00000000..f2da6ad5
--- /dev/null
+++ b/xtesting/baremetal/requirements.txt
@@ -0,0 +1,2 @@
+xtesting
+requests!=2.20.0,!=2.24.0 # Apache-2.0
diff --git a/xtesting/baremetal/setup.cfg b/xtesting/baremetal/setup.cfg
new file mode 100644
index 00000000..9ca38236
--- /dev/null
+++ b/xtesting/baremetal/setup.cfg
@@ -0,0 +1,10 @@
+[metadata]
+name = vsperf
+version = 1
+
+[files]
+packages = .
+
+[entry_points]
+xtesting.testcase =
+ vsperf_controller = vsperf_controller:VsperfBm
diff --git a/xtesting/baremetal/setup.py b/xtesting/baremetal/setup.py
new file mode 100644
index 00000000..fa9d59ac
--- /dev/null
+++ b/xtesting/baremetal/setup.py
@@ -0,0 +1,9 @@
+#!/usr/bin/env python
+
+# pylint: disable=missing-docstring
+
+import setuptools
+
+setuptools.setup(
+ setup_requires=['pbr>=2.0.0'],
+ pbr=True)
diff --git a/xtesting/baremetal/site.yml b/xtesting/baremetal/site.yml
new file mode 100644
index 00000000..06f8c2e2
--- /dev/null
+++ b/xtesting/baremetal/site.yml
@@ -0,0 +1,13 @@
+---
+- hosts:
+ - 127.0.0.1
+ roles:
+ - role: collivier.xtesting
+ project: vsperf
+ repo: 127.0.0.1
+ dport: 5000
+ gerrit:
+ suites:
+ - container: vsperfbm
+ tests:
+ - phy2phy_tput
diff --git a/xtesting/baremetal/ssh.py b/xtesting/baremetal/ssh.py
new file mode 100644
index 00000000..ce560c49
--- /dev/null
+++ b/xtesting/baremetal/ssh.py
@@ -0,0 +1,546 @@
+# Copyright 2020: Mirantis Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#pylint: disable=I,C,R,locally-disabled
+#pylint: disable=import-error,arguments-differ
+
+# this is a modified copy of rally/rally/common/sshutils.py
+
+"""High level ssh library.
+
+Usage examples:
+
+Execute command and get output:
+
+ ssh = sshclient.SSH("root", "example.com", port=33)
+ status, stdout, stderr = ssh.execute("ps ax")
+ if status:
+ raise Exception("Command failed with non-zero status.")
+ print(stdout.splitlines())
+
+Execute command with huge output:
+
+ class PseudoFile(io.RawIOBase):
+ def write(chunk):
+ if "error" in chunk:
+ email_admin(chunk)
+
+ ssh = SSH("root", "example.com")
+ with PseudoFile() as p:
+ ssh.run("tail -f /var/log/syslog", stdout=p, timeout=False)
+
+Execute local script on remote side:
+
+ ssh = sshclient.SSH("user", "example.com")
+
+ with open("~/myscript.sh", "r") as stdin_file:
+ status, out, err = ssh.execute('/bin/sh -s "arg1" "arg2"',
+ stdin=stdin_file)
+
+Upload file:
+
+ ssh = SSH("user", "example.com")
+ # use rb for binary files
+ with open("/store/file.gz", "rb") as stdin_file:
+ ssh.run("cat > ~/upload/file.gz", stdin=stdin_file)
+
+Eventlet:
+
+ eventlet.monkey_patch(select=True, time=True)
+ or
+ eventlet.monkey_patch()
+ or
+ sshclient = eventlet.import_patched("vsperf.ssh")
+
+"""
+from __future__ import print_function
+import io
+import logging
+import os
+import re
+import select
+import socket
+import time
+
+import paramiko
+from chainmap import ChainMap
+from oslo_utils import encodeutils
+from scp import SCPClient
+import six
+
+# When building the container, change this to:
+import exceptions as exceptions
+# otherwise keep it as:
+#import exceptions
+# When building the container, change this to:
+from utils import try_int, NON_NONE_DEFAULT, make_dict_from_map
+# otherwise keep it as:
+#from utils import try_int, NON_NONE_DEFAULT, make_dict_from_map
+
+
+def convert_key_to_str(key):
+ if not isinstance(key, (paramiko.RSAKey, paramiko.DSSKey)):
+ return key
+ k = io.StringIO()
+ key.write_private_key(k)
+ return k.getvalue()
+
+
+# class SSHError(Exception):
+# pass
+#
+#
+# class SSHTimeout(SSHError):
+# pass
+
+
+class SSH(object):
+ """Represent ssh connection."""
+ #pylint: disable=no-member
+
+ SSH_PORT = paramiko.config.SSH_PORT
+ DEFAULT_WAIT_TIMEOUT = 120
+
+ @staticmethod
+ def gen_keys(key_filename, bit_count=2048):
+ rsa_key = paramiko.RSAKey.generate(bits=bit_count, progress_func=None)
+ rsa_key.write_private_key_file(key_filename)
+ print("Writing %s ..." % key_filename)
+ with open('.'.join([key_filename, "pub"]), "w") as pubkey_file:
+ pubkey_file.write(rsa_key.get_name())
+ pubkey_file.write(' ')
+ pubkey_file.write(rsa_key.get_base64())
+ pubkey_file.write('\n')
+
+ @staticmethod
+ def get_class():
+ # must return static class name, anything else
+ # refers to the calling class
+ # i.e. the subclass, not the superclass
+ return SSH
+
+ @classmethod
+ def get_arg_key_map(cls):
+ return {
+ 'user': ('user', NON_NONE_DEFAULT),
+ 'host': ('ip', NON_NONE_DEFAULT),
+ 'port': ('ssh_port', cls.SSH_PORT),
+ 'pkey': ('pkey', None),
+ 'key_filename': ('key_filename', None),
+ 'password': ('password', None),
+ 'name': ('name', None),
+ }
+
+ def __init__(self, user, host, port=None, pkey=None,
+ key_filename=None, password=None, name=None):
+ """Initialize SSH client.
+
+ :param user: ssh username
+ :param host: hostname or ip address of remote ssh server
+ :param port: remote ssh port
+ :param pkey: RSA or DSS private key string or file object
+ :param key_filename: private key filename
+ :param password: password
+ """
+ self.name = name
+ if name:
+ self.log = logging.getLogger(__name__ + '.' + self.name)
+ else:
+ self.log = logging.getLogger(__name__)
+
+ self.wait_timeout = self.DEFAULT_WAIT_TIMEOUT
+ self.user = user
+ self.host = host
+ # everybody wants to debug this in the caller, do it here instead
+ self.log.debug("user:%s host:%s", user, host)
+
+ # we may get text port from YAML, convert to int
+ self.port = try_int(port, self.SSH_PORT)
+ self.pkey = self._get_pkey(pkey) if pkey else None
+ self.password = password
+ self.key_filename = key_filename
+ self._client = False
+        # paramiko loglevel debug will output ssh protocol debug
+ # we don't ever really want that unless we are debugging paramiko
+ # ssh issues
+ if os.environ.get("PARAMIKO_DEBUG", "").lower() == "true":
+ logging.getLogger("paramiko").setLevel(logging.DEBUG)
+ else:
+ logging.getLogger("paramiko").setLevel(logging.WARN)
+
+ @classmethod
+ def args_from_node(cls, node, overrides=None, defaults=None):
+ if overrides is None:
+ overrides = {}
+ if defaults is None:
+ defaults = {}
+
+ params = ChainMap(overrides, node, defaults)
+ return make_dict_from_map(params, cls.get_arg_key_map())
+
+ @classmethod
+ def from_node(cls, node, overrides=None, defaults=None):
+ return cls(**cls.args_from_node(node, overrides, defaults))
+
+ def _get_pkey(self, key):
+ if isinstance(key, six.string_types):
+ key = six.moves.StringIO(key)
+ errors = []
+ for key_class in (paramiko.rsakey.RSAKey, paramiko.dsskey.DSSKey):
+ try:
+ return key_class.from_private_key(key)
+ except paramiko.SSHException as e:
+ errors.append(e)
+ raise exceptions.SSHError(error_msg='Invalid pkey: %s' % errors)
+
+ @property
+ def is_connected(self):
+ return bool(self._client)
+
+ def _get_client(self):
+ if self.is_connected:
+ return self._client
+ try:
+ self._client = paramiko.SSHClient()
+ self._client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ self._client.connect(self.host, username=self.user,
+ port=self.port, pkey=self.pkey,
+ key_filename=self.key_filename,
+ password=self.password,
+ allow_agent=False, look_for_keys=False,
+ timeout=1)
+ return self._client
+ except Exception as e:
+ message = ("Exception %(exception_type)s was raised "
+ "during connect. Exception value is: %(exception)r" %
+ {"exception": e, "exception_type": type(e)})
+ self._client = False
+ raise exceptions.SSHError(error_msg=message)
+
+ def _make_dict(self):
+ return {
+ 'user': self.user,
+ 'host': self.host,
+ 'port': self.port,
+ 'pkey': self.pkey,
+ 'key_filename': self.key_filename,
+ 'password': self.password,
+ 'name': self.name,
+ }
+
+ def copy(self):
+ return self.get_class()(**self._make_dict())
+
+ def close(self):
+ if self._client:
+ self._client.close()
+ self._client = False
+
+ def run(self, cmd, stdin=None, stdout=None, stderr=None,
+ raise_on_error=True, timeout=3600,
+ keep_stdin_open=False, pty=False):
+ """Execute specified command on the server.
+
+ :param cmd: Command to be executed.
+ :type cmd: str
+ :param stdin: Open file or string to pass to stdin.
+ :param stdout: Open file to connect to stdout.
+ :param stderr: Open file to connect to stderr.
+        :param raise_on_error: If False then the exit code will be returned.
+                               If True then an exception will be raised on a
+                               non-zero exit code.
+ :param timeout: Timeout in seconds for command execution.
+ Default 1 hour. No timeout if set to 0.
+ :param keep_stdin_open: don't close stdin on empty reads
+ :type keep_stdin_open: bool
+ :param pty: Request a pseudo terminal for this connection.
+ This allows passing control characters.
+ Default False.
+ :type pty: bool
+ """
+
+ client = self._get_client()
+
+ if isinstance(stdin, six.string_types):
+ stdin = six.moves.StringIO(stdin)
+
+ return self._run(client, cmd, stdin=stdin, stdout=stdout,
+ stderr=stderr, raise_on_error=raise_on_error,
+ timeout=timeout,
+ keep_stdin_open=keep_stdin_open, pty=pty)
+
+ def _run(self, client, cmd, stdin=None, stdout=None, stderr=None,
+ raise_on_error=True, timeout=3600,
+ keep_stdin_open=False, pty=False):
+
+ transport = client.get_transport()
+ session = transport.open_session()
+ if pty:
+ session.get_pty()
+ session.exec_command(cmd)
+ start_time = time.time()
+
+ # encode on transmit, decode on receive
+ data_to_send = encodeutils.safe_encode("", incoming='utf-8')
+ stderr_data = None
+
+ # If we have data to be sent to stdin then `select' should also
+ # check for stdin availability.
+ if stdin and not stdin.closed:
+ writes = [session]
+ else:
+ writes = []
+
+ while True:
+ # Block until data can be read/write.
+ e = select.select([session], writes, [session], 1)[2]
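+            # keep only the error list from select(); read/write readiness
+            # is checked below via the session object itself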
+
+ if session.recv_ready():
+ data = encodeutils.safe_decode(session.recv(4096), 'utf-8')
+ self.log.debug("stdout: %r", data)
+ if stdout is not None:
+ stdout.write(data)
+ continue
+
+ if session.recv_stderr_ready():
+ stderr_data = encodeutils.safe_decode(
+ session.recv_stderr(4096), 'utf-8')
+ self.log.debug("stderr: %r", stderr_data)
+ if stderr is not None:
+ stderr.write(stderr_data)
+ continue
+
+ if session.send_ready():
+ if stdin is not None and not stdin.closed:
+ if not data_to_send:
+ stdin_txt = stdin.read(4096)
+ if stdin_txt is None:
+ stdin_txt = ''
+ data_to_send = encodeutils.safe_encode(
+ stdin_txt, incoming='utf-8')
+ if not data_to_send:
+ # we may need to keep stdin open
+ if not keep_stdin_open:
+ stdin.close()
+ session.shutdown_write()
+ writes = []
+ if data_to_send:
+ sent_bytes = session.send(data_to_send)
+ # LOG.debug("sent: %s" % data_to_send[:sent_bytes])
+ data_to_send = data_to_send[sent_bytes:]
+
+ if session.exit_status_ready():
+ break
+
+ if timeout and (time.time() - timeout) > start_time:
+ message = ('Timeout executing command %(cmd)s on host %(host)s'
+ % {"cmd": cmd, "host": self.host})
+ raise exceptions.SSHTimeout(error_msg=message)
+ if e:
+ raise exceptions.SSHError(error_msg='Socket error')
+
+ exit_status = session.recv_exit_status()
+ if exit_status != 0 and raise_on_error:
+ fmt = "Command '%(cmd)s' failed with exit_status %(status)d."
+ details = fmt % {"cmd": cmd, "status": exit_status}
+ if stderr_data:
+ details += " Last stderr data: '%s'." % stderr_data
+ raise exceptions.SSHError(error_msg=details)
+ return exit_status
+
+ def execute(self, cmd, stdin=None, timeout=3600, raise_on_error=False):
+ """Execute the specified command on the server.
+
+ :param cmd: (str) Command to be executed.
+ :param stdin: (StringIO) Open file to be sent on process stdin.
+ :param timeout: (int) Timeout for execution of the command.
+        :param raise_on_error: (bool) If True, then an SSHError will be raised
+                               on a non-zero exit code.
+
+ :returns: tuple (exit_status, stdout, stderr)
+ """
+ stdout = six.moves.StringIO()
+ stderr = six.moves.StringIO()
+
+ exit_status = self.run(cmd, stderr=stderr,
+ stdout=stdout, stdin=stdin,
+ timeout=timeout, raise_on_error=raise_on_error)
+ stdout.seek(0)
+ stderr.seek(0)
+ return exit_status, stdout.read(), stderr.read()
+
+ def wait(self, timeout=None, interval=1):
+ """Wait for the host will be available via ssh."""
+ if timeout is None:
+ timeout = self.wait_timeout
+
+ end_time = time.time() + timeout
+ while True:
+ try:
+ return self.execute("uname")
+ except (socket.error, exceptions.SSHError) as e:
+ self.log.debug("Ssh is still unavailable: %r", e)
+ time.sleep(interval)
+ if time.time() > end_time:
+ raise exceptions.SSHTimeout(
+ error_msg='Timeout waiting for "%s"' % self.host)
+
+ def put(self, files, remote_path=b'.', recursive=False):
+ client = self._get_client()
+
+ with SCPClient(client.get_transport()) as scp:
+ scp.put(files, remote_path, recursive)
+
+ def get(self, remote_path, local_path='/tmp/', recursive=True):
+ client = self._get_client()
+
+ with SCPClient(client.get_transport()) as scp:
+ scp.get(remote_path, local_path, recursive)
+
+ # keep shell running in the background, e.g. screen
+ def send_command(self, command):
+ client = self._get_client()
+ client.exec_command(command, get_pty=True)
+
+ def _put_file_sftp(self, localpath, remotepath, mode=None):
+ client = self._get_client()
+
+ with client.open_sftp() as sftp:
+ sftp.put(localpath, remotepath)
+ if mode is None:
+ mode = 0o777 & os.stat(localpath).st_mode
+ sftp.chmod(remotepath, mode)
+
+ TILDE_EXPANSIONS_RE = re.compile("(^~[^/]*/)?(.*)")
+
+ def _put_file_shell(self, localpath, remotepath, mode=None):
+        # quote to stop word splitting
+ tilde, remotepath = self.TILDE_EXPANSIONS_RE.match(remotepath).groups()
+ if not tilde:
+ tilde = ''
+ cmd = ['cat > %s"%s"' % (tilde, remotepath)]
+ if mode is not None:
+ # use -- so no options
+ cmd.append('chmod -- 0%o %s"%s"' % (mode, tilde, remotepath))
+
+ with open(localpath, "rb") as localfile:
+ # only chmod on successful cat
+ self.run("&& ".join(cmd), stdin=localfile)
+
+ def put_file(self, localpath, remotepath, mode=None):
+ """Copy specified local file to the server.
+
+ :param localpath: Local filename.
+ :param remotepath: Remote filename.
+ :param mode: Permissions to set after upload
+ """
+ try:
+ self._put_file_sftp(localpath, remotepath, mode=mode)
+ except (paramiko.SSHException, socket.error):
+ self._put_file_shell(localpath, remotepath, mode=mode)
+
+ def put_file_obj(self, file_obj, remotepath, mode=None):
+ client = self._get_client()
+
+ with client.open_sftp() as sftp:
+ sftp.putfo(file_obj, remotepath)
+ if mode is not None:
+ sftp.chmod(remotepath, mode)
+
+ def get_file_obj(self, remotepath, file_obj):
+ client = self._get_client()
+
+ with client.open_sftp() as sftp:
+ sftp.getfo(remotepath, file_obj)
+
+
+class AutoConnectSSH(SSH):
+
+ @classmethod
+ def get_arg_key_map(cls):
+ arg_key_map = super(AutoConnectSSH, cls).get_arg_key_map()
+ arg_key_map['wait'] = ('wait', True)
+ return arg_key_map
+
+ # always wait or we will get OpenStack SSH errors
+ def __init__(self, user, host, port=None, pkey=None,
+ key_filename=None, password=None, name=None, wait=True):
+ super(AutoConnectSSH, self).__init__(user, host, port, pkey,
+ key_filename, password, name)
+ if wait and wait is not True:
+ self.wait_timeout = int(wait)
+
+ def _make_dict(self):
+ data = super(AutoConnectSSH, self)._make_dict()
+ data.update({
+ 'wait': self.wait_timeout
+ })
+ return data
+
+ def _connect(self):
+ if not self.is_connected:
+ interval = 1
+ timeout = self.wait_timeout
+
+ end_time = time.time() + timeout
+ while True:
+ try:
+ return self._get_client()
+ except (socket.error, exceptions.SSHError) as e:
+ self.log.debug("Ssh is still unavailable: %r", e)
+ time.sleep(interval)
+ if time.time() > end_time:
+ raise exceptions.SSHTimeout(
+ error_msg='Timeout waiting for "%s"' % self.host)
+
+ def drop_connection(self):
+ """ Don't close anything, just force creation of a new client """
+ self._client = False
+
+ def execute(self, cmd, stdin=None, timeout=3600, raise_on_error=False):
+ self._connect()
+ return super(AutoConnectSSH, self).execute(cmd, stdin, timeout,
+ raise_on_error)
+
+ def run(self, cmd, stdin=None, stdout=None, stderr=None,
+ raise_on_error=True, timeout=3600,
+ keep_stdin_open=False, pty=False):
+ self._connect()
+ return super(AutoConnectSSH, self).run(cmd, stdin, stdout,
+ stderr, raise_on_error,
+ timeout, keep_stdin_open, pty)
+
+ def put(self, files, remote_path=b'.', recursive=False):
+ self._connect()
+ return super(AutoConnectSSH, self).put(files, remote_path, recursive)
+
+ def put_file(self, local_path, remote_path, mode=None):
+ self._connect()
+ return super(AutoConnectSSH, self).put_file(local_path,
+ remote_path, mode)
+
+ def put_file_obj(self, file_obj, remote_path, mode=None):
+ self._connect()
+ return super(AutoConnectSSH, self).put_file_obj(file_obj,
+ remote_path, mode)
+
+ def get_file_obj(self, remote_path, file_obj):
+ self._connect()
+ return super(AutoConnectSSH, self).get_file_obj(remote_path, file_obj)
+
+ @staticmethod
+ def get_class():
+ # must return static class name,
+ # anything else refers to the calling class
+ # i.e. the subclass, not the superclass
+ return AutoConnectSSH
diff --git a/xtesting/baremetal/testcases.yaml b/xtesting/baremetal/testcases.yaml
new file mode 100644
index 00000000..91cef451
--- /dev/null
+++ b/xtesting/baremetal/testcases.yaml
@@ -0,0 +1,16 @@
+---
+tiers:
+ -
+ name: vsperfbm
+ order: 1
+ description: ''
+ testcases:
+ -
+ case_name: phy2phy_tput
+ project_name: vsperf
+ criteria: 100
+ blocking: true
+ clean_flag: false
+ description: ''
+ run:
+ name: vsperf_controller
diff --git a/xtesting/baremetal/utils.py b/xtesting/baremetal/utils.py
new file mode 100644
index 00000000..d945381e
--- /dev/null
+++ b/xtesting/baremetal/utils.py
@@ -0,0 +1,41 @@
+"""
+# Copyright 2013: Mirantis Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+
+
+NON_NONE_DEFAULT = object()
+
+
+def get_key_with_default(data, key, default=NON_NONE_DEFAULT):
+ """get default key"""
+ value = data.get(key, default)
+ if value is NON_NONE_DEFAULT:
+ raise KeyError(key)
+ return value
+
+
+def make_dict_from_map(data, key_map):
+ """mapping dict"""
+ return {dest_key: get_key_with_default(data, src_key, default)
+ for dest_key, (src_key, default) in key_map.items()}
+
+def try_int(s, *args):
+ """Convert to integer if possible."""
+ #pylint: disable=invalid-name
+ try:
+ return int(s)
+ except (TypeError, ValueError):
+ return args[0] if args else s
diff --git a/xtesting/baremetal/vsperf.conf b/xtesting/baremetal/vsperf.conf
new file mode 100644
index 00000000..8ed7115f
--- /dev/null
+++ b/xtesting/baremetal/vsperf.conf
@@ -0,0 +1,21 @@
+VSWITCH_BRIDGE_NAME = 'vsperf-br0'
+WHITELIST_NICS = ['02:00.0', '02:00.1']
+TRAFFICGEN = 'Trex'
+TRAFFICGEN_TREX_HOST_IP_ADDR = '10.10.120.25'
+TRAFFICGEN_TREX_USER = 'root'
+TRAFFICGEN_TREX_BASE_DIR = '/root/trex_2.86/'
+TRAFFICGEN_TREX_LINE_SPEED_GBPS = '10'
+TRAFFICGEN_TREX_PORT1 = '0000:81:00.0'
+TRAFFICGEN_TREX_PORT2 = '0000:81:00.1'
+TRAFFICGEN_TREX_PROMISCUOUS = False
+TRAFFICGEN_DURATION=1
+TRAFFICGEN_LOSSRATE=0
+TRAFFICGEN_RFC2544_TESTS=10
+#TRAFFICGEN_PKT_SIZES=(64,128,256,512,1024,1280,1518)
+TRAFFICGEN_PKT_SIZES=(1024,)
+GUEST_TESTPMD_FWD_MODE = ['io']
+GUEST_IMAGE = ['/home/opnfv/vnfs/vloop-vnf-ubuntu-18.04_20180920.qcow2']
+TRAFFICGEN_TREX_LATENCY_PPS = 1000
+TRAFFICGEN_TREX_RFC2544_BINARY_SEARCH_LOSS_VERIFICATION = True
+TRAFFICGEN_TREX_RFC2544_MAX_REPEAT = 2
+
diff --git a/xtesting/baremetal/vsperf_controller.py b/xtesting/baremetal/vsperf_controller.py
new file mode 100644
index 00000000..91bad766
--- /dev/null
+++ b/xtesting/baremetal/vsperf_controller.py
@@ -0,0 +1,194 @@
+#!/usr/bin/env python
+
+# Copyright 2018-19 Spirent Communications.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+VSPERF-controller
+"""
+
+# Fetch environment variables for the controller. You can configure or
+# modify the list.env file to set these variables.
+
+#pylint: disable=global-statement,no-else-continue
+#pylint: disable=too-many-branches
+
+import os
+import sys
+from stat import S_ISDIR
+import time
+import paramiko
+from xtesting.core import testcase
+import ssh
+
+TIMER = float()
+
+
+DUT_IP = os.getenv('DUT_IP_ADDRESS')
+DUT_USER = os.getenv('DUT_USERNAME')
+DUT_PWD = os.getenv('DUT_PASSWORD')
+RES_PATH = os.getenv('RES_PATH')
+
+VSPERF_TEST = os.getenv('VSPERF_TESTS')
+VSPERF_CONF = os.getenv('VSPERF_CONFFILE')
+VSPERF_TRAFFICGEN_MODE = str(os.getenv('VSPERF_TRAFFICGEN_MODE'))
+
+DUT_CLIENT = None
+TGEN_CLIENT = None
+
+RECV_BYTES = 4096
+
+def host_connect():
+ """
+ Handle host connectivity to DUT
+ """
+ global DUT_CLIENT
+ DUT_CLIENT = ssh.SSH(host=DUT_IP, user=DUT_USER, password=DUT_PWD)
+ print("DUT Successfully Connected ..............................................[OK] \n ")
+
+def upload_test_config_file():
+ """
+ Upload the test config file to the DUT.
+ """
+ #localpath = '/usr/src/app/vsperf/vsperf.conf'
+ if VSPERF_CONF:
+ localpath = VSPERF_CONF
+ else:
+ localpath = 'vsperf.conf'
+ if not os.path.exists(localpath):
+ print("VSPERF Test config file does not exist.......................[Failed]")
+ return
+ remotepath = '~/vsperf.conf'
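+ # remotepath[2:] strips the leading '~/' so only the bare file name is
+ # matched against the find output below.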
+ check_test_config_cmd = "find ~/ -maxdepth 1 -name '{}'".format(
+ remotepath[2:])
+ check_test_result = str(DUT_CLIENT.execute(check_test_config_cmd)[1])
+ if remotepath[2:] in check_test_result:
+ DUT_CLIENT.run("rm -f {}".format(remotepath[2:]))
+ DUT_CLIENT.put_file(localpath, remotepath)
+ check_test_config_cmd_1 = "find ~/ -maxdepth 1 -name '{}'".format(
+ remotepath[2:])
+ print(check_test_config_cmd_1)
+ check_test_result_1 = str(DUT_CLIENT.execute(check_test_config_cmd_1)[1])
+ if remotepath[2:] in check_test_result_1:
+ print(
+ "Test Configuration File Uploaded on DUT-Host.............................[OK] \n ")
+ else:
+ print("VSPERF Test config file upload failed.....................................[Critical]")
+
+def run_vsperf_test():
+ """
+ Here we will perform the actual vsperf test
+ """
+ global TIMER
+ rmv_cmd = "cd /mnt/huge && echo {} | sudo -S rm -rf *".format(DUT_PWD)
+ DUT_CLIENT.run(rmv_cmd, pty=True)
+ cmd = "source ~/vsperfenv/bin/activate ; "
+ #cmd = "scl enable python33 bash ; "
+ cmd += "cd vswitchperf && "
+ cmd += "./vsperf "
+ cmd += "--conf-file ~/vsperf.conf "
+ if "yes" in VSPERF_TRAFFICGEN_MODE.lower():
+ cmd += "--mode trafficgen "
+ vsperf_test_list = VSPERF_TEST.split(",")
+ print(vsperf_test_list)
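+ # Each requested test is launched as a separate vsperf invocation, with
+ # the test name appended as a positional argument.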
+ for test in vsperf_test_list:
+ atest = cmd
+ atest += test
+ DUT_CLIENT.run(atest, pty=True)
+ print(
+ "Test Successfully Completed................................................[OK]\n ")
+
+def get_result():
+ """
+ Get Latest results from DUT
+ """
+ stdout_data = []
+ stderr_data = []
+ client = paramiko.Transport((DUT_IP, 22))
+ client.connect(username=DUT_USER, password=DUT_PWD)
+ session = client.open_channel(kind='session')
+ directory_to_download = ''
+ session.exec_command('ls /tmp | grep results')
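+ # Drain the channel until the command exits: recv_ready() and
+ # recv_stderr_ready() report buffered output, and exit_status_ready()
+ # signals completion.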
+ if not directory_to_download:
+ while True:
+ if session.recv_ready():
+ stdout_data.append(session.recv(RECV_BYTES))
+ if session.recv_stderr_ready():
+ stderr_data.append(session.recv_stderr(RECV_BYTES))
+ if session.exit_status_ready():
+ break
+ if stdout_data:
+ line = stdout_data[0]
+ filenames = line.decode("utf-8").rstrip("\n").split("\n")
+ filenames = sorted(filenames)
+ latest = filenames[-1]
+ directory_to_download = os.path.join('/tmp', latest)
+ stdout_data = []
+ stderr_data = []
+ if directory_to_download:
+ destination = os.path.join(RES_PATH,
+ os.path.basename(os.path.normpath(
+ directory_to_download)))
+ os.makedirs(destination)
+ print(directory_to_download)
+ # Begin the actual download
+ sftp = paramiko.SFTPClient.from_transport(client)
+ def sftp_walk(remotepath):
+ # os.walk-style generator over the remote tree via SFTP
+ path = remotepath
+ files = []
+ folders = []
+ for fle in sftp.listdir_attr(remotepath):
+ if S_ISDIR(fle.st_mode):
+ folders.append(fle.filename)
+ else:
+ files.append(fle.filename)
+ if files:
+ yield path, files
+ # Recurse so files in nested result directories are found too
+ for folder in folders:
+ for walked in sftp_walk(os.path.join(remotepath, folder)):
+ yield walked
+ # Filewise download happens here
+ for path, files in sftp_walk(directory_to_download):
+ for fil in files:
+ remote = os.path.join(path, fil)
+ local = os.path.join(destination, fil)
+ print(local)
+ sftp.get(remote, local)
+ # Ready to work with downloaded data, close the session and client.
+ session.close()
+ client.close()
+
+class VsperfBm(testcase.TestCase):
+ """
+ VSPERF-Xtesting Baremetal Control Class
+ """
+ def run(self, **kwargs):
+ global RES_PATH
+ try:
+ self.start_time = time.time()
+ self.result = 100
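+ # result is compared against the testcase criteria (100 in
+ # testcases.yaml): it stays 100 on success and drops to 0 on failure.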
+ os.makedirs(self.res_dir, exist_ok=True)
+ RES_PATH = self.res_dir
+ if DUT_IP:
+ host_connect()
+ if not DUT_CLIENT:
+ print('Failed to connect to DUT ...............[Critical]')
+ self.result = 0
+ else:
+ upload_test_config_file()
+ run_vsperf_test()
+ get_result()
+ self.stop_time = time.time()
+ except Exception: # pylint: disable=broad-except
+ print("Unexpected error:", sys.exc_info()[0])
+ self.result = 0
+ self.stop_time = time.time()
diff --git a/xtesting/openstack/Dockerfile b/xtesting/openstack/Dockerfile
new file mode 100644
index 00000000..2e613872
--- /dev/null
+++ b/xtesting/openstack/Dockerfile
@@ -0,0 +1,61 @@
+# Copyright 2020 Spirent Communications.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM opnfv/xtesting
+LABEL maintainer="sridhar.rao@spirent.com"
+
+# Install required packages
+RUN apk add --no-cache --update python3 python3-dev \
+ py3-wheel py3-pip git openssh-client python3-tkinter \
+ tk gcc musl-dev libffi-dev openssl-dev make
+
+# Clone VSPERF.
+RUN git clone https://gerrit.opnfv.org/gerrit/vswitchperf /vswitchperf
+
+#
+# Remove unnecessary python packages.
+#
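+# (Each sed prefixes the matching requirements.txt line with '# ',
+# commenting that dependency out of the pip install below.)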
+RUN cd /vswitchperf && \
+ sed -e '/numpy/ s/^#*/#\ /' -i requirements.txt && \
+ sed -e '/matplotlib/ s/^#*/#\ /' -i requirements.txt && \
+ sed -e '/pycrypto/ s/^#*/#\ /' -i requirements.txt && \
+ sed -e '/pypsi/ s/^#*/#\ /' -i requirements.txt && \
+ sed -e '/paramiko/ s/^#*/#\ /' -i requirements.txt && \
+ sed -e '/pyzmq/ s/^#*/#\ /' -i requirements.txt && \
+ sed -e '/kubernetes/ s/^#*/#\ /' -i requirements.txt
+
+#
+# Build VSPERF
+#
+RUN cd /vswitchperf && \
+ pip3 install --ignore-installed distlib -r requirements.txt && \
+ cd /vswitchperf/src/trex && make
+
+# Include vsperf into Path.
+ENV PATH "$PATH:/vswitchperf"
+
+COPY vsperfostack.conf /vsperfostack.conf
+
+# Required step for Xtesting
+ADD . /src/
+RUN git init /src && pip3 install /src
+
+# Copy Testcase
+COPY testcases.yaml /usr/lib/python3.8/site-packages/xtesting/ci/testcases.yaml
+
+# Set working directory - This helps to resolve path to templates.
+WORKDIR /vswitchperf
+
+# Default command: run all configured test tiers
+CMD ["run_tests", "-t", "all"]
diff --git a/xtesting/openstack/cloud.rc b/xtesting/openstack/cloud.rc
new file mode 100644
index 00000000..3f867743
--- /dev/null
+++ b/xtesting/openstack/cloud.rc
@@ -0,0 +1,10 @@
+export OS_AUTH_URL=http://10.10.180.21/identity
+export OS_PROJECT_ID=0440a230a799460facec0d09dde64497
+export OS_PROJECT_NAME="admin"
+export OS_USER_DOMAIN_NAME="Default"
+export OS_PROJECT_DOMAIN_ID="default"
+export OS_USERNAME="admin"
+export OS_PASSWORD="admin123"
+export OS_REGION_NAME="RegionOne"
+export OS_INTERFACE=public
+export OS_IDENTITY_API_VERSION=3
diff --git a/xtesting/openstack/setup.cfg b/xtesting/openstack/setup.cfg
new file mode 100644
index 00000000..4b98992a
--- /dev/null
+++ b/xtesting/openstack/setup.cfg
@@ -0,0 +1,10 @@
+[metadata]
+name = vsperfostack
+version = 1
+
+[files]
+packages = .
+
+[entry_points]
+xtesting.testcase =
+ vsperfostack = vsperfostack:VsperfOstack
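+
+# The xtesting.testcase entry point is what lets testcases.yaml resolve
+# run.name 'vsperfostack' to the VsperfOstack class at runtime.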
diff --git a/xtesting/openstack/setup.py b/xtesting/openstack/setup.py
new file mode 100644
index 00000000..1394cdfe
--- /dev/null
+++ b/xtesting/openstack/setup.py
@@ -0,0 +1,9 @@
+#!/usr/bin/env python
+
+# pylint: disable=missing-docstring
+
+import setuptools
+
+setuptools.setup(
+ setup_requires=['pbr>=2.0.0'],
+ pbr=True)
diff --git a/xtesting/openstack/site.yml b/xtesting/openstack/site.yml
new file mode 100644
index 00000000..1ca663f4
--- /dev/null
+++ b/xtesting/openstack/site.yml
@@ -0,0 +1,13 @@
+---
+- hosts:
+ - 127.0.0.1
+ roles:
+ - role: collivier.xtesting
+ project: vsperfostack
+ repo: 127.0.0.1
+ dport: 5000
+ gerrit:
+ suites:
+ - container: vsperfos
+ tests:
+ - phy2phy_tput
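+# Playbook for the collivier.xtesting Ansible role: it registers the
+# 'vsperfos' container and its phy2phy_tput test with the locally
+# deployed Xtesting CI (a sketch of intent; specifics come from the role).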
diff --git a/xtesting/openstack/testcases.yaml b/xtesting/openstack/testcases.yaml
new file mode 100644
index 00000000..aab3b16a
--- /dev/null
+++ b/xtesting/openstack/testcases.yaml
@@ -0,0 +1,19 @@
+---
+tiers:
+ -
+ name: vsperfostack
+ order: 1
+ description: 'VSPERF OpenStack Testing'
+ testcases:
+ -
+ case_name: phy2phy_tput
+ project_name: vsperfostack
+ criteria: 100
+ blocking: true
+ clean_flag: false
+ description: 'VSPERF OpenStack RFC2544 Throughput Test'
+ run:
+ name: vsperfostack
+ args:
+ conf_file: vsperfostack.conf
+ deploy_tgen: false
diff --git a/xtesting/openstack/vsperfostack.conf b/xtesting/openstack/vsperfostack.conf
new file mode 100644
index 00000000..489054a7
--- /dev/null
+++ b/xtesting/openstack/vsperfostack.conf
@@ -0,0 +1,80 @@
+# Copyright 2020 Spirent Communications.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# OpenStack Information
+
+OS_AUTH_URL="http://10.10.180.21/identity"
+OS_PROJECT_ID="0440a230a799460facec0d09dde64497"
+OS_PROJECT_NAME="admin"
+OS_USER_DOMAIN_NAME="Default"
+OS_PROJECT_DOMAIN_ID="default"
+OS_USERNAME="admin"
+OS_PASSWORD="admin123"
+OS_REGION_NAME="RegionOne"
+OS_INTERFACE="public"
+OS_IDENTITY_API_VERSION=3
+OS_INSECURE=False
+OS_CA_CERT = 'None'
+
+# Deployment Information
+SCENARIOS = ['templates/l2_2c_2i.yaml']
+FLAVOR_NAME = 'm1.large'
+IMAGE_NAME = 'stcv'
+EXTERNAL_NET = 'public'
+
+# Traffic Information
+TRAFFICGEN_PKT_SIZES = (1024,)
+TRAFFICGEN_DURATION = 10
+
+# Traffic generator to use
+TRAFFICGEN = 'TestCenter'
+
+
+# Trafficgen Specific Information
+# STC
+TRAFFICGEN_STC_LAB_SERVER_ADDR = "10.10.180.245"
+TRAFFICGEN_STC_LICENSE_SERVER_ADDR = "10.10.50.226"
+TRAFFICGEN_STC_EAST_SLOT_NUM = "1"
+TRAFFICGEN_STC_EAST_PORT_NUM = "1"
+TRAFFICGEN_STC_WEST_SLOT_NUM = "1"
+TRAFFICGEN_STC_WEST_PORT_NUM = "1"
+TRAFFICGEN_STC_PYTHON2_PATH = "/usr/bin/python3"
+TRAFFICGEN_STC_RFC2544_TPUT_TEST_FILE_NAME = "testcenter-rfc2544-rest.py"
+TRAFFICGEN_STC_RFC2544_METRIC = "throughput"
+
+
+# Ixia
+TRAFFICGEN_EAST_IXIA_CARD = '1'
+TRAFFICGEN_WEST_IXIA_CARD = '1'
+TRAFFICGEN_EAST_IXIA_PORT = '1'
+TRAFFICGEN_WEST_IXIA_PORT = '1'
+TRAFFICGEN_IXIA_LIB_PATH = '/opt/ixia/ixos-api/9.00.0.20/lib/ixTcl1.0'
+TRAFFICGEN_IXNET_LIB_PATH = '/opt/ixia/ixnetwork/9.00.1915.16/lib/TclApi/IxTclNetwork'
+TRAFFICGEN_IXNET_MACHINE = '10.10.180.240' # dotted-quad IP address
+TRAFFICGEN_IXNET_PORT = '443'
+TRAFFICGEN_IXNET_USER = 'admin'
+TRAFFICGEN_IXNET_TESTER_RESULT_DIR = 'c:/ixia_results/vsperf_sandbox'
+TRAFFICGEN_IXNET_DUT_RESULT_DIR = '/mnt/ixia_results/vsperf_sandbox'
+
+# Trex
+TRAFFICGEN_TREX_HOST_IP_ADDR = '10.10.120.25'
+TRAFFICGEN_TREX_USER = 'root'
+TRAFFICGEN_TREX_BASE_DIR = '/root/trex_2.86/'
+TRAFFICGEN_TREX_LINE_SPEED_GBPS = '10'
+TRAFFICGEN_TREX_PORT1 = '0000:81:00.0'
+TRAFFICGEN_TREX_PORT2 = '0000:81:00.1'
+TRAFFICGEN_TREX_PROMISCUOUS = False
+TRAFFICGEN_TREX_LATENCY_PPS = 1000
+TRAFFICGEN_TREX_RFC2544_BINARY_SEARCH_LOSS_VERIFICATION = False
+TRAFFICGEN_TREX_RFC2544_MAX_REPEAT = 2
diff --git a/xtesting/openstack/vsperfostack.py b/xtesting/openstack/vsperfostack.py
new file mode 100755
index 00000000..437f8492
--- /dev/null
+++ b/xtesting/openstack/vsperfostack.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python3
+
+# Copyright 2020 Spirent Communications.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""VSPERF-Xtesting-Openstack Control script.
+"""
+
+import os
+import subprocess
+import sys
+import time
+
+from xtesting.core import testcase
+
+
+class VsperfOstack(testcase.TestCase):
+ """
+ Implement Xtesting's testcase class
+ """
+ def run(self, **kwargs):
+ """
+ Main Run.
+ """
+ custom_conffile = '/vswitchperf/conf/99_xtesting.conf'
+ try:
+ test_params = dict(kwargs)
+ # Make results directory - Xtesting Requirement
+ os.makedirs(self.res_dir, exist_ok=True)
+ # Start the timer
+ self.start_time = time.time()
+
+ # Get the parameter
+ if 'conf_file' in test_params:
+ conffile = os.path.join('/', test_params['conf_file'])
+ else:
+ conffile = '/vsperfostack.conf'
+
+ # Remove customfile if it exists.
+ if os.path.exists(custom_conffile):
+ os.remove(custom_conffile)
+
+ # Write custom configuration.
+ with open(custom_conffile, 'a+') as fil:
+ fil.write("LOG_DIR='{}'".format(self.res_dir))
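+ # vsperf auto-loads conf/<NN>_*.conf files in numeric order, so this
+ # 99_* file is applied last and points LOG_DIR at Xtesting's res_dir.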
+ # Start the vsperf command
+ if ('deploy_tgen' in test_params and
+ test_params['deploy_tgen']):
+ output = subprocess.check_output(['vsperf',
+ '--conf-file',
+ conffile,
+ '--openstack',
+ '--load-env',
+ '--tests',
+ self.case_name])
+ else:
+ output = subprocess.check_output(['vsperf',
+ '--conf-file',
+ conffile,
+ '--load-env',
+ '--mode',
+ 'trafficgen',
+ '--tests',
+ self.case_name])
+ print(output)
+ self.result = 100
+ self.stop_time = time.time()
+ except Exception: # pylint: disable=broad-except
+ print("Unexpected error:", sys.exc_info()[0])
+ self.result = 0
+ self.stop_time = time.time()