-rw-r--r--  INFO  1
-rw-r--r--  INFO.yaml  4
-rw-r--r--  ansible/infra_deploy.yml  30
-rw-r--r--  ansible/install.yaml  42
-rw-r--r--  ansible/multi_port_baremetal_ixia_correlated_test.yaml  1
-rw-r--r--  ansible/multi_port_baremetal_ixia_test.yaml  1
-rw-r--r--  ansible/nsb_setup.yml  4
-rw-r--r--  ansible/roles/add_repos_jumphost/tasks/Debian.yml  81
-rw-r--r--  ansible/roles/add_repos_jumphost/tasks/main.yml (renamed from ansible/install_dependencies.yml)  9
-rw-r--r--  ansible/roles/add_repos_jumphost/vars/main.yml  17
-rw-r--r--  ansible/roles/configure_gui/tasks/main.yml  33
-rw-r--r--  ansible/roles/configure_nginx/tasks/main.yml  33
-rw-r--r--  ansible/roles/configure_nginx/templates/yardstick.conf.j2  18
-rw-r--r--  ansible/roles/configure_rabbitmq/tasks/main.yml  30
-rw-r--r--  ansible/roles/configure_uwsgi/tasks/main.yml  45
-rw-r--r--  ansible/roles/configure_uwsgi/templates/yardstick.ini.j2  18
-rw-r--r--  ansible/roles/docker/tasks/Debian.yml  14
-rw-r--r--  ansible/roles/docker/vars/main.yml  2
-rw-r--r--  ansible/roles/download_trex/tasks/main.yml  5
-rw-r--r--  ansible/roles/infra_check_requirements/tasks/main.yml  45
-rw-r--r--  ansible/roles/infra_create_vms/tasks/configure_vm.yml  2
-rw-r--r--  ansible/roles/infra_deploy_openstack/tasks/configure_kolla.yml  40
-rw-r--r--  ansible/roles/infra_deploy_openstack/tasks/configure_openstack.yml  67
-rw-r--r--  ansible/roles/infra_deploy_openstack/tasks/install_kolla.yml  54
-rw-r--r--  ansible/roles/infra_deploy_openstack/tasks/main.yml  125
-rw-r--r--  ansible/roles/infra_deploy_openstack/tasks/rampup_openstack.yml  43
-rw-r--r--  ansible/roles/infra_deploy_openstack/templates/multinode.j2  39
-rw-r--r--  ansible/roles/infra_deploy_openstack/vars/main.yml  18
-rw-r--r--  ansible/roles/infra_destroy_previous_configuration/tasks/delete_network.yml  27
-rw-r--r--  ansible/roles/infra_destroy_previous_configuration/tasks/delete_vm.yml  8
-rw-r--r--  ansible/roles/infra_destroy_previous_configuration/tasks/main.yml  36
-rw-r--r--  ansible/roles/infra_prepare_vms/tasks/main.yml  105
-rw-r--r--  ansible/roles/infra_rampup_stack_nodes/tasks/configure_docker.yml  48
-rw-r--r--  ansible/roles/infra_rampup_stack_nodes/tasks/install_packets.yml  85
-rw-r--r--  ansible/roles/infra_rampup_stack_nodes/tasks/main.yml  39
-rw-r--r--  ansible/roles/infra_rampup_stack_nodes/tasks/update_conf_files.yml  69
-rw-r--r--  ansible/roles/infra_rampup_stack_nodes/tasks/update_keys.yml  48
-rw-r--r--  ansible/roles/infra_rampup_stack_nodes/vars/main.yml  16
-rwxr-xr-x  ansible/roles/install_dependencies_jumphost/tasks/Debian.yml  76
-rw-r--r--  ansible/roles/install_dependencies_jumphost/tasks/RedHat.yml (renamed from ansible/roles/install_dependencies/tasks/RedHat.yml)  12
-rw-r--r-- [-rwxr-xr-x]  ansible/roles/install_dependencies_jumphost/tasks/Suse.yml (renamed from ansible/roles/install_dependencies/tasks/Debian.yml)  41
-rw-r--r--  ansible/roles/install_dependencies_jumphost/tasks/main.yml (renamed from ansible/roles/install_dependencies/tasks/main.yml)  0
-rw-r--r--  ansible/roles/install_yardstick/tasks/main.yml  46
-rw-r--r--  ansible/roles/install_yardstick/tasks/regular_install.yml  22
-rw-r--r--  ansible/roles/install_yardstick/tasks/virtual_install.yml  25
-rw-r--r--  ansible/standalone_ovs_scale_out_ixia_correlated_test.yaml  3
-rw-r--r--  ansible/standalone_ovs_scale_out_ixia_test.yaml  1
-rw-r--r--  ansible/standalone_sriov_scale_out_ixia_correlated_test.yaml  3
-rw-r--r--  ansible/standalone_sriov_scale_out_ixia_test.yaml  1
-rw-r--r--  dashboard/opnfv_yardstick_tc058.json  265
-rw-r--r--  docker/Dockerfile  22
-rw-r--r--  docker/Dockerfile.aarch64.patch  33
-rw-r--r--  docs/release/release-notes/release-notes.rst  514
-rwxr-xr-x  docs/testing/developer/devguide/devguide.rst  3
-rw-r--r--  docs/testing/user/userguide/13-nsb-installation.rst  85
-rw-r--r--  docs/testing/user/userguide/code/pod_ixia.yaml  31
-rw-r--r--  etc/infra/infra_deploy_multi.yaml.sample  97
-rw-r--r--  etc/infra/infra_deploy_one.yaml.sample (renamed from etc/infra/infra_deploy.yaml.sample)  35
-rw-r--r--  etc/infra/infra_deploy_two.yaml.sample  63
-rw-r--r--  etc/yardstick/nodes/apex_baremetal/pod.yaml  46
-rw-r--r--  etc/yardstick/nodes/apex_virtual/pod.yaml  40
-rw-r--r--  etc/yardstick/nodes/pod.yaml.nsb.sample.ixia  1
-rw-r--r--  etc/yardstick/nodes/standalone/ixia_correlated_template.yaml  3
-rw-r--r--  etc/yardstick/nodes/standalone/ixia_template.yaml  1
-rwxr-xr-x  install.sh  2
-rwxr-xr-x  nsb_setup.sh  8
-rw-r--r--  requirements.txt  1
-rw-r--r--  samples/test_suite.yaml  3
-rw-r--r--  samples/vnf_samples/nsut/prox/configs/gen_l2fwd-2.cfg  6
-rw-r--r--  samples/vnf_samples/nsut/prox/configs/gen_l2fwd-4.cfg  13
-rw-r--r--  samples/vnf_samples/vnf_descriptors/ixia_rfc2544_tpl.yaml  1
-rw-r--r--  samples/vnf_samples/vnf_descriptors/tg_ixload.yaml  1
-rw-r--r--  samples/vnf_samples/vnf_descriptors/tg_ixload_4port.yaml  1
-rwxr-xr-x  tests/ci/prepare_env.sh  67
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc092.yaml  276
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc093.yaml  313
-rw-r--r--  tests/opnfv/test_suites/opnfv_os-odl-nofeature-ha_daily.yaml  15
-rw-r--r--  tests/opnfv/test_suites/opnfv_os-odl-nofeature-noha_daily.yaml  8
-rw-r--r--  tests/unit/network_services/helpers/test_samplevnf_helper.py  8
-rw-r--r--  tests/unit/network_services/nfvi/test_resource.py  33
-rw-r--r--  tests/unit/network_services/vnf_generic/vnf/test_base.py  8
-rw-r--r--  tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py  104
-rwxr-xr-x  tools/virt_ci_rampup.sh  28
-rw-r--r--  yardstick/benchmark/contexts/base.py  7
-rw-r--r--  yardstick/benchmark/contexts/heat.py  9
-rw-r--r--  yardstick/benchmark/core/task.py  29
-rw-r--r--  yardstick/benchmark/scenarios/availability/attacker/attacker_process.py  15
-rw-r--r--  yardstick/benchmark/scenarios/availability/attacker/baseattacker.py  2
-rw-r--r--  yardstick/benchmark/scenarios/availability/director.py  22
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/ha_tools/start_service.bash  13
-rw-r--r--  yardstick/benchmark/scenarios/availability/monitor/basemonitor.py  2
-rw-r--r--  yardstick/benchmark/scenarios/availability/monitor/monitor_command.py  12
-rw-r--r--  yardstick/benchmark/scenarios/availability/monitor/monitor_multi.py  5
-rw-r--r--  yardstick/benchmark/scenarios/availability/monitor/monitor_process.py  10
-rw-r--r--  yardstick/benchmark/scenarios/availability/scenario_general.py  28
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/serviceha.py  27
-rw-r--r--  yardstick/benchmark/scenarios/lib/attach_volume.py  35
-rw-r--r--  yardstick/benchmark/scenarios/lib/create_keypair.py  49
-rw-r--r--  yardstick/benchmark/scenarios/lib/create_server.py  74
-rw-r--r--  yardstick/benchmark/scenarios/lib/delete_keypair.py  29
-rw-r--r--  yardstick/benchmark/scenarios/lib/delete_server.py  33
-rw-r--r--  yardstick/benchmark/scenarios/lib/get_flavor.py  37
-rw-r--r--  yardstick/benchmark/scenarios/lib/get_server.py  84
-rw-r--r--  yardstick/benchmark/scenarios/networking/vnf_generic.py  2
-rw-r--r--  yardstick/common/ansible_common.py  2
-rw-r--r--  yardstick/common/constants.py  3
-rw-r--r--  yardstick/common/exceptions.py  40
-rw-r--r--  yardstick/common/openstack_utils.py  412
-rw-r--r--  yardstick/common/utils.py  34
-rw-r--r--  yardstick/network_services/collector/subscriber.py  40
-rw-r--r--  yardstick/network_services/libs/ixia_libs/IxNet/IxNet.py  22
-rw-r--r--  yardstick/network_services/nfvi/resource.py  61
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/base.py  32
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/sample_vnf.py  113
-rw-r--r--  yardstick/orchestrator/heat.py  17
-rw-r--r--  yardstick/tests/integration/dummy-scenario-heat-context.yaml  7
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/standalone/test_model.py  2
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/test_base.py  7
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/test_heat.py  18
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/availability/test_scenario_general.py  35
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py  19
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_attach_volume.py  52
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_create_keypair.py  56
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_create_server.py  63
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_delete_keypair.py  48
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_delete_server.py  51
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_get_flavor.py  52
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_get_server.py  69
-rw-r--r--  yardstick/tests/unit/common/test_openstack_utils.py  223
-rw-r--r--  yardstick/tests/unit/common/test_packages.py  88
-rw-r--r--  yardstick/tests/unit/common/test_utils.py  44
-rw-r--r--  yardstick/tests/unit/network_services/__init__.py (renamed from tests/unit/network_services/collector/__init__.py)  0
-rw-r--r--  yardstick/tests/unit/network_services/collector/__init__.py (renamed from tests/unit/network_services/libs/__init__.py)  0
-rw-r--r--  yardstick/tests/unit/network_services/collector/test_publisher.py (renamed from tests/unit/network_services/collector/test_publisher.py)  3
-rw-r--r--  yardstick/tests/unit/network_services/collector/test_subscriber.py (renamed from tests/unit/network_services/collector/test_subscriber.py)  44
-rw-r--r--  yardstick/tests/unit/network_services/libs/__init__.py (renamed from tests/unit/network_services/libs/ixia_libs/__init__.py)  0
-rw-r--r--  yardstick/tests/unit/network_services/libs/ixia_libs/__init__.py  0
-rw-r--r--  yardstick/tests/unit/network_services/libs/ixia_libs/test_IxNet.py (renamed from tests/unit/network_services/libs/ixia_libs/test_IxNet.py)  34
-rw-r--r--  yardstick/tests/unit/network_services/test_utils.py (renamed from tests/unit/network_services/test_utils.py)  2
-rw-r--r--  yardstick/tests/unit/network_services/test_yang_model.py (renamed from tests/unit/network_services/test_yang_model.py)  12
-rw-r--r--  yardstick/tests/unit/orchestrator/test_heat.py  19
141 files changed, 4480 insertions, 1435 deletions
diff --git a/INFO b/INFO
index 1a49af295..9bcc2922d 100644
--- a/INFO
+++ b/INFO
@@ -11,7 +11,6 @@ IRC: #opnfv-yardstick
Repository: yardstick
Committers:
-jorgen.w.karlsson@ericsson.com
jean.gaoliang@huawei.com
lvjing5@huawei.com
wu.zhihui1@zte.com.cn
diff --git a/INFO.yaml b/INFO.yaml
index 730cd4a6b..677c470cb 100644
--- a/INFO.yaml
+++ b/INFO.yaml
@@ -34,10 +34,6 @@ repositories:
- 'yardstick'
committers:
- <<: *opnfv_yardstick_ptl
- - name: 'Jörgen Karlsson'
- email: 'jorgen.w.karlsson@ericsson.com'
- company: 'ericsson.com'
- id: 'jnon'
- name: 'Kubi'
email: 'jean.gaoliang@huawei.com'
company: 'huawei.com'
diff --git a/ansible/infra_deploy.yml b/ansible/infra_deploy.yml
index 4ad21af00..8cf5dffef 100644
--- a/ansible/infra_deploy.yml
+++ b/ansible/infra_deploy.yml
@@ -13,9 +13,37 @@
# limitations under the License.
---
- hosts: jumphost
+ vars:
+ rs_file: "{{ RS_FILE }}"
+ clean_up: "{{ CLEAN_UP | default(False) }}" # If True, all VMs, networks and disk images will be deleted
+
+ tasks:
+ - set_fact:
+ proxy_host: "{{ lookup('env', 'http_proxy') | urlsplit('hostname') }}"
+ proxy_proto: "{{ lookup('env', 'http_proxy') | urlsplit('scheme') }}"
+ proxy_port: "{{ lookup('env', 'http_proxy') | urlsplit('port') }}"
+
+ - set_fact:
+ proxy_host_ip: "{{ lookup('dig', proxy_host) }}"
roles:
- - infra_check_requirements
- infra_destroy_previous_configuration
+ - infra_check_requirements
- infra_create_network
- infra_create_vms
+ - infra_prepare_vms
+
+- hosts: deploy,regular,yardstickG
+ gather_facts: no
+ become: yes
+
+ roles:
+ - infra_rampup_stack_nodes
+
+
+- hosts: deploy
+ become: yes
+ environment: "{{ proxy_env }}"
+
+ roles:
+ - infra_deploy_openstack
diff --git a/ansible/install.yaml b/ansible/install.yaml
new file mode 100644
index 000000000..afffbede2
--- /dev/null
+++ b/ansible/install.yaml
@@ -0,0 +1,42 @@
+---
+- hosts: localhost
+
+ vars:
+ arch_amd64: "amd64"
+ arch_arm64: "arm64"
+ inst_mode_container: "container"
+ inst_mode_baremetal: "baremetal"
+ ubuntu_archive:
+ amd64: "http://archive.ubuntu.com/ubuntu/"
+ arm64: "http://ports.ubuntu.com/ubuntu-ports/"
+ installation_mode: "{{ INSTALLATION_MODE | default('baremetal') }}"
+ yardstick_dir: "{{ YARDSTICK_DIR | default('/home/opnfv/repos/yardstick') }}"
+ virtual_environment: "{{ VIRTUAL_ENVIRONMENT | default(False) }}"
+ nsb_dir: "{{ NSB_DIR | default('/opt/nsb_bin/') }}"
+
+ pre_tasks:
+
+ - name: Create NSB binaries directory, accessible to any user
+ file:
+ path: "{{ nsb_dir }}"
+ state: directory
+ owner: root
+ mode: 0777
+
+ roles:
+ - add_repos_jumphost
+ - install_dependencies_jumphost
+ - install_yardstick
+ - configure_uwsgi
+ - configure_nginx
+ - download_trex
+ - install_trex
+ - configure_rabbitmq
+
+ post_tasks:
+
+ - service:
+ name: nginx
+ state: restarted
+
+ - shell: uwsgi -i /etc/yardstick/yardstick.ini
diff --git a/ansible/multi_port_baremetal_ixia_correlated_test.yaml b/ansible/multi_port_baremetal_ixia_correlated_test.yaml
index ba92b5cd3..0d223181d 100644
--- a/ansible/multi_port_baremetal_ixia_correlated_test.yaml
+++ b/ansible/multi_port_baremetal_ixia_correlated_test.yaml
@@ -42,7 +42,6 @@
lib_path: "/opt/ixia/ixos-api/8.01.0.2/lib/ixTcl1.0"
root_dir: "/opt/ixia/ixos-api/8.01.0.2/"
py_bin_path: "/opt/ixia/ixload/8.01.106.3/bin/"
- py_lib_path: "/opt/ixia/ixnetwork/8.01.1029.14/lib/PythonApi"
dut_result_dir: "/mnt/results"
version: "8.01.106.3"
pcis:
diff --git a/ansible/multi_port_baremetal_ixia_test.yaml b/ansible/multi_port_baremetal_ixia_test.yaml
index 52bc40b43..d2dfaa3c4 100644
--- a/ansible/multi_port_baremetal_ixia_test.yaml
+++ b/ansible/multi_port_baremetal_ixia_test.yaml
@@ -42,7 +42,6 @@
lib_path: "/opt/ixia/ixos-api/8.01.0.2/lib/ixTcl1.0"
root_dir: "/opt/ixia/ixos-api/8.01.0.2/"
py_bin_path: "/opt/ixia/ixload/8.01.106.3/bin/"
- py_lib_path: "/opt/ixia/ixnetwork/8.01.1029.14/lib/PythonApi"
dut_result_dir: "/mnt/results"
version: "8.01.106.3"
pcis:
diff --git a/ansible/nsb_setup.yml b/ansible/nsb_setup.yml
index 98a59f984..fcde1d1b2 100644
--- a/ansible/nsb_setup.yml
+++ b/ansible/nsb_setup.yml
@@ -22,7 +22,7 @@
environment:
"{{ proxy_env }}"
roles:
- - install_dependencies
+ - install_dependencies_jumphost
- docker
- name: "handle all openstack stuff when: openrc_file is defined"
@@ -37,7 +37,7 @@
name: yardstick
pull: yes
recreate: yes
- image: opnfv/yardstick:latest
+ image: "{{ yardstick_docker_image|default('opnfv/yardstick:latest') }}"
state: started
restart_policy: always
privileged: yes
diff --git a/ansible/roles/add_repos_jumphost/tasks/Debian.yml b/ansible/roles/add_repos_jumphost/tasks/Debian.yml
new file mode 100644
index 000000000..626f0b037
--- /dev/null
+++ b/ansible/roles/add_repos_jumphost/tasks/Debian.yml
@@ -0,0 +1,81 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+# Arguments needed: arch_arm64, arch_amd64, ubuntu_archive
+
+- name: Set the repositories architecture name
+ set_fact:
+ arch: "{{ arch_arm64 if ansible_architecture == 'aarch64' else arch_amd64 }}"
+ extra_arch: "{{ arch_amd64 if ansible_architecture == 'aarch64' else arch_arm64 }}"
+
+- name: Define the repositories names
+ set_fact:
+ repo: "{{ ubuntu_archive[arch] }}"
+ extra_repo: "{{ ubuntu_archive[extra_arch] }}"
+
+- name: Add architecture to the default repository list
+ replace:
+ path: "{{ sources_list_file }}"
+ regexp: '(^deb\s+)([^\[].*)$'
+ replace: 'deb [arch={{ arch }}] \2'
+
+- name: Remove support for source repositories
+ replace:
+ path: "{{ sources_list_file }}"
+ regexp: "^deb-src "
+ replace: "# deb-src "
+
+- name: Add extra architecture
+ command: "dpkg --add-architecture {{ extra_arch }}"
+
+- name: Define the default release version
+ copy:
+ dest: "{{ default_distro_file }}"
+ content: 'APT::Default-Release "{{ ansible_distribution_release }}";'
+
+- name: Remove extra repository file
+ file:
+ path: "{{ repo_file }}"
+ state: absent
+ ignore_errors: yes
+
+- name: Add extra repository file
+ file:
+ path: "{{ repo_file }}"
+ state: touch
+
+- name: Add the repository for qemu_static_user/xenial
+ blockinfile:
+ path: "{{ repo_file }}"
+ marker: "MARKER"
+ content: |
+ deb [arch={{ arch }}] {{ repo }} xenial-updates universe
+ when: ansible_distribution_release != "xenial"
+
+- name: Add extra architecture repositories if installing in container
+ blockinfile:
+ path: "{{ repo_file }}"
+ marker: "MARKER"
+ content: |
+ deb [arch={{ extra_arch }}] {{ extra_repo }} {{ ansible_distribution_release }} main universe multiverse restricted
+ deb [arch={{ extra_arch }}] {{ extra_repo }} {{ ansible_distribution_release }}-updates main universe multiverse restricted
+ deb [arch={{ extra_arch }}] {{ extra_repo }} {{ ansible_distribution_release }}-security main universe multiverse restricted
+ deb [arch={{ extra_arch }}] {{ extra_repo }} {{ ansible_distribution_release }}-proposed main universe multiverse restricted
+ when: installation_mode == "container"
+
+- name: Remove the marker
+ lineinfile:
+ dest: "{{ repo_file }}"
+ state: absent
+ regexp: "MARKER"
diff --git a/ansible/install_dependencies.yml b/ansible/roles/add_repos_jumphost/tasks/main.yml
index 1c7d20170..f50fd9f0d 100644
--- a/ansible/install_dependencies.yml
+++ b/ansible/roles/add_repos_jumphost/tasks/main.yml
@@ -1,4 +1,4 @@
-# Copyright (c) 2017 Intel Corporation.
+# Copyright (c) 2018 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,8 +12,5 @@
# See the License for the specific language governing permissions and
# limitations under the License.
---
-- name: install yardstick dependencies
- hosts: all
-
- roles:
- - install_dependencies
+- include: "{{ ansible_os_family }}.yml"
+ when: ansible_os_family == "Debian"
diff --git a/ansible/roles/add_repos_jumphost/vars/main.yml b/ansible/roles/add_repos_jumphost/vars/main.yml
new file mode 100644
index 000000000..30e444711
--- /dev/null
+++ b/ansible/roles/add_repos_jumphost/vars/main.yml
@@ -0,0 +1,17 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+repo_file: "/etc/apt/sources.list.d/yardstick.list"
+sources_list_file: "/etc/apt/sources.list"
+default_distro_file: "/etc/apt/apt.conf.d/default-distro"
diff --git a/ansible/roles/configure_gui/tasks/main.yml b/ansible/roles/configure_gui/tasks/main.yml
new file mode 100644
index 000000000..846a9cb47
--- /dev/null
+++ b/ansible/roles/configure_gui/tasks/main.yml
@@ -0,0 +1,33 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: Define variables
+ set_fact:
+ gui_dir: "{{ yardstick_dir }}/gui/"
+
+- name: Run gui.sh
+ shell:
+ cmd: /bin/bash gui.sh
+ chdir: "{{ gui_dir }}"
+
+- name: Create nginx/yardstick directory
+ file:
+ path: /etc/nginx/yardstick
+ state: directory
+ recurse: yes
+
+- name: Move dist to /etc/nginx/yardstick/gui
+ shell:
+ cmd: mv dist /etc/nginx/yardstick/gui
+ chdir: "{{ gui_dir }}"
diff --git a/ansible/roles/configure_nginx/tasks/main.yml b/ansible/roles/configure_nginx/tasks/main.yml
new file mode 100644
index 000000000..37b052725
--- /dev/null
+++ b/ansible/roles/configure_nginx/tasks/main.yml
@@ -0,0 +1,33 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: Define variables
+ set_fact:
+ socket_file: "{{ socket_file|default('/var/run/yardstick.sock') }}"
+
+- name: Make sure conf.d directory exists
+ file:
+ path: /etc/nginx/conf.d
+ state: directory
+
+- name: Create the nginx config file
+ template:
+ src: yardstick.conf.j2
+ dest: "/etc/nginx/conf.d/yardstick.conf"
+
+- name: Configure ports if RedHat OS
+ shell: |
+ semanage port -m -t http_port_t -p tcp 5000
+ semanage port -m -t http_port_t -p udp 5000
+ when: ansible_os_family == "RedHat"
\ No newline at end of file
diff --git a/ansible/roles/configure_nginx/templates/yardstick.conf.j2 b/ansible/roles/configure_nginx/templates/yardstick.conf.j2
new file mode 100644
index 000000000..484096cec
--- /dev/null
+++ b/ansible/roles/configure_nginx/templates/yardstick.conf.j2
@@ -0,0 +1,18 @@
+server {
+ listen 5000;
+ server_name localhost;
+ index index.htm index.html;
+ location / {
+ include uwsgi_params;
+ client_max_body_size 2000m;
+ uwsgi_pass unix://{{ socket_file }};
+ }
+
+ location /gui/ {
+ alias /etc/nginx/yardstick/gui/;
+ }
+
+ location /report/ {
+ alias /tmp/;
+ }
+}
diff --git a/ansible/roles/configure_rabbitmq/tasks/main.yml b/ansible/roles/configure_rabbitmq/tasks/main.yml
new file mode 100644
index 000000000..3ad60c1ea
--- /dev/null
+++ b/ansible/roles/configure_rabbitmq/tasks/main.yml
@@ -0,0 +1,30 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: Restart rabbitmq
+ service:
+ name: rabbitmq-server
+ state: restarted
+
+- name: rabbitmqctl start_app
+ shell: rabbitmqctl start_app
+
+- name: Configure rabbitmq
+ rabbitmq_user:
+ user: yardstick
+ password: yardstick
+ configure_priv: .*
+ read_priv: .*
+ write_priv: .*
+ state: present
diff --git a/ansible/roles/configure_uwsgi/tasks/main.yml b/ansible/roles/configure_uwsgi/tasks/main.yml
new file mode 100644
index 000000000..6a2244657
--- /dev/null
+++ b/ansible/roles/configure_uwsgi/tasks/main.yml
@@ -0,0 +1,45 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: Define variables
+ set_fact:
+ config_dir: "/etc/yardstick/"
+ log_dir: "/var/log/yardstick/"
+ socket_file: "/var/run/yardstick.sock"
+
+- name: Create UWSGI config directory
+ file:
+ path: "/etc/yardstick"
+ state: directory
+ owner: root
+ mode: 0755
+
+- name: Create API log directory
+ file:
+ path: "{{ log_dir }}"
+ state: directory
+ owner: root
+ mode: 0777
+
+- name: Create the socket for communication
+ file:
+ path: "{{ socket_file }}"
+ state: touch
+ owner: root
+ mode: 0644
+
+- name: Create the UWSGI config file
+ template:
+ src: yardstick.ini.j2
+ dest: "{{ config_dir }}yardstick.ini"
diff --git a/ansible/roles/configure_uwsgi/templates/yardstick.ini.j2 b/ansible/roles/configure_uwsgi/templates/yardstick.ini.j2
new file mode 100644
index 000000000..c049daf84
--- /dev/null
+++ b/ansible/roles/configure_uwsgi/templates/yardstick.ini.j2
@@ -0,0 +1,18 @@
+[uwsgi]
+master = true
+debug = true
+chdir = {{ yardstick_dir }}api
+module = server
+plugins = python
+processes = 10
+threads = 5
+async = true
+max-requests = 5000
+chmod-socket = 666
+callable = app_wrapper
+enable-threads = true
+close-on-exec = 1
+daemonize = {{ log_dir }}uwsgi.log
+socket = {{ socket_file }}
+{# If virtual environment, we need to add:
+ virtualenv = <virtual_env> #}
\ No newline at end of file
diff --git a/ansible/roles/docker/tasks/Debian.yml b/ansible/roles/docker/tasks/Debian.yml
index cf4128774..7f998de45 100644
--- a/ansible/roles/docker/tasks/Debian.yml
+++ b/ansible/roles/docker/tasks/Debian.yml
@@ -12,15 +12,5 @@
# See the License for the specific language governing permissions and
# limitations under the License.
---
- - name: add Ubuntu docker repo
- apt_repository: repo='deb [trusted=yes] {{ ubuntu_docker_url }} ubuntu-{{ ansible_distribution_release }} main' state=present
-
- - name: ensure correct docker version
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present force=yes"
- with_items: "{{ docker_packages[ansible_os_family] }}"
-
- - name: remove Ubuntu docker repo
- apt_repository:
- repo: 'deb [trusted=yes] {{ ubuntu_docker_url }} ubuntu-{{ ansible_distribution_release }} main'
- state: absent
- update_cache: no
+ - name: Install docker.io
+ action: "{{ ansible_pkg_mgr }} name=docker.io state=present force=yes"
diff --git a/ansible/roles/docker/vars/main.yml b/ansible/roles/docker/vars/main.yml
index 8b5077490..a735d523d 100644
--- a/ansible/roles/docker/vars/main.yml
+++ b/ansible/roles/docker/vars/main.yml
@@ -16,5 +16,3 @@ docker_project_url: https://yum.dockerproject.org
docker_packages:
"RedHat":
- docker-engine-1.13.1
- "Debian":
- - docker-engine=1.13.1*
diff --git a/ansible/roles/download_trex/tasks/main.yml b/ansible/roles/download_trex/tasks/main.yml
index baa964fd8..9df67d939 100644
--- a/ansible/roles/download_trex/tasks/main.yml
+++ b/ansible/roles/download_trex/tasks/main.yml
@@ -12,6 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
---
+- name: make sure trex_dest exists
+ file:
+ path: "{{ trex_dest }}"
+ state: directory
+
- name: fetch Trex
get_url:
url: "{{ trex_url }}"
diff --git a/ansible/roles/infra_check_requirements/tasks/main.yml b/ansible/roles/infra_check_requirements/tasks/main.yml
index a11bc56a1..991bd7383 100644
--- a/ansible/roles/infra_check_requirements/tasks/main.yml
+++ b/ansible/roles/infra_check_requirements/tasks/main.yml
@@ -12,27 +12,30 @@
# See the License for the specific language governing permissions and
# limitations under the License.
---
+- name: Reread system properties
+ setup:
+
- name: Include
include_vars:
- file: "{{rs_file}}"
+ file: "{{ rs_file }}"
name: infra_deploy_vars
- name: Store total CPU, RAM, Disk requested resources
set_fact:
- vcpu_t: "{{item.vcpus|int + vcpu_t|int}}"
- vram_t: "{{item.ram|int + vram_t|int}}"
- disk_t: "{{item.disk|int + disk_t|int}}"
- with_items: "{{infra_deploy_vars.nodes}}"
+ vcpu_t: "{{ item.vcpus|int + vcpu_t | int }}"
+ vram_t: "{{ item.ram|int + vram_t | int }}"
+ disk_t: "{{ item.disk|int + disk_t | int }}"
+ with_items: "{{ infra_deploy_vars.nodes }}"
- name: Fail if not enough RAM
fail:
msg: "Failed, not enough RAM, required: {{ vram_t }}, available {{ ansible_memory_mb.nocache.free }}"
- when: ansible_memory_mb.nocache.free < vram_t|int
+ when: ansible_memory_mb.nocache.free < vram_t | int
- name: Fail if not enough CPU
fail:
msg: "Failed, not enough CPU, required: {{ vcpu_t }}, available {{ ansible_processor_vcpus }}"
- when: ansible_processor_vcpus < vcpu_t|int
+ when: ansible_processor_vcpus < vcpu_t | int
- name: Define default network counter
set_fact:
@@ -40,20 +43,38 @@
- name: Increment counter for every default network detected
set_fact:
- num_default_network_detected: "{{ num_default_network_detected|int + 1 }}"
+ num_default_network_detected: "{{ num_default_network_detected | int + 1 }}"
when:
- item.default_gateway is defined
- item.default_gateway == True
- with_items: "{{infra_deploy_vars.networks}}"
+ with_items: "{{ infra_deploy_vars.networks }}"
- name: Fail if more than 1 or 0 default networks
fail:
msg: "Failed, there must be 1 default network: {{ num_default_network_detected }} detected"
- when: num_default_network_detected|int != 1
+ when: num_default_network_detected | int != 1
- name: Fail if not enough Disk space
set_fact:
- disk_avail: "{% for mount in ansible_mounts if mount.mount == '/' %}{{ (mount.size_available/1024/1024)|int }}{% endfor %}"
+ disk_avail: "{% for mount in ansible_mounts if mount.mount == '/' %}{{ (mount.size_available/1024/1024) | int }}{% endfor %}"
- fail:
msg: "Failed, not enough disk space, required {{ disk_t }}, available: {{ disk_avail }}"
- when: disk_avail|int < disk_t|int
+ when: disk_avail|int < disk_t | int
+
+- set_fact:
+ ostack_nodes: "{{ ostack_nodes | default([]) + [item.openstack_node] }}"
+ when: item.openstack_node is defined
+ with_items: "{{ infra_deploy_vars.nodes }}"
+
+# The all-in-one node type must be controller; multinode requires at least one controller and one compute node
+- fail:
+ msg: "OpenStack node types currently supported: controller, compute. Check input VMs file."
+ when: ostack_nodes is undefined or ostack_nodes | length < 1
+
+- fail:
+ msg: "In all-in-one configuration OpenStack node type must be controller."
+ when: ostack_nodes | length == 1 and 'controller' not in ostack_nodes
+
+- fail:
+ msg: "At least one controller and one compute node expected when total number of OpenStack nodes is more than one."
+ when: ostack_nodes | length > 1 and not ('compute' in ostack_nodes and 'controller' in ostack_nodes)
diff --git a/ansible/roles/infra_create_vms/tasks/configure_vm.yml b/ansible/roles/infra_create_vms/tasks/configure_vm.yml
index c20a0b175..a6a5e0618 100644
--- a/ansible/roles/infra_create_vms/tasks/configure_vm.yml
+++ b/ansible/roles/infra_create_vms/tasks/configure_vm.yml
@@ -47,8 +47,6 @@
output:
all: ">> /var/log/cloud-init.log"
ssh_pwauth: True
- bootcmd:
- - echo 127.0.0.1 {{ node_item.hostname }} >> /etc/hosts
users:
- name: {{ node_item.user }}
lock-passwd: False
diff --git a/ansible/roles/infra_deploy_openstack/tasks/configure_kolla.yml b/ansible/roles/infra_deploy_openstack/tasks/configure_kolla.yml
new file mode 100644
index 000000000..9713c0d1e
--- /dev/null
+++ b/ansible/roles/infra_deploy_openstack/tasks/configure_kolla.yml
@@ -0,0 +1,40 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: Modify globals.yml
+ replace:
+ path: /etc/kolla/globals.yml
+ regexp: "{{ item.find }}"
+ replace: "{{ item.insert_after }}"
+ with_items:
+ - { find: '^#kolla_base_distro:.*', insert_after: 'kolla_base_distro: "ubuntu"' }
+ - { find: '^#kolla_install_type:.*', insert_after: 'kolla_install_type: "source"' }
+ - { find: '^#openstack_release:.*', insert_after: 'openstack_release: "pike"' }
+ - { find: 'kolla_internal_vip_address:.*', insert_after: 'kolla_internal_vip_address: "{{ deployvm_ip }}"' }
+ - { find: '^#network_interface:.*', insert_after: 'network_interface: "{{ hostvars[ansible_host].ansible_default_ipv4.interface }}"' }
+ - { find: '^#neutron_external_interface:.*', insert_after: 'neutron_external_interface: "{{ neutron_iface }}"' }
+ - { find: '^#enable_haproxy:.*', insert_after: 'enable_haproxy: "no"'}
+ - { find: '^#enable_heat:.*' , insert_after: 'enable_heat: "yes"'}
+ - { find: '^#docker_registry:.*', insert_after: 'docker_registry: "{{ ansible_host }}:4000"' }
+
+- name: Generate multinode from inventory
+ template:
+ src: templates/multinode.j2
+ dest: "{{ git_repos_path + 'multinode' }}"
+
+- set_fact:
+ path2multinode: "{{ git_repos_path + kolla_ans_path + '/ansible/inventory/multinode' }}"
+
+- name: Append rest groups to multinode file
+ shell: line=`grep -n '\[deployment\]' {{ path2multinode }} | cut -d ':' -f1` && tail -n +$line {{ path2multinode }} >> "{{ git_repos_path + 'multinode' }}"
diff --git a/ansible/roles/infra_deploy_openstack/tasks/configure_openstack.yml b/ansible/roles/infra_deploy_openstack/tasks/configure_openstack.yml
new file mode 100644
index 000000000..3963cb64c
--- /dev/null
+++ b/ansible/roles/infra_deploy_openstack/tasks/configure_openstack.yml
@@ -0,0 +1,67 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: Create folders
+ file:
+ path: "{{ item }}"
+ state: directory
+ with_items:
+ - /etc/kolla/config/nova
+ - /etc/kolla/config/neutron
+
+- set_fact:
+ filter_ops: RetryFilter,AvailabilityZoneFilter,RamFilter,DiskFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,SameHostFilter
+
+- name: Configure OpenStack Nova
+ copy:
+ content: |
+ [filter_scheduler]
+ enabled_filters = {{ filter_ops }}
+ [libvirt]
+ cpu_mode = host-passthrough
+ dest: /etc/kolla/config/nova.conf
+
+- name: Configure OpenStack Neutron
+ copy:
+ content: |
+ [DEFAULT]
+ service_plugins=neutron.services.l3_router.l3_router_plugin.L3RouterPlugin
+ [securitygroup]
+ firewall_driver = neutron.agent.firewall.NoopFirewallDriver
+ [ml2]
+ extension_drivers=port_security
+ [agent]
+ extensions=port_security
+ dest: /etc/kolla/config/neutron.conf
+
+- name: Configure OpenStack ml2_plugin.ini
+ copy:
+ content: |
+ [ml2]
+ tenant_network_types = vxlan
+ extension_drivers = port_security
+ type_drivers = vlan,flat,local,vxlan
+ mechanism_drivers = openvswitch
+ [ml2_type_flat]
+ flat_networks = physnet1
+ [ml2_type_vlan]
+ network_vlan_ranges = physnet1
+ [securitygroup]
+ firewall_driver = iptables_hybrid
+ [ovs]
+ datapath_type = system
+ bridge_mappings = physnet1:br-ex
+ tunnel_bridge = br-tun
+ local_ip = {{ deployvm_ip }}
+ dest: /etc/kolla/config/neutron/ml2_plugin.ini
diff --git a/ansible/roles/infra_deploy_openstack/tasks/install_kolla.yml b/ansible/roles/infra_deploy_openstack/tasks/install_kolla.yml
new file mode 100644
index 000000000..38c163c6c
--- /dev/null
+++ b/ansible/roles/infra_deploy_openstack/tasks/install_kolla.yml
@@ -0,0 +1,54 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: Download kolla and kolla-ansible from git repos
+ git:
+ repo: "{{ item.repo }}"
+ dest: "{{ git_repos_path + item.dir }}"
+ version: stable/pike
+ with_items:
+ - { dir: "{{ kolla_path }}", repo: 'https://git.openstack.org/openstack/kolla'}
+ - { dir: "{{ kolla_ans_path }}", repo: 'https://git.openstack.org/openstack/kolla-ansible' }
+
+- name: Copy kolla-ansible password.yml and globals.yml
+ shell: cp -r "{{ git_repos_path + kolla_ans_path + '/etc/kolla/' }}" /etc/
+
+- name: Copy kolla-ansible all-in-one, multinode
+ shell: cp * "{{ git_repos_path }}"
+ args:
+ chdir: "{{ git_repos_path + kolla_ans_path + '/ansible/inventory/' }}"
+
+- name: Install requirements
+ pip:
+ chdir: "{{ item[0] }}"
+ requirements: "{{ item[1] }}"
+ with_nested:
+ - [ "{{ git_repos_path + kolla_path }}", "{{ git_repos_path + kolla_ans_path }}" ]
+ - [ 'requirements.txt', 'test-requirements.txt' ]
+
+- name: pip install .
+ pip:
+ chdir: "{{ item }}"
+ name: '.'
+ with_items:
+ - "{{ git_repos_path + kolla_path }}"
+ - "{{ git_repos_path + kolla_ans_path }}"
+
+- name: Run setup.py
+ shell: "python setup.py install"
+ args:
+ chdir: "{{ item }}"
+ with_items:
+ - "{{ git_repos_path + kolla_path }}"
+ - "{{ git_repos_path + kolla_ans_path }}"
diff --git a/ansible/roles/infra_deploy_openstack/tasks/main.yml b/ansible/roles/infra_deploy_openstack/tasks/main.yml
new file mode 100644
index 000000000..ba5d5bc54
--- /dev/null
+++ b/ansible/roles/infra_deploy_openstack/tasks/main.yml
@@ -0,0 +1,125 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+# This script is based on https://docs.openstack.org/kolla-ansible/pike/user/quickstart.html
+- name: Include variables
+ include_vars:
+ file: "{{ rs_file }}"
+ name: infra_deploy_vars
+
+- set_fact:
+ traffic_ip: "{{ item.interfaces[1].ip }}"
+ when: item.hostname == ansible_host
+ with_items: "{{ infra_deploy_vars.nodes }}"
+
+- name: Get neutron iface
+ set_fact:
+ neutron_iface: "{{ item }}"
+ when:
+ - hostvars[ansible_host]['ansible_' + item.replace('-', '_')].ipv4 is defined
+ - hostvars[ansible_host]['ansible_' + item.replace('-', '_')].ipv4.address is defined
+ - hostvars[ansible_host]['ansible_' + item.replace('-', '_')].ipv4.address == traffic_ip
+ with_items: "{{ hostvars[ansible_host].ansible_interfaces }}"
+
+- name: Create a registry container
+ docker_container:
+ name: registry
+ image: registry:2
+ restart_policy: always
+ ports:
+ - "4000:5000"
+
+- name: Download and install Kolla
+ include_tasks: install_kolla.yml
+
+- name: Configure Kolla
+ include_tasks: configure_kolla.yml
+
+- name: Configure OpenStack
+ include_tasks: configure_openstack.yml
+
+- name: Ramp up OpenStack
+ include_tasks: rampup_openstack.yml
+
+- name: Update admin-openrc.sh
+ lineinfile:
+ path: /etc/kolla/admin-openrc.sh
+ regexp: "{{ item.find }}"
+ line: "{{ item.add }}"
+ with_items:
+ - { find: 'EXTERNAL_NETWORK', add: 'export EXTERNAL_NETWORK=public' }
+ - { find: 'OS_AUTH_TYPE', add: 'export OS_AUTH_TYPE=password' }
+
+- name: Copy env file
+ shell: cp /etc/kolla/admin-openrc.sh /tmp/admin-openrc.yaml
+
+- name: Rework as env vars
+ replace:
+ path: /tmp/admin-openrc.yaml
+ regexp: 'export\s+(.*)=(.*)'
+ replace: '\1: \2'
+
+- name: Download OpenStack env file
+ fetch:
+ src: /tmp/admin-openrc.yaml
+ dest: /tmp/
+ flat: yes
+
+- include_vars:
+ file: /tmp/admin-openrc.yaml
+ name: ostack_env
+
+- name: Re-assign IP address
+ shell: ip address show {{ neutron_iface }} | awk '/inet/ {print $2}'
+ when: neutron_iface is defined
+ register: ip_netmask
+
+- shell: >
+ ip addr del dev {{ neutron_iface }} {{ ip_netmask.stdout }} &&
+ ip addr add dev br-ex {{ infra_deploy_vars.networks[1].host_ip }}/{{ ip_netmask.stdout_lines[0].split('/')[1] }}
+ when:
+ - neutron_iface is defined
+ - ip_netmask.stdout | length > 0
+
+- name: Create external network
+ os_network:
+ name: public
+ external: yes
+ provider_physical_network: physnet1
+ provider_network_type: flat
+ environment:
+ - no_proxy: "{{ lookup('env', 'no_proxy') + ',' + ansible_host + ',' + hostvars[ansible_host].ansible_default_ipv4.address }}"
+ - "{{ ostack_env }}"
+
+- name: Create sub-network
+ os_subnet:
+ name: public-subnet
+ network_name: public
+ cidr: "{{ ip_netmask.stdout }}"
+ allocation_pool_start: "{{ infra_deploy_vars.networks[1].dhcp_ip_start }}"
+ allocation_pool_end: "{{ infra_deploy_vars.networks[1].dhcp_ip_stop }}"
+ gateway_ip: "{{ infra_deploy_vars.networks[1].host_ip }}"
+ enable_dhcp: no
+ environment:
+ - no_proxy: "{{ lookup('env', 'no_proxy') + ',' + ansible_host + ',' + hostvars[ansible_host].ansible_default_ipv4.address }}"
+ - "{{ ostack_env }}"
+
+- name: Upload OpenStack env file to Yardstick VM
+ copy:
+ src: /etc/kolla/admin-openrc.sh
+ dest: '/tmp/admin-openrc.sh'
+ delegate_to: "{{ item }}"
+ when: "groups['yardstickG'] is defined"
+ with_items:
+ - "{{ groups['yardstickG'] }}"
diff --git a/ansible/roles/infra_deploy_openstack/tasks/rampup_openstack.yml b/ansible/roles/infra_deploy_openstack/tasks/rampup_openstack.yml
new file mode 100644
index 000000000..c75bec685
--- /dev/null
+++ b/ansible/roles/infra_deploy_openstack/tasks/rampup_openstack.yml
@@ -0,0 +1,43 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: Generate passwords
+ shell: kolla-genpwd
+
+- name: Generate the kolla-build.conf
+ shell: tox -e genconfig
+ args:
+ chdir: "{{ git_repos_path + kolla_path }}"
+
+- set_fact:
+ target: "{{ git_repos_path + 'all-in-one' }}"
+
+- set_fact:
+ target: "{{ git_repos_path + 'multinode' }}"
+ when: "groups['ostack'] | length > 1"
+
+- name: Run kolla-ansible precheck
+ shell: kolla-ansible prechecks -i "{{ target }}"
+
+- name: Build kolla-ansible
+ shell: kolla-build -b ubuntu -t source --profile default --tag pike --registry {{ ansible_host }}:4000 --push
+
+- name: Pull images from registry
+ shell: kolla-ansible pull -i "{{ target }}"
+
+- name: Run kolla-ansible deploy
+ shell: kolla-ansible deploy -i "{{ target }}"
+
+- name: Create an openrc file
+ shell: kolla-ansible post-deploy
diff --git a/ansible/roles/infra_deploy_openstack/templates/multinode.j2 b/ansible/roles/infra_deploy_openstack/templates/multinode.j2
new file mode 100644
index 000000000..57f87b521
--- /dev/null
+++ b/ansible/roles/infra_deploy_openstack/templates/multinode.j2
@@ -0,0 +1,39 @@
+{% set control_dict = {} %}
+{% set compute_dict = {} %}
+{% for host in groups['ostack'] %}
+{% if hostvars[host].node_type is defined and hostvars[host].node_type == 'controller' %}
+{% set control_dict = control_dict.update({hostvars[host].ansible_host: hostvars[host].ansible_default_ipv4.interface}) %}
+{% endif %}
+{% endfor %}
+{% for host in groups['ostack'] %}
+{% if hostvars[host].node_type is defined and hostvars[host].node_type == 'compute' %}
+{% for iface in hostvars[host].ansible_interfaces %}
+{%- if ((hostvars[host]['ansible_' + iface.replace('-', '_')].ipv4 is defined) and
+ (hostvars[host]['ansible_' + iface.replace('-', '_')].ipv4.address is defined) and
+ (hostvars[host]['ansible_' + iface.replace('-', '_')].ipv4.address == hostvars[host].secondary_ip)) -%}
+{% set compute_dict = compute_dict.update({hostvars[host].ansible_host: iface}) %}
+{% endif %}
+{% endfor %}
+{% endif %}
+{% endfor %}
+{% macro print_node(in_dict, iface_str='', cnt=1) %}
+{%- for host, iface in in_dict | dictsort -%}
+{% if loop.index <= cnt %}
+{% if iface_str %}
+{{ host }} ansible_ssh_user={{ hostvars[host].ansible_user }} ansible_private_key_file=/root/.ssh/id_rsa ansible_become=True {{ iface_str }}={{ iface }}
+{% else %}
+{{ host }} ansible_ssh_user={{ hostvars[host].ansible_user }} ansible_private_key_file=/root/.ssh/id_rsa ansible_become=True
+{% endif %}
+{% endif %}
+{% endfor %}
+{% endmacro %}
+[control]
+{{ print_node(control_dict, iface_str='network_interface', cnt=control_dict | length) }}
+[compute]
+{{ print_node(compute_dict, iface_str='network_interface', cnt=compute_dict | length) }}
+[network]
+{{ print_node(control_dict, iface_str='', cnt=control_dict | length) }}
+[monitoring]
+{{ print_node(control_dict) }}
+[storage]
+{{ print_node(control_dict, iface_str='', cnt=control_dict | length) }}
diff --git a/ansible/roles/infra_deploy_openstack/vars/main.yml b/ansible/roles/infra_deploy_openstack/vars/main.yml
new file mode 100644
index 000000000..bbea56847
--- /dev/null
+++ b/ansible/roles/infra_deploy_openstack/vars/main.yml
@@ -0,0 +1,18 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+kolla_path: "{{ 'https://git.openstack.org/openstack/kolla' | urlsplit('path') | basename }}"
+kolla_ans_path: "{{ 'https://git.openstack.org/openstack/kolla-ansible' | urlsplit('path') | basename }}"
+deployvm_ip: "{{ hostvars[ansible_host].host_ip }}"
+git_repos_path: '/tmp/repos/'
diff --git a/ansible/roles/infra_destroy_previous_configuration/tasks/delete_network.yml b/ansible/roles/infra_destroy_previous_configuration/tasks/delete_network.yml
index 314ee30af..5e616335a 100644
--- a/ansible/roles/infra_destroy_previous_configuration/tasks/delete_network.yml
+++ b/ansible/roles/infra_destroy_previous_configuration/tasks/delete_network.yml
@@ -14,18 +14,18 @@
---
- name: Destroy old networks created by virt
virt_net:
- name: "{{ network_item.name }}"
+ name: "{{ network_item }}"
command: destroy
- when: network_item.name in virt_nets.list_nets
+ when: clean_up | bool or network_item in deploy_nets
-# Ignoring erros as network can be created without being defined.
+# Ignoring errors as network can be created without being defined.
# This can happen if a user manually creates a network using the virsh command.
# If the network is not defined the undefine code will throw an error.
- name: Undefine old networks defined by virt
virt_net:
- name: "{{ network_item.name }}"
+ name: "{{ network_item }}"
command: undefine
- when: network_item.name in virt_nets.list_nets
+ when: clean_up | bool or network_item in deploy_nets
ignore_errors: yes
- name: Check if "ovs-vsctl" command is present
@@ -34,15 +34,20 @@
ignore_errors: yes
- name: Destroy OVS bridge if it exists
- command: ovs-vsctl --if-exists -- del-br "{{ network_item.name }}"
- when: ovs_vsctl_present.rc == 0
+ command: ovs-vsctl --if-exists -- del-br "{{ network_item }}"
+ when:
+ - ovs_vsctl_present.rc == 0
+ - clean_up | bool or network_item in deploy_nets
+ ignore_errors: yes
- name: Check if linux bridge is present
- stat: path="{{ '/sys/class/net/'+network_item.name+'/brif/' }}"
+ stat: path="{{ '/sys/class/net/' + network_item + '/brif/' }}"
register: check_linux_bridge
- name: Remove linux bridge if it exists
shell: |
- ifconfig "{{ network_item.name }}" down
- brctl delbr "{{ network_item.name }}"
- when: check_linux_bridge.stat.exists
+ ifconfig "{{ network_item }}" down
+ brctl delbr "{{ network_item }}"
+ when:
+ - check_linux_bridge.stat.exists
+ - clean_up | bool or network_item in deploy_nets
diff --git a/ansible/roles/infra_destroy_previous_configuration/tasks/delete_vm.yml b/ansible/roles/infra_destroy_previous_configuration/tasks/delete_vm.yml
index 5e43ee81e..91e949344 100644
--- a/ansible/roles/infra_destroy_previous_configuration/tasks/delete_vm.yml
+++ b/ansible/roles/infra_destroy_previous_configuration/tasks/delete_vm.yml
@@ -16,14 +16,14 @@
- name: Destroy old VMs
virt:
command: destroy
- name: "{{ node_item.hostname }}"
- when: node_item.hostname in virt_vms.list_vms
+ name: "{{ vmhost_item }}"
+ when: clean_up | bool or vmhost_item in deploy_vms
ignore_errors: yes
# Ignore errors as VM can be running while undefined
- name: Undefine old VMs
virt:
command: undefine
- name: "{{ node_item.hostname }}"
- when: node_item.hostname in virt_vms.list_vms
+ name: "{{ vmhost_item }}"
+ when: clean_up | bool or vmhost_item in deploy_vms
ignore_errors: yes
diff --git a/ansible/roles/infra_destroy_previous_configuration/tasks/main.yml b/ansible/roles/infra_destroy_previous_configuration/tasks/main.yml
index e6c2c0229..6c4aa33cf 100644
--- a/ansible/roles/infra_destroy_previous_configuration/tasks/main.yml
+++ b/ansible/roles/infra_destroy_previous_configuration/tasks/main.yml
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
---
-- name: Include
+- name: Include input file
include_vars:
file: "{{ rs_file }}"
name: infra_deploy_vars
@@ -25,16 +25,40 @@
virt: command=list_vms
register: virt_vms
+- set_fact:
+ deploy_vms: "{{ deploy_vms | default([]) + [item.hostname] }}"
+ with_items: "{{ infra_deploy_vars.nodes }}"
+
+- name: Define old disk images to delete
+ shell: virsh domblklist {{ item }} | awk '/\/.*/ { print $2 }'
+ when: clean_up | bool or item in deploy_vms
+ with_items: "{{ virt_vms.list_vms }}"
+ register: virt_img
+
+- set_fact:
+ images: "{{ images | default([]) + item.stdout_lines }}"
+ when: item.stdout_lines is defined and item.stdout_lines | length > 0
+ with_items: "{{ virt_img.results }}"
+
- name: Destroy old VMs
include_tasks: delete_vm.yml
- extra_vars: "{{ virt_vms }}"
loop_control:
- loop_var: node_item
- with_items: "{{ infra_deploy_vars.nodes }}"
+ loop_var: vmhost_item
+ with_items: "{{ virt_vms.list_vms }}"
+
+- set_fact:
+ deploy_nets: "{{ deploy_nets | default([]) + [item.name] }}"
+ with_items: "{{ infra_deploy_vars.networks }}"
- name: Delete old networks
include_tasks: delete_network.yml
- extra_vars: "{{ virt_nets }}"
loop_control:
loop_var: network_item
- with_items: "{{ infra_deploy_vars.networks }}"
+ with_items: "{{ virt_nets.list_nets }}"
+
+- name: Delete old disk images
+ file:
+ path: "{{ item }}"
+ state: absent
+ when: images is defined and images | length > 0
+ with_items: "{{ images }}"
diff --git a/ansible/roles/infra_prepare_vms/tasks/main.yml b/ansible/roles/infra_prepare_vms/tasks/main.yml
new file mode 100644
index 000000000..d7ed08511
--- /dev/null
+++ b/ansible/roles/infra_prepare_vms/tasks/main.yml
@@ -0,0 +1,105 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: Include input file
+ include_vars:
+ file: "{{ rs_file }}"
+ name: infra_deploy_vars
+
+- name: Install setuptools
+ apt:
+ name: python-setuptools
+
+- name: Install pip
+ shell: easy_install pip
+ environment: "{{ proxy_env }}"
+
+- name: Install dependency for dns dig
+ pip:
+ name: dnspython
+ state: latest
+
+- set_fact:
+ block_str: "{{ block_str | default('') + item.interfaces[0].ip + ' ' + item.hostname + '\n'}}"
+ with_items: "{{ infra_deploy_vars.nodes }}"
+
+- name: Delete hosts between markers
+ blockinfile:
+ path: /etc/hosts
+ marker: "# {mark} generated hosts file"
+ content: ""
+
+- name: Update /etc/hosts
+ blockinfile:
+ path: /etc/hosts
+ block: |
+ {{ block_str }}
+ marker: "# {mark} generated hosts file"
+
+- name: Clear known hosts
+ shell: >
+ ssh-keygen -f /root/.ssh/known_hosts -R "{{ item.interfaces[0].ip }}";
+ ssh-keygen -f /root/.ssh/known_hosts -R "{{ item.hostname }}"
+ with_items: "{{ infra_deploy_vars.nodes }}"
+
+- set_fact:
+ controllers: "{{ controllers | default([]) + [item.hostname] }}"
+ when:
+ - item.openstack_node is defined
+ - item.openstack_node == 'controller'
+ with_items: "{{ infra_deploy_vars.nodes }}"
+
+- name: Add host controller as deploy
+ add_host:
+ hostname: "{{ item.hostname }}"
+ host_ip: "{{ item.interfaces[0].ip }}"
+ groups: deploy, ostack
+ ansible_host: "{{ item.hostname }}"
+ ansible_user: "{{ item.user }}"
+ ansible_ssh_pass: "{{ item.password }}"
+ node_type: "{{ item.openstack_node }}"
+ secondary_ip: "{{ item.interfaces[1].ip }}"
+ when: item.hostname == controllers[0]
+ with_items: "{{ infra_deploy_vars.nodes }}"
+
+- name: Add other hosts as controller or compute
+ add_host:
+ hostname: "{{ item.hostname }}"
+ host_ip: "{{ item.interfaces[0].ip }}"
+ groups: regular,ostack
+ ansible_host: "{{ item.hostname }}"
+ ansible_user: "{{ item.user }}"
+ ansible_ssh_pass: "{{ item.password }}"
+ node_type: "{{ item.openstack_node }}"
+ secondary_ip: "{{ item.interfaces[1].ip }}"
+ when:
+ - item.openstack_node is defined
+ - item.openstack_node == 'controller' or item.openstack_node == 'compute'
+ - item.hostname != controllers[0]
+ with_items: "{{ infra_deploy_vars.nodes }}"
+
+- name: Add yardstick host to group
+ add_host:
+ hostname: "{{ item.hostname }}"
+ host_ip: "{{ item.interfaces[0].ip }}"
+ groups: yardstickG
+ ansible_host: "{{ item.hostname }}"
+ ansible_user: "{{ item.user }}"
+ ansible_ssh_pass: "{{ item.password }}"
+ secondary_ip: "{{ item.interfaces[1].ip }}"
+ when: item.hostname == 'yardstickvm'
+ with_items: "{{ infra_deploy_vars.nodes }}"
+
+- name: Workaround, not all VMs are ready by that time
+ pause: seconds=20
diff --git a/ansible/roles/infra_rampup_stack_nodes/tasks/configure_docker.yml b/ansible/roles/infra_rampup_stack_nodes/tasks/configure_docker.yml
new file mode 100644
index 000000000..a6ae00e51
--- /dev/null
+++ b/ansible/roles/infra_rampup_stack_nodes/tasks/configure_docker.yml
@@ -0,0 +1,48 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- file:
+ path: /lib/systemd/system/docker.service.d
+ state: directory
+
+- copy:
+ content: |
+ [Service]
+ MountFlags=shared
+ dest: /lib/systemd/system/docker.service.d/kolla.conf
+
+- set_fact:
+ ostack_hosts: "{{ ostack_hosts | default([]) + [hostvars[item].ansible_host] }}"
+ with_items: "{{ groups['ostack'] }}"
+
+- name: Create proxy configuration for docker
+ copy:
+ content: |
+ [Service]
+ Environment="HTTP_PROXY={{ lookup('env', 'http_proxy') }}"
+ Environment="HTTPS_PROXY={{ lookup('env', 'https_proxy') }}"
+ Environment="FTP_PROXY={{ lookup('env', 'ftp_proxy') }}"
+ Environment="NO_PROXY={{ lookup('env', 'no_proxy') }},{{ hostvars[ansible_host].ansible_default_ipv4.address }},{{ ostack_hosts | join(',') }}"
+ dest: /lib/systemd/system/docker.service.d/http-proxy.conf
+
+- name: Update /etc/default/docker
+ lineinfile:
+ path: /etc/default/docker
+ line: 'DOCKER_OPTS="--dns {{ hostvars[ansible_host].ansible_default_ipv4.gateway }} --insecure-registry {{ deploy_host }}:4000"'
+
+- name: Reload systemd and restart docker
+ systemd:
+ state: restarted
+ daemon_reload: yes
+ name: docker
diff --git a/ansible/roles/infra_rampup_stack_nodes/tasks/install_packets.yml b/ansible/roles/infra_rampup_stack_nodes/tasks/install_packets.yml
new file mode 100644
index 000000000..d22e8155a
--- /dev/null
+++ b/ansible/roles/infra_rampup_stack_nodes/tasks/install_packets.yml
@@ -0,0 +1,85 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: Update apt cache and upgrade packages
+ apt:
+ update_cache: yes
+ upgrade: yes
+ cache_valid_time: 36000
+ environment: "{{ proxy_env }}"
+
+- name: Install packages
+ apt:
+ name: "{{ item }}"
+ with_items:
+ - python-tox
+ - python-dev
+ - libffi-dev
+ - libssl-dev
+ - python3-dev
+ - ethtool
+ - ipmitool
+ - git
+ - ntp
+ - apparmor-utils
+ - docker.io
+ - libvirt-bin
+ - python-setuptools
+ - build-essential
+ environment: "{{ proxy_env }}"
+
+- name: Install pip
+ shell: easy_install pip
+ environment: "{{ proxy_env }}"
+
+- name: Install or upgrade Python packages (ansible, docker, tox, shade)
+ pip:
+ name: "{{ item }}"
+ state: latest
+ with_items:
+ - ansible
+ - docker
+ - tox
+ - shade
+ environment: "{{ proxy_env }}"
+
+- name: Remove conflicting packages
+ apt:
+ name: "{{ item }}"
+ state: absent
+ with_items:
+ - lxd
+ - lxc
+
+- name: Stop and disable libvirt
+ systemd:
+ state: stopped
+ enabled: no
+ name: libvirt-bin.service
+
+- name: Stop and disable apparmor service
+ systemd:
+ name: apparmor
+ state: stopped
+ enabled: no
+
+- name: Get stat of libvirtd apparmor profile
+ stat:
+ path: /etc/apparmor.d/disable/usr.sbin.libvirtd
+ register: apparmor_libvirtd_profile
+
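+# Disable the libvirtd AppArmor profile: link it into the disable directory and unload it with apparmor_parser -R.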
+- name: Remove apparmor profile for libvirt
+ shell: ln -s /etc/apparmor.d/usr.sbin.libvirtd /etc/apparmor.d/disable/ && apparmor_parser -R /etc/apparmor.d/usr.sbin.libvirtd
+ when:
+ - apparmor_libvirtd_profile.stat.exists == False
diff --git a/ansible/roles/infra_rampup_stack_nodes/tasks/main.yml b/ansible/roles/infra_rampup_stack_nodes/tasks/main.yml
new file mode 100644
index 000000000..65d5e59d8
--- /dev/null
+++ b/ansible/roles/infra_rampup_stack_nodes/tasks/main.yml
@@ -0,0 +1,39 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+# Configure proxy and install python to support ansible
+- name: Create apt.conf proxy config
+ raw: >
+ echo 'Acquire::http::proxy "{{ hostvars[groups['jumphost'][0]].proxy_proto + '://' + hostvars[groups['jumphost'][0]].proxy_host_ip + ':' + hostvars[groups['jumphost'][0]].proxy_port }}";'
+ > /etc/apt/apt.conf.d/22proxy
+
+- name: Install python, which is required to run Ansible modules
+ raw: apt-get update && apt-get install -y python
+
+- name: Gather facts
+ setup:
+
+- name: Update configuration files
+ include_tasks: update_conf_files.yml
+
+- name: Install packages
+ include_tasks: install_packets.yml
+ when: ansible_hostname in groups['ostack']
+
+- name: Configure docker settings
+ include_tasks: configure_docker.yml
+ when: ansible_hostname in groups['ostack']
+
+- name: Generate and apply SSH keys
+ include_tasks: update_keys.yml
diff --git a/ansible/roles/infra_rampup_stack_nodes/tasks/update_conf_files.yml b/ansible/roles/infra_rampup_stack_nodes/tasks/update_conf_files.yml
new file mode 100644
index 000000000..424fb543b
--- /dev/null
+++ b/ansible/roles/infra_rampup_stack_nodes/tasks/update_conf_files.yml
@@ -0,0 +1,69 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: Set hostname
+ shell: hostname {{ ansible_hostname }}
+
+- name: Delete hosts between markers
+ blockinfile:
+ path: /etc/hosts
+ marker: "# {mark} generated hosts file"
+ content: ""
+
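+# Build an /etc/hosts block mapping each OpenStack node's IP to its hostname.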
+- set_fact:
+ block_str: "{{ block_str | default('') + hostvars[item].host_ip + ' ' + hostvars[item].ansible_host + '\n'}}"
+ with_items: "{{ groups['ostack'] }}"
+
+- name: Update /etc/hosts
+ blockinfile:
+ path: /etc/hosts
+ block: |
+ {{ block_str }}
+ marker: "# {mark} generated hosts file"
+
+- name: Update /etc/hosts
+ lineinfile:
+ path: /etc/hosts
+ regexp: ".*{{ hostvars[groups['jumphost'][0]].proxy_host }}.*"
+ line: "{{ hostvars[groups['jumphost'][0]].proxy_host_ip }} {{ hostvars[groups['jumphost'][0]].proxy_host }}"
+
+- name: Turn off IPv6
+ lineinfile:
+ path: /etc/sysctl.conf
+ regexp: '^{{ item }}.*'
+ line: "{{ item }} = 1"
+ with_items:
+ - 'net.ipv6.conf.all.disable_ipv6'
+ - 'net.ipv6.conf.default.disable_ipv6'
+ - 'net.ipv6.conf.lo.disable_ipv6'
+
+- name: Update IP configuration
+ shell: sysctl -p
+
+- name: Update resolv.conf
+ shell: echo "{{ 'nameserver ' + hostvars[ansible_host].ansible_default_ipv4.gateway }}" > /etc/resolvconf/resolv.conf.d/base
+
+- name: Update name servers
+ shell: resolvconf -u
+
+- name: Update /etc/environment
+ lineinfile:
+ path: /etc/environment
+ regexp: "{{ item.find }}"
+ line: "{{ item.add }}"
+ with_items:
+ - { find: 'http_proxy=', add: "{{ 'export http_proxy=' + lookup('env', 'http_proxy') }}" }
+ - { find: 'https_proxy=', add: "{{ 'export https_proxy=' + lookup('env', 'https_proxy') }}" }
+ - { find: 'ftp_proxy=', add: "{{ 'export ftp_proxy=' + lookup('env', 'ftp_proxy') }}" }
+ - { find: 'no_proxy=', add: "{{ 'export no_proxy=' + lookup('env', 'no_proxy') + ',' + ansible_host + ',' + hostvars[ansible_host].ansible_default_ipv4.address }}" }
diff --git a/ansible/roles/infra_rampup_stack_nodes/tasks/update_keys.yml b/ansible/roles/infra_rampup_stack_nodes/tasks/update_keys.yml
new file mode 100644
index 000000000..816f7cbca
--- /dev/null
+++ b/ansible/roles/infra_rampup_stack_nodes/tasks/update_keys.yml
@@ -0,0 +1,48 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
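+# Generate an SSH key pair on every node, then distribute each public key to all OpenStack and yardstick hosts.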
+- name: Generate keys
+ user:
+ name: "{{ hostvars[ansible_host].ansible_user }}"
+ generate_ssh_key: yes
+ state: present
+ ssh_key_file: "/root/.ssh/id_rsa"
+
+- name: Get remote files
+ fetch:
+ src: "/root/.ssh/id_rsa.pub"
+ dest: "/tmp"
+
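+# fetch stores each node's key under /tmp/<hostname>/root/.ssh/id_rsa.pub; the lookup below reads it from there.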
+- name: Update authorized_key
+ authorized_key:
+ key: "{{ lookup('file', '/tmp/{{ hostvars[item].ansible_host }}/root/.ssh/id_rsa.pub') }}"
+ state: present
+ user: "{{ hostvars[item].ansible_user }}"
+ with_items:
+ - "{{ groups['ostack'] }}"
+ - "{{ groups['yardstickG'] }}"
+
+- name: Make sure the known hosts file exists
+ file:
+ path: "{{ ssh_known_hosts_file }}"
+ state: touch
+
+- name: Add key to known hosts
+ known_hosts:
+ name: "{{ hostvars[item].ansible_host }}"
+ key: "{{ lookup('pipe', 'ssh-keyscan -t rsa {{ hostvars[item].ansible_host }}') }}"
+ path: "{{ ssh_known_hosts_file }}"
+ with_items:
+ - "{{ groups['ostack'] }}"
+ - "{{ groups['yardstickG'] }}"
diff --git a/ansible/roles/infra_rampup_stack_nodes/vars/main.yml b/ansible/roles/infra_rampup_stack_nodes/vars/main.yml
new file mode 100644
index 000000000..252eb86b3
--- /dev/null
+++ b/ansible/roles/infra_rampup_stack_nodes/vars/main.yml
@@ -0,0 +1,16 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+ssh_known_hosts_file: "/root/.ssh/known_hosts"
+deploy_host: "{{ hostvars[groups['deploy'][0]].ansible_host }}"
diff --git a/ansible/roles/install_dependencies_jumphost/tasks/Debian.yml b/ansible/roles/install_dependencies_jumphost/tasks/Debian.yml
new file mode 100755
index 000000000..9baf7e59e
--- /dev/null
+++ b/ansible/roles/install_dependencies_jumphost/tasks/Debian.yml
@@ -0,0 +1,76 @@
+# Copyright (c) 2017 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: Update repositories
+ apt:
+ update_cache: yes
+
+- name: Install core packages
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+ with_items:
+ - wget
+ - curl
+ - screen
+ - procps
+ - socat
+ - sshpass
+ - sudo
+ - vim
+ - libffi-dev
+ - libfuse-dev
+ - libssl-dev
+ - libxft-dev
+ - libxml2-dev
+ - libxss-dev
+ - libxslt-dev
+ - libxslt1-dev
+ - libzmq-dev
+ - qemu-user-static
+ - qemu-utils
+ - kpartx
+ - python
+ - python-setuptools
+ - python-dev
+ - python-pip
+ - python-libvirt
+ - python-virtualenv
+ - bridge-utils
+ - ebtables
+ - openssl
+ - ccze
+ - nginx-full
+ - uwsgi
+ - uwsgi-plugin-python
+ - supervisor
+ - lsof
+ - nodejs
+ - npm
+ - rabbitmq-server
+
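+# Presumably needed, together with qemu-user-static above, when building aarch64 (arm64) container images on a non-arm64 jumphost.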
+- name: Install libc6:arm64 package
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+ with_items:
+ - libc6:arm64
+ when:
+ - arch is defined
+ - arch != arch_arm64
+ - installation_mode == inst_mode_container
+
+- name: Remove dependencies that are no longer required
+ apt:
+ autoremove: yes
+
+- name: Remove useless packages from the cache
+ apt:
+ autoclean: yes
diff --git a/ansible/roles/install_dependencies/tasks/RedHat.yml b/ansible/roles/install_dependencies_jumphost/tasks/RedHat.yml
index a5d4d0b15..85eb1156a 100644
--- a/ansible/roles/install_dependencies/tasks/RedHat.yml
+++ b/ansible/roles/install_dependencies_jumphost/tasks/RedHat.yml
@@ -42,5 +42,13 @@
- python-setuptools
- libffi-devel
- python-devel
- - kpartx
-
+ - nodejs
+ - npm
+ - gcc
+ - lsof
+ - procps
+ - bridge-utils
+ - ebtables
+ - openssl
+ - python-virtualenv
+ - ccze
diff --git a/ansible/roles/install_dependencies/tasks/Debian.yml b/ansible/roles/install_dependencies_jumphost/tasks/Suse.yml
index bba6fb13c..af53c9cd5 100755..100644
--- a/ansible/roles/install_dependencies/tasks/Debian.yml
+++ b/ansible/roles/install_dependencies_jumphost/tasks/Suse.yml
@@ -1,4 +1,4 @@
-# Copyright (c) 2017 Intel Corporation.
+# Copyright (c) 2018 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,39 +12,38 @@
# See the License for the specific language governing permissions and
# limitations under the License.
---
+- name: Install EPEL if needed
+ action: "{{ ansible_pkg_mgr }} name=epel-release state=present"
+ when: ansible_distribution in ['RedHat', 'CentOS', 'ScientificLinux']
+
- name: Install core packages
action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
with_items:
- - python-minimal
+ - deltarpm
- wget
+ - expect
- curl
- screen
- - procps
- git
- socat
- sshpass
- - libxslt1-dev
- - libffi-dev
- - libfuse-dev
- qemu-kvm
- - qemu-user-static
- - qemu-utils
- kpartx
- - libvirt0
- - python-libvirt
+ - libxslt-devel
+ - libffi-devel
+ - openssl-devel
+ - nginx
+ - uwsgi
+ - python-setuptools
+ - libffi-devel
+ - python-devel
+ - nodejs
+ - npm
+ - gcc
+ - lsof
+ - procps
- bridge-utils
- ebtables
- openssl
- - libssl-dev
- - python-dev
- python-virtualenv
- ccze
- - libxml2-dev
- - libxslt-dev
- - libzmq-dev
- - nginx-full
- - uwsgi
- - uwsgi-plugin-python
- - supervisor
- - python-setuptools
- - lsof
diff --git a/ansible/roles/install_dependencies/tasks/main.yml b/ansible/roles/install_dependencies_jumphost/tasks/main.yml
index 27660c3ca..27660c3ca 100644
--- a/ansible/roles/install_dependencies/tasks/main.yml
+++ b/ansible/roles/install_dependencies_jumphost/tasks/main.yml
diff --git a/ansible/roles/install_yardstick/tasks/main.yml b/ansible/roles/install_yardstick/tasks/main.yml
new file mode 100644
index 000000000..ee1b83756
--- /dev/null
+++ b/ansible/roles/install_yardstick/tasks/main.yml
@@ -0,0 +1,46 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+# Required variables: yardstick_dir, virtual_environment
+
+- name: Define variables
+ set_fact:
+ map_min_addr_file: "/etc/sysctl.d/mmap_min_addr.conf"
+
+- name: Remove the kernel restriction on the minimum virtual address a process is allowed to mmap
+ copy:
+ dest: "{{ map_min_addr_file }}"
+ content: "vm.mmap_min_addr = 0\n"
+
+- name: Config git SSL
+ git_config:
+ name: http.sslVerify
+ scope: global
+ value: False
+
+# There is a bug with the easy_install Ansible module on SUSE Linux.
+# Until this is fixed, the shell command must be used.
+- name: Install pip
+ shell: easy_install -U pip
+# easy_install:
+# name: pip
+# state: latest
+
+- name: Install Yardstick without a virtual environment
+ include_tasks: regular_install.yml
+ when: virtual_environment == False
+
+- name: Install Yardstick in a virtual environment
+ include_tasks: virtual_install.yml
+ when: virtual_environment == True
diff --git a/ansible/roles/install_yardstick/tasks/regular_install.yml b/ansible/roles/install_yardstick/tasks/regular_install.yml
new file mode 100644
index 000000000..4a9925ab4
--- /dev/null
+++ b/ansible/roles/install_yardstick/tasks/regular_install.yml
@@ -0,0 +1,22 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
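+# Installs Yardstick and its requirements into the system Python environment (no virtualenv).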
+- name: Install Yardstick requirements
+ pip:
+ requirements: "{{ yardstick_dir }}/requirements.txt"
+
+- name: Install Yardstick code
+ pip:
+ name: "{{ yardstick_dir }}/."
+ extra_args: -e
diff --git a/ansible/roles/install_yardstick/tasks/virtual_install.yml b/ansible/roles/install_yardstick/tasks/virtual_install.yml
new file mode 100644
index 000000000..8545acbcb
--- /dev/null
+++ b/ansible/roles/install_yardstick/tasks/virtual_install.yml
@@ -0,0 +1,25 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
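+# Installs Yardstick and its requirements into a virtualenv under {{ yardstick_dir }}/virtualenv.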
+- name: Install Yardstick requirements
+ pip:
+ requirements: "{{ yardstick_dir }}/requirements.txt"
+ virtualenv: "{{ yardstick_dir }}/virtualenv"
+
+- name: Install Yardstick code
+ pip:
+ name: "{{ yardstick_dir }}/."
+ extra_args: -e
+ virtualenv: "{{ yardstick_dir }}/virtualenv"
+
diff --git a/ansible/standalone_ovs_scale_out_ixia_correlated_test.yaml b/ansible/standalone_ovs_scale_out_ixia_correlated_test.yaml
index 516676576..b54ea9b57 100644
--- a/ansible/standalone_ovs_scale_out_ixia_correlated_test.yaml
+++ b/ansible/standalone_ovs_scale_out_ixia_correlated_test.yaml
@@ -51,13 +51,12 @@
user: ""
password: ""
key_filename: ~
- tg_config:
+ tg_config:
ixchassis: "1.1.1.127" #ixia chassis ip
tcl_port: "8009" # tcl server port
lib_path: "/opt/ixia/ixos-api/8.01.0.2/lib/ixTcl1.0"
root_dir: "/opt/ixia/ixos-api/8.01.0.2/"
py_bin_path: "/opt/ixia/ixload/8.01.106.3/bin/"
- py_lib_path: "/opt/ixia/ixnetwork/8.01.1029.14/lib/PythonApi"
dut_result_dir: "/mnt/results"
version: "8.01.106.3"
pcis:
diff --git a/ansible/standalone_ovs_scale_out_ixia_test.yaml b/ansible/standalone_ovs_scale_out_ixia_test.yaml
index ff665377f..cae373432 100644
--- a/ansible/standalone_ovs_scale_out_ixia_test.yaml
+++ b/ansible/standalone_ovs_scale_out_ixia_test.yaml
@@ -60,7 +60,6 @@
lib_path: "/opt/ixia/ixos-api/8.01.0.2/lib/ixTcl1.0"
root_dir: "/opt/ixia/ixos-api/8.01.0.2/"
py_bin_path: "/opt/ixia/ixload/8.01.106.3/bin/"
- py_lib_path: "/opt/ixia/ixnetwork/8.01.1029.14/lib/PythonApi"
dut_result_dir: "/mnt/results"
version: "8.01.106.3"
pcis:
diff --git a/ansible/standalone_sriov_scale_out_ixia_correlated_test.yaml b/ansible/standalone_sriov_scale_out_ixia_correlated_test.yaml
index 45a4a498b..0e3a0af55 100644
--- a/ansible/standalone_sriov_scale_out_ixia_correlated_test.yaml
+++ b/ansible/standalone_sriov_scale_out_ixia_correlated_test.yaml
@@ -43,13 +43,12 @@
user: ""
password: ""
key_filename: ~
- tg_config:
+ tg_config:
ixchassis: "1.1.1.127" #ixia chassis ip
tcl_port: "8009" # tcl server port
lib_path: "/opt/ixia/ixos-api/8.01.0.2/lib/ixTcl1.0"
root_dir: "/opt/ixia/ixos-api/8.01.0.2/"
py_bin_path: "/opt/ixia/ixload/8.01.106.3/bin/"
- py_lib_path: "/opt/ixia/ixnetwork/8.01.1029.14/lib/PythonApi"
dut_result_dir: "/mnt/results"
version: "8.01.106.3"
pcis:
diff --git a/ansible/standalone_sriov_scale_out_ixia_test.yaml b/ansible/standalone_sriov_scale_out_ixia_test.yaml
index 659dbef07..8fb09d9b9 100644
--- a/ansible/standalone_sriov_scale_out_ixia_test.yaml
+++ b/ansible/standalone_sriov_scale_out_ixia_test.yaml
@@ -49,7 +49,6 @@
lib_path: "/opt/ixia/ixos-api/8.01.0.2/lib/ixTcl1.0"
root_dir: "/opt/ixia/ixos-api/8.01.0.2/"
py_bin_path: "/opt/ixia/ixload/8.01.106.3/bin/"
- py_lib_path: "/opt/ixia/ixnetwork/8.01.1029.14/lib/PythonApi"
dut_result_dir: "/mnt/results"
version: "8.01.106.3"
pcis:
diff --git a/dashboard/opnfv_yardstick_tc058.json b/dashboard/opnfv_yardstick_tc058.json
new file mode 100644
index 000000000..55b5a5f33
--- /dev/null
+++ b/dashboard/opnfv_yardstick_tc058.json
@@ -0,0 +1,265 @@
+{
+ "annotations": {
+ "list": []
+ },
+ "editable": true,
+ "gnetId": null,
+ "graphTooltip": 0,
+ "hideControls": false,
+ "id": 33,
+ "links": [],
+ "refresh": "1m",
+ "rows": [
+ {
+ "collapse": false,
+ "height": 343,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "yardstick",
+ "description": "",
+ "fill": 1,
+ "id": 1,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": true,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 9,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "",
+ "dsType": "influxdb",
+ "groupBy": [],
+ "measurement": "opnfv_yardstick_tc058",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT \"server-status_outage_time\" FROM \"opnfv_yardstick_tc058\" WHERE $timeFilter",
+ "rawQuery": false,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "server-status_outage_time"
+ ],
+ "type": "field"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [
+ {
+ "colorMode": "critical",
+ "fill": true,
+ "line": true,
+ "op": "gt",
+ "value": 5
+ },
+ {
+ "colorMode": "ok",
+ "fill": true,
+ "line": true,
+ "op": "lt",
+ "value": 5
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Server Status outage time",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "s",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": true,
+ "colorValue": false,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "datasource": "yardstick",
+ "format": "short",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "id": 4,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 3,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "alias": "",
+ "dsType": "influxdb",
+ "groupBy": [],
+ "measurement": "opnfv_yardstick_tc058",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT \"sla_pass\" FROM \"opnfv_yardstick_tc058\" WHERE $timeFilter",
+ "rawQuery": false,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "sla_pass"
+ ],
+ "type": "field"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": "0.5,1",
+ "title": "SLA PASS/FAIL",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "avg"
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Dashboard Row",
+ "titleSize": "h6"
+ }
+ ],
+ "schemaVersion": 14,
+ "style": "dark",
+ "tags": [
+ "HA"
+ ],
+ "templating": {
+ "list": []
+ },
+ "time": {
+ "from": "2018-03-26T09:00:00.000Z",
+ "to": "2018-03-28T08:59:59.998Z"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "5s",
+ "10s",
+ "30s",
+ "1m",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "",
+ "title": "opnfv_yardstick_tc058",
+ "version": 8
+}
diff --git a/docker/Dockerfile b/docker/Dockerfile
index b97337e4d..62ea0d037 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -20,25 +20,31 @@ ENV REPOS_DIR="/home/opnfv/repos" \
# Set work directory
# Yardstick repo
-ENV YARDSTICK_REPO_DIR="${REPOS_DIR}/yardstick" \
+ENV YARDSTICK_REPO_DIR="${REPOS_DIR}/yardstick/" \
+ RELENG_REPO_DIR="${REPOS_DIR}/releng" \
STORPERF_REPO_DIR="${REPOS_DIR}/storperf"
-RUN apt-get update && apt-get install -y git python-setuptools python-pip && apt-get -y autoremove && apt-get clean
+RUN apt-get update && apt-get install -y git python python-setuptools python-pip && apt-get -y autoremove && apt-get clean
RUN easy_install -U setuptools==30.0.0
-RUN pip install appdirs==1.4.0 pyopenssl==17.5.0 python-openstackclient==3.11.0 python-heatclient==1.11.0
+RUN pip install appdirs==1.4.0 pyopenssl==17.5.0 python-openstackclient==3.11.0 python-heatclient==1.11.0 ansible==2.4.2
RUN mkdir -p ${REPOS_DIR}
RUN git config --global http.sslVerify false
+# For developers: to test your changes, comment out the git clone for ${YARDSTICK_REPO_DIR},
+# uncomment the RUN and COPY commands below, and run docker build from your yardstick
+# directory on the host.
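+# Example invocation from the repo root (the image tag is illustrative): docker build -f docker/Dockerfile -t opnfv/yardstick:dev .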
RUN git clone --depth 1 -b $BRANCH https://gerrit.opnfv.org/gerrit/yardstick ${YARDSTICK_REPO_DIR}
+#RUN mkdir ${YARDSTICK_REPO_DIR}
+#COPY ./ ${YARDSTICK_REPO_DIR}
+RUN git clone --depth 1 https://gerrit.opnfv.org/gerrit/releng ${RELENG_REPO_DIR}
RUN git clone --depth 1 -b $BRANCH https://gerrit.opnfv.org/gerrit/storperf ${STORPERF_REPO_DIR}
-WORKDIR ${YARDSTICK_REPO_DIR}
-RUN ${YARDSTICK_REPO_DIR}/install.sh
+RUN ansible-playbook -c local -vvv -e INSTALLATION_MODE="container" ${YARDSTICK_REPO_DIR}/ansible/install.yaml
+
RUN ${YARDSTICK_REPO_DIR}/docker/supervisor.sh
RUN echo "daemon off;" >> /etc/nginx/nginx.conf
-
# nginx=5000, rabbitmq=5672
EXPOSE 5000 5672
@@ -47,8 +53,8 @@ ADD http://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-d
COPY ./exec_tests.sh /usr/local/bin/
-ENV NSB_DIR="/opt/nsb_bin" \
- PYTHONPATH="${PYTHONPATH}:${NSB_DIR}/trex_client:${NSB_DIR}/trex_client/stl"
+ENV NSB_DIR="/opt/nsb_bin"
+ENV PYTHONPATH="${PYTHONPATH}:${NSB_DIR}/trex_client:${NSB_DIR}/trex_client/stl"
WORKDIR ${REPOS_DIR}
CMD ["/usr/bin/supervisord"]
diff --git a/docker/Dockerfile.aarch64.patch b/docker/Dockerfile.aarch64.patch
index e8dbea288..6c7381284 100644
--- a/docker/Dockerfile.aarch64.patch
+++ b/docker/Dockerfile.aarch64.patch
@@ -1,24 +1,16 @@
-From: Cristina Pauna <cristina.pauna@enea.com>
-Date: Thu, 11 Jan 2018 19:06:26 +0200
-Subject: [PATCH] Patch for Yardstick AARCH64 Docker file
+From: ting wu <ting.wu@enea.com>
+Date: Tue, 8 May 2018 14:02:52 +0200
+Subject: [PATCH][PATCH] Patch for Yardstick AARCH64 Docker file
-Signed-off-by: Cristina Pauna <cristina.pauna@enea.com>
-Signed-off-by: Alexandru Nemes <alexandru.nemes@enea.com>
+Signed-off-by: ting wu <ting.wu@enea.com>
---
- docker/Dockerfile | 13 +++++++------
- 1 file changed, 7 insertions(+), 6 deletions(-)
+ docker/Dockerfile | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/docker/Dockerfile b/docker/Dockerfile
-index 2ee5b4c..23e5ea5 100644
+index 62ea0d0..f2f41771 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
-@@ -1,5 +1,5 @@
- ##############################################################################
--# Copyright (c) 2015 Ericsson AB and others.
-+# Copyright (c) 2017 Enea AB and others.
- #
- # All rights reserved. This program and the accompanying materials
- # are made available under the terms of the Apache License, Version 2.0
@@ -7,9 +7,9 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
@@ -31,17 +23,17 @@ index 2ee5b4c..23e5ea5 100644
ARG BRANCH=master
-@@ -24,7 +24,8 @@ ENV YARDSTICK_REPO_DIR="${REPOS_DIR}/yardstick" \
+@@ -24,7 +24,8 @@ ENV YARDSTICK_REPO_DIR="${REPOS_DIR}/yardstick/" \
RELENG_REPO_DIR="${REPOS_DIR}/releng" \
STORPERF_REPO_DIR="${REPOS_DIR}/storperf"
--RUN apt-get update && apt-get install -y git python-setuptools python-pip && apt-get -y autoremove && apt-get clean
-+RUN apt-get update && apt-get install -y git python-setuptools python-pip && apt-get -y autoremove && \
+-RUN apt-get update && apt-get install -y git python python-setuptools python-pip && apt-get -y autoremove && apt-get clean
++RUN apt-get update && apt-get install -y git python python-setuptools python-pip && apt-get -y autoremove && \
+ apt-get install -y libssl-dev && apt-get -y install libffi-dev && apt-get clean
RUN easy_install -U setuptools==30.0.0
- RUN pip install appdirs==1.4.0 pyopenssl==17.5.0 python-openstackclient==3.11.0 python-heatclient==1.11.0
+ RUN pip install appdirs==1.4.0 pyopenssl==17.5.0 python-openstackclient==3.11.0 python-heatclient==1.11.0 ansible==2.4.2
-@@ -43,8 +44,8 @@ RUN echo "daemon off;" >> /etc/nginx/nginx.conf
+@@ -48,8 +49,8 @@ RUN echo "daemon off;" >> /etc/nginx/nginx.conf
# nginx=5000, rabbitmq=5672
EXPOSE 5000 5672
@@ -52,3 +44,4 @@ index 2ee5b4c..23e5ea5 100644
COPY ./exec_tests.sh /usr/local/bin/
+
diff --git a/docs/release/release-notes/release-notes.rst b/docs/release/release-notes/release-notes.rst
index 4ebf0eceb..6598a2751 100644
--- a/docs/release/release-notes/release-notes.rst
+++ b/docs/release/release-notes/release-notes.rst
@@ -1,7 +1,8 @@
+=======
License
=======
-OPNFV Euphrates release note for Yardstick Docs
+OPNFV Fraser release note for Yardstick Docs
are licensed under a Creative Commons Attribution 4.0 International License.
You should have received a copy of the license along with this.
If not, see <http://creativecommons.org/licenses/by/4.0/>.
@@ -9,8 +10,9 @@ If not, see <http://creativecommons.org/licenses/by/4.0/>.
The *Yardstick framework*, the *Yardstick test cases* are open-source software,
licensed under the terms of the Apache License, Version 2.0.
-OPNFV Euphrates Release Note for Yardstick
-==========================================
+=======================================
+OPNFV Fraser Release Note for Yardstick
+=======================================
.. toctree::
:maxdepth: 2
@@ -23,50 +25,43 @@ OPNFV Euphrates Release Note for Yardstick
Abstract
---------
+========
This document describes the release note of Yardstick project.
Version History
----------------
+===============
+-------------------+-----------+---------------------------------+
| *Date* | *Version* | *Comment* |
| | | |
+-------------------+-----------+---------------------------------+
-| December 15, 2017 | 5.1.0 | Yardstick for Euphrates release |
-| | | |
-+-------------------+-----------+---------------------------------+
-| October 20, 2017 | 5.0.0 | Yardstick for Euphrates release |
+| April 27, 2018 | 6.0.0 | Yardstick for Fraser release |
| | | |
+-------------------+-----------+---------------------------------+
Important Notes
----------------
+===============
The software delivered in the OPNFV Yardstick_ Project, comprising the
-*Yardstick framework*, the *Yardstick test cases* and the experimental
-framework *Apex Lake* is a realization of the methodology in ETSI-ISG
-NFV-TST001_.
+*Yardstick framework*, and the *Yardstick test cases* is a realization of
+the methodology in ETSI-ISG NFV-TST001_.
The *Yardstick* framework is *installer*, *infrastructure* and *application*
independent.
-OPNFV Euphrates Release
------------------------
+OPNFV Fraser Release
+====================
-This Euphrates release provides *Yardstick* as a framework for NFVI testing
+This Fraser release provides *Yardstick* as a framework for NFVI testing
and OPNFV feature testing, automated in the OPNFV CI pipeline, including:
* Documentation generated with Sphinx
* User Guide
-
* Developer Guide
-
* Release notes (this document)
-
* Results
* Automated Yardstick test suite (daily, weekly)
@@ -84,39 +79,29 @@ and OPNFV feature testing, automated in the OPNFV CI pipeline, including:
* Yardstick plug-in configuration yaml files, plug-in install/remove scripts
-For Euphrates release, the *Yardstick framework* is used for the following
+For Fraser release, the *Yardstick framework* is used for the following
testing:
* OPNFV platform testing - generic test cases to measure the categories:
* Compute
-
* Network
-
* Storage
-* OPNFV platform network service benchmarking(NSB)
+* OPNFV platform network service benchmarking (NSB)
* NSB
* Test cases for the following OPNFV Projects:
* Container4NFV
-
* High Availability
-
* IPv6
-
* KVM
-
* Parser
-
* StorPerf
-
* VSperf
- * virtual Traffic Classifier
-
The *Yardstick framework* is developed in the OPNFV community, by the
Yardstick_ team.
@@ -126,49 +111,47 @@ Yardstick_ team.
Release Data
-------------
+============
+--------------------------------+-----------------------+
| **Project** | Yardstick |
| | |
+--------------------------------+-----------------------+
-| **Repo/tag** | yardstick/opnfv-5.1.0 |
+| **Repo/tag** | yardstick/opnfv-6.0.0 |
| | |
+--------------------------------+-----------------------+
-| **Yardstick Docker image tag** | opnfv-5.1.0 |
+| **Yardstick Docker image tag** | opnfv-6.0.0 |
| | |
+--------------------------------+-----------------------+
-| **Release designation** | Euphrates |
+| **Release designation** | Fraser |
| | |
+--------------------------------+-----------------------+
-| **Release date** | December 15, 2017 |
+| **Release date** | April 27, 2018 |
| | |
+--------------------------------+-----------------------+
-| **Purpose of the delivery** | OPNFV Euphrates 5.1.0 |
+| **Purpose of the delivery** | OPNFV Fraser 6.0.0 |
| | |
+--------------------------------+-----------------------+
Deliverables
-------------
+============
Documents
-^^^^^^^^^
+---------
- - User Guide: http://docs.opnfv.org/en/stable-euphrates/submodules/yardstick/docs/testing/user/userguide/index.html
+ - User Guide: http://docs.opnfv.org/en/stable-fraser/submodules/yardstick/docs/testing/user/userguide/index.html
- - Developer Guide: http://docs.opnfv.org/en/stable-euphrates/submodules/yardstick/docs/testing/developer/devguide/index.html
+ - Developer Guide: http://docs.opnfv.org/en/stable-fraser/submodules/yardstick/docs/testing/developer/devguide/index.html
Software Deliverables
-^^^^^^^^^^^^^^^^^^^^^
-
+---------------------
- - The Yardstick Docker image: https://hub.docker.com/r/opnfv/yardstick (tag: opnfv-5.1.0)
+ - The Yardstick Docker image: https://hub.docker.com/r/opnfv/yardstick (tag: opnfv-6.0.0)
-
-New Contexts
-############
+List of Contexts
+^^^^^^^^^^^^^^^^
+--------------+-------------------------------------------+
| **Context** | **Description** |
@@ -188,31 +171,40 @@ New Contexts
+--------------+-------------------------------------------+
-New Runners
-###########
-
-+--------------+-------------------------------------------------------+
-| **Runner** | **Description** |
-| | |
-+--------------+-------------------------------------------------------+
-| *Arithmetic* | Steps every run arithmetically according to specified |
-| | input value |
-| | |
-+--------------+-------------------------------------------------------+
-| *Duration* | Runs for a specified period of time |
-| | |
-+--------------+-------------------------------------------------------+
-| *Iteration* | Runs for a specified number of iterations |
-| | |
-+--------------+-------------------------------------------------------+
-| *Sequence* | Selects input value to a scenario from an input file |
-| | and runs all entries sequentially |
-| | |
-+--------------+-------------------------------------------------------+
-
-
-New Scenarios
-#############
+List of Runners
+^^^^^^^^^^^^^^^
+
+Note: Yardstick Fraser 6.0.0 adds two new Runners, "Dynamictp" and "Search".
+
++---------------+-------------------------------------------------------+
+| **Runner** | **Description** |
+| | |
++---------------+-------------------------------------------------------+
+| *Arithmetic* | Steps every run arithmetically according to specified |
+| | input value |
+| | |
++---------------+-------------------------------------------------------+
+| *Duration* | Runs for a specified period of time |
+| | |
++---------------+-------------------------------------------------------+
+| *Iteration* | Runs for a specified number of iterations |
+| | |
++---------------+-------------------------------------------------------+
+| *Sequence* | Selects input value to a scenario from an input file |
+| | and runs all entries sequentially |
+| | |
++---------------+-------------------------------------------------------+
+| **Dynamictp** | A runner that searches for the max throughput with |
+| | binary search |
+| | |
++---------------+-------------------------------------------------------+
+| **Search** | A runner that runs for a specified time before returning |
+| | |
++---------------+-------------------------------------------------------+
+
+
+List of Scenarios
+^^^^^^^^^^^^^^^^^
+----------------+-----------------------------------------------------+
| **Category** | **Delivered** |
@@ -234,224 +226,138 @@ New Scenarios
| | |
+----------------+-----------------------------------------------------+
| *Compute* | * cpuload |
-| | |
| | * cyclictest |
-| | |
| | * lmbench |
-| | |
| | * lmbench_cache |
-| | |
| | * perf |
-| | |
| | * unixbench |
-| | |
| | * ramspeed |
-| | |
| | * cachestat |
-| | |
| | * memeoryload |
-| | |
| | * computecapacity |
-| | |
| | * SpecCPU2006 |
| | |
+----------------+-----------------------------------------------------+
| *Networking* | * iperf3 |
-| | |
| | * netperf |
-| | |
| | * netperf_node |
-| | |
| | * ping |
-| | |
| | * ping6 |
-| | |
| | * pktgen |
-| | |
| | * sfc |
-| | |
| | * sfc with tacker |
-| | |
-| | * vtc instantion validation |
-| | |
-| | * vtc instantion validation with noisy neighbors |
-| | |
-| | * vtc throughput |
-| | |
-| | * vtc throughput in the presence of noisy neighbors |
-| | |
| | * networkcapacity |
-| | |
| | * netutilization |
-| | |
| | * nstat |
-| | |
| | * pktgenDPDK |
| | |
+----------------+-----------------------------------------------------+
| *Parser* | Tosca2Heat |
| | |
+----------------+-----------------------------------------------------+
-| *Storage* | fio |
-| | |
-| | bonnie++ |
-| | |
-| | storagecapacity |
+| *Storage* | * fio |
+| | * bonnie++ |
+| | * storagecapacity |
| | |
+----------------+-----------------------------------------------------+
| *StorPerf* | storperf |
| | |
+----------------+-----------------------------------------------------+
-| *NSB* | vPE thoughput test case |
+| *NSB* | vFW throughput test case |
| | |
+----------------+-----------------------------------------------------+
-
New Test cases
-^^^^^^^^^^^^^^
+--------------
* Generic NFVI test cases
- * OPNFV_YARDSTICK_TCO78 - SPEC CPU 2006
-
- * OPNFV_YARDSTICK_TCO79 - Bonnie++
-
-* Kubernetes Test cases
+ * OPNFV_YARDSTICK_TC084 - SPEC CPU 2006 for VM
- * OPNFV_YARDSTICK_TCO80 - NETWORK LATENCY BETWEEN CONTAINER
+* HA Test cases
- * OPNFV_YARDSTICK_TCO81 - NETWORK LATENCY BETWEEN CONTAINER AND VM
+ * OPNFV_YARDSTICK_TC087 - SDN Controller resilience in non-HA configuration
+ * OPNFV_YARDSTICK_TC090 - Control node Openstack service down - database instance
+ * OPNFV_YARDSTICK_TC091 - Control node Openstack service down - heat-api
Version Change
---------------
+==============
Module Version Changes
-^^^^^^^^^^^^^^^^^^^^^^
+----------------------
-This is the fifth tracked release of Yardstick. It is based on following
+This is the sixth tracked release of Yardstick. It is based on the following
upstream versions:
-- OpenStack Ocata
-
-- OpenDayLight Nitrogen
-
-- ONOS Junco
+- OpenStack Pike
+- OpenDayLight Oxygen
Document Version Changes
-^^^^^^^^^^^^^^^^^^^^^^^^
+------------------------
-This is the fifth tracked version of the Yardstick framework in OPNFV.
+This is the sixth tracked version of the Yardstick framework in OPNFV.
It includes the following documentation updates:
- Yardstick User Guide: add "network service benchmarking(NSB)" chapter;
add "Yardstick - NSB Testing -Installation" chapter; add "Yardstick API" chapter;
add "Yardstick user interface" chapter; Update Yardstick installation chapter;
-
- Yardstick Developer Guide
-
- Yardstick Release Notes for Yardstick: this document
Feature additions
-^^^^^^^^^^^^^^^^^
-
-- Yardstick RESTful API support
-
-- Network service benchmarking
-
-- Stress testing with Bottlenecks team
-
-- Yardstick framework improvement:
-
- - yardstick report CLI
-
- - Node context support OpenStack configuration via Ansible
-
- - Https support
+-----------------
- - Kubernetes context type
-
-- Yardstick container local GUI
-
-- Python 3 support
+- Plugin-based test cases support Heat context
+- SR-IOV support for the Heat context
+- Support using existing network in Heat context
+- Support running test cases with existing VNFs (without destroying the VNF) in Heat context
+- Add vFW scale-up template
+- Improvements to unit tests and gating
+- GUI improvements for passing parameters
Scenario Matrix
----------------
-
-For Euphrates 5.0.0, Yardstick was tested on the following scenarios:
-
-+--------------------------+------+---------+------+------+
-| Scenario | Apex | Compass | Fuel | Joid |
-+==========================+======+=========+======+======+
-| os-nosdn-nofeature-noha | | | X | X |
-+--------------------------+------+---------+------+------+
-| os-nosdn-nofeature-ha | X | X | X | X |
-+--------------------------+------+---------+------+------+
-| os-odl_l2-nofeature-ha | | X | X | X |
-+--------------------------+------+---------+------+------+
-| os-odl_l2-nofeature-noha | | | X | |
-+--------------------------+------+---------+------+------+
-| os-odl_l3-nofeature-ha | X | X | X | |
-+--------------------------+------+---------+------+------+
-| os-odl_l3-nofeature-noha | | | X | |
-+--------------------------+------+---------+------+------+
-| os-onos-sfc-ha | | | | |
-+--------------------------+------+---------+------+------+
-| os-onos-nofeature-ha | | X | | X |
-+--------------------------+------+---------+------+------+
-| os-onos-nofeature-noha | | | | |
-+--------------------------+------+---------+------+------+
-| os-odl_l2-sfc-ha | | | X | |
-+--------------------------+------+---------+------+------+
-| os-odl_l2-sfc-noha | | | X | |
-+--------------------------+------+---------+------+------+
-| os-odl_l2-bgpvpn-ha | X | | X | |
-+--------------------------+------+---------+------+------+
-| os-odl_l2-bgpvpn-noha | | | X | |
-+--------------------------+------+---------+------+------+
-| os-nosdn-kvm-ha | X | | X | |
-+--------------------------+------+---------+------+------+
-| os-nosdn-kvm-noha | | | X | |
-+--------------------------+------+---------+------+------+
-| os-nosdn-ovs-ha | | | X | |
-+--------------------------+------+---------+------+------+
-| os-nosdn-ovs-noha | | | X | |
-+--------------------------+------+---------+------+------+
-| os-ocl-nofeature-ha | | X | | |
-+--------------------------+------+---------+------+------+
-| os-nosdn-lxd-ha | | | | X |
-+--------------------------+------+---------+------+------+
-| os-nosdn-lxd-noha | | | | X |
-+--------------------------+------+---------+------+------+
-| os-nosdn-fdio-ha | X | | | |
-+--------------------------+------+---------+------+------+
-| os-odl_l2-fdio-noha | X | | | |
-+--------------------------+------+---------+------+------+
-| os-odl-gluon-noha | X | | | |
-+--------------------------+------+---------+------+------+
-| os-nosdn-openo-ha | | X | | |
-+--------------------------+------+---------+------+------+
-| os-nosdn-kvm_ovs_dpdk | | | X | |
-| -noha | | | | |
-+--------------------------+------+---------+------+------+
-| os-nosdn-kvm_ovs_dpdk-ha | | | X | |
-+--------------------------+------+---------+------+------+
-| os-nosdn-kvm_ovs_dpdk | | | X | |
-| _bar-ha | | | | |
-+--------------------------+------+---------+------+------+
-| os-nosdn-kvm_ovs_dpdk | | | X | |
-| _bar-noha | | | | |
-+--------------------------+------+---------+------+------+
-| opnfv_os-ovn-nofeature- | X | | | |
-| noha_daily | | | | |
-+--------------------------+------+---------+------+------+
+===============
+
+For Fraser 6.0.0, Yardstick was tested on the following scenarios:
+
++-------------------------+------+---------+----------+------+------+-------+
+| Scenario | Apex | Compass | Fuel-arm | Fuel | Joid | Daisy |
++=========================+======+=========+==========+======+======+=======+
+| os-nosdn-nofeature-noha | X | X | | | X | |
++-------------------------+------+---------+----------+------+------+-------+
+| os-nosdn-nofeature-ha | X | X | X | X | X | X |
++-------------------------+------+---------+----------+------+------+-------+
+| os-nosdn-bar-noha | X | X | | | | |
++-------------------------+------+---------+----------+------+------+-------+
+| os-nosdn-bar-ha | X | | | | | |
++-------------------------+------+---------+----------+------+------+-------+
+| os-odl-bgpvpn-ha | X | | | | | |
++-------------------------+------+---------+----------+------+------+-------+
+| os-nosdn-calipso-noha | X | | | | | |
++-------------------------+------+---------+----------+------+------+-------+
+| os-nosdn-kvm-ha | | X | | | | |
++-------------------------+------+---------+----------+------+------+-------+
+| os-odl_l3-nofeature-ha | | X | | | | |
++-------------------------+------+---------+----------+------+------+-------+
+| os-odl-sfc-ha | | X | | | | |
++-------------------------+------+---------+----------+------+------+-------+
+| os-odl-nofeature-ha | | | | X | | X |
++-------------------------+------+---------+----------+------+------+-------+
+| os-nosdn-ovs-ha | | | | X | | |
++-------------------------+------+---------+----------+------+------+-------+
+| k8-nosdn-nofeature-ha | | X | | | | |
++-------------------------+------+---------+----------+------+------+-------+
+| k8-nosdn-stor4nfv-noha | | X | | | | |
++-------------------------+------+---------+----------+------+------+-------+
+
Test results
-------------
+============
Test results are available in:
@@ -459,109 +365,107 @@ Test results are available in:
The reporting pages can be found at:
-+---------------+-------------------------------------------------------------------------------------+
-| apex | http://testresults.opnfv.org/reporting/euphrates/yardstick/status-apex.html |
-+---------------+-------------------------------------------------------------------------------------+
-| compass | http://testresults.opnfv.org/reporting/euphrates/yardstick/status-compass.html |
-+---------------+-------------------------------------------------------------------------------------+
-| fuel\@x86 | http://testresults.opnfv.org/reporting/euphrates/yardstick/status-fuel@x86.html |
-+---------------+-------------------------------------------------------------------------------------+
-| fuel\@aarch64 | http://testresults.opnfv.org/reporting/euphrates/yardstick/status-fuel@aarch64.html |
-+---------------+-------------------------------------------------------------------------------------+
-| joid | http://testresults.opnfv.org/reporting/euphrates/yardstick/status-joid.html |
-+---------------+-------------------------------------------------------------------------------------+
++---------------+----------------------------------------------------------------------------------+
+| apex | http://testresults.opnfv.org/reporting/fraser/yardstick/status-apex.html |
++---------------+----------------------------------------------------------------------------------+
+| compass | http://testresults.opnfv.org/reporting/fraser/yardstick/status-compass.html |
++---------------+----------------------------------------------------------------------------------+
+| fuel\@x86 | http://testresults.opnfv.org/reporting/fraser/yardstick/status-fuel@x86.html |
++---------------+----------------------------------------------------------------------------------+
+| fuel\@aarch64 | http://testresults.opnfv.org/reporting/fraser/yardstick/status-fuel@aarch64.html |
++---------------+----------------------------------------------------------------------------------+
+| joid | http://testresults.opnfv.org/reporting/fraser/yardstick/status-joid.html |
++---------------+----------------------------------------------------------------------------------+
Known Issues/Faults
-^^^^^^^^^^^^^^^^^^^
+-------------------
Corrected Faults
-^^^^^^^^^^^^^^^^
+----------------
+
+Fraser 6.0.0:
+
++--------------------+--------------------------------------------------------------------------+
+| **JIRA REFERENCE** | **DESCRIPTION** |
++====================+==========================================================================+
+| YARDSTICK-831 | tc053 kill haproxy wrong |
++--------------------+--------------------------------------------------------------------------+
+| YARDSTICK-842 | load image fails when there's cirros image exist |
++--------------------+--------------------------------------------------------------------------+
+| YARDSTICK-857 | tc006 failed due to volume attached to different location "/dev/vdc" |
++--------------------+--------------------------------------------------------------------------+
+| YARDSTICK-874 | Specify supported architecture for Ubuntu backports repository |
++--------------------+--------------------------------------------------------------------------+
+| YARDSTICK-875 | Check if multiverse repository is available in Ubuntu |
++--------------------+--------------------------------------------------------------------------+
+| YARDSTICK-893 | Fix proxy env handling and ansible multinode support |
++--------------------+--------------------------------------------------------------------------+
+| YARDSTICK-899 | Variable local_iface_name is read before it is set |
++--------------------+--------------------------------------------------------------------------+
+| YARDSTICK-900 | Section in "upload_yardstick_image.yml" invalid |
++--------------------+--------------------------------------------------------------------------+
+| YARDSTICK-911 | Remove 'inconsistent-return-statements' from Pylint checks |
++--------------------+--------------------------------------------------------------------------+
+| YARDSTICK-989 | Yardstick real-time influxdb KPI reporting regressions |
++--------------------+--------------------------------------------------------------------------+
+| YARDSTICK-994 | NSB set-up build script for baremetal broken |
++--------------------+--------------------------------------------------------------------------+
+| YARDSTICK-996 | Error in address input format in "_ip_range_action_partial" |
++--------------------+--------------------------------------------------------------------------+
+| YARDSTICK-1003 | Prox vnf descriptor cleanup for tg and vnf |
++--------------------+--------------------------------------------------------------------------+
+| YARDSTICK-1006 | Ansible destroy script will fail if vm has already been undefined |
++--------------------+--------------------------------------------------------------------------+
+| YARDSTICK-1012 | constants: fix pylint warnings for OSError |
++--------------------+--------------------------------------------------------------------------+
+| YARDSTICK-1014 | Remove unused args in |
+| | network_services.traffic_profile.ixia_rfc2544.IXIARFC2544Profile |
++--------------------+--------------------------------------------------------------------------+
+| YARDSTICK-1016 | Allow vm to access outside world through default gateway |
++--------------------+--------------------------------------------------------------------------+
+| YARDSTICK-1019 | For 'qemu-img version 2.10.1' unit 'MB' is not acceptable ansible script |
++--------------------+--------------------------------------------------------------------------+
+| YARDSTICK-1021 | NSB: All Sample VNF test cases timeout after 1 hour of execution |
++--------------------+--------------------------------------------------------------------------+
+| YARDSTICK-1036 | Prox: Addition of storage of extra counters for Grafana |
++--------------------+--------------------------------------------------------------------------+
+| YARDSTICK-1038 | Missing file which is described in the operation_conf.yaml |
++--------------------+--------------------------------------------------------------------------+
+| YARDSTICK-1047 | Error in string format in HeatTemplateError message |
++--------------------+--------------------------------------------------------------------------+
+| YARDSTICK-1056 | yardstick report command print error when run test case |
++--------------------+--------------------------------------------------------------------------+
+| YARDSTICK-1059 | Reduce the log level if TRex client is no connected |
++--------------------+--------------------------------------------------------------------------+
+| YARDSTICK-1073 | Error when retrieving "options" section in "scenario" |
++--------------------+--------------------------------------------------------------------------+
+| YARDSTICK-1080 | Running Test Case in Latest Yardstick Docker Image shows Error |
++--------------------+--------------------------------------------------------------------------+
+| YARDSTICK-1082 | tc043,tc055, tc063, tc075, pass wrong node name in the ci scenario yaml |
++--------------------+--------------------------------------------------------------------------+
+| YARDSTICK-1102 | Don't hide exception traceback from Task.start() |
++--------------------+--------------------------------------------------------------------------+
+| YARDSTICK-1107 | bad exception traceback print due to atexit_handler |
++--------------------+--------------------------------------------------------------------------+
+| YARDSTICK-1120 | HA test case tc050 should start monitor before attack |
++--------------------+--------------------------------------------------------------------------+
+
+Fraser 6.0.0 known restrictions/issues
+======================================
-Euphrates 5.1.0:
-
-+---------------------+-------------------------------------------------------------------------+
-| **JIRA REFERENCE** | **DESCRIPTION** |
-| | |
-+---------------------+-------------------------------------------------------------------------+
-| JIRA: YARDSTICK-841 | Fix various NSB license issues |
-+---------------------+-------------------------------------------------------------------------+
-| JIRA: YARDSTICK-73 | How To Work with Test Cases |
-+---------------------+-------------------------------------------------------------------------+
-| JIRA: YARDSTICK-500 | VNF testing documentation |
-+---------------------+-------------------------------------------------------------------------+
-| JIRA: YARDSTICK-826 | Allow overriding Heat IP addresses to match traffic generator profile |
-+---------------------+-------------------------------------------------------------------------+
-| JIRA: YARDSTICK-828 | Refactor doc/testing/user/userguide "Yardstick Installation" |
-+---------------------+-------------------------------------------------------------------------+
-| JIRA: YARDSTICK-830 | build_yardstick_image Ansible mount module doesn't work on Ubuntu 14.04 |
-+---------------------+-------------------------------------------------------------------------+
-| JIRA: YARDSTICK-833 | ansible_common transform password into lower case |
-+---------------------+-------------------------------------------------------------------------+
-| JIRA: YARDSTICK-847 | tc006, tc079, tc082 miss grafana dashboard in local deployment |
-+---------------------+-------------------------------------------------------------------------+
-| JIRA: YARDSTICK-849 | kill process do not accurately kill the process like "nova-api" |
-+---------------------+-------------------------------------------------------------------------+
-| JIRA: YARDSTICK-850 | tc023 miss description and tc050-58 wrong description |
-+---------------------+-------------------------------------------------------------------------+
-| JIRA: YARDSTICK-852 | tc078 cpu2006 fails in some situation |
-+---------------------+-------------------------------------------------------------------------+
-| JIRA: YARDSTICK-854 | yardstick docker lack of trex_client |
-+---------------------+-------------------------------------------------------------------------+
-| JIRA: YARDSTICK-867 | testcase tc078 have no data stored or dashboard to show results |
-+---------------------+-------------------------------------------------------------------------+
-| JIRA: YARDSTICK-871 | Remove img_modify_playbook assignation in build_yardstick_image.yml |
-+---------------------+-------------------------------------------------------------------------+
-| JIRA: YARDSTICK-829 | "nsb_setup.sh" doesn't parse the controller IP correctly |
-+---------------------+-------------------------------------------------------------------------+
-| JIRA: YARDSTICK-839 | NSB Prox BM test cases to be fixed for incorporating scale-up |
-+---------------------+-------------------------------------------------------------------------+
-| JIRA: YARDSTICK-840 | NSB Prox test documentation of vPE and LW-AFTR test cases |
-+---------------------+-------------------------------------------------------------------------+
-| JIRA: YARDSTICK-848 | NSB "Prox" : Cleanup duplicated traffic profile |
-+---------------------+-------------------------------------------------------------------------+
-
-
-
-
-Euphrates 5.0.0:
-
-+---------------------+--------------------------------------------+
-| **JIRA REFERENCE** | **DESCRIPTION** |
-| | |
-+---------------------+--------------------------------------------+
-| JIRA: YARDSTICK-599 | Could not load EntryPoint.parse when using |
-| | 'openstack -h' |
-+---------------------+--------------------------------------------+
-| JIRA: YARDSTICK-602 | Don't rely on staic ip addresses as they |
-| | are dynamic |
-+---------------------+--------------------------------------------+
-
-
-Euphratess 5.0.0 known restrictions/issues
-------------------------------------------
+-----------+-----------+----------------------------------------------+
| Installer | Scenario | Issue |
+===========+===========+==============================================+
-| any | \*-bgpvpn | Floating ips not supported. Some Test cases |
-| | | related to floating ips are excluded. |
-+-----------+-----------+----------------------------------------------+
-| any | odl_l3-\* | Some test cases related to using floating IP |
-| | | addresses fail because of a known ODL bug. |
-| | | |
-+-----------+-----------+----------------------------------------------+
-| compass | odl_l2-\* | In some test cases, VM instance will failed |
-| | | raising network interfaces. |
| | | |
+-----------+-----------+----------------------------------------------+
-
Useful links
-------------
+============
- wiki project page: https://wiki.opnfv.org/display/yardstick/Yardstick
- - wiki Yardstick Euphrates release planing page: https://wiki.opnfv.org/display/yardstick/Yardstick+Euphrates+Release+Planning
+ - wiki Yardstick Fraser release planning page: https://wiki.opnfv.org/display/yardstick/Release+Fraser
- Yardstick repo: https://git.opnfv.org/cgit/yardstick
diff --git a/docs/testing/developer/devguide/devguide.rst b/docs/testing/developer/devguide/devguide.rst
index dade49b75..04d5350be 100755
--- a/docs/testing/developer/devguide/devguide.rst
+++ b/docs/testing/developer/devguide/devguide.rst
@@ -432,7 +432,8 @@ Yardstick committers and contributors to review your codes.
:width: 800px
:alt: Gerrit for code review
-You can find Yardstick people info `here <https://wiki.opnfv.org/display/yardstick/People>`_.
+You can find a list of Yardstick people `here <https://wiki.opnfv.org/display/yardstick/People>`_,
+or use the ``yardstick-reviewers`` and ``yardstick-committers`` groups in Gerrit.
Modify the code under review in Gerrit
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/docs/testing/user/userguide/13-nsb-installation.rst b/docs/testing/user/userguide/13-nsb-installation.rst
index 1f6c79b0b..3e0ed0bfb 100644
--- a/docs/testing/user/userguide/13-nsb-installation.rst
+++ b/docs/testing/user/userguide/13-nsb-installation.rst
@@ -1058,40 +1058,8 @@ IxLoad
Config ``pod_ixia.yaml``
- .. code-block:: yaml
-
- nodes:
- -
- name: trafficgen_1
- role: IxNet
- ip: 1.2.1.1 #ixia machine ip
- user: user
- password: r00t
- key_filename: /root/.ssh/id_rsa
- tg_config:
- ixchassis: "1.2.1.7" #ixia chassis ip
- tcl_port: "8009" # tcl server port
- lib_path: "/opt/ixia/ixos-api/8.01.0.2/lib/ixTcl1.0"
- root_dir: "/opt/ixia/ixos-api/8.01.0.2/"
- py_bin_path: "/opt/ixia/ixload/8.01.106.3/bin/"
- py_lib_path: "/opt/ixia/ixnetwork/8.01.1029.14/lib/PythonApi"
- dut_result_dir: "/mnt/ixia"
- version: 8.1
- interfaces:
- xe0: # logical name from topology.yaml and vnfd.yaml
- vpci: "2:5" # Card:port
- driver: "none"
- dpdk_port_num: 0
- local_ip: "152.16.100.20"
- netmask: "255.255.0.0"
- local_mac: "00:98:10:64:14:00"
- xe1: # logical name from topology.yaml and vnfd.yaml
- vpci: "2:6" # [(Card, port)]
- driver: "none"
- dpdk_port_num: 1
- local_ip: "152.40.40.20"
- netmask: "255.255.0.0"
- local_mac: "00:98:28:28:14:00"
+ .. literalinclude:: code/pod_ixia.yaml
+    :language: yaml
for sriov/ovs_dpdk pod files, please refer to above Standalone Virtualization for ovs-dpdk/sriov configuration
@@ -1113,10 +1081,10 @@ IxLoad
IxNetwork
---------
-1. Software needed: ``IxNetworkAPI<ixnetwork verson>Linux64.bin.tgz``
- (Download from ixia support site)
- Install - ``IxNetworkAPI<ixnetwork verson>Linux64.bin.tgz``
-2. Update pod_ixia.yaml file with ixia details.
+IxNetwork test cases use the IxNetwork API Python Bindings module, which is
+installed as part of the project requirements.
+
+1. Update ``pod_ixia.yaml`` file with ixia details.
.. code-block:: console
@@ -1124,44 +1092,12 @@ IxNetwork
Config pod_ixia.yaml
- .. code-block:: yaml
-
- nodes:
- -
- name: trafficgen_1
- role: IxNet
- ip: 1.2.1.1 #ixia machine ip
- user: user
- password: r00t
- key_filename: /root/.ssh/id_rsa
- tg_config:
- ixchassis: "1.2.1.7" #ixia chassis ip
- tcl_port: "8009" # tcl server port
- lib_path: "/opt/ixia/ixos-api/8.01.0.2/lib/ixTcl1.0"
- root_dir: "/opt/ixia/ixos-api/8.01.0.2/"
- py_bin_path: "/opt/ixia/ixload/8.01.106.3/bin/"
- py_lib_path: "/opt/ixia/ixnetwork/8.01.1029.14/lib/PythonApi"
- dut_result_dir: "/mnt/ixia"
- version: 8.1
- interfaces:
- xe0: # logical name from topology.yaml and vnfd.yaml
- vpci: "2:5" # Card:port
- driver: "none"
- dpdk_port_num: 0
- local_ip: "152.16.100.20"
- netmask: "255.255.0.0"
- local_mac: "00:98:10:64:14:00"
- xe1: # logical name from topology.yaml and vnfd.yaml
- vpci: "2:6" # [(Card, port)]
- driver: "none"
- dpdk_port_num: 1
- local_ip: "152.40.40.20"
- netmask: "255.255.0.0"
- local_mac: "00:98:28:28:14:00"
+ .. literalinclude:: code/pod_ixia.yaml
+    :language: yaml
for sriov/ovs_dpdk pod files, please refer to above Standalone Virtualization for ovs-dpdk/sriov configuration
-3. Start IxNetwork TCL Server
+2. Start IxNetwork TCL Server
You will also need to configure the IxNetwork machine to start the IXIA
IxNetworkTclServer. This can be started like so:
@@ -1170,6 +1106,5 @@ IxNetwork
``Start->Programs->Ixia->IxNetwork->IxNetwork 7.21.893.14 GA->IxNetworkTclServer``
(or ``IxNetworkApiServer``)
-4. Execute testcase in samplevnf folder e.g.
+3. Execute the test case in the samplevnf folder, e.g.
``<repo>/samples/vnf_samples/nsut/vfw/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_ixia.yaml``
-
diff --git a/docs/testing/user/userguide/code/pod_ixia.yaml b/docs/testing/user/userguide/code/pod_ixia.yaml
new file mode 100644
index 000000000..4ab56fe4e
--- /dev/null
+++ b/docs/testing/user/userguide/code/pod_ixia.yaml
@@ -0,0 +1,31 @@
+nodes:
+-
+ name: trafficgen_1
+ role: IxNet
+ ip: 1.2.1.1 #ixia machine ip
+ user: user
+ password: r00t
+ key_filename: /root/.ssh/id_rsa
+ tg_config:
+ ixchassis: "1.2.1.7" #ixia chassis ip
+ tcl_port: "8009" # tcl server port
+ lib_path: "/opt/ixia/ixos-api/8.01.0.2/lib/ixTcl1.0"
+ root_dir: "/opt/ixia/ixos-api/8.01.0.2/"
+ py_bin_path: "/opt/ixia/ixload/8.01.106.3/bin/"
+ dut_result_dir: "/mnt/ixia"
+ version: 8.1
+ interfaces:
+ xe0: # logical name from topology.yaml and vnfd.yaml
+ vpci: "2:5" # Card:port
+ driver: "none"
+ dpdk_port_num: 0
+ local_ip: "152.16.100.20"
+ netmask: "255.255.0.0"
+ local_mac: "00:98:10:64:14:00"
+ xe1: # logical name from topology.yaml and vnfd.yaml
+ vpci: "2:6" # [(Card, port)]
+ driver: "none"
+ dpdk_port_num: 1
+ local_ip: "152.40.40.20"
+ netmask: "255.255.0.0"
+ local_mac: "00:98:28:28:14:00"
diff --git a/etc/infra/infra_deploy_multi.yaml.sample b/etc/infra/infra_deploy_multi.yaml.sample
new file mode 100644
index 000000000..aa27b735a
--- /dev/null
+++ b/etc/infra/infra_deploy_multi.yaml.sample
@@ -0,0 +1,97 @@
+nodes:
+ - name: Deployment and Controller node number 1 VM
+ openstack_node: controller
+ hostname: control-01
+ interfaces:
+ - network: management
+ ip: 192.168.1.10
+ netmask: 255.255.255.0
+ - network: traffic
+ ip: 192.20.1.10
+ netmask: 255.255.255.0
+ user: ubuntu
+ password: password
+ image: /tmp/image_cntrl_1.img
+ disk: 13000
+ ram: 9000
+ vcpus: 4
+
+ - name: Controller node number 2 VM
+ openstack_node: controller
+ hostname: control-02
+ interfaces:
+ - network: management
+ ip: 192.168.1.11
+ netmask: 255.255.255.0
+ - network: traffic
+ ip: 192.20.1.11
+ netmask: 255.255.255.0
+ user: ubuntu
+ password: password
+ image: /tmp/image_cntrl_2.img
+ disk: 11000
+ ram: 6000
+ vcpus: 2
+
+ - name: Compute node number 1 VM
+ openstack_node: compute
+ hostname: compute-01
+ interfaces:
+ - network: management
+ ip: 192.168.1.12
+ netmask: 255.255.255.0
+ - network: traffic
+ ip: 192.20.1.12
+ netmask: 255.255.255.0
+ user: ubuntu
+ password: password
+ image: /tmp/image_comp_1.img
+ disk: 30000
+ ram: 16000
+ vcpus: 12
+
+ - name: Compute node number 2 VM
+ openstack_node: compute
+ hostname: compute-02
+ interfaces:
+ - network: management
+ ip: 192.168.1.13
+ netmask: 255.255.255.0
+ - network: traffic
+ ip: 192.20.1.13
+ netmask: 255.255.255.0
+ user: ubuntu
+ password: password
+ image: /tmp/image_comp_2.img
+ disk: 12000
+ ram: 6000
+ vcpus: 4
+
+ - name: Jump host
+ hostname: yardstickvm
+ interfaces:
+ - network: management
+ ip: 192.168.1.14
+ netmask: 255.255.255.0
+ - network: traffic
+ ip: 192.20.1.14
+ netmask: 255.255.255.0
+ user: ubuntu
+ password: password
+ image: /tmp/image_yardstick.img
+ disk: 28000
+ ram: 12000
+ vcpus: 4
+
+networks:
+ - name: management
+ default_gateway: True
+ host_ip: 192.168.1.1
+ netmask: 255.255.255.0
+
+ - name: traffic
+ default_gateway: False # This parameter is not mandatory, default value: False
+ host_ip: 192.20.1.1
+ netmask: 255.255.255.0
+ dhcp_ip_start: 192.20.1.200
+ dhcp_ip_stop: 192.20.1.250
diff --git a/etc/infra/infra_deploy.yaml.sample b/etc/infra/infra_deploy_one.yaml.sample
index 8ed793622..f8759d42e 100644
--- a/etc/infra/infra_deploy.yaml.sample
+++ b/etc/infra/infra_deploy_one.yaml.sample
@@ -1,32 +1,35 @@
nodes:
- - name: Yardstick VM
- hostname: yardstickvm
+ - name: Deployment, Controller and Compute single VM
+    openstack_node: controller # if no compute nodes are defined, this is a standalone deployment
+ hostname: allinone
interfaces:
- network: management
- ip: 192.168.1.10
+ ip: 192.168.1.21
+ netmask: 255.255.255.0
+ - network: traffic
+ ip: 192.20.1.21
netmask: 255.255.255.0
user: ubuntu
password: password
- image: /tmp/image1.qcow
- disk: 50000
- ram: 8192
- vcpus: 4
+ image: /tmp/image_one.img
+ disk: 22000
+ ram: 14000
+ vcpus: 12
- - name: Controller_Compute VM
- openstack_node: controller_compute
- hostname: controller_compute
+ - name: Jump host
+ hostname: yardstickvm
interfaces:
- network: management
- ip: 192.168.1.20
+ ip: 192.168.1.22
netmask: 255.255.255.0
- network: traffic
- ip: 192.20.1.20
+ ip: 192.20.1.22
netmask: 255.255.255.0
user: ubuntu
password: password
- image: /tmp/image_2.qcow
- disk: 40000
- ram: 32768
+ image: /tmp/image_yardstick.img
+ disk: 22000
+ ram: 10000
vcpus: 4
networks:
@@ -39,3 +42,5 @@ networks:
default_gateway: False # This parameter is not mandatory, default value: False
host_ip: 192.20.1.1
netmask: 255.255.255.0
+ dhcp_ip_start: 192.20.1.200
+ dhcp_ip_stop: 192.20.1.250
diff --git a/etc/infra/infra_deploy_two.yaml.sample b/etc/infra/infra_deploy_two.yaml.sample
new file mode 100644
index 000000000..a29f75453
--- /dev/null
+++ b/etc/infra/infra_deploy_two.yaml.sample
@@ -0,0 +1,63 @@
+nodes:
+ - name: Deployment and Controller node number 1 VM
+ openstack_node: controller
+ hostname: control-01
+ interfaces:
+ - network: management
+ ip: 192.168.1.118
+ netmask: 255.255.255.0
+ - network: traffic
+ ip: 192.20.1.118
+ netmask: 255.255.255.0
+ user: ubuntu
+ password: password
+ image: /tmp/image_cntrl_1.img
+ disk: 12000
+ ram: 10000
+ vcpus: 6
+
+ - name: Compute node number 1 VM
+ openstack_node: compute
+ hostname: compute-01
+ interfaces:
+ - network: management
+ ip: 192.168.1.119
+ netmask: 255.255.255.0
+ - network: traffic
+ ip: 192.20.1.119
+ netmask: 255.255.255.0
+ user: ubuntu
+ password: password
+ image: /tmp/image_comp_1.img
+ disk: 44000
+ ram: 30000
+ vcpus: 14
+
+ - name: Jump host
+ hostname: yardstickvm
+ interfaces:
+ - network: management
+ ip: 192.168.1.120
+ netmask: 255.255.255.0
+ - network: traffic
+ ip: 192.20.1.120
+ netmask: 255.255.255.0
+ user: ubuntu
+ password: password
+ image: /tmp/image_yardstick.img
+ disk: 22000
+ ram: 10000
+ vcpus: 4
+
+networks:
+ - name: management
+ default_gateway: True
+ host_ip: 192.168.1.1
+ netmask: 255.255.255.0
+
+ - name: traffic
+ default_gateway: False # This parameter is not mandatory, default value: False
+ host_ip: 192.20.1.1
+ netmask: 255.255.255.0
+ dhcp_ip_start: 192.20.1.200
+ dhcp_ip_stop: 192.20.1.250
diff --git a/etc/yardstick/nodes/apex_baremetal/pod.yaml b/etc/yardstick/nodes/apex_baremetal/pod.yaml
new file mode 100644
index 000000000..4b058c499
--- /dev/null
+++ b/etc/yardstick/nodes/apex_baremetal/pod.yaml
@@ -0,0 +1,46 @@
+##############################################################################
+# Copyright (c) 2018 Intracom Telecom and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+nodes:
+-
+ name: node1
+ role: Controller
+ ip: ip1
+ user: heat-admin
+ key_filename: node_keyfile
+-
+ name: node2
+ role: Controller
+ ip: ip2
+ user: heat-admin
+ key_filename: node_keyfile
+-
+ name: node3
+ role: Controller
+ ip: ip3
+ user: heat-admin
+ key_filename: node_keyfile
+-
+ name: node4
+ role: Compute
+ ip: ip4
+ user: heat-admin
+ key_filename: node_keyfile
+-
+ name: node5
+ role: Compute
+ ip: ip5
+ user: heat-admin
+ key_filename: node_keyfile
+-
+ name: node6
+ role: Opendaylight-Cluster-Leader
+ ip: ip6
+ user: heat-admin
+ key_filename: node_keyfile
diff --git a/etc/yardstick/nodes/apex_virtual/pod.yaml b/etc/yardstick/nodes/apex_virtual/pod.yaml
new file mode 100644
index 000000000..59b51d224
--- /dev/null
+++ b/etc/yardstick/nodes/apex_virtual/pod.yaml
@@ -0,0 +1,40 @@
+##############################################################################
+# Copyright (c) 2018 Intracom Telecom and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+nodes:
+-
+ name: node1
+ role: Controller
+ ip: 192.0.2.15
+ user: heat-admin
+ key_filename: /root/.ssh/id_rsa
+-
+ name: node2
+ role: Controller
+ ip: 192.0.2.4
+ user: heat-admin
+ key_filename: /root/.ssh/id_rsa
+-
+ name: node3
+ role: Controller
+ ip: 192.0.2.6
+ user: heat-admin
+ key_filename: /root/.ssh/id_rsa
+-
+ name: node4
+ role: Compute
+ ip: 192.0.2.10
+ user: heat-admin
+ key_filename: /root/.ssh/id_rsa
+-
+ name: node5
+ role: Compute
+ ip: 192.0.2.14
+ user: heat-admin
+ key_filename: /root/.ssh/id_rsa
diff --git a/etc/yardstick/nodes/pod.yaml.nsb.sample.ixia b/etc/yardstick/nodes/pod.yaml.nsb.sample.ixia
index 57a83058e..1f755dc4e 100644
--- a/etc/yardstick/nodes/pod.yaml.nsb.sample.ixia
+++ b/etc/yardstick/nodes/pod.yaml.nsb.sample.ixia
@@ -26,7 +26,6 @@ nodes:
lib_path: "/opt/ixia/ixos-api/8.01.0.2/lib/ixTcl1.0"
root_dir: "/opt/ixia/ixos-api/8.01.0.2/"
py_bin_path: "/opt/ixia/ixload/8.01.106.3/bin/"
- py_lib_path: "/opt/ixia/ixnetwork/8.01.1029.14/lib/PythonApi"
dut_result_dir: "/mnt/ixia"
version: 8.1
interfaces:
diff --git a/etc/yardstick/nodes/standalone/ixia_correlated_template.yaml b/etc/yardstick/nodes/standalone/ixia_correlated_template.yaml
index 7250c4ce3..ef63ea04c 100644
--- a/etc/yardstick/nodes/standalone/ixia_correlated_template.yaml
+++ b/etc/yardstick/nodes/standalone/ixia_correlated_template.yaml
@@ -26,13 +26,12 @@ nodes:
user: {{gen.user}}
password: {{gen.password}}
key_filename: {{gen.key_filename}}
- tg_config:
+ tg_config:
ixchassis: "{{gen.tg_config.ixchassis}}" #ixia chassis ip
tcl_port: "{{gen.tg_config.tcl_port}}" # tcl server port
lib_path: "{{gen.tg_config.lib_path}}"
root_dir: "{{gen.tg_config.root_dir}}"
py_bin_path: "{{gen.tg_config.py_bin_path}}"
- py_lib_path: "{{gen.tg_config.py_lib_path}}"
dut_result_dir: "{{gen.tg_config.dut_result_dir}}"
version: "{{gen.tg_config.version}}"
interfaces:
diff --git a/etc/yardstick/nodes/standalone/ixia_template.yaml b/etc/yardstick/nodes/standalone/ixia_template.yaml
index 617a65162..98ed8c5c2 100644
--- a/etc/yardstick/nodes/standalone/ixia_template.yaml
+++ b/etc/yardstick/nodes/standalone/ixia_template.yaml
@@ -32,7 +32,6 @@ nodes:
lib_path: "{{gen.tg_config.lib_path}}"
root_dir: "{{gen.tg_config.root_dir}}"
py_bin_path: "{{gen.tg_config.py_bin_path}}"
- py_lib_path: "{{gen.tg_config.py_lib_path}}"
dut_result_dir: "{{gen.tg_config.dut_result_dir}}"
version: "{{gen.tg_config.version}}"
interfaces:
diff --git a/install.sh b/install.sh
index 04985f48a..74929345d 100755
--- a/install.sh
+++ b/install.sh
@@ -119,4 +119,4 @@ tar xvf ${NSB_DIR}/trex_client.tar.gz -C ${NSB_DIR}
rm -f ${NSB_DIR}/trex_client.tar.gz
service nginx restart
-uwsgi -i /etc/yardstick/yardstick.ini
+uwsgi -i /etc/yardstick/yardstick.ini
\ No newline at end of file
diff --git a/nsb_setup.sh b/nsb_setup.sh
index 50fc017d1..86796c4d4 100755
--- a/nsb_setup.sh
+++ b/nsb_setup.sh
@@ -67,8 +67,16 @@ pip install ansible==2.4.2 shade==1.22.2 docker-py==1.10.6
ANSIBLE_SCRIPTS="ansible"
+if [[ -n ${1} ]]; then
+ yardstick_docker_image="-e yardstick_docker_image=${1}"
+else
+ yardstick_docker_image=""
+fi
+
+# no quotes for yardstick_docker_image so when empty it is removed as whitespace
cd ${ANSIBLE_SCRIPTS} &&\
ansible-playbook \
-e img_property="nsb" \
+ ${yardstick_docker_image} \
-e YARD_IMG_ARCH='amd64' ${extra_args}\
-i yardstick-install-inventory.ini nsb_setup.yml
diff --git a/requirements.txt b/requirements.txt
index 43f0e796c..4679bc8fb 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -26,6 +26,7 @@ flask==0.11.1 # BSD; OSI Approved BSD License
functools32==3.2.3.post2; python_version <= "2.7" # PSF license
futures==3.1.1;python_version=='2.7' # BSD; OSI Approved BSD License
influxdb==4.1.1 # MIT License; OSI Approved MIT License
+IxNetwork==8.40.1124.9 # MIT License; OSI Approved MIT License
jinja2schema==0.1.4 # OSI Approved BSD License
keystoneauth1==3.1.0 # OSI Approved Apache Software License
kubernetes==3.0.0a1 # OSI Approved Apache Software License
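
The new IxNetwork pin provides the Python bindings that the NSB installation guide now relies on instead of the manual IxNetworkAPI installer. A minimal, hypothetical check that the pinned bindings are importable (illustration only, not part of the patch):

    # Hypothetical smoke test for the newly pinned IxNetwork bindings.
    import IxNetwork

    print("IxNetwork bindings loaded from:", IxNetwork.__file__)
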
diff --git a/samples/test_suite.yaml b/samples/test_suite.yaml
index 9a766b06a..6f5f53b46 100644
--- a/samples/test_suite.yaml
+++ b/samples/test_suite.yaml
@@ -20,7 +20,8 @@ test_cases:
file_name: ping.yaml
-
file_name: ping-template.yaml
- task_args: '{"packetsize": "200"}'
+ task_args:
+ default: '{"packetsize": "200"}'
-
file_name: ping-template.yaml
task_args_file: "/tmp/test-args-file.json"
diff --git a/samples/vnf_samples/nsut/prox/configs/gen_l2fwd-2.cfg b/samples/vnf_samples/nsut/prox/configs/gen_l2fwd-2.cfg
index 192f2f89a..ba0055321 100644
--- a/samples/vnf_samples/nsut/prox/configs/gen_l2fwd-2.cfg
+++ b/samples/vnf_samples/nsut/prox/configs/gen_l2fwd-2.cfg
@@ -49,7 +49,7 @@ mode=gen
tx port=p0
bps=1250000000
; Ethernet + IP + UDP
-pkt inline=${sut_mac0} 3c fd fe 9f a3 08 08 00 45 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 c0 a8 01 01 13 88 13 88 00 08 55 7b
+pkt inline=${sut_mac0} 3c fd fe 9f a3 08 08 00 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 c0 a8 01 01 13 88 13 88 00 08 55 7b
lat pos=38
[core 2]
@@ -59,7 +59,7 @@ mode=gen
tx port=p1
bps=1250000000
; Ethernet + IP + UDP
-pkt inline=${sut_mac1} 3c fd fe 9f a3 08 08 00 45 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 c0 a8 01 01 13 88 13 88 00 08 55 7b
+pkt inline=${sut_mac1} 3c fd fe 9f a3 08 08 00 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 c0 a8 01 01 13 88 13 88 00 08 55 7b
lat pos=38
[core 3]
@@ -67,12 +67,10 @@ name=rec 0
task=0
mode=lat
rx port=p0
-lat pos=38
[core 4]
name=rec 0
task=0
mode=lat
rx port=p1
-lat pos=38
diff --git a/samples/vnf_samples/nsut/prox/configs/gen_l2fwd-4.cfg b/samples/vnf_samples/nsut/prox/configs/gen_l2fwd-4.cfg
index 0db21b681..41c31bf82 100644
--- a/samples/vnf_samples/nsut/prox/configs/gen_l2fwd-4.cfg
+++ b/samples/vnf_samples/nsut/prox/configs/gen_l2fwd-4.cfg
@@ -61,7 +61,7 @@ mode=gen
tx port=p0
bps=1250000000
; Ethernet + IP + UDP
-pkt inline=${sut_mac0} 3c fd fe 9f a3 08 08 00 45 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 c0 a8 01 01 13 88 13 88 00 08 55 7b
+pkt inline=${sut_mac0} 3c fd fe 9f a3 08 08 00 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 c0 a8 01 01 13 88 13 88 00 08 55 7b
lat pos=38
[core 2]
@@ -71,7 +71,7 @@ mode=gen
tx port=p1
bps=1250000000
; Ethernet + IP + UDP
-pkt inline=${sut_mac1} 3c fd fe 9f a3 08 08 00 45 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 c0 a8 01 01 13 88 13 88 00 08 55 7b
+pkt inline=${sut_mac1} 3c fd fe 9f a3 08 08 00 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 c0 a8 01 01 13 88 13 88 00 08 55 7b
lat pos=38
[core 3]
@@ -81,7 +81,7 @@ mode=gen
tx port=p2
bps=1250000000
; Ethernet + IP + UDP
-pkt inline=${sut_mac2} 3c fd fe 9f a5 08 08 00 45 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 c0 a8 01 01 13 88 13 88 00 08 55 7b
+pkt inline=${sut_mac2} 3c fd fe 9f a5 08 08 00 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 c0 a8 01 01 13 88 13 88 00 08 55 7b
lat pos=38
[core 4]
@@ -91,7 +91,7 @@ mode=gen
tx port=p3
bps=1250000000
; Ethernet + IP + UDP
-pkt inline=${sut_mac3} 3c fd fe 9f a5 08 08 00 45 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 c0 a8 01 01 13 88 13 88 00 08 55 7b
+pkt inline=${sut_mac3} 3c fd fe 9f a5 08 08 00 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 c0 a8 01 01 13 88 13 88 00 08 55 7b
lat pos=38
[core 5]
@@ -99,25 +99,22 @@ name=rec 0
task=0
mode=lat
rx port=p0
-lat pos=38
[core 6]
name=rec 1
task=0
mode=lat
rx port=p1
-lat pos=38
[core 7]
name=rec 2
task=0
mode=lat
rx port=p2
-lat pos=38
[core 8]
name=rec 3
task=0
mode=lat
rx port=p3
-lat pos=38
\ No newline at end of file
+
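
The "45 45" to "45 00" edits in both gen_l2fwd configs drop a duplicated 0x45 byte, so the generated IPv4 header starts with version/IHL 0x45 followed by a TOS of 0x00 instead of being shifted by one byte. A quick consistency check of the corrected header bytes (a sketch, using only the values from the pkt inline lines above):

    # Sanity-check the corrected IPv4 header taken from the "pkt inline" lines.
    ip_hdr = bytes.fromhex("4500001c000100004011f77dc0a80101c0a80101")

    assert ip_hdr[0] == 0x45        # version 4, IHL 5 (20-byte header)
    assert ip_hdr[1] == 0x00        # TOS byte is 0x00, not a duplicated 0x45

    # The embedded checksum 0xf77d only sums correctly with TOS = 0x00.
    total = sum(int.from_bytes(ip_hdr[i:i + 2], "big") for i in range(0, 20, 2))
    while total >> 16:
        total = (total & 0xffff) + (total >> 16)
    assert total == 0xffff          # ones-complement sum over a valid header
    print("IPv4 header is internally consistent")
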
diff --git a/samples/vnf_samples/vnf_descriptors/ixia_rfc2544_tpl.yaml b/samples/vnf_samples/vnf_descriptors/ixia_rfc2544_tpl.yaml
index 9b2a152f3..aca7c2102 100644
--- a/samples/vnf_samples/vnf_descriptors/ixia_rfc2544_tpl.yaml
+++ b/samples/vnf_samples/vnf_descriptors/ixia_rfc2544_tpl.yaml
@@ -29,7 +29,6 @@ vnfd:vnfd-catalog:
tcl_port: '{{tg_config.tcl_port}}' # tcl server port
lib_path: '{{tg_config.lib_path}}'
root_dir: '{{tg_config.root_dir}}'
- py_lib_path: '{{tg_config.py_lib_path}}'
py_bin_path: '{{tg_config.py_bin_path}}'
dut_result_dir: '{{tg_config.dut_result_dir}}'
version: '{{tg_config.version}}'
diff --git a/samples/vnf_samples/vnf_descriptors/tg_ixload.yaml b/samples/vnf_samples/vnf_descriptors/tg_ixload.yaml
index ad4953fce..0324bb8fd 100644
--- a/samples/vnf_samples/vnf_descriptors/tg_ixload.yaml
+++ b/samples/vnf_samples/vnf_descriptors/tg_ixload.yaml
@@ -29,7 +29,6 @@ vnfd:vnfd-catalog:
tcl_port: '{{tg_config.tcl_port}}' # tcl server port
lib_path: '{{tg_config.lib_path}}'
root_dir: '{{tg_config.root_dir}}'
- py_lib_path: '{{tg_config.py_lib_path}}'
py_bin_path: '{{tg_config.py_bin_path}}'
dut_result_dir: '{{tg_config.dut_result_dir}}'
version: '{{tg_config.version}}'
diff --git a/samples/vnf_samples/vnf_descriptors/tg_ixload_4port.yaml b/samples/vnf_samples/vnf_descriptors/tg_ixload_4port.yaml
index ffbfbdec0..def8cdcef 100644
--- a/samples/vnf_samples/vnf_descriptors/tg_ixload_4port.yaml
+++ b/samples/vnf_samples/vnf_descriptors/tg_ixload_4port.yaml
@@ -28,7 +28,6 @@ vnfd:vnfd-catalog:
tcl_port: '{{tg_config.tcl_port}}' # tcl server port
lib_path: '{{tg_config.lib_path}}'
root_dir: '{{tg_config.root_dir}}'
- py_lib_path: '{{tg_config.py_lib_path}}'
dut_result_dir: '{{tg_config.dut_result_dir}}'
version: '{{tg_config.version}}'
vdu:
diff --git a/tests/ci/prepare_env.sh b/tests/ci/prepare_env.sh
index d7c60d48f..8b9f887b2 100755
--- a/tests/ci/prepare_env.sh
+++ b/tests/ci/prepare_env.sh
@@ -16,6 +16,7 @@
: ${EXTERNAL_NETWORK:='admin_floating_net'}
: ${USER_NAME:='ubuntu'}
: ${SSH_KEY:='/root/.ssh/id_rsa'}
+: ${DEPLOY_SCENARIO:='unknown'}
# Extract network name from EXTERNAL_NETWORK
# e.g. EXTERNAL_NETWORK='ext-net;flat;192.168.0.2;192.168.0.253;192.168.0.1;192.168.0.0/24'
@@ -62,7 +63,73 @@ verify_connectivity() {
}
ssh_options="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
+if [ "$INSTALLER_TYPE" == "apex" ]; then
+ # check the connection
+ verify_connectivity "${INSTALLER_IP}"
+
+ pod_yaml="$YARDSTICK_REPO_DIR/etc/yardstick/nodes/apex_baremetal/pod.yaml"
+
+ # update "ip" according to the CI env
+ ssh -l root "${INSTALLER_IP}" -i ${SSH_KEY} ${ssh_options} \
+ "source /home/stack/stackrc && openstack server list -f yaml" > node_info
+
+ controller_ips=($(awk '/control/{getline; {print $2}}' < node_info | grep -o '[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}'))
+ compute_ips=($(awk '/compute/{getline; {print $2}}' < node_info | grep -o '[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}'))
+ odl_ip=""
+ # Get ODL's cluster default module-shard Leader IP in HA scenario
+ if [[ ${DEPLOY_SCENARIO} == os-odl-*-ha ]]; then
+ for ip in "${controller_ips[@]}";
+ do
+ if [[ "$odl_ip" ]]; then
+ break
+ fi
+ for ((i=0; i<${#controller_ips[@]}; i++));
+ do
+ ODL_STATE=$(curl -s -u admin:admin -H "Accept: application/json" -H "Content-Type: application/json" \
+ "http://"${ip}":8081/jolokia/read/org.opendaylight.controller:Category=Shards,name=member-"${i}"-shard-default-operational,type=DistributedOperationalDatastore" \
+ | grep -o \"RaftState\"\:\"Leader\" | tr ":" "\n" | sed -n '2p' | sed 's/\"//g');
+
+ if [[ ${ODL_STATE} == "Leader" ]]; then
+ odl_ip=${ip}
+ break
+ fi
+ done;
+ done
+
+ if [[ -z "$odl_ip" ]]; then
+      echo "ERROR: Opendaylight Leader IP is empty"
+ exit 1
+ fi
+
+ elif [[ ${DEPLOY_SCENARIO} == *"odl"* ]]; then
+ odl_ip=${controller_ips[0]}
+ fi
+
+ if [[ ${controller_ips[0]} ]]; then
+ sed -i "s|ip1|${controller_ips[0]}|" "${pod_yaml}"
+ fi
+ if [[ ${controller_ips[1]} ]]; then
+ sed -i "s|ip2|${controller_ips[1]}|" "${pod_yaml}"
+ fi
+ if [[ ${controller_ips[2]} ]]; then
+ sed -i "s|ip3|${controller_ips[2]}|" "${pod_yaml}"
+ fi
+ if [[ ${compute_ips[0]} ]]; then
+ sed -i "s|ip4|${compute_ips[0]}|" "${pod_yaml}"
+ fi
+ if [[ ${compute_ips[1]} ]]; then
+ sed -i "s|ip5|${compute_ips[1]}|" "${pod_yaml}"
+ fi
+ if [[ ${odl_ip} ]]; then
+ sed -i "s|ip6|${odl_ip}|" "${pod_yaml}"
+ fi
+
+
+ # update 'key_filename' according to the CI env
+ sed -i "s|node_keyfile|${SSH_KEY}|" "${pod_yaml}"
+
+fi
if [ "$INSTALLER_TYPE" == "fuel" ]; then
# check the connection
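
The apex branch added above discovers the ODL default-shard leader by querying each controller's Jolokia endpoint. A rough Python equivalent of that loop, assuming the same endpoint, port 8081 and admin:admin credentials used in the shell code (illustration only):

    # Rough Python equivalent of the ODL leader discovery loop in prepare_env.sh.
    import requests

    JOLOKIA = ("http://{ip}:8081/jolokia/read/org.opendaylight.controller:"
               "Category=Shards,name=member-{i}-shard-default-operational,"
               "type=DistributedOperationalDatastore")

    def find_odl_leader(controller_ips):
        for ip in controller_ips:
            for i in range(len(controller_ips)):
                resp = requests.get(JOLOKIA.format(ip=ip, i=i), auth=("admin", "admin"))
                if resp.ok and resp.json().get("value", {}).get("RaftState") == "Leader":
                    return ip
        return None
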
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc092.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc092.yaml
new file mode 100644
index 000000000..85ec510df
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc092.yaml
@@ -0,0 +1,276 @@
+##############################################################################
+## Copyright (c) 2018 Intracom Telecom and others.
+##
+## All rights reserved. This program and the accompanying materials
+## are made available under the terms of the Apache License, Version 2.0
+## which accompanies this distribution, and is available at
+## http://www.apache.org/licenses/LICENSE-2.0
+###############################################################################
+---
+
+schema: "yardstick:task:0.1"
+description: >
+ Yardstick TC092 config file;
+ SDN Controller resilience in HA configuration
+
+{% set file = file or '/etc/yardstick/pod.yaml' %}
+{% set attack_host = attack_host or 'node6' %}
+
+scenarios:
+
+-
+ type: "GeneralHA"
+ options:
+ monitors:
+ - monitor_type: "general-monitor"
+ monitor_key: "ip-status"
+ key: "snat"
+ monitor_time: 50
+ host: athena
+ sla:
+ max_outage_time: 0
+ parameter:
+ destination_ip: "8.8.8.8"
+
+ - monitor_type: "general-monitor"
+ monitor_key: "ip-status"
+ key: "l2"
+ monitor_time: 50
+ host: athena
+ sla:
+ max_outage_time: 0
+ parameter:
+ destination_ip: "@private_ip"
+
+ operations:
+ - operation_type: "general-operation"
+ key: "get-privateip"
+ operation_key: "get-privateip"
+ action_parameter:
+ server_name: "ares"
+ return_parameter:
+ all: "@private_ip"
+
+
+ steps:
+ - actionKey: "get-privateip"
+ actionType: "operation"
+ index: 1
+
+ - actionKey: "l2"
+ actionType: "monitor"
+ index: 2
+
+ - actionKey: "snat"
+ actionType: "monitor"
+ index: 3
+
+
+ nodes:
+ {{attack_host}}: {{attack_host}}.LF
+ athena: athena.ODLHA1
+ runner:
+ type: Duration
+ duration: 1
+ sla:
+ action: monitor
+
+-
+ type: "GeneralHA"
+ options:
+ attackers:
+ -
+ fault_type: "kill-process"
+ process_name: "opendaylight"
+ key: "kill-process"
+ host: {{attack_host}}
+
+ monitors:
+ - monitor_type: "process"
+ process_name: "opendaylight"
+ host: {{attack_host}}
+ key: "monitor-recovery"
+ monitor_time: 50
+ sla:
+ max_recover_time: 30
+
+
+ - monitor_type: "general-monitor"
+ monitor_key: "ip-status"
+ key: "snat"
+ monitor_time: 70
+ host: athena
+ sla:
+ max_outage_time: 0
+ parameter:
+ destination_ip: "8.8.8.8"
+
+ - monitor_type: "general-monitor"
+ monitor_key: "ip-status"
+ key: "l2"
+ monitor_time: 70
+ host: athena
+ sla:
+ max_outage_time: 0
+ parameter:
+ destination_ip: "@private_ip"
+
+ operations:
+ - operation_type: "general-operation"
+ key: "start-service"
+ host: {{attack_host}}
+ operation_key: "start-service"
+ action_parameter:
+ service: "opendaylight"
+ rollback_parameter:
+ service: "opendaylight"
+
+ - operation_type: "general-operation"
+ key: "get-privateip"
+ operation_key: "get-privateip"
+ action_parameter:
+ server_name: "ares"
+ return_parameter:
+ all: "@private_ip"
+
+
+
+ steps:
+
+ - actionKey: "monitor-recovery"
+ actionType: "monitor"
+ index: 1
+
+ - actionKey: "get-privateip"
+ actionType: "operation"
+ index: 2
+
+ - actionKey: "l2"
+ actionType: "monitor"
+ index: 3
+
+ - actionKey: "snat"
+ actionType: "monitor"
+ index: 4
+
+ - actionKey: "kill-process"
+ actionType: "attacker"
+ index: 5
+
+ - actionKey: "start-service"
+ actionType: "operation"
+ index: 6
+
+
+
+ nodes:
+ {{attack_host}}: {{attack_host}}.LF
+ athena: athena.ODLHA1
+ runner:
+ type: Duration
+ duration: 1
+ sla:
+ action: monitor
+
+-
+ type: "GeneralHA"
+ options:
+ monitors:
+
+ - monitor_type: "general-monitor"
+ monitor_key: "ip-status"
+ key: "l2"
+ monitor_time: 80
+ host: athena
+ sla:
+ max_outage_time: 40
+ parameter:
+ destination_ip: "@private_ip"
+
+ operations:
+ - operation_type: "general-operation"
+ key: "get-privateip"
+ operation_key: "get-privateip"
+ action_parameter:
+ server_name: "hermes"
+ return_parameter:
+ all: "@private_ip"
+
+ - operation_type: "general-operation"
+ key: "nova-create-instance"
+ operation_key: "nova-create-instance"
+ action_parameter:
+ serverconfig: "hermes yardstick-image yardstick-flavor test_one"
+ rollback_parameter:
+ serverconfig: "hermes"
+
+ - operation_type: "general-operation"
+ key: "add-server-to-secgroup"
+ operation_key: "add-server-to-secgroup"
+ action_parameter:
+ serverconfig: "hermes ODLHA1"
+ rollback_parameter:
+ serverconfig: "hermes ODLHA1"
+
+
+ steps:
+ - actionKey: "nova-create-instance"
+ actionType: "operation"
+ index: 1
+
+ - actionKey: "add-server-to-secgroup"
+ actionType: "operation"
+ index: 2
+
+ - actionKey: "get-privateip"
+ actionType: "operation"
+ index: 3
+
+ - actionKey: "l2"
+ actionType: "monitor"
+ index: 4
+
+ nodes:
+ {{attack_host}}: {{attack_host}}.LF
+ athena: athena.ODLHA1
+ runner:
+ type: Duration
+ duration: 1
+ sla:
+ action: monitor
+
+
+contexts:
+ -
+ type: Node
+ name: LF
+ file: {{file}}
+ -
+ name: ODLHA1
+ image: yardstick-image
+ flavor: yardstick-flavor
+ user: ubuntu
+ host: athena
+ placement_groups:
+ pgrp1:
+ policy: "availability"
+ servers:
+ athena:
+ floating_ip: true
+ placement: "pgrp1"
+ network_ports:
+ test_one:
+ - ens0
+
+ ares:
+ floating_ip: true
+ placement: "pgrp1"
+ network_ports:
+ test_one:
+ - ens0
+
+ networks:
+ test_one:
+ cidr: '10.0.1.0/24'
+ router: 'test_router'
+
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc093.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc093.yaml
new file mode 100644
index 000000000..a034471aa
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc093.yaml
@@ -0,0 +1,313 @@
+##############################################################################
+## Copyright (c) 2018 Intracom Telecom and others.
+##
+## All rights reserved. This program and the accompanying materials
+## are made available under the terms of the Apache License, Version 2.0
+## which accompanies this distribution, and is available at
+## http://www.apache.org/licenses/LICENSE-2.0
+###############################################################################
+---
+
+schema: "yardstick:task:0.1"
+description: >
+ Yardstick TC093 config file;
+ SDN Vswitch resilience in non-HA or HA configuration
+
+{% set file = file or '/etc/yardstick/pod.yaml' %}
+{% set attack_host_cmp_one = attack_host_cmp_one or 'node4' %}
+{% set attack_host_cmp_two = attack_host_cmp_two or 'node5' %}
+{% set systemd_service_name = systemd_service_name or 'openvswitch-switch'%}
+
+scenarios:
+
+-
+ type: "GeneralHA"
+ options:
+ monitors:
+
+ - monitor_type: "general-monitor"
+ monitor_key: "ip-status"
+ key: "snat"
+ monitor_time: 50
+ host: athena
+ sla:
+ max_outage_time: 0
+ parameter:
+ destination_ip: "8.8.8.8"
+
+ - monitor_type: "general-monitor"
+ monitor_key: "ip-status"
+ key: "l2"
+ monitor_time: 50
+ host: athena
+ sla:
+ max_outage_time: 0
+ parameter:
+ destination_ip: "@private_ip"
+
+ operations:
+ - operation_type: "general-operation"
+ key: "get-privateip"
+ operation_key: "get-privateip"
+ action_parameter:
+ server_name: "ares"
+ return_parameter:
+ all: "@private_ip"
+
+
+ steps:
+ - actionKey: "get-privateip"
+ actionType: "operation"
+ index: 1
+
+ - actionKey: "l2"
+ actionType: "monitor"
+ index: 2
+
+ - actionKey: "snat"
+ actionType: "monitor"
+ index: 3
+
+
+ nodes:
+ athena: athena.ODLnoHA1
+ runner:
+ type: Duration
+ duration: 1
+ sla:
+ action: monitor
+
+
+-
+ type: "GeneralHA"
+ options:
+ attackers:
+ -
+ fault_type: "kill-process"
+ process_name: "openvswitch"
+ key: "kill-process-cmp-one"
+ host: {{attack_host_cmp_one}}
+
+ -
+ fault_type: "kill-process"
+ process_name: "openvswitch"
+ key: "kill-process-cmp-two"
+ host: {{attack_host_cmp_two}}
+
+ monitors:
+ - monitor_type: "process"
+ process_name: "openvswitch"
+ host: {{attack_host_cmp_one}}
+ key: "monitor-recovery-cmp-one"
+ monitor_time: 50
+ sla:
+ max_recover_time: 30
+
+ - monitor_type: "process"
+ process_name: "openvswitch"
+ host: {{attack_host_cmp_two}}
+ key: "monitor-recovery-cmp-two"
+ monitor_time: 50
+ sla:
+ max_recover_time: 30
+
+ - monitor_type: "general-monitor"
+ monitor_key: "ip-status"
+ key: "snat"
+ monitor_time: 70
+ host: athena
+ sla:
+ max_outage_time: 20
+ parameter:
+ destination_ip: "8.8.8.8"
+
+ - monitor_type: "general-monitor"
+ monitor_key: "ip-status"
+ key: "l2"
+ monitor_time: 70
+ host: athena
+ sla:
+ max_outage_time: 20
+ parameter:
+ destination_ip: "@private_ip"
+
+ operations:
+ - operation_type: "general-operation"
+ key: "restart-service-cmp-one"
+ host: {{attack_host_cmp_one}}
+ operation_key: "start-service"
+ action_parameter:
+ service: {{systemd_service_name ~ " restart"}}
+ rollback_parameter:
+ service: "openvswitch"
+
+ - operation_type: "general-operation"
+ key: "restart-service-cmp-two"
+ host: {{attack_host_cmp_two}}
+ operation_key: "start-service"
+ action_parameter:
+ service: {{systemd_service_name ~ " restart"}}
+ rollback_parameter:
+ service: "openvswitch"
+
+ - operation_type: "general-operation"
+ key: "get-privateip"
+ operation_key: "get-privateip"
+ action_parameter:
+ server_name: "ares"
+ return_parameter:
+ all: "@private_ip"
+
+
+
+ steps:
+
+ - actionKey: "get-privateip"
+ actionType: "operation"
+ index: 1
+
+ - actionKey: "l2"
+ actionType: "monitor"
+ index: 2
+
+ - actionKey: "snat"
+ actionType: "monitor"
+ index: 3
+
+ - actionKey: "kill-process-cmp-one"
+ actionType: "attacker"
+ index: 4
+
+ - actionKey: "kill-process-cmp-two"
+ actionType: "attacker"
+ index: 5
+
+ - actionKey: "monitor-recovery-cmp-one"
+ actionType: "monitor"
+ index: 6
+
+ - actionKey: "monitor-recovery-cmp-two"
+ actionType: "monitor"
+ index: 7
+
+
+ - actionKey: "restart-service-cmp-one"
+ actionType: "operation"
+ index: 8
+
+ - actionKey: "restart-service-cmp-two"
+ actionType: "operation"
+ index: 9
+
+
+ nodes:
+ {{attack_host_cmp_one}}: {{attack_host_cmp_one}}.LF
+ {{attack_host_cmp_two}}: {{attack_host_cmp_two}}.LF
+ athena: athena.ODLnoHA1
+ runner:
+ type: Duration
+ duration: 1
+ sla:
+ action: monitor
+
+-
+ type: "GeneralHA"
+ options:
+ monitors:
+
+ - monitor_type: "general-monitor"
+ monitor_key: "ip-status"
+ key: "l2"
+ monitor_time: 80
+ host: athena
+ sla:
+ max_outage_time: 40
+ parameter:
+ destination_ip: "@private_ip"
+
+ operations:
+ - operation_type: "general-operation"
+ key: "get-privateip"
+ operation_key: "get-privateip"
+ action_parameter:
+ server_name: "hermes"
+ return_parameter:
+ all: "@private_ip"
+
+ - operation_type: "general-operation"
+ key: "nova-create-instance"
+ operation_key: "nova-create-instance"
+ action_parameter:
+ serverconfig: "hermes yardstick-image yardstick-flavor test_one"
+ rollback_parameter:
+ serverconfig: "hermes"
+
+ - operation_type: "general-operation"
+ key: "add-server-to-secgroup"
+ operation_key: "add-server-to-secgroup"
+ action_parameter:
+ serverconfig: "hermes ODLnoHA1"
+ rollback_parameter:
+ serverconfig: "hermes ODLnoHA1"
+
+
+ steps:
+ - actionKey: "nova-create-instance"
+ actionType: "operation"
+ index: 1
+
+ - actionKey: "add-server-to-secgroup"
+ actionType: "operation"
+ index: 2
+
+ - actionKey: "get-privateip"
+ actionType: "operation"
+ index: 3
+
+ - actionKey: "l2"
+ actionType: "monitor"
+ index: 4
+
+ nodes:
+ athena: athena.ODLnoHA1
+ runner:
+ type: Duration
+ duration: 1
+ sla:
+ action: monitor
+
+
+contexts:
+ -
+ type: Node
+ name: LF
+ file: {{file}}
+ -
+ name: ODLnoHA1
+ image: yardstick-image
+ flavor: yardstick-flavor
+ user: ubuntu
+ host: athena
+ placement_groups:
+ pgrp1:
+ policy: "availability"
+ servers:
+ athena:
+ floating_ip: true
+ placement: "pgrp1"
+ network_ports:
+ test_one:
+ - ens0
+
+ ares:
+ floating_ip: true
+ placement: "pgrp1"
+ network_ports:
+ test_one:
+ - ens0
+
+ networks:
+ test_one:
+ cidr: '10.0.1.0/24'
+ router: 'test_router'
+
diff --git a/tests/opnfv/test_suites/opnfv_os-odl-nofeature-ha_daily.yaml b/tests/opnfv/test_suites/opnfv_os-odl-nofeature-ha_daily.yaml
index 13cc710f3..f174a90e4 100644
--- a/tests/opnfv/test_suites/opnfv_os-odl-nofeature-ha_daily.yaml
+++ b/tests/opnfv/test_suites/opnfv_os-odl-nofeature-ha_daily.yaml
@@ -62,3 +62,18 @@ test_cases:
task_args:
huawei-pod1: '{"file": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml",
"host": "node1"}'
+-
+ file_name: opnfv_yardstick_tc092.yaml
+ constraint:
+ installer: apex
+ task_args:
+ default: '{"file": "etc/yardstick/nodes/apex_baremetal/pod.yaml",
+ "attack_host": "node6"}'
+-
+ file_name: opnfv_yardstick_tc093.yaml
+ constraint:
+ installer: apex
+ task_args:
+ default: '{"file": "etc/yardstick/nodes/apex_baremetal/pod.yaml",
+ "attack_host_cmp_one": "node4","attack_host_cmp_two": "node5",
+ "systemd_service_name": "openvswitch"}'
diff --git a/tests/opnfv/test_suites/opnfv_os-odl-nofeature-noha_daily.yaml b/tests/opnfv/test_suites/opnfv_os-odl-nofeature-noha_daily.yaml
index 42a170a89..feb8a6631 100644
--- a/tests/opnfv/test_suites/opnfv_os-odl-nofeature-noha_daily.yaml
+++ b/tests/opnfv/test_suites/opnfv_os-odl-nofeature-noha_daily.yaml
@@ -61,3 +61,11 @@ test_cases:
task_args:
default: '{"file": "etc/yardstick/nodes/fuel_baremetal/pod.yaml",
"attack_host": "node6"}'
+-
+ file_name: opnfv_yardstick_tc093.yaml
+ constraint:
+ installer: apex
+ task_args:
+ default: '{"file": "etc/yardstick/nodes/apex_baremetal/pod.yaml",
+ "attack_host_cmp_one": "node4","attack_host_cmp_two": "node5",
+ "systemd_service_name": "openvswitch"}'
diff --git a/tests/unit/network_services/helpers/test_samplevnf_helper.py b/tests/unit/network_services/helpers/test_samplevnf_helper.py
index 3b6c89d3a..dc74b1859 100644
--- a/tests/unit/network_services/helpers/test_samplevnf_helper.py
+++ b/tests/unit/network_services/helpers/test_samplevnf_helper.py
@@ -143,8 +143,6 @@ class TestMultiPortConfig(unittest.TestCase):
def setUp(self):
self._mock_open = mock.patch.object(six.moves.builtins, 'open')
self.mock_open = self._mock_open.start()
- self._mock_os = mock.patch.object(os, 'path')
- self.mock_os = self._mock_os.start()
self._mock_config_parser = mock.patch.object(
samplevnf_helper, 'ConfigParser')
self.mock_config_parser = self._mock_config_parser.start()
@@ -153,7 +151,6 @@ class TestMultiPortConfig(unittest.TestCase):
def _cleanup(self):
self._mock_open.stop()
- self._mock_os.stop()
self._mock_config_parser.stop()
def test_validate_ip_and_prefixlen(self):
@@ -185,7 +182,8 @@ class TestMultiPortConfig(unittest.TestCase):
samplevnf_helper.MultiPortConfig.validate_ip_and_prefixlen(
'::1', '129')
- def test___init__(self):
+ @mock.patch.object(os.path, 'isfile', return_value=False)
+ def test___init__(self, *args):
topology_file = mock.Mock()
config_tpl = mock.Mock()
tmp_file = mock.Mock()
@@ -193,8 +191,6 @@ class TestMultiPortConfig(unittest.TestCase):
opnfv_vnf = samplevnf_helper.MultiPortConfig(
topology_file, config_tpl, tmp_file, vnfd_mock)
self.assertEqual(0, opnfv_vnf.swq)
- self.mock_os.path = mock.MagicMock()
- self.mock_os.path.isfile = mock.Mock(return_value=False)
opnfv_vnf = samplevnf_helper.MultiPortConfig(
topology_file, config_tpl, tmp_file, vnfd_mock)
self.assertEqual(0, opnfv_vnf.swq)
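
The unit test change above replaces a blanket mock of os.path with a targeted patch of os.path.isfile. A standalone illustration of the decorator form (not Yardstick code):

    # Only os.path.isfile is replaced; the rest of os.path keeps working.
    import os
    from unittest import mock

    @mock.patch.object(os.path, "isfile", return_value=False)
    def exists_check(mock_isfile):
        return os.path.isfile("/etc/hosts"), os.path.join("a", "b")

    print(exists_check())   # (False, 'a/b') on Linux; join() is untouched
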
diff --git a/tests/unit/network_services/nfvi/test_resource.py b/tests/unit/network_services/nfvi/test_resource.py
index f5f7f0fe7..9f337c673 100644
--- a/tests/unit/network_services/nfvi/test_resource.py
+++ b/tests/unit/network_services/nfvi/test_resource.py
@@ -19,7 +19,8 @@ import unittest
from yardstick.network_services.nfvi.resource import ResourceProfile
from yardstick.network_services.nfvi import resource, collectd
-
+from yardstick.common.exceptions import ResourceCommandError
+from yardstick import ssh
class TestResourceProfile(unittest.TestCase):
VNFD = {'vnfd:vnfd-catalog':
@@ -128,8 +129,31 @@ class TestResourceProfile(unittest.TestCase):
self.assertEqual(val, ('error', 'Invalid', '', ''))
def test__start_collectd(self):
- self.assertIsNone(
- self.resource_profile._start_collectd(self.ssh_mock, "/opt/nsb_bin"))
+ ssh_mock = mock.Mock()
+ ssh_mock.execute = mock.Mock(return_value=(0, "", ""))
+ self.assertIsNone(self.resource_profile._start_collectd(ssh_mock,
+ "/opt/nsb_bin"))
+
+ ssh_mock.execute = mock.Mock(side_effect=ssh.SSHError)
+ with self.assertRaises(ssh.SSHError):
+ self.resource_profile._start_collectd(ssh_mock, "/opt/nsb_bin")
+
+ ssh_mock.execute = mock.Mock(return_value=(1, "", ""))
+ self.assertIsNone(self.resource_profile._start_collectd(ssh_mock,
+ "/opt/nsb_bin"))
+
+ def test__start_rabbitmq(self):
+ ssh_mock = mock.Mock()
+ ssh_mock.execute = mock.Mock(return_value=(0, "RabbitMQ", ""))
+ self.assertIsNone(self.resource_profile._start_rabbitmq(ssh_mock))
+
+ ssh_mock.execute = mock.Mock(return_value=(0, "", ""))
+ with self.assertRaises(ResourceCommandError):
+ self.resource_profile._start_rabbitmq(ssh_mock)
+
+ ssh_mock.execute = mock.Mock(return_value=(1, "", ""))
+ with self.assertRaises(ResourceCommandError):
+ self.resource_profile._start_rabbitmq(ssh_mock)
def test__prepare_collectd_conf(self):
self.assertIsNone(
@@ -154,11 +178,12 @@ class TestResourceProfile(unittest.TestCase):
def test_initiate_systemagent(self):
self.resource_profile._start_collectd = mock.Mock()
+ self.resource_profile._start_rabbitmq = mock.Mock()
self.assertIsNone(
self.resource_profile.initiate_systemagent("/opt/nsb_bin"))
def test_initiate_systemagent_raise(self):
- self.resource_profile._start_collectd = mock.Mock(side_effect=RuntimeError)
+ self.resource_profile._start_rabbitmq = mock.Mock(side_effect=RuntimeError)
with self.assertRaises(RuntimeError):
self.resource_profile.initiate_systemagent("/opt/nsb_bin")
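
The new tests pin down how _start_rabbitmq is expected to behave; the helper itself lives in yardstick/network_services/nfvi/resource.py. A sketch inferred only from these tests, not the actual implementation (the exact restart command is assumed):

    # Inferred behaviour: raise when the restart command fails or when the
    # status output does not report a running RabbitMQ node.
    class ResourceCommandError(Exception):   # stand-in for yardstick.common.exceptions
        pass

    def start_rabbitmq(connection):
        exit_status, stdout, stderr = connection.execute(
            "sudo service rabbitmq-server restart && sudo rabbitmqctl status")
        if exit_status != 0:
            raise ResourceCommandError(stderr)
        if "RabbitMQ" not in stdout:
            raise ResourceCommandError("rabbitmqctl status does not report a running node")
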
diff --git a/tests/unit/network_services/vnf_generic/vnf/test_base.py b/tests/unit/network_services/vnf_generic/vnf/test_base.py
index 664373f8f..9ef6473f0 100644
--- a/tests/unit/network_services/vnf_generic/vnf/test_base.py
+++ b/tests/unit/network_services/vnf_generic/vnf/test_base.py
@@ -215,9 +215,11 @@ class TestGenericVNF(unittest.TestCase):
with self.assertRaises(TypeError) as exc:
# pylint: disable=abstract-class-instantiated
base.GenericVNF('vnf1', VNFD['vnfd:vnfd-catalog']['vnfd'][0])
- msg = ("Can't instantiate abstract class GenericVNF with abstract "
- "methods collect_kpi, instantiate, scale, terminate, "
- "wait_for_instantiate")
+
+ msg = ("Can't instantiate abstract class GenericVNF with abstract methods "
+ "collect_kpi, instantiate, scale, start_collect, "
+ "stop_collect, terminate, wait_for_instantiate")
+
self.assertEqual(msg, str(exc.exception))
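
The updated abstract-class message means a concrete VNF now has to provide start_collect and stop_collect alongside the existing five methods. A minimal no-op subclass, purely to show the required surface (simplified signatures, sketch only):

    from yardstick.network_services.vnf_generic.vnf import base

    class NoopVNF(base.GenericVNF):
        # Sketch: the smallest surface that satisfies the abstract base.
        def instantiate(self, scenario_cfg, context_cfg):
            pass

        def wait_for_instantiate(self):
            return 0

        def terminate(self):
            pass

        def scale(self, flavor=""):
            pass

        def collect_kpi(self):
            return {}

        def start_collect(self):
            pass

        def stop_collect(self):
            pass
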
diff --git a/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py b/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py
index 26bd1dadd..38a043d00 100644
--- a/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py
+++ b/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py
@@ -1661,42 +1661,6 @@ class TestSampleVnf(unittest.TestCase):
# test the default resource helper is MyResourceHelper, not subclass
self.assertEqual(type(sample_vnf.resource_helper), MyResourceHelper)
- def test__get_port0localip6(self):
- sample_vnf = SampleVNF('vnf1', self.VNFD_0)
- expected = '0064:ff9b:0:0:0:0:9810:6414'
- result = sample_vnf._get_port0localip6()
- self.assertEqual(result, expected)
-
- def test__get_port1localip6(self):
- sample_vnf = SampleVNF('vnf1', self.VNFD_0)
- expected = '0064:ff9b:0:0:0:0:9810:2814'
- result = sample_vnf._get_port1localip6()
- self.assertEqual(result, expected)
-
- def test__get_port0prefixip6(self):
- sample_vnf = SampleVNF('vnf1', self.VNFD_0)
- expected = '112'
- result = sample_vnf._get_port0prefixlen6()
- self.assertEqual(result, expected)
-
- def test__get_port1prefixip6(self):
- sample_vnf = SampleVNF('vnf1', self.VNFD_0)
- expected = '112'
- result = sample_vnf._get_port1prefixlen6()
- self.assertEqual(result, expected)
-
- def test__get_port0gateway6(self):
- sample_vnf = SampleVNF('vnf1', self.VNFD_0)
- expected = '0064:ff9b:0:0:0:0:9810:6414'
- result = sample_vnf._get_port0gateway6()
- self.assertEqual(result, expected)
-
- def test__get_port1gateway6(self):
- sample_vnf = SampleVNF('vnf1', self.VNFD_0)
- expected = '0064:ff9b:0:0:0:0:9810:2814'
- result = sample_vnf._get_port1gateway6()
- self.assertEqual(result, expected)
-
@mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.Process')
def test__start_vnf(self, *args):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
@@ -1750,6 +1714,64 @@ class TestSampleVnf(unittest.TestCase):
self.assertIsNone(sample_vnf.instantiate(scenario_cfg, {}))
self.assertEqual(sample_vnf.nfvi_context, context2)
+ def test__update_collectd_options(self):
+ scenario_cfg = {'options':
+ {'collectd':
+ {'interval': 3,
+ 'plugins':
+ {'plugin3': {'param': 3}}},
+ 'vnf__0':
+ {'collectd':
+ {'interval': 2,
+ 'plugins':
+ {'plugin3': {'param': 2},
+ 'plugin2': {'param': 2}}}}}}
+ context_cfg = {'nodes':
+ {'vnf__0':
+ {'collectd':
+ {'interval': 1,
+ 'plugins':
+ {'plugin3': {'param': 1},
+ 'plugin2': {'param': 1},
+ 'plugin1': {'param': 1}}}}}}
+ expected = {'interval': 1,
+ 'plugins':
+ {'plugin3': {'param': 1},
+ 'plugin2': {'param': 1},
+ 'plugin1': {'param': 1}}}
+
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ sample_vnf = SampleVNF('vnf__0', vnfd)
+ sample_vnf._update_collectd_options(scenario_cfg, context_cfg)
+ self.assertEqual(sample_vnf.setup_helper.collectd_options, expected)
+
+ def test__update_options(self):
+ options1 = {'interval': 1,
+ 'param1': 'value1',
+ 'plugins':
+ {'plugin3': {'param': 3},
+ 'plugin2': {'param': 1},
+ 'plugin1': {'param': 1}}}
+ options2 = {'interval': 2,
+ 'param2': 'value2',
+ 'plugins':
+ {'plugin4': {'param': 4},
+ 'plugin2': {'param': 2},
+ 'plugin1': {'param': 2}}}
+ expected = {'interval': 1,
+ 'param1': 'value1',
+ 'param2': 'value2',
+ 'plugins':
+ {'plugin4': {'param': 4},
+ 'plugin3': {'param': 3},
+ 'plugin2': {'param': 1},
+ 'plugin1': {'param': 1}}}
+
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ sample_vnf = SampleVNF('vnf1', vnfd)
+ sample_vnf._update_options(options2, options1)
+ self.assertEqual(options2, expected)
+
@mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.time")
@mock.patch("yardstick.ssh.SSH")
def test_wait_for_instantiate_empty_queue(self, ssh, *args):
@@ -1785,16 +1807,6 @@ class TestSampleVnf(unittest.TestCase):
self.assertEqual(sample_vnf.wait_for_instantiate(), 0)
- def test__build_ports(self):
- vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
- sample_vnf = SampleVNF('vnf1', vnfd)
-
- self.assertIsNone(sample_vnf._build_ports())
- self.assertIsNotNone(sample_vnf.networks)
- self.assertIsNotNone(sample_vnf.uplink_ports)
- self.assertIsNotNone(sample_vnf.downlink_ports)
- self.assertIsNotNone(sample_vnf.my_ports)
-
@mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.time")
def test_vnf_execute_with_queue_data(self, *args):
queue_size_list = [
diff --git a/tools/virt_ci_rampup.sh b/tools/virt_ci_rampup.sh
index 6a9f2e7cb..aaf162cf7 100755
--- a/tools/virt_ci_rampup.sh
+++ b/tools/virt_ci_rampup.sh
@@ -13,9 +13,33 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+env_http_proxy=$(sed -ne "s/^http_proxy=[\"\']\(.*\)[\"\']/\1/p" /etc/environment)
+if [[ -z ${http_proxy} ]] && [[ ! -z ${env_http_proxy} ]]; then
+ export http_proxy=${env_http_proxy}
+fi
+env_https_proxy=$(sed -ne "s/^https_proxy=[\"\']\(.*\)[\"\']/\1/p" /etc/environment)
+if [[ -z ${https_proxy} ]] && [[ ! -z ${env_https_proxy} ]]; then
+ export https_proxy=${env_https_proxy}
+fi
+env_ftp_proxy=$(sed -ne "s/^ftp_proxy=[\"\']\(.*\)[\"\']/\1/p" /etc/environment)
+if [[ -z ${ftp_proxy} ]] && [[ ! -z ${env_ftp_proxy} ]]; then
+ export ftp_proxy=${env_ftp_proxy}
+fi
+if [[ ! -z ${http_proxy} ]] || [[ ! -z ${https_proxy} ]]; then
+ export no_proxy="${no_proxy}"
+ extra_args="${extra_args} -e @/tmp/proxy.yml "
+ cat <<EOF > /tmp/proxy.yml
+---
+proxy_env:
+ http_proxy: ${http_proxy}
+ https_proxy: ${https_proxy}
+ ftp_proxy: ${ftp_proxy}
+ no_proxy: ${no_proxy}
+EOF
+fi
ANSIBLE_SCRIPTS="${0%/*}/../ansible"
-cd ${ANSIBLE_SCRIPTS} &&\
+cd ${ANSIBLE_SCRIPTS} && \
sudo -EH ansible-playbook \
- -e rs_file='../etc/infra/infra_deploy.yaml' \
+ -e RS_FILE='../etc/infra/infra_deploy_two.yaml' -e CLEAN_UP=False ${extra_args} \
-i inventory.ini infra_deploy.yml
diff --git a/yardstick/benchmark/contexts/base.py b/yardstick/benchmark/contexts/base.py
index ae8319e37..692c16892 100644
--- a/yardstick/benchmark/contexts/base.py
+++ b/yardstick/benchmark/contexts/base.py
@@ -6,17 +6,20 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+
import abc
import six
-import yardstick.common.utils as utils
+from yardstick.common import constants
+from yardstick.common import utils
class Flags(object):
"""Class to represent the status of the flags in a context"""
_FLAGS = {'no_setup': False,
- 'no_teardown': False}
+ 'no_teardown': False,
+ 'os_cloud_config': constants.OS_CLOUD_DEFAULT_CONFIG}
def __init__(self, **kwargs):
for name, value in self._FLAGS.items():
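
With this change a Flags object also carries an os_cloud_config entry whose real default comes from yardstick.common.constants.OS_CLOUD_DEFAULT_CONFIG (a dict is assumed here for illustration). A condensed sketch of the behaviour:

    # Condensed Flags-style sketch; the real default is OS_CLOUD_DEFAULT_CONFIG.
    OS_CLOUD_DEFAULT_CONFIG = {"verify": False}   # assumed placeholder value

    class FlagsSketch(object):
        _FLAGS = {"no_setup": False,
                  "no_teardown": False,
                  "os_cloud_config": OS_CLOUD_DEFAULT_CONFIG}

        def __init__(self, **kwargs):
            for name, default in self._FLAGS.items():
                setattr(self, name, kwargs.get(name, default))

    flags = FlagsSketch(no_teardown=True)
    print(flags.no_setup, flags.no_teardown, flags.os_cloud_config)
    # False True {'verify': False}
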
diff --git a/yardstick/benchmark/contexts/heat.py b/yardstick/benchmark/contexts/heat.py
index 0d1dfb86f..82861829e 100644
--- a/yardstick/benchmark/contexts/heat.py
+++ b/yardstick/benchmark/contexts/heat.py
@@ -7,9 +7,6 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-from __future__ import absolute_import
-from __future__ import print_function
-
import collections
import logging
import os
@@ -328,8 +325,10 @@ class HeatContext(Context):
if not os.path.exists(self.key_filename):
SSH.gen_keys(self.key_filename)
- heat_template = HeatTemplate(self.name, self.template_file,
- self.heat_parameters)
+ heat_template = HeatTemplate(
+ self.name, template_file=self.template_file,
+ heat_parameters=self.heat_parameters,
+ os_cloud_config=self._flags.os_cloud_config)
if self.template_file is None:
self._add_resources_to_template(heat_template)
diff --git a/yardstick/benchmark/core/task.py b/yardstick/benchmark/core/task.py
index 955b8cae2..697cc007f 100644
--- a/yardstick/benchmark/core/task.py
+++ b/yardstick/benchmark/core/task.py
@@ -112,9 +112,9 @@ class Task(object): # pragma: no cover
continue
try:
- data = self._run(tasks[i]['scenarios'],
- tasks[i]['run_in_parallel'],
- output_config)
+ success, data = self._run(tasks[i]['scenarios'],
+ tasks[i]['run_in_parallel'],
+ output_config)
except KeyboardInterrupt:
raise
except Exception: # pylint: disable=broad-except
@@ -123,9 +123,15 @@ class Task(object): # pragma: no cover
testcases[tasks[i]['case_name']] = {'criteria': 'FAIL',
'tc_data': []}
else:
- LOG.info('Testcase: "%s" SUCCESS!!!', tasks[i]['case_name'])
- testcases[tasks[i]['case_name']] = {'criteria': 'PASS',
- 'tc_data': data}
+ if success:
+ LOG.info('Testcase: "%s" SUCCESS!!!', tasks[i]['case_name'])
+ testcases[tasks[i]['case_name']] = {'criteria': 'PASS',
+ 'tc_data': data}
+ else:
+ LOG.error('Testcase: "%s" FAILED!!!', tasks[i]['case_name'],
+ exc_info=True)
+ testcases[tasks[i]['case_name']] = {'criteria': 'FAIL',
+ 'tc_data': data}
if args.keep_deploy:
# keep deployment, forget about stack
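
Together with the _run() changes below, the net effect of this hunk is that a failing runner no longer raises out of the task loop: _run() now returns a success flag and the caller records PASS or FAIL per test case. A compressed sketch of that control flow (names simplified):

    # Runner errors mark the test case FAILED instead of aborting the task.
    def run_case(task, case_name, scenarios, run_in_parallel, output_config, testcases):
        try:
            success, data = task._run(scenarios, run_in_parallel, output_config)
        except KeyboardInterrupt:
            raise
        except Exception:               # broad except, mirroring task.py
            testcases[case_name] = {"criteria": "FAIL", "tc_data": []}
        else:
            criteria = "PASS" if success else "FAIL"
            testcases[case_name] = {"criteria": criteria, "tc_data": data}
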
@@ -240,6 +246,7 @@ class Task(object): # pragma: no cover
background_runners = []
+ task_success = True
result = []
# Start all background scenarios
for scenario in filter(_is_background_scenario, scenarios):
@@ -258,8 +265,8 @@ class Task(object): # pragma: no cover
for runner in runners:
status = runner_join(runner, background_runners, self.outputs, result)
if status != 0:
- raise RuntimeError(
- "{0} runner status {1}".format(runner.__execution_type__, status))
+ LOG.error("%s runner status %s", runner.__execution_type__, status)
+ task_success = False
LOG.info("Runner ended")
else:
# run serially
@@ -271,8 +278,8 @@ class Task(object): # pragma: no cover
LOG.error('Scenario NO.%s: "%s" ERROR!',
scenarios.index(scenario) + 1,
scenario.get('type'))
- raise RuntimeError(
- "{0} runner status {1}".format(runner.__execution_type__, status))
+ LOG.error("%s runner status %s", runner.__execution_type__, status)
+ task_success = False
LOG.info("Runner ended")
# Abort background runners
@@ -289,7 +296,7 @@ class Task(object): # pragma: no cover
base_runner.Runner.release(runner)
print("Background task ended")
- return result
+ return task_success, result
def atexit_handler(self):
"""handler for process termination"""
diff --git a/yardstick/benchmark/scenarios/availability/attacker/attacker_process.py b/yardstick/benchmark/scenarios/availability/attacker/attacker_process.py
index cb171eafa..7f1136c08 100644
--- a/yardstick/benchmark/scenarios/availability/attacker/attacker_process.py
+++ b/yardstick/benchmark/scenarios/availability/attacker/attacker_process.py
@@ -42,29 +42,28 @@ class ProcessAttacker(BaseAttacker):
def check(self):
with open(self.check_script, "r") as stdin_file:
- exit_status, stdout, stderr = self.connection.execute(
+ _, stdout, stderr = self.connection.execute(
"sudo /bin/sh -s {0}".format(self.service_name),
stdin=stdin_file)
if stdout:
- LOG.info("check the environment success!")
+            LOG.info("Environment check succeeded!")
return int(stdout.strip('\n'))
else:
- LOG.error(
- "the host environment is error, stdout:%s, stderr:%s",
- stdout, stderr)
+ LOG.error("Error checking the host environment, "
+ "stdout:%s, stderr:%s", stdout, stderr)
return False
def inject_fault(self):
with open(self.inject_script, "r") as stdin_file:
- exit_status, stdout, stderr = self.connection.execute(
+ self.connection.execute(
"sudo /bin/sh -s {0}".format(self.service_name),
stdin=stdin_file)
def recover(self):
with open(self.recovery_script, "r") as stdin_file:
- exit_status, stdout, stderr = self.connection.execute(
+ exit_status, _, _ = self.connection.execute(
"sudo /bin/bash -s {0} ".format(self.service_name),
stdin=stdin_file)
if exit_status:
- LOG.info("Fail to restart service!")
+ LOG.info("Failed to restart service: %s", self.recovery_script)
diff --git a/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py b/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py
index d03d04420..d67a16b98 100644
--- a/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py
+++ b/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py
@@ -71,7 +71,7 @@ class BaseAttacker(object):
for attacker_cls in utils.itersubclasses(BaseAttacker):
if attacker_type == attacker_cls.__attacker_type__:
return attacker_cls
- raise RuntimeError("No such runner_type %s" % attacker_type)
+ raise RuntimeError("No such runner_type: %s" % attacker_type)
def get_script_fullpath(self, path):
base_path = os.path.dirname(attacker_conf_path)
diff --git a/yardstick/benchmark/scenarios/availability/director.py b/yardstick/benchmark/scenarios/availability/director.py
index 71690c135..6cc0cb286 100644
--- a/yardstick/benchmark/scenarios/availability/director.py
+++ b/yardstick/benchmark/scenarios/availability/director.py
@@ -40,7 +40,7 @@ class Director(object):
nodes = self.context_cfg.get("nodes", None)
# setup attackers
if "attackers" in self.scenario_cfg["options"]:
- LOG.debug("start init attackers...")
+ LOG.debug("Start init attackers...")
attacker_cfgs = self.scenario_cfg["options"]["attackers"]
self.attackerMgr = baseattacker.AttackerMgr()
self.data = self.attackerMgr.init_attackers(attacker_cfgs,
@@ -48,19 +48,19 @@ class Director(object):
# setup monitors
if "monitors" in self.scenario_cfg["options"]:
- LOG.debug("start init monitors...")
+ LOG.debug("Start init monitors...")
monitor_cfgs = self.scenario_cfg["options"]["monitors"]
self.monitorMgr = basemonitor.MonitorMgr(self.data)
self.monitorMgr.init_monitors(monitor_cfgs, nodes)
# setup operations
if "operations" in self.scenario_cfg["options"]:
- LOG.debug("start init operations...")
+ LOG.debug("Start init operations...")
operation_cfgs = self.scenario_cfg["options"]["operations"]
self.operationMgr = baseoperation.OperationMgr()
self.operationMgr.init_operations(operation_cfgs, nodes)
# setup result checker
if "resultCheckers" in self.scenario_cfg["options"]:
- LOG.debug("start init resultCheckers...")
+ LOG.debug("Start init resultCheckers...")
result_check_cfgs = self.scenario_cfg["options"]["resultCheckers"]
self.resultCheckerMgr = baseresultchecker.ResultCheckerMgr()
self.resultCheckerMgr.init_ResultChecker(result_check_cfgs, nodes)
@@ -69,7 +69,7 @@ class Director(object):
if intermediate_variables is None:
intermediate_variables = {}
LOG.debug(
- "the type of current action is %s, the key is %s", type, key)
+ "The type of current action is %s, the key is %s", type, key)
if type == ActionType.ATTACKER:
return actionplayers.AttackerPlayer(self.attackerMgr[key], intermediate_variables)
if type == ActionType.MONITOR:
@@ -80,17 +80,17 @@ class Director(object):
if type == ActionType.OPERATION:
return actionplayers.OperationPlayer(self.operationMgr[key],
intermediate_variables)
- LOG.debug("something run when creatactionplayer")
+ LOG.debug("The type is not recognized by createActionPlayer")
def createActionRollbacker(self, type, key):
LOG.debug(
- "the type of current action is %s, the key is %s", type, key)
+ "The type of current action is %s, the key is %s", type, key)
if type == ActionType.ATTACKER:
return actionrollbackers.AttackerRollbacker(self.attackerMgr[key])
if type == ActionType.OPERATION:
return actionrollbackers.OperationRollbacker(
self.operationMgr[key])
- LOG.debug("no rollbacker created for %s", key)
+ LOG.debug("No rollbacker created for key: %s", key)
def verify(self):
result = True
@@ -99,7 +99,7 @@ class Director(object):
if hasattr(self, 'resultCheckerMgr'):
result &= self.resultCheckerMgr.verify()
if result:
- LOG.debug("monitors are passed")
+ LOG.debug("Monitor results are passed")
return result
def stopMonitors(self):
@@ -107,12 +107,12 @@ class Director(object):
self.monitorMgr.wait_monitors()
def knockoff(self):
- LOG.debug("knock off ....")
+ LOG.debug("Knock off ....")
while self.executionSteps:
singleStep = self.executionSteps.pop()
singleStep.rollback()
def store_result(self, result):
- LOG.debug("store result ....")
+ LOG.debug("Store result ....")
if hasattr(self, 'monitorMgr'):
self.monitorMgr.store_result(result)
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/start_service.bash b/yardstick/benchmark/scenarios/availability/ha_tools/start_service.bash
index 858d86ca0..2388507d7 100755
--- a/yardstick/benchmark/scenarios/availability/ha_tools/start_service.bash
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/start_service.bash
@@ -9,24 +9,23 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Start a service and check the service is started
+# Start or restart a service and check the service is started
set -e
service_name=$1
+operation=${2-start} # values are "start" or "restart"
-Distributor=$(lsb_release -a | grep "Distributor ID" | awk '{print $3}')
-
-if [ "$Distributor" != "Ubuntu" -a "$service_name" != "keystone" -a "$service_name" != "neutron-server" -a "$service_name" != "haproxy" ]; then
+if [ -f /usr/bin/yum -a "$service_name" != "keystone" -a "$service_name" != "neutron-server" -a "$service_name" != "haproxy" -a "$service_name" != "openvswitch" ]; then
service_name="openstack-"${service_name}
-elif [ "$Distributor" = "Ubuntu" -a "$service_name" = "keystone" ]; then
+elif [ -f /usr/bin/apt -a "$service_name" = "keystone" ]; then
service_name="apache2"
elif [ "$service_name" = "keystone" ]; then
service_name="httpd"
fi
if which systemctl 2>/dev/null; then
- systemctl start $service_name
+ systemctl $operation $service_name
else
- service $service_name start
+ service $service_name $operation
fi
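
The script now takes an optional second positional argument selecting the service action ("start" by default, "restart" otherwise) and picks the service name prefix based on the package manager present rather than parsing lsb_release. These HA tool scripts are piped to a shell with positional arguments, as in attacker_process.py above; a local, hedged equivalent using subprocess (the script path is illustrative) looks like:

    import subprocess

    SCRIPT = "ha_tools/start_service.bash"     # illustrative local path

    def run_service_action(service_name, operation="start"):
        # Equivalent of: sudo /bin/bash -s <service> <operation> < start_service.bash
        with open(SCRIPT) as stdin_file:
            return subprocess.run(
                ["sudo", "/bin/bash", "-s", service_name, operation],
                stdin=stdin_file, check=False).returncode

    run_service_action("keystone", "restart")   # mapped to apache2/httpd inside the script
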
diff --git a/yardstick/benchmark/scenarios/availability/monitor/basemonitor.py b/yardstick/benchmark/scenarios/availability/monitor/basemonitor.py
index 50a63f53d..f6004c774 100644
--- a/yardstick/benchmark/scenarios/availability/monitor/basemonitor.py
+++ b/yardstick/benchmark/scenarios/availability/monitor/basemonitor.py
@@ -103,7 +103,7 @@ class BaseMonitor(multiprocessing.Process):
for monitor in utils.itersubclasses(BaseMonitor):
if monitor_type == monitor.__monitor_type__:
return monitor
- raise RuntimeError("No such monitor_type %s" % monitor_type)
+ raise RuntimeError("No such monitor_type: %s" % monitor_type)
def get_script_fullpath(self, path):
base_path = os.path.dirname(monitor_conf_path)
diff --git a/yardstick/benchmark/scenarios/availability/monitor/monitor_command.py b/yardstick/benchmark/scenarios/availability/monitor/monitor_command.py
index d0551bf03..3b36c762d 100644
--- a/yardstick/benchmark/scenarios/availability/monitor/monitor_command.py
+++ b/yardstick/benchmark/scenarios/availability/monitor/monitor_command.py
@@ -24,7 +24,7 @@ def _execute_shell_command(command):
output = []
try:
output = subprocess.check_output(command, shell=True)
- except Exception:
+ except Exception: # pylint: disable=broad-except
exitcode = -1
LOG.error("exec command '%s' error:\n ", command, exc_info=True)
@@ -45,7 +45,7 @@ class MonitorOpenstackCmd(basemonitor.BaseMonitor):
self.connection = ssh.SSH.from_node(host,
defaults={"user": "root"})
self.connection.wait(timeout=600)
- LOG.debug("ssh host success!")
+ LOG.debug("ssh host (%s) success!", str(host))
self.check_script = self.get_script_fullpath(
"ha_tools/check_openstack_cmd.bash")
@@ -61,22 +61,20 @@ class MonitorOpenstackCmd(basemonitor.BaseMonitor):
self.cmd = self.cmd + " --insecure"
def monitor_func(self):
- exit_status = 0
exit_status, stdout = _execute_shell_command(self.cmd)
- LOG.debug("Execute command '%s' and the stdout is:\n%s", self.cmd, stdout)
+ LOG.debug("Executed command '%s'. "
+ "The stdout is:\n%s", self.cmd, stdout)
if exit_status:
return False
return True
def verify_SLA(self):
outage_time = self._result.get('outage_time', None)
- LOG.debug("the _result:%s", self._result)
max_outage_time = self._config["sla"]["max_outage_time"]
if outage_time > max_outage_time:
LOG.info("SLA failure: %f > %f", outage_time, max_outage_time)
return False
else:
- LOG.info("the sla is passed")
return True
@@ -97,7 +95,7 @@ def _test(): # pragma: no cover
}
monitor_configs.append(config)
- p = basemonitor.MonitorMgr()
+ p = basemonitor.MonitorMgr({})
p.init_monitors(monitor_configs, context)
p.start_monitors()
p.wait_monitors()
diff --git a/yardstick/benchmark/scenarios/availability/monitor/monitor_multi.py b/yardstick/benchmark/scenarios/availability/monitor/monitor_multi.py
index dce69f45f..971bae1e9 100644
--- a/yardstick/benchmark/scenarios/availability/monitor/monitor_multi.py
+++ b/yardstick/benchmark/scenarios/availability/monitor/monitor_multi.py
@@ -29,7 +29,7 @@ class MultiMonitor(basemonitor.BaseMonitor):
monitor_cls = basemonitor.BaseMonitor.get_monitor_cls(monitor_type)
monitor_number = self._config.get("monitor_number", 1)
- for i in range(monitor_number):
+ for _ in range(monitor_number):
monitor_ins = monitor_cls(self._config, self._context,
self.monitor_data)
self.monitors.append(monitor_ins)
@@ -70,7 +70,8 @@ class MultiMonitor(basemonitor.BaseMonitor):
elif "max_recover_time" in self._config["sla"]:
max_outage_time = self._config["sla"]["max_recover_time"]
else:
- raise RuntimeError("monitor max_outage_time config is not found")
+ raise RuntimeError("'max_outage_time' or 'max_recover_time' "
+ "config is not found")
self._result = {"outage_time": outage_time}
if outage_time > max_outage_time:
diff --git a/yardstick/benchmark/scenarios/availability/monitor/monitor_process.py b/yardstick/benchmark/scenarios/availability/monitor/monitor_process.py
index b0f6f8e9d..8d2f2633c 100644
--- a/yardstick/benchmark/scenarios/availability/monitor/monitor_process.py
+++ b/yardstick/benchmark/scenarios/availability/monitor/monitor_process.py
@@ -25,14 +25,14 @@ class MonitorProcess(basemonitor.BaseMonitor):
self.connection = ssh.SSH.from_node(host, defaults={"user": "root"})
self.connection.wait(timeout=600)
- LOG.debug("ssh host success!")
+ LOG.debug("ssh host (%s) success!", str(host))
self.check_script = self.get_script_fullpath(
"ha_tools/check_process_python.bash")
self.process_name = self._config["process_name"]
def monitor_func(self):
with open(self.check_script, "r") as stdin_file:
- exit_status, stdout, stderr = self.connection.execute(
+ _, stdout, _ = self.connection.execute(
"sudo /bin/sh -s {0}".format(self.process_name),
stdin=stdin_file)
@@ -45,14 +45,12 @@ class MonitorProcess(basemonitor.BaseMonitor):
return True
def verify_SLA(self):
- LOG.debug("the _result:%s", self._result)
outage_time = self._result.get('outage_time', None)
max_outage_time = self._config["sla"]["max_recover_time"]
if outage_time > max_outage_time:
- LOG.error("SLA failure: %f > %f", outage_time, max_outage_time)
+ LOG.info("SLA failure: %f > %f", outage_time, max_outage_time)
return False
else:
- LOG.info("the sla is passed")
return True
@@ -73,7 +71,7 @@ def _test(): # pragma: no cover
}
monitor_configs.append(config)
- p = basemonitor.MonitorMgr()
+ p = basemonitor.MonitorMgr({})
p.init_monitors(monitor_configs, context)
p.start_monitors()
p.wait_monitors()
diff --git a/yardstick/benchmark/scenarios/availability/scenario_general.py b/yardstick/benchmark/scenarios/availability/scenario_general.py
index 9ac55471d..1fadd2532 100644
--- a/yardstick/benchmark/scenarios/availability/scenario_general.py
+++ b/yardstick/benchmark/scenarios/availability/scenario_general.py
@@ -26,7 +26,6 @@ class ScenarioGeneral(base.Scenario):
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
self.intermediate_variables = {}
- self.pass_flag = True
def setup(self):
self.director = Director(self.scenario_cfg, self.context_cfg)
@@ -47,7 +46,7 @@ class ScenarioGeneral(base.Scenario):
step['actionType'], step['actionKey'])
if actionRollbacker:
self.director.executionSteps.append(actionRollbacker)
- except Exception:
+ except Exception: # pylint: disable=broad-except
LOG.exception("Exception")
LOG.debug(
"\033[91m exception when running step: %s .... \033[0m",
@@ -59,31 +58,16 @@ class ScenarioGeneral(base.Scenario):
self.director.stopMonitors()
verify_result = self.director.verify()
-
- self.director.store_result(result)
-
for k, v in self.director.data.items():
if v == 0:
result['sla_pass'] = 0
verify_result = False
- self.pass_flag = False
- LOG.info(
- "\033[92m The service process not found in the host \
-envrioment, the HA test case NOT pass")
+ LOG.info("\033[92m The service process (%s) not found in the host environment", k)
- if verify_result:
- result['sla_pass'] = 1
- LOG.info(
- "\033[92m Congratulations, "
- "the HA test case PASS! \033[0m")
- else:
- result['sla_pass'] = 0
- self.pass_flag = False
- LOG.info(
- "\033[91m Aoh, the HA test case FAIL,"
- "please check the detail debug information! \033[0m")
+ result['sla_pass'] = 1 if verify_result else 0
+ self.director.store_result(result)
+
+ assert verify_result is True, "The HA test case NOT passed"
def teardown(self):
self.director.knockoff()
-
- assert self.pass_flag, "The HA test case NOT passed"
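
ScenarioGeneral now folds the monitor verification and the per-process liveness checks into a single sla_pass value, stores it before asserting, and raises the assertion from run() instead of postponing it to teardown(). A condensed sketch of that flow (assuming, as the runner does, that an AssertionError marks the iteration as failed):

    def evaluate_ha(result, verify_result, process_status):
        # Fold process checks into the verification verdict, record it, then assert.
        for name, alive in process_status.items():
            if alive == 0:
                verify_result = False          # a monitored process disappeared
        result['sla_pass'] = 1 if verify_result else 0
        assert verify_result is True, "The HA test case NOT passed"
        return result

    evaluate_ha({}, True, {'nova-api': 1, 'neutron-server': 1})   # passes
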
diff --git a/yardstick/benchmark/scenarios/availability/serviceha.py b/yardstick/benchmark/scenarios/availability/serviceha.py
index 6d0d812af..42941c6e7 100755
--- a/yardstick/benchmark/scenarios/availability/serviceha.py
+++ b/yardstick/benchmark/scenarios/availability/serviceha.py
@@ -29,13 +29,12 @@ class ServiceHA(base.Scenario):
self.context_cfg = context_cfg
self.setup_done = False
self.data = {}
- self.pass_flag = True
def setup(self):
"""scenario setup"""
nodes = self.context_cfg.get("nodes", None)
if nodes is None:
- LOG.error("the nodes info is none")
+ LOG.error("The nodes info is none")
return
self.attackers = []
@@ -58,33 +57,27 @@ class ServiceHA(base.Scenario):
def run(self, result):
"""execute the benchmark"""
if not self.setup_done:
- LOG.error("The setup not finished!")
+ LOG.error("The setup is not finished!")
return
self.monitorMgr.start_monitors()
- LOG.info("HA monitor start!")
+ LOG.info("Monitor '%s' start!", self.__scenario_type__)
for attacker in self.attackers:
attacker.inject_fault()
self.monitorMgr.wait_monitors()
- LOG.info("HA monitor stop!")
+ LOG.info("Monitor '%s' stop!", self.__scenario_type__)
sla_pass = self.monitorMgr.verify_SLA()
for k, v in self.data.items():
if v == 0:
- result['sla_pass'] = 0
- self.pass_flag = False
- LOG.info("The service process not found in the host envrioment, \
-the HA test case NOT pass")
- return
+ sla_pass = False
+                LOG.info("The service process (%s) not found in the host environment", k)
+
+ result['sla_pass'] = 1 if sla_pass else 0
self.monitorMgr.store_result(result)
- if sla_pass:
- result['sla_pass'] = 1
- LOG.info("The HA test case PASS the SLA")
- else:
- result['sla_pass'] = 0
- self.pass_flag = False
+
assert sla_pass is True, "The HA test case NOT pass the SLA"
return
@@ -94,8 +87,6 @@ the HA test case NOT pass")
for attacker in self.attackers:
attacker.recover()
- assert self.pass_flag, "The HA test case NOT passed"
-
def _test(): # pragma: no cover
"""internal test function"""
diff --git a/yardstick/benchmark/scenarios/lib/attach_volume.py b/yardstick/benchmark/scenarios/lib/attach_volume.py
index 88124964b..96dd130b1 100644
--- a/yardstick/benchmark/scenarios/lib/attach_volume.py
+++ b/yardstick/benchmark/scenarios/lib/attach_volume.py
@@ -6,30 +6,31 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-
-from __future__ import print_function
-from __future__ import absolute_import
-
import logging
from yardstick.benchmark.scenarios import base
-import yardstick.common.openstack_utils as op_utils
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
LOG = logging.getLogger(__name__)
class AttachVolume(base.Scenario):
- """Attach a volmeu to an instance"""
+ """Attach a volume to an instance"""
__scenario_type__ = "AttachVolume"
def __init__(self, scenario_cfg, context_cfg):
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
- self.options = self.scenario_cfg['options']
+ self.options = self.scenario_cfg["options"]
- self.server_id = self.options.get("server_id", "TestServer")
- self.volume_id = self.options.get("volume_id", None)
+ self.server_name_or_id = self.options["server_name_or_id"]
+ self.volume_name_or_id = self.options["volume_name_or_id"]
+ self.device = self.options.get("device")
+ self.wait = self.options.get("wait", True)
+ self.timeout = self.options.get("timeout")
+ self.shade_client = openstack_utils.get_shade_client()
self.setup_done = False
@@ -44,10 +45,14 @@ class AttachVolume(base.Scenario):
if not self.setup_done:
self.setup()
- status = op_utils.attach_server_volume(self.server_id,
- self.volume_id)
+ status = openstack_utils.attach_volume_to_server(
+ self.shade_client, self.server_name_or_id, self.volume_name_or_id,
+ device=self.device, wait=self.wait, timeout=self.timeout)
+
+ if not status:
+ result.update({"attach_volume": 0})
+ LOG.error("Attach volume to server failed!")
+ raise exceptions.ScenarioAttachVolumeError
- if status:
- LOG.info("Attach volume to server successful!")
- else:
- LOG.info("Attach volume to server failed!")
+ result.update({"attach_volume": 1})
+ LOG.info("Attach volume to server successful!")
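
The rewritten lib scenarios all follow the same shape: mandatory options are read with self.options["..."] so a missing key fails immediately, optional ones use .get(), and a falsy status updates the result record and raises a scenario-specific exception instead of logging and carrying on. A hedged skeleton of that shape, with placeholders standing in for the yardstick helpers:

    import logging

    LOG = logging.getLogger(__name__)

    class ScenarioAttachVolumeError(Exception):          # stand-in for yardstick.common.exceptions
        pass

    def attach_volume_to_server(client, server, volume, **kwargs):   # placeholder helper
        return True

    class AttachVolumeSketch(object):
        def __init__(self, options, shade_client):
            self.server_name_or_id = options["server_name_or_id"]   # required
            self.volume_name_or_id = options["volume_name_or_id"]   # required
            self.device = options.get("device")                     # optional
            self.shade_client = shade_client

        def run(self, result):
            status = attach_volume_to_server(
                self.shade_client, self.server_name_or_id,
                self.volume_name_or_id, device=self.device)
            if not status:
                result.update({"attach_volume": 0})
                LOG.error("Attach volume to server failed!")
                raise ScenarioAttachVolumeError
            result.update({"attach_volume": 1})
            LOG.info("Attach volume to server successful!")
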
diff --git a/yardstick/benchmark/scenarios/lib/create_keypair.py b/yardstick/benchmark/scenarios/lib/create_keypair.py
index f5b1fff7a..ee9bc440a 100644
--- a/yardstick/benchmark/scenarios/lib/create_keypair.py
+++ b/yardstick/benchmark/scenarios/lib/create_keypair.py
@@ -6,15 +6,11 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-
-from __future__ import print_function
-from __future__ import absolute_import
-
import logging
-import paramiko
from yardstick.benchmark.scenarios import base
-import yardstick.common.openstack_utils as op_utils
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
LOG = logging.getLogger(__name__)
@@ -27,10 +23,11 @@ class CreateKeypair(base.Scenario):
def __init__(self, scenario_cfg, context_cfg):
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
- self.options = self.scenario_cfg['options']
+ self.options = self.scenario_cfg["options"]
- self.key_name = self.options.get("key_name", "yardstick_key")
- self.key_filename = self.options.get("key_path", "/tmp/yardstick_key")
+ self.name = self.options["key_name"]
+ self.public_key = self.options.get("public_key")
+ self.shade_client = openstack_utils.get_shade_client()
self.setup_done = False
@@ -45,27 +42,17 @@ class CreateKeypair(base.Scenario):
if not self.setup_done:
self.setup()
- rsa_key = paramiko.RSAKey.generate(bits=2048, progress_func=None)
- rsa_key.write_private_key_file(self.key_filename)
- LOG.info("Writing key_file %s ...", self.key_filename)
- with open(self.key_filename + ".pub", "w") as pubkey_file:
- pubkey_file.write(
- "%s %s\n" % (rsa_key.get_name(), rsa_key.get_base64()))
- del rsa_key
-
- keypair = op_utils.create_keypair(self.key_name,
- self.key_filename + ".pub")
+ keypair = openstack_utils.create_keypair(
+ self.shade_client, self.name, public_key=self.public_key)
- if keypair:
- result.update({"keypair_create": 1})
- LOG.info("Create keypair successful!")
- else:
+ if not keypair:
result.update({"keypair_create": 0})
- LOG.info("Create keypair failed!")
- try:
- keys = self.scenario_cfg.get('output', '').split()
- except KeyError:
- pass
- else:
- values = [keypair.id]
- return self._push_to_outputs(keys, values)
+ LOG.error("Create keypair failed!")
+ raise exceptions.ScenarioCreateKeypairError
+
+ result.update({"keypair_create": 1})
+ LOG.info("Create keypair successful!")
+ keys = self.scenario_cfg.get("output", '').split()
+ keypair_id = keypair["id"]
+ values = [keypair_id]
+ return self._push_to_outputs(keys, values)
diff --git a/yardstick/benchmark/scenarios/lib/create_server.py b/yardstick/benchmark/scenarios/lib/create_server.py
index 31ba18ed4..e2748aecf 100644
--- a/yardstick/benchmark/scenarios/lib/create_server.py
+++ b/yardstick/benchmark/scenarios/lib/create_server.py
@@ -6,14 +6,11 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-
-from __future__ import print_function
-from __future__ import absolute_import
-
import logging
from yardstick.benchmark.scenarios import base
-import yardstick.common.openstack_utils as op_utils
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
LOG = logging.getLogger(__name__)
@@ -26,15 +23,27 @@ class CreateServer(base.Scenario):
def __init__(self, scenario_cfg, context_cfg):
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
- self.options = self.scenario_cfg['options']
-
- self.image_name = self.options.get("image_name", None)
- self.flavor_name = self.options.get("flavor_name", None)
- self.openstack = self.options.get("openstack_paras", None)
-
- self.glance_client = op_utils.get_glance_client()
- self.neutron_client = op_utils.get_neutron_client()
- self.nova_client = op_utils.get_nova_client()
+ self.options = self.scenario_cfg["options"]
+
+ self.name = self.options["name"]
+ self.image = self.options["image"]
+ self.flavor = self.options["flavor"]
+ self.auto_ip = self.options.get("auto_ip", True)
+ self.ips = self.options.get("ips")
+ self.ip_pool = self.options.get("ip_pool")
+ self.root_volume = self.options.get("root_volume")
+ self.terminate_volume = self.options.get("terminate_volume", False)
+ self.wait = self.options.get("wait", True)
+ self.timeout = self.options.get("timeout", 180)
+ self.reuse_ips = self.options.get("reuse_ips", True)
+ self.network = self.options.get("network")
+ self.boot_from_volume = self.options.get("boot_from_volume", False)
+ self.volume_size = self.options.get("volume_size", "20")
+ self.boot_volume = self.options.get("boot_volume")
+ self.volumes = self.options.get("volumes")
+ self.nat_destination = self.options.get("nat_destination")
+
+ self.shade_client = openstack_utils.get_shade_client()
self.setup_done = False
@@ -49,26 +58,23 @@ class CreateServer(base.Scenario):
if not self.setup_done:
self.setup()
- if self.image_name is not None:
- self.openstack['image'] = op_utils.get_image_id(self.glance_client,
- self.image_name)
- if self.flavor_name is not None:
- self.openstack['flavor'] = op_utils.get_flavor_id(self.nova_client,
- self.flavor_name)
-
- vm = op_utils.create_instance_and_wait_for_active(self.openstack)
-
- if vm:
- result.update({"instance_create": 1})
- LOG.info("Create server successful!")
- else:
+ server = openstack_utils.create_instance_and_wait_for_active(
+ self.shade_client, self.name, self.image,
+ self.flavor, auto_ip=self.auto_ip, ips=self.ips,
+ ip_pool=self.ip_pool, root_volume=self.root_volume,
+ terminate_volume=self.terminate_volume, wait=self.wait,
+ timeout=self.timeout, reuse_ips=self.reuse_ips,
+ network=self.network, boot_from_volume=self.boot_from_volume,
+ volume_size=self.volume_size, boot_volume=self.boot_volume,
+ volumes=self.volumes, nat_destination=self.nat_destination)
+
+ if not server:
result.update({"instance_create": 0})
LOG.error("Create server failed!")
+ raise exceptions.ScenarioCreateServerError
- try:
- keys = self.scenario_cfg.get('output', '').split()
- except KeyError:
- pass
- else:
- values = [vm.id]
- return self._push_to_outputs(keys, values)
+ result.update({"instance_create": 1})
+ LOG.info("Create instance successful!")
+ keys = self.scenario_cfg.get("output", '').split()
+ values = [server["id"]]
+ return self._push_to_outputs(keys, values)
diff --git a/yardstick/benchmark/scenarios/lib/delete_keypair.py b/yardstick/benchmark/scenarios/lib/delete_keypair.py
index 135139959..a52a38567 100644
--- a/yardstick/benchmark/scenarios/lib/delete_keypair.py
+++ b/yardstick/benchmark/scenarios/lib/delete_keypair.py
@@ -6,14 +6,12 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-
-from __future__ import print_function
-from __future__ import absolute_import
-
import logging
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
from yardstick.benchmark.scenarios import base
-import yardstick.common.openstack_utils as op_utils
+
LOG = logging.getLogger(__name__)
@@ -26,11 +24,11 @@ class DeleteKeypair(base.Scenario):
def __init__(self, scenario_cfg, context_cfg):
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
- self.options = self.scenario_cfg['options']
+ self.options = self.scenario_cfg["options"]
- self.key_name = self.options.get("key_name", "yardstick_key")
+ self.key_name = self.options["key_name"]
- self.nova_client = op_utils.get_nova_client()
+ self.shade_client = openstack_utils.get_shade_client()
self.setup_done = False
@@ -45,12 +43,13 @@ class DeleteKeypair(base.Scenario):
if not self.setup_done:
self.setup()
- status = op_utils.delete_keypair(self.nova_client,
- self.key_name)
+ status = openstack_utils.delete_keypair(self.shade_client,
+ self.key_name)
- if status:
- result.update({"delete_keypair": 1})
- LOG.info("Delete keypair successful!")
- else:
+ if not status:
result.update({"delete_keypair": 0})
- LOG.info("Delete keypair failed!")
+ LOG.error("Delete keypair failed!")
+ raise exceptions.ScenarioDeleteKeypairError
+
+ result.update({"delete_keypair": 1})
+ LOG.info("Delete keypair successful!")
diff --git a/yardstick/benchmark/scenarios/lib/delete_server.py b/yardstick/benchmark/scenarios/lib/delete_server.py
index bcd8faba7..46229ff04 100644
--- a/yardstick/benchmark/scenarios/lib/delete_server.py
+++ b/yardstick/benchmark/scenarios/lib/delete_server.py
@@ -6,14 +6,11 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-
-from __future__ import print_function
-from __future__ import absolute_import
-
import logging
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
from yardstick.benchmark.scenarios import base
-import yardstick.common.openstack_utils as op_utils
LOG = logging.getLogger(__name__)
@@ -26,9 +23,13 @@ class DeleteServer(base.Scenario):
def __init__(self, scenario_cfg, context_cfg):
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
- self.options = self.scenario_cfg['options']
- self.server_id = self.options.get("server_id", None)
- self.nova_client = op_utils.get_nova_client()
+ self.options = self.scenario_cfg["options"]
+ self.server_name_or_id = self.options["name_or_id"]
+ self.wait = self.options.get("wait", False)
+ self.timeout = self.options.get("timeout", 180)
+ self.delete_ips = self.options.get("delete_ips", False)
+ self.delete_ip_retry = self.options.get("delete_ip_retry", 1)
+ self.shade_client = openstack_utils.get_shade_client()
self.setup_done = False
@@ -43,9 +44,15 @@ class DeleteServer(base.Scenario):
if not self.setup_done:
self.setup()
- status = op_utils.delete_instance(self.nova_client,
- instance_id=self.server_id)
- if status:
- LOG.info("Delete server successful!")
- else:
+ status = openstack_utils.delete_instance(
+ self.shade_client, self.server_name_or_id, wait=self.wait,
+ timeout=self.timeout, delete_ips=self.delete_ips,
+ delete_ip_retry=self.delete_ip_retry)
+
+ if not status:
+ result.update({"delete_server": 0})
LOG.error("Delete server failed!")
+ raise exceptions.ScenarioDeleteServerError
+
+ result.update({"delete_server": 1})
+ LOG.info("Delete server successful!")
diff --git a/yardstick/benchmark/scenarios/lib/get_flavor.py b/yardstick/benchmark/scenarios/lib/get_flavor.py
index d5e33947e..6727a7343 100644
--- a/yardstick/benchmark/scenarios/lib/get_flavor.py
+++ b/yardstick/benchmark/scenarios/lib/get_flavor.py
@@ -6,14 +6,11 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-
-from __future__ import print_function
-from __future__ import absolute_import
-
import logging
from yardstick.benchmark.scenarios import base
-import yardstick.common.openstack_utils as op_utils
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
LOG = logging.getLogger(__name__)
@@ -26,8 +23,12 @@ class GetFlavor(base.Scenario):
def __init__(self, scenario_cfg, context_cfg):
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
- self.options = self.scenario_cfg['options']
- self.flavor_name = self.options.get("flavor_name", "TestFlavor")
+ self.options = self.scenario_cfg["options"]
+ self.name_or_id = self.options["name_or_id"]
+ self.filters = self.options.get("filters")
+ self.get_extra = self.options.get("get_extra", True)
+ self.shade_client = openstack_utils.get_shade_client()
+
self.setup_done = False
def setup(self):
@@ -41,14 +42,18 @@ class GetFlavor(base.Scenario):
if not self.setup_done:
self.setup()
- LOG.info("Querying flavor: %s", self.flavor_name)
- flavor = op_utils.get_flavor_by_name(self.flavor_name)
- if flavor:
- LOG.info("Get flavor successful!")
- values = [self._change_obj_to_dict(flavor)]
- else:
- LOG.info("Get flavor: no flavor matched!")
- values = []
+ LOG.info("Querying flavor: %s", self.name_or_id)
+ flavor = openstack_utils.get_flavor(
+ self.shade_client, self.name_or_id, filters=self.filters,
+ get_extra=self.get_extra)
+
+ if not flavor:
+ result.update({"get_flavor": 0})
+ LOG.error("Get flavor failed!")
+ raise exceptions.ScenarioGetFlavorError
- keys = self.scenario_cfg.get('output', '').split()
+ result.update({"get_flavor": 1})
+ LOG.info("Get flavor successful!")
+ values = [flavor]
+ keys = self.scenario_cfg.get("output", '').split()
return self._push_to_outputs(keys, values)
diff --git a/yardstick/benchmark/scenarios/lib/get_server.py b/yardstick/benchmark/scenarios/lib/get_server.py
index fcf47c80d..f65fa9ebf 100644
--- a/yardstick/benchmark/scenarios/lib/get_server.py
+++ b/yardstick/benchmark/scenarios/lib/get_server.py
@@ -6,14 +6,11 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-
-from __future__ import print_function
-from __future__ import absolute_import
-
import logging
from yardstick.benchmark.scenarios import base
-import yardstick.common.openstack_utils as op_utils
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
LOG = logging.getLogger(__name__)
@@ -21,63 +18,58 @@ LOG = logging.getLogger(__name__)
class GetServer(base.Scenario):
"""Get a server instance
- Parameters
- server_id - ID of the server
- type: string
- unit: N/A
- default: null
- server_name - name of the server
- type: string
- unit: N/A
- default: null
-
- Either server_id or server_name is required.
-
- Outputs
+ Parameters:
+ name_or_id - Name or ID of the server
+ type: string
+ filters - meta data to use for further filtering
+ type: dict
+ detailed: Whether or not to add detailed additional information.
+ type: bool
+ bare: Whether to skip adding any additional information to the server
+ record.
+ type: bool
+ all_projects: Whether to get server from all projects or just the current
+ auth scoped project.
+ type: bool
+
+ Outputs:
rc - response code of getting server instance
- 0 for success
- 1 for failure
+ 1 for success
+ 0 for failure
type: int
- unit: N/A
server - instance of the server
type: dict
- unit: N/A
+
"""
- __scenario_type__ = "GetServer"
+ __scenario_type__ = 'GetServer'
def __init__(self, scenario_cfg, context_cfg):
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
- self.options = self.scenario_cfg.get('options', {})
+ self.options = self.scenario_cfg['options']
- self.server_id = self.options.get("server_id")
- if self.server_id:
- LOG.debug('Server id is %s', self.server_id)
+ self.server_name_or_id = self.options.get('name_or_id')
+ self.filters = self.options.get('filters')
+ self.detailed = self.options.get('detailed', False)
+ self.bare = self.options.get('bare', False)
- default_name = self.scenario_cfg.get('host',
- self.scenario_cfg.get('target'))
- self.server_name = self.options.get('server_name', default_name)
- if self.server_name:
- LOG.debug('Server name is %s', self.server_name)
-
- self.nova_client = op_utils.get_nova_client()
+ self.shade_client = openstack_utils.get_shade_client()
def run(self, result):
"""execute the test"""
- if self.server_id:
- server = self.nova_client.servers.get(self.server_id)
- else:
- server = op_utils.get_server_by_name(self.server_name)
-
- keys = self.scenario_cfg.get('output', '').split()
+ server = openstack_utils.get_server(
+ self.shade_client, name_or_id=self.server_name_or_id,
+ filters=self.filters, detailed=self.detailed, bare=self.bare)
- if server:
- LOG.info("Get server successful!")
- values = [0, self._change_obj_to_dict(server)]
- else:
- LOG.info("Get server failed!")
- values = [1]
+ if not server:
+ result.update({'get_server': 0})
+ LOG.error('Get Server failed!')
+ raise exceptions.ScenarioGetServerError
+ result.update({'get_server': 1})
+ LOG.info('Get Server successful!')
+ keys = self.scenario_cfg.get('output', '').split()
+ values = [server]
return self._push_to_outputs(keys, values)
diff --git a/yardstick/benchmark/scenarios/networking/vnf_generic.py b/yardstick/benchmark/scenarios/networking/vnf_generic.py
index be2fa3f3b..78f866e25 100644
--- a/yardstick/benchmark/scenarios/networking/vnf_generic.py
+++ b/yardstick/benchmark/scenarios/networking/vnf_generic.py
@@ -441,7 +441,7 @@ class NetworkServiceTestCase(scenario_base.Scenario):
traffic_gen.listen_traffic(self.traffic_profile)
# register collector with yardstick for KPI collection.
- self.collector = Collector(self.vnfs, self.context_cfg["nodes"], self.traffic_profile)
+ self.collector = Collector(self.vnfs)
self.collector.start()
# Start the actual traffic
diff --git a/yardstick/common/ansible_common.py b/yardstick/common/ansible_common.py
index 38d2dd7c2..ca5a110e2 100644
--- a/yardstick/common/ansible_common.py
+++ b/yardstick/common/ansible_common.py
@@ -514,7 +514,7 @@ class AnsibleCommon(object):
parser.add_section('defaults')
parser.set('defaults', 'host_key_checking', 'False')
- cfg_path = os.path.join(directory, 'setup.cfg')
+ cfg_path = os.path.join(directory, 'ansible.cfg')
with open(cfg_path, 'w') as f:
parser.write(f)
diff --git a/yardstick/common/constants.py b/yardstick/common/constants.py
index 153bd4bf4..8640afbae 100644
--- a/yardstick/common/constants.py
+++ b/yardstick/common/constants.py
@@ -152,3 +152,6 @@ IS_PUBLIC = 'is_public'
# general
TESTCASE_PRE = 'opnfv_yardstick_'
TESTSUITE_PRE = 'opnfv_'
+
+# OpenStack cloud default config parameters
+OS_CLOUD_DEFAULT_CONFIG = {'verify': False}
diff --git a/yardstick/common/exceptions.py b/yardstick/common/exceptions.py
index ec21c335b..04920943d 100644
--- a/yardstick/common/exceptions.py
+++ b/yardstick/common/exceptions.py
@@ -54,6 +54,10 @@ class YardstickException(Exception):
return False
+class ResourceCommandError(YardstickException):
+    message = 'Command: "%(command)s" failed, stderr: "%(stderr)s"'
+
+
class FunctionNotImplemented(YardstickException):
message = ('The function "%(function_name)s" is not implemented in '
'"%(class_name)" class.')
@@ -148,6 +152,14 @@ class TaskRenderError(YardstickException):
message = 'Failed to render template:\n%(input_task)s'
+class TimerTimeout(YardstickException):
+ message = 'Timer timeout expired, %(timeout)s seconds'
+
+
+class WaitTimeout(YardstickException):
+ message = 'Wait timeout while waiting for condition'
+
+
class ScenarioCreateNetworkError(YardstickException):
message = 'Create Neutron Network Scenario failed'
@@ -190,3 +202,31 @@ class ScenarioCreateSecurityGroupError(YardstickException):
class ScenarioDeleteNetworkError(YardstickException):
message = 'Delete Neutron Network Scenario failed'
+
+
+class ScenarioCreateServerError(YardstickException):
+ message = 'Nova Create Server Scenario failed'
+
+
+class ScenarioDeleteServerError(YardstickException):
+ message = 'Delete Server Scenario failed'
+
+
+class ScenarioCreateKeypairError(YardstickException):
+ message = 'Nova Create Keypair Scenario failed'
+
+
+class ScenarioDeleteKeypairError(YardstickException):
+ message = 'Nova Delete Keypair Scenario failed'
+
+
+class ScenarioAttachVolumeError(YardstickException):
+ message = 'Nova Attach Volume Scenario failed'
+
+
+class ScenarioGetServerError(YardstickException):
+ message = 'Nova Get Server Scenario failed'
+
+
+class ScenarioGetFlavorError(YardstickException):
+    message = 'Nova Get Flavor Scenario failed'
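
Each of these exceptions only declares a printf-style message template with named placeholders; assuming YardstickException substitutes keyword arguments into message the way oslo-style exception bases do, the pattern works roughly like this self-contained sketch:

    class SketchBaseException(Exception):
        """Stand-in for YardstickException: formats 'message' with kwargs."""
        message = "An unknown exception occurred."

        def __init__(self, **kwargs):
            super(SketchBaseException, self).__init__(self.message % kwargs)

    class ResourceCommandErrorSketch(SketchBaseException):
        message = 'Command: "%(command)s" failed, stderr: "%(stderr)s"'

    try:
        raise ResourceCommandErrorSketch(command="ovs-vsctl show", stderr="not found")
    except ResourceCommandErrorSketch as err:
        print(err)     # Command: "ovs-vsctl show" failed, stderr: "not found"
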
diff --git a/yardstick/common/openstack_utils.py b/yardstick/common/openstack_utils.py
index 0d6afc54a..5a83ddb24 100644
--- a/yardstick/common/openstack_utils.py
+++ b/yardstick/common/openstack_utils.py
@@ -7,20 +7,21 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+import copy
+import logging
import os
-import time
import sys
-import logging
+from cinderclient import client as cinderclient
+from novaclient import client as novaclient
+from glanceclient import client as glanceclient
from keystoneauth1 import loading
from keystoneauth1 import session
+from neutronclient.neutron import client as neutronclient
import shade
from shade import exc
-from cinderclient import client as cinderclient
-from novaclient import client as novaclient
-from glanceclient import client as glanceclient
-from neutronclient.neutron import client as neutronclient
+from yardstick.common import constants
log = logging.getLogger(__name__)
@@ -156,204 +157,205 @@ def get_glance_client(): # pragma: no cover
return glanceclient.Client(get_glance_client_version(), session=sess)
-def get_shade_client():
- return shade.openstack_cloud()
+def get_shade_client(**os_cloud_config):
+ """Get Shade OpenStack cloud client
+
+ By default, the input parameters given to "shade.openstack_cloud" method
+ are stored in "constants.OS_CLOUD_DEFAULT_CONFIG". The input parameters
+ passed in this function, "os_cloud_config", will overwrite the default
+ ones.
+
+ :param os_cloud_config: (kwargs) input arguments for
+ "shade.openstack_cloud" method.
+ :return: ``shade.OpenStackCloud`` object.
+ """
+ params = copy.deepcopy(constants.OS_CLOUD_DEFAULT_CONFIG)
+ params.update(os_cloud_config)
+ return shade.openstack_cloud(**params)
# *********************************************
# NOVA
# *********************************************
-def get_instances(nova_client):
- try:
- return nova_client.servers.list(search_opts={'all_tenants': 1})
- except Exception: # pylint: disable=broad-except
- log.exception("Error [get_instances(nova_client)]")
-
-
-def get_instance_status(nova_client, instance): # pragma: no cover
- try:
- return nova_client.servers.get(instance.id).status
- except Exception: # pylint: disable=broad-except
- log.exception("Error [get_instance_status(nova_client)]")
-
-
-def get_instance_by_name(nova_client, instance_name): # pragma: no cover
- try:
- return nova_client.servers.find(name=instance_name)
- except Exception: # pylint: disable=broad-except
- log.exception("Error [get_instance_by_name(nova_client, '%s')]",
- instance_name)
-
-
-def get_aggregates(nova_client): # pragma: no cover
- try:
- return nova_client.aggregates.list()
- except Exception: # pylint: disable=broad-except
- log.exception("Error [get_aggregates(nova_client)]")
+def create_keypair(shade_client, name, public_key=None):
+ """Create a new keypair.
+ :param name: Name of the keypair being created.
+ :param public_key: Public key for the new keypair.
-def get_availability_zones(nova_client): # pragma: no cover
- try:
- return nova_client.availability_zones.list()
- except Exception: # pylint: disable=broad-except
- log.exception("Error [get_availability_zones(nova_client)]")
-
-
-def get_availability_zone_names(nova_client): # pragma: no cover
+ :return: Created keypair.
+ """
try:
- return [az.zoneName for az in get_availability_zones(nova_client)]
- except Exception: # pylint: disable=broad-except
- log.exception("Error [get_availability_zone_names(nova_client)]")
+ return shade_client.create_keypair(name, public_key=public_key)
+ except exc.OpenStackCloudException as o_exc:
+ log.error("Error [create_keypair(shade_client)]. "
+ "Exception message, '%s'", o_exc.orig_message)
-def create_aggregate(nova_client, aggregate_name, av_zone): # pragma: no cover
+def create_instance_and_wait_for_active(shade_client, name, image,
+ flavor, auto_ip=True, ips=None,
+ ip_pool=None, root_volume=None,
+ terminate_volume=False, wait=True,
+ timeout=180, reuse_ips=True,
+ network=None, boot_from_volume=False,
+ volume_size='20', boot_volume=None,
+ volumes=None, nat_destination=None,
+ **kwargs):
+ """Create a virtual server instance.
+
+ :param name:(string) Name of the server.
+ :param image:(dict) Image dict, name or ID to boot with. Image is required
+ unless boot_volume is given.
+ :param flavor:(dict) Flavor dict, name or ID to boot onto.
+ :param auto_ip: Whether to take actions to find a routable IP for
+ the server.
+ :param ips: List of IPs to attach to the server.
+ :param ip_pool:(string) Name of the network or floating IP pool to get an
+ address from.
+ :param root_volume:(string) Name or ID of a volume to boot from.
+ (defaults to None - deprecated, use boot_volume)
+ :param boot_volume:(string) Name or ID of a volume to boot from.
+ :param terminate_volume:(bool) If booting from a volume, whether it should
+ be deleted when the server is destroyed.
+ :param volumes:(optional) A list of volumes to attach to the server.
+ :param wait:(optional) Wait for the address to appear as assigned to the server.
+    :param timeout: Seconds to wait, defaults to 180.
+    :param reuse_ips:(bool) Whether to attempt to reuse pre-existing
+ floating ips should a floating IP be needed.
+ :param network:(dict) Network dict or name or ID to attach the server to.
+                    Mutually exclusive with the nics parameter. Can also be
+ a list of network names or IDs or network dicts.
+ :param boot_from_volume:(bool) Whether to boot from volume. 'boot_volume'
+ implies True, but boot_from_volume=True with
+ no boot_volume is valid and will create a
+ volume from the image and use that.
+ :param volume_size: When booting an image from volume, how big should
+ the created volume be?
+ :param nat_destination: Which network should a created floating IP
+ be attached to, if it's not possible to infer from
+ the cloud's configuration.
+ :param meta:(optional) A dict of arbitrary key/value metadata to store for
+ this server. Both keys and values must be <=255 characters.
+ :param reservation_id: A UUID for the set of servers being requested.
+ :param min_count:(optional extension) The minimum number of servers to
+ launch.
+ :param max_count:(optional extension) The maximum number of servers to
+ launch.
+ :param security_groups: A list of security group names.
+ :param userdata: User data to pass to be exposed by the metadata server
+ this can be a file type object as well or a string.
+ :param key_name:(optional extension) Name of previously created keypair to
+ inject into the instance.
+ :param availability_zone: Name of the availability zone for instance
+ placement.
+ :param block_device_mapping:(optional) A dict of block device mappings for
+ this server.
+ :param block_device_mapping_v2:(optional) A dict of block device mappings
+ for this server.
+ :param nics:(optional extension) An ordered list of nics to be added to
+ this server, with information about connected networks, fixed
+ IPs, port etc.
+ :param scheduler_hints:(optional extension) Arbitrary key-value pairs
+ specified by the client to help boot an instance.
+ :param config_drive:(optional extension) Value for config drive either
+ boolean, or volume-id.
+ :param disk_config:(optional extension) Control how the disk is partitioned
+ when the server is created. Possible values are 'AUTO'
+ or 'MANUAL'.
+ :param admin_pass:(optional extension) Add a user supplied admin password.
+
+ :returns: The created server.
+ """
try:
- nova_client.aggregates.create(aggregate_name, av_zone)
- except Exception: # pylint: disable=broad-except
- log.exception("Error [create_aggregate(nova_client, %s, %s)]",
- aggregate_name, av_zone)
- return False
- else:
- return True
+ return shade_client.create_server(
+ name, image, flavor, auto_ip=auto_ip, ips=ips, ip_pool=ip_pool,
+ root_volume=root_volume, terminate_volume=terminate_volume,
+ wait=wait, timeout=timeout, reuse_ips=reuse_ips, network=network,
+ boot_from_volume=boot_from_volume, volume_size=volume_size,
+ boot_volume=boot_volume, volumes=volumes,
+ nat_destination=nat_destination, **kwargs)
+ except exc.OpenStackCloudException as o_exc:
+ log.error("Error [create_instance(shade_client)]. "
+ "Exception message, '%s'", o_exc.orig_message)
-def get_aggregate_id(nova_client, aggregate_name): # pragma: no cover
- try:
- aggregates = get_aggregates(nova_client)
- _id = next((ag.id for ag in aggregates if ag.name == aggregate_name))
- except Exception: # pylint: disable=broad-except
- log.exception("Error [get_aggregate_id(nova_client, %s)]",
- aggregate_name)
- else:
- return _id
+def attach_volume_to_server(shade_client, server_name_or_id, volume_name_or_id,
+ device=None, wait=True, timeout=None):
+ """Attach a volume to a server.
+ This will attach a volume, described by the passed in volume
+ dict, to the server described by the passed in server dict on the named
+ device on the server.
-def add_host_to_aggregate(nova_client, aggregate_name,
- compute_host): # pragma: no cover
- try:
- aggregate_id = get_aggregate_id(nova_client, aggregate_name)
- nova_client.aggregates.add_host(aggregate_id, compute_host)
- except Exception: # pylint: disable=broad-except
- log.exception("Error [add_host_to_aggregate(nova_client, %s, %s)]",
- aggregate_name, compute_host)
- return False
- else:
- return True
+ If the volume is already attached to the server, or generally not
+ available, then an exception is raised. To re-attach to a server,
+ but under a different device, the user must detach it first.
+ :param server_name_or_id:(string) The server name or id to attach to.
+ :param volume_name_or_id:(string) The volume name or id to attach.
+ :param device:(string) The device name where the volume will attach.
+ :param wait:(bool) If true, waits for volume to be attached.
+ :param timeout: Seconds to wait for volume attachment. None is forever.
-def create_aggregate_with_host(nova_client, aggregate_name, av_zone,
- compute_host): # pragma: no cover
+    :returns: True if attached successfully, False otherwise.
+ """
try:
- create_aggregate(nova_client, aggregate_name, av_zone)
- add_host_to_aggregate(nova_client, aggregate_name, compute_host)
- except Exception: # pylint: disable=broad-except
- log.exception("Error [create_aggregate_with_host("
- "nova_client, %s, %s, %s)]",
- aggregate_name, av_zone, compute_host)
- return False
- else:
+ server = shade_client.get_server(name_or_id=server_name_or_id)
+ volume = shade_client.get_volume(volume_name_or_id)
+ shade_client.attach_volume(
+ server, volume, device=device, wait=wait, timeout=timeout)
return True
-
-
-def create_keypair(name, key_path=None): # pragma: no cover
- try:
- with open(key_path) as fpubkey:
- keypair = get_nova_client().keypairs.create(
- name=name, public_key=fpubkey.read())
- return keypair
- except Exception: # pylint: disable=broad-except
- log.exception("Error [create_keypair(nova_client)]")
-
-
-def create_instance(json_body): # pragma: no cover
- try:
- return get_nova_client().servers.create(**json_body)
- except Exception: # pylint: disable=broad-except
- log.exception("Error create instance failed")
- return None
-
-
-def create_instance_and_wait_for_active(json_body): # pragma: no cover
- SLEEP = 3
- VM_BOOT_TIMEOUT = 180
- nova_client = get_nova_client()
- instance = create_instance(json_body)
- for _ in range(int(VM_BOOT_TIMEOUT / SLEEP)):
- status = get_instance_status(nova_client, instance)
- if status.lower() == "active":
- return instance
- elif status.lower() == "error":
- log.error("The instance went to ERROR status.")
- return None
- time.sleep(SLEEP)
- log.error("Timeout booting the instance.")
- return None
-
-
-def attach_server_volume(server_id, volume_id,
- device=None): # pragma: no cover
- try:
- get_nova_client().volumes.create_server_volume(server_id,
- volume_id, device)
- except Exception: # pylint: disable=broad-except
- log.exception("Error [attach_server_volume(nova_client, '%s', '%s')]",
- server_id, volume_id)
+ except exc.OpenStackCloudException as o_exc:
+ log.error("Error [attach_volume_to_server(shade_client)]. "
+ "Exception message: %s", o_exc.orig_message)
return False
- else:
- return True
-def delete_instance(nova_client, instance_id): # pragma: no cover
- try:
- nova_client.servers.force_delete(instance_id)
- except Exception: # pylint: disable=broad-except
- log.exception("Error [delete_instance(nova_client, '%s')]",
- instance_id)
- return False
- else:
- return True
-
+def delete_instance(shade_client, name_or_id, wait=False, timeout=180,
+ delete_ips=False, delete_ip_retry=1):
+ """Delete a server instance.
-def remove_host_from_aggregate(nova_client, aggregate_name,
- compute_host): # pragma: no cover
+ :param name_or_id: name or ID of the server to delete
+ :param wait:(bool) If true, waits for server to be deleted.
+ :param timeout:(int) Seconds to wait for server deletion.
+ :param delete_ips:(bool) If true, deletes any floating IPs associated with
+ the instance.
+ :param delete_ip_retry:(int) Number of times to retry deleting
+ any floating ips, should the first try be
+ unsuccessful.
+ :returns: True if delete succeeded, False otherwise.
+ """
try:
- aggregate_id = get_aggregate_id(nova_client, aggregate_name)
- nova_client.aggregates.remove_host(aggregate_id, compute_host)
- except Exception: # pylint: disable=broad-except
- log.exception("Error remove_host_from_aggregate(nova_client, %s, %s)",
- aggregate_name, compute_host)
+ return shade_client.delete_server(
+ name_or_id, wait=wait, timeout=timeout, delete_ips=delete_ips,
+ delete_ip_retry=delete_ip_retry)
+ except exc.OpenStackCloudException as o_exc:
+ log.error("Error [delete_instance(shade_client, '%s')]. "
+ "Exception message: %s", name_or_id,
+ o_exc.orig_message)
return False
- else:
- return True
-
-def remove_hosts_from_aggregate(nova_client,
- aggregate_name): # pragma: no cover
- aggregate_id = get_aggregate_id(nova_client, aggregate_name)
- hosts = nova_client.aggregates.get(aggregate_id).hosts
- assert(
- all(remove_host_from_aggregate(nova_client, aggregate_name, host)
- for host in hosts))
+def get_server(shade_client, name_or_id=None, filters=None, detailed=False,
+ bare=False):
+ """Get a server by name or ID.
-def delete_aggregate(nova_client, aggregate_name): # pragma: no cover
- try:
- remove_hosts_from_aggregate(nova_client, aggregate_name)
- nova_client.aggregates.delete(aggregate_name)
- except Exception: # pylint: disable=broad-except
- log.exception("Error [delete_aggregate(nova_client, %s)]",
- aggregate_name)
- return False
- else:
- return True
+ :param name_or_id: Name or ID of the server.
+ :param filters:(dict) A dictionary of meta data to use for further
+ filtering.
+ :param detailed:(bool) Whether or not to add detailed additional
+ information.
+ :param bare:(bool) Whether to skip adding any additional information to the
+ server record.
-
-def get_server_by_name(name): # pragma: no cover
+ :returns: A server ``munch.Munch`` or None if no matching server is found.
+ """
try:
- return get_nova_client().servers.list(search_opts={'name': name})[0]
- except IndexError:
- log.exception('Failed to get nova client')
- raise
+ return shade_client.get_server(name_or_id=name_or_id, filters=filters,
+ detailed=detailed, bare=bare)
+ except exc.OpenStackCloudException as o_exc:
+ log.error("Error [get_server(shade_client, '%s')]. "
+ "Exception message: %s", name_or_id, o_exc.orig_message)
def create_flavor(name, ram, vcpus, disk, **kwargs): # pragma: no cover
@@ -366,14 +368,6 @@ def create_flavor(name, ram, vcpus, disk, **kwargs): # pragma: no cover
return None
-def get_image_by_name(name): # pragma: no cover
- images = get_nova_client().images.list()
- try:
- return next((a for a in images if a.name == name))
- except StopIteration:
- log.exception('No image matched')
-
-
def get_flavor_id(nova_client, flavor_name): # pragma: no cover
flavors = nova_client.flavors.list(detailed=True)
flavor_id = ''
@@ -384,27 +378,22 @@ def get_flavor_id(nova_client, flavor_name): # pragma: no cover
return flavor_id
-def get_flavor_by_name(name): # pragma: no cover
- flavors = get_nova_client().flavors.list()
- try:
- return next((a for a in flavors if a.name == name))
- except StopIteration:
- log.exception('No flavor matched')
-
+def get_flavor(shade_client, name_or_id, filters=None, get_extra=True):
+ """Get a flavor by name or ID.
-def check_status(status, name, iterations, interval): # pragma: no cover
- for _ in range(iterations):
- try:
- server = get_server_by_name(name)
- except IndexError:
- log.error('Cannot found %s server', name)
- raise
+ :param name_or_id: Name or ID of the flavor.
+ :param filters: A dictionary of meta data to use for further filtering.
+ :param get_extra: Whether or not the list_flavors call should get the extra
+ flavor specs.
- if server.status == status:
- return True
-
- time.sleep(interval)
- return False
+ :returns: A flavor ``munch.Munch`` or None if no matching flavor is found.
+ """
+ try:
+ return shade_client.get_flavor(name_or_id, filters=filters,
+ get_extra=get_extra)
+ except exc.OpenStackCloudException as o_exc:
+ log.error("Error [get_flavor(shade_client, '%s')]. "
+ "Exception message: %s", name_or_id, o_exc.orig_message)
def delete_flavor(flavor_id): # pragma: no cover
@@ -417,12 +406,18 @@ def delete_flavor(flavor_id): # pragma: no cover
return True
-def delete_keypair(nova_client, key): # pragma: no cover
+def delete_keypair(shade_client, name):
+ """Delete a keypair.
+
+ :param name: Name of the keypair to delete.
+
+ :returns: True if delete succeeded, False otherwise.
+ """
try:
- nova_client.keypairs.delete(key=key)
- return True
- except Exception: # pylint: disable=broad-except
- log.exception("Error [delete_keypair(nova_client)]")
+ return shade_client.delete_keypair(name)
+ except exc.OpenStackCloudException as o_exc:
+        log.error("Error [delete_keypair(shade_client, '%s')]. "
+ "Exception message: %s", name, o_exc.orig_message)
return False
@@ -789,9 +784,8 @@ def list_images(shade_client=None):
# *********************************************
# CINDER
# *********************************************
-def get_volume_id(volume_name): # pragma: no cover
- volumes = get_cinder_client().volumes.list()
- return next((v.id for v in volumes if v.name == volume_name), None)
+def get_volume_id(shade_client, volume_name):
+ return shade_client.get_volume_id(volume_name)
def create_volume(cinder_client, volume_name, volume_size,
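A minimal usage sketch of the shade-based helpers introduced above (illustrative only; the server, flavor, keypair and volume names are assumptions):

    from yardstick.common import openstack_utils

    shade_client = openstack_utils.get_shade_client()
    server = openstack_utils.get_server(shade_client, 'yardstick-server')
    flavor = openstack_utils.get_flavor(shade_client, 'yardstick-flavor')
    if not openstack_utils.delete_keypair(shade_client, 'yardstick_key'):
        print('keypair deletion failed')
    volume_id = openstack_utils.get_volume_id(shade_client, 'yardstick-volume')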
diff --git a/yardstick/common/utils.py b/yardstick/common/utils.py
index 44cc92a7c..108ee17bc 100644
--- a/yardstick/common/utils.py
+++ b/yardstick/common/utils.py
@@ -23,9 +23,11 @@ import logging
import os
import random
import re
+import signal
import socket
import subprocess
import sys
+import time
import six
from flask import jsonify
@@ -34,6 +36,8 @@ from oslo_serialization import jsonutils
from oslo_utils import encodeutils
import yardstick
+from yardstick.common import exceptions
+
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
@@ -405,15 +409,24 @@ class ErrorClass(object):
class Timer(object):
- def __init__(self):
+ def __init__(self, timeout=None):
super(Timer, self).__init__()
self.start = self.delta = None
+ self._timeout = int(timeout) if timeout else None
+
+ def _timeout_handler(self, *args):
+ raise exceptions.TimerTimeout(timeout=self._timeout)
def __enter__(self):
self.start = datetime.datetime.now()
+ if self._timeout:
+ signal.signal(signal.SIGALRM, self._timeout_handler)
+ signal.alarm(self._timeout)
return self
def __exit__(self, *_):
+ if self._timeout:
+ signal.alarm(0)
self.delta = datetime.datetime.now() - self.start
def __getattr__(self, item):
@@ -460,3 +473,22 @@ def open_relative_file(path, task_path):
if e.errno == errno.ENOENT:
return open(os.path.join(task_path, path))
raise
+
+
+def wait_until_true(predicate, timeout=60, sleep=1, exception=None):
+ """Wait until callable predicate is evaluated as True
+
+ :param predicate: (func) callable deciding whether waiting should continue
+ :param timeout: (int) maximum time in seconds the function should wait
+ :param sleep: (int) polling interval for results, in seconds
+ :param exception: exception class to raise on timeout. If None is passed
+ (default), a WaitTimeout exception is raised.
+ """
+ try:
+ with Timer(timeout=timeout):
+ while not predicate():
+ time.sleep(sleep)
+ except exceptions.TimerTimeout:
+ if exception and issubclass(exception, Exception):
+ raise exception # pylint: disable=raising-bad-type
+ raise exceptions.WaitTimeout
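A minimal sketch of the new Timer timeout and wait_until_true helpers (illustrative; assumes yardstick is importable and a main-thread SIGALRM is available):

    import time

    from yardstick.common import exceptions, utils

    # Timer(timeout=...) arms SIGALRM and raises TimerTimeout when it fires.
    try:
        with utils.Timer(timeout=1):
            time.sleep(2)
    except exceptions.TimerTimeout:
        print('operation timed out')

    # wait_until_true() polls the predicate until it returns True or the
    # timeout expires (raising WaitTimeout, or the given exception class).
    start = time.time()
    utils.wait_until_true(lambda: time.time() - start > 2, timeout=10, sleep=1)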
diff --git a/yardstick/network_services/collector/subscriber.py b/yardstick/network_services/collector/subscriber.py
index 7e18302eb..322b3f5a2 100644
--- a/yardstick/network_services/collector/subscriber.py
+++ b/yardstick/network_services/collector/subscriber.py
@@ -14,42 +14,29 @@
"""This module implements stub for publishing results in yardstick format."""
import logging
-from yardstick.network_services.nfvi.resource import ResourceProfile
-from yardstick.network_services.utils import get_nsb_option
-
LOG = logging.getLogger(__name__)
class Collector(object):
"""Class that handles dictionary of results in yardstick-plot format."""
- def __init__(self, vnfs, nodes, traffic_profile, timeout=3600):
+ def __init__(self, vnfs):
super(Collector, self).__init__()
- self.traffic_profile = traffic_profile
self.vnfs = vnfs
- self.nodes = nodes
- self.timeout = timeout
- self.bin_path = get_nsb_option('bin_path', '')
- self.resource_profiles = {node_name: ResourceProfile.make_from_node(node, self.timeout)
- for node_name, node in self.nodes.items()
- if node.get("collectd")}
def start(self):
- """Nothing to do, yet"""
- for resource in self.resource_profiles.values():
- resource.initiate_systemagent(self.bin_path)
- resource.start()
- resource.amqp_process_for_nfvi_kpi()
+ for vnf in self.vnfs:
+ vnf.start_collect()
def stop(self):
- """Nothing to do, yet"""
- for resource in self.resource_profiles.values():
- resource.stop()
+ for vnf in self.vnfs:
+ vnf.stop_collect()
def get_kpi(self):
"""Returns dictionary of results in yardstick-plot format
- :return:
+ :return: (dict) dictionary of kpis collected from the VNFs;
+ the keys are the names of the VNFs.
"""
results = {}
for vnf in self.vnfs:
@@ -58,17 +45,4 @@ class Collector(object):
LOG.debug("collect KPI for %s", vnf.name)
results[vnf.name] = vnf.collect_kpi()
- for node_name, resource in self.resource_profiles.items():
- # Result example:
- # {"VNF1: { "tput" : [1000, 999] }, "VNF2": { "latency": 100 }}
- LOG.debug("collect KPI for %s", node_name)
- if resource.check_if_system_agent_running("collectd")[0] != 0:
- continue
-
- try:
- results[node_name] = {"core": resource.amqp_collect_nfvi_kpi()}
- LOG.debug("%s collect KPIs %s", node_name, results[node_name]['core'])
- # NOTE(elfoley): catch a more specific error
- except Exception as exc: # pylint: disable=broad-except
- LOG.exception(exc)
return results
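An illustrative sketch of how the slimmed-down Collector is now driven, using a mock in place of a real VNF wrapper:

    import mock

    from yardstick.network_services.collector.subscriber import Collector

    vnf = mock.Mock()
    vnf.name = 'vnf__0'
    vnf.collect_kpi.return_value = {'throughput': 1000}

    collector = Collector([vnf])
    collector.start()            # calls start_collect() on every VNF
    print(collector.get_kpi())   # {'vnf__0': {'throughput': 1000}}
    collector.stop()             # calls stop_collect() on every VNF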
diff --git a/yardstick/network_services/libs/ixia_libs/IxNet/IxNet.py b/yardstick/network_services/libs/ixia_libs/IxNet/IxNet.py
index 70ce4ff03..c538ceeba 100644
--- a/yardstick/network_services/libs/ixia_libs/IxNet/IxNet.py
+++ b/yardstick/network_services/libs/ixia_libs/IxNet/IxNet.py
@@ -12,13 +12,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from __future__ import absolute_import
-from __future__ import print_function
-import sys
import logging
import re
from itertools import product
+import IxNetwork
+
log = logging.getLogger(__name__)
@@ -135,7 +134,6 @@ class IxNextgen(object):
port.append(port0)
cfg = {
- 'py_lib_path': tg_cfg["mgmt-interface"]["tg-config"]["py_lib_path"],
'machine': tg_cfg["mgmt-interface"]["ip"],
'port': tg_cfg["mgmt-interface"]["tg-config"]["tcl_port"],
'chassis': tg_cfg["mgmt-interface"]["tg-config"]["ixchassis"],
@@ -186,7 +184,7 @@ class IxNextgen(object):
self.set_random_ip_multi_attribute(ip, seeds[1], fixed_bits, random_mask, l3_count)
def add_ip_header(self, params, version):
- for it, ep, i in self.iter_over_get_lists('/traffic', 'trafficItem', "configElement", 1):
+ for _, ep, i in self.iter_over_get_lists('/traffic', 'trafficItem', "configElement", 1):
iter1 = (v['outer_l3'] for v in params.values() if str(v['id']) == str(i))
try:
l3 = next(iter1, {})
@@ -194,21 +192,13 @@ class IxNextgen(object):
except (KeyError, IndexError):
continue
- for ip, ip_bits, _ in self.iter_over_get_lists(ep, 'stack', 'field'):
+ for _, ip_bits, _ in self.iter_over_get_lists(ep, 'stack', 'field'):
self.set_random_ip_multi_attributes(ip_bits, version, seeds, l3)
self.ixnet.commit()
def _connect(self, tg_cfg):
self._cfg = self.get_config(tg_cfg)
-
- sys.path.append(self._cfg["py_lib_path"])
- # Import IxNetwork after getting ixia lib path
- try:
- import IxNetwork
- except ImportError:
- raise
-
self.ixnet = IxNetwork.IxNet()
machine = self._cfg['machine']
@@ -292,7 +282,7 @@ class IxNextgen(object):
self.update_ether_multi_attribute(ether, str(l2.get('srcmac', "00:00:00:00:00:01")))
def ix_update_ether(self, params):
- for ti, ep, index in self.iter_over_get_lists('/traffic', 'trafficItem',
+ for _, ep, index in self.iter_over_get_lists('/traffic', 'trafficItem',
"configElement", 1):
iter1 = (v['outer_l2'] for v in params.values() if str(v['id']) == str(index))
try:
@@ -300,7 +290,7 @@ class IxNextgen(object):
except KeyError:
continue
- for ip, ether, _ in self.iter_over_get_lists(ep, 'stack', 'field'):
+ for _, ether, _ in self.iter_over_get_lists(ep, 'stack', 'field'):
self.update_ether_multi_attributes(ether, l2)
self.ixnet.commit()
diff --git a/yardstick/network_services/nfvi/resource.py b/yardstick/network_services/nfvi/resource.py
index dc5c46a86..0c0bf223a 100644
--- a/yardstick/network_services/nfvi/resource.py
+++ b/yardstick/network_services/nfvi/resource.py
@@ -27,6 +27,7 @@ from oslo_config import cfg
from oslo_utils.encodeutils import safe_decode
from yardstick import ssh
+from yardstick.common.exceptions import ResourceCommandError
from yardstick.common.task_template import finalize_for_yaml
from yardstick.common.utils import validate_non_string_sequence
from yardstick.network_services.nfvi.collectd import AmqpConsumer
@@ -249,45 +250,46 @@ class ResourceProfile(object):
if status != 0:
LOG.error("cannot find OVS socket %s", socket_path)
+ def _start_rabbitmq(self, connection):
+ # Reset amqp queue
+ LOG.debug("reset and setup amqp to collect data from collectd")
+ # ensure collectd.conf.d exists to avoid error/warning
+ cmd_list = ["sudo mkdir -p /etc/collectd/collectd.conf.d",
+ "sudo service rabbitmq-server restart",
+ "sudo rabbitmqctl stop_app",
+ "sudo rabbitmqctl reset",
+ "sudo rabbitmqctl start_app",
+ "sudo rabbitmqctl add_user admin admin",
+ "sudo rabbitmqctl authenticate_user admin admin",
+ "sudo rabbitmqctl set_permissions -p / admin '.*' '.*' '.*'"
+ ]
+ for cmd in cmd_list:
+ exit_status, stdout, stderr = connection.execute(cmd)
+ if exit_status != 0:
+ raise ResourceCommandError(command=cmd, stderr=stderr)
+
+ # check stdout for "sudo rabbitmqctl status" command
+ cmd = "sudo rabbitmqctl status"
+ _, stdout, stderr = connection.execute(cmd)
+ if not re.search("RabbitMQ", stdout):
+ LOG.error("rabbitmqctl status don't have RabbitMQ in running apps")
+ raise ResourceCommandError(command=cmd, stderr=stderr)
+
def _start_collectd(self, connection, bin_path):
LOG.debug("Starting collectd to collect NFVi stats")
- connection.execute('sudo pkill -x -9 collectd')
collectd_path = os.path.join(bin_path, "collectd", "sbin", "collectd")
config_file_path = os.path.join(bin_path, "collectd", "etc")
+ self._prepare_collectd_conf(config_file_path)
+
+ connection.execute('sudo pkill -x -9 collectd')
exit_status = connection.execute("which %s > /dev/null 2>&1" % collectd_path)[0]
if exit_status != 0:
LOG.warning("%s is not present disabling", collectd_path)
- # disable auto-provisioning because it requires Internet access
- # collectd_installer = os.path.join(bin_path, "collectd.sh")
- # provision_tool(connection, collectd)
- # http_proxy = os.environ.get('http_proxy', '')
- # https_proxy = os.environ.get('https_proxy', '')
- # connection.execute("sudo %s '%s' '%s'" % (
- # collectd_installer, http_proxy, https_proxy))
return
if "ovs_stats" in self.plugins:
self._setup_ovs_stats(connection)
LOG.debug("Starting collectd to collect NFVi stats")
- # ensure collectd.conf.d exists to avoid error/warning
- connection.execute("sudo mkdir -p /etc/collectd/collectd.conf.d")
- self._prepare_collectd_conf(config_file_path)
-
- # Reset amqp queue
- LOG.debug("reset and setup amqp to collect data from collectd")
- connection.execute("sudo rm -rf /var/lib/rabbitmq/mnesia/rabbit*")
- connection.execute("sudo service rabbitmq-server start")
- connection.execute("sudo rabbitmqctl stop_app")
- connection.execute("sudo rabbitmqctl reset")
- connection.execute("sudo rabbitmqctl start_app")
- connection.execute("sudo service rabbitmq-server restart")
-
- LOG.debug("Creating admin user for rabbitmq in order to collect data from collectd")
- connection.execute("sudo rabbitmqctl delete_user guest")
- connection.execute("sudo rabbitmqctl add_user admin admin")
- connection.execute("sudo rabbitmqctl authenticate_user admin admin")
- connection.execute("sudo rabbitmqctl set_permissions -p / admin '.*' '.*' '.*'")
-
LOG.debug("Start collectd service..... %s second timeout", self.timeout)
# intel_pmu plug requires large numbers of files open, so try to set
# ulimit -n to a large value
@@ -299,9 +301,10 @@ class ResourceProfile(object):
""" Start system agent for NFVi collection on host """
if self.enable:
try:
+ self._start_rabbitmq(self.connection)
self._start_collectd(self.connection, bin_path)
- except Exception:
- LOG.exception("Exception during collectd start")
+ except ResourceCommandError as e:
+ LOG.exception("Exception during collectd and rabbitmq start: %s", str(e))
raise
def start(self):
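The command-checking pattern used by _start_rabbitmq can be summarised as follows (a sketch; `connection` stands for any SSH helper whose execute() returns (status, stdout, stderr)):

    from yardstick.common.exceptions import ResourceCommandError

    def run_or_raise(connection, cmd):
        # Fail fast on the first command that returns a non-zero exit status.
        exit_status, stdout, stderr = connection.execute(cmd)
        if exit_status != 0:
            raise ResourceCommandError(command=cmd, stderr=stderr)
        return stdout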
diff --git a/yardstick/network_services/vnf_generic/vnf/base.py b/yardstick/network_services/vnf_generic/vnf/base.py
index a776b0989..9ceac3167 100644
--- a/yardstick/network_services/vnf_generic/vnf/base.py
+++ b/yardstick/network_services/vnf_generic/vnf/base.py
@@ -195,6 +195,18 @@ class GenericVNF(object):
:return: {"kpi": value, "kpi2": value}
"""
+ @abc.abstractmethod
+ def start_collect(self):
+ """Start KPI collection
+ :return: None
+ """
+
+ @abc.abstractmethod
+ def stop_collect(self):
+ """Stop KPI collection
+ :return: None
+ """
+
@six.add_metaclass(abc.ABCMeta)
class GenericTrafficGen(GenericVNF):
@@ -254,3 +266,23 @@ class GenericTrafficGen(GenericVNF):
:return: True/False
"""
pass
+
+ def start_collect(self):
+ """Start KPI collection.
+
+ Traffic measurements are always collected during injection.
+
+ Optional.
+
+ :return: True/False
+ """
+ pass
+
+ def stop_collect(self):
+ """Stop KPI collection.
+
+ Optional.
+
+ :return: True/False
+ """
+ pass
diff --git a/yardstick/network_services/vnf_generic/vnf/sample_vnf.py b/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
index 77488c479..34b0260b4 100644
--- a/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
@@ -35,7 +35,6 @@ from yardstick.common import utils
from yardstick.network_services import constants
from yardstick.network_services.helpers.dpdkbindnic_helper import DpdkBindHelper, DpdkNode
from yardstick.network_services.helpers.samplevnf_helper import MultiPortConfig
-from yardstick.network_services.helpers.samplevnf_helper import PortPairs
from yardstick.network_services.nfvi.resource import ResourceProfile
from yardstick.network_services.utils import get_nsb_option
from yardstick.network_services.vnf_generic.vnf.base import GenericTrafficGen
@@ -60,6 +59,7 @@ class SetupEnvHelper(object):
self.vnfd_helper = vnfd_helper
self.ssh_helper = ssh_helper
self.scenario_helper = scenario_helper
+ self.collectd_options = {}
def build_config(self):
raise NotImplementedError
@@ -225,12 +225,6 @@ class DpdkVnfSetupEnvHelper(SetupEnvHelper):
if exit_status == 0:
return
- def get_collectd_options(self):
- options = self.scenario_helper.all_options.get("collectd", {})
- # override with specific node settings
- options.update(self.scenario_helper.options.get("collectd", {}))
- return options
-
def _setup_resources(self):
# what is this magic? how do we know which socket is for which port?
# what about quad-socket?
@@ -243,11 +237,11 @@ class DpdkVnfSetupEnvHelper(SetupEnvHelper):
# this won't work because we don't have DPDK port numbers yet
ports = sorted(self.vnfd_helper.interfaces, key=self.vnfd_helper.port_num)
port_names = (intf["name"] for intf in ports)
- collectd_options = self.get_collectd_options()
- plugins = collectd_options.get("plugins", {})
+ plugins = self.collectd_options.get("plugins", {})
+ interval = self.collectd_options.get("interval")
# we must set timeout to be the same as the VNF otherwise KPIs will die before VNF
return ResourceProfile(self.vnfd_helper.mgmt_interface, port_names=port_names,
- plugins=plugins, interval=collectd_options.get("interval"),
+ plugins=plugins, interval=interval,
timeout=self.scenario_helper.timeout)
def _check_interface_fields(self):
@@ -657,49 +651,6 @@ class SampleVNF(GenericVNF):
self.vnf_port_pairs = None
self._vnf_process = None
- def _build_ports(self):
- self._port_pairs = PortPairs(self.vnfd_helper.interfaces)
- self.networks = self._port_pairs.networks
- self.uplink_ports = self.vnfd_helper.port_nums(self._port_pairs.uplink_ports)
- self.downlink_ports = self.vnfd_helper.port_nums(self._port_pairs.downlink_ports)
- self.my_ports = self.vnfd_helper.port_nums(self._port_pairs.all_ports)
-
- def _get_route_data(self, route_index, route_type):
- route_iter = iter(self.vnfd_helper.vdu0.get('nd_route_tbl', []))
- for _ in range(route_index):
- next(route_iter, '')
- return next(route_iter, {}).get(route_type, '')
-
- def _get_port0localip6(self):
- return_value = self._get_route_data(0, 'network')
- LOG.info("_get_port0localip6 : %s", return_value)
- return return_value
-
- def _get_port1localip6(self):
- return_value = self._get_route_data(1, 'network')
- LOG.info("_get_port1localip6 : %s", return_value)
- return return_value
-
- def _get_port0prefixlen6(self):
- return_value = self._get_route_data(0, 'netmask')
- LOG.info("_get_port0prefixlen6 : %s", return_value)
- return return_value
-
- def _get_port1prefixlen6(self):
- return_value = self._get_route_data(1, 'netmask')
- LOG.info("_get_port1prefixlen6 : %s", return_value)
- return return_value
-
- def _get_port0gateway6(self):
- return_value = self._get_route_data(0, 'network')
- LOG.info("_get_port0gateway6 : %s", return_value)
- return return_value
-
- def _get_port1gateway6(self):
- return_value = self._get_route_data(1, 'network')
- LOG.info("_get_port1gateway6 : %s", return_value)
- return return_value
-
def _start_vnf(self):
self.queue_wrapper = QueueFileWrapper(self.q_in, self.q_out, self.VNF_PROMPT)
name = "{}-{}-{}".format(self.name, self.APP_NAME, os.getpid())
@@ -710,6 +661,7 @@ class SampleVNF(GenericVNF):
pass
def instantiate(self, scenario_cfg, context_cfg):
+ self._update_collectd_options(scenario_cfg, context_cfg)
self.scenario_helper.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
self.nfvi_context = Context.get_context_from_server(self.scenario_helper.nodes[self.name])
@@ -721,6 +673,54 @@ class SampleVNF(GenericVNF):
self.resource_helper.setup()
self._start_vnf()
+ def _update_collectd_options(self, scenario_cfg, context_cfg):
+ """Update collectd configuration options
+
+ This function retrieves all the collectd options contained in the test case
+ definition and builds a single dictionary combining them. The following
+ fragment represents a test case with the collectd options and their
+ priorities (1 highest, 3 lowest):
+ ---
+ schema: yardstick:task:0.1
+ scenarios:
+ - type: NSPerf
+ nodes:
+ tg__0: trafficgen_1.yardstick
+ vnf__0: vnf.yardstick
+ options:
+ collectd:
+ <options> # COLLECTD priority 3
+ vnf__0:
+ collectd:
+ plugins:
+ load
+ <options> # COLLECTD priority 2
+ context:
+ type: Node
+ name: yardstick
+ nfvi_type: baremetal
+ file: /etc/yardstick/nodes/pod_ixia.yaml # COLLECTD priority 1
+ """
+ scenario_options = scenario_cfg.get('options', {})
+ generic_options = scenario_options.get('collectd', {})
+ scenario_node_options = scenario_options.get(self.name, {})\
+ .get('collectd', {})
+ context_node_options = context_cfg.get('nodes', {})\
+ .get(self.name, {}).get('collectd', {})
+
+ options = generic_options
+ self._update_options(options, scenario_node_options)
+ self._update_options(options, context_node_options)
+
+ self.setup_helper.collectd_options = options
+
+ def _update_options(self, options, additional_options):
+ """Update collectd options and plugins dictionary"""
+ for k, v in additional_options.items():
+ if isinstance(v, dict) and k in options:
+ options[k].update(v)
+ else:
+ options[k] = v
+
def wait_for_instantiate(self):
buf = []
time.sleep(self.WAIT_TIME) # Give some time for config to load
@@ -736,7 +736,6 @@ class SampleVNF(GenericVNF):
LOG.info("%s VNF is up and running.", self.APP_NAME)
self._vnf_up_post()
self.queue_wrapper.clear()
- self.resource_helper.start_collect()
return self._vnf_process.exitcode
if "PANIC" in message:
@@ -749,6 +748,12 @@ class SampleVNF(GenericVNF):
# by other VNF output
self.q_in.put('\r\n')
+ def start_collect(self):
+ self.resource_helper.start_collect()
+
+ def stop_collect(self):
+ self.resource_helper.stop_collect()
+
def _build_run_kwargs(self):
self.run_kwargs = {
'stdin': self.queue_wrapper,
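A small sketch of the option merge performed by _update_collectd_options()/_update_options(), where node-level settings override the scenario-wide collectd block (the values below are illustrative):

    scenario_wide = {'interval': 25, 'plugins': {'load': {}}}
    node_specific = {'interval': 5,
                     'plugins': {'ovs_stats': {
                         'ovs_socket_path': '/usr/local/var/run/openvswitch/db.sock'}}}

    def update_options(options, additional_options):
        # Nested dicts are merged; plain values are overridden.
        for key, value in additional_options.items():
            if isinstance(value, dict) and key in options:
                options[key].update(value)
            else:
                options[key] = value

    update_options(scenario_wide, node_specific)
    # scenario_wide now holds interval 5 plus both the 'load' and
    # 'ovs_stats' plugins.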
diff --git a/yardstick/orchestrator/heat.py b/yardstick/orchestrator/heat.py
index 5afa4151e..e0c0db262 100644
--- a/yardstick/orchestrator/heat.py
+++ b/yardstick/orchestrator/heat.py
@@ -22,13 +22,13 @@ import time
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
-import shade
from shade._heat import event_utils
-import yardstick.common.openstack_utils as op_utils
+from yardstick.common import constants as consts
from yardstick.common import exceptions
from yardstick.common import template_format
-from yardstick.common import constants as consts
+from yardstick.common import openstack_utils as op_utils
+
log = logging.getLogger(__name__)
@@ -41,10 +41,11 @@ _DEPLOYED_STACKS = {}
class HeatStack(object):
"""Represents a Heat stack (deployed template) """
- def __init__(self, name):
+ def __init__(self, name, os_cloud_config=None):
self.name = name
self.outputs = {}
- self._cloud = shade.openstack_cloud()
+ os_cloud_config = {} if not os_cloud_config else os_cloud_config
+ self._cloud = op_utils.get_shade_client(**os_cloud_config)
self._stack = None
def _update_stack_tracking(self):
@@ -152,10 +153,12 @@ name (i.e. %s).
# short hand for resources part of template
self.resources = self._template['resources']
- def __init__(self, name, template_file=None, heat_parameters=None):
+ def __init__(self, name, template_file=None, heat_parameters=None,
+ os_cloud_config=None):
self.name = name
self.keystone_client = None
self.heat_parameters = {}
+ self._os_cloud_config = {} if not os_cloud_config else os_cloud_config
# heat_parameters is passed to heat in stack create, empty dict when
# yardstick creates the template (no get_param in resources part)
@@ -622,7 +625,7 @@ name (i.e. %s).
log.info("Creating stack '%s' START", self.name)
start_time = time.time()
- stack = HeatStack(self.name)
+ stack = HeatStack(self.name, os_cloud_config=self._os_cloud_config)
stack.create(self._template, self.heat_parameters, block, timeout)
if not block:
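An illustrative sketch of passing cloud configuration through to shade via the new os_cloud_config argument (the template path and parameters are assumptions):

    from yardstick.orchestrator import heat

    template = heat.HeatTemplate(
        'demo-stack',
        template_file='/tmp/demo-heat-template.yaml',
        heat_parameters={'image': 'cirros'},
        os_cloud_config={'verify': False})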
diff --git a/yardstick/tests/integration/dummy-scenario-heat-context.yaml b/yardstick/tests/integration/dummy-scenario-heat-context.yaml
index 7c980b412..45a39951a 100644
--- a/yardstick/tests/integration/dummy-scenario-heat-context.yaml
+++ b/yardstick/tests/integration/dummy-scenario-heat-context.yaml
@@ -6,6 +6,7 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+{% set context_name = context_name or "demo" %}
---
# Sample Heat context config with Dummy context
@@ -22,9 +23,9 @@ scenarios:
context:
name: {{ context_name }}
- image: cirros-0.3.5
- flavor: cirros256
- user: cirros
+ image: yardstick-image
+ flavor: yardstick-flavor
+ user: ubuntu
servers:
athena:
diff --git a/yardstick/tests/unit/benchmark/contexts/standalone/test_model.py b/yardstick/tests/unit/benchmark/contexts/standalone/test_model.py
index 7078c89b2..72e684a68 100644
--- a/yardstick/tests/unit/benchmark/contexts/standalone/test_model.py
+++ b/yardstick/tests/unit/benchmark/contexts/standalone/test_model.py
@@ -478,7 +478,7 @@ class OvsDeployTestCase(unittest.TestCase):
def setUp(self):
self._mock_ssh = mock.patch.object(ssh, 'SSH')
- self.mock_ssh = self._mock_ssh .start()
+ self.mock_ssh = self._mock_ssh.start()
self.ovs_deploy = model.OvsDeploy(self.mock_ssh,
'/tmp/dpdk-devbind.py',
self.OVS_DETAILS)
diff --git a/yardstick/tests/unit/benchmark/contexts/test_base.py b/yardstick/tests/unit/benchmark/contexts/test_base.py
index 153c6a527..b19883479 100644
--- a/yardstick/tests/unit/benchmark/contexts/test_base.py
+++ b/yardstick/tests/unit/benchmark/contexts/test_base.py
@@ -25,6 +25,7 @@ class FlagsTestCase(unittest.TestCase):
def test___init__(self):
self.assertFalse(self.flags.no_setup)
self.assertFalse(self.flags.no_teardown)
+ self.assertEqual({'verify': False}, self.flags.os_cloud_config)
def test___init__with_flags(self):
flags = base.Flags(no_setup=True)
@@ -32,10 +33,12 @@ class FlagsTestCase(unittest.TestCase):
self.assertFalse(flags.no_teardown)
def test_parse(self):
- self.flags.parse(no_setup=True, no_teardown="False")
+ self.flags.parse(no_setup=True, no_teardown='False',
+ os_cloud_config={'verify': True})
self.assertTrue(self.flags.no_setup)
- self.assertEqual(self.flags.no_teardown, "False")
+ self.assertEqual('False', self.flags.no_teardown)
+ self.assertEqual({'verify': True}, self.flags.os_cloud_config)
def test_parse_forbidden_flags(self):
self.flags.parse(foo=42)
diff --git a/yardstick/tests/unit/benchmark/contexts/test_heat.py b/yardstick/tests/unit/benchmark/contexts/test_heat.py
index a40adf5ae..ebb1d69ba 100644
--- a/yardstick/tests/unit/benchmark/contexts/test_heat.py
+++ b/yardstick/tests/unit/benchmark/contexts/test_heat.py
@@ -229,12 +229,12 @@ class HeatContextTestCase(unittest.TestCase):
self.assertRaises(y_exc.HeatTemplateError,
self.test_context.deploy)
- mock_path_exists.assert_called_once()
+ mock_path_exists.assert_called()
mock_resources_template.assert_called_once()
@mock.patch.object(os.path, 'exists', return_value=False)
@mock.patch.object(ssh.SSH, 'gen_keys')
- @mock.patch('yardstick.benchmark.contexts.heat.HeatTemplate')
+ @mock.patch.object(heat, 'HeatTemplate')
def test_deploy(self, mock_template, mock_genkeys, mock_path_exists):
self.test_context._name = 'foo'
self.test_context._task_id = '1234567890'
@@ -245,16 +245,17 @@ class HeatContextTestCase(unittest.TestCase):
self.test_context.get_neutron_info = mock.MagicMock()
self.test_context.deploy()
- mock_template.assert_called_with('foo-12345678',
- '/bar/baz/some-heat-file',
- {'image': 'cirros'})
+ mock_template.assert_called_with(
+ 'foo-12345678', template_file='/bar/baz/some-heat-file',
+ heat_parameters={'image': 'cirros'},
+ os_cloud_config=self.test_context._flags.os_cloud_config)
self.assertIsNotNone(self.test_context.stack)
key_filename = ''.join(
[consts.YARDSTICK_ROOT_PATH,
'yardstick/resources/files/yardstick_key-',
self.test_context._name_task_id])
mock_genkeys.assert_called_once_with(key_filename)
- mock_path_exists.assert_called_once_with(key_filename)
+ mock_path_exists.assert_any_call(key_filename)
@mock.patch.object(heat, 'HeatTemplate')
@mock.patch.object(os.path, 'exists', return_value=False)
@@ -280,7 +281,7 @@ class HeatContextTestCase(unittest.TestCase):
'yardstick/resources/files/yardstick_key-',
self.test_context._name])
mock_genkeys.assert_called_once_with(key_filename)
- mock_path_exists.assert_called_once_with(key_filename)
+ mock_path_exists.assert_any_call(key_filename)
@mock.patch.object(heat, 'HeatTemplate')
@mock.patch.object(os.path, 'exists', return_value=False)
@@ -296,7 +297,6 @@ class HeatContextTestCase(unittest.TestCase):
self.test_context._flags.no_setup = True
self.test_context.template_file = '/bar/baz/some-heat-file'
self.test_context.get_neutron_info = mock.MagicMock()
-
self.test_context.deploy()
mock_retrieve_stack.assert_called_once_with(self.test_context._name)
@@ -334,7 +334,7 @@ class HeatContextTestCase(unittest.TestCase):
'yardstick/resources/files/yardstick_key-',
self.test_context._name_task_id])
mock_genkeys.assert_called_once_with(key_filename)
- mock_path_exists.assert_called_with(key_filename)
+ mock_path_exists.assert_any_call(key_filename)
mock_call_gen_keys = mock.call.gen_keys(key_filename)
mock_call_add_resources = (
diff --git a/yardstick/tests/unit/benchmark/scenarios/availability/test_scenario_general.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_scenario_general.py
index 45840d569..d1172d5a6 100644
--- a/yardstick/tests/unit/benchmark/scenarios/availability/test_scenario_general.py
+++ b/yardstick/tests/unit/benchmark/scenarios/availability/test_scenario_general.py
@@ -14,7 +14,8 @@ from yardstick.benchmark.scenarios.availability import scenario_general
class ScenarioGeneralTestCase(unittest.TestCase):
- def setUp(self):
+ @mock.patch.object(scenario_general, 'Director')
+ def setUp(self, *args):
self.scenario_cfg = {
'type': "general_scenario",
'options': {
@@ -36,32 +37,28 @@ class ScenarioGeneralTestCase(unittest.TestCase):
}
}
self.instance = scenario_general.ScenarioGeneral(self.scenario_cfg, None)
-
- self._mock_director = mock.patch.object(scenario_general, 'Director')
- self.mock_director = self._mock_director.start()
- self.addCleanup(self._stop_mock)
-
- def _stop_mock(self):
- self._mock_director.stop()
+ self.instance.setup()
+ self.instance.director.verify.return_value = True
def test_scenario_general_all_successful(self):
- self.instance.setup()
- self.instance.run({})
+
+ ret = {}
+ self.instance.run(ret)
self.instance.teardown()
+ self.assertEqual(ret['sla_pass'], 1)
def test_scenario_general_exception(self):
- mock_obj = mock.Mock()
- mock_obj.createActionPlayer.side_effect = KeyError('Wrong')
- self.instance.director = mock_obj
+ self.instance.director.createActionPlayer.side_effect = KeyError('Wrong')
self.instance.director.data = {}
- self.instance.run({})
+ ret = {}
+ self.instance.run(ret)
self.instance.teardown()
+ self.assertEqual(ret['sla_pass'], 1)
def test_scenario_general_case_fail(self):
- mock_obj = mock.Mock()
- mock_obj.verify.return_value = False
- self.instance.director = mock_obj
+ self.instance.director.verify.return_value = False
self.instance.director.data = {}
- self.instance.run({})
- self.instance.pass_flag = True
+ ret = {}
+ self.assertRaises(AssertionError, self.instance.run, ret)
self.instance.teardown()
+ self.assertEqual(ret['sla_pass'], 0)
diff --git a/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py
index 6bb3ec63b..dd656fbd5 100644
--- a/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py
+++ b/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py
@@ -60,15 +60,16 @@ class ServicehaTestCase(unittest.TestCase):
p.setup()
self.assertTrue(p.setup_done)
- # def test__serviceha_run_sla_error(self, mock_attacker, mock_monitor):
- # p = serviceha.ServiceHA(self.args, self.ctx)
+ @mock.patch.object(serviceha, 'baseattacker')
+ @mock.patch.object(serviceha, 'basemonitor')
+ def test__serviceha_run_sla_error(self, mock_monitor, *args):
+ p = serviceha.ServiceHA(self.args, self.ctx)
- # p.setup()
- # self.assertEqual(p.setup_done, True)
+ p.setup()
+ self.assertEqual(p.setup_done, True)
- # result = {}
- # result["outage_time"] = 10
- # mock_monitor.Monitor().get_result.return_value = result
+ mock_monitor.MonitorMgr().verify_SLA.return_value = False
- # ret = {}
- # self.assertRaises(AssertionError, p.run, ret)
+ ret = {}
+ self.assertRaises(AssertionError, p.run, ret)
+ self.assertEqual(ret['sla_pass'], 0)
diff --git a/yardstick/tests/unit/benchmark/scenarios/lib/test_attach_volume.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_attach_volume.py
index 2964ecc14..bb7fa4536 100644
--- a/yardstick/tests/unit/benchmark/scenarios/lib/test_attach_volume.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_attach_volume.py
@@ -6,21 +6,51 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+from oslo_utils import uuidutils
import unittest
import mock
-from yardstick.benchmark.scenarios.lib.attach_volume import AttachVolume
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
+from yardstick.benchmark.scenarios.lib import attach_volume
class AttachVolumeTestCase(unittest.TestCase):
- @mock.patch('yardstick.common.openstack_utils.attach_server_volume')
- def test_attach_volume(self, mock_attach_server_volume):
- options = {
- 'volume_id': '123-456-000',
- 'server_id': '000-123-456'
- }
- args = {"options": options}
- obj = AttachVolume(args, {})
- obj.run({})
- mock_attach_server_volume.assert_called_once()
+ def setUp(self):
+
+ self._mock_attach_volume_to_server = mock.patch.object(
+ openstack_utils, 'attach_volume_to_server')
+ self.mock_attach_volume_to_server = (
+ self._mock_attach_volume_to_server.start())
+ self._mock_get_shade_client = mock.patch.object(
+ openstack_utils, 'get_shade_client')
+ self.mock_get_shade_client = self._mock_get_shade_client.start()
+ self._mock_log = mock.patch.object(attach_volume, 'LOG')
+ self.mock_log = self._mock_log.start()
+ _uuid = uuidutils.generate_uuid()
+ self.args = {'options': {'server_name_or_id': _uuid,
+ 'volume_name_or_id': _uuid}}
+ self.result = {}
+ self.addCleanup(self._stop_mock)
+ self.attachvol_obj = attach_volume.AttachVolume(self.args, mock.ANY)
+
+ def _stop_mock(self):
+ self._mock_attach_volume_to_server.stop()
+ self._mock_get_shade_client.stop()
+ self._mock_log.stop()
+
+ def test_run(self):
+ self.mock_attach_volume_to_server.return_value = True
+ self.assertIsNone(self.attachvol_obj.run(self.result))
+ self.assertEqual({'attach_volume': 1}, self.result)
+ self.mock_log.info.assert_called_once_with(
+ 'Attach volume to server successful!')
+
+ def test_run_fail(self):
+ self.mock_attach_volume_to_server.return_value = False
+ with self.assertRaises(exceptions.ScenarioAttachVolumeError):
+ self.attachvol_obj.run(self.result)
+ self.assertEqual({'attach_volume': 0}, self.result)
+ self.mock_log.error.assert_called_once_with(
+ 'Attach volume to server failed!')
diff --git a/yardstick/tests/unit/benchmark/scenarios/lib/test_create_keypair.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_create_keypair.py
index 1c3d6cebc..a7b683f47 100644
--- a/yardstick/tests/unit/benchmark/scenarios/lib/test_create_keypair.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_create_keypair.py
@@ -6,22 +6,52 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-
-import mock
+from oslo_utils import uuidutils
import unittest
+import mock
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
from yardstick.benchmark.scenarios.lib import create_keypair
class CreateKeypairTestCase(unittest.TestCase):
- @mock.patch.object(create_keypair, 'paramiko')
- @mock.patch.object(create_keypair, 'op_utils')
- def test_create_keypair(self, mock_op_utils, *args):
- options = {
- 'key_name': 'yardstick_key',
- 'key_path': '/tmp/yardstick_key'
- }
- args = {"options": options}
- obj = create_keypair.CreateKeypair(args, {})
- obj.run({})
- mock_op_utils.create_keypair.assert_called_once()
+
+ def setUp(self):
+
+ self._mock_create_keypair = mock.patch.object(
+ openstack_utils, 'create_keypair')
+ self.mock_create_keypair = (
+ self._mock_create_keypair.start())
+ self._mock_get_shade_client = mock.patch.object(
+ openstack_utils, 'get_shade_client')
+ self.mock_get_shade_client = self._mock_get_shade_client.start()
+ self._mock_log = mock.patch.object(create_keypair, 'LOG')
+ self.mock_log = self._mock_log.start()
+ self.args = {'options': {'key_name': 'yardstick_key'}}
+ self.result = {}
+
+ self.ckeypair_obj = create_keypair.CreateKeypair(self.args, mock.ANY)
+ self.addCleanup(self._stop_mock)
+
+ def _stop_mock(self):
+ self._mock_create_keypair.stop()
+ self._mock_get_shade_client.stop()
+ self._mock_log.stop()
+
+ def test_run(self):
+ _uuid = uuidutils.generate_uuid()
+ self.ckeypair_obj.scenario_cfg = {'output': 'id'}
+ self.mock_create_keypair.return_value = {
+ 'name': 'key-name', 'type': 'ssh', 'id': _uuid}
+ output = self.ckeypair_obj.run(self.result)
+ self.assertDictEqual({'keypair_create': 1}, self.result)
+ self.assertDictEqual({'id': _uuid}, output)
+ self.mock_log.info.assert_called_once_with('Create keypair successful!')
+
+ def test_run_fail(self):
+ self.mock_create_keypair.return_value = None
+ with self.assertRaises(exceptions.ScenarioCreateKeypairError):
+ self.ckeypair_obj.run(self.result)
+ self.assertDictEqual({'keypair_create': 0}, self.result)
+ self.mock_log.error.assert_called_once_with('Create keypair failed!')
diff --git a/yardstick/tests/unit/benchmark/scenarios/lib/test_create_server.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_create_server.py
index 9d6d8cb1b..b58785112 100644
--- a/yardstick/tests/unit/benchmark/scenarios/lib/test_create_server.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_create_server.py
@@ -6,29 +6,54 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+from oslo_utils import uuidutils
import unittest
import mock
-from yardstick.benchmark.scenarios.lib.create_server import CreateServer
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
+from yardstick.benchmark.scenarios.lib import create_server
class CreateServerTestCase(unittest.TestCase):
- @mock.patch('yardstick.common.openstack_utils.create_instance_and_wait_for_active')
- @mock.patch('yardstick.common.openstack_utils.get_nova_client')
- @mock.patch('yardstick.common.openstack_utils.get_glance_client')
- @mock.patch('yardstick.common.openstack_utils.get_neutron_client')
- def test_create_server(self, mock_get_nova_client, mock_get_neutron_client,
- mock_get_glance_client, mock_create_instance_and_wait_for_active):
- scenario_cfg = {
- 'options': {
- 'openstack_paras': 'example'
- },
- 'output': 'server'
- }
- obj = CreateServer(scenario_cfg, {})
- obj.run({})
- mock_get_nova_client.assert_called_once()
- mock_get_glance_client.assert_called_once()
- mock_get_neutron_client.assert_called_once()
- mock_create_instance_and_wait_for_active.assert_called_once()
+ def setUp(self):
+
+ self._mock_create_instance_and_wait_for_active = mock.patch.object(
+ openstack_utils, 'create_instance_and_wait_for_active')
+ self.mock_create_instance_and_wait_for_active = (
+ self._mock_create_instance_and_wait_for_active.start())
+ self._mock_get_shade_client = mock.patch.object(
+ openstack_utils, 'get_shade_client')
+ self.mock_get_shade_client = self._mock_get_shade_client.start()
+ self._mock_log = mock.patch.object(create_server, 'LOG')
+ self.mock_log = self._mock_log.start()
+ self.args = {
+ 'options': {'name': 'server-name', 'image': 'image-name',
+ 'flavor': 'flavor-name'}}
+ self.result = {}
+
+ self.addCleanup(self._stop_mock)
+ self.cserver_obj = create_server.CreateServer(self.args, mock.ANY)
+
+ def _stop_mock(self):
+ self._mock_create_instance_and_wait_for_active.stop()
+ self._mock_get_shade_client.stop()
+ self._mock_log.stop()
+
+ def test_run(self):
+ _uuid = uuidutils.generate_uuid()
+ self.cserver_obj.scenario_cfg = {'output': 'id'}
+ self.mock_create_instance_and_wait_for_active.return_value = (
+ {'name': 'server-name', 'flavor': 'flavor-name', 'id': _uuid})
+ output = self.cserver_obj.run(self.result)
+ self.assertEqual({'instance_create': 1}, self.result)
+ self.assertEqual({'id': _uuid}, output)
+ self.mock_log.info.assert_called_once_with('Create server successful!')
+
+ def test_run_fail(self):
+ self.mock_create_instance_and_wait_for_active.return_value = None
+ with self.assertRaises(exceptions.ScenarioCreateServerError):
+ self.cserver_obj.run(self.result)
+ self.assertEqual({'instance_create': 0}, self.result)
+ self.mock_log.error.assert_called_once_with('Create server failed!')
diff --git a/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_keypair.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_keypair.py
index 6e790ba90..c7940251e 100644
--- a/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_keypair.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_keypair.py
@@ -9,19 +9,43 @@
import unittest
import mock
-from yardstick.benchmark.scenarios.lib.delete_keypair import DeleteKeypair
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
+from yardstick.benchmark.scenarios.lib import delete_keypair
class DeleteKeypairTestCase(unittest.TestCase):
- @mock.patch('yardstick.common.openstack_utils.get_nova_client')
- @mock.patch('yardstick.common.openstack_utils.delete_keypair')
- def test_detach_volume(self, mock_get_nova_client, mock_delete_keypair):
- options = {
- 'key_name': 'yardstick_key'
- }
- args = {"options": options}
- obj = DeleteKeypair(args, {})
- obj.run({})
- mock_get_nova_client.assert_called_once()
- mock_delete_keypair.assert_called_once()
+ def setUp(self):
+ self._mock_delete_keypair = mock.patch.object(
+ openstack_utils, 'delete_keypair')
+ self.mock_delete_keypair = self._mock_delete_keypair.start()
+ self._mock_get_shade_client = mock.patch.object(
+ openstack_utils, 'get_shade_client')
+ self.mock_get_shade_client = self._mock_get_shade_client.start()
+ self._mock_log = mock.patch.object(delete_keypair, 'LOG')
+ self.mock_log = self._mock_log.start()
+ self.args = {'options': {'key_name': 'yardstick_key'}}
+ self.result = {}
+ self.delkey_obj = delete_keypair.DeleteKeypair(self.args, mock.ANY)
+
+ self.addCleanup(self._stop_mock)
+
+ def _stop_mock(self):
+ self._mock_delete_keypair.stop()
+ self._mock_get_shade_client.stop()
+ self._mock_log.stop()
+
+ def test_run(self):
+ self.mock_delete_keypair.return_value = True
+ self.assertIsNone(self.delkey_obj.run(self.result))
+ self.assertEqual({'delete_keypair': 1}, self.result)
+ self.mock_log.info.assert_called_once_with(
+ 'Delete keypair successful!')
+
+ def test_run_fail(self):
+ self.mock_delete_keypair.return_value = False
+ with self.assertRaises(exceptions.ScenarioDeleteKeypairError):
+ self.delkey_obj.run(self.result)
+ self.assertEqual({'delete_keypair': 0}, self.result)
+ self.mock_log.error.assert_called_once_with("Delete keypair failed!")
diff --git a/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_server.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_server.py
index eee565de7..55fe53df8 100644
--- a/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_server.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_server.py
@@ -6,22 +6,49 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+from oslo_utils import uuidutils
import unittest
import mock
-from yardstick.benchmark.scenarios.lib.delete_server import DeleteServer
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
+from yardstick.benchmark.scenarios.lib import delete_server
class DeleteServerTestCase(unittest.TestCase):
- @mock.patch('yardstick.common.openstack_utils.delete_instance')
- @mock.patch('yardstick.common.openstack_utils.get_nova_client')
- def test_delete_server(self, mock_get_nova_client, mock_delete_instance):
- options = {
- 'server_id': '1234-4567-0000'
- }
- args = {"options": options}
- obj = DeleteServer(args, {})
- obj.run({})
- mock_get_nova_client.assert_called_once()
- mock_delete_instance.assert_called_once()
+ def setUp(self):
+ self._mock_delete_instance = mock.patch.object(
+ openstack_utils, 'delete_instance')
+ self.mock_delete_instance = (
+ self._mock_delete_instance.start())
+ self._mock_get_shade_client = mock.patch.object(
+ openstack_utils, 'get_shade_client')
+ self.mock_get_shade_client = self._mock_get_shade_client.start()
+ self._mock_log = mock.patch.object(delete_server, 'LOG')
+ self.mock_log = self._mock_log.start()
+ self.args = {'options': {'name_or_id': uuidutils.generate_uuid()}}
+ self.result = {}
+
+ self.delserver_obj = delete_server.DeleteServer(self.args, mock.ANY)
+
+ self.addCleanup(self._stop_mock)
+
+ def _stop_mock(self):
+ self._mock_delete_instance.stop()
+ self._mock_get_shade_client.stop()
+ self._mock_log.stop()
+
+ def test_run(self):
+ self.mock_delete_instance.return_value = True
+ self.assertIsNone(self.delserver_obj.run(self.result))
+ self.assertEqual({'delete_server': 1}, self.result)
+ self.mock_log.info.assert_called_once_with('Delete server successful!')
+
+ def test_run_fail(self):
+ self.mock_delete_instance.return_value = False
+ with self.assertRaises(exceptions.ScenarioDeleteServerError):
+ self.delserver_obj.run(self.result)
+ self.assertEqual({'delete_server': 0}, self.result)
+ self.mock_log.error.assert_called_once_with('Delete server failed!')
diff --git a/yardstick/tests/unit/benchmark/scenarios/lib/test_get_flavor.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_get_flavor.py
index 15a6f7c8f..1c1364348 100644
--- a/yardstick/tests/unit/benchmark/scenarios/lib/test_get_flavor.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_get_flavor.py
@@ -6,20 +6,52 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+from oslo_utils import uuidutils
import unittest
import mock
-from yardstick.benchmark.scenarios.lib.get_flavor import GetFlavor
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
+from yardstick.benchmark.scenarios.lib import get_flavor
class GetFlavorTestCase(unittest.TestCase):
- @mock.patch('yardstick.common.openstack_utils.get_flavor_by_name')
- def test_get_flavor(self, mock_get_flavor_by_name):
- options = {
- 'flavor_name': 'yardstick_test_flavor'
- }
- args = {"options": options}
- obj = GetFlavor(args, {})
- obj.run({})
- mock_get_flavor_by_name.assert_called_once()
+ def setUp(self):
+
+ self._mock_get_flavor = mock.patch.object(
+ openstack_utils, 'get_flavor')
+ self.mock_get_flavor = self._mock_get_flavor.start()
+ self._mock_get_shade_client = mock.patch.object(
+ openstack_utils, 'get_shade_client')
+ self.mock_get_shade_client = self._mock_get_shade_client.start()
+ self._mock_log = mock.patch.object(get_flavor, 'LOG')
+ self.mock_log = self._mock_log.start()
+ self.args = {'options': {'name_or_id': 'yardstick_flavor'}}
+ self.result = {}
+
+ self.getflavor_obj = get_flavor.GetFlavor(self.args, mock.ANY)
+ self.addCleanup(self._stop_mock)
+
+ def _stop_mock(self):
+ self._mock_get_flavor.stop()
+ self._mock_get_shade_client.stop()
+ self._mock_log.stop()
+
+ def test_run(self):
+ _uuid = uuidutils.generate_uuid()
+ self.getflavor_obj.scenario_cfg = {'output': 'flavor'}
+ self.mock_get_flavor.return_value = (
+ {'name': 'flavor-name', 'id': _uuid})
+ output = self.getflavor_obj.run(self.result)
+ self.assertDictEqual({'get_flavor': 1}, self.result)
+ self.assertDictEqual({'flavor': {'name': 'flavor-name', 'id': _uuid}},
+ output)
+ self.mock_log.info.assert_called_once_with('Get flavor successful!')
+
+ def test_run_fail(self):
+ self.mock_get_flavor.return_value = None
+ with self.assertRaises(exceptions.ScenarioGetFlavorError):
+ self.getflavor_obj.run(self.result)
+ self.assertDictEqual({'get_flavor': 0}, self.result)
+ self.mock_log.error.assert_called_once_with('Get flavor failed!')
diff --git a/yardstick/tests/unit/benchmark/scenarios/lib/test_get_server.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_get_server.py
index 83ec903bc..5b5329cb0 100644
--- a/yardstick/tests/unit/benchmark/scenarios/lib/test_get_server.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_get_server.py
@@ -6,37 +6,52 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+from oslo_utils import uuidutils
import unittest
import mock
-from yardstick.benchmark.scenarios.lib.get_server import GetServer
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
+from yardstick.benchmark.scenarios.lib import get_server
class GetServerTestCase(unittest.TestCase):
- @mock.patch('yardstick.common.openstack_utils.get_server_by_name')
- @mock.patch('yardstick.common.openstack_utils.get_nova_client')
- def test_get_server_with_name(self, mock_get_nova_client, mock_get_server_by_name):
- scenario_cfg = {
- 'options': {
- 'server_name': 'yardstick_server'
- },
- 'output': 'status server'
- }
- obj = GetServer(scenario_cfg, {})
- obj.run({})
- mock_get_nova_client.assert_called_once()
- mock_get_server_by_name.assert_called_once()
-
- @mock.patch('yardstick.common.openstack_utils.get_nova_client')
- def test_get_server_with_id(self, mock_get_nova_client):
- scenario_cfg = {
- 'options': {
- 'server_id': '1'
- },
- 'output': 'status server'
- }
- mock_get_nova_client().servers.get.return_value = None
- obj = GetServer(scenario_cfg, {})
- obj.run({})
- mock_get_nova_client.assert_called()
+ def setUp(self):
+
+ self._mock_get_server = mock.patch.object(
+ openstack_utils, 'get_server')
+ self.mock_get_server = self._mock_get_server.start()
+ self._mock_get_shade_client = mock.patch.object(
+ openstack_utils, 'get_shade_client')
+ self.mock_get_shade_client = self._mock_get_shade_client.start()
+ self._mock_log = mock.patch.object(get_server, 'LOG')
+ self.mock_log = self._mock_log.start()
+ self.args = {'options': {'name_or_id': 'yardstick_key'}}
+ self.result = {}
+
+ self.getserver_obj = get_server.GetServer(self.args, mock.ANY)
+ self.addCleanup(self._stop_mock)
+
+ def _stop_mock(self):
+ self._mock_get_server.stop()
+ self._mock_get_shade_client.stop()
+ self._mock_log.stop()
+
+ def test_run(self):
+ _uuid = uuidutils.generate_uuid()
+ self.getserver_obj.scenario_cfg = {'output': 'server'}
+ self.mock_get_server.return_value = (
+ {'name': 'server-name', 'id': _uuid})
+ output = self.getserver_obj.run(self.result)
+ self.assertDictEqual({'get_server': 1}, self.result)
+ self.assertDictEqual({'server': {'name': 'server-name', 'id': _uuid}},
+ output)
+ self.mock_log.info.assert_called_once_with('Get Server successful!')
+
+ def test_run_fail(self):
+ self.mock_get_server.return_value = None
+ with self.assertRaises(exceptions.ScenarioGetServerError):
+ self.getserver_obj.run(self.result)
+ self.assertDictEqual({'get_server': 0}, self.result)
+ self.mock_log.error.assert_called_once_with('Get Server failed!')
diff --git a/yardstick/tests/unit/common/test_openstack_utils.py b/yardstick/tests/unit/common/test_openstack_utils.py
index f03f2516c..cc19d98b4 100644
--- a/yardstick/tests/unit/common/test_openstack_utils.py
+++ b/yardstick/tests/unit/common/test_openstack_utils.py
@@ -10,8 +10,10 @@
from oslo_utils import uuidutils
import unittest
import mock
-
+import shade
from shade import exc
+
+from yardstick.common import constants
from yardstick.common import openstack_utils
@@ -35,6 +37,29 @@ class GetHeatApiVersionTestCase(unittest.TestCase):
self.assertEqual(api_version, expected_result)
+class GetShadeClientTestCase(unittest.TestCase):
+
+ @mock.patch.object(shade, 'openstack_cloud', return_value='os_client')
+ def test_get_shade_client(self, mock_openstack_cloud):
+ os_cloud_config = {'param1': True, 'param2': 'value2'}
+ self.assertEqual('os_client',
+ openstack_utils.get_shade_client(**os_cloud_config))
+ os_cloud_config.update(constants.OS_CLOUD_DEFAULT_CONFIG)
+ mock_openstack_cloud.assert_called_once_with(**os_cloud_config)
+
+ mock_openstack_cloud.reset_mock()
+ os_cloud_config = {'verify': True, 'param2': 'value2'}
+ self.assertEqual('os_client',
+ openstack_utils.get_shade_client(**os_cloud_config))
+ mock_openstack_cloud.assert_called_once_with(**os_cloud_config)
+
+ @mock.patch.object(shade, 'openstack_cloud', return_value='os_client')
+ def test_get_shade_client_no_parameters(self, mock_openstack_cloud):
+ self.assertEqual('os_client', openstack_utils.get_shade_client())
+ mock_openstack_cloud.assert_called_once_with(
+ **constants.OS_CLOUD_DEFAULT_CONFIG)
+
+
class DeleteNeutronNetTestCase(unittest.TestCase):
def setUp(self):
@@ -337,3 +362,199 @@ class SecurityGroupTestCase(unittest.TestCase):
mock_create_security_group_rule.assert_called()
self.mock_shade_client.delete_security_group(self.sg_name)
self.assertEqual(self._uuid, output)
+
+# *********************************************
+# NOVA
+# *********************************************
+
+
+class CreateInstanceTestCase(unittest.TestCase):
+
+ def test_create_instance_and_wait_for_active(self):
+ self.mock_shade_client = mock.Mock()
+ name = 'server_name'
+ image = 'image_name'
+ flavor = 'flavor_name'
+ self.mock_shade_client.create_server.return_value = (
+ {'name': name, 'image': image, 'flavor': flavor})
+ output = openstack_utils.create_instance_and_wait_for_active(
+ self.mock_shade_client, name, image, flavor)
+ self.assertEqual(
+ {'name': name, 'image': image, 'flavor': flavor}, output)
+
+ @mock.patch.object(openstack_utils, 'log')
+ def test_create_instance_and_wait_for_active_fail(self, mock_logger):
+ self.mock_shade_client = mock.Mock()
+ self.mock_shade_client.create_server.side_effect = (
+ exc.OpenStackCloudException('error message'))
+ output = openstack_utils.create_instance_and_wait_for_active(
+ self.mock_shade_client, 'server_name', 'image_name', 'flavor_name')
+ mock_logger.error.assert_called_once()
+ self.assertIsNone(output)
+
+
+class DeleteInstanceTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.mock_shade_client = mock.Mock()
+
+ def test_delete_instance(self):
+ self.mock_shade_client.delete_server.return_value = True
+ output = openstack_utils.delete_instance(self.mock_shade_client,
+ 'instance_name_id')
+ self.assertTrue(output)
+
+ def test_delete_instance_fail(self):
+ self.mock_shade_client.delete_server.return_value = False
+ output = openstack_utils.delete_instance(self.mock_shade_client,
+ 'instance_name_id')
+ self.assertFalse(output)
+
+ @mock.patch.object(openstack_utils, 'log')
+ def test_delete_instance_exception(self, mock_logger):
+ self.mock_shade_client.delete_server.side_effect = (
+ exc.OpenStackCloudException('error message'))
+ output = openstack_utils.delete_instance(self.mock_shade_client,
+ 'instance_name_id')
+ mock_logger.error.assert_called_once()
+ self.assertFalse(output)
+
+
+class CreateKeypairTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.mock_shade_client = mock.Mock()
+ self.name = 'key_name'
+
+ def test_create_keypair(self):
+ self.mock_shade_client.create_keypair.return_value = (
+ {'name': 'key-name', 'type': 'ssh'})
+ output = openstack_utils.create_keypair(
+ self.mock_shade_client, self.name)
+ self.assertEqual(
+ {'name': 'key-name', 'type': 'ssh'},
+ output)
+
+ @mock.patch.object(openstack_utils, 'log')
+ def test_create_keypair_fail(self, mock_logger):
+ self.mock_shade_client.create_keypair.side_effect = (
+ exc.OpenStackCloudException('error message'))
+ output = openstack_utils.create_keypair(
+ self.mock_shade_client, self.name)
+ mock_logger.error.assert_called_once()
+ self.assertIsNone(output)
+
+
+class DeleteKeypairTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.mock_shade_client = mock.Mock()
+
+ def test_delete_keypair(self):
+ self.mock_shade_client.delete_keypair.return_value = True
+ output = openstack_utils.delete_keypair(self.mock_shade_client,
+ 'key_name')
+ self.assertTrue(output)
+
+ def test_delete_keypair_fail(self):
+ self.mock_shade_client.delete_keypair.return_value = False
+ output = openstack_utils.delete_keypair(self.mock_shade_client,
+ 'key_name')
+ self.assertFalse(output)
+
+ @mock.patch.object(openstack_utils, 'log')
+ def test_delete_keypair_exception(self, mock_logger):
+ self.mock_shade_client.delete_keypair.side_effect = (
+ exc.OpenStackCloudException('error message'))
+ output = openstack_utils.delete_keypair(self.mock_shade_client,
+ 'key_name')
+ mock_logger.error.assert_called_once()
+ self.assertFalse(output)
+
+
+class AttachVolumeToServerTestCase(unittest.TestCase):
+
+ def test_attach_volume_to_server(self):
+ self.mock_shade_client = mock.Mock()
+ self.mock_shade_client.get_server.return_value = {'server_dict'}
+ self.mock_shade_client.get_volume.return_value = {'volume_dict'}
+ self.mock_shade_client.attach_volume.return_value = True
+ output = openstack_utils.attach_volume_to_server(
+ self.mock_shade_client, 'server_name_or_id', 'volume_name_or_id')
+ self.assertTrue(output)
+
+ @mock.patch.object(openstack_utils, 'log')
+ def test_attach_volume_to_server_fail(self, mock_logger):
+ self.mock_shade_client = mock.Mock()
+ self.mock_shade_client.attach_volume.side_effect = (
+ exc.OpenStackCloudException('error message'))
+ output = openstack_utils.attach_volume_to_server(
+ self.mock_shade_client, 'server_name_or_id', 'volume_name_or_id')
+ mock_logger.error.assert_called_once()
+ self.assertFalse(output)
+
+
+class GetServerTestCase(unittest.TestCase):
+
+ def test_get_server(self):
+ self.mock_shade_client = mock.Mock()
+ _uuid = uuidutils.generate_uuid()
+ self.mock_shade_client.get_server.return_value = {
+ 'name': 'server_name', 'id': _uuid}
+ output = openstack_utils.get_server(self.mock_shade_client,
+ 'server_name_or_id')
+ self.assertEqual({'name': 'server_name', 'id': _uuid}, output)
+
+ @mock.patch.object(openstack_utils, 'log')
+ def test_get_server_exception(self, mock_logger):
+ self.mock_shade_client = mock.Mock()
+ self.mock_shade_client.get_server.side_effect = (
+ exc.OpenStackCloudException('error message'))
+ output = openstack_utils.get_server(self.mock_shade_client,
+ 'server_name_or_id')
+ mock_logger.error.assert_called_once()
+ self.assertIsNone(output)
+
+
+class GetFlavorTestCase(unittest.TestCase):
+
+ def test_get_flavor(self):
+ self.mock_shade_client = mock.Mock()
+ _uuid = uuidutils.generate_uuid()
+ self.mock_shade_client.get_flavor.return_value = {
+ 'name': 'flavor_name', 'id': _uuid}
+ output = openstack_utils.get_flavor(self.mock_shade_client,
+ 'flavor_name_or_id')
+ self.assertEqual({'name': 'flavor_name', 'id': _uuid}, output)
+
+ @mock.patch.object(openstack_utils, 'log')
+ def test_get_flavor_exception(self, mock_logger):
+ self.mock_shade_client = mock.Mock()
+ self.mock_shade_client.get_flavor.side_effect = (
+ exc.OpenStackCloudException('error message'))
+ output = openstack_utils.get_flavor(self.mock_shade_client,
+ 'flavor_name_or_id')
+ mock_logger.error.assert_called_once()
+ self.assertIsNone(output)
+
+# *********************************************
+# CINDER
+# *********************************************
+
+
+class GetVolumeIDTestCase(unittest.TestCase):
+
+ def test_get_volume_id(self):
+ self.mock_shade_client = mock.Mock()
+ _uuid = uuidutils.generate_uuid()
+ self.mock_shade_client.get_volume_id.return_value = _uuid
+ output = openstack_utils.get_volume_id(self.mock_shade_client,
+ 'volume_name')
+ self.assertEqual(_uuid, output)
+
+ def test_get_volume_id_none(self):
+ self.mock_shade_client = mock.Mock()
+ self.mock_shade_client.get_volume_id.return_value = None
+ output = openstack_utils.get_volume_id(self.mock_shade_client,
+ 'volume_name')
+ self.assertIsNone(output)
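
The keypair, attach-volume and Cinder tests above all exercise the same thin-wrapper pattern in yardstick.common.openstack_utils: call the shade client, log and return a failure value on OpenStackCloudException. The following is only a minimal sketch of that pattern, assuming the names used by the tests (delete_keypair, get_volume_id, the module-level log); the real module may differ in detail.

import logging

from shade import exc

log = logging.getLogger(__name__)


def delete_keypair(shade_client, name):
    # shade returns True/False; a cloud exception is logged and reported
    # as a failed deletion, which is what the exception test asserts.
    try:
        return shade_client.delete_keypair(name)
    except exc.OpenStackCloudException as o_exc:
        log.error('Error [delete_keypair(shade_client)]: %s', o_exc)
        return False


def get_volume_id(shade_client, volume_name):
    # shade already returns None for an unknown volume, so the wrapper
    # can simply pass the result through.
    return shade_client.get_volume_id(volume_name)

create_keypair follows the same shape but returns None on failure, which is why its failure test uses assertIsNone rather than assertFalse.
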
diff --git a/yardstick/tests/unit/common/test_packages.py b/yardstick/tests/unit/common/test_packages.py
new file mode 100644
index 000000000..ba59a3015
--- /dev/null
+++ b/yardstick/tests/unit/common/test_packages.py
@@ -0,0 +1,88 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import mock
+from pip import exceptions as pip_exceptions
+from pip.operations import freeze
+import unittest
+
+from yardstick.common import packages
+
+
+class PipExecuteActionTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self._mock_pip_main = mock.patch.object(packages, '_pip_main')
+ self.mock_pip_main = self._mock_pip_main.start()
+ self.mock_pip_main.return_value = 0
+ self._mock_freeze = mock.patch.object(freeze, 'freeze')
+ self.mock_freeze = self._mock_freeze.start()
+ self.addCleanup(self._cleanup)
+
+ def _cleanup(self):
+ self._mock_pip_main.stop()
+ self._mock_freeze.stop()
+
+ def test_pip_execute_action(self):
+ self.assertEqual(0, packages._pip_execute_action('test_package'))
+
+ def test_remove(self):
+ self.assertEqual(0, packages._pip_execute_action('test_package',
+ action='uninstall'))
+
+ def test_install(self):
+ self.assertEqual(0, packages._pip_execute_action(
+ 'test_package', action='install', target='temp_dir'))
+
+ def test_pip_execute_action_error(self):
+ self.mock_pip_main.return_value = 1
+ self.assertEqual(1, packages._pip_execute_action('test_package'))
+
+ def test_pip_execute_action_exception(self):
+ self.mock_pip_main.side_effect = pip_exceptions.PipError
+ self.assertEqual(1, packages._pip_execute_action('test_package'))
+
+ def test_pip_list(self):
+ pkg_input = [
+ 'XStatic-Rickshaw==1.5.0.0',
+ 'xvfbwrapper==0.2.9',
+ '-e git+https://git.opnfv.org/yardstick@50773a24afc02c9652b662ecca'
+ '2fc5621ea6097a#egg=yardstick',
+ 'zope.interface==4.4.3'
+ ]
+ pkg_dict = {
+ 'XStatic-Rickshaw': '1.5.0.0',
+ 'xvfbwrapper': '0.2.9',
+ 'yardstick': '50773a24afc02c9652b662ecca2fc5621ea6097a',
+ 'zope.interface': '4.4.3'
+ }
+ self.mock_freeze.return_value = pkg_input
+
+ pkg_output = packages.pip_list()
+ for pkg_name, pkg_version in pkg_output.items():
+ self.assertEqual(pkg_dict.get(pkg_name), pkg_version)
+
+ def test_pip_list_single_package(self):
+ pkg_input = [
+ 'XStatic-Rickshaw==1.5.0.0',
+ 'xvfbwrapper==0.2.9',
+ '-e git+https://git.opnfv.org/yardstick@50773a24afc02c9652b662ecca'
+ '2fc5621ea6097a#egg=yardstick',
+ 'zope.interface==4.4.3'
+ ]
+ self.mock_freeze.return_value = pkg_input
+
+ pkg_output = packages.pip_list(pkg_name='xvfbwrapper')
+ self.assertEqual(1, len(pkg_output))
+ self.assertEqual(pkg_output.get('xvfbwrapper'), '0.2.9')
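
The two pip_list tests pin down how the freeze output is expected to be parsed: regular entries are split on '==', while editable '-e ...#egg=' entries report the git revision as their version. A rough sketch of that parsing, assuming the same pre-pip-10 pip.operations.freeze API the tests patch; the helper name and structure are illustrative, not the Yardstick source.

from pip.operations import freeze


def pip_list(pkg_name=None):
    pkg_dict = {}
    for line in freeze.freeze():
        if line.startswith('-e '):
            # '-e git+<url>@<sha>#egg=<name>' -> {<name>: <sha>}
            ref, _, name = line.partition('#egg=')
            version = ref.rsplit('@', 1)[-1]
        else:
            name, _, version = line.partition('==')
        if pkg_name and name != pkg_name:
            continue
        pkg_dict[name] = version
    return pkg_dict
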
diff --git a/yardstick/tests/unit/common/test_utils.py b/yardstick/tests/unit/common/test_utils.py
index 9540a39e8..666b29b5f 100644
--- a/yardstick/tests/unit/common/test_utils.py
+++ b/yardstick/tests/unit/common/test_utils.py
@@ -16,13 +16,15 @@ import mock
import os
import six
from six.moves import configparser
+import time
import unittest
import yardstick
from yardstick import ssh
import yardstick.error
-from yardstick.common import utils
from yardstick.common import constants
+from yardstick.common import utils
+from yardstick.common import exceptions
class IterSubclassesTestCase(unittest.TestCase):
@@ -1158,3 +1160,43 @@ class ReadMeminfoTestCase(unittest.TestCase):
output = utils.read_meminfo(ssh_client)
mock_get_client.assert_called_once_with('/proc/meminfo', mock.ANY)
self.assertEqual(self.MEMINFO_DICT, output)
+
+
+class TimerTestCase(unittest.TestCase):
+
+ def test__getattr(self):
+ with utils.Timer() as timer:
+ time.sleep(1)
+ self.assertEqual(1, round(timer.total_seconds(), 0))
+ self.assertEqual(1, timer.delta.seconds)
+
+ def test__enter_with_timeout(self):
+ with utils.Timer(timeout=10) as timer:
+ time.sleep(1)
+ self.assertEqual(1, round(timer.total_seconds(), 0))
+
+ def test__enter_with_timeout_exception(self):
+ with self.assertRaises(exceptions.TimerTimeout):
+ with utils.Timer(timeout=1):
+ time.sleep(2)
+
+
+class WaitUntilTrueTestCase(unittest.TestCase):
+
+ def test_no_timeout(self):
+ self.assertIsNone(utils.wait_until_true(lambda: True,
+ timeout=1, sleep=1))
+
+ def test_timeout_generic_exception(self):
+ with self.assertRaises(exceptions.WaitTimeout):
+ self.assertIsNone(utils.wait_until_true(lambda: False,
+ timeout=1, sleep=1))
+
+ def test_timeout_given_exception(self):
+ class MyTimeoutException(exceptions.YardstickException):
+ message = 'My timeout exception'
+
+ with self.assertRaises(MyTimeoutException):
+ self.assertIsNone(
+ utils.wait_until_true(lambda: False, timeout=1, sleep=1,
+ exception=MyTimeoutException))
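
TimerTestCase and WaitUntilTrueTestCase describe the contract of the new helpers rather than their implementation. For wait_until_true the behaviour asserted is simple polling, sketched below on the assumption that exceptions.WaitTimeout is the default error (a plain RuntimeError stands in for it here).

import time


def wait_until_true(predicate, timeout=60, sleep=1, exception=None):
    # Poll `predicate` every `sleep` seconds until it returns True,
    # raising once `timeout` seconds have elapsed without success.
    end = time.time() + timeout
    while time.time() < end:
        if predicate():
            return None
        time.sleep(sleep)
    if exception:
        raise exception
    # The Yardstick helper raises exceptions.WaitTimeout here.
    raise RuntimeError('timed out waiting for the predicate')

Timer, similarly, is a context manager that records its start time on entry, exposes the elapsed datetime.timedelta as .delta and via total_seconds(), and raises exceptions.TimerTimeout when the optional timeout expires.
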
diff --git a/tests/unit/network_services/collector/__init__.py b/yardstick/tests/unit/network_services/__init__.py
index e69de29bb..e69de29bb 100644
--- a/tests/unit/network_services/collector/__init__.py
+++ b/yardstick/tests/unit/network_services/__init__.py
diff --git a/tests/unit/network_services/libs/__init__.py b/yardstick/tests/unit/network_services/collector/__init__.py
index e69de29bb..e69de29bb 100644
--- a/tests/unit/network_services/libs/__init__.py
+++ b/yardstick/tests/unit/network_services/collector/__init__.py
diff --git a/tests/unit/network_services/collector/test_publisher.py b/yardstick/tests/unit/network_services/collector/test_publisher.py
index 4a175841d..145441ddd 100644
--- a/tests/unit/network_services/collector/test_publisher.py
+++ b/yardstick/tests/unit/network_services/collector/test_publisher.py
@@ -13,9 +13,6 @@
# limitations under the License.
#
-# Unittest for yardstick.network_services.collector.publisher
-
-from __future__ import absolute_import
import unittest
from yardstick.network_services.collector import publisher
diff --git a/tests/unit/network_services/collector/test_subscriber.py b/yardstick/tests/unit/network_services/collector/test_subscriber.py
index d4b4ecf7a..14e26f7fe 100644
--- a/tests/unit/network_services/collector/test_subscriber.py
+++ b/yardstick/tests/unit/network_services/collector/test_subscriber.py
@@ -13,13 +13,11 @@
# limitations under the License.
#
-# Unittest for yardstick.network_services.collector.subscriber
-
-from __future__ import absolute_import
import unittest
import mock
from yardstick.network_services.collector import subscriber
+from yardstick import ssh
class MockVnfAprrox(object):
@@ -40,57 +38,41 @@ class MockVnfAprrox(object):
class CollectorTestCase(unittest.TestCase):
- NODES = {
- 'node1': {},
- 'node2': {
- 'ip': '1.2.3.4',
- 'collectd': {
- 'plugins': {'abc': 12, 'def': 34},
- 'interval': 987,
- },
- },
- }
- TRAFFIC_PROFILE = {
- 'key1': 'value1',
- }
-
def setUp(self):
vnf = MockVnfAprrox()
- self.ssh_patch = mock.patch('yardstick.network_services.nfvi.resource.ssh', autospec=True)
+ vnf.start_collect = mock.Mock()
+ vnf.stop_collect = mock.Mock()
+ self.ssh_patch = mock.patch.object(ssh, 'AutoConnectSSH')
mock_ssh = self.ssh_patch.start()
mock_instance = mock.Mock()
mock_instance.execute.return_value = 0, '', ''
- mock_ssh.AutoConnectSSH.from_node.return_value = mock_instance
- self.collector = subscriber.Collector([vnf], self.NODES, self.TRAFFIC_PROFILE, 1800)
+ mock_ssh.from_node.return_value = mock_instance
+ self.collector = subscriber.Collector([vnf])
def tearDown(self):
self.ssh_patch.stop()
def test___init__(self, *_):
vnf = MockVnfAprrox()
- collector = subscriber.Collector([vnf], {}, {})
+ collector = subscriber.Collector([vnf])
self.assertEqual(len(collector.vnfs), 1)
- self.assertEqual(collector.traffic_profile, {})
-
- def test___init___with_data(self, *_):
- self.assertEqual(len(self.collector.vnfs), 1)
- self.assertDictEqual(self.collector.traffic_profile, self.TRAFFIC_PROFILE)
- self.assertEqual(len(self.collector.resource_profiles), 1)
-
- def test___init___negative(self, *_):
- pass
def test_start(self, *_):
self.assertIsNone(self.collector.start())
+ for vnf in self.collector.vnfs:
+ vnf.start_collect.assert_called_once()
def test_stop(self, *_):
self.assertIsNone(self.collector.stop())
+ for vnf in self.collector.vnfs:
+ vnf.stop_collect.assert_called_once()
def test_get_kpi(self, *_):
result = self.collector.get_kpi()
+ self.assertEqual(1, len(result))
+ self.assertEqual(4, len(result["vnf__1"]))
self.assertEqual(result["vnf__1"]["pkt_in_up_stream"], 100)
self.assertEqual(result["vnf__1"]["pkt_drop_up_stream"], 5)
self.assertEqual(result["vnf__1"]["pkt_in_down_stream"], 50)
self.assertEqual(result["vnf__1"]["pkt_drop_down_stream"], 40)
- self.assertIn('node2', result)
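
The rewritten CollectorTestCase reflects a much smaller Collector: the nodes, traffic-profile and resource-profile arguments are gone, and collection is delegated entirely to the VNF objects. A hedged reconstruction of the interface the tests now expect (not the actual subscriber module):

class Collector(object):

    def __init__(self, vnfs):
        self.vnfs = vnfs

    def start(self):
        for vnf in self.vnfs:
            vnf.start_collect()

    def stop(self):
        for vnf in self.vnfs:
            vnf.stop_collect()

    def get_kpi(self):
        # One entry per VNF, keyed by its name, e.g. {'vnf__1': {...}}.
        return {vnf.name: vnf.collect_kpi() for vnf in self.vnfs}
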
diff --git a/tests/unit/network_services/libs/ixia_libs/__init__.py b/yardstick/tests/unit/network_services/libs/__init__.py
index e69de29bb..e69de29bb 100644
--- a/tests/unit/network_services/libs/ixia_libs/__init__.py
+++ b/yardstick/tests/unit/network_services/libs/__init__.py
diff --git a/yardstick/tests/unit/network_services/libs/ixia_libs/__init__.py b/yardstick/tests/unit/network_services/libs/ixia_libs/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/yardstick/tests/unit/network_services/libs/ixia_libs/__init__.py
diff --git a/tests/unit/network_services/libs/ixia_libs/test_IxNet.py b/yardstick/tests/unit/network_services/libs/ixia_libs/test_IxNet.py
index 2a97048aa..7ca2f0f19 100644
--- a/tests/unit/network_services/libs/ixia_libs/test_IxNet.py
+++ b/yardstick/tests/unit/network_services/libs/ixia_libs/test_IxNet.py
@@ -13,34 +13,38 @@
# limitations under the License.
#
-# Unittest for yardstick.network_services.libs.ixia_libs.IxNet
-
-from __future__ import absolute_import
-import unittest
import mock
+import IxNetwork
+import unittest
from yardstick.network_services.libs.ixia_libs.IxNet.IxNet import IxNextgen
from yardstick.network_services.libs.ixia_libs.IxNet.IxNet import IP_VERSION_4
from yardstick.network_services.libs.ixia_libs.IxNet.IxNet import IP_VERSION_6
-
UPLINK = "uplink"
DOWNLINK = "downlink"
+
class TestIxNextgen(unittest.TestCase):
def test___init__(self):
ixnet_gen = IxNextgen()
self.assertIsNone(ixnet_gen._bidir)
- @mock.patch("yardstick.network_services.libs.ixia_libs.IxNet.IxNet.sys")
- def test_connect(self, *args):
-
+ @mock.patch.object(IxNetwork, 'IxNet')
+ def test_connect(self, mock_ixnet):
+ ixnet_instance = mock.Mock()
+ mock_ixnet.return_value = ixnet_instance
ixnet_gen = IxNextgen()
- ixnet_gen.get_config = mock.MagicMock()
- ixnet_gen.get_ixnet = mock.MagicMock()
+ with mock.patch.object(ixnet_gen, 'get_config') as mock_config:
+ mock_config.return_value = {'machine': 'machine_fake',
+ 'port': 'port_fake',
+ 'version': 12345}
+ ixnet_gen._connect(mock.ANY)
- self.assertRaises(ImportError, ixnet_gen._connect, {"py_lib_path": "/tmp"})
+ ixnet_instance.connect.assert_called_once_with(
+ 'machine_fake', '-port', 'port_fake', '-version', '12345')
+ mock_config.assert_called_once()
def test_clear_ixia_config(self):
ixnet = mock.MagicMock()
@@ -628,11 +632,9 @@ class TestIxNextgen(unittest.TestCase):
def test_set_random_ip_multi_attributes_bad_ip_version(self):
bad_ip_version = object()
ixnet_gen = IxNextgen(mock.Mock())
- mock1 = mock.Mock()
- mock2 = mock.Mock()
- mock3 = mock.Mock()
with self.assertRaises(ValueError):
- ixnet_gen.set_random_ip_multi_attributes(mock1, bad_ip_version, mock2, mock3)
+ ixnet_gen.set_random_ip_multi_attributes(
+ mock.Mock(), bad_ip_version, mock.Mock(), mock.Mock())
def test_get_config(self):
tg_cfg = {
@@ -659,13 +661,11 @@ class TestIxNextgen(unittest.TestCase):
"version": "test3",
"ixchassis": "test4",
"tcl_port": "test5",
- "py_lib_path": "test6",
},
}
}
expected = {
- 'py_lib_path': 'test6',
'machine': 'test1',
'port': 'test5',
'chassis': 'test4',
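
With py_lib_path removed, test_connect asserts that _connect builds the IxNetwork client itself and connects using the machine, port and version returned by get_config(). Roughly, and only as a sketch of what the assertion implies:

import IxNetwork


def _connect(self, tg_cfg):
    self._cfg = self.get_config(tg_cfg)
    self.ixnet = IxNetwork.IxNet()
    return self.ixnet.connect(self._cfg['machine'], '-port',
                              self._cfg['port'], '-version',
                              str(self._cfg['version']))
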
diff --git a/tests/unit/network_services/test_utils.py b/yardstick/tests/unit/network_services/test_utils.py
index bf98a4474..2b2eb7109 100644
--- a/tests/unit/network_services/test_utils.py
+++ b/yardstick/tests/unit/network_services/test_utils.py
@@ -13,8 +13,6 @@
# limitations under the License.
#
-# Unittest for yardstick.network_services.utils
-
import os
import unittest
import mock
diff --git a/tests/unit/network_services/test_yang_model.py b/yardstick/tests/unit/network_services/test_yang_model.py
index 0b29da701..a7eb36b8a 100644
--- a/tests/unit/network_services/test_yang_model.py
+++ b/yardstick/tests/unit/network_services/test_yang_model.py
@@ -13,14 +13,8 @@
# limitations under the License.
#
-# Unittest for yardstick.network_services.utils
-
-from __future__ import absolute_import
-
-import unittest
import mock
-
-import yaml
+import unittest
from yardstick.network_services.yang_model import YangModel
@@ -95,9 +89,9 @@ class YangModelTestCase(unittest.TestCase):
y._get_entries()
self.assertEqual(y._rules, '')
- @mock.patch('yardstick.network_services.yang_model.yaml_load')
@mock.patch('yardstick.network_services.yang_model.open')
- def test__read_config(self, mock_open, mock_safe_load):
+ @mock.patch('yardstick.network_services.yang_model.yaml_load')
+ def test__read_config(self, mock_safe_load, *args):
cfg = "yang.yaml"
y = YangModel(cfg)
mock_safe_load.return_value = expected = {'key1': 'value1', 'key2': 'value2'}
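
The decorator swap in test__read_config only matters because stacked mock.patch decorators are applied bottom-up: the patch written closest to the function supplies the first mock argument. A small standalone illustration (not from the Yardstick tree):

import os

import mock


@mock.patch.object(os.path, 'isdir')    # outer patch -> second argument
@mock.patch.object(os.path, 'exists')   # inner patch -> first argument
def check(mock_exists, mock_isdir):
    os.path.exists('/tmp')
    mock_exists.assert_called_once_with('/tmp')


check()

This is why mock_safe_load becomes the first parameter once the yaml_load patch is the innermost decorator.
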
diff --git a/yardstick/tests/unit/orchestrator/test_heat.py b/yardstick/tests/unit/orchestrator/test_heat.py
index 9598eeb04..3ec59a3c2 100644
--- a/yardstick/tests/unit/orchestrator/test_heat.py
+++ b/yardstick/tests/unit/orchestrator/test_heat.py
@@ -17,6 +17,7 @@ import shade
import unittest
from yardstick.benchmark.contexts import node
+from yardstick.common import constants
from yardstick.common import exceptions
from yardstick.orchestrator import heat
@@ -53,6 +54,14 @@ class HeatStackTestCase(unittest.TestCase):
self._mock_stack_get.stop()
heat._DEPLOYED_STACKS = {}
+ @mock.patch.object(shade, 'openstack_cloud')
+ def test__init(self, mock_openstack_cloud):
+ os_cloud_config = {'key': 'value'}
+ heatstack = heat.HeatStack('name', os_cloud_config=os_cloud_config)
+ self.assertEqual('name', heatstack.name)
+ os_cloud_config.update(constants.OS_CLOUD_DEFAULT_CONFIG)
+ mock_openstack_cloud.assert_called_once_with(**os_cloud_config)
+
def test_create(self):
template = {'tkey': 'tval'}
heat_parameters = {'pkey': 'pval'}
@@ -192,7 +201,9 @@ class HeatStackTestCase(unittest.TestCase):
class HeatTemplateTestCase(unittest.TestCase):
def setUp(self):
- self.template = heat.HeatTemplate('test')
+ self._os_cloud_config = {'key1': 'value1'}
+ self.template = heat.HeatTemplate(
+ 'test', os_cloud_config=self._os_cloud_config)
def test_add_tenant_network(self):
self.template.add_network('some-network')
@@ -337,8 +348,12 @@ class HeatTemplateTestCase(unittest.TestCase):
def test_create_not_block(self):
heat_stack = mock.Mock()
- with mock.patch.object(heat, 'HeatStack', return_value=heat_stack):
+ with mock.patch.object(heat, 'HeatStack', return_value=heat_stack) \
+ as mock_heatstack:
ret = self.template.create(block=False)
+
+ mock_heatstack.assert_called_once_with(
+ self.template.name, os_cloud_config=self.template._os_cloud_config)
heat_stack.create.assert_called_once_with(
self.template._template, self.template.heat_parameters, False,
3600)
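
Taken together, test__init and the HeatTemplate changes describe how the os_cloud_config dictionary is threaded through: HeatTemplate keeps it and hands it to each HeatStack it creates, and the HeatStack constructor merges in constants.OS_CLOUD_DEFAULT_CONFIG before building the shade cloud. A sketch of that constructor, assuming (as the test's update() call suggests) that the defaults are applied last; the _cloud attribute name is illustrative.

import shade

from yardstick.common import constants


class HeatStack(object):

    def __init__(self, name, os_cloud_config=None):
        self.name = name
        # Merge the caller's cloud config with the Yardstick defaults,
        # mirroring the dict the unit test asserts against.
        os_cloud_config = dict(os_cloud_config or {},
                               **constants.OS_CLOUD_DEFAULT_CONFIG)
        self._cloud = shade.openstack_cloud(**os_cloud_config)
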