108 files changed, 3588 insertions, 1395 deletions
diff --git a/ansible/build_yardstick_image.yml b/ansible/build_yardstick_image.yml
index c926af208..072c12c66 100644
--- a/ansible/build_yardstick_image.yml
+++ b/ansible/build_yardstick_image.yml
@@ -76,18 +76,17 @@
     - name: Debug dump loop devices
       command: losetup -a
-      register: losetup_output
       ignore_errors: true
 
-    - debug:
-        var: losetup_output
-        verbosity: 2
-
     - name: delete loop devices for image file
       # use this because kpartx -dv will fail if raw_imgfile was delete
       # but in theory we could have deleted file still attached to loopback device?
       # use grep because of // and awk
-      shell: losetup -O NAME,BACK-FILE | grep "{{ raw_imgfile_basename }}" | awk '{ print $1 }' | xargs -l1 losetup -d
+      shell: losetup -O NAME,BACK-FILE | grep "{{ raw_imgfile_basename }}" | awk '{ print $1 }' | xargs -l1 losetup -v -d
+      ignore_errors: true
+
+    - name: Debug dump loop devices again
+      command: losetup -a
       ignore_errors: true
 
     - name: delete {{ raw_imgfile }}
diff --git a/ansible/infra_deploy.yml b/ansible/infra_deploy.yml
index 4ad21af00..8cf5dffef 100644
--- a/ansible/infra_deploy.yml
+++ b/ansible/infra_deploy.yml
@@ -13,9 +13,37 @@
 # limitations under the License.
 ---
 - hosts: jumphost
+  vars:
+    rs_file: "{{ RS_FILE }}"
+    clean_up: "{{ CLEAN_UP | default(False) }}"  # If True, all VMs, networks and disk images will be deleted
+
+  tasks:
+    - set_fact:
+        proxy_host: "{{ lookup('env', 'http_proxy') | urlsplit('hostname') }}"
+        proxy_proto: "{{ lookup('env', 'http_proxy') | urlsplit('scheme') }}"
+        proxy_port: "{{ lookup('env', 'http_proxy') | urlsplit('port') }}"
+
+    - set_fact:
+        proxy_host_ip: "{{ lookup('dig', proxy_host) }}"
 
   roles:
-    - infra_check_requirements
     - infra_destroy_previous_configuration
+    - infra_check_requirements
     - infra_create_network
     - infra_create_vms
+    - infra_prepare_vms
+
+- hosts: deploy,regular,yardstickG
+  gather_facts: no
+  become: yes
+
+  roles:
+    - infra_rampup_stack_nodes
+
+
+- hosts: deploy
+  become: yes
+  environment: "{{ proxy_env }}"
+
+  roles:
+    - infra_deploy_openstack
diff --git a/ansible/install.yaml b/ansible/install.yaml
new file mode 100644
index 000000000..afffbede2
--- /dev/null
+++ b/ansible/install.yaml
@@ -0,0 +1,42 @@
+---
+- hosts: localhost
+
+  vars:
+    arch_amd64: "amd64"
+    arch_arm64: "arm64"
+    inst_mode_container: "container"
+    inst_mode_baremetal: "baremetal"
+    ubuntu_archive:
+      amd64: "http://archive.ubuntu.com/ubuntu/"
+      arm64: "http://ports.ubuntu.com/ubuntu-ports/"
+    installation_mode: "{{ INSTALLATION_MODE | default('baremetal') }}"
+    yardstick_dir: "{{ YARDSTICK_DIR | default('/home/opnfv/repos/yardstick') }}"
+    virtual_environment: "{{ VIRTUAL_ENVIRONMENT | default(False) }}"
+    nsb_dir: "{{ NSB_DIR | default('/opt/nsb_bin/') }}"
+
+  pre_tasks:
+
+    - name: Create NSB binaries directory, accessible to any user
+      file:
+        path: "{{ nsb_dir }}"
+        state: directory
+        owner: root
+        mode: 0777
+
+  roles:
+    - add_repos_jumphost
+    - install_dependencies_jumphost
+    - install_yardstick
+    - configure_uwsgi
+    - configure_nginx
+    - download_trex
+    - install_trex
+    - configure_rabbitmq
+
+  post_tasks:
+
+    - service:
+        name: nginx
+        state: restarted
+
+    - shell: uwsgi -i /etc/yardstick/yardstick.ini
diff --git a/ansible/nsb_setup.yml b/ansible/nsb_setup.yml
index 98a59f984..0149054ae 100644
--- a/ansible/nsb_setup.yml
+++ b/ansible/nsb_setup.yml
@@ -22,7 +22,7 @@
   environment: "{{ proxy_env }}"
 
   roles:
-    - install_dependencies
+    - install_dependencies_jumphost
     - docker
 
 - name: "handle all openstack stuff when: openrc_file is defined"
diff --git
a/ansible/roles/add_repos_jumphost/tasks/Debian.yml b/ansible/roles/add_repos_jumphost/tasks/Debian.yml new file mode 100644 index 000000000..626f0b037 --- /dev/null +++ b/ansible/roles/add_repos_jumphost/tasks/Debian.yml @@ -0,0 +1,81 @@ +# Copyright (c) 2018 Intel Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +--- +# Arguments needed: arch_arm64, arch_amd64, ubuntu_archive + +- name: Set the repositories architecture name + set_fact: + arch: "{{ arch_arm64 if ansible_architecture == 'aarch64' else arch_amd64 }}" + extra_arch: "{{ arch_amd64 if ansible_architecture == 'aarch64' else arch_arm64 }}" + +- name: Define the repositories names + set_fact: + repo: "{{ ubuntu_archive[arch] }}" + extra_repo: "{{ ubuntu_archive[extra_arch] }}" + +- name: Add architecture to the default repository list + replace: + path: "{{ sources_list_file }}" + regexp: '(^deb\s+)([^\[].*)$' + replace: 'deb [arch={{ arch }}] \2' + +- name: Remove support for source repositories + replace: + path: "{{ sources_list_file }}" + regexp: "^deb-src " + replace: "# deb-src " + +- name: Add extra architecture + command: "dpkg --add-architecture {{ extra_arch }}" + +- name: Define the default release version + copy: + dest: "{{ default_distro_file }}" + content: 'APT::Default-Release "{{ ansible_distribution_release }}";' + +- name: Remove extra repository file + file: + path: "{{ repo_file }}" + state: absent + ignore_errors: yes + +- name: Add extra repository file + file: + path: "{{ repo_file }}" + state: touch + +- name: Add the repository for qemu_static_user/xenial + blockinfile: + path: "{{ repo_file }}" + marker: "MARKER" + content: | + deb [arch={{ arch }}] {{ repo }} xenial-updates universe + when: ansible_distribution_release != "xenial" + +- name: Add extra architecture repositories if installing in container + blockinfile: + path: "{{ repo_file }}" + marker: "MARKER" + content: | + deb [arch={{ extra_arch }}] {{ extra_repo }} {{ ansible_distribution_release }} main universe multiverse restricted + deb [arch={{ extra_arch }}] {{ extra_repo }} {{ ansible_distribution_release }}-updates main universe multiverse restricted + deb [arch={{ extra_arch }}] {{ extra_repo }} {{ ansible_distribution_release }}-security main universe multiverse restricted + deb [arch={{ extra_arch }}] {{ extra_repo }} {{ ansible_distribution_release }}-proposed main universe multiverse restricted + when: installation_mode == "container" + +- name: Remove the marker + lineinfile: + dest: "{{ repo_file }}" + state: absent + regexp: "MARKER" diff --git a/ansible/install_dependencies.yml b/ansible/roles/add_repos_jumphost/tasks/main.yml index 1c7d20170..f50fd9f0d 100644 --- a/ansible/install_dependencies.yml +++ b/ansible/roles/add_repos_jumphost/tasks/main.yml @@ -1,4 +1,4 @@ -# Copyright (c) 2017 Intel Corporation. +# Copyright (c) 2018 Intel Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -12,8 +12,5 @@ # See the License for the specific language governing permissions and # limitations under the License. --- -- name: install yardstick dependencies - hosts: all - - roles: - - install_dependencies +- include: "{{ ansible_os_family }}.yml" + when: ansible_os_family == "Debian" diff --git a/ansible/roles/add_repos_jumphost/vars/main.yml b/ansible/roles/add_repos_jumphost/vars/main.yml new file mode 100644 index 000000000..30e444711 --- /dev/null +++ b/ansible/roles/add_repos_jumphost/vars/main.yml @@ -0,0 +1,17 @@ +# Copyright (c) 2018 Intel Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +--- +repo_file: "/etc/apt/sources.list.d/yardstick.list" +sources_list_file: "/etc/apt/sources.list" +default_distro_file: "/etc/apt/apt.conf.d/default-distro" diff --git a/ansible/roles/configure_gui/tasks/main.yml b/ansible/roles/configure_gui/tasks/main.yml new file mode 100644 index 000000000..846a9cb47 --- /dev/null +++ b/ansible/roles/configure_gui/tasks/main.yml @@ -0,0 +1,33 @@ +# Copyright (c) 2018 Intel Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +--- +- name: Define variables + set_fact: + gui_dir: "{{ yardstick_dir }}/gui/" + +- name: Run gui.sh + shell: + cmd: /bin/bash gui.sh + chdir: "{{ gui_dir }}" + +- name: Create nginx/yardstick directory + file: + path: /etc/nginx/yardstick + state: directory + recurse: yes + +- name: Move dist to /etc/nginx/yardstick/gui + shell: + cmd: mv dist /etc/nginx/yardstick/gui + chdir: "{{ gui_dir }}" diff --git a/ansible/roles/configure_nginx/tasks/main.yml b/ansible/roles/configure_nginx/tasks/main.yml new file mode 100644 index 000000000..37b052725 --- /dev/null +++ b/ansible/roles/configure_nginx/tasks/main.yml @@ -0,0 +1,33 @@ +# Copyright (c) 2018 Intel Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+--- +- name: Define variables + set_fact: + socket_file: "{{ socket_file|default('/var/run/yardstick.sock') }}" + +- name: Make sure conf.d directory exists + file: + path: /etc/nginx/conf.d + state: directory + +- name: Create the nginx config file + template: + src: yardstick.conf.j2 + dest: "/etc/nginx/conf.d/yardstick.conf" + +- name: Configure ports if RedHat OS + shell: | + semanage port -m -t http_port_t -p tcp 5000 + semanage port -m -t http_port_t -p udp 5000 + when: ansible_os_family == "RedHat"
\ No newline at end of file diff --git a/ansible/roles/configure_nginx/templates/yardstick.conf.j2 b/ansible/roles/configure_nginx/templates/yardstick.conf.j2 new file mode 100644 index 000000000..484096cec --- /dev/null +++ b/ansible/roles/configure_nginx/templates/yardstick.conf.j2 @@ -0,0 +1,18 @@ +server { + listen 5000; + server_name localhost; + index index.htm index.html; + location / { + include uwsgi_params; + client_max_body_size 2000m; + uwsgi_pass unix://{{ socket_file }}; + } + + location /gui/ { + alias /etc/nginx/yardstick/gui/; + } + + location /report/ { + alias /tmp/; + } +} diff --git a/ansible/roles/configure_rabbitmq/tasks/main.yml b/ansible/roles/configure_rabbitmq/tasks/main.yml new file mode 100644 index 000000000..3ad60c1ea --- /dev/null +++ b/ansible/roles/configure_rabbitmq/tasks/main.yml @@ -0,0 +1,30 @@ +# Copyright (c) 2018 Intel Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +--- +- name: Restart rabbitmq + service: + name: rabbitmq-server + state: restarted + +- name: rabbitmqctl start_app + shell: rabbitmqctl start_app + +- name: Configure rabbitmq + rabbitmq_user: + user: yardstick + password: yardstick + configure_priv: .* + read_priv: .* + write_priv: .* + state: present diff --git a/ansible/roles/configure_uwsgi/tasks/main.yml b/ansible/roles/configure_uwsgi/tasks/main.yml new file mode 100644 index 000000000..6a2244657 --- /dev/null +++ b/ansible/roles/configure_uwsgi/tasks/main.yml @@ -0,0 +1,45 @@ +# Copyright (c) 2018 Intel Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+--- +- name: Define variables + set_fact: + config_dir: "/etc/yardstick/" + log_dir: "/var/log/yardstick/" + socket_file: "/var/run/yardstick.sock" + +- name: Create UWSGI config directory + file: + path: "/etc/yardstick" + state: directory + owner: root + mode: 0755 + +- name: Create API log directory + file: + path: "{{ log_dir }}" + state: directory + owner: root + mode: 0777 + +- name: Create the socket for communicating + file: + path: "{{ socket_file }}" + state: touch + owner: root + mode: 0644 + +- name: Create the UWSGI config file + template: + src: yardstick.ini.j2 + dest: "{{ config_dir }}yardstick.ini" diff --git a/ansible/roles/configure_uwsgi/templates/yardstick.ini.j2 b/ansible/roles/configure_uwsgi/templates/yardstick.ini.j2 new file mode 100644 index 000000000..c049daf84 --- /dev/null +++ b/ansible/roles/configure_uwsgi/templates/yardstick.ini.j2 @@ -0,0 +1,18 @@ +[uwsgi] +master = true +debug = true +chdir = {{ yardstick_dir }}api +module = server +plugins = python +processes = 10 +threads = 5 +async = true +max-requests = 5000 +chmod-socket = 666 +callable = app_wrapper +enable-threads = true +close-on-exec = 1 +daemonize = {{ log_dir }}uwsgi.log +socket = {{ socket_file }} +{# If virtual environment, we need to add: + virtualenv = <virtual_env> #}
\ No newline at end of file diff --git a/ansible/roles/docker/tasks/Debian.yml b/ansible/roles/docker/tasks/Debian.yml index cf4128774..7f998de45 100644 --- a/ansible/roles/docker/tasks/Debian.yml +++ b/ansible/roles/docker/tasks/Debian.yml @@ -12,15 +12,5 @@ # See the License for the specific language governing permissions and # limitations under the License. --- - - name: add Ubuntu docker repo - apt_repository: repo='deb [trusted=yes] {{ ubuntu_docker_url }} ubuntu-{{ ansible_distribution_release }} main' state=present - - - name: ensure correct docker version - action: "{{ ansible_pkg_mgr }} name={{ item }} state=present force=yes" - with_items: "{{ docker_packages[ansible_os_family] }}" - - - name: remove Ubuntu docker repo - apt_repository: - repo: 'deb [trusted=yes] {{ ubuntu_docker_url }} ubuntu-{{ ansible_distribution_release }} main' - state: absent - update_cache: no + - name: Install docker.io + action: "{{ ansible_pkg_mgr }} name=docker.io state=present force=yes" diff --git a/ansible/roles/docker/vars/main.yml b/ansible/roles/docker/vars/main.yml index 8b5077490..a735d523d 100644 --- a/ansible/roles/docker/vars/main.yml +++ b/ansible/roles/docker/vars/main.yml @@ -16,5 +16,3 @@ docker_project_url: https://yum.dockerproject.org docker_packages: "RedHat": - docker-engine-1.13.1 - "Debian": - - docker-engine=1.13.1* diff --git a/ansible/roles/download_trex/tasks/main.yml b/ansible/roles/download_trex/tasks/main.yml index baa964fd8..9df67d939 100644 --- a/ansible/roles/download_trex/tasks/main.yml +++ b/ansible/roles/download_trex/tasks/main.yml @@ -12,6 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. --- +- name: make sure trex_dest exists + file: + path: "{{ trex_dest }}" + state: directory + - name: fetch Trex get_url: url: "{{ trex_url }}" diff --git a/ansible/roles/infra_check_requirements/tasks/main.yml b/ansible/roles/infra_check_requirements/tasks/main.yml index a11bc56a1..991bd7383 100644 --- a/ansible/roles/infra_check_requirements/tasks/main.yml +++ b/ansible/roles/infra_check_requirements/tasks/main.yml @@ -12,27 +12,30 @@ # See the License for the specific language governing permissions and # limitations under the License. 
 ---
+- name: Reread system properties
+  setup:
+
 - name: Include
   include_vars:
-    file: "{{rs_file}}"
+    file: "{{ rs_file }}"
     name: infra_deploy_vars
 
 - name: Store total CPU, RAM, Disk requested resources
   set_fact:
-    vcpu_t: "{{item.vcpus|int + vcpu_t|int}}"
-    vram_t: "{{item.ram|int + vram_t|int}}"
-    disk_t: "{{item.disk|int + disk_t|int}}"
-  with_items: "{{infra_deploy_vars.nodes}}"
+    vcpu_t: "{{ item.vcpus|int + vcpu_t | int }}"
+    vram_t: "{{ item.ram|int + vram_t | int }}"
+    disk_t: "{{ item.disk|int + disk_t | int }}"
+  with_items: "{{ infra_deploy_vars.nodes }}"
 
 - name: Fail if not enough RAM
   fail:
     msg: "Failed, not enough RAM, required: {{ vram_t }}, available {{ ansible_memory_mb.nocache.free }}"
-  when: ansible_memory_mb.nocache.free < vram_t|int
+  when: ansible_memory_mb.nocache.free < vram_t | int
 
 - name: Fail if not enough CPU
   fail:
     msg: "Failed, not enough CPU, required: {{ vcpu_t }}, available {{ ansible_processor_vcpus }}"
-  when: ansible_processor_vcpus < vcpu_t|int
+  when: ansible_processor_vcpus < vcpu_t | int
 
 - name: Define default network counter
   set_fact:
@@ -40,20 +43,38 @@
 - name: Increment counter for every default network detected
   set_fact:
-    num_default_network_detected: "{{ num_default_network_detected|int + 1 }}"
+    num_default_network_detected: "{{ num_default_network_detected | int + 1 }}"
   when:
     - item.default_gateway is defined
     - item.default_gateway == True
-  with_items: "{{infra_deploy_vars.networks}}"
+  with_items: "{{ infra_deploy_vars.networks }}"
 
 - name: Fail if more than 1 or 0 default networks
   fail:
     msg: "Failed, there must be 1 default network: {{ num_default_network_detected }} detected"
-  when: num_default_network_detected|int != 1
+  when: num_default_network_detected | int != 1
 
 - name: Fail if not enough Disk space
   set_fact:
-    disk_avail: "{% for mount in ansible_mounts if mount.mount == '/' %}{{ (mount.size_available/1024/1024)|int }}{% endfor %}"
+    disk_avail: "{% for mount in ansible_mounts if mount.mount == '/' %}{{ (mount.size_available/1024/1024) | int }}{% endfor %}"
 
 - fail:
     msg: "Failed, not enough disk space, required {{ disk_t }}, available: {{ disk_avail }}"
-  when: disk_avail|int < disk_t|int
+  when: disk_avail|int < disk_t | int
+
+- set_fact:
+    ostack_nodes: "{{ ostack_nodes | default([]) + [item.openstack_node] }}"
+  when: item.openstack_node is defined
+  with_items: "{{ infra_deploy_vars.nodes }}"
+
+# all-in-one node type must be controller, multinode requires at least one controller and one compute node
+- fail:
+    msg: "OpenStack node types currently supported: controller, compute. Check input VMs file."
+  when: ostack_nodes is undefined or ostack_nodes | length < 1
+
+- fail:
+    msg: "In all-in-one configuration OpenStack node type must be controller."
+  when: ostack_nodes | length == 1 and 'controller' not in ostack_nodes
+
+- fail:
+    msg: "At least one controller and one compute node expected when total number of OpenStack nodes is more than one."
+ when: ostack_nodes | length > 1 and not ('compute' in ostack_nodes and 'controller' in ostack_nodes) diff --git a/ansible/roles/infra_create_vms/tasks/configure_vm.yml b/ansible/roles/infra_create_vms/tasks/configure_vm.yml index c20a0b175..a6a5e0618 100644 --- a/ansible/roles/infra_create_vms/tasks/configure_vm.yml +++ b/ansible/roles/infra_create_vms/tasks/configure_vm.yml @@ -47,8 +47,6 @@ output: all: ">> /var/log/cloud-init.log" ssh_pwauth: True - bootcmd: - - echo 127.0.0.1 {{ node_item.hostname }} >> /etc/hosts users: - name: {{ node_item.user }} lock-passwd: False diff --git a/ansible/roles/infra_deploy_openstack/tasks/configure_kolla.yml b/ansible/roles/infra_deploy_openstack/tasks/configure_kolla.yml new file mode 100644 index 000000000..9713c0d1e --- /dev/null +++ b/ansible/roles/infra_deploy_openstack/tasks/configure_kolla.yml @@ -0,0 +1,40 @@ +# Copyright (c) 2018 Intel Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +--- +- name: Modify globals.yml + replace: + path: /etc/kolla/globals.yml + regexp: "{{ item.find }}" + replace: "{{ item.insert_after }}" + with_items: + - { find: '^#kolla_base_distro:.*', insert_after: 'kolla_base_distro: "ubuntu"' } + - { find: '^#kolla_install_type:.*', insert_after: 'kolla_install_type: "source"' } + - { find: '^#openstack_release:.*', insert_after: 'openstack_release: "pike"' } + - { find: 'kolla_internal_vip_address:.*', insert_after: 'kolla_internal_vip_address: "{{ deployvm_ip }}"' } + - { find: '^#network_interface:.*', insert_after: 'network_interface: "{{ hostvars[ansible_host].ansible_default_ipv4.interface }}"' } + - { find: '^#neutron_external_interface:.*', insert_after: 'neutron_external_interface: "{{ neutron_iface }}"' } + - { find: '^#enable_haproxy:.*', insert_after: 'enable_haproxy: "no"'} + - { find: '^#enable_heat:.*' , insert_after: 'enable_heat: "yes"'} + - { find: '^#docker_registry:.*', insert_after: 'docker_registry: "{{ ansible_host }}:4000"' } + +- name: Generate multinode from inventory + template: + src: templates/multinode.j2 + dest: "{{ git_repos_path + 'multinode' }}" + +- set_fact: + path2multinode: "{{ git_repos_path + kolla_ans_path + '/ansible/inventory/multinode' }}" + +- name: Append rest groups to multinode file + shell: line=`grep -n '\[deployment\]' {{ path2multinode }} | cut -d ':' -f1` && tail -n +$line {{ path2multinode }} >> "{{ git_repos_path + 'multinode' }}" diff --git a/ansible/roles/infra_deploy_openstack/tasks/configure_openstack.yml b/ansible/roles/infra_deploy_openstack/tasks/configure_openstack.yml new file mode 100644 index 000000000..3963cb64c --- /dev/null +++ b/ansible/roles/infra_deploy_openstack/tasks/configure_openstack.yml @@ -0,0 +1,67 @@ +# Copyright (c) 2018 Intel Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +--- +- name: Create folders + file: + path: "{{ item }}" + state: directory + with_items: + - /etc/kolla/config/nova + - /etc/kolla/config/neutron + +- set_fact: + filter_ops: RetryFilter,AvailabilityZoneFilter,RamFilter,DiskFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,SameHostFilter + +- name: Configure OpenStack Nova + copy: + content: | + [filter_scheduler] + enabled_filters = {{ filter_ops }} + [libvirt] + cpu_mode = host-passthrough + dest: /etc/kolla/config/nova.conf + +- name: Configure OpenStack Neutron + copy: + content: | + [DEFAULT] + service_plugins=neutron.services.l3_router.l3_router_plugin.L3RouterPlugin + [securitygroup] + firewall_driver = neutron.agent.firewall.NoopFirewallDriver + [ml2] + extension_drivers=port_security + [agent] + extensions=port_security + dest: /etc/kolla/config/neutron.conf + +- name: Configure OpenStack ml2_plugin.ini + copy: + content: | + [ml2] + tenant_network_types = vxlan + extension_drivers = port_security + type_drivers = vlan,flat,local,vxlan + mechanism_drivers = openvswitch + [ml2_type_flat] + flat_networks = physnet1 + [ml2_type_vlan] + network_vlan_ranges = physnet1 + [securitygroup] + firewall_driver = iptables_hybrid + [ovs] + datapath_type = system + bridge_mappings = physnet1:br-ex + tunnel_bridge = br-tun + local_ip = {{ deployvm_ip }} + dest: /etc/kolla/config/neutron/ml2_plugin.ini diff --git a/ansible/roles/infra_deploy_openstack/tasks/install_kolla.yml b/ansible/roles/infra_deploy_openstack/tasks/install_kolla.yml new file mode 100644 index 000000000..38c163c6c --- /dev/null +++ b/ansible/roles/infra_deploy_openstack/tasks/install_kolla.yml @@ -0,0 +1,54 @@ +# Copyright (c) 2018 Intel Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+--- +- name: Download kolla and kolla-ansible from git repos + git: + repo: "{{ item.repo }}" + dest: "{{ git_repos_path + item.dir }}" + version: stable/pike + with_items: + - { dir: "{{ kolla_path }}", repo: 'https://git.openstack.org/openstack/kolla'} + - { dir: "{{ kolla_ans_path }}", repo: 'https://git.openstack.org/openstack/kolla-ansible' } + +- name: Copy kolla-ansible password.yml and globals.yml + shell: cp -r "{{ git_repos_path + kolla_ans_path + '/etc/kolla/' }}" /etc/ + +- name: Copy kolla-ansible all-in-one, multinode + shell: cp * "{{ git_repos_path }}" + args: + chdir: "{{ git_repos_path + kolla_ans_path + '/ansible/inventory/' }}" + +- name: Install requirements + pip: + chdir: "{{ item[0] }}" + requirements: "{{ item[1] }}" + with_nested: + - [ "{{ git_repos_path + kolla_path }}", "{{ git_repos_path + kolla_ans_path }}" ] + - [ 'requirements.txt', 'test-requirements.txt' ] + +- name: pip install . + pip: + chdir: "{{ item }}" + name: '.' + with_items: + - "{{ git_repos_path + kolla_path }}" + - "{{ git_repos_path + kolla_ans_path }}" + +- name: Run setup.py + shell: "python setup.py install" + args: + chdir: "{{ item }}" + with_items: + - "{{ git_repos_path + kolla_path }}" + - "{{ git_repos_path + kolla_ans_path }}" diff --git a/ansible/roles/infra_deploy_openstack/tasks/main.yml b/ansible/roles/infra_deploy_openstack/tasks/main.yml new file mode 100644 index 000000000..ba5d5bc54 --- /dev/null +++ b/ansible/roles/infra_deploy_openstack/tasks/main.yml @@ -0,0 +1,125 @@ +# Copyright (c) 2018 Intel Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+--- +# This script is based on https://docs.openstack.org/kolla-ansible/pike/user/quickstart.html +- name: Include variables + include_vars: + file: "{{ rs_file }}" + name: infra_deploy_vars + +- set_fact: + traffic_ip: "{{ item.interfaces[1].ip }}" + when: item.hostname == ansible_host + with_items: "{{ infra_deploy_vars.nodes }}" + +- name: Get neutron iface + set_fact: + neutron_iface: "{{ item }}" + when: + - hostvars[ansible_host]['ansible_' + item.replace('-', '_')].ipv4 is defined + - hostvars[ansible_host]['ansible_' + item.replace('-', '_')].ipv4.address is defined + - hostvars[ansible_host]['ansible_' + item.replace('-', '_')].ipv4.address == traffic_ip + with_items: "{{ hostvars[ansible_host].ansible_interfaces }}" + +- name: Create a registry container + docker_container: + name: registry + image: registry:2 + restart_policy: always + ports: + - "4000:5000" + +- name: Download and install Kolla + include_tasks: install_kolla.yml + +- name: Configure Kolla + include_tasks: configure_kolla.yml + +- name: Configure Open Stack + include_tasks: configure_openstack.yml + +- name: Ramp up Open Stack + include_tasks: rampup_openstack.yml + +- name: Update admin-openrc.sh + lineinfile: + path: /etc/kolla/admin-openrc.sh + regexp: "{{ item.find }}" + line: "{{ item.add }}" + with_items: + - { find: 'EXTERNAL_NETWORK', add: 'export EXTERNAL_NETWORK=public' } + - { find: 'OS_AUTH_TYPE', add: 'export OS_AUTH_TYPE=password' } + +- name: Copy env file + shell: cp /etc/kolla/admin-openrc.sh /tmp/admin-openrc.yaml + +- name: Rework as env vars + replace: + path: /tmp/admin-openrc.yaml + regexp: 'export\s+(.*)=(.*)' + replace: '\1: \2' + +- name: Download OpenStack env file + fetch: + src: /tmp/admin-openrc.yaml + dest: /tmp/ + flat: yes + +- include_vars: + file: /tmp/admin-openrc.yaml + name: ostack_env + +- name: Re-assign IP address + shell: ip address show {{ neutron_iface }} | awk '/inet/ {print $2}' + when: neutron_iface is defined + register: ip_netmask + +- shell: > + ip addr del dev {{ neutron_iface }} {{ ip_netmask.stdout }} && + ip addr add dev br-ex {{ infra_deploy_vars.networks[1].host_ip }}/{{ ip_netmask.stdout_lines[0].split('/')[1] }} + when: + - neutron_iface is defined + - ip_netmask.stdout | length > 0 + +- name: Create external network + os_network: + name: public + external: yes + provider_physical_network: physnet1 + provider_network_type: flat + environment: + - no_proxy: "{{ lookup('env', 'no_proxy') + ',' + ansible_host + ',' + hostvars[ansible_host].ansible_default_ipv4.address }}" + - "{{ ostack_env }}" + +- name: Create sub-network + os_subnet: + name: public-subnet + network_name: public + cidr: "{{ ip_netmask.stdout }}" + allocation_pool_start: "{{ infra_deploy_vars.networks[1].dhcp_ip_start }}" + allocation_pool_end: "{{ infra_deploy_vars.networks[1].dhcp_ip_stop }}" + gateway_ip: "{{ infra_deploy_vars.networks[1].host_ip }}" + enable_dhcp: no + environment: + - no_proxy: "{{ lookup('env', 'no_proxy') + ',' + ansible_host + ',' + hostvars[ansible_host].ansible_default_ipv4.address }}" + - "{{ ostack_env }}" + +- name: Upload OpenStack env file to Yardstick VM + copy: + src: /etc/kolla/admin-openrc.sh + dest: '/tmp/admin-openrc.sh' + delegate_to: "{{ item }}" + when: "groups['yardstickG'] is defined" + with_items: + - "{{ groups['yardstickG'] }}" diff --git a/ansible/roles/infra_deploy_openstack/tasks/rampup_openstack.yml b/ansible/roles/infra_deploy_openstack/tasks/rampup_openstack.yml new file mode 100644 index 000000000..c75bec685 --- /dev/null +++ 
b/ansible/roles/infra_deploy_openstack/tasks/rampup_openstack.yml @@ -0,0 +1,43 @@ +# Copyright (c) 2018 Intel Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +--- +- name: Generate passwords + shell: kolla-genpwd + +- name: Generate the kolla-build.conf + shell: tox -e genconfig + args: + chdir: "{{ git_repos_path + kolla_path }}" + +- set_fact: + target: "{{ git_repos_path + 'all-in-one' }}" + +- set_fact: + target: "{{ git_repos_path + 'multinode' }}" + when: "groups['ostack'] | length > 1" + +- name: Run kolla-ansible precheck + shell: kolla-ansible prechecks -i "{{ target }}" + +- name: Build kolla-ansible + shell: kolla-build -b ubuntu -t source --profile default --tag pike --registry {{ ansible_host }}:4000 --push + +- name: Pull images from registry + shell: kolla-ansible pull -i "{{ target }}" + +- name: Run kolla-ansible deploy + shell: kolla-ansible deploy -i "{{ target }}" + +- name: Create an openrc file + shell: kolla-ansible post-deploy diff --git a/ansible/roles/infra_deploy_openstack/templates/multinode.j2 b/ansible/roles/infra_deploy_openstack/templates/multinode.j2 new file mode 100644 index 000000000..57f87b521 --- /dev/null +++ b/ansible/roles/infra_deploy_openstack/templates/multinode.j2 @@ -0,0 +1,39 @@ +{% set control_dict = {} %} +{% set compute_dict = {} %} +{% for host in groups['ostack'] %} +{% if hostvars[host].node_type is defined and hostvars[host].node_type == 'controller' %} +{% set control_dict = control_dict.update({hostvars[host].ansible_host: hostvars[host].ansible_default_ipv4.interface}) %} +{% endif %} +{% endfor %} +{% for host in groups['ostack'] %} +{% if hostvars[host].node_type is defined and hostvars[host].node_type == 'compute' %} +{% for iface in hostvars[host].ansible_interfaces %} +{%- if ((hostvars[host]['ansible_' + iface.replace('-', '_')].ipv4 is defined) and + (hostvars[host]['ansible_' + iface.replace('-', '_')].ipv4.address is defined) and + (hostvars[host]['ansible_' + iface.replace('-', '_')].ipv4.address == hostvars[host].secondary_ip)) -%} +{% set compute_dict = compute_dict.update({hostvars[host].ansible_host: iface}) %} +{% endif %} +{% endfor %} +{% endif %} +{% endfor %} +{% macro print_node(in_dict, iface_str='', cnt=1) %} +{%- for host, iface in in_dict | dictsort -%} +{% if loop.index <= cnt %} +{% if iface_str %} +{{ host }} ansible_ssh_user={{ hostvars[host].ansible_user }} ansible_private_key_file=/root/.ssh/id_rsa ansible_become=True {{ iface_str }}={{ iface }} +{% else %} +{{ host }} ansible_ssh_user={{ hostvars[host].ansible_user }} ansible_private_key_file=/root/.ssh/id_rsa ansible_become=True +{% endif %} +{% endif %} +{% endfor %} +{% endmacro %} +[control] +{{ print_node(control_dict, iface_str='network_interface', cnt=control_dict | length) }} +[compute] +{{ print_node(compute_dict, iface_str='network_interface', cnt=compute_dict | length) }} +[network] +{{ print_node(control_dict, iface_str='', cnt=control_dict | length) }} +[monitoring] +{{ print_node(control_dict) }} +[storage] +{{ 
print_node(control_dict, iface_str='', cnt=control_dict | length) }} diff --git a/ansible/roles/infra_deploy_openstack/vars/main.yml b/ansible/roles/infra_deploy_openstack/vars/main.yml new file mode 100644 index 000000000..bbea56847 --- /dev/null +++ b/ansible/roles/infra_deploy_openstack/vars/main.yml @@ -0,0 +1,18 @@ +# Copyright (c) 2018 Intel Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +--- +kolla_path: "{{ 'https://git.openstack.org/openstack/kolla' | urlsplit('path') | basename }}" +kolla_ans_path: "{{ 'https://git.openstack.org/openstack/kolla-ansible' | urlsplit('path') | basename }}" +deployvm_ip: "{{ hostvars[ansible_host].host_ip }}" +git_repos_path: '/tmp/repos/' diff --git a/ansible/roles/infra_destroy_previous_configuration/tasks/delete_network.yml b/ansible/roles/infra_destroy_previous_configuration/tasks/delete_network.yml index 314ee30af..5e616335a 100644 --- a/ansible/roles/infra_destroy_previous_configuration/tasks/delete_network.yml +++ b/ansible/roles/infra_destroy_previous_configuration/tasks/delete_network.yml @@ -14,18 +14,18 @@ --- - name: Destroy old networks created by virt virt_net: - name: "{{ network_item.name }}" + name: "{{ network_item }}" command: destroy - when: network_item.name in virt_nets.list_nets + when: clean_up | bool or network_item in deploy_nets -# Ignoring erros as network can be created without being defined. +# Ignoring errors as network can be created without being defined. # This can happen if a user manually creates a network using the virsh command. # If the network is not defined the undefine code will throw an error. 
- name: Undefine old networks defined by virt virt_net: - name: "{{ network_item.name }}" + name: "{{ network_item }}" command: undefine - when: network_item.name in virt_nets.list_nets + when: clean_up | bool or network_item in deploy_nets ignore_errors: yes - name: Check if "ovs-vsctl" command is present @@ -34,15 +34,20 @@ ignore_errors: yes - name: Destroy OVS bridge if it exists - command: ovs-vsctl --if-exists -- del-br "{{ network_item.name }}" - when: ovs_vsctl_present.rc == 0 + command: ovs-vsctl --if-exists -- del-br "{{ network_item }}" + when: + - ovs_vsctl_present.rc == 0 + - clean_up | bool or network_item in deploy_nets + ignore_errors: yes - name: Check if linux bridge is present - stat: path="{{ '/sys/class/net/'+network_item.name+'/brif/' }}" + stat: path="{{ '/sys/class/net/' + network_item + '/brif/' }}" register: check_linux_bridge - name: Remove linux bridge if it exists shell: | - ifconfig "{{ network_item.name }}" down - brctl delbr "{{ network_item.name }}" - when: check_linux_bridge.stat.exists + ifconfig "{{ network_item }}" down + brctl delbr "{{ network_item }}" + when: + - check_linux_bridge.stat.exists + - clean_up | bool or network_item in deploy_nets diff --git a/ansible/roles/infra_destroy_previous_configuration/tasks/delete_vm.yml b/ansible/roles/infra_destroy_previous_configuration/tasks/delete_vm.yml index 5e43ee81e..91e949344 100644 --- a/ansible/roles/infra_destroy_previous_configuration/tasks/delete_vm.yml +++ b/ansible/roles/infra_destroy_previous_configuration/tasks/delete_vm.yml @@ -16,14 +16,14 @@ - name: Destroy old VMs virt: command: destroy - name: "{{ node_item.hostname }}" - when: node_item.hostname in virt_vms.list_vms + name: "{{ vmhost_item }}" + when: clean_up | bool or vmhost_item in deploy_vms ignore_errors: yes # Ignore errors as VM can be running while undefined - name: Undefine old VMs virt: command: undefine - name: "{{ node_item.hostname }}" - when: node_item.hostname in virt_vms.list_vms + name: "{{ vmhost_item }}" + when: clean_up | bool or vmhost_item in deploy_vms ignore_errors: yes diff --git a/ansible/roles/infra_destroy_previous_configuration/tasks/main.yml b/ansible/roles/infra_destroy_previous_configuration/tasks/main.yml index e6c2c0229..6c4aa33cf 100644 --- a/ansible/roles/infra_destroy_previous_configuration/tasks/main.yml +++ b/ansible/roles/infra_destroy_previous_configuration/tasks/main.yml @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
--- -- name: Include +- name: Include input file include_vars: file: "{{ rs_file }}" name: infra_deploy_vars @@ -25,16 +25,40 @@ virt: command=list_vms register: virt_vms +- set_fact: + deploy_vms: "{{ deploy_vms | default([]) + [item.hostname] }}" + with_items: "{{ infra_deploy_vars.nodes }}" + +- name: Define old disk images to delete + shell: virsh domblklist {{ item }} | awk '/\/.*/ { print $2 }' + when: clean_up | bool or item in deploy_vms + with_items: "{{ virt_vms.list_vms }}" + register: virt_img + +- set_fact: + images: "{{ images | default([]) + item.stdout_lines }}" + when: item.stdout_lines is defined and item.stdout_lines | length > 0 + with_items: "{{ virt_img.results }}" + - name: Destroy old VMs include_tasks: delete_vm.yml - extra_vars: "{{ virt_vms }}" loop_control: - loop_var: node_item - with_items: "{{ infra_deploy_vars.nodes }}" + loop_var: vmhost_item + with_items: "{{ virt_vms.list_vms }}" + +- set_fact: + deploy_nets: "{{ deploy_nets | default([]) + [item.name] }}" + with_items: "{{ infra_deploy_vars.networks }}" - name: Delete old networks include_tasks: delete_network.yml - extra_vars: "{{ virt_nets }}" loop_control: loop_var: network_item - with_items: "{{ infra_deploy_vars.networks }}" + with_items: "{{ virt_nets.list_nets }}" + +- name: Delete old disk images + file: + path: "{{ item }}" + state: absent + when: images is defined and images | length > 0 + with_items: "{{ images }}" diff --git a/ansible/roles/infra_prepare_vms/tasks/main.yml b/ansible/roles/infra_prepare_vms/tasks/main.yml new file mode 100644 index 000000000..d7ed08511 --- /dev/null +++ b/ansible/roles/infra_prepare_vms/tasks/main.yml @@ -0,0 +1,105 @@ +# Copyright (c) 2018 Intel Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+--- +- name: Include input file + include_vars: + file: "{{ rs_file }}" + name: infra_deploy_vars + +- name: Install setuptools + apt: + name: python-setuptools + +- name: Install pip + shell: easy_install pip + environment: "{{ proxy_env }}" + +- name: Install dependency for dns dig + pip: + name: dnspython + state: latest + +- set_fact: + block_str: "{{ block_str | default('') + item.interfaces[0].ip + ' ' + item.hostname + '\n'}}" + with_items: "{{ infra_deploy_vars.nodes }}" + +- name: Delete hosts between markers + blockinfile: + path: /etc/hosts + marker: "# {mark} generated hosts file" + content: "" + +- name: Update /etc/hosts + blockinfile: + path: /etc/hosts + block: | + {{ block_str }} + marker: "# {mark} generated hosts file" + +- name: Clear known hosts + shell: > + ssh-keygen -f /root/.ssh/known_hosts -R "{{ item.interfaces[0].ip }}"; + ssh-keygen -f /root/.ssh/known_hosts -R "{{ item.hostname }}" + with_items: "{{ infra_deploy_vars.nodes }}" + +- set_fact: + controllers: "{{ controllers | default([]) + [item.hostname] }}" + when: + - item.openstack_node is defined + - item.openstack_node == 'controller' + with_items: "{{ infra_deploy_vars.nodes }}" + +- name: Add host controller as deploy + add_host: + hostname: "{{ item.hostname }}" + host_ip: "{{ item.interfaces[0].ip }}" + groups: deploy, ostack + ansible_host: "{{ item.hostname }}" + ansible_user: "{{ item.user }}" + ansible_ssh_pass: "{{ item.password }}" + node_type: "{{ item.openstack_node }}" + secondary_ip: "{{ item.interfaces[1].ip }}" + when: item.hostname == controllers[0] + with_items: "{{ infra_deploy_vars.nodes }}" + +- name: Add hosts others as controller, compute + add_host: + hostname: "{{ item.hostname }}" + host_ip: "{{ item.interfaces[0].ip }}" + groups: regular,ostack + ansible_host: "{{ item.hostname }}" + ansible_user: "{{ item.user }}" + ansible_ssh_pass: "{{ item.password }}" + node_type: "{{ item.openstack_node }}" + secondary_ip: "{{ item.interfaces[1].ip }}" + when: + - item.openstack_node is defined + - item.openstack_node == 'controller' or item.openstack_node == 'compute' + - item.hostname != controllers[0] + with_items: "{{ infra_deploy_vars.nodes }}" + +- name: Add yardstick host to group + add_host: + hostname: "{{ item.hostname }}" + host_ip: "{{ item.interfaces[0].ip }}" + groups: yardstickG + ansible_host: "{{ item.hostname }}" + ansible_user: "{{ item.user }}" + ansible_ssh_pass: "{{ item.password }}" + secondary_ip: "{{ item.interfaces[1].ip }}" + when: item.hostname == 'yardstickvm' + with_items: "{{ infra_deploy_vars.nodes }}" + +- name: Workaround, not all VMs are ready by that time + pause: seconds=20 diff --git a/ansible/roles/infra_rampup_stack_nodes/tasks/configure_docker.yml b/ansible/roles/infra_rampup_stack_nodes/tasks/configure_docker.yml new file mode 100644 index 000000000..a6ae00e51 --- /dev/null +++ b/ansible/roles/infra_rampup_stack_nodes/tasks/configure_docker.yml @@ -0,0 +1,48 @@ +# Copyright (c) 2018 Intel Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+--- +- file: + path: /lib/systemd/system/docker.service.d + state: directory + +- copy: + content: | + [Service] + MountFlags=shared + dest: /lib/systemd/system/docker.service.d/kolla.conf + +- set_fact: + ostack_hosts: "{{ ostack_hosts | default([]) + [hostvars[item].ansible_host] }}" + with_items: "{{ groups['ostack'] }}" + +- name: Create proxy configuration for docker + copy: + content: | + [Service] + Environment="HTTP_PROXY={{ lookup('env', 'http_proxy') }}" + Environment="HTTPS_PROXY={{ lookup('env', 'https_proxy') }}" + Environment="FTP_PROXY={{ lookup('env', 'ftp_proxy') }}" + Environment="NO_PROXY={{ lookup('env', 'no_proxy') }},{{ hostvars[ansible_host].ansible_default_ipv4.address }},{{ ostack_hosts | join(',') }}" + dest: /lib/systemd/system/docker.service.d/http-proxy.conf + +- name: Update /etc/default/docker + lineinfile: + path: /etc/default/docker + line: 'DOCKER_OPTS="--dns {{ hostvars[ansible_host].ansible_default_ipv4.gateway }} --insecure-registry {{ deploy_host }}:4000"' + +- name: reload restart docker + systemd: + state: restarted + daemon_reload: yes + name: docker diff --git a/ansible/roles/infra_rampup_stack_nodes/tasks/install_packets.yml b/ansible/roles/infra_rampup_stack_nodes/tasks/install_packets.yml new file mode 100644 index 000000000..d22e8155a --- /dev/null +++ b/ansible/roles/infra_rampup_stack_nodes/tasks/install_packets.yml @@ -0,0 +1,85 @@ +# Copyright (c) 2018 Intel Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+--- +- name: Update apt cache + apt: + update_cache: yes + upgrade: yes + cache_valid_time: 36000 + environment: "{{ proxy_env }}" + +- name: Install packets + apt: + name: "{{ item }}" + with_items: + - python-tox + - python-dev + - libffi-dev + - libssl-dev + - python3-dev + - ethtool + - ipmitool + - git + - ntp + - apparmor-utils + - docker.io + - libvirt-bin + - python-setuptools + - build-essential + environment: "{{ proxy_env }}" + +- name: Install pip + shell: easy_install pip + environment: "{{ proxy_env }}" + +- name: Update pip ansible docker + pip: + name: "{{ item }}" + state: latest + with_items: + - ansible + - docker + - tox + - shade + environment: "{{ proxy_env }}" + +- name: Remove conflicting packages + apt: + name: "{{ item }}" + state: absent + with_items: + - lxd + - lxc + +- name: Stop and disable libvirt + systemd: + state: stopped + enabled: no + name: libvirt-bin.service + +- name: Stop and disable apparmor service + systemd: + name: apparmor + state: stopped + enabled: no + +- name: Get stat of libvirtd apparmor profile + stat: + path: /etc/apparmor.d/disable/usr.sbin.libvirtd + register: apparmor_libvirtd_profile + +- name: Remove apparmor profile for libvirt + shell: ln -s /etc/apparmor.d/usr.sbin.libvirtd /etc/apparmor.d/disable/ && apparmor_parser -R /etc/apparmor.d/usr.sbin.libvirtd + when: + - apparmor_libvirtd_profile.stat.exists == False diff --git a/ansible/roles/infra_rampup_stack_nodes/tasks/main.yml b/ansible/roles/infra_rampup_stack_nodes/tasks/main.yml new file mode 100644 index 000000000..65d5e59d8 --- /dev/null +++ b/ansible/roles/infra_rampup_stack_nodes/tasks/main.yml @@ -0,0 +1,39 @@ +# Copyright (c) 2018 Intel Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +--- +# Configure proxy and install python to support ansible +- name: Create apt.conf proxy config + raw: > + echo 'Acquire::http::proxy "{{ hostvars[groups['jumphost'][0]].proxy_proto + '://' + hostvars[groups['jumphost'][0]].proxy_host_ip + ':' + hostvars[groups['jumphost'][0]].proxy_port }}";' + > /etc/apt/apt.conf.d/22proxy + +- name: Install python which is required to run ansible mudules + raw: apt-get update && apt-get install -y python + +- name: Gather facts + setup: + +- name: Update configuration files + include_tasks: update_conf_files.yml + +- name: Install packets + include_tasks: install_packets.yml + when: ansible_hostname in groups['ostack'] + +- name: Configure docker settings + include_tasks: configure_docker.yml + when: ansible_hostname in groups['ostack'] + +- name: generate and apply SSH keys + include_tasks: update_keys.yml diff --git a/ansible/roles/infra_rampup_stack_nodes/tasks/update_conf_files.yml b/ansible/roles/infra_rampup_stack_nodes/tasks/update_conf_files.yml new file mode 100644 index 000000000..424fb543b --- /dev/null +++ b/ansible/roles/infra_rampup_stack_nodes/tasks/update_conf_files.yml @@ -0,0 +1,69 @@ +# Copyright (c) 2018 Intel Corporation. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +--- +- name: Set hostname + shell: hostname {{ ansible_hostname }} + +- name: Delete hosts between markers + blockinfile: + path: /etc/hosts + marker: "# {mark} generated hosts file" + content: "" + +- set_fact: + block_str: "{{ block_str | default('') + hostvars[item].host_ip + ' ' + hostvars[item].ansible_host + '\n'}}" + with_items: "{{ groups['ostack'] }}" + +- name: Update /etc/hosts + blockinfile: + path: /etc/hosts + block: | + {{ block_str }} + marker: "# {mark} generated hosts file" + +- name: Update /etc/hosts + lineinfile: + path: /etc/hosts + regexp: ".*{{ hostvars[groups['jumphost'][0]].proxy_host }}.*" + line: "{{ hostvars[groups['jumphost'][0]].proxy_host_ip }} {{ hostvars[groups['jumphost'][0]].proxy_host }}" + +- name: Turn off IPv6 + lineinfile: + path: /etc/sysctl.conf + regexp: '^{{ item }}.*' + line: "{{ item }} = 1" + with_items: + - 'net.ipv6.conf.all.disable_ipv6' + - 'net.ipv6.conf.default.disable_ipv6' + - 'net.ipv6.conf.lo.disable_ipv6' + +- name: Update IP configuration + shell: sysctl -p + +- name: Update resolv.conf + shell: echo "{{ 'nameserver ' + hostvars[ansible_host].ansible_default_ipv4.gateway }}" > /etc/resolvconf/resolv.conf.d/base + +- name: Update name servers + shell: resolvconf -u + +- name: Update /etc/environment + lineinfile: + path: /etc/environment + regexp: "{{ item.find }}" + line: "{{ item.add }}" + with_items: + - { find: 'http_proxy=', add: "{{ 'export http_proxy=' + lookup('env', 'http_proxy') }}" } + - { find: 'https_proxy=', add: "{{ 'export https_proxy=' + lookup('env', 'https_proxy') }}" } + - { find: 'ftp_proxy=', add: "{{ 'export ftp_proxy=' + lookup('env', 'ftp_proxy') }}" } + - { find: 'no_proxy=', add: "{{ 'export no_proxy=' + lookup('env', 'no_proxy') + ',' + ansible_host + ',' + hostvars[ansible_host].ansible_default_ipv4.address }}" } diff --git a/ansible/roles/infra_rampup_stack_nodes/tasks/update_keys.yml b/ansible/roles/infra_rampup_stack_nodes/tasks/update_keys.yml new file mode 100644 index 000000000..816f7cbca --- /dev/null +++ b/ansible/roles/infra_rampup_stack_nodes/tasks/update_keys.yml @@ -0,0 +1,48 @@ +# Copyright (c) 2018 Intel Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+--- +- name: Generate keys + user: + name: "{{ hostvars[ansible_host].ansible_user }}" + generate_ssh_key: yes + state: present + ssh_key_file: "/root/.ssh/id_rsa" + +- name: Get remote files + fetch: + src: "/root/.ssh/id_rsa.pub" + dest: "/tmp" + +- name: Update authorized_key + authorized_key: + key: "{{ lookup('file', '/tmp/{{ hostvars[item].ansible_host }}/root/.ssh/id_rsa.pub') }}" + state: present + user: "{{ hostvars[item].ansible_user }}" + with_items: + - "{{ groups['ostack'] }}" + - "{{ groups['yardstickG'] }}" + +- name: Make sure the known hosts file exists + file: + path: "{{ ssh_known_hosts_file }}" + state: touch + +- name: Add key to known hosts + known_hosts: + name: "{{ hostvars[item].ansible_host }}" + key: "{{ lookup('pipe', 'ssh-keyscan -t rsa {{ hostvars[item].ansible_host }}') }}" + path: "{{ ssh_known_hosts_file }}" + with_items: + - "{{ groups['ostack'] }}" + - "{{ groups['yardstickG'] }}" diff --git a/ansible/roles/infra_rampup_stack_nodes/vars/main.yml b/ansible/roles/infra_rampup_stack_nodes/vars/main.yml new file mode 100644 index 000000000..252eb86b3 --- /dev/null +++ b/ansible/roles/infra_rampup_stack_nodes/vars/main.yml @@ -0,0 +1,16 @@ +# Copyright (c) 2018 Intel Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +--- +ssh_known_hosts_file: "/root/.ssh/known_hosts" +deploy_host: "{{ hostvars[groups['deploy'][0]].ansible_host }}" diff --git a/ansible/roles/install_dependencies_jumphost/tasks/Debian.yml b/ansible/roles/install_dependencies_jumphost/tasks/Debian.yml new file mode 100755 index 000000000..9baf7e59e --- /dev/null +++ b/ansible/roles/install_dependencies_jumphost/tasks/Debian.yml @@ -0,0 +1,76 @@ +# Copyright (c) 2017 Intel Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+--- +- name: Update repositories + apt: + update_cache: yes + +- name: Install core packages + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" + with_items: + - wget + - curl + - screen + - procps + - socat + - sshpass + - sudo + - vim + - libffi-dev + - libfuse-dev + - libssl-dev + - libxft-dev + - libxml2-dev + - libxss-dev + - libxslt-dev + - libxslt1-dev + - libzmq-dev + - qemu-user-static + - qemu-utils + - kpartx + - python + - python-setuptools + - python-dev + - python-pip + - python-libvirt + - python-virtualenv + - bridge-utils + - ebtables + - openssl + - ccze + - nginx-full + - uwsgi + - uwsgi-plugin-python + - supervisor + - lsof + - nodejs + - npm + - rabbitmq-server + +- name: Install libc6:arm64 package + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" + with_items: + - libc6:arm64 + when: + - arch is defined + - arch != arch_arm64 + - installation_mode == inst_mode_container + +- name: Remove dependencies that are no longer required + apt: + autoremove: yes + +- name: Remove useless packages from the cache + apt: + autoclean: yes diff --git a/ansible/roles/install_dependencies/tasks/RedHat.yml b/ansible/roles/install_dependencies_jumphost/tasks/RedHat.yml index a5d4d0b15..85eb1156a 100644 --- a/ansible/roles/install_dependencies/tasks/RedHat.yml +++ b/ansible/roles/install_dependencies_jumphost/tasks/RedHat.yml @@ -42,5 +42,13 @@ - python-setuptools - libffi-devel - python-devel - - kpartx - + - nodejs + - npm + - gcc + - lsof + - procps + - bridge-utils + - ebtables + - openssl + - python-virtualenv + - ccze diff --git a/ansible/roles/install_dependencies/tasks/Debian.yml b/ansible/roles/install_dependencies_jumphost/tasks/Suse.yml index bba6fb13c..af53c9cd5 100755..100644 --- a/ansible/roles/install_dependencies/tasks/Debian.yml +++ b/ansible/roles/install_dependencies_jumphost/tasks/Suse.yml @@ -1,4 +1,4 @@ -# Copyright (c) 2017 Intel Corporation. +# Copyright (c) 2018 Intel Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,39 +12,38 @@ # See the License for the specific language governing permissions and # limitations under the License.
--- +- name: Install EPEL if needed + action: "{{ ansible_pkg_mgr }} name=epel-release state=present" + when: ansible_distribution in ['RedHat', 'CentOS', 'ScientificLinux'] + - name: Install core packages action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" with_items: - - python-minimal + - deltarpm - wget + - expect - curl - screen - - procps - git - socat - sshpass - - libxslt1-dev - - libffi-dev - - libfuse-dev - qemu-kvm - - qemu-user-static - - qemu-utils - kpartx - - libvirt0 - - python-libvirt + - libxslt-devel + - libffi-devel + - openssl-devel + - nginx + - uwsgi + - python-setuptools + - libffi-devel + - python-devel + - nodejs + - npm + - gcc + - lsof + - procps - bridge-utils - ebtables - openssl - - libssl-dev - - python-dev - python-virtualenv - ccze - - libxml2-dev - - libxslt-dev - - libzmq-dev - - nginx-full - - uwsgi - - uwsgi-plugin-python - - supervisor - - python-setuptools - - lsof diff --git a/ansible/roles/install_dependencies/tasks/main.yml b/ansible/roles/install_dependencies_jumphost/tasks/main.yml index 27660c3ca..27660c3ca 100644 --- a/ansible/roles/install_dependencies/tasks/main.yml +++ b/ansible/roles/install_dependencies_jumphost/tasks/main.yml diff --git a/ansible/roles/install_dpdk/vars/main.yml b/ansible/roles/install_dpdk/vars/main.yml index 5dec63776..957f47e99 100644 --- a/ansible/roles/install_dpdk/vars/main.yml +++ b/ansible/roles/install_dpdk/vars/main.yml @@ -1,5 +1,8 @@ --- -dpdk_make_arch: x86_64-native-linuxapp-gcc +dpdk_make_archs: + "amd64": "x86_64-native-linuxapp-gcc" + "arm64": "arm64-native-linuxapp-gcc" +dpdk_make_arch: "{{ dpdk_make_archs[YARD_IMG_ARCH] }}" dpdk_module_dir: "/lib/modules/{{ dpdk_kernel }}/extra" hugetable_mount: /mnt/huge dpdk_devbind_tools: "{{ dpdk_path }}/tools/dpdk-devbind.py" diff --git a/ansible/roles/install_dpdk_shared/vars/main.yml b/ansible/roles/install_dpdk_shared/vars/main.yml index eadf35a03..b663cedd2 100644 --- a/ansible/roles/install_dpdk_shared/vars/main.yml +++ b/ansible/roles/install_dpdk_shared/vars/main.yml @@ -1,5 +1,8 @@ --- -dpdk_make_arch: x86_64-native-linuxapp-gcc +dpdk_make_archs: + "amd64": "x86_64-native-linuxapp-gcc" + "arm64": "arm64-native-linuxapp-gcc" +dpdk_make_arch: "{{ dpdk_make_archs[YARD_IMG_ARCH] }}" dpdk_module_dir: "/lib/modules/{{ dpdk_kernel }}/extra" hugetable_mount: /mnt/huge dpdk_pmd_path: /usr/lib/dpdk-pmd/ diff --git a/ansible/roles/install_yardstick/tasks/main.yml b/ansible/roles/install_yardstick/tasks/main.yml new file mode 100644 index 000000000..ee1b83756 --- /dev/null +++ b/ansible/roles/install_yardstick/tasks/main.yml @@ -0,0 +1,46 @@ +# Copyright (c) 2018 Intel Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+--- +# Arguments needed: map_min_addr_file, yardstick_dir + +- name: Define variables + set_fact: + map_min_addr_file: "/etc/sysctl.d/mmap_min_addr.conf" + +- name: Remove the kernel minimum virtual address restriction that a process is allowed to mmap + copy: + dest: "{{ map_min_addr_file }}" + content: "vm.mmap_min_addr = 0\n" + +- name: Config git SSL + git_config: + name: http.sslVerify + scope: global + value: False + +# There is a bug with the easy install ansible module in suse linux. +# Until this is fixed the shell command must be used +- name: Install pip + shell: easy_install -U pip +# easy_install: +# name: pip +# state: latest + +- name: install yardstick without virtual environment + include_tasks: regular_install.yml + when: virtual_environment == False + +- name: install yardstick with virtual environment + include_tasks: virtual_install.yml + when: virtual_environment == True diff --git a/ansible/roles/install_yardstick/tasks/regular_install.yml b/ansible/roles/install_yardstick/tasks/regular_install.yml new file mode 100644 index 000000000..4a9925ab4 --- /dev/null +++ b/ansible/roles/install_yardstick/tasks/regular_install.yml @@ -0,0 +1,22 @@ +# Copyright (c) 2018 Intel Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +--- +- name: Install Yardstick requirements + pip: + requirements: "{{ yardstick_dir }}/requirements.txt" + +- name: Install Yardstick code + pip: + name: "{{ yardstick_dir }}/." + extra_args: -e diff --git a/ansible/roles/install_yardstick/tasks/virtual_install.yml b/ansible/roles/install_yardstick/tasks/virtual_install.yml new file mode 100644 index 000000000..8545acbcb --- /dev/null +++ b/ansible/roles/install_yardstick/tasks/virtual_install.yml @@ -0,0 +1,25 @@ +# Copyright (c) 2018 Intel Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +--- +- name: Install Yardstick requirements + pip: + requirements: "{{ yardstick_dir }}/requirements.txt" + virtualenv: "{{ yardstick_dir }}/virtualenv" + +- name: Install Yardstick code + pip: + name: "{{ yardstick_dir }}/." 
+ extra_args: -e + virtualenv: "{{ yardstick_dir }}/virtualenv" + diff --git a/dashboard/opnfv_yardstick_tc058.json b/dashboard/opnfv_yardstick_tc058.json new file mode 100644 index 000000000..55b5a5f33 --- /dev/null +++ b/dashboard/opnfv_yardstick_tc058.json @@ -0,0 +1,265 @@ +{ + "annotations": { + "list": [] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "id": 33, + "links": [], + "refresh": "1m", + "rows": [ + { + "collapse": false, + "height": 343, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "yardstick", + "description": "", + "fill": 1, + "id": 1, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": true, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 9, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "", + "dsType": "influxdb", + "groupBy": [], + "measurement": "opnfv_yardstick_tc058", + "orderByTime": "ASC", + "policy": "default", + "query": "SELECT \"server-status_outage_time\" FROM \"opnfv_yardstick_tc058\" WHERE $timeFilter", + "rawQuery": false, + "refId": "A", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "server-status_outage_time" + ], + "type": "field" + } + ] + ], + "tags": [] + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "gt", + "value": 5 + }, + { + "colorMode": "ok", + "fill": true, + "line": true, + "op": "lt", + "value": 5 + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Server Status outage time", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "cacheTimeout": null, + "colorBackground": true, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "yardstick", + "format": "short", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "id": 4, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "alias": "", + "dsType": "influxdb", + "groupBy": [], + "measurement": "opnfv_yardstick_tc058", + "orderByTime": "ASC", + "policy": "default", + "query": "SELECT \"sla_pass\" FROM \"opnfv_yardstick_tc058\" WHERE $timeFilter", + "rawQuery": false, + "refId": "A", + "resultFormat": "time_series", + "select": [ + [ + { + 
"params": [ + "sla_pass" + ], + "type": "field" + } + ] + ], + "tags": [] + } + ], + "thresholds": "0.5,1", + "title": "SLA PASS/FAIL", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6" + } + ], + "schemaVersion": 14, + "style": "dark", + "tags": [ + "HA" + ], + "templating": { + "list": [] + }, + "time": { + "from": "2018-03-26T09:00:00.000Z", + "to": "2018-03-28T08:59:59.998Z" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "opnfv_yardstick_tc058", + "version": 8 +} diff --git a/docker/Dockerfile b/docker/Dockerfile index b97337e4d..5813f0245 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -20,35 +20,41 @@ ENV REPOS_DIR="/home/opnfv/repos" \ # Set work directory # Yardstick repo -ENV YARDSTICK_REPO_DIR="${REPOS_DIR}/yardstick" \ +ENV YARDSTICK_REPO_DIR="${REPOS_DIR}/yardstick/" \ + RELENG_REPO_DIR="${REPOS_DIR}/releng" \ STORPERF_REPO_DIR="${REPOS_DIR}/storperf" -RUN apt-get update && apt-get install -y git python-setuptools python-pip && apt-get -y autoremove && apt-get clean +RUN apt-get update && apt-get install -y git python python-setuptools python-pip && apt-get -y autoremove && apt-get clean RUN easy_install -U setuptools==30.0.0 -RUN pip install appdirs==1.4.0 pyopenssl==17.5.0 python-openstackclient==3.11.0 python-heatclient==1.11.0 +RUN pip install appdirs==1.4.0 pyopenssl==17.5.0 python-openstackclient==3.11.0 python-heatclient==1.11.0 ansible==2.4.2 RUN mkdir -p ${REPOS_DIR} RUN git config --global http.sslVerify false +#For developers: To test your changes you must comment out the git clone for ${YARDSTICK_REPO_DIR}. +#You must also uncomment the RUN and COPY commands below. +#You must run docker build from your yardstick directory on the host. 
RUN git clone --depth 1 -b $BRANCH https://gerrit.opnfv.org/gerrit/yardstick ${YARDSTICK_REPO_DIR} +#RUN mkdir ${YARDSTICK_REPO_DIR} +#COPY ./ ${YARDSTICK_REPO_DIR} +RUN git clone --depth 1 https://gerrit.opnfv.org/gerrit/releng ${RELENG_REPO_DIR} RUN git clone --depth 1 -b $BRANCH https://gerrit.opnfv.org/gerrit/storperf ${STORPERF_REPO_DIR} -WORKDIR ${YARDSTICK_REPO_DIR} -RUN ${YARDSTICK_REPO_DIR}/install.sh +RUN ansible-playbook -c local -vvv -e INSTALLATION_MODE="container" ${YARDSTICK_REPO_DIR}/ansible/install.yaml + RUN ${YARDSTICK_REPO_DIR}/docker/supervisor.sh RUN echo "daemon off;" >> /etc/nginx/nginx.conf - # nginx=5000, rabbitmq=5672 EXPOSE 5000 5672 ADD http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img ${IMAGE_DIR} ADD http://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img ${IMAGE_DIR} -COPY ./exec_tests.sh /usr/local/bin/ +COPY ./docker/exec_tests.sh /usr/local/bin/ -ENV NSB_DIR="/opt/nsb_bin" \ - PYTHONPATH="${PYTHONPATH}:${NSB_DIR}/trex_client:${NSB_DIR}/trex_client/stl" +ENV NSB_DIR="/opt/nsb_bin" +ENV PYTHONPATH="${PYTHONPATH}:${NSB_DIR}/trex_client:${NSB_DIR}/trex_client/stl" WORKDIR ${REPOS_DIR} CMD ["/usr/bin/supervisord"] diff --git a/docker/Dockerfile.aarch64.patch b/docker/Dockerfile.aarch64.patch index e8dbea288..720a39970 100644 --- a/docker/Dockerfile.aarch64.patch +++ b/docker/Dockerfile.aarch64.patch @@ -1,24 +1,17 @@ From: Cristina Pauna <cristina.pauna@enea.com> -Date: Thu, 11 Jan 2018 19:06:26 +0200 -Subject: [PATCH] Patch for Yardstick AARCH64 Docker file +Date: Mon, 30 Apr 2018 14:09:00 +0300 +Subject: [PATCH] [PATCH] Patch for Yardstick AARCH64 Docker file Signed-off-by: Cristina Pauna <cristina.pauna@enea.com> Signed-off-by: Alexandru Nemes <alexandru.nemes@enea.com> --- - docker/Dockerfile | 13 +++++++------ - 1 file changed, 7 insertions(+), 6 deletions(-) + docker/Dockerfile | 12 +++++++----- + 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/docker/Dockerfile b/docker/Dockerfile -index 2ee5b4c..23e5ea5 100644 +index fed9f9bd..9654b5dc 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile -@@ -1,5 +1,5 @@ - ############################################################################## --# Copyright (c) 2015 Ericsson AB and others. -+# Copyright (c) 2017 Enea AB and others. - # - # All rights reserved. 
This program and the accompanying materials - # are made available under the terms of the Apache License, Version 2.0 @@ -7,9 +7,9 @@ # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## @@ -31,17 +24,18 @@ index 2ee5b4c..23e5ea5 100644 ARG BRANCH=master -@@ -24,7 +24,8 @@ ENV YARDSTICK_REPO_DIR="${REPOS_DIR}/yardstick" \ +@@ -24,7 +24,9 @@ ENV YARDSTICK_REPO_DIR="${REPOS_DIR}/yardstick/" \ RELENG_REPO_DIR="${REPOS_DIR}/releng" \ STORPERF_REPO_DIR="${REPOS_DIR}/storperf" --RUN apt-get update && apt-get install -y git python-setuptools python-pip && apt-get -y autoremove && apt-get clean -+RUN apt-get update && apt-get install -y git python-setuptools python-pip && apt-get -y autoremove && \ +-RUN apt-get update && apt-get install -y git python python-setuptools python-pip && apt-get -y autoremove && apt-get clean ++RUN apt-get update && apt-get install -y git python python-setuptools python-pip && apt-get -y autoremove && \ + apt-get install -y libssl-dev && apt-get -y install libffi-dev && apt-get clean ++ RUN easy_install -U setuptools==30.0.0 - RUN pip install appdirs==1.4.0 pyopenssl==17.5.0 python-openstackclient==3.11.0 python-heatclient==1.11.0 + RUN pip install appdirs==1.4.0 pyopenssl==17.5.0 python-openstackclient==3.11.0 python-heatclient==1.11.0 ansible==2.4.2 -@@ -43,8 +44,8 @@ RUN echo "daemon off;" >> /etc/nginx/nginx.conf +@@ -45,8 +47,8 @@ RUN echo "daemon off;" >> /etc/nginx/nginx.conf # nginx=5000, rabbitmq=5672 EXPOSE 5000 5672 @@ -50,5 +44,5 @@ index 2ee5b4c..23e5ea5 100644 +ADD http://download.cirros-cloud.net/daily/20161201/cirros-d161201-aarch64-disk.img ${IMAGE_DIR} +ADD http://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-arm64-disk1.img ${IMAGE_DIR} - COPY ./exec_tests.sh /usr/local/bin/ + COPY ./docker/exec_tests.sh /usr/local/bin/ diff --git a/docs/release/release-notes/release-notes.rst b/docs/release/release-notes/release-notes.rst index 4ebf0eceb..6598a2751 100644 --- a/docs/release/release-notes/release-notes.rst +++ b/docs/release/release-notes/release-notes.rst @@ -1,7 +1,8 @@ +======= License ======= -OPNFV Euphrates release note for Yardstick Docs +OPNFV Fraser release note for Yardstick Docs are licensed under a Creative Commons Attribution 4.0 International License. You should have received a copy of the license along with this. If not, see <http://creativecommons.org/licenses/by/4.0/>. @@ -9,8 +10,9 @@ If not, see <http://creativecommons.org/licenses/by/4.0/>. The *Yardstick framework*, the *Yardstick test cases* are open-source software, licensed under the terms of the Apache License, Version 2.0. -OPNFV Euphrates Release Note for Yardstick -========================================== +======================================= +OPNFV Fraser Release Note for Yardstick +======================================= .. toctree:: :maxdepth: 2 @@ -23,50 +25,43 @@ OPNFV Euphrates Release Note for Yardstick Abstract --------- +======== This document describes the release note of Yardstick project. 
Version History ---------------- +=============== +-------------------+-----------+---------------------------------+ | *Date* | *Version* | *Comment* | | | | | +-------------------+-----------+---------------------------------+ -| December 15, 2017 | 5.1.0 | Yardstick for Euphrates release | -| | | | -+-------------------+-----------+---------------------------------+ -| October 20, 2017 | 5.0.0 | Yardstick for Euphrates release | +| April 27, 2018 | 6.0.0 | Yardstick for Fraser release | | | | | +-------------------+-----------+---------------------------------+ Important Notes ---------------- +=============== The software delivered in the OPNFV Yardstick_ Project, comprising the -*Yardstick framework*, the *Yardstick test cases* and the experimental -framework *Apex Lake* is a realization of the methodology in ETSI-ISG -NFV-TST001_. +*Yardstick framework*, and the *Yardstick test cases* is a realization of +the methodology in ETSI-ISG NFV-TST001_. The *Yardstick* framework is *installer*, *infrastructure* and *application* independent. -OPNFV Euphrates Release ------------------------ +OPNFV Fraser Release +==================== -This Euphrates release provides *Yardstick* as a framework for NFVI testing +This Fraser release provides *Yardstick* as a framework for NFVI testing and OPNFV feature testing, automated in the OPNFV CI pipeline, including: * Documentation generated with Sphinx * User Guide - * Developer Guide - * Release notes (this document) - * Results * Automated Yardstick test suite (daily, weekly) @@ -84,39 +79,29 @@ and OPNFV feature testing, automated in the OPNFV CI pipeline, including: * Yardstick plug-in configuration yaml files, plug-in install/remove scripts -For Euphrates release, the *Yardstick framework* is used for the following +For Fraser release, the *Yardstick framework* is used for the following testing: * OPNFV platform testing - generic test cases to measure the categories: * Compute - * Network - * Storage -* OPNFV platform network service benchmarking(NSB) +* OPNFV platform network service benchmarking (NSB) * NSB * Test cases for the following OPNFV Projects: * Container4NFV - * High Availability - * IPv6 - * KVM - * Parser - * StorPerf - * VSperf - * virtual Traffic Classifier - The *Yardstick framework* is developed in the OPNFV community, by the Yardstick_ team. @@ -126,49 +111,47 @@ Yardstick_ team. 
Release Data ------------- +============ +--------------------------------+-----------------------+ | **Project** | Yardstick | | | | +--------------------------------+-----------------------+ -| **Repo/tag** | yardstick/opnfv-5.1.0 | +| **Repo/tag** | yardstick/opnfv-6.0.0 | | | | +--------------------------------+-----------------------+ -| **Yardstick Docker image tag** | opnfv-5.1.0 | +| **Yardstick Docker image tag** | opnfv-6.0.0 | | | | +--------------------------------+-----------------------+ -| **Release designation** | Euphrates | +| **Release designation** | Fraser | | | | +--------------------------------+-----------------------+ -| **Release date** | December 15, 2017 | +| **Release date** | April 27, 2018 | | | | +--------------------------------+-----------------------+ -| **Purpose of the delivery** | OPNFV Euphrates 5.1.0 | +| **Purpose of the delivery** | OPNFV Fraser 6.0.0 | | | | +--------------------------------+-----------------------+ Deliverables ------------- +============ Documents -^^^^^^^^^ +--------- - - User Guide: http://docs.opnfv.org/en/stable-euphrates/submodules/yardstick/docs/testing/user/userguide/index.html + - User Guide: http://docs.opnfv.org/en/stable-fraser/submodules/yardstick/docs/testing/user/userguide/index.html - - Developer Guide: http://docs.opnfv.org/en/stable-euphrates/submodules/yardstick/docs/testing/developer/devguide/index.html + - Developer Guide: http://docs.opnfv.org/en/stable-fraser/submodules/yardstick/docs/testing/developer/devguide/index.html Software Deliverables -^^^^^^^^^^^^^^^^^^^^^ - +--------------------- - - The Yardstick Docker image: https://hub.docker.com/r/opnfv/yardstick (tag: opnfv-5.1.0) + - The Yardstick Docker image: https://hub.docker.com/r/opnfv/yardstick (tag: opnfv-6.0.0) - -New Contexts -############ +List of Contexts +^^^^^^^^^^^^^^^^ +--------------+-------------------------------------------+ | **Context** | **Description** | @@ -188,31 +171,40 @@ New Contexts +--------------+-------------------------------------------+ -New Runners -########### - -+--------------+-------------------------------------------------------+ -| **Runner** | **Description** | -| | | -+--------------+-------------------------------------------------------+ -| *Arithmetic* | Steps every run arithmetically according to specified | -| | input value | -| | | -+--------------+-------------------------------------------------------+ -| *Duration* | Runs for a specified period of time | -| | | -+--------------+-------------------------------------------------------+ -| *Iteration* | Runs for a specified number of iterations | -| | | -+--------------+-------------------------------------------------------+ -| *Sequence* | Selects input value to a scenario from an input file | -| | and runs all entries sequentially | -| | | -+--------------+-------------------------------------------------------+ - - -New Scenarios -############# +List of Runners +^^^^^^^^^^^^^^^ + +Note: Yardstick Fraser 6.0.0 adds two new Runners, "Dynamictp" and "Search".
+ ++---------------+-------------------------------------------------------+ +| **Runner** | **Description** | +| | | ++---------------+-------------------------------------------------------+ +| *Arithmetic* | Steps every run arithmetically according to specified | +| | input value | +| | | ++---------------+-------------------------------------------------------+ +| *Duration* | Runs for a specified period of time | +| | | ++---------------+-------------------------------------------------------+ +| *Iteration* | Runs for a specified number of iterations | +| | | ++---------------+-------------------------------------------------------+ +| *Sequence* | Selects input value to a scenario from an input file | +| | and runs all entries sequentially | +| | | ++---------------+-------------------------------------------------------+ +| **Dynamictp** | A runner that searches for the max throughput with | +| | binary search | +| | | ++---------------+-------------------------------------------------------+ +| **Search** | A runner that runs a specific time before it returns | +| | | ++---------------+-------------------------------------------------------+ + + +List of Scenarios +^^^^^^^^^^^^^^^^^ +----------------+-----------------------------------------------------+ | **Category** | **Delivered** | @@ -234,224 +226,138 @@ New Scenarios | | | +----------------+-----------------------------------------------------+ | *Compute* | * cpuload | -| | | | | * cyclictest | -| | | | | * lmbench | -| | | | | * lmbench_cache | -| | | | | * perf | -| | | | | * unixbench | -| | | | | * ramspeed | -| | | | | * cachestat | -| | | | | * memeoryload | -| | | | | * computecapacity | -| | | | | * SpecCPU2006 | | | | +----------------+-----------------------------------------------------+ | *Networking* | * iperf3 | -| | | | | * netperf | -| | | | | * netperf_node | -| | | | | * ping | -| | | | | * ping6 | -| | | | | * pktgen | -| | | | | * sfc | -| | | | | * sfc with tacker | -| | | -| | * vtc instantion validation | -| | | -| | * vtc instantion validation with noisy neighbors | -| | | -| | * vtc throughput | -| | | -| | * vtc throughput in the presence of noisy neighbors | -| | | | | * networkcapacity | -| | | | | * netutilization | -| | | | | * nstat | -| | | | | * pktgenDPDK | | | | +----------------+-----------------------------------------------------+ | *Parser* | Tosca2Heat | | | | +----------------+-----------------------------------------------------+ -| *Storage* | fio | -| | | -| | bonnie++ | -| | | -| | storagecapacity | +| *Storage* | * fio | +| | * bonnie++ | +| | * storagecapacity | | | | +----------------+-----------------------------------------------------+ | *StorPerf* | storperf | | | | +----------------+-----------------------------------------------------+ -| *NSB* | vPE thoughput test case | +| *NSB* | vFW thoughput test case | | | | +----------------+-----------------------------------------------------+ - New Test cases -^^^^^^^^^^^^^^ +-------------- * Generic NFVI test cases - * OPNFV_YARDSTICK_TCO78 - SPEC CPU 2006 - - * OPNFV_YARDSTICK_TCO79 - Bonnie++ - -* Kubernetes Test cases + * OPNFV_YARDSTICK_TCO84 - SPEC CPU 2006 for VM - * OPNFV_YARDSTICK_TCO80 - NETWORK LATENCY BETWEEN CONTAINER +* HA Test cases - * OPNFV_YARDSTICK_TCO81 - NETWORK LATENCY BETWEEN CONTAINER AND VM + * OPNFV_YARDSTICK_TC087 - SDN Controller resilience in non-HA configuration + * OPNFV_YARDSTICK_TC090 - Control node Openstack service down - database instance + * OPNFV_YARDSTICK_TC091 - Control node Openstack 
service down - heat-api Version Change --------------- +============== Module Version Changes -^^^^^^^^^^^^^^^^^^^^^^ +---------------------- -This is the fifth tracked release of Yardstick. It is based on following +This is the sixth tracked release of Yardstick. It is based on following upstream versions: -- OpenStack Ocata - -- OpenDayLight Nitrogen - -- ONOS Junco +- OpenStack Pike +- OpenDayLight Oxygen Document Version Changes -^^^^^^^^^^^^^^^^^^^^^^^^ +------------------------ -This is the fifth tracked version of the Yardstick framework in OPNFV. +This is the sixth tracked version of the Yardstick framework in OPNFV. It includes the following documentation updates: - Yardstick User Guide: add "network service benchmarking(NSB)" chapter; add "Yardstick - NSB Testing -Installation" chapter; add "Yardstick API" chapter; add "Yardstick user interface" chapter; Update Yardstick installation chapter; - - Yardstick Developer Guide - - Yardstick Release Notes for Yardstick: this document Feature additions -^^^^^^^^^^^^^^^^^ - -- Yardstick RESTful API support - -- Network service benchmarking - -- Stress testing with Bottlenecks team - -- Yardstick framework improvement: - - - yardstick report CLI - - - Node context support OpenStack configuration via Ansible - - - Https support +----------------- - - Kubernetes context type - -- Yardstick container local GUI - -- Python 3 support +- Plugin-based test cases support Heat context +- SR-IOV support for the Heat context +- Support using existing network in Heat context +- Support running test cases with existing VNFs/without destroying VNF in Heat context +- Add vFW scale-up template +- Improvements of unit tests and gating +- GUI improvement about passing parameters Scenario Matrix ---------------- - -For Euphrates 5.0.0, Yardstick was tested on the following scenarios: - -+--------------------------+------+---------+------+------+ -| Scenario | Apex | Compass | Fuel | Joid | -+==========================+======+=========+======+======+ -| os-nosdn-nofeature-noha | | | X | X | -+--------------------------+------+---------+------+------+ -| os-nosdn-nofeature-ha | X | X | X | X | -+--------------------------+------+---------+------+------+ -| os-odl_l2-nofeature-ha | | X | X | X | -+--------------------------+------+---------+------+------+ -| os-odl_l2-nofeature-noha | | | X | | -+--------------------------+------+---------+------+------+ -| os-odl_l3-nofeature-ha | X | X | X | | -+--------------------------+------+---------+------+------+ -| os-odl_l3-nofeature-noha | | | X | | -+--------------------------+------+---------+------+------+ -| os-onos-sfc-ha | | | | | -+--------------------------+------+---------+------+------+ -| os-onos-nofeature-ha | | X | | X | -+--------------------------+------+---------+------+------+ -| os-onos-nofeature-noha | | | | | -+--------------------------+------+---------+------+------+ -| os-odl_l2-sfc-ha | | | X | | -+--------------------------+------+---------+------+------+ -| os-odl_l2-sfc-noha | | | X | | -+--------------------------+------+---------+------+------+ -| os-odl_l2-bgpvpn-ha | X | | X | | -+--------------------------+------+---------+------+------+ -| os-odl_l2-bgpvpn-noha | | | X | | -+--------------------------+------+---------+------+------+ -| os-nosdn-kvm-ha | X | | X | | -+--------------------------+------+---------+------+------+ -| os-nosdn-kvm-noha | | | X | | -+--------------------------+------+---------+------+------+ -| os-nosdn-ovs-ha | | | X | | 
-+--------------------------+------+---------+------+------+ -| os-nosdn-ovs-noha | | | X | | -+--------------------------+------+---------+------+------+ -| os-ocl-nofeature-ha | | X | | | -+--------------------------+------+---------+------+------+ -| os-nosdn-lxd-ha | | | | X | -+--------------------------+------+---------+------+------+ -| os-nosdn-lxd-noha | | | | X | -+--------------------------+------+---------+------+------+ -| os-nosdn-fdio-ha | X | | | | -+--------------------------+------+---------+------+------+ -| os-odl_l2-fdio-noha | X | | | | -+--------------------------+------+---------+------+------+ -| os-odl-gluon-noha | X | | | | -+--------------------------+------+---------+------+------+ -| os-nosdn-openo-ha | | X | | | -+--------------------------+------+---------+------+------+ -| os-nosdn-kvm_ovs_dpdk | | | X | | -| -noha | | | | | -+--------------------------+------+---------+------+------+ -| os-nosdn-kvm_ovs_dpdk-ha | | | X | | -+--------------------------+------+---------+------+------+ -| os-nosdn-kvm_ovs_dpdk | | | X | | -| _bar-ha | | | | | -+--------------------------+------+---------+------+------+ -| os-nosdn-kvm_ovs_dpdk | | | X | | -| _bar-noha | | | | | -+--------------------------+------+---------+------+------+ -| opnfv_os-ovn-nofeature- | X | | | | -| noha_daily | | | | | -+--------------------------+------+---------+------+------+ +=============== + +For Fraser 6.0.0, Yardstick was tested on the following scenarios: + ++-------------------------+------+---------+----------+------+------+-------+ +| Scenario | Apex | Compass | Fuel-arm | Fuel | Joid | Daisy | ++=========================+======+=========+==========+======+======+=======+ +| os-nosdn-nofeature-noha | X | X | | | X | | ++-------------------------+------+---------+----------+------+------+-------+ +| os-nosdn-nofeature-ha | X | X | X | X | X | X | ++-------------------------+------+---------+----------+------+------+-------+ +| os-nosdn-bar-noha | X | X | | | | | ++-------------------------+------+---------+----------+------+------+-------+ +| os-nosdn-bar-ha | X | | | | | | ++-------------------------+------+---------+----------+------+------+-------+ +| os-odl-bgpvpn-ha | X | | | | | | ++-------------------------+------+---------+----------+------+------+-------+ +| os-nosdn-calipso-noha | X | | | | | | ++-------------------------+------+---------+----------+------+------+-------+ +| os-nosdn-kvm-ha | | X | | | | | ++-------------------------+------+---------+----------+------+------+-------+ +| os-odl_l3-nofeature-ha | | X | | | | | ++-------------------------+------+---------+----------+------+------+-------+ +| os-odl-sfc-ha | | X | | | | | ++-------------------------+------+---------+----------+------+------+-------+ +| os-odl-nofeature-ha | | | | X | | X | ++-------------------------+------+---------+----------+------+------+-------+ +| os-nosdn-ovs-ha | | | | X | | | ++-------------------------+------+---------+----------+------+------+-------+ +| k8-nosdn-nofeature-ha | | X | | | | | ++-------------------------+------+---------+----------+------+------+-------+ +| k8-nosdn-stor4nfv-noha | | X | | | | | ++-------------------------+------+---------+----------+------+------+-------+ + Test results ------------- +============ Test results are available in: @@ -459,109 +365,107 @@ Test results are available in: The reporting pages can be found at: -+---------------+-------------------------------------------------------------------------------------+ -| apex | 
http://testresults.opnfv.org/reporting/euphrates/yardstick/status-apex.html | -+---------------+-------------------------------------------------------------------------------------+ -| compass | http://testresults.opnfv.org/reporting/euphrates/yardstick/status-compass.html | -+---------------+-------------------------------------------------------------------------------------+ -| fuel\@x86 | http://testresults.opnfv.org/reporting/euphrates/yardstick/status-fuel@x86.html | -+---------------+-------------------------------------------------------------------------------------+ -| fuel\@aarch64 | http://testresults.opnfv.org/reporting/euphrates/yardstick/status-fuel@aarch64.html | -+---------------+-------------------------------------------------------------------------------------+ -| joid | http://testresults.opnfv.org/reporting/euphrates/yardstick/status-joid.html | -+---------------+-------------------------------------------------------------------------------------+ ++---------------+----------------------------------------------------------------------------------+ +| apex | http://testresults.opnfv.org/reporting/fraser/yardstick/status-apex.html | ++---------------+----------------------------------------------------------------------------------+ +| compass | http://testresults.opnfv.org/reporting/fraser/yardstick/status-compass.html | ++---------------+----------------------------------------------------------------------------------+ +| fuel\@x86 | http://testresults.opnfv.org/reporting/fraser/yardstick/status-fuel@x86.html | ++---------------+----------------------------------------------------------------------------------+ +| fuel\@aarch64 | http://testresults.opnfv.org/reporting/fraser/yardstick/status-fuel@aarch64.html | ++---------------+----------------------------------------------------------------------------------+ +| joid | http://testresults.opnfv.org/reporting/fraser/yardstick/status-joid.html | ++---------------+----------------------------------------------------------------------------------+ Known Issues/Faults -^^^^^^^^^^^^^^^^^^^ +------------------- Corrected Faults -^^^^^^^^^^^^^^^^ +---------------- + +Fraser 6.0.0: + ++--------------------+--------------------------------------------------------------------------+ +| **JIRA REFERENCE** | **DESCRIPTION** | ++====================+==========================================================================+ +| YARDSTICK-831 | tc053 kill haproxy wrong | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-842 | load image fails when there's cirros image exist | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-857 | tc006 failed due to volume attached to different location "/dev/vdc" | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-874 | Specify supported architecture for Ubuntu backports repository | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-875 | Check if multiverse repository is available in Ubuntu | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-893 | Fix proxy env handling and ansible multinode support | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-899 | Variable local_iface_name is read before it is set | 
++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-900 | Section in "upload_yardstick_image.yml" invalid | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-911 | Remove 'inconsistent-return-statements' from Pylint checks | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-989 | Yardstick real-time influxdb KPI reporting regressions | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-994 | NSB set-up build script for baremetal broken | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-996 | Error in address input format in "_ip_range_action_partial" | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1003 | Prox vnf descriptor cleanup for tg and vnf | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1006 | Ansible destroy script will fail if vm has already been undefined | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1012 | constants: fix pylint warnings for OSError | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1014 | Remove unused args in | +| | network_services.traffic_profile.ixia_rfc2544.IXIARFC2544Profile | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1016 | Allow vm to access outside world through default gateway | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1019 | For 'qemu-img version 2.10.1' unit 'MB' is not acceptable ansible script | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1021 | NSB: All Sample VNF test cases timeout after 1 hour of execution | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1036 | Prox: Addition of storage of extra counters for Grafana | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1038 | Missing file which is described in the operation_conf.yaml | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1047 | Error in string format in HeatTemplateError message | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1056 | yardstick report command print error when run test case | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1059 | Reduce the log level if TRex client is no connected | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1073 | Error when retrieving "options" section in "scenario" | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1080 | Running Test Case in Latest Yardstick Docker Image shows Error | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1082 | tc043,tc055, tc063, tc075, pass wrong 
node name in the ci scenario yaml | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1102 | Don't hide exception traceback from Task.start() | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1107 | bad exception traceback print due to atexit_handler | ++--------------------+--------------------------------------------------------------------------+ +| YARDSTICK-1120 | HA test case tc050 should start monitor before attack | ++--------------------+--------------------------------------------------------------------------+ + +Fraser 6.0.0 known restrictions/issues +====================================== -Euphrates 5.1.0: - -+---------------------+-------------------------------------------------------------------------+ -| **JIRA REFERENCE** | **DESCRIPTION** | -| | | -+---------------------+-------------------------------------------------------------------------+ -| JIRA: YARDSTICK-841 | Fix various NSB license issues | -+---------------------+-------------------------------------------------------------------------+ -| JIRA: YARDSTICK-73 | How To Work with Test Cases | -+---------------------+-------------------------------------------------------------------------+ -| JIRA: YARDSTICK-500 | VNF testing documentation | -+---------------------+-------------------------------------------------------------------------+ -| JIRA: YARDSTICK-826 | Allow overriding Heat IP addresses to match traffic generator profile | -+---------------------+-------------------------------------------------------------------------+ -| JIRA: YARDSTICK-828 | Refactor doc/testing/user/userguide "Yardstick Installation" | -+---------------------+-------------------------------------------------------------------------+ -| JIRA: YARDSTICK-830 | build_yardstick_image Ansible mount module doesn't work on Ubuntu 14.04 | -+---------------------+-------------------------------------------------------------------------+ -| JIRA: YARDSTICK-833 | ansible_common transform password into lower case | -+---------------------+-------------------------------------------------------------------------+ -| JIRA: YARDSTICK-847 | tc006, tc079, tc082 miss grafana dashboard in local deployment | -+---------------------+-------------------------------------------------------------------------+ -| JIRA: YARDSTICK-849 | kill process do not accurately kill the process like "nova-api" | -+---------------------+-------------------------------------------------------------------------+ -| JIRA: YARDSTICK-850 | tc023 miss description and tc050-58 wrong description | -+---------------------+-------------------------------------------------------------------------+ -| JIRA: YARDSTICK-852 | tc078 cpu2006 fails in some situation | -+---------------------+-------------------------------------------------------------------------+ -| JIRA: YARDSTICK-854 | yardstick docker lack of trex_client | -+---------------------+-------------------------------------------------------------------------+ -| JIRA: YARDSTICK-867 | testcase tc078 have no data stored or dashboard to show results | -+---------------------+-------------------------------------------------------------------------+ -| JIRA: YARDSTICK-871 | Remove img_modify_playbook assignation in build_yardstick_image.yml | -+---------------------+-------------------------------------------------------------------------+ -| JIRA: YARDSTICK-829 | "nsb_setup.sh" doesn't parse the 
controller IP correctly | -+---------------------+-------------------------------------------------------------------------+ -| JIRA: YARDSTICK-839 | NSB Prox BM test cases to be fixed for incorporating scale-up | -+---------------------+-------------------------------------------------------------------------+ -| JIRA: YARDSTICK-840 | NSB Prox test documentation of vPE and LW-AFTR test cases | -+---------------------+-------------------------------------------------------------------------+ -| JIRA: YARDSTICK-848 | NSB "Prox" : Cleanup duplicated traffic profile | -+---------------------+-------------------------------------------------------------------------+ - - - - -Euphrates 5.0.0: - -+---------------------+--------------------------------------------+ -| **JIRA REFERENCE** | **DESCRIPTION** | -| | | -+---------------------+--------------------------------------------+ -| JIRA: YARDSTICK-599 | Could not load EntryPoint.parse when using | -| | 'openstack -h' | -+---------------------+--------------------------------------------+ -| JIRA: YARDSTICK-602 | Don't rely on staic ip addresses as they | -| | are dynamic | -+---------------------+--------------------------------------------+ - - -Euphratess 5.0.0 known restrictions/issues ------------------------------------------- +-----------+-----------+----------------------------------------------+ | Installer | Scenario | Issue | +===========+===========+==============================================+ -| any | \*-bgpvpn | Floating ips not supported. Some Test cases | -| | | related to floating ips are excluded. | -+-----------+-----------+----------------------------------------------+ -| any | odl_l3-\* | Some test cases related to using floating IP | -| | | addresses fail because of a known ODL bug. | -| | | | -+-----------+-----------+----------------------------------------------+ -| compass | odl_l2-\* | In some test cases, VM instance will failed | -| | | raising network interfaces. | | | | | +-----------+-----------+----------------------------------------------+ - Useful links ------------- +============ - wiki project page: https://wiki.opnfv.org/display/yardstick/Yardstick - - wiki Yardstick Euphrates release planing page: https://wiki.opnfv.org/display/yardstick/Yardstick+Euphrates+Release+Planning + - wiki Yardstick Fraser release planning page: https://wiki.opnfv.org/display/yardstick/Release+Fraser - Yardstick repo: https://git.opnfv.org/cgit/yardstick diff --git a/docs/testing/user/userguide/13-nsb-installation.rst b/docs/testing/user/userguide/13-nsb-installation.rst index 00f8cfd97..1f6c79b0b 100644 --- a/docs/testing/user/userguide/13-nsb-installation.rst +++ b/docs/testing/user/userguide/13-nsb-installation.rst @@ -135,6 +135,15 @@ Ansible: ansible_user=root ansible_pass=root +.. note:: + + SSH access without a password needs to be configured for all the nodes defined in + the ``yardstick-install-inventory.ini`` file. + If you want to use password authentication, you need to install sshpass + + ..
code-block:: console + + sudo -EH apt-get install sshpass To execute an installation for a Bare-Metal or a Standalone context: diff --git a/etc/infra/infra_deploy_multi.yaml.sample b/etc/infra/infra_deploy_multi.yaml.sample new file mode 100644 index 000000000..aa27b735a --- /dev/null +++ b/etc/infra/infra_deploy_multi.yaml.sample @@ -0,0 +1,97 @@ +nodes: + - name: Deployment and Controller node number 1 VM + openstack_node: controller + hostname: control-01 + interfaces: + - network: management + ip: 192.168.1.10 + netmask: 255.255.255.0 + - network: traffic + ip: 192.20.1.10 + netmask: 255.255.255.0 + user: ubuntu + password: password + image: /tmp/image_cntrl_1.img + disk: 13000 + ram: 9000 + vcpus: 4 + + - name: Controller node number 2 VM + openstack_node: controller + hostname: control-02 + interfaces: + - network: management + ip: 192.168.1.11 + netmask: 255.255.255.0 + - network: traffic + ip: 192.20.1.11 + netmask: 255.255.255.0 + user: ubuntu + password: password + image: /tmp/image_cntrl_2.img + disk: 11000 + ram: 6000 + vcpus: 2 + + - name: Compute node number 1 VM + openstack_node: compute + hostname: compute-01 + interfaces: + - network: management + ip: 192.168.1.12 + netmask: 255.255.255.0 + - network: traffic + ip: 192.20.1.12 + netmask: 255.255.255.0 + user: ubuntu + password: password + image: /tmp/image_comp_1.img + disk: 30000 + ram: 16000 + vcpus: 12 + + - name: Compute node number 2 VM + openstack_node: compute + hostname: compute-02 + interfaces: + - network: management + ip: 192.168.1.13 + netmask: 255.255.255.0 + - network: traffic + ip: 192.20.1.13 + netmask: 255.255.255.0 + user: ubuntu + password: password + image: /tmp/image_comp_2.img + disk: 12000 + ram: 6000 + vcpus: 4 + + - name: Jump host + hostname: yardstickvm + interfaces: + - network: management + ip: 192.168.1.14 + netmask: 255.255.255.0 + - network: traffic + ip: 192.20.1.14 + netmask: 255.255.255.0 + user: ubuntu + password: password + image: /tmp/image_yardstick.img + disk: 28000 + ram: 12000 + vcpus: 4 + +networks: + - name: management + default_gateway: True + host_ip: 192.168.1.1 + netmask: 255.255.255.0 + + - name: traffic + default_gateway: False # This parameter is not mandatory, default value: False + host_ip: 192.20.1.1 + netmask: 255.255.255.0 + dhcp_ip_start: 192.20.1.200 + dhcp_ip_stop: 192.20.1.250 diff --git a/etc/infra/infra_deploy.yaml.sample b/etc/infra/infra_deploy_one.yaml.sample index 8ed793622..f8759d42e 100644 --- a/etc/infra/infra_deploy.yaml.sample +++ b/etc/infra/infra_deploy_one.yaml.sample @@ -1,32 +1,35 @@ nodes: - - name: Yardstick VM - hostname: yardstickvm + - name: Deployment, Controller and Compute single VM + openstack_node: controller # if no compute nodes are defined means a standalone deployment + hostname: allinone interfaces: - network: management - ip: 192.168.1.10 + ip: 192.168.1.21 + netmask: 255.255.255.0 + - network: traffic + ip: 192.20.1.21 netmask: 255.255.255.0 user: ubuntu password: password - image: /tmp/image1.qcow - disk: 50000 - ram: 8192 - vcpus: 4 + image: /tmp/image_one.img + disk: 22000 + ram: 14000 + vcpus: 12 - - name: Controller_Compute VM - openstack_node: controller_compute - hostname: controller_compute + - name: Jump host + hostname: yardstickvm interfaces: - network: management - ip: 192.168.1.20 + ip: 192.168.1.22 netmask: 255.255.255.0 - network: traffic - ip: 192.20.1.20 + ip: 192.20.1.22 netmask: 255.255.255.0 user: ubuntu password: password - image: /tmp/image_2.qcow - disk: 40000 - ram: 32768 + image: /tmp/image_yardstick.img + 
disk: 22000 + ram: 10000 vcpus: 4 networks: @@ -39,3 +42,5 @@ networks: default_gateway: False # This parameter is not mandatory, default value: False host_ip: 192.20.1.1 netmask: 255.255.255.0 + dhcp_ip_start: 192.20.1.200 + dhcp_ip_stop: 192.20.1.250 diff --git a/etc/infra/infra_deploy_two.yaml.sample b/etc/infra/infra_deploy_two.yaml.sample new file mode 100644 index 000000000..a29f75453 --- /dev/null +++ b/etc/infra/infra_deploy_two.yaml.sample @@ -0,0 +1,63 @@ +nodes: + - name: Deployment and Controller node number 1 VM + openstack_node: controller + hostname: control-01 + interfaces: + - network: management + ip: 192.168.1.118 + netmask: 255.255.255.0 + - network: traffic + ip: 192.20.1.118 + netmask: 255.255.255.0 + user: ubuntu + password: password + image: /tmp/image_cntrl_1.img + disk: 12000 + ram: 10000 + vcpus: 6 + + - name: Compute node number 1 VM + openstack_node: compute + hostname: compute-01 + interfaces: + - network: management + ip: 192.168.1.119 + netmask: 255.255.255.0 + - network: traffic + ip: 192.20.1.119 + netmask: 255.255.255.0 + user: ubuntu + password: password + image: /tmp/image_comp_1.img + disk: 44000 + ram: 30000 + vcpus: 14 + + - name: Jump host + hostname: yardstickvm + interfaces: + - network: management + ip: 192.168.1.120 + netmask: 255.255.255.0 + - network: traffic + ip: 192.20.1.120 + netmask: 255.255.255.0 + user: ubuntu + password: password + image: /tmp/image_yardstick.img + disk: 22000 + ram: 10000 + vcpus: 4 + +networks: + - name: management + default_gateway: True + host_ip: 192.168.1.1 + netmask: 255.255.255.0 + + - name: traffic + default_gateway: False # This parameter is not mandatory, default value: False + host_ip: 192.20.1.1 + netmask: 255.255.255.0 + dhcp_ip_start: 192.20.1.200 + dhcp_ip_stop: 192.20.1.250 diff --git a/install.sh b/install.sh index 04985f48a..74929345d 100755 --- a/install.sh +++ b/install.sh @@ -119,4 +119,4 @@ tar xvf ${NSB_DIR}/trex_client.tar.gz -C ${NSB_DIR} rm -f ${NSB_DIR}/trex_client.tar.gz service nginx restart -uwsgi -i /etc/yardstick/yardstick.ini +uwsgi -i /etc/yardstick/yardstick.ini
\ No newline at end of file diff --git a/tests/ci/load_images.sh b/tests/ci/load_images.sh index dee675981..1e1591ce3 100755 --- a/tests/ci/load_images.sh +++ b/tests/ci/load_images.sh @@ -43,6 +43,12 @@ if [ "${YARD_IMG_ARCH}" == "arm64" ]; then fi fi +cleanup_loopbacks() { + # try again to cleanup loopbacks in case of error + losetup -a + losetup -O NAME,BACK-FILE | awk '/yardstick/ { print $1 }' | xargs -l1 losetup -v -d || true +} + build_yardstick_image() { echo @@ -56,6 +62,7 @@ build_yardstick_image() # Build the image. Retry once if the build fails $cmd || $cmd + cleanup_loopbacks if [ ! -f "${RAW_IMAGE}" ]; then echo "Failed building RAW image" exit 1 @@ -70,16 +77,20 @@ build_yardstick_image() -e YARD_IMG_ARCH=${YARD_IMG_ARCH} \ -vvv -i inventory.ini build_yardstick_image.yml + cleanup_loopbacks if [ ! -f "${QCOW_IMAGE}" ]; then echo "Failed building QCOW image" exit 1 fi fi - if [[ $DEPLOY_SCENARIO == *[_-]ovs[_-]* ]]; then + # DPDK compile is not enabled for arm64 yet so disable for now + # JIRA: YARDSTICK-1124 + if [[ ! -f "${QCOW_NSB_IMAGE}" && ${DEPLOY_SCENARIO} == *[_-]ovs_dpdk[_-]* && "${YARD_IMG_ARCH}" != "arm64" ]]; then ansible-playbook \ -e img_property="nsb" \ -e YARD_IMG_ARCH=${YARD_IMG_ARCH} \ -vvv -i inventory.ini build_yardstick_image.yml + cleanup_loopbacks if [ ! -f "${QCOW_NSB_IMAGE}" ]; then echo "Failed building QCOW NSB image" exit 1 @@ -122,7 +133,9 @@ load_yardstick_image() ${EXTRA_PARAMS} \ --file ${QCOW_IMAGE} \ yardstick-image) - if [[ $DEPLOY_SCENARIO == *[_-]ovs[_-]* ]]; then + # DPDK compile is not enabled for arm64 yet so disable NSB images for now + # JIRA: YARDSTICK-1124 + if [[ $DEPLOY_SCENARIO == *[_-]ovs_dpdk[_-]* && "${YARD_IMG_ARCH}" != "arm64" ]]; then nsb_output=$(eval openstack ${SECURE} image create \ --public \ --disk-format qcow2 \ diff --git a/tests/opnfv/test_suites/opnfv_os-nosdn-fdio-noha_daily.yaml b/tests/opnfv/test_suites/opnfv_os-nosdn-fdio-noha_daily.yaml index ec0fd224c..bd91a75c7 100644 --- a/tests/opnfv/test_suites/opnfv_os-nosdn-fdio-noha_daily.yaml +++ b/tests/opnfv/test_suites/opnfv_os-nosdn-fdio-noha_daily.yaml @@ -21,18 +21,12 @@ test_cases: - file_name: opnfv_yardstick_tc006.yaml - - file_name: opnfv_yardstick_tc007.yaml -- file_name: opnfv_yardstick_tc008.yaml - file_name: opnfv_yardstick_tc009.yaml - file_name: opnfv_yardstick_tc011.yaml - - file_name: opnfv_yardstick_tc020.yaml -- - file_name: opnfv_yardstick_tc021.yaml -- file_name: opnfv_yardstick_tc037.yaml - file_name: opnfv_yardstick_tc038.yaml diff --git a/tests/opnfv/test_suites/opnfv_os-odl_l2-fdio-noha_daily.yaml b/tests/opnfv/test_suites/opnfv_os-odl_l2-fdio-noha_daily.yaml index 7172979c7..722d885b6 100644 --- a/tests/opnfv/test_suites/opnfv_os-odl_l2-fdio-noha_daily.yaml +++ b/tests/opnfv/test_suites/opnfv_os-odl_l2-fdio-noha_daily.yaml @@ -21,18 +21,12 @@ test_cases: - file_name: opnfv_yardstick_tc006.yaml - - file_name: opnfv_yardstick_tc007.yaml -- file_name: opnfv_yardstick_tc008.yaml - file_name: opnfv_yardstick_tc009.yaml - file_name: opnfv_yardstick_tc011.yaml - - file_name: opnfv_yardstick_tc020.yaml -- - file_name: opnfv_yardstick_tc021.yaml -- file_name: opnfv_yardstick_tc037.yaml - file_name: opnfv_yardstick_tc038.yaml diff --git a/tests/opnfv/test_suites/opnfv_vTC_daily.yaml b/tests/opnfv/test_suites/opnfv_vTC_daily.yaml deleted file mode 100644 index f7efe51fb..000000000 --- a/tests/opnfv/test_suites/opnfv_vTC_daily.yaml +++ /dev/null @@ -1,24 +0,0 @@ -############################################################################## -# Copyright
(c) 2017 Ericsson AB and others. -# -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -############################################################################## ---- -# ERICSSON POD1 VTC daily task suite - -schema: "yardstick:suite:0.1" - -name: "opnfv_vTC_daily" -test_cases_dir: "tests/opnfv/test_cases/" -test_cases: -- - file_name: opnfv_yardstick_tc006.yaml -- - file_name: opnfv_yardstick_tc007.yaml -- - file_name: opnfv_yardstick_tc020.yaml -- - file_name: opnfv_yardstick_tc021.yaml diff --git a/tests/opnfv/test_suites/opnfv_vTC_weekly.yaml b/tests/opnfv/test_suites/opnfv_vTC_weekly.yaml deleted file mode 100644 index 04f607ed4..000000000 --- a/tests/opnfv/test_suites/opnfv_vTC_weekly.yaml +++ /dev/null @@ -1,24 +0,0 @@ -############################################################################## -# Copyright (c) 2017 Ericsson AB and others. -# -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -############################################################################## ---- -# ERICSSON POD1 VTC weekly task suite - -schema: "yardstick:suite:0.1" - -name: "opnfv_vTC_weekly" -test_cases_dir: "tests/opnfv/test_cases/" -test_cases: -- - file_name: opnfv_yardstick_tc006.yaml -- - file_name: opnfv_yardstick_tc007.yaml -- - file_name: opnfv_yardstick_tc020.yaml -- - file_name: opnfv_yardstick_tc021.yaml diff --git a/tests/unit/network_services/helpers/test_samplevnf_helper.py b/tests/unit/network_services/helpers/test_samplevnf_helper.py index 3b6c89d3a..dc74b1859 100644 --- a/tests/unit/network_services/helpers/test_samplevnf_helper.py +++ b/tests/unit/network_services/helpers/test_samplevnf_helper.py @@ -143,8 +143,6 @@ class TestMultiPortConfig(unittest.TestCase): def setUp(self): self._mock_open = mock.patch.object(six.moves.builtins, 'open') self.mock_open = self._mock_open.start() - self._mock_os = mock.patch.object(os, 'path') - self.mock_os = self._mock_os.start() self._mock_config_parser = mock.patch.object( samplevnf_helper, 'ConfigParser') self.mock_config_parser = self._mock_config_parser.start() @@ -153,7 +151,6 @@ class TestMultiPortConfig(unittest.TestCase): def _cleanup(self): self._mock_open.stop() - self._mock_os.stop() self._mock_config_parser.stop() def test_validate_ip_and_prefixlen(self): @@ -185,7 +182,8 @@ class TestMultiPortConfig(unittest.TestCase): samplevnf_helper.MultiPortConfig.validate_ip_and_prefixlen( '::1', '129') - def test___init__(self): + @mock.patch.object(os.path, 'isfile', return_value=False) + def test___init__(self, *args): topology_file = mock.Mock() config_tpl = mock.Mock() tmp_file = mock.Mock() @@ -193,8 +191,6 @@ class TestMultiPortConfig(unittest.TestCase): opnfv_vnf = samplevnf_helper.MultiPortConfig( topology_file, config_tpl, tmp_file, vnfd_mock) self.assertEqual(0, opnfv_vnf.swq) - self.mock_os.path = mock.MagicMock() - self.mock_os.path.isfile = mock.Mock(return_value=False) opnfv_vnf = samplevnf_helper.MultiPortConfig( topology_file, config_tpl, tmp_file, vnfd_mock) self.assertEqual(0, opnfv_vnf.swq) diff --git a/tests/unit/network_services/nfvi/test_resource.py b/tests/unit/network_services/nfvi/test_resource.py index f5f7f0fe7..9f337c673 
100644 --- a/tests/unit/network_services/nfvi/test_resource.py +++ b/tests/unit/network_services/nfvi/test_resource.py @@ -19,7 +19,8 @@ import unittest from yardstick.network_services.nfvi.resource import ResourceProfile from yardstick.network_services.nfvi import resource, collectd - +from yardstick.common.exceptions import ResourceCommandError +from yardstick import ssh class TestResourceProfile(unittest.TestCase): VNFD = {'vnfd:vnfd-catalog': @@ -128,8 +129,31 @@ class TestResourceProfile(unittest.TestCase): self.assertEqual(val, ('error', 'Invalid', '', '')) def test__start_collectd(self): - self.assertIsNone( - self.resource_profile._start_collectd(self.ssh_mock, "/opt/nsb_bin")) + ssh_mock = mock.Mock() + ssh_mock.execute = mock.Mock(return_value=(0, "", "")) + self.assertIsNone(self.resource_profile._start_collectd(ssh_mock, + "/opt/nsb_bin")) + + ssh_mock.execute = mock.Mock(side_effect=ssh.SSHError) + with self.assertRaises(ssh.SSHError): + self.resource_profile._start_collectd(ssh_mock, "/opt/nsb_bin") + + ssh_mock.execute = mock.Mock(return_value=(1, "", "")) + self.assertIsNone(self.resource_profile._start_collectd(ssh_mock, + "/opt/nsb_bin")) + + def test__start_rabbitmq(self): + ssh_mock = mock.Mock() + ssh_mock.execute = mock.Mock(return_value=(0, "RabbitMQ", "")) + self.assertIsNone(self.resource_profile._start_rabbitmq(ssh_mock)) + + ssh_mock.execute = mock.Mock(return_value=(0, "", "")) + with self.assertRaises(ResourceCommandError): + self.resource_profile._start_rabbitmq(ssh_mock) + + ssh_mock.execute = mock.Mock(return_value=(1, "", "")) + with self.assertRaises(ResourceCommandError): + self.resource_profile._start_rabbitmq(ssh_mock) def test__prepare_collectd_conf(self): self.assertIsNone( @@ -154,11 +178,12 @@ class TestResourceProfile(unittest.TestCase): def test_initiate_systemagent(self): self.resource_profile._start_collectd = mock.Mock() + self.resource_profile._start_rabbitmq = mock.Mock() self.assertIsNone( self.resource_profile.initiate_systemagent("/opt/nsb_bin")) def test_initiate_systemagent_raise(self): - self.resource_profile._start_collectd = mock.Mock(side_effect=RuntimeError) + self.resource_profile._start_rabbitmq = mock.Mock(side_effect=RuntimeError) with self.assertRaises(RuntimeError): self.resource_profile.initiate_systemagent("/opt/nsb_bin") diff --git a/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py b/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py index 26bd1dadd..eb59c2837 100644 --- a/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py +++ b/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py @@ -1661,42 +1661,6 @@ class TestSampleVnf(unittest.TestCase): # test the default resource helper is MyResourceHelper, not subclass self.assertEqual(type(sample_vnf.resource_helper), MyResourceHelper) - def test__get_port0localip6(self): - sample_vnf = SampleVNF('vnf1', self.VNFD_0) - expected = '0064:ff9b:0:0:0:0:9810:6414' - result = sample_vnf._get_port0localip6() - self.assertEqual(result, expected) - - def test__get_port1localip6(self): - sample_vnf = SampleVNF('vnf1', self.VNFD_0) - expected = '0064:ff9b:0:0:0:0:9810:2814' - result = sample_vnf._get_port1localip6() - self.assertEqual(result, expected) - - def test__get_port0prefixip6(self): - sample_vnf = SampleVNF('vnf1', self.VNFD_0) - expected = '112' - result = sample_vnf._get_port0prefixlen6() - self.assertEqual(result, expected) - - def test__get_port1prefixip6(self): - sample_vnf = SampleVNF('vnf1', self.VNFD_0) - expected = '112' - 
result = sample_vnf._get_port1prefixlen6() - self.assertEqual(result, expected) - - def test__get_port0gateway6(self): - sample_vnf = SampleVNF('vnf1', self.VNFD_0) - expected = '0064:ff9b:0:0:0:0:9810:6414' - result = sample_vnf._get_port0gateway6() - self.assertEqual(result, expected) - - def test__get_port1gateway6(self): - sample_vnf = SampleVNF('vnf1', self.VNFD_0) - expected = '0064:ff9b:0:0:0:0:9810:2814' - result = sample_vnf._get_port1gateway6() - self.assertEqual(result, expected) - @mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.Process') def test__start_vnf(self, *args): vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] @@ -1785,16 +1749,6 @@ class TestSampleVnf(unittest.TestCase): self.assertEqual(sample_vnf.wait_for_instantiate(), 0) - def test__build_ports(self): - vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] - sample_vnf = SampleVNF('vnf1', vnfd) - - self.assertIsNone(sample_vnf._build_ports()) - self.assertIsNotNone(sample_vnf.networks) - self.assertIsNotNone(sample_vnf.uplink_ports) - self.assertIsNotNone(sample_vnf.downlink_ports) - self.assertIsNotNone(sample_vnf.my_ports) - @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.time") def test_vnf_execute_with_queue_data(self, *args): queue_size_list = [ diff --git a/tools/virt_ci_rampup.sh b/tools/virt_ci_rampup.sh index 6a9f2e7cb..aaf162cf7 100755 --- a/tools/virt_ci_rampup.sh +++ b/tools/virt_ci_rampup.sh @@ -13,9 +13,33 @@ # See the License for the specific language governing permissions and # limitations under the License. +env_http_proxy=$(sed -ne "s/^http_proxy=[\"\']\(.*\)[\"\']/\1/p" /etc/environment) +if [[ -z ${http_proxy} ]] && [[ ! -z ${env_http_proxy} ]]; then + export http_proxy=${env_http_proxy} +fi +env_https_proxy=$(sed -ne "s/^https_proxy=[\"\']\(.*\)[\"\']/\1/p" /etc/environment) +if [[ -z ${https_proxy} ]] && [[ ! -z ${env_https_proxy} ]]; then + export https_proxy=${env_https_proxy} +fi +env_ftp_proxy=$(sed -ne "s/^ftp_proxy=[\"\']\(.*\)[\"\']/\1/p" /etc/environment) +if [[ -z ${ftp_proxy} ]] && [[ ! -z ${env_ftp_proxy} ]]; then + export ftp_proxy=${env_ftp_proxy} +fi +if [[ ! -z ${http_proxy} ]] || [[ ! 
-z ${https_proxy} ]]; then + export no_proxy="${no_proxy}" + extra_args="${extra_args} -e @/tmp/proxy.yml " + cat <<EOF > /tmp/proxy.yml +--- +proxy_env: + http_proxy: ${http_proxy} + https_proxy: ${https_proxy} + ftp_proxy: ${ftp_proxy} + no_proxy: ${no_proxy} +EOF +fi ANSIBLE_SCRIPTS="${0%/*}/../ansible" -cd ${ANSIBLE_SCRIPTS} &&\ +cd ${ANSIBLE_SCRIPTS} && \ sudo -EH ansible-playbook \ - -e rs_file='../etc/infra/infra_deploy.yaml' \ + -e RS_FILE='../etc/infra/infra_deploy_two.yaml' -e CLEAN_UP=False ${extra_args} \ -i inventory.ini infra_deploy.yml diff --git a/yardstick/benchmark/contexts/heat.py b/yardstick/benchmark/contexts/heat.py index 0d1dfb86f..0964b7baf 100644 --- a/yardstick/benchmark/contexts/heat.py +++ b/yardstick/benchmark/contexts/heat.py @@ -7,9 +7,6 @@ # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## -from __future__ import absolute_import -from __future__ import print_function - import collections import logging import os diff --git a/yardstick/benchmark/contexts/standalone/model.py b/yardstick/benchmark/contexts/standalone/model.py index f18d090d8..4d43f2611 100644 --- a/yardstick/benchmark/contexts/standalone/model.py +++ b/yardstick/benchmark/contexts/standalone/model.py @@ -232,14 +232,40 @@ class Libvirt(object): return ET.tostring(root) @staticmethod - def create_snapshot_qemu(connection, index, vm_image): - # build snapshot image - image = "/var/lib/libvirt/images/%s.qcow2" % index - connection.execute("rm %s" % image) - qemu_template = "qemu-img create -f qcow2 -o backing_file=%s %s" - connection.execute(qemu_template % (vm_image, image)) - - return image + def create_snapshot_qemu(connection, index, base_image): + """Create the snapshot image for a VM using a base image + + :param connection: SSH connection to the remote host + :param index: index of the VM to be spawn + :param base_image: path of the VM base image in the remote host + :return: snapshot image path + """ + vm_image = '/var/lib/libvirt/images/%s.qcow2' % index + connection.execute('rm -- "%s"' % vm_image) + status, _, _ = connection.execute('test -r %s' % base_image) + if status: + if not os.access(base_image, os.R_OK): + raise exceptions.LibvirtQemuImageBaseImageNotPresent( + vm_image=vm_image, base_image=base_image) + # NOTE(ralonsoh): done in two steps to avoid root permission + # issues. 
+ LOG.info('Copy %s from execution host to remote host', base_image) + file_name = os.path.basename(os.path.normpath(base_image)) + connection.put_file(base_image, '/tmp/%s' % file_name) + status, _, error = connection.execute( + 'mv -- "/tmp/%s" "%s"' % (file_name, base_image)) + if status: + raise exceptions.LibvirtQemuImageCreateError( + vm_image=vm_image, base_image=base_image, error=error) + + LOG.info('Convert image %s to %s', base_image, vm_image) + qemu_cmd = ('qemu-img create -f qcow2 -o backing_file=%s %s' % + (base_image, vm_image)) + status, _, error = connection.execute(qemu_cmd) + if status: + raise exceptions.LibvirtQemuImageCreateError( + vm_image=vm_image, base_image=base_image, error=error) + return vm_image @classmethod def build_vm_xml(cls, connection, flavor, vm_name, index): diff --git a/yardstick/benchmark/core/task.py b/yardstick/benchmark/core/task.py index 955b8cae2..697cc007f 100644 --- a/yardstick/benchmark/core/task.py +++ b/yardstick/benchmark/core/task.py @@ -112,9 +112,9 @@ class Task(object): # pragma: no cover continue try: - data = self._run(tasks[i]['scenarios'], - tasks[i]['run_in_parallel'], - output_config) + success, data = self._run(tasks[i]['scenarios'], + tasks[i]['run_in_parallel'], + output_config) except KeyboardInterrupt: raise except Exception: # pylint: disable=broad-except @@ -123,9 +123,15 @@ class Task(object): # pragma: no cover testcases[tasks[i]['case_name']] = {'criteria': 'FAIL', 'tc_data': []} else: - LOG.info('Testcase: "%s" SUCCESS!!!', tasks[i]['case_name']) - testcases[tasks[i]['case_name']] = {'criteria': 'PASS', - 'tc_data': data} + if success: + LOG.info('Testcase: "%s" SUCCESS!!!', tasks[i]['case_name']) + testcases[tasks[i]['case_name']] = {'criteria': 'PASS', + 'tc_data': data} + else: + LOG.error('Testcase: "%s" FAILED!!!', tasks[i]['case_name'], + exc_info=True) + testcases[tasks[i]['case_name']] = {'criteria': 'FAIL', + 'tc_data': data} if args.keep_deploy: # keep deployment, forget about stack @@ -240,6 +246,7 @@ class Task(object): # pragma: no cover background_runners = [] + task_success = True result = [] # Start all background scenarios for scenario in filter(_is_background_scenario, scenarios): @@ -258,8 +265,8 @@ class Task(object): # pragma: no cover for runner in runners: status = runner_join(runner, background_runners, self.outputs, result) if status != 0: - raise RuntimeError( - "{0} runner status {1}".format(runner.__execution_type__, status)) + LOG.error("%s runner status %s", runner.__execution_type__, status) + task_success = False LOG.info("Runner ended") else: # run serially @@ -271,8 +278,8 @@ class Task(object): # pragma: no cover LOG.error('Scenario NO.%s: "%s" ERROR!', scenarios.index(scenario) + 1, scenario.get('type')) - raise RuntimeError( - "{0} runner status {1}".format(runner.__execution_type__, status)) + LOG.error("%s runner status %s", runner.__execution_type__, status) + task_success = False LOG.info("Runner ended") # Abort background runners @@ -289,7 +296,7 @@ class Task(object): # pragma: no cover base_runner.Runner.release(runner) print("Background task ended") - return result + return task_success, result def atexit_handler(self): """handler for process termination""" diff --git a/yardstick/benchmark/scenarios/availability/scenario_general.py b/yardstick/benchmark/scenarios/availability/scenario_general.py index 9ac55471d..1fadd2532 100644 --- a/yardstick/benchmark/scenarios/availability/scenario_general.py +++ b/yardstick/benchmark/scenarios/availability/scenario_general.py @@ -26,7 
+26,6 @@ class ScenarioGeneral(base.Scenario): self.scenario_cfg = scenario_cfg self.context_cfg = context_cfg self.intermediate_variables = {} - self.pass_flag = True def setup(self): self.director = Director(self.scenario_cfg, self.context_cfg) @@ -47,7 +46,7 @@ class ScenarioGeneral(base.Scenario): step['actionType'], step['actionKey']) if actionRollbacker: self.director.executionSteps.append(actionRollbacker) - except Exception: + except Exception: # pylint: disable=broad-except LOG.exception("Exception") LOG.debug( "\033[91m exception when running step: %s .... \033[0m", @@ -59,31 +58,16 @@ class ScenarioGeneral(base.Scenario): self.director.stopMonitors() verify_result = self.director.verify() - - self.director.store_result(result) - for k, v in self.director.data.items(): if v == 0: result['sla_pass'] = 0 verify_result = False - self.pass_flag = False - LOG.info( - "\033[92m The service process not found in the host \ -envrioment, the HA test case NOT pass") + LOG.info("\033[92m The service process (%s) not found in the host environment", k) - if verify_result: - result['sla_pass'] = 1 - LOG.info( - "\033[92m Congratulations, " - "the HA test case PASS! \033[0m") - else: - result['sla_pass'] = 0 - self.pass_flag = False - LOG.info( - "\033[91m Aoh, the HA test case FAIL," - "please check the detail debug information! \033[0m") + result['sla_pass'] = 1 if verify_result else 0 + self.director.store_result(result) + + assert verify_result is True, "The HA test case NOT passed" def teardown(self): self.director.knockoff() - - assert self.pass_flag, "The HA test case NOT passed" diff --git a/yardstick/benchmark/scenarios/availability/serviceha.py b/yardstick/benchmark/scenarios/availability/serviceha.py index 6d0d812af..dcd0fe598 100755 --- a/yardstick/benchmark/scenarios/availability/serviceha.py +++ b/yardstick/benchmark/scenarios/availability/serviceha.py @@ -29,7 +29,6 @@ class ServiceHA(base.Scenario): self.context_cfg = context_cfg self.setup_done = False self.data = {} - self.pass_flag = True def setup(self): """scenario setup""" @@ -73,18 +72,12 @@ class ServiceHA(base.Scenario): sla_pass = self.monitorMgr.verify_SLA() for k, v in self.data.items(): if v == 0: - result['sla_pass'] = 0 - self.pass_flag = False - LOG.info("The service process not found in the host envrioment, \ -the HA test case NOT pass") - return + sla_pass = False + LOG.info("The service process (%s) not found in the host envrioment", k) + + result['sla_pass'] = 1 if sla_pass else 0 self.monitorMgr.store_result(result) - if sla_pass: - result['sla_pass'] = 1 - LOG.info("The HA test case PASS the SLA") - else: - result['sla_pass'] = 0 - self.pass_flag = False + assert sla_pass is True, "The HA test case NOT pass the SLA" return @@ -94,8 +87,6 @@ the HA test case NOT pass") for attacker in self.attackers: attacker.recover() - assert self.pass_flag, "The HA test case NOT passed" - def _test(): # pragma: no cover """internal test function""" diff --git a/yardstick/benchmark/scenarios/lib/attach_volume.py b/yardstick/benchmark/scenarios/lib/attach_volume.py index 88124964b..96dd130b1 100644 --- a/yardstick/benchmark/scenarios/lib/attach_volume.py +++ b/yardstick/benchmark/scenarios/lib/attach_volume.py @@ -6,30 +6,31 @@ # which accompanies this distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## - -from __future__ import print_function -from __future__ import absolute_import - import logging from 
yardstick.benchmark.scenarios import base -import yardstick.common.openstack_utils as op_utils +from yardstick.common import openstack_utils +from yardstick.common import exceptions LOG = logging.getLogger(__name__) class AttachVolume(base.Scenario): - """Attach a volmeu to an instance""" + """Attach a volume to an instance""" __scenario_type__ = "AttachVolume" def __init__(self, scenario_cfg, context_cfg): self.scenario_cfg = scenario_cfg self.context_cfg = context_cfg - self.options = self.scenario_cfg['options'] + self.options = self.scenario_cfg["options"] - self.server_id = self.options.get("server_id", "TestServer") - self.volume_id = self.options.get("volume_id", None) + self.server_name_or_id = self.options["server_name_or_id"] + self.volume_name_or_id = self.options["volume_name_or_id"] + self.device = self.options.get("device") + self.wait = self.options.get("wait", True) + self.timeout = self.options.get("timeout") + self.shade_client = openstack_utils.get_shade_client() self.setup_done = False @@ -44,10 +45,14 @@ class AttachVolume(base.Scenario): if not self.setup_done: self.setup() - status = op_utils.attach_server_volume(self.server_id, - self.volume_id) + status = openstack_utils.attach_volume_to_server( + self.shade_client, self.server_name_or_id, self.volume_name_or_id, + device=self.device, wait=self.wait, timeout=self.timeout) + + if not status: + result.update({"attach_volume": 0}) + LOG.error("Attach volume to server failed!") + raise exceptions.ScenarioAttachVolumeError - if status: - LOG.info("Attach volume to server successful!") - else: - LOG.info("Attach volume to server failed!") + result.update({"attach_volume": 1}) + LOG.info("Attach volume to server successful!") diff --git a/yardstick/benchmark/scenarios/lib/create_keypair.py b/yardstick/benchmark/scenarios/lib/create_keypair.py index f5b1fff7a..ee9bc440a 100644 --- a/yardstick/benchmark/scenarios/lib/create_keypair.py +++ b/yardstick/benchmark/scenarios/lib/create_keypair.py @@ -6,15 +6,11 @@ # which accompanies this distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## - -from __future__ import print_function -from __future__ import absolute_import - import logging -import paramiko from yardstick.benchmark.scenarios import base -import yardstick.common.openstack_utils as op_utils +from yardstick.common import openstack_utils +from yardstick.common import exceptions LOG = logging.getLogger(__name__) @@ -27,10 +23,11 @@ class CreateKeypair(base.Scenario): def __init__(self, scenario_cfg, context_cfg): self.scenario_cfg = scenario_cfg self.context_cfg = context_cfg - self.options = self.scenario_cfg['options'] + self.options = self.scenario_cfg["options"] - self.key_name = self.options.get("key_name", "yardstick_key") - self.key_filename = self.options.get("key_path", "/tmp/yardstick_key") + self.name = self.options["key_name"] + self.public_key = self.options.get("public_key") + self.shade_client = openstack_utils.get_shade_client() self.setup_done = False @@ -45,27 +42,17 @@ class CreateKeypair(base.Scenario): if not self.setup_done: self.setup() - rsa_key = paramiko.RSAKey.generate(bits=2048, progress_func=None) - rsa_key.write_private_key_file(self.key_filename) - LOG.info("Writing key_file %s ...", self.key_filename) - with open(self.key_filename + ".pub", "w") as pubkey_file: - pubkey_file.write( - "%s %s\n" % (rsa_key.get_name(), rsa_key.get_base64())) - del rsa_key - - keypair = 
op_utils.create_keypair(self.key_name, - self.key_filename + ".pub") + keypair = openstack_utils.create_keypair( + self.shade_client, self.name, public_key=self.public_key) - if keypair: - result.update({"keypair_create": 1}) - LOG.info("Create keypair successful!") - else: + if not keypair: result.update({"keypair_create": 0}) - LOG.info("Create keypair failed!") - try: - keys = self.scenario_cfg.get('output', '').split() - except KeyError: - pass - else: - values = [keypair.id] - return self._push_to_outputs(keys, values) + LOG.error("Create keypair failed!") + raise exceptions.ScenarioCreateKeypairError + + result.update({"keypair_create": 1}) + LOG.info("Create keypair successful!") + keys = self.scenario_cfg.get("output", '').split() + keypair_id = keypair["id"] + values = [keypair_id] + return self._push_to_outputs(keys, values) diff --git a/yardstick/benchmark/scenarios/lib/create_sec_group.py b/yardstick/benchmark/scenarios/lib/create_sec_group.py index 3d1aec9e8..1d2e36488 100644 --- a/yardstick/benchmark/scenarios/lib/create_sec_group.py +++ b/yardstick/benchmark/scenarios/lib/create_sec_group.py @@ -7,13 +7,11 @@ # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## -from __future__ import print_function -from __future__ import absolute_import - import logging from yardstick.benchmark.scenarios import base -import yardstick.common.openstack_utils as op_utils +from yardstick.common import openstack_utils +from yardstick.common import exceptions LOG = logging.getLogger(__name__) @@ -26,11 +24,12 @@ class CreateSecgroup(base.Scenario): def __init__(self, scenario_cfg, context_cfg): self.scenario_cfg = scenario_cfg self.context_cfg = context_cfg - self.options = self.scenario_cfg['options'] + self.options = self.scenario_cfg["options"] - self.sg_name = self.options.get("sg_name", "yardstick_sec_group") - self.description = self.options.get("description", None) - self.neutron_client = op_utils.get_neutron_client() + self.sg_name = self.options["sg_name"] + self.description = self.options.get("description", "") + self.project_id = self.options.get("project_id") + self.shade_client = openstack_utils.get_shade_client() self.setup_done = False @@ -45,21 +44,16 @@ class CreateSecgroup(base.Scenario): if not self.setup_done: self.setup() - sg_id = op_utils.create_security_group_full(self.neutron_client, - sg_name=self.sg_name, - sg_description=self.description) - - if sg_id: - result.update({"sg_create": 1}) - LOG.info("Create security group successful!") - else: + sg_id = openstack_utils.create_security_group_full( + self.shade_client, self.sg_name, sg_description=self.description, + project_id=self.project_id) + if not sg_id: result.update({"sg_create": 0}) LOG.error("Create security group failed!") + raise exceptions.ScenarioCreateSecurityGroupError - try: - keys = self.scenario_cfg.get('output', '').split() - except KeyError: - pass - else: - values = [sg_id] - return self._push_to_outputs(keys, values) + result.update({"sg_create": 1}) + LOG.info("Create security group successful!") + keys = self.scenario_cfg.get("output", '').split() + values = [sg_id] + return self._push_to_outputs(keys, values) diff --git a/yardstick/benchmark/scenarios/lib/create_server.py b/yardstick/benchmark/scenarios/lib/create_server.py index 31ba18ed4..e2748aecf 100644 --- a/yardstick/benchmark/scenarios/lib/create_server.py +++ b/yardstick/benchmark/scenarios/lib/create_server.py @@ -6,14 +6,11 @@ # which accompanies this distribution, 
and is available at # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## - -from __future__ import print_function -from __future__ import absolute_import - import logging from yardstick.benchmark.scenarios import base -import yardstick.common.openstack_utils as op_utils +from yardstick.common import openstack_utils +from yardstick.common import exceptions LOG = logging.getLogger(__name__) @@ -26,15 +23,27 @@ class CreateServer(base.Scenario): def __init__(self, scenario_cfg, context_cfg): self.scenario_cfg = scenario_cfg self.context_cfg = context_cfg - self.options = self.scenario_cfg['options'] - - self.image_name = self.options.get("image_name", None) - self.flavor_name = self.options.get("flavor_name", None) - self.openstack = self.options.get("openstack_paras", None) - - self.glance_client = op_utils.get_glance_client() - self.neutron_client = op_utils.get_neutron_client() - self.nova_client = op_utils.get_nova_client() + self.options = self.scenario_cfg["options"] + + self.name = self.options["name"] + self.image = self.options["image"] + self.flavor = self.options["flavor"] + self.auto_ip = self.options.get("auto_ip", True) + self.ips = self.options.get("ips") + self.ip_pool = self.options.get("ip_pool") + self.root_volume = self.options.get("root_volume") + self.terminate_volume = self.options.get("terminate_volume", False) + self.wait = self.options.get("wait", True) + self.timeout = self.options.get("timeout", 180) + self.reuse_ips = self.options.get("reuse_ips", True) + self.network = self.options.get("network") + self.boot_from_volume = self.options.get("boot_from_volume", False) + self.volume_size = self.options.get("volume_size", "20") + self.boot_volume = self.options.get("boot_volume") + self.volumes = self.options.get("volumes") + self.nat_destination = self.options.get("nat_destination") + + self.shade_client = openstack_utils.get_shade_client() self.setup_done = False @@ -49,26 +58,23 @@ class CreateServer(base.Scenario): if not self.setup_done: self.setup() - if self.image_name is not None: - self.openstack['image'] = op_utils.get_image_id(self.glance_client, - self.image_name) - if self.flavor_name is not None: - self.openstack['flavor'] = op_utils.get_flavor_id(self.nova_client, - self.flavor_name) - - vm = op_utils.create_instance_and_wait_for_active(self.openstack) - - if vm: - result.update({"instance_create": 1}) - LOG.info("Create server successful!") - else: + server = openstack_utils.create_instance_and_wait_for_active( + self.shade_client, self.name, self.image, + self.flavor, auto_ip=self.auto_ip, ips=self.ips, + ip_pool=self.ip_pool, root_volume=self.root_volume, + terminate_volume=self.terminate_volume, wait=self.wait, + timeout=self.timeout, reuse_ips=self.reuse_ips, + network=self.network, boot_from_volume=self.boot_from_volume, + volume_size=self.volume_size, boot_volume=self.boot_volume, + volumes=self.volumes, nat_destination=self.nat_destination) + + if not server: result.update({"instance_create": 0}) LOG.error("Create server failed!") + raise exceptions.ScenarioCreateServerError - try: - keys = self.scenario_cfg.get('output', '').split() - except KeyError: - pass - else: - values = [vm.id] - return self._push_to_outputs(keys, values) + result.update({"instance_create": 1}) + LOG.info("Create instance successful!") + keys = self.scenario_cfg.get("output", '').split() + values = [server["id"]] + return self._push_to_outputs(keys, values) diff --git 
a/yardstick/benchmark/scenarios/lib/delete_keypair.py b/yardstick/benchmark/scenarios/lib/delete_keypair.py index 135139959..a52a38567 100644 --- a/yardstick/benchmark/scenarios/lib/delete_keypair.py +++ b/yardstick/benchmark/scenarios/lib/delete_keypair.py @@ -6,14 +6,12 @@ # which accompanies this distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## - -from __future__ import print_function -from __future__ import absolute_import - import logging +from yardstick.common import openstack_utils +from yardstick.common import exceptions from yardstick.benchmark.scenarios import base -import yardstick.common.openstack_utils as op_utils + LOG = logging.getLogger(__name__) @@ -26,11 +24,11 @@ class DeleteKeypair(base.Scenario): def __init__(self, scenario_cfg, context_cfg): self.scenario_cfg = scenario_cfg self.context_cfg = context_cfg - self.options = self.scenario_cfg['options'] + self.options = self.scenario_cfg["options"] - self.key_name = self.options.get("key_name", "yardstick_key") + self.key_name = self.options["key_name"] - self.nova_client = op_utils.get_nova_client() + self.shade_client = openstack_utils.get_shade_client() self.setup_done = False @@ -45,12 +43,13 @@ class DeleteKeypair(base.Scenario): if not self.setup_done: self.setup() - status = op_utils.delete_keypair(self.nova_client, - self.key_name) + status = openstack_utils.delete_keypair(self.shade_client, + self.key_name) - if status: - result.update({"delete_keypair": 1}) - LOG.info("Delete keypair successful!") - else: + if not status: result.update({"delete_keypair": 0}) - LOG.info("Delete keypair failed!") + LOG.error("Delete keypair failed!") + raise exceptions.ScenarioDeleteKeypairError + + result.update({"delete_keypair": 1}) + LOG.info("Delete keypair successful!") diff --git a/yardstick/benchmark/scenarios/lib/delete_network.py b/yardstick/benchmark/scenarios/lib/delete_network.py index 2e8b595f9..8874e8b1e 100644 --- a/yardstick/benchmark/scenarios/lib/delete_network.py +++ b/yardstick/benchmark/scenarios/lib/delete_network.py @@ -10,7 +10,8 @@ import logging from yardstick.benchmark.scenarios import base -import yardstick.common.openstack_utils as op_utils +from yardstick.common import openstack_utils +from yardstick.common import exceptions LOG = logging.getLogger(__name__) @@ -24,11 +25,11 @@ class DeleteNetwork(base.Scenario): def __init__(self, scenario_cfg, context_cfg): self.scenario_cfg = scenario_cfg self.context_cfg = context_cfg - self.options = self.scenario_cfg['options'] + self.options = self.scenario_cfg["options"] - self.network_id = self.options.get("network_id", None) + self.network_name_or_id = self.options["network_name_or_id"] - self.shade_client = op_utils.get_shade_client() + self.shade_client = openstack_utils.get_shade_client() self.setup_done = False @@ -43,12 +44,13 @@ class DeleteNetwork(base.Scenario): if not self.setup_done: self.setup() - status = op_utils.delete_neutron_net(self.shade_client, - network_id=self.network_id) - if status: - result.update({"delete_network": 1}) - LOG.info("Delete network successful!") - else: + status = openstack_utils.delete_neutron_net(self.shade_client, + self.network_name_or_id) + + if not status: result.update({"delete_network": 0}) LOG.error("Delete network failed!") - return status + raise exceptions.ScenarioDeleteNetworkError + + result.update({"delete_network": 1}) + LOG.info("Delete network successful!") diff --git 
a/yardstick/benchmark/scenarios/lib/delete_server.py b/yardstick/benchmark/scenarios/lib/delete_server.py index bcd8faba7..46229ff04 100644 --- a/yardstick/benchmark/scenarios/lib/delete_server.py +++ b/yardstick/benchmark/scenarios/lib/delete_server.py @@ -6,14 +6,11 @@ # which accompanies this distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## - -from __future__ import print_function -from __future__ import absolute_import - import logging +from yardstick.common import openstack_utils +from yardstick.common import exceptions from yardstick.benchmark.scenarios import base -import yardstick.common.openstack_utils as op_utils LOG = logging.getLogger(__name__) @@ -26,9 +23,13 @@ class DeleteServer(base.Scenario): def __init__(self, scenario_cfg, context_cfg): self.scenario_cfg = scenario_cfg self.context_cfg = context_cfg - self.options = self.scenario_cfg['options'] - self.server_id = self.options.get("server_id", None) - self.nova_client = op_utils.get_nova_client() + self.options = self.scenario_cfg["options"] + self.server_name_or_id = self.options["name_or_id"] + self.wait = self.options.get("wait", False) + self.timeout = self.options.get("timeout", 180) + self.delete_ips = self.options.get("delete_ips", False) + self.delete_ip_retry = self.options.get("delete_ip_retry", 1) + self.shade_client = openstack_utils.get_shade_client() self.setup_done = False @@ -43,9 +44,15 @@ class DeleteServer(base.Scenario): if not self.setup_done: self.setup() - status = op_utils.delete_instance(self.nova_client, - instance_id=self.server_id) - if status: - LOG.info("Delete server successful!") - else: + status = openstack_utils.delete_instance( + self.shade_client, self.server_name_or_id, wait=self.wait, + timeout=self.timeout, delete_ips=self.delete_ips, + delete_ip_retry=self.delete_ip_retry) + + if not status: + result.update({"delete_server": 0}) LOG.error("Delete server failed!") + raise exceptions.ScenarioDeleteServerError + + result.update({"delete_server": 1}) + LOG.info("Delete server successful!") diff --git a/yardstick/benchmark/scenarios/lib/get_flavor.py b/yardstick/benchmark/scenarios/lib/get_flavor.py index d5e33947e..6727a7343 100644 --- a/yardstick/benchmark/scenarios/lib/get_flavor.py +++ b/yardstick/benchmark/scenarios/lib/get_flavor.py @@ -6,14 +6,11 @@ # which accompanies this distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## - -from __future__ import print_function -from __future__ import absolute_import - import logging from yardstick.benchmark.scenarios import base -import yardstick.common.openstack_utils as op_utils +from yardstick.common import openstack_utils +from yardstick.common import exceptions LOG = logging.getLogger(__name__) @@ -26,8 +23,12 @@ class GetFlavor(base.Scenario): def __init__(self, scenario_cfg, context_cfg): self.scenario_cfg = scenario_cfg self.context_cfg = context_cfg - self.options = self.scenario_cfg['options'] - self.flavor_name = self.options.get("flavor_name", "TestFlavor") + self.options = self.scenario_cfg["options"] + self.name_or_id = self.options["name_or_id"] + self.filters = self.options.get("filters") + self.get_extra = self.options.get("get_extra", True) + self.shade_client = openstack_utils.get_shade_client() + self.setup_done = False def setup(self): @@ -41,14 +42,18 @@ class GetFlavor(base.Scenario): if not self.setup_done: 
self.setup() - LOG.info("Querying flavor: %s", self.flavor_name) - flavor = op_utils.get_flavor_by_name(self.flavor_name) - if flavor: - LOG.info("Get flavor successful!") - values = [self._change_obj_to_dict(flavor)] - else: - LOG.info("Get flavor: no flavor matched!") - values = [] + LOG.info("Querying flavor: %s", self.name_or_id) + flavor = openstack_utils.get_flavor( + self.shade_client, self.name_or_id, filters=self.filters, + get_extra=self.get_extra) + + if not flavor: + result.update({"get_flavor": 0}) + LOG.error("Get flavor failed!") + raise exceptions.ScenarioGetFlavorError - keys = self.scenario_cfg.get('output', '').split() + result.update({"get_flavor": 1}) + LOG.info("Get flavor successful!") + values = [flavor] + keys = self.scenario_cfg.get("output", '').split() return self._push_to_outputs(keys, values) diff --git a/yardstick/benchmark/scenarios/lib/get_server.py b/yardstick/benchmark/scenarios/lib/get_server.py index fcf47c80d..f65fa9ebf 100644 --- a/yardstick/benchmark/scenarios/lib/get_server.py +++ b/yardstick/benchmark/scenarios/lib/get_server.py @@ -6,14 +6,11 @@ # which accompanies this distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## - -from __future__ import print_function -from __future__ import absolute_import - import logging from yardstick.benchmark.scenarios import base -import yardstick.common.openstack_utils as op_utils +from yardstick.common import openstack_utils +from yardstick.common import exceptions LOG = logging.getLogger(__name__) @@ -21,63 +18,58 @@ LOG = logging.getLogger(__name__) class GetServer(base.Scenario): """Get a server instance - Parameters - server_id - ID of the server - type: string - unit: N/A - default: null - server_name - name of the server - type: string - unit: N/A - default: null - - Either server_id or server_name is required. - - Outputs + Parameters: + name_or_id - Name or ID of the server + type: string + filters - meta data to use for further filtering + type: dict + detailed: Whether or not to add detailed additional information. + type: bool + bare: Whether to skip adding any additional information to the server + record. + type: bool + all_projects: Whether to get server from all projects or just the current + auth scoped project. 
+ type: bool + + Outputs: rc - response code of getting server instance - 0 for success - 1 for failure + 1 for success + 0 for failure type: int - unit: N/A server - instance of the server type: dict - unit: N/A + """ - __scenario_type__ = "GetServer" + __scenario_type__ = 'GetServer' def __init__(self, scenario_cfg, context_cfg): self.scenario_cfg = scenario_cfg self.context_cfg = context_cfg - self.options = self.scenario_cfg.get('options', {}) + self.options = self.scenario_cfg['options'] - self.server_id = self.options.get("server_id") - if self.server_id: - LOG.debug('Server id is %s', self.server_id) + self.server_name_or_id = self.options.get('name_or_id') + self.filters = self.options.get('filters') + self.detailed = self.options.get('detailed', False) + self.bare = self.options.get('bare', False) - default_name = self.scenario_cfg.get('host', - self.scenario_cfg.get('target')) - self.server_name = self.options.get('server_name', default_name) - if self.server_name: - LOG.debug('Server name is %s', self.server_name) - - self.nova_client = op_utils.get_nova_client() + self.shade_client = openstack_utils.get_shade_client() def run(self, result): """execute the test""" - if self.server_id: - server = self.nova_client.servers.get(self.server_id) - else: - server = op_utils.get_server_by_name(self.server_name) - - keys = self.scenario_cfg.get('output', '').split() + server = openstack_utils.get_server( + self.shade_client, name_or_id=self.server_name_or_id, + filters=self.filters, detailed=self.detailed, bare=self.bare) - if server: - LOG.info("Get server successful!") - values = [0, self._change_obj_to_dict(server)] - else: - LOG.info("Get server failed!") - values = [1] + if not server: + result.update({'get_server': 0}) + LOG.error('Get Server failed!') + raise exceptions.ScenarioGetServerError + result.update({'get_server': 1}) + LOG.info('Get Server successful!') + keys = self.scenario_cfg.get('output', '').split() + values = [server] return self._push_to_outputs(keys, values) diff --git a/yardstick/common/ansible_common.py b/yardstick/common/ansible_common.py index 38d2dd7c2..ca5a110e2 100644 --- a/yardstick/common/ansible_common.py +++ b/yardstick/common/ansible_common.py @@ -514,7 +514,7 @@ class AnsibleCommon(object): parser.add_section('defaults') parser.set('defaults', 'host_key_checking', 'False') - cfg_path = os.path.join(directory, 'setup.cfg') + cfg_path = os.path.join(directory, 'ansible.cfg') with open(cfg_path, 'w') as f: parser.write(f) diff --git a/yardstick/common/exceptions.py b/yardstick/common/exceptions.py index 65e444071..04920943d 100644 --- a/yardstick/common/exceptions.py +++ b/yardstick/common/exceptions.py @@ -54,6 +54,10 @@ class YardstickException(Exception): return False +class ResourceCommandError(YardstickException): + message = 'Command: "%(command)s" Failed, stderr: "%(stderr)s"' + + class FunctionNotImplemented(YardstickException): message = ('The function "%(function_name)s" is not implemented in ' '"%(class_name)" class.') @@ -117,6 +121,17 @@ class LibvirtCreateError(YardstickException): message = 'Error creating the virtual machine. Error: %(error)s.' +class LibvirtQemuImageBaseImageNotPresent(YardstickException): + message = ('Error creating the qemu image for %(vm_image)s. Base image: ' + '%(base_image)s. Base image not present in execution host or ' + 'remote host.') + + +class LibvirtQemuImageCreateError(YardstickException): + message = ('Error creating the qemu image for %(vm_image)s. Base image: ' + '%(base_image)s. 
Error: %(error)s.') + + class ScenarioConfigContextNameNotFound(YardstickException): message = 'Context name "%(context_name)s" not found' @@ -137,6 +152,14 @@ class TaskRenderError(YardstickException): message = 'Failed to render template:\n%(input_task)s' +class TimerTimeout(YardstickException): + message = 'Timer timeout expired, %(timeout)s seconds' + + +class WaitTimeout(YardstickException): + message = 'Wait timeout while waiting for condition' + + class ScenarioCreateNetworkError(YardstickException): message = 'Create Neutron Network Scenario failed' @@ -171,3 +194,39 @@ class ScenarioCreateFloatingIPError(YardstickException): class ScenarioDeleteFloatingIPError(YardstickException): message = 'Delete Neutron Floating IP Scenario failed' + + +class ScenarioCreateSecurityGroupError(YardstickException): + message = 'Create Neutron Security Group Scenario failed' + + +class ScenarioDeleteNetworkError(YardstickException): + message = 'Delete Neutron Network Scenario failed' + + +class ScenarioCreateServerError(YardstickException): + message = 'Nova Create Server Scenario failed' + + +class ScenarioDeleteServerError(YardstickException): + message = 'Delete Server Scenario failed' + + +class ScenarioCreateKeypairError(YardstickException): + message = 'Nova Create Keypair Scenario failed' + + +class ScenarioDeleteKeypairError(YardstickException): + message = 'Nova Delete Keypair Scenario failed' + + +class ScenarioAttachVolumeError(YardstickException): + message = 'Nova Attach Volume Scenario failed' + + +class ScenarioGetServerError(YardstickException): + message = 'Nova Get Server Scenario failed' + + +class ScenarioGetFlavorError(YardstickException): + message = 'Nova Get Falvor Scenario failed' diff --git a/yardstick/common/messaging/consumer.py b/yardstick/common/messaging/consumer.py index a0feeb300..24ec6f184 100644 --- a/yardstick/common/messaging/consumer.py +++ b/yardstick/common/messaging/consumer.py @@ -29,9 +29,9 @@ LOG = logging.getLogger(__name__) class NotificationHandler(object): """Abstract class to define a endpoint object for a MessagingConsumer""" - def __init__(self, id, ctx_pid, queue): - self._id = id - self._ctx_pid = ctx_pid + def __init__(self, _id, ctx_pids, queue): + self._id = _id + self._ctx_pids = ctx_pids self._queue = queue @@ -43,12 +43,12 @@ class MessagingConsumer(object): the messages published by a `MessagingNotifier`. """ - def __init__(self, topic, pid, endpoints, fanout=True): + def __init__(self, topic, pids, endpoints, fanout=True): """Init function. 
:param topic: (string) MQ exchange topic - :param pid: (int) PID of the process implementing the MQ Notifier which - will be in the message context + :param pids: (list of int) list of PIDs of the processes implementing + the MQ Notifier which will be in the message context :param endpoints: (list of class) list of classes implementing the methods (see `MessagingNotifier.send_message) used by the Notifier @@ -58,7 +58,7 @@ class MessagingConsumer(object): :returns: `MessagingConsumer` class object """ - self._pid = pid + self._pids = pids self._endpoints = endpoints self._transport = oslo_messaging.get_rpc_transport( cfg.CONF, url=messaging.TRANSPORT_URL) diff --git a/yardstick/common/openstack_utils.py b/yardstick/common/openstack_utils.py index 5208a2749..68cf0a521 100644 --- a/yardstick/common/openstack_utils.py +++ b/yardstick/common/openstack_utils.py @@ -8,7 +8,6 @@ ############################################################################## import os -import time import sys import logging @@ -163,197 +162,185 @@ def get_shade_client(): # ********************************************* # NOVA # ********************************************* -def get_instances(nova_client): - try: - return nova_client.servers.list(search_opts={'all_tenants': 1}) - except Exception: # pylint: disable=broad-except - log.exception("Error [get_instances(nova_client)]") - - -def get_instance_status(nova_client, instance): # pragma: no cover - try: - return nova_client.servers.get(instance.id).status - except Exception: # pylint: disable=broad-except - log.exception("Error [get_instance_status(nova_client)]") - - -def get_instance_by_name(nova_client, instance_name): # pragma: no cover - try: - return nova_client.servers.find(name=instance_name) - except Exception: # pylint: disable=broad-except - log.exception("Error [get_instance_by_name(nova_client, '%s')]", - instance_name) - - -def get_aggregates(nova_client): # pragma: no cover - try: - return nova_client.aggregates.list() - except Exception: # pylint: disable=broad-except - log.exception("Error [get_aggregates(nova_client)]") +def create_keypair(shade_client, name, public_key=None): + """Create a new keypair. + :param name: Name of the keypair being created. + :param public_key: Public key for the new keypair. -def get_availability_zones(nova_client): # pragma: no cover - try: - return nova_client.availability_zones.list() - except Exception: # pylint: disable=broad-except - log.exception("Error [get_availability_zones(nova_client)]") - - -def get_availability_zone_names(nova_client): # pragma: no cover + :return: Created keypair. + """ try: - return [az.zoneName for az in get_availability_zones(nova_client)] - except Exception: # pylint: disable=broad-except - log.exception("Error [get_availability_zone_names(nova_client)]") + return shade_client.create_keypair(name, public_key=public_key) + except exc.OpenStackCloudException as o_exc: + log.error("Error [create_keypair(shade_client)]. " + "Exception message, '%s'", o_exc.orig_message) -def create_aggregate(nova_client, aggregate_name, av_zone): # pragma: no cover +def create_instance_and_wait_for_active(shade_client, name, image, + flavor, auto_ip=True, ips=None, + ip_pool=None, root_volume=None, + terminate_volume=False, wait=True, + timeout=180, reuse_ips=True, + network=None, boot_from_volume=False, + volume_size='20', boot_volume=None, + volumes=None, nat_destination=None, + **kwargs): + """Create a virtual server instance. + + :param name:(string) Name of the server. 
+ :param image:(dict) Image dict, name or ID to boot with. Image is required + unless boot_volume is given. + :param flavor:(dict) Flavor dict, name or ID to boot onto. + :param auto_ip: Whether to take actions to find a routable IP for + the server. + :param ips: List of IPs to attach to the server. + :param ip_pool:(string) Name of the network or floating IP pool to get an + address from. + :param root_volume:(string) Name or ID of a volume to boot from. + (defaults to None - deprecated, use boot_volume) + :param boot_volume:(string) Name or ID of a volume to boot from. + :param terminate_volume:(bool) If booting from a volume, whether it should + be deleted when the server is destroyed. + :param volumes:(optional) A list of volumes to attach to the server. + :param wait:(optional) Wait for the address to appear as assigned to the server. + :param timeout: Seconds to wait, defaults to 60. + :param reuse_ips:(bool)Whether to attempt to reuse pre-existing + floating ips should a floating IP be needed. + :param network:(dict) Network dict or name or ID to attach the server to. + Mutually exclusive with the nics parameter. Can also be be + a list of network names or IDs or network dicts. + :param boot_from_volume:(bool) Whether to boot from volume. 'boot_volume' + implies True, but boot_from_volume=True with + no boot_volume is valid and will create a + volume from the image and use that. + :param volume_size: When booting an image from volume, how big should + the created volume be? + :param nat_destination: Which network should a created floating IP + be attached to, if it's not possible to infer from + the cloud's configuration. + :param meta:(optional) A dict of arbitrary key/value metadata to store for + this server. Both keys and values must be <=255 characters. + :param reservation_id: A UUID for the set of servers being requested. + :param min_count:(optional extension) The minimum number of servers to + launch. + :param max_count:(optional extension) The maximum number of servers to + launch. + :param security_groups: A list of security group names. + :param userdata: User data to pass to be exposed by the metadata server + this can be a file type object as well or a string. + :param key_name:(optional extension) Name of previously created keypair to + inject into the instance. + :param availability_zone: Name of the availability zone for instance + placement. + :param block_device_mapping:(optional) A dict of block device mappings for + this server. + :param block_device_mapping_v2:(optional) A dict of block device mappings + for this server. + :param nics:(optional extension) An ordered list of nics to be added to + this server, with information about connected networks, fixed + IPs, port etc. + :param scheduler_hints:(optional extension) Arbitrary key-value pairs + specified by the client to help boot an instance. + :param config_drive:(optional extension) Value for config drive either + boolean, or volume-id. + :param disk_config:(optional extension) Control how the disk is partitioned + when the server is created. Possible values are 'AUTO' + or 'MANUAL'. + :param admin_pass:(optional extension) Add a user supplied admin password. + + :returns: The created server. 
+ """ try: - nova_client.aggregates.create(aggregate_name, av_zone) - except Exception: # pylint: disable=broad-except - log.exception("Error [create_aggregate(nova_client, %s, %s)]", - aggregate_name, av_zone) - return False - else: - return True + return shade_client.create_server( + name, image, flavor, auto_ip=auto_ip, ips=ips, ip_pool=ip_pool, + root_volume=root_volume, terminate_volume=terminate_volume, + wait=wait, timeout=timeout, reuse_ips=reuse_ips, network=network, + boot_from_volume=boot_from_volume, volume_size=volume_size, + boot_volume=boot_volume, volumes=volumes, + nat_destination=nat_destination, **kwargs) + except exc.OpenStackCloudException as o_exc: + log.error("Error [create_instance(shade_client)]. " + "Exception message, '%s'", o_exc.orig_message) -def get_aggregate_id(nova_client, aggregate_name): # pragma: no cover - try: - aggregates = get_aggregates(nova_client) - _id = next((ag.id for ag in aggregates if ag.name == aggregate_name)) - except Exception: # pylint: disable=broad-except - log.exception("Error [get_aggregate_id(nova_client, %s)]", - aggregate_name) - else: - return _id +def attach_volume_to_server(shade_client, server_name_or_id, volume_name_or_id, + device=None, wait=True, timeout=None): + """Attach a volume to a server. + This will attach a volume, described by the passed in volume + dict, to the server described by the passed in server dict on the named + device on the server. -def add_host_to_aggregate(nova_client, aggregate_name, - compute_host): # pragma: no cover - try: - aggregate_id = get_aggregate_id(nova_client, aggregate_name) - nova_client.aggregates.add_host(aggregate_id, compute_host) - except Exception: # pylint: disable=broad-except - log.exception("Error [add_host_to_aggregate(nova_client, %s, %s)]", - aggregate_name, compute_host) - return False - else: - return True + If the volume is already attached to the server, or generally not + available, then an exception is raised. To re-attach to a server, + but under a different device, the user must detach it first. + :param server_name_or_id:(string) The server name or id to attach to. + :param volume_name_or_id:(string) The volume name or id to attach. + :param device:(string) The device name where the volume will attach. + :param wait:(bool) If true, waits for volume to be attached. + :param timeout: Seconds to wait for volume attachment. None is forever. -def create_aggregate_with_host(nova_client, aggregate_name, av_zone, - compute_host): # pragma: no cover + :returns: True if attached successful, False otherwise. 
+ """ try: - create_aggregate(nova_client, aggregate_name, av_zone) - add_host_to_aggregate(nova_client, aggregate_name, compute_host) - except Exception: # pylint: disable=broad-except - log.exception("Error [create_aggregate_with_host(" - "nova_client, %s, %s, %s)]", - aggregate_name, av_zone, compute_host) - return False - else: + server = shade_client.get_server(name_or_id=server_name_or_id) + volume = shade_client.get_volume(volume_name_or_id) + shade_client.attach_volume( + server, volume, device=device, wait=wait, timeout=timeout) return True - - -def create_keypair(name, key_path=None): # pragma: no cover - try: - with open(key_path) as fpubkey: - keypair = get_nova_client().keypairs.create( - name=name, public_key=fpubkey.read()) - return keypair - except Exception: # pylint: disable=broad-except - log.exception("Error [create_keypair(nova_client)]") - - -def create_instance(json_body): # pragma: no cover - try: - return get_nova_client().servers.create(**json_body) - except Exception: # pylint: disable=broad-except - log.exception("Error create instance failed") - return None - - -def create_instance_and_wait_for_active(json_body): # pragma: no cover - SLEEP = 3 - VM_BOOT_TIMEOUT = 180 - nova_client = get_nova_client() - instance = create_instance(json_body) - for _ in range(int(VM_BOOT_TIMEOUT / SLEEP)): - status = get_instance_status(nova_client, instance) - if status.lower() == "active": - return instance - elif status.lower() == "error": - log.error("The instance went to ERROR status.") - return None - time.sleep(SLEEP) - log.error("Timeout booting the instance.") - return None - - -def attach_server_volume(server_id, volume_id, - device=None): # pragma: no cover - try: - get_nova_client().volumes.create_server_volume(server_id, - volume_id, device) - except Exception: # pylint: disable=broad-except - log.exception("Error [attach_server_volume(nova_client, '%s', '%s')]", - server_id, volume_id) + except exc.OpenStackCloudException as o_exc: + log.error("Error [attach_volume_to_server(shade_client)]. " + "Exception message: %s", o_exc.orig_message) return False - else: - return True - -def delete_instance(nova_client, instance_id): # pragma: no cover - try: - nova_client.servers.force_delete(instance_id) - except Exception: # pylint: disable=broad-except - log.exception("Error [delete_instance(nova_client, '%s')]", - instance_id) - return False - else: - return True +def delete_instance(shade_client, name_or_id, wait=False, timeout=180, + delete_ips=False, delete_ip_retry=1): + """Delete a server instance. -def remove_host_from_aggregate(nova_client, aggregate_name, - compute_host): # pragma: no cover + :param name_or_id: name or ID of the server to delete + :param wait:(bool) If true, waits for server to be deleted. + :param timeout:(int) Seconds to wait for server deletion. + :param delete_ips:(bool) If true, deletes any floating IPs associated with + the instance. + :param delete_ip_retry:(int) Number of times to retry deleting + any floating ips, should the first try be + unsuccessful. + :returns: True if delete succeeded, False otherwise. 
+ """ try: - aggregate_id = get_aggregate_id(nova_client, aggregate_name) - nova_client.aggregates.remove_host(aggregate_id, compute_host) - except Exception: # pylint: disable=broad-except - log.exception("Error remove_host_from_aggregate(nova_client, %s, %s)", - aggregate_name, compute_host) + return shade_client.delete_server( + name_or_id, wait=wait, timeout=timeout, delete_ips=delete_ips, + delete_ip_retry=delete_ip_retry) + except exc.OpenStackCloudException as o_exc: + log.error("Error [delete_instance(shade_client, '%s')]. " + "Exception message: %s", name_or_id, + o_exc.orig_message) return False - else: - return True -def remove_hosts_from_aggregate(nova_client, - aggregate_name): # pragma: no cover - aggregate_id = get_aggregate_id(nova_client, aggregate_name) - hosts = nova_client.aggregates.get(aggregate_id).hosts - assert( - all(remove_host_from_aggregate(nova_client, aggregate_name, host) - for host in hosts)) +def get_server(shade_client, name_or_id=None, filters=None, detailed=False, + bare=False): + """Get a server by name or ID. + :param name_or_id: Name or ID of the server. + :param filters:(dict) A dictionary of meta data to use for further + filtering. + :param detailed:(bool) Whether or not to add detailed additional + information. + :param bare:(bool) Whether to skip adding any additional information to the + server record. -def delete_aggregate(nova_client, aggregate_name): # pragma: no cover - try: - remove_hosts_from_aggregate(nova_client, aggregate_name) - nova_client.aggregates.delete(aggregate_name) - except Exception: # pylint: disable=broad-except - log.exception("Error [delete_aggregate(nova_client, %s)]", - aggregate_name) - return False - else: - return True - - -def get_server_by_name(name): # pragma: no cover + :returns: A server ``munch.Munch`` or None if no matching server is found. + """ try: - return get_nova_client().servers.list(search_opts={'name': name})[0] - except IndexError: - log.exception('Failed to get nova client') - raise + return shade_client.get_server(name_or_id=name_or_id, filters=filters, + detailed=detailed, bare=bare) + except exc.OpenStackCloudException as o_exc: + log.error("Error [get_server(shade_client, '%s')]. " + "Exception message: %s", name_or_id, o_exc.orig_message) def create_flavor(name, ram, vcpus, disk, **kwargs): # pragma: no cover @@ -366,14 +353,6 @@ def create_flavor(name, ram, vcpus, disk, **kwargs): # pragma: no cover return None -def get_image_by_name(name): # pragma: no cover - images = get_nova_client().images.list() - try: - return next((a for a in images if a.name == name)) - except StopIteration: - log.exception('No image matched') - - def get_flavor_id(nova_client, flavor_name): # pragma: no cover flavors = nova_client.flavors.list(detailed=True) flavor_id = '' @@ -384,27 +363,22 @@ def get_flavor_id(nova_client, flavor_name): # pragma: no cover return flavor_id -def get_flavor_by_name(name): # pragma: no cover - flavors = get_nova_client().flavors.list() - try: - return next((a for a in flavors if a.name == name)) - except StopIteration: - log.exception('No flavor matched') - - -def check_status(status, name, iterations, interval): # pragma: no cover - for _ in range(iterations): - try: - server = get_server_by_name(name) - except IndexError: - log.error('Cannot found %s server', name) - raise +def get_flavor(shade_client, name_or_id, filters=None, get_extra=True): + """Get a flavor by name or ID. - if server.status == status: - return True + :param name_or_id: Name or ID of the flavor. 
+ :param filters: A dictionary of meta data to use for further filtering. + :param get_extra: Whether or not the list_flavors call should get the extra + flavor specs. - time.sleep(interval) - return False + :returns: A flavor ``munch.Munch`` or None if no matching flavor is found. + """ + try: + return shade_client.get_flavor(name_or_id, filters=filters, + get_extra=get_extra) + except exc.OpenStackCloudException as o_exc: + log.error("Error [get_flavor(shade_client, '%s')]. " + "Exception message: %s", name_or_id, o_exc.orig_message) def delete_flavor(flavor_id): # pragma: no cover @@ -417,12 +391,18 @@ def delete_flavor(flavor_id): # pragma: no cover return True -def delete_keypair(nova_client, key): # pragma: no cover +def delete_keypair(shade_client, name): + """Delete a keypair. + + :param name: Name of the keypair to delete. + + :returns: True if delete succeeded, False otherwise. + """ try: - nova_client.keypairs.delete(key=key) - return True - except Exception: # pylint: disable=broad-except - log.exception("Error [delete_keypair(nova_client)]") + return shade_client.delete_keypair(name) + except exc.OpenStackCloudException as o_exc: + log.error("Error [delete_neutron_router(shade_client, '%s')]. " + "Exception message: %s", name, o_exc.orig_message) return False @@ -625,39 +605,6 @@ def delete_floating_ip(shade_client, floating_ip_id, retry=1): return False -def get_security_groups(neutron_client): # pragma: no cover - try: - security_groups = neutron_client.list_security_groups()[ - 'security_groups'] - return security_groups - except Exception: # pylint: disable=broad-except - log.error("Error [get_security_groups(neutron_client)]") - return None - - -def get_security_group_id(neutron_client, sg_name): # pragma: no cover - security_groups = get_security_groups(neutron_client) - id = '' - for sg in security_groups: - if sg['name'] == sg_name: - id = sg['id'] - break - return id - - -def create_security_group(neutron_client, sg_name, - sg_description): # pragma: no cover - json_body = {'security_group': {'name': sg_name, - 'description': sg_description}} - try: - secgroup = neutron_client.create_security_group(json_body) - return secgroup['security_group'] - except Exception: # pylint: disable=broad-except - log.error("Error [create_security_group(neutron_client, '%s', " - "'%s')]", sg_name, sg_description) - return None - - def create_security_group_rule(shade_client, secgroup_name_or_id, port_range_min=None, port_range_max=None, protocol=None, remote_ip_prefix=None, @@ -712,42 +659,52 @@ def create_security_group_rule(shade_client, secgroup_name_or_id, return False -def create_security_group_full(neutron_client, sg_name, - sg_description): # pragma: no cover - sg_id = get_security_group_id(neutron_client, sg_name) - if sg_id != '': +def create_security_group_full(shade_client, sg_name, + sg_description, project_id=None): + security_group = shade_client.get_security_group(sg_name) + + if security_group: log.info("Using existing security group '%s'...", sg_name) - else: - log.info("Creating security group '%s'...", sg_name) - SECGROUP = create_security_group(neutron_client, - sg_name, - sg_description) - if not SECGROUP: - log.error("Failed to create the security group...") - return None - - sg_id = SECGROUP['id'] - - log.debug("Security group '%s' with ID=%s created successfully.", - SECGROUP['name'], sg_id) - - log.debug("Adding ICMP rules in security group '%s'...", sg_name) - if not create_security_group_rule(neutron_client, sg_id, - 'ingress', 'icmp'): - log.error("Failed to 
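A combined sketch of the shade-based flavor lookup and the security-group bootstrap introduced here, assuming a shade client and illustrative names; per the code above, get_flavor() returns None when nothing matches or the API call fails, and create_security_group_full() returns the group id (or None on failure) after installing ICMP and SSH rules:

    from yardstick.common import openstack_utils

    shade_client = openstack_utils.get_shade_client()

    # Look up a flavor by name or ID; None means no match or an API error.
    flavor = openstack_utils.get_flavor(shade_client, 'yardstick-flavor')

    # Create (or reuse) a security group with ICMP and SSH ingress/egress rules.
    sg_id = openstack_utils.create_security_group_full(
        shade_client, 'yardstick-sg', 'security group for yardstick VMs')

    if flavor is None or sg_id is None:
        raise RuntimeError('flavor lookup or security group creation failed')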
create the security group rule...") - return None - - log.debug("Adding SSH rules in security group '%s'...", sg_name) - if not create_security_group_rule( - neutron_client, sg_id, 'ingress', 'tcp', '22', '22'): - log.error("Failed to create the security group rule...") - return None - - if not create_security_group_rule( - neutron_client, sg_id, 'egress', 'tcp', '22', '22'): - log.error("Failed to create the security group rule...") - return None - return sg_id + return security_group['id'] + + log.info("Creating security group '%s'...", sg_name) + try: + security_group = shade_client.create_security_group( + sg_name, sg_description, project_id=project_id) + except (exc.OpenStackCloudException, + exc.OpenStackCloudUnavailableFeature) as op_exc: + log.error("Error [create_security_group(shade_client, %s, %s)]. " + "Exception message: %s", sg_name, sg_description, + op_exc.orig_message) + return + + log.debug("Security group '%s' with ID=%s created successfully.", + security_group['name'], security_group['id']) + + log.debug("Adding ICMP rules in security group '%s'...", sg_name) + if not create_security_group_rule(shade_client, security_group['id'], + direction='ingress', protocol='icmp'): + log.error("Failed to create the security group rule...") + shade_client.delete_security_group(sg_name) + return + + log.debug("Adding SSH rules in security group '%s'...", sg_name) + if not create_security_group_rule(shade_client, security_group['id'], + direction='ingress', protocol='tcp', + port_range_min='22', + port_range_max='22'): + log.error("Failed to create the security group rule...") + shade_client.delete_security_group(sg_name) + return + + if not create_security_group_rule(shade_client, security_group['id'], + direction='egress', protocol='tcp', + port_range_min='22', + port_range_max='22'): + log.error("Failed to create the security group rule...") + shade_client.delete_security_group(sg_name) + return + return security_group['id'] # ********************************************* diff --git a/yardstick/common/utils.py b/yardstick/common/utils.py index 44cc92a7c..108ee17bc 100644 --- a/yardstick/common/utils.py +++ b/yardstick/common/utils.py @@ -23,9 +23,11 @@ import logging import os import random import re +import signal import socket import subprocess import sys +import time import six from flask import jsonify @@ -34,6 +36,8 @@ from oslo_serialization import jsonutils from oslo_utils import encodeutils import yardstick +from yardstick.common import exceptions + logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) @@ -405,15 +409,24 @@ class ErrorClass(object): class Timer(object): - def __init__(self): + def __init__(self, timeout=None): super(Timer, self).__init__() self.start = self.delta = None + self._timeout = int(timeout) if timeout else None + + def _timeout_handler(self, *args): + raise exceptions.TimerTimeout(timeout=self._timeout) def __enter__(self): self.start = datetime.datetime.now() + if self._timeout: + signal.signal(signal.SIGALRM, self._timeout_handler) + signal.alarm(self._timeout) return self def __exit__(self, *_): + if self._timeout: + signal.alarm(0) self.delta = datetime.datetime.now() - self.start def __getattr__(self, item): @@ -460,3 +473,22 @@ def open_relative_file(path, task_path): if e.errno == errno.ENOENT: return open(os.path.join(task_path, path)) raise + + +def wait_until_true(predicate, timeout=60, sleep=1, exception=None): + """Wait until callable predicate is evaluated as True + + :param predicate: (func) callable deciding whether 
waiting should continue + :param timeout: (int) timeout in seconds how long should function wait + :param sleep: (int) polling interval for results in seconds + :param exception: exception instance to raise on timeout. If None is passed + (default) then WaitTimeout exception is raised. + """ + try: + with Timer(timeout=timeout): + while not predicate(): + time.sleep(sleep) + except exceptions.TimerTimeout: + if exception and issubclass(exception, Exception): + raise exception # pylint: disable=raising-bad-type + raise exceptions.WaitTimeout diff --git a/yardstick/network_services/nfvi/resource.py b/yardstick/network_services/nfvi/resource.py index dc5c46a86..0c0bf223a 100644 --- a/yardstick/network_services/nfvi/resource.py +++ b/yardstick/network_services/nfvi/resource.py @@ -27,6 +27,7 @@ from oslo_config import cfg from oslo_utils.encodeutils import safe_decode from yardstick import ssh +from yardstick.common.exceptions import ResourceCommandError from yardstick.common.task_template import finalize_for_yaml from yardstick.common.utils import validate_non_string_sequence from yardstick.network_services.nfvi.collectd import AmqpConsumer @@ -249,45 +250,46 @@ class ResourceProfile(object): if status != 0: LOG.error("cannot find OVS socket %s", socket_path) + def _start_rabbitmq(self, connection): + # Reset amqp queue + LOG.debug("reset and setup amqp to collect data from collectd") + # ensure collectd.conf.d exists to avoid error/warning + cmd_list = ["sudo mkdir -p /etc/collectd/collectd.conf.d", + "sudo service rabbitmq-server restart", + "sudo rabbitmqctl stop_app", + "sudo rabbitmqctl reset", + "sudo rabbitmqctl start_app", + "sudo rabbitmqctl add_user admin admin", + "sudo rabbitmqctl authenticate_user admin admin", + "sudo rabbitmqctl set_permissions -p / admin '.*' '.*' '.*'" + ] + for cmd in cmd_list: + exit_status, stdout, stderr = connection.execute(cmd) + if exit_status != 0: + raise ResourceCommandError(command=cmd, stderr=stderr) + + # check stdout for "sudo rabbitmqctl status" command + cmd = "sudo rabbitmqctl status" + _, stdout, stderr = connection.execute(cmd) + if not re.search("RabbitMQ", stdout): + LOG.error("rabbitmqctl status don't have RabbitMQ in running apps") + raise ResourceCommandError(command=cmd, stderr=stderr) + def _start_collectd(self, connection, bin_path): LOG.debug("Starting collectd to collect NFVi stats") - connection.execute('sudo pkill -x -9 collectd') collectd_path = os.path.join(bin_path, "collectd", "sbin", "collectd") config_file_path = os.path.join(bin_path, "collectd", "etc") + self._prepare_collectd_conf(config_file_path) + + connection.execute('sudo pkill -x -9 collectd') exit_status = connection.execute("which %s > /dev/null 2>&1" % collectd_path)[0] if exit_status != 0: LOG.warning("%s is not present disabling", collectd_path) - # disable auto-provisioning because it requires Internet access - # collectd_installer = os.path.join(bin_path, "collectd.sh") - # provision_tool(connection, collectd) - # http_proxy = os.environ.get('http_proxy', '') - # https_proxy = os.environ.get('https_proxy', '') - # connection.execute("sudo %s '%s' '%s'" % ( - # collectd_installer, http_proxy, https_proxy)) return if "ovs_stats" in self.plugins: self._setup_ovs_stats(connection) LOG.debug("Starting collectd to collect NFVi stats") - # ensure collectd.conf.d exists to avoid error/warning - connection.execute("sudo mkdir -p /etc/collectd/collectd.conf.d") - self._prepare_collectd_conf(config_file_path) - - # Reset amqp queue - LOG.debug("reset and setup amqp to 
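A small sketch of the timeout utilities added to yardstick.common.utils, assuming they are called from the main thread (the Timer timeout is implemented with SIGALRM); the timed block and the polled file path are illustrative:

    import os
    import time

    from yardstick.common import exceptions, utils

    # Time a block of work and abort it if it runs longer than 5 seconds.
    try:
        with utils.Timer(timeout=5) as timer:
            time.sleep(1)  # stand-in for real work
    except exceptions.TimerTimeout:
        print('work did not finish in time')
    else:
        print('took %d second(s)' % timer.delta.seconds)

    # Poll a condition once per 2 seconds; raises exceptions.WaitTimeout
    # if the predicate is still False after 30 seconds.
    utils.wait_until_true(lambda: os.path.exists('/tmp/ready'),
                          timeout=30, sleep=2)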
collect data from collectd") - connection.execute("sudo rm -rf /var/lib/rabbitmq/mnesia/rabbit*") - connection.execute("sudo service rabbitmq-server start") - connection.execute("sudo rabbitmqctl stop_app") - connection.execute("sudo rabbitmqctl reset") - connection.execute("sudo rabbitmqctl start_app") - connection.execute("sudo service rabbitmq-server restart") - - LOG.debug("Creating admin user for rabbitmq in order to collect data from collectd") - connection.execute("sudo rabbitmqctl delete_user guest") - connection.execute("sudo rabbitmqctl add_user admin admin") - connection.execute("sudo rabbitmqctl authenticate_user admin admin") - connection.execute("sudo rabbitmqctl set_permissions -p / admin '.*' '.*' '.*'") - LOG.debug("Start collectd service..... %s second timeout", self.timeout) # intel_pmu plug requires large numbers of files open, so try to set # ulimit -n to a large value @@ -299,9 +301,10 @@ class ResourceProfile(object): """ Start system agent for NFVi collection on host """ if self.enable: try: + self._start_rabbitmq(self.connection) self._start_collectd(self.connection, bin_path) - except Exception: - LOG.exception("Exception during collectd start") + except ResourceCommandError as e: + LOG.exception("Exception during collectd and rabbitmq start: %s", str(e)) raise def start(self): diff --git a/yardstick/network_services/vnf_generic/vnf/sample_vnf.py b/yardstick/network_services/vnf_generic/vnf/sample_vnf.py index 77488c479..d8b9625fb 100644 --- a/yardstick/network_services/vnf_generic/vnf/sample_vnf.py +++ b/yardstick/network_services/vnf_generic/vnf/sample_vnf.py @@ -35,7 +35,6 @@ from yardstick.common import utils from yardstick.network_services import constants from yardstick.network_services.helpers.dpdkbindnic_helper import DpdkBindHelper, DpdkNode from yardstick.network_services.helpers.samplevnf_helper import MultiPortConfig -from yardstick.network_services.helpers.samplevnf_helper import PortPairs from yardstick.network_services.nfvi.resource import ResourceProfile from yardstick.network_services.utils import get_nsb_option from yardstick.network_services.vnf_generic.vnf.base import GenericTrafficGen @@ -657,49 +656,6 @@ class SampleVNF(GenericVNF): self.vnf_port_pairs = None self._vnf_process = None - def _build_ports(self): - self._port_pairs = PortPairs(self.vnfd_helper.interfaces) - self.networks = self._port_pairs.networks - self.uplink_ports = self.vnfd_helper.port_nums(self._port_pairs.uplink_ports) - self.downlink_ports = self.vnfd_helper.port_nums(self._port_pairs.downlink_ports) - self.my_ports = self.vnfd_helper.port_nums(self._port_pairs.all_ports) - - def _get_route_data(self, route_index, route_type): - route_iter = iter(self.vnfd_helper.vdu0.get('nd_route_tbl', [])) - for _ in range(route_index): - next(route_iter, '') - return next(route_iter, {}).get(route_type, '') - - def _get_port0localip6(self): - return_value = self._get_route_data(0, 'network') - LOG.info("_get_port0localip6 : %s", return_value) - return return_value - - def _get_port1localip6(self): - return_value = self._get_route_data(1, 'network') - LOG.info("_get_port1localip6 : %s", return_value) - return return_value - - def _get_port0prefixlen6(self): - return_value = self._get_route_data(0, 'netmask') - LOG.info("_get_port0prefixlen6 : %s", return_value) - return return_value - - def _get_port1prefixlen6(self): - return_value = self._get_route_data(1, 'netmask') - LOG.info("_get_port1prefixlen6 : %s", return_value) - return return_value - - def _get_port0gateway6(self): - 
return_value = self._get_route_data(0, 'network') - LOG.info("_get_port0gateway6 : %s", return_value) - return return_value - - def _get_port1gateway6(self): - return_value = self._get_route_data(1, 'network') - LOG.info("_get_port1gateway6 : %s", return_value) - return return_value - def _start_vnf(self): self.queue_wrapper = QueueFileWrapper(self.q_in, self.q_out, self.VNF_PROMPT) name = "{}-{}-{}".format(self.name, self.APP_NAME, os.getpid()) diff --git a/yardstick/tests/functional/common/messaging/test_messaging.py b/yardstick/tests/functional/common/messaging/test_messaging.py index 96deeb35b..99874343b 100644 --- a/yardstick/tests/functional/common/messaging/test_messaging.py +++ b/yardstick/tests/functional/common/messaging/test_messaging.py @@ -13,7 +13,6 @@ # limitations under the License. import multiprocessing -import os import time from yardstick.common.messaging import consumer @@ -33,24 +32,25 @@ class DummyPayload(payloads.Payload): class DummyEndpoint(consumer.NotificationHandler): def info(self, ctxt, **kwargs): - if ctxt['pid'] == self._ctx_pid: - self._queue.put('ID {}, data: {}'.format(self._id, kwargs['data'])) + if ctxt['pid'] in self._ctx_pids: + self._queue.put('ID {}, data: {}, pid: {}'.format( + self._id, kwargs['data'], ctxt['pid'])) class DummyConsumer(consumer.MessagingConsumer): - def __init__(self, id, ctx_pid, queue): - self._id = id - endpoints = [DummyEndpoint(id, ctx_pid, queue)] - super(DummyConsumer, self).__init__(TOPIC, ctx_pid, endpoints) + def __init__(self, _id, ctx_pids, queue): + self._id = _id + endpoints = [DummyEndpoint(_id, ctx_pids, queue)] + super(DummyConsumer, self).__init__(TOPIC, ctx_pids, endpoints) class DummyProducer(producer.MessagingProducer): pass -def _run_consumer(id, ctx_pid, queue): - _consumer = DummyConsumer(id, ctx_pid, queue) +def _run_consumer(_id, ctx_pids, queue): + _consumer = DummyConsumer(_id, ctx_pids, queue) _consumer.start_rpc_server() _consumer.wait() @@ -65,30 +65,35 @@ class MessagingTestCase(base.BaseFunctionalTestCase): def test_run_five_consumers(self): output_queue = multiprocessing.Queue() num_consumers = 10 - ctx_id = os.getpid() - producer = DummyProducer(TOPIC, pid=ctx_id) + ctx_1 = 100001 + ctx_2 = 100002 + producers = [DummyProducer(TOPIC, pid=ctx_1), + DummyProducer(TOPIC, pid=ctx_2)] processes = [] for i in range(num_consumers): processes.append(multiprocessing.Process( name='consumer_{}'.format(i), target=_run_consumer, - args=(i, ctx_id, output_queue))) + args=(i, [ctx_1, ctx_2], output_queue))) processes[i].start() self.addCleanup(self._terminate_consumers, num_consumers, processes) time.sleep(2) # Let consumers to create the listeners - producer.send_message(METHOD_INFO, DummyPayload(version=1, - data='message 0')) - producer.send_message(METHOD_INFO, DummyPayload(version=1, - data='message 1')) - time.sleep(2) # Let consumers attend the calls + for producer in producers: + for message in ['message 0', 'message 1']: + producer.send_message(METHOD_INFO, + DummyPayload(version=1, data=message)) + time.sleep(2) # Let consumers attend the calls output = [] while not output_queue.empty(): output.append(output_queue.get(True, 1)) - self.assertEqual(num_consumers * 2, len(output)) + self.assertEqual(num_consumers * 4, len(output)) + msg_template = 'ID {}, data: {}, pid: {}' for i in range(num_consumers): - self.assertIn('ID {}, data: {}'.format(1, 'message 0'), output) - self.assertIn('ID {}, data: {}'.format(1, 'message 1'), output) + for ctx in [ctx_1, ctx_2]: + for message in ['message 0', 'message 
1']: + msg = msg_template.format(i, message, ctx) + self.assertIn(msg, output) diff --git a/yardstick/tests/unit/benchmark/contexts/standalone/test_model.py b/yardstick/tests/unit/benchmark/contexts/standalone/test_model.py index b1dcee209..72e684a68 100644 --- a/yardstick/tests/unit/benchmark/contexts/standalone/test_model.py +++ b/yardstick/tests/unit/benchmark/contexts/standalone/test_model.py @@ -13,11 +13,11 @@ # limitations under the License. import copy -import mock import os -import unittest import uuid +import mock +import unittest from xml.etree import ElementTree from yardstick import ssh @@ -172,14 +172,70 @@ class ModelLibvirtTestCase(unittest.TestCase): interface_address.get('function')) def test_create_snapshot_qemu(self): - result = "/var/lib/libvirt/images/0.qcow2" - with mock.patch("yardstick.ssh.SSH") as ssh: - ssh_mock = mock.Mock(autospec=ssh.SSH) - ssh_mock.execute = \ - mock.Mock(return_value=(0, "a", "")) - ssh.return_value = ssh_mock - image = model.Libvirt.create_snapshot_qemu(ssh_mock, "0", "ubuntu.img") - self.assertEqual(image, result) + self.mock_ssh.execute = mock.Mock(return_value=(0, 0, 0)) + index = 1 + vm_image = '/var/lib/libvirt/images/%s.qcow2' % index + base_image = '/tmp/base_image' + + model.Libvirt.create_snapshot_qemu(self.mock_ssh, index, base_image) + self.mock_ssh.execute.assert_has_calls([ + mock.call('rm -- "%s"' % vm_image), + mock.call('test -r %s' % base_image), + mock.call('qemu-img create -f qcow2 -o backing_file=%s %s' % + (base_image, vm_image)) + ]) + + @mock.patch.object(os.path, 'basename', return_value='base_image') + @mock.patch.object(os.path, 'normpath') + @mock.patch.object(os, 'access', return_value=True) + def test_create_snapshot_qemu_no_image_remote(self, + mock_os_access, mock_normpath, mock_basename): + self.mock_ssh.execute = mock.Mock( + side_effect=[(0, 0, 0), (1, 0, 0), (0, 0, 0), (0, 0, 0)]) + index = 1 + vm_image = '/var/lib/libvirt/images/%s.qcow2' % index + base_image = '/tmp/base_image' + mock_normpath.return_value = base_image + + model.Libvirt.create_snapshot_qemu(self.mock_ssh, index, base_image) + self.mock_ssh.execute.assert_has_calls([ + mock.call('rm -- "%s"' % vm_image), + mock.call('test -r %s' % base_image), + mock.call('mv -- "/tmp/%s" "%s"' % ('base_image', base_image)), + mock.call('qemu-img create -f qcow2 -o backing_file=%s %s' % + (base_image, vm_image)) + ]) + mock_os_access.assert_called_once_with(base_image, os.R_OK) + mock_normpath.assert_called_once_with(base_image) + mock_basename.assert_has_calls([mock.call(base_image)]) + self.mock_ssh.put_file.assert_called_once_with(base_image, + '/tmp/base_image') + + @mock.patch.object(os, 'access', return_value=False) + def test_create_snapshot_qemu_no_image_local(self, mock_os_access): + self.mock_ssh.execute = mock.Mock(side_effect=[(0, 0, 0), (1, 0, 0)]) + base_image = '/tmp/base_image' + + with self.assertRaises(exceptions.LibvirtQemuImageBaseImageNotPresent): + model.Libvirt.create_snapshot_qemu(self.mock_ssh, 3, base_image) + mock_os_access.assert_called_once_with(base_image, os.R_OK) + + def test_create_snapshot_qemu_error_qemuimg_command(self): + self.mock_ssh.execute = mock.Mock( + side_effect=[(0, 0, 0), (0, 0, 0), (1, 0, 0)]) + index = 1 + vm_image = '/var/lib/libvirt/images/%s.qcow2' % index + base_image = '/tmp/base_image' + + with self.assertRaises(exceptions.LibvirtQemuImageCreateError): + model.Libvirt.create_snapshot_qemu(self.mock_ssh, index, + base_image) + self.mock_ssh.execute.assert_has_calls([ + mock.call('rm -- "%s"' % 
vm_image), + mock.call('test -r %s' % base_image), + mock.call('qemu-img create -f qcow2 -o backing_file=%s %s' % + (base_image, vm_image)) + ]) @mock.patch.object(model.Libvirt, 'pin_vcpu_for_perf', return_value='4,5') @mock.patch.object(model.Libvirt, 'create_snapshot_qemu', @@ -422,7 +478,7 @@ class OvsDeployTestCase(unittest.TestCase): def setUp(self): self._mock_ssh = mock.patch.object(ssh, 'SSH') - self.mock_ssh = self._mock_ssh .start() + self.mock_ssh = self._mock_ssh.start() self.ovs_deploy = model.OvsDeploy(self.mock_ssh, '/tmp/dpdk-devbind.py', self.OVS_DETAILS) diff --git a/yardstick/tests/unit/benchmark/contexts/test_heat.py b/yardstick/tests/unit/benchmark/contexts/test_heat.py index a40adf5ae..1d491fe60 100644 --- a/yardstick/tests/unit/benchmark/contexts/test_heat.py +++ b/yardstick/tests/unit/benchmark/contexts/test_heat.py @@ -229,7 +229,7 @@ class HeatContextTestCase(unittest.TestCase): self.assertRaises(y_exc.HeatTemplateError, self.test_context.deploy) - mock_path_exists.assert_called_once() + mock_path_exists.assert_called() mock_resources_template.assert_called_once() @mock.patch.object(os.path, 'exists', return_value=False) @@ -254,7 +254,7 @@ class HeatContextTestCase(unittest.TestCase): 'yardstick/resources/files/yardstick_key-', self.test_context._name_task_id]) mock_genkeys.assert_called_once_with(key_filename) - mock_path_exists.assert_called_once_with(key_filename) + mock_path_exists.assert_any_call(key_filename) @mock.patch.object(heat, 'HeatTemplate') @mock.patch.object(os.path, 'exists', return_value=False) @@ -280,7 +280,7 @@ class HeatContextTestCase(unittest.TestCase): 'yardstick/resources/files/yardstick_key-', self.test_context._name]) mock_genkeys.assert_called_once_with(key_filename) - mock_path_exists.assert_called_once_with(key_filename) + mock_path_exists.assert_any_call(key_filename) @mock.patch.object(heat, 'HeatTemplate') @mock.patch.object(os.path, 'exists', return_value=False) @@ -296,7 +296,6 @@ class HeatContextTestCase(unittest.TestCase): self.test_context._flags.no_setup = True self.test_context.template_file = '/bar/baz/some-heat-file' self.test_context.get_neutron_info = mock.MagicMock() - self.test_context.deploy() mock_retrieve_stack.assert_called_once_with(self.test_context._name) @@ -334,7 +333,7 @@ class HeatContextTestCase(unittest.TestCase): 'yardstick/resources/files/yardstick_key-', self.test_context._name_task_id]) mock_genkeys.assert_called_once_with(key_filename) - mock_path_exists.assert_called_with(key_filename) + mock_path_exists.assert_any_call(key_filename) mock_call_gen_keys = mock.call.gen_keys(key_filename) mock_call_add_resources = ( diff --git a/yardstick/tests/unit/benchmark/scenarios/availability/test_scenario_general.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_scenario_general.py index 45840d569..d1172d5a6 100644 --- a/yardstick/tests/unit/benchmark/scenarios/availability/test_scenario_general.py +++ b/yardstick/tests/unit/benchmark/scenarios/availability/test_scenario_general.py @@ -14,7 +14,8 @@ from yardstick.benchmark.scenarios.availability import scenario_general class ScenarioGeneralTestCase(unittest.TestCase): - def setUp(self): + @mock.patch.object(scenario_general, 'Director') + def setUp(self, *args): self.scenario_cfg = { 'type': "general_scenario", 'options': { @@ -36,32 +37,28 @@ class ScenarioGeneralTestCase(unittest.TestCase): } } self.instance = scenario_general.ScenarioGeneral(self.scenario_cfg, None) - - self._mock_director = mock.patch.object(scenario_general, 'Director') - 
self.mock_director = self._mock_director.start() - self.addCleanup(self._stop_mock) - - def _stop_mock(self): - self._mock_director.stop() + self.instance.setup() + self.instance.director.verify.return_value = True def test_scenario_general_all_successful(self): - self.instance.setup() - self.instance.run({}) + + ret = {} + self.instance.run(ret) self.instance.teardown() + self.assertEqual(ret['sla_pass'], 1) def test_scenario_general_exception(self): - mock_obj = mock.Mock() - mock_obj.createActionPlayer.side_effect = KeyError('Wrong') - self.instance.director = mock_obj + self.instance.director.createActionPlayer.side_effect = KeyError('Wrong') self.instance.director.data = {} - self.instance.run({}) + ret = {} + self.instance.run(ret) self.instance.teardown() + self.assertEqual(ret['sla_pass'], 1) def test_scenario_general_case_fail(self): - mock_obj = mock.Mock() - mock_obj.verify.return_value = False - self.instance.director = mock_obj + self.instance.director.verify.return_value = False self.instance.director.data = {} - self.instance.run({}) - self.instance.pass_flag = True + ret = {} + self.assertRaises(AssertionError, self.instance.run, ret) self.instance.teardown() + self.assertEqual(ret['sla_pass'], 0) diff --git a/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py index 6bb3ec63b..dd656fbd5 100644 --- a/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py +++ b/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py @@ -60,15 +60,16 @@ class ServicehaTestCase(unittest.TestCase): p.setup() self.assertTrue(p.setup_done) - # def test__serviceha_run_sla_error(self, mock_attacker, mock_monitor): - # p = serviceha.ServiceHA(self.args, self.ctx) + @mock.patch.object(serviceha, 'baseattacker') + @mock.patch.object(serviceha, 'basemonitor') + def test__serviceha_run_sla_error(self, mock_monitor, *args): + p = serviceha.ServiceHA(self.args, self.ctx) - # p.setup() - # self.assertEqual(p.setup_done, True) + p.setup() + self.assertEqual(p.setup_done, True) - # result = {} - # result["outage_time"] = 10 - # mock_monitor.Monitor().get_result.return_value = result + mock_monitor.MonitorMgr().verify_SLA.return_value = False - # ret = {} - # self.assertRaises(AssertionError, p.run, ret) + ret = {} + self.assertRaises(AssertionError, p.run, ret) + self.assertEqual(ret['sla_pass'], 0) diff --git a/yardstick/tests/unit/benchmark/scenarios/lib/test_attach_volume.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_attach_volume.py index 2964ecc14..bb7fa4536 100644 --- a/yardstick/tests/unit/benchmark/scenarios/lib/test_attach_volume.py +++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_attach_volume.py @@ -6,21 +6,51 @@ # which accompanies this distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## +from oslo_utils import uuidutils import unittest import mock -from yardstick.benchmark.scenarios.lib.attach_volume import AttachVolume +from yardstick.common import openstack_utils +from yardstick.common import exceptions +from yardstick.benchmark.scenarios.lib import attach_volume class AttachVolumeTestCase(unittest.TestCase): - @mock.patch('yardstick.common.openstack_utils.attach_server_volume') - def test_attach_volume(self, mock_attach_server_volume): - options = { - 'volume_id': '123-456-000', - 'server_id': '000-123-456' - } - args = {"options": options} - obj = 
AttachVolume(args, {}) - obj.run({}) - mock_attach_server_volume.assert_called_once() + def setUp(self): + + self._mock_attach_volume_to_server = mock.patch.object( + openstack_utils, 'attach_volume_to_server') + self.mock_attach_volume_to_server = ( + self._mock_attach_volume_to_server.start()) + self._mock_get_shade_client = mock.patch.object( + openstack_utils, 'get_shade_client') + self.mock_get_shade_client = self._mock_get_shade_client.start() + self._mock_log = mock.patch.object(attach_volume, 'LOG') + self.mock_log = self._mock_log.start() + _uuid = uuidutils.generate_uuid() + self.args = {'options': {'server_name_or_id': _uuid, + 'volume_name_or_id': _uuid}} + self.result = {} + self.addCleanup(self._stop_mock) + self.attachvol_obj = attach_volume.AttachVolume(self.args, mock.ANY) + + def _stop_mock(self): + self._mock_attach_volume_to_server.stop() + self._mock_get_shade_client.stop() + self._mock_log.stop() + + def test_run(self): + self.mock_attach_volume_to_server.return_value = True + self.assertIsNone(self.attachvol_obj.run(self.result)) + self.assertEqual({'attach_volume': 1}, self.result) + self.mock_log.info.asset_called_once_with( + 'Attach volume to server successful!') + + def test_run_fail(self): + self.mock_attach_volume_to_server.return_value = False + with self.assertRaises(exceptions.ScenarioAttachVolumeError): + self.attachvol_obj.run(self.result) + self.assertEqual({'attach_volume': 0}, self.result) + self.mock_log.error.assert_called_once_with( + 'Attach volume to server failed!') diff --git a/yardstick/tests/unit/benchmark/scenarios/lib/test_create_keypair.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_create_keypair.py index 1c3d6cebc..a7b683f47 100644 --- a/yardstick/tests/unit/benchmark/scenarios/lib/test_create_keypair.py +++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_create_keypair.py @@ -6,22 +6,52 @@ # which accompanies this distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## - -import mock +from oslo_utils import uuidutils import unittest +import mock +from yardstick.common import openstack_utils +from yardstick.common import exceptions from yardstick.benchmark.scenarios.lib import create_keypair class CreateKeypairTestCase(unittest.TestCase): - @mock.patch.object(create_keypair, 'paramiko') - @mock.patch.object(create_keypair, 'op_utils') - def test_create_keypair(self, mock_op_utils, *args): - options = { - 'key_name': 'yardstick_key', - 'key_path': '/tmp/yardstick_key' - } - args = {"options": options} - obj = create_keypair.CreateKeypair(args, {}) - obj.run({}) - mock_op_utils.create_keypair.assert_called_once() + + def setUp(self): + + self._mock_create_keypair = mock.patch.object( + openstack_utils, 'create_keypair') + self.mock_create_keypair = ( + self._mock_create_keypair.start()) + self._mock_get_shade_client = mock.patch.object( + openstack_utils, 'get_shade_client') + self.mock_get_shade_client = self._mock_get_shade_client.start() + self._mock_log = mock.patch.object(create_keypair, 'LOG') + self.mock_log = self._mock_log.start() + self.args = {'options': {'key_name': 'yardstick_key'}} + self.result = {} + + self.ckeypair_obj = create_keypair.CreateKeypair(self.args, mock.ANY) + self.addCleanup(self._stop_mock) + + def _stop_mock(self): + self._mock_create_keypair.stop() + self._mock_get_shade_client.stop() + self._mock_log.stop() + + def test_run(self): + _uuid = uuidutils.generate_uuid() + 
self.ckeypair_obj.scenario_cfg = {'output': 'id'} + self.mock_create_keypair.return_value = { + 'name': 'key-name', 'type': 'ssh', 'id': _uuid} + output = self.ckeypair_obj.run(self.result) + self.assertDictEqual({'keypair_create': 1}, self.result) + self.assertDictEqual({'id': _uuid}, output) + self.mock_log.info.asset_called_once_with('Create keypair successful!') + + def test_run_fail(self): + self.mock_create_keypair.return_value = None + with self.assertRaises(exceptions.ScenarioCreateKeypairError): + self.ckeypair_obj.run(self.result) + self.assertDictEqual({'keypair_create': 0}, self.result) + self.mock_log.error.assert_called_once_with('Create keypair failed!') diff --git a/yardstick/tests/unit/benchmark/scenarios/lib/test_create_sec_group.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_create_sec_group.py index 21158ab17..0477a49d4 100644 --- a/yardstick/tests/unit/benchmark/scenarios/lib/test_create_sec_group.py +++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_create_sec_group.py @@ -6,25 +6,54 @@ # which accompanies this distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## + +from oslo_utils import uuidutils import unittest import mock -from yardstick.benchmark.scenarios.lib.create_sec_group import CreateSecgroup - - -class CreateSecGroupTestCase(unittest.TestCase): - - @mock.patch('yardstick.common.openstack_utils.get_neutron_client') - @mock.patch('yardstick.common.openstack_utils.create_security_group_full') - def test_create_sec_group(self, mock_get_neutron_client, mock_create_security_group_full): - options = { - 'openstack_paras': { - 'sg_name': 'yardstick_sec_group', - 'description': 'security group for yardstick manual VM' - } - } - args = {"options": options} - obj = CreateSecgroup(args, {}) - obj.run({}) - mock_get_neutron_client.assert_called_once() - mock_create_security_group_full.assert_called_once() +from yardstick.common import openstack_utils +from yardstick.common import exceptions +from yardstick.benchmark.scenarios.lib import create_sec_group + + +class CreateSecurityGroupTestCase(unittest.TestCase): + + def setUp(self): + + self._mock_create_security_group_full = mock.patch.object( + openstack_utils, 'create_security_group_full') + self.mock_create_security_group_full = ( + self._mock_create_security_group_full.start()) + self._mock_get_shade_client = mock.patch.object( + openstack_utils, 'get_shade_client') + self.mock_get_shade_client = self._mock_get_shade_client.start() + self._mock_log = mock.patch.object(create_sec_group, 'LOG') + self.mock_log = self._mock_log.start() + self.args = {'options': {'sg_name': 'yardstick_sg'}} + self.result = {} + + self.csecgp_obj = create_sec_group.CreateSecgroup(self.args, mock.ANY) + self.addCleanup(self._stop_mock) + + def _stop_mock(self): + self._mock_create_security_group_full.stop() + self._mock_get_shade_client.stop() + self._mock_log.stop() + + def test_run(self): + _uuid = uuidutils.generate_uuid() + self.csecgp_obj.scenario_cfg = {'output': 'id'} + self.mock_create_security_group_full.return_value = _uuid + output = self.csecgp_obj.run(self.result) + self.assertEqual({'sg_create': 1}, self.result) + self.assertEqual({'id': _uuid}, output) + self.mock_log.info.asset_called_once_with( + 'Create security group successful!') + + def test_run_fail(self): + self.mock_create_security_group_full.return_value = None + with self.assertRaises(exceptions.ScenarioCreateSecurityGroupError): + 
self.csecgp_obj.run(self.result) + self.assertEqual({'sg_create': 0}, self.result) + self.mock_log.error.assert_called_once_with( + 'Create security group failed!') diff --git a/yardstick/tests/unit/benchmark/scenarios/lib/test_create_server.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_create_server.py index 9d6d8cb1b..b58785112 100644 --- a/yardstick/tests/unit/benchmark/scenarios/lib/test_create_server.py +++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_create_server.py @@ -6,29 +6,54 @@ # which accompanies this distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## +from oslo_utils import uuidutils import unittest import mock -from yardstick.benchmark.scenarios.lib.create_server import CreateServer +from yardstick.common import openstack_utils +from yardstick.common import exceptions +from yardstick.benchmark.scenarios.lib import create_server class CreateServerTestCase(unittest.TestCase): - @mock.patch('yardstick.common.openstack_utils.create_instance_and_wait_for_active') - @mock.patch('yardstick.common.openstack_utils.get_nova_client') - @mock.patch('yardstick.common.openstack_utils.get_glance_client') - @mock.patch('yardstick.common.openstack_utils.get_neutron_client') - def test_create_server(self, mock_get_nova_client, mock_get_neutron_client, - mock_get_glance_client, mock_create_instance_and_wait_for_active): - scenario_cfg = { - 'options': { - 'openstack_paras': 'example' - }, - 'output': 'server' - } - obj = CreateServer(scenario_cfg, {}) - obj.run({}) - mock_get_nova_client.assert_called_once() - mock_get_glance_client.assert_called_once() - mock_get_neutron_client.assert_called_once() - mock_create_instance_and_wait_for_active.assert_called_once() + def setUp(self): + + self._mock_create_instance_and_wait_for_active = mock.patch.object( + openstack_utils, 'create_instance_and_wait_for_active') + self.mock_create_instance_and_wait_for_active = ( + self._mock_create_instance_and_wait_for_active.start()) + self._mock_get_shade_client = mock.patch.object( + openstack_utils, 'get_shade_client') + self.mock_get_shade_client = self._mock_get_shade_client.start() + self._mock_log = mock.patch.object(create_server, 'LOG') + self.mock_log = self._mock_log.start() + self.args = { + 'options': {'name': 'server-name', 'image': 'image-name', + 'flavor': 'flavor-name'}} + self.result = {} + + self.addCleanup(self._stop_mock) + self.cserver_obj = create_server.CreateServer(self.args, mock.ANY) + + def _stop_mock(self): + self._mock_create_instance_and_wait_for_active.stop() + self._mock_get_shade_client.stop() + self._mock_log.stop() + + def test_run(self): + _uuid = uuidutils.generate_uuid() + self.cserver_obj.scenario_cfg = {'output': 'id'} + self.mock_create_instance_and_wait_for_active.return_value = ( + {'name': 'server-name', 'flavor': 'flavor-name', 'id': _uuid}) + output = self.cserver_obj.run(self.result) + self.assertEqual({'instance_create': 1}, self.result) + self.assertEqual({'id': _uuid}, output) + self.mock_log.info.asset_called_once_with('Create server successful!') + + def test_run_fail(self): + self.mock_create_instance_and_wait_for_active.return_value = None + with self.assertRaises(exceptions.ScenarioCreateServerError): + self.cserver_obj.run(self.result) + self.assertEqual({'instance_create': 0}, self.result) + self.mock_log.error.assert_called_once_with('Create server failed!') diff --git 
a/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_keypair.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_keypair.py index 6e790ba90..c7940251e 100644 --- a/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_keypair.py +++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_keypair.py @@ -9,19 +9,43 @@ import unittest import mock -from yardstick.benchmark.scenarios.lib.delete_keypair import DeleteKeypair +from yardstick.common import openstack_utils +from yardstick.common import exceptions +from yardstick.benchmark.scenarios.lib import delete_keypair class DeleteKeypairTestCase(unittest.TestCase): - @mock.patch('yardstick.common.openstack_utils.get_nova_client') - @mock.patch('yardstick.common.openstack_utils.delete_keypair') - def test_detach_volume(self, mock_get_nova_client, mock_delete_keypair): - options = { - 'key_name': 'yardstick_key' - } - args = {"options": options} - obj = DeleteKeypair(args, {}) - obj.run({}) - mock_get_nova_client.assert_called_once() - mock_delete_keypair.assert_called_once() + def setUp(self): + self._mock_delete_keypair = mock.patch.object( + openstack_utils, 'delete_keypair') + self.mock_delete_keypair = self._mock_delete_keypair.start() + self._mock_get_shade_client = mock.patch.object( + openstack_utils, 'get_shade_client') + self.mock_get_shade_client = self._mock_get_shade_client.start() + self._mock_log = mock.patch.object(delete_keypair, 'LOG') + self.mock_log = self._mock_log.start() + self.args = {'options': {'key_name': 'yardstick_key'}} + self.result = {} + self.delkey_obj = delete_keypair.DeleteKeypair(self.args, mock.ANY) + + self.addCleanup(self._stop_mock) + + def _stop_mock(self): + self._mock_delete_keypair.stop() + self._mock_get_shade_client.stop() + self._mock_log.stop() + + def test_run(self): + self.mock_delete_keypair.return_value = True + self.assertIsNone(self.delkey_obj.run(self.result)) + self.assertEqual({'delete_keypair': 1}, self.result) + self.mock_log.info.assert_called_once_with( + 'Delete keypair successful!') + + def test_run_fail(self): + self.mock_delete_keypair.return_value = False + with self.assertRaises(exceptions.ScenarioDeleteKeypairError): + self.delkey_obj.run(self.result) + self.assertEqual({'delete_keypair': 0}, self.result) + self.mock_log.error.assert_called_once_with("Delete keypair failed!") diff --git a/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_network.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_network.py index aef99ee94..b6dbf4791 100644 --- a/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_network.py +++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_network.py @@ -11,7 +11,8 @@ from oslo_utils import uuidutils import unittest import mock -import yardstick.common.openstack_utils as op_utils +from yardstick.common import openstack_utils +from yardstick.common import exceptions from yardstick.benchmark.scenarios.lib import delete_network @@ -19,16 +20,17 @@ class DeleteNetworkTestCase(unittest.TestCase): def setUp(self): self._mock_delete_neutron_net = mock.patch.object( - op_utils, 'delete_neutron_net') + openstack_utils, "delete_neutron_net") self.mock_delete_neutron_net = self._mock_delete_neutron_net.start() self._mock_get_shade_client = mock.patch.object( - op_utils, 'get_shade_client') + openstack_utils, "get_shade_client") self.mock_get_shade_client = self._mock_get_shade_client.start() - self._mock_log = mock.patch.object(delete_network, 'LOG') + self._mock_log = mock.patch.object(delete_network, "LOG") 
self.mock_log = self._mock_log.start() - _uuid = uuidutils.generate_uuid() - self.args = {'options': {'network_id': _uuid}} - self._del_obj = delete_network.DeleteNetwork(self.args, mock.ANY) + self.args = {"options": {"network_name_or_id": ( + uuidutils.generate_uuid())}} + self.result = {} + self.del_obj = delete_network.DeleteNetwork(self.args, mock.ANY) self.addCleanup(self._stop_mock) @@ -39,11 +41,14 @@ class DeleteNetworkTestCase(unittest.TestCase): def test_run(self): self.mock_delete_neutron_net.return_value = True - self.assertTrue(self._del_obj.run({})) + self.assertIsNone(self.del_obj.run(self.result)) + self.assertEqual({"delete_network": 1}, self.result) self.mock_log.info.assert_called_once_with( "Delete network successful!") def test_run_fail(self): self.mock_delete_neutron_net.return_value = False - self.assertFalse(self._del_obj.run({})) + with self.assertRaises(exceptions.ScenarioDeleteNetworkError): + self.del_obj.run(self.result) + self.assertEqual({"delete_network": 0}, self.result) self.mock_log.error.assert_called_once_with("Delete network failed!") diff --git a/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_server.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_server.py index eee565de7..55fe53df8 100644 --- a/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_server.py +++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_server.py @@ -6,22 +6,49 @@ # which accompanies this distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## +from oslo_utils import uuidutils import unittest import mock -from yardstick.benchmark.scenarios.lib.delete_server import DeleteServer +from yardstick.common import openstack_utils +from yardstick.common import exceptions +from yardstick.benchmark.scenarios.lib import delete_server class DeleteServerTestCase(unittest.TestCase): - @mock.patch('yardstick.common.openstack_utils.delete_instance') - @mock.patch('yardstick.common.openstack_utils.get_nova_client') - def test_delete_server(self, mock_get_nova_client, mock_delete_instance): - options = { - 'server_id': '1234-4567-0000' - } - args = {"options": options} - obj = DeleteServer(args, {}) - obj.run({}) - mock_get_nova_client.assert_called_once() - mock_delete_instance.assert_called_once() + def setUp(self): + self._mock_delete_instance = mock.patch.object( + openstack_utils, 'delete_instance') + self.mock_delete_instance = ( + self._mock_delete_instance.start()) + self._mock_get_shade_client = mock.patch.object( + openstack_utils, 'get_shade_client') + self.mock_get_shade_client = self._mock_get_shade_client.start() + self._mock_log = mock.patch.object(delete_server, 'LOG') + self.mock_log = self._mock_log.start() + self.args = {'options': {'name_or_id': uuidutils.generate_uuid() + }} + self.result = {} + + self.delserver_obj = delete_server.DeleteServer(self.args, mock.ANY) + + self.addCleanup(self._stop_mock) + + def _stop_mock(self): + self._mock_delete_instance.stop() + self._mock_get_shade_client.stop() + self._mock_log.stop() + + def test_run(self): + self.mock_delete_instance.return_value = True + self.assertIsNone(self.delserver_obj.run(self.result)) + self.assertEqual({'delete_server': 1}, self.result) + self.mock_log.info.assert_called_once_with('Delete server successful!') + + def test_run_fail(self): + self.mock_delete_instance.return_value = False + with self.assertRaises(exceptions.ScenarioDeleteServerError): + 
self.delserver_obj.run(self.result) + self.assertEqual({'delete_server': 0}, self.result) + self.mock_log.error.assert_called_once_with('Delete server failed!') diff --git a/yardstick/tests/unit/benchmark/scenarios/lib/test_get_flavor.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_get_flavor.py index 15a6f7c8f..1c1364348 100644 --- a/yardstick/tests/unit/benchmark/scenarios/lib/test_get_flavor.py +++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_get_flavor.py @@ -6,20 +6,52 @@ # which accompanies this distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## +from oslo_utils import uuidutils import unittest import mock -from yardstick.benchmark.scenarios.lib.get_flavor import GetFlavor +from yardstick.common import openstack_utils +from yardstick.common import exceptions +from yardstick.benchmark.scenarios.lib import get_flavor class GetFlavorTestCase(unittest.TestCase): - @mock.patch('yardstick.common.openstack_utils.get_flavor_by_name') - def test_get_flavor(self, mock_get_flavor_by_name): - options = { - 'flavor_name': 'yardstick_test_flavor' - } - args = {"options": options} - obj = GetFlavor(args, {}) - obj.run({}) - mock_get_flavor_by_name.assert_called_once() + def setUp(self): + + self._mock_get_flavor = mock.patch.object( + openstack_utils, 'get_flavor') + self.mock_get_flavor = self._mock_get_flavor.start() + self._mock_get_shade_client = mock.patch.object( + openstack_utils, 'get_shade_client') + self.mock_get_shade_client = self._mock_get_shade_client.start() + self._mock_log = mock.patch.object(get_flavor, 'LOG') + self.mock_log = self._mock_log.start() + self.args = {'options': {'name_or_id': 'yardstick_flavor'}} + self.result = {} + + self.getflavor_obj = get_flavor.GetFlavor(self.args, mock.ANY) + self.addCleanup(self._stop_mock) + + def _stop_mock(self): + self._mock_get_flavor.stop() + self._mock_get_shade_client.stop() + self._mock_log.stop() + + def test_run(self): + _uuid = uuidutils.generate_uuid() + self.getflavor_obj.scenario_cfg = {'output': 'flavor'} + self.mock_get_flavor.return_value = ( + {'name': 'flavor-name', 'id': _uuid}) + output = self.getflavor_obj.run(self.result) + self.assertDictEqual({'get_flavor': 1}, self.result) + self.assertDictEqual({'flavor': {'name': 'flavor-name', 'id': _uuid}}, + output) + self.mock_log.info.asset_called_once_with('Get flavor successful!') + + def test_run_fail(self): + self.mock_get_flavor.return_value = None + with self.assertRaises(exceptions.ScenarioGetFlavorError): + self.getflavor_obj.run(self.result) + self.assertDictEqual({'get_flavor': 0}, self.result) + self.mock_log.error.assert_called_once_with('Get flavor failed!') diff --git a/yardstick/tests/unit/benchmark/scenarios/lib/test_get_server.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_get_server.py index 83ec903bc..5b5329cb0 100644 --- a/yardstick/tests/unit/benchmark/scenarios/lib/test_get_server.py +++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_get_server.py @@ -6,37 +6,52 @@ # which accompanies this distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## +from oslo_utils import uuidutils import unittest import mock -from yardstick.benchmark.scenarios.lib.get_server import GetServer +from yardstick.common import openstack_utils +from yardstick.common import exceptions +from yardstick.benchmark.scenarios.lib import get_server class 
GetServerTestCase(unittest.TestCase): - @mock.patch('yardstick.common.openstack_utils.get_server_by_name') - @mock.patch('yardstick.common.openstack_utils.get_nova_client') - def test_get_server_with_name(self, mock_get_nova_client, mock_get_server_by_name): - scenario_cfg = { - 'options': { - 'server_name': 'yardstick_server' - }, - 'output': 'status server' - } - obj = GetServer(scenario_cfg, {}) - obj.run({}) - mock_get_nova_client.assert_called_once() - mock_get_server_by_name.assert_called_once() - - @mock.patch('yardstick.common.openstack_utils.get_nova_client') - def test_get_server_with_id(self, mock_get_nova_client): - scenario_cfg = { - 'options': { - 'server_id': '1' - }, - 'output': 'status server' - } - mock_get_nova_client().servers.get.return_value = None - obj = GetServer(scenario_cfg, {}) - obj.run({}) - mock_get_nova_client.assert_called() + def setUp(self): + + self._mock_get_server = mock.patch.object( + openstack_utils, 'get_server') + self.mock_get_server = self._mock_get_server.start() + self._mock_get_shade_client = mock.patch.object( + openstack_utils, 'get_shade_client') + self.mock_get_shade_client = self._mock_get_shade_client.start() + self._mock_log = mock.patch.object(get_server, 'LOG') + self.mock_log = self._mock_log.start() + self.args = {'options': {'name_or_id': 'yardstick_key'}} + self.result = {} + + self.getserver_obj = get_server.GetServer(self.args, mock.ANY) + self.addCleanup(self._stop_mock) + + def _stop_mock(self): + self._mock_get_server.stop() + self._mock_get_shade_client.stop() + self._mock_log.stop() + + def test_run(self): + _uuid = uuidutils.generate_uuid() + self.getserver_obj.scenario_cfg = {'output': 'server'} + self.mock_get_server.return_value = ( + {'name': 'server-name', 'id': _uuid}) + output = self.getserver_obj.run(self.result) + self.assertDictEqual({'get_server': 1}, self.result) + self.assertDictEqual({'server': {'name': 'server-name', 'id': _uuid}}, + output) + self.mock_log.info.asset_called_once_with('Get Server successful!') + + def test_run_fail(self): + self.mock_get_server.return_value = None + with self.assertRaises(exceptions.ScenarioGetServerError): + self.getserver_obj.run(self.result) + self.assertDictEqual({'get_server': 0}, self.result) + self.mock_log.error.assert_called_once_with('Get Server failed!') diff --git a/yardstick/tests/unit/common/test_openstack_utils.py b/yardstick/tests/unit/common/test_openstack_utils.py index 5c7e5bfab..81bcd8c33 100644 --- a/yardstick/tests/unit/common/test_openstack_utils.py +++ b/yardstick/tests/unit/common/test_openstack_utils.py @@ -39,18 +39,17 @@ class DeleteNeutronNetTestCase(unittest.TestCase): def setUp(self): self.mock_shade_client = mock.Mock() - self.mock_shade_client.delete_network = mock.Mock() def test_delete_neutron_net(self): self.mock_shade_client.delete_network.return_value = True output = openstack_utils.delete_neutron_net(self.mock_shade_client, - 'network_id') + 'network_name_or_id') self.assertTrue(output) def test_delete_neutron_net_fail(self): self.mock_shade_client.delete_network.return_value = False output = openstack_utils.delete_neutron_net(self.mock_shade_client, - 'network_id') + 'network_name_or_id') self.assertFalse(output) @mock.patch.object(openstack_utils, 'log') @@ -58,7 +57,7 @@ class DeleteNeutronNetTestCase(unittest.TestCase): self.mock_shade_client.delete_network.side_effect = ( exc.OpenStackCloudException('error message')) output = openstack_utils.delete_neutron_net(self.mock_shade_client, - 'network_id') + 'network_name_or_id') 
self.assertFalse(output) mock_logger.error.assert_called_once() @@ -282,3 +281,233 @@ class ListImageTestCase(unittest.TestCase): images = openstack_utils.list_images(mock_shade_client) mock_logger.error.assert_called_once() self.assertFalse(images) + + +class SecurityGroupTestCase(unittest.TestCase): + + def setUp(self): + self.mock_shade_client = mock.Mock() + self.sg_name = 'sg_name' + self.sg_description = 'sg_description' + self._uuid = uuidutils.generate_uuid() + + def test_create_security_group_full_existing_security_group(self): + self.mock_shade_client.get_security_group.return_value = ( + {'name': 'name', 'id': self._uuid}) + output = openstack_utils.create_security_group_full( + self.mock_shade_client, self.sg_name, self.sg_description) + self.mock_shade_client.get_security_group.assert_called_once() + self.assertEqual(self._uuid, output) + + @mock.patch.object(openstack_utils, 'log') + def test_create_security_group_full_non_existing_security_group( + self, mock_logger): + self.mock_shade_client.get_security_group.return_value = None + self.mock_shade_client.create_security_group.side_effect = ( + exc.OpenStackCloudException('error message')) + output = openstack_utils.create_security_group_full( + self.mock_shade_client, self.sg_name, self.sg_description) + mock_logger.error.assert_called_once() + self.assertIsNone(output) + + @mock.patch.object(openstack_utils, 'create_security_group_rule') + @mock.patch.object(openstack_utils, 'log') + def test_create_security_group_full_create_rule_fail( + self, mock_logger, mock_create_security_group_rule): + self.mock_shade_client.get_security_group.return_value = None + self.mock_shade_client.create_security_group.return_value = ( + {'name': 'name', 'id': self._uuid}) + mock_create_security_group_rule.return_value = False + output = openstack_utils.create_security_group_full( + self.mock_shade_client, self.sg_name, self.sg_description) + mock_create_security_group_rule.assert_called() + self.mock_shade_client.delete_security_group(self.sg_name) + mock_logger.error.assert_called_once() + self.assertIsNone(output) + + @mock.patch.object(openstack_utils, 'create_security_group_rule') + def test_create_security_group_full( + self, mock_create_security_group_rule): + self.mock_shade_client.get_security_group.return_value = None + self.mock_shade_client.create_security_group.return_value = ( + {'name': 'name', 'id': self._uuid}) + mock_create_security_group_rule.return_value = True + output = openstack_utils.create_security_group_full( + self.mock_shade_client, self.sg_name, self.sg_description) + mock_create_security_group_rule.assert_called() + self.mock_shade_client.delete_security_group(self.sg_name) + self.assertEqual(self._uuid, output) + +# ********************************************* +# NOVA +# ********************************************* + + +class CreateInstanceTestCase(unittest.TestCase): + + def test_create_instance_and_wait_for_active(self): + self.mock_shade_client = mock.Mock() + name = 'server_name' + image = 'image_name' + flavor = 'flavor_name' + self.mock_shade_client.create_server.return_value = ( + {'name': name, 'image': image, 'flavor': flavor}) + output = openstack_utils.create_instance_and_wait_for_active( + self.mock_shade_client, name, image, flavor) + self.assertEqual( + {'name': name, 'image': image, 'flavor': flavor}, output) + + @mock.patch.object(openstack_utils, 'log') + def test_create_instance_and_wait_for_active_fail(self, mock_logger): + self.mock_shade_client = mock.Mock() + 
+        self.mock_shade_client.create_server.side_effect = (
+            exc.OpenStackCloudException('error message'))
+        output = openstack_utils.create_instance_and_wait_for_active(
+            self.mock_shade_client, 'server_name', 'image_name', 'flavor_name')
+        mock_logger.error.assert_called_once()
+        self.assertIsNone(output)
+
+
+class DeleteInstanceTestCase(unittest.TestCase):
+
+    def setUp(self):
+        self.mock_shade_client = mock.Mock()
+
+    def test_delete_instance(self):
+        self.mock_shade_client.delete_server.return_value = True
+        output = openstack_utils.delete_instance(self.mock_shade_client,
+                                                 'instance_name_id')
+        self.assertTrue(output)
+
+    def test_delete_instance_fail(self):
+        self.mock_shade_client.delete_server.return_value = False
+        output = openstack_utils.delete_instance(self.mock_shade_client,
+                                                 'instance_name_id')
+        self.assertFalse(output)
+
+    @mock.patch.object(openstack_utils, 'log')
+    def test_delete_instance_exception(self, mock_logger):
+        self.mock_shade_client.delete_server.side_effect = (
+            exc.OpenStackCloudException('error message'))
+        output = openstack_utils.delete_instance(self.mock_shade_client,
+                                                 'instance_name_id')
+        mock_logger.error.assert_called_once()
+        self.assertFalse(output)
+
+
+class CreateKeypairTestCase(unittest.TestCase):
+
+    def setUp(self):
+        self.mock_shade_client = mock.Mock()
+        self.name = 'key_name'
+
+    def test_create_keypair(self):
+        self.mock_shade_client.create_keypair.return_value = (
+            {'name': 'key-name', 'type': 'ssh'})
+        output = openstack_utils.create_keypair(
+            self.mock_shade_client, self.name)
+        self.assertEqual(
+            {'name': 'key-name', 'type': 'ssh'},
+            output)
+
+    @mock.patch.object(openstack_utils, 'log')
+    def test_create_keypair_fail(self, mock_logger):
+        self.mock_shade_client.create_keypair.side_effect = (
+            exc.OpenStackCloudException('error message'))
+        output = openstack_utils.create_keypair(
+            self.mock_shade_client, self.name)
+        mock_logger.error.assert_called_once()
+        self.assertIsNone(output)
+
+
+class DeleteKeypairTestCase(unittest.TestCase):
+
+    def setUp(self):
+        self.mock_shade_client = mock.Mock()
+
+    def test_delete_keypair(self):
+        self.mock_shade_client.delete_keypair.return_value = True
+        output = openstack_utils.delete_keypair(self.mock_shade_client,
+                                                'key_name')
+        self.assertTrue(output)
+
+    def test_delete_keypair_fail(self):
+        self.mock_shade_client.delete_keypair.return_value = False
+        output = openstack_utils.delete_keypair(self.mock_shade_client,
+                                                'key_name')
+        self.assertFalse(output)
+
+    @mock.patch.object(openstack_utils, 'log')
+    def test_delete_keypair_exception(self, mock_logger):
+        self.mock_shade_client.delete_keypair.side_effect = (
+            exc.OpenStackCloudException('error message'))
+        output = openstack_utils.delete_keypair(self.mock_shade_client,
+                                                'key_name')
+        mock_logger.error.assert_called_once()
+        self.assertFalse(output)
+
+
+class AttachVolumeToServerTestCase(unittest.TestCase):
+
+    def test_attach_volume_to_server(self):
+        self.mock_shade_client = mock.Mock()
+        self.mock_shade_client.get_server.return_value = {'server_dict'}
+        self.mock_shade_client.get_volume.return_value = {'volume_dict'}
+        self.mock_shade_client.attach_volume.return_value = True
+        output = openstack_utils.attach_volume_to_server(
+            self.mock_shade_client, 'server_name_or_id', 'volume_name_or_id')
+        self.assertTrue(output)
+
+    @mock.patch.object(openstack_utils, 'log')
+    def test_attach_volume_to_server_fail(self, mock_logger):
+        self.mock_shade_client = mock.Mock()
+        self.mock_shade_client.attach_volume.side_effect = (
+            exc.OpenStackCloudException('error message'))
+        output = openstack_utils.attach_volume_to_server(
+            self.mock_shade_client, 'server_name_or_id', 'volume_name_or_id')
+        mock_logger.error.assert_called_once()
+        self.assertFalse(output)
+
+
+class GetServerTestCase(unittest.TestCase):
+
+    def test_get_server(self):
+        self.mock_shade_client = mock.Mock()
+        _uuid = uuidutils.generate_uuid()
+        self.mock_shade_client.get_server.return_value = {
+            'name': 'server_name', 'id': _uuid}
+        output = openstack_utils.get_server(self.mock_shade_client,
+                                            'server_name_or_id')
+        self.assertEqual({'name': 'server_name', 'id': _uuid}, output)
+
+    @mock.patch.object(openstack_utils, 'log')
+    def test_get_server_exception(self, mock_logger):
+        self.mock_shade_client = mock.Mock()
+        self.mock_shade_client.get_server.side_effect = (
+            exc.OpenStackCloudException('error message'))
+        output = openstack_utils.get_server(self.mock_shade_client,
+                                            'server_name_or_id')
+        mock_logger.error.assert_called_once()
+        self.assertIsNone(output)
+
+
+class GetFlavorTestCase(unittest.TestCase):
+
+    def test_get_flavor(self):
+        self.mock_shade_client = mock.Mock()
+        _uuid = uuidutils.generate_uuid()
+        self.mock_shade_client.get_flavor.return_value = {
+            'name': 'flavor_name', 'id': _uuid}
+        output = openstack_utils.get_flavor(self.mock_shade_client,
+                                            'flavor_name_or_id')
+        self.assertEqual({'name': 'flavor_name', 'id': _uuid}, output)
+
+    @mock.patch.object(openstack_utils, 'log')
+    def test_get_flavor_exception(self, mock_logger):
+        self.mock_shade_client = mock.Mock()
+        self.mock_shade_client.get_flavor.side_effect = (
+            exc.OpenStackCloudException('error message'))
+        output = openstack_utils.get_flavor(self.mock_shade_client,
+                                            'flavor_name_or_id')
+        mock_logger.error.assert_called_once()
+        self.assertIsNone(output)
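The new NOVA test cases above all exercise the same wrapper pattern: the shade call either returns a value or raises shade's OpenStackCloudException, which the helper logs and converts into None or False. A sketch of that pattern, using delete_instance as the example (inferred from the tests, not copied from yardstick/common/openstack_utils.py; the wait/timeout keyword arguments are an assumption):

# Sketch of the wrapper pattern exercised by the NOVA tests above.
import logging

from shade import exc

log = logging.getLogger(__name__)


def delete_instance(shade_client, name_or_id, wait=False, timeout=180):
    """Delete a server; return shade's boolean result, or False on error."""
    try:
        return shade_client.delete_server(name_or_id, wait=wait,
                                          timeout=timeout)
    except exc.OpenStackCloudException as o_exc:
        log.error('Error [delete_instance(shade_client, %s)]. '
                  'Exception message: %s', name_or_id, o_exc)
        return False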
diff --git a/yardstick/tests/unit/common/test_utils.py b/yardstick/tests/unit/common/test_utils.py
index 9540a39e8..666b29b5f 100644
--- a/yardstick/tests/unit/common/test_utils.py
+++ b/yardstick/tests/unit/common/test_utils.py
@@ -16,13 +16,15 @@
 import mock
 import os
 import six
 from six.moves import configparser
+import time
 import unittest
 import yardstick
 from yardstick import ssh
 import yardstick.error
-from yardstick.common import utils
 from yardstick.common import constants
+from yardstick.common import utils
+from yardstick.common import exceptions
 class IterSubclassesTestCase(unittest.TestCase):
@@ -1158,3 +1160,43 @@ class ReadMeminfoTestCase(unittest.TestCase):
         output = utils.read_meminfo(ssh_client)
         mock_get_client.assert_called_once_with('/proc/meminfo', mock.ANY)
         self.assertEqual(self.MEMINFO_DICT, output)
+
+
+class TimerTestCase(unittest.TestCase):
+
+    def test__getattr(self):
+        with utils.Timer() as timer:
+            time.sleep(1)
+        self.assertEqual(1, round(timer.total_seconds(), 0))
+        self.assertEqual(1, timer.delta.seconds)
+
+    def test__enter_with_timeout(self):
+        with utils.Timer(timeout=10) as timer:
+            time.sleep(1)
+        self.assertEqual(1, round(timer.total_seconds(), 0))
+
+    def test__enter_with_timeout_exception(self):
+        with self.assertRaises(exceptions.TimerTimeout):
+            with utils.Timer(timeout=1):
+                time.sleep(2)
+
+
+class WaitUntilTrueTestCase(unittest.TestCase):
+
+    def test_no_timeout(self):
+        self.assertIsNone(utils.wait_until_true(lambda: True,
+                                                timeout=1, sleep=1))
+
+    def test_timeout_generic_exception(self):
+        with self.assertRaises(exceptions.WaitTimeout):
+            self.assertIsNone(utils.wait_until_true(lambda: False,
+                                                    timeout=1, sleep=1))
+
+    def test_timeout_given_exception(self):
+        class MyTimeoutException(exceptions.YardstickException):
+            message = 'My timeout exception'
+
+        with self.assertRaises(MyTimeoutException):
+            self.assertIsNone(
+                utils.wait_until_true(lambda: False, timeout=1, sleep=1,
+                                      exception=MyTimeoutException))
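TimerTestCase and WaitUntilTrueTestCase specify the two new helpers fairly completely: Timer is a context manager that measures elapsed time and may raise exceptions.TimerTimeout when constructed with a timeout, while wait_until_true polls a predicate until it returns True or the timeout expires. A rough sketch consistent with those tests follows; the SIGALRM-based timeout (main-thread only) and the keyword passed to TimerTimeout are assumptions.

# Sketch of the helpers exercised by the tests above; details are assumed.
import datetime
import signal
import time

from yardstick.common import exceptions


class Timer(object):
    def __init__(self, timeout=None):
        super(Timer, self).__init__()
        self._timeout = int(timeout) if timeout else None
        self.start = self.delta = None

    def _timeout_handler(self, *args):
        raise exceptions.TimerTimeout(timeout=self._timeout)

    def __enter__(self):
        self.start = datetime.datetime.now()
        if self._timeout:
            # SIGALRM only works in the main thread; assumed acceptable here.
            signal.signal(signal.SIGALRM, self._timeout_handler)
            signal.alarm(self._timeout)
        return self

    def __exit__(self, *args):
        if self._timeout:
            signal.alarm(0)
        self.delta = datetime.datetime.now() - self.start

    def __getattr__(self, item):
        # Delegate e.g. total_seconds() to the measured timedelta.
        return getattr(self.delta, item)


def wait_until_true(predicate, timeout=60, sleep=1, exception=None):
    """Poll predicate every `sleep` seconds until it is True or timeout."""
    end = time.time() + timeout
    while time.time() < end:
        if predicate():
            return
        time.sleep(sleep)
    if exception:
        raise exception
    raise exceptions.WaitTimeout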
diff --git a/tests/unit/network_services/collector/__init__.py b/yardstick/tests/unit/network_services/__init__.py
index e69de29bb..e69de29bb 100644
--- a/tests/unit/network_services/collector/__init__.py
+++ b/yardstick/tests/unit/network_services/__init__.py
diff --git a/tests/unit/network_services/libs/__init__.py b/yardstick/tests/unit/network_services/collector/__init__.py
index e69de29bb..e69de29bb 100644
--- a/tests/unit/network_services/libs/__init__.py
+++ b/yardstick/tests/unit/network_services/collector/__init__.py
diff --git a/tests/unit/network_services/collector/test_publisher.py b/yardstick/tests/unit/network_services/collector/test_publisher.py
index 4a175841d..145441ddd 100644
--- a/tests/unit/network_services/collector/test_publisher.py
+++ b/yardstick/tests/unit/network_services/collector/test_publisher.py
@@ -13,9 +13,6 @@
 # limitations under the License.
 #
-# Unittest for yardstick.network_services.collector.publisher
-
-from __future__ import absolute_import
 import unittest
 from yardstick.network_services.collector import publisher
diff --git a/tests/unit/network_services/collector/test_subscriber.py b/yardstick/tests/unit/network_services/collector/test_subscriber.py
index d4b4ecf7a..a344f5c85 100644
--- a/tests/unit/network_services/collector/test_subscriber.py
+++ b/yardstick/tests/unit/network_services/collector/test_subscriber.py
@@ -13,9 +13,6 @@
 # limitations under the License.
 #
-# Unittest for yardstick.network_services.collector.subscriber
-
-from __future__ import absolute_import
 import unittest
 import mock
@@ -81,7 +78,8 @@ class CollectorTestCase(unittest.TestCase):
         pass
     def test_start(self, *_):
-        self.assertIsNone(self.collector.start())
+        with self.assertRaises(Exception):
+            self.collector.start()
     def test_stop(self, *_):
         self.assertIsNone(self.collector.stop())
diff --git a/tests/unit/network_services/libs/ixia_libs/__init__.py b/yardstick/tests/unit/network_services/libs/__init__.py
index e69de29bb..e69de29bb 100644
--- a/tests/unit/network_services/libs/ixia_libs/__init__.py
+++ b/yardstick/tests/unit/network_services/libs/__init__.py
diff --git a/yardstick/tests/unit/network_services/libs/ixia_libs/__init__.py b/yardstick/tests/unit/network_services/libs/ixia_libs/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/yardstick/tests/unit/network_services/libs/ixia_libs/__init__.py
diff --git a/tests/unit/network_services/libs/ixia_libs/test_IxNet.py b/yardstick/tests/unit/network_services/libs/ixia_libs/test_IxNet.py
index 2a97048aa..fe750e5ba 100644
--- a/tests/unit/network_services/libs/ixia_libs/test_IxNet.py
+++ b/yardstick/tests/unit/network_services/libs/ixia_libs/test_IxNet.py
@@ -13,9 +13,6 @@
 # limitations under the License.
 #
-# Unittest for yardstick.network_services.libs.ixia_libs.IxNet
-
-from __future__ import absolute_import
 import unittest
 import mock
@@ -23,10 +20,10 @@
 from yardstick.network_services.libs.ixia_libs.IxNet.IxNet import IxNextgen
 from yardstick.network_services.libs.ixia_libs.IxNet.IxNet import IP_VERSION_4
 from yardstick.network_services.libs.ixia_libs.IxNet.IxNet import IP_VERSION_6
-
 UPLINK = "uplink"
 DOWNLINK = "downlink"
+
 class TestIxNextgen(unittest.TestCase):
     def test___init__(self):
@@ -40,7 +37,8 @@ class TestIxNextgen(unittest.TestCase):
         ixnet_gen.get_config = mock.MagicMock()
         ixnet_gen.get_ixnet = mock.MagicMock()
-        self.assertRaises(ImportError, ixnet_gen._connect, {"py_lib_path": "/tmp"})
+        self.assertRaises(ImportError, ixnet_gen._connect,
+                          {"py_lib_path": "/tmp"})
     def test_clear_ixia_config(self):
         ixnet = mock.MagicMock()
@@ -628,11 +626,9 @@ class TestIxNextgen(unittest.TestCase):
     def test_set_random_ip_multi_attributes_bad_ip_version(self):
         bad_ip_version = object()
         ixnet_gen = IxNextgen(mock.Mock())
-        mock1 = mock.Mock()
-        mock2 = mock.Mock()
-        mock3 = mock.Mock()
         with self.assertRaises(ValueError):
-            ixnet_gen.set_random_ip_multi_attributes(mock1, bad_ip_version, mock2, mock3)
+            ixnet_gen.set_random_ip_multi_attributes(
+                mock.Mock(), bad_ip_version, mock.Mock(), mock.Mock())
     def test_get_config(self):
         tg_cfg = {
diff --git a/tests/unit/network_services/test_utils.py b/yardstick/tests/unit/network_services/test_utils.py
index bf98a4474..2b2eb7109 100644
--- a/tests/unit/network_services/test_utils.py
+++ b/yardstick/tests/unit/network_services/test_utils.py
@@ -13,8 +13,6 @@
 # limitations under the License.
 #
-# Unittest for yardstick.network_services.utils
-
 import os
 import unittest
 import mock
diff --git a/tests/unit/network_services/test_yang_model.py b/yardstick/tests/unit/network_services/test_yang_model.py
index 0b29da701..a7eb36b8a 100644
--- a/tests/unit/network_services/test_yang_model.py
+++ b/yardstick/tests/unit/network_services/test_yang_model.py
@@ -13,14 +13,8 @@
 # limitations under the License.
 #
-# Unittest for yardstick.network_services.utils
-
-from __future__ import absolute_import
-
-import unittest
 import mock
-
-import yaml
+import unittest
 from yardstick.network_services.yang_model import YangModel
@@ -95,9 +89,9 @@ class YangModelTestCase(unittest.TestCase):
         y._get_entries()
         self.assertEqual(y._rules, '')
-    @mock.patch('yardstick.network_services.yang_model.yaml_load')
     @mock.patch('yardstick.network_services.yang_model.open')
-    def test__read_config(self, mock_open, mock_safe_load):
+    @mock.patch('yardstick.network_services.yang_model.yaml_load')
+    def test__read_config(self, mock_safe_load, *args):
         cfg = "yang.yaml"
         y = YangModel(cfg)
         mock_safe_load.return_value = expected = {'key1': 'value1', 'key2': 'value2'}
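The reordering in the last hunk relies on how stacked mock.patch decorators are applied: bottom-up, so the decorator closest to the test method supplies the first injected mock argument. Moving the yaml_load patch next to test__read_config therefore makes mock_safe_load the first parameter, and the remaining mock (for open) is absorbed by *args. A tiny, self-contained illustration using real stdlib targets (not taken from the patch itself):

# Illustration of mock.patch stacking order; targets chosen only for the demo.
import os

import mock


@mock.patch('os.listdir')   # outermost decorator -> injected last
@mock.patch('os.getcwd')    # innermost decorator -> injected first
def test_patch_order(mock_getcwd, mock_listdir):
    mock_getcwd.return_value = '/tmp'
    mock_listdir.return_value = []
    assert os.getcwd() == '/tmp'
    assert os.listdir('/tmp') == []


test_patch_order()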