27 files changed, 301 insertions, 339 deletions
diff --git a/ansible/build_yardstick_image.yml b/ansible/build_yardstick_image.yml index 025573b4b..5f9158576 100644 --- a/ansible/build_yardstick_image.yml +++ b/ansible/build_yardstick_image.yml @@ -73,7 +73,6 @@ state: absent # download-common - - name: remove {{ workspace }} file: path: "{{ workspace }}" @@ -85,62 +84,9 @@ sha256url: "{{ sha256sums_url }}" dest: "{{ image_dest }}" -# - get_url: -# url: "{{ sha256sums_url }}" -# force: yes -# dest: "{{ workspace }}/{{ sha256sums_filename }}" - - # must use wget to workaround ubuntu cloud SSL certs checking failures -# - command: "curl -sS -o {{ workspace }}/{{ sha256sums_filename }} {{ sha256sums_url }}" - - -# - command: cat "{{ workspace }}/{{ sha256sums_filename }}" -# register: sha256sum_file -# -# - set_fact: -# image_sha256: "{{ sha256sum_file.stdout|regex_search('^([a-f0-9]+).*' ~ img ~ '$', '\\1', multiline=True) }}" - -# - get_url: -# url: "{{ image_url }}" -# force: yes -# dest: "{{ workspace }}/{{ image_filename }}" -# checksum: "sha256:{{ image_sha256 }}" -# register: fetch_image_status -# timeout: 300 -# retries: 2 -# until: fetch_image_status|succeeded - -# - name: "Fetch {{ image_url }}" -# - command: "curl -sS -o {{ workspace }}/{{ image_filename }} {{ image_url }}" -# register: fetch_image_status -# timeout: 300 -# retries: 2 -# until: fetch_image_status|succeeded - -# - name: Verify sha256sum of downloaded image -# - command: "sha256sum -c --ignore-missing {{ workspace }}/{{ sha256sums_filename }}" - -# - name: create loop devices -# command: "mknod -m 660 /dev/loop{{ item }} b 7 {{ item }}" -# args: -# creates: "/dev/loop{{ item }}" -# with_seq: -# - - - # download non-lxd - - name: convert image to raw command: "qemu-img convert {{ image_dest }} {{ raw_imgfile }}" - - # setup non-lxd - -# - shell: echo -e "d\\nn\\np\\n1\\n\\n\\nw" | parted -l "{{ raw_imgfile }}" -# - parted: -# device: "{{ raw_imgfile }}" -# number: 1 -# state: present - - name: create mknod devices in chroot command: "mknod -m 0660 /dev/loop{{ item }} b 7 {{ item }}" args: @@ -148,23 +94,6 @@ with_sequence: start=0 end=9 tags: mknod_devices -# - command: losetup --show --partscan --find "{{ raw_imgfile }}" -# register: loop_device_res -# -# - debug: -# var: loop_device_res -# verbosity: 2 -# -# - set_fact: -# loop_device: "{{ loop_device_res.stdout.strip() }}" -# -# - wait_for: -# path: "{{ loop_device }}" -# state: present -# -# - command: losetup -# - command: dmsetup ls - - name: find first partition device # command: kpartx -l "{{ loop_device }}" command: kpartx -l "{{ raw_imgfile }}" @@ -179,11 +108,9 @@ - name: use kpartx to create device nodes for the raw image loop device # operate on the loop device to avoid /dev namespace missing devices -# command: kpartx -avs "{{ loop_device }}" command: kpartx -avs "{{ raw_imgfile }}" - name: parted dump raw image -# command: parted "{{ loop_device }}" print command: parted "{{ raw_imgfile }}" print register: parted_res @@ -199,7 +126,7 @@ image_fs_type: "{{ blkid_res.stdout.strip() }}" - name: make tmp disposable fstab - command: mktemp fake_fstab.XXXXXXXXXX + command: mktemp --tmpdir fake_fstab.XXXXXXXXXX register: mktemp_res - set_fact: @@ -211,7 +138,9 @@ name: "{{ mountdir }}" # fstype is required fstype: "{{ image_fs_type }}" - #fstab: "{{ fake_fstab }}" + # !!!!!!! 
this is required otherwise we add entries to /etc/fstab + # and prevent the system from booting + fstab: "{{ fake_fstab }}" state: mounted - name: mount chroot /proc @@ -219,7 +148,9 @@ src: none name: "{{ mountdir }}/proc" fstype: proc - #fstab: "{{ fake_fstab }}" + # !!!!!!! this is required otherwise we add entries to /etc/fstab + # and prevent the system from booting + fstab: "{{ fake_fstab }}" state: mounted - name: if arm copy qemu-aarch64-static into chroot @@ -228,19 +159,6 @@ dest: "{{ mountdir }}/usr/bin" when: 'YARD_IMG_ARCH == "arm64"' - - # setup lxd -# - file: "path={{ mountdir }} state=directory" -# -# - unarchive: -# src: "{{ image_filename }}" -# dest: "{{ mountdir }}" -# remote_src: yes - - # end setup lxd - - # modify - - name: create ubuntu policy-rc.d workaround copy: content: "{{ '#!/bin/sh\nexit 101\n' }}" diff --git a/ansible/clean_images.yml b/ansible/clean_images.yml index a7a6c2af1..f63489d2d 100644 --- a/ansible/clean_images.yml +++ b/ansible/clean_images.yml @@ -18,5 +18,6 @@ - yardstick_config.yml roles: + - convert_openrc - clean_images - clean_flavors diff --git a/ansible/group_vars/all.yml b/ansible/group_vars/all.yml index 2690228c3..edd6564d0 100644 --- a/ansible/group_vars/all.yml +++ b/ansible/group_vars/all.yml @@ -3,3 +3,4 @@ clone_dest: /opt/tempT release: xenial
target_os: Ubuntu
ubuntu_image_file: /tmp/workspace/yardstick/yardstick-trusty-server.raw
+proxy_env: {}
\ No newline at end of file diff --git a/ansible/install_dependencies.yml b/ansible/install_dependencies.yml index 001418497..1c7d20170 100644 --- a/ansible/install_dependencies.yml +++ b/ansible/install_dependencies.yml @@ -17,4 +17,3 @@ roles: - install_dependencies - diff --git a/ansible/load_images.yml b/ansible/load_images.yml index 7cf34adec..a4c130d46 100644 --- a/ansible/load_images.yml +++ b/ansible/load_images.yml @@ -59,8 +59,7 @@ - include: build_yardstick_image.yml -# TEMP -#- include: image_uploaders/upload_yardstick_image.yml +- include: image_uploaders/upload_yardstick_image.yml # upload cirros # upload vanilla ubuntu cloud_image diff --git a/ansible/nsb_setup.yml b/ansible/nsb_setup.yml new file mode 100644 index 000000000..e79ccabea --- /dev/null +++ b/ansible/nsb_setup.yml @@ -0,0 +1,57 @@ +# Copyright (c) 2017 Intel Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +--- +- include: ubuntu_server_baremetal_deploy_samplevnfs.yml + vars: + YARD_IMG_ARCH: amd64 + +- hosts: localhost + roles: + - install_dependencies + - docker + +- include: build_yardstick_image.yml + vars: + YARD_IMG_ARCH: amd64 + release: xenial + when: openrc_file is defined + +- include: clean_images.yml + when: openrc_file is defined + +- hosts: localhost + post_tasks: + - os_image: + name: yardstick-samplevnfs + is_public: yes + disk_format: qcow2 + container_format: bare + filename: "{{ raw_imgfile }}" + environment: "{{ openrc }}" + when: openrc_file is defined + + - name: Start yardstick container + docker_container: + name: yardstick + image: opnfv/yardstick:latest + recreate: yes + state: started + restart_policy: always + network_mode: host + privileged: True + interactive: True + volumes: + - "{{ openrc_file|default('/dev/null') }}:/etc/yardstick/openstack.creds:ro" + - /var/run/docker.sock:/var/run/docker.sock + - /opt/:/opt diff --git a/ansible/roles/docker/defaults/main.yml b/ansible/roles/docker/defaults/main.yml new file mode 100644 index 000000000..e961969a4 --- /dev/null +++ b/ansible/roles/docker/defaults/main.yml @@ -0,0 +1,15 @@ +# Copyright (c) 2017 Intel Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +--- +ubuntu_docker_url: https://apt.dockerproject.org/repo diff --git a/ansible/roles/docker/handlers/main.yml b/ansible/roles/docker/handlers/main.yml new file mode 100644 index 000000000..5e6556031 --- /dev/null +++ b/ansible/roles/docker/handlers/main.yml @@ -0,0 +1,20 @@ +# Copyright (c) 2017 Intel Corporation. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +--- + - name: systemd daemon reload + command: systemctl daemon-reload + + - name: restart docker + service: name=docker state=restarted + diff --git a/ansible/roles/docker/tasks/Debian.yml b/ansible/roles/docker/tasks/Debian.yml new file mode 100644 index 000000000..cf4128774 --- /dev/null +++ b/ansible/roles/docker/tasks/Debian.yml @@ -0,0 +1,26 @@ +# Copyright (c) 2017 Intel Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +--- + - name: add Ubuntu docker repo + apt_repository: repo='deb [trusted=yes] {{ ubuntu_docker_url }} ubuntu-{{ ansible_distribution_release }} main' state=present + + - name: ensure correct docker version + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present force=yes" + with_items: "{{ docker_packages[ansible_os_family] }}" + + - name: remove Ubuntu docker repo + apt_repository: + repo: 'deb [trusted=yes] {{ ubuntu_docker_url }} ubuntu-{{ ansible_distribution_release }} main' + state: absent + update_cache: no diff --git a/ansible/roles/docker/tasks/RedHat.yml b/ansible/roles/docker/tasks/RedHat.yml new file mode 100644 index 000000000..2261da3de --- /dev/null +++ b/ansible/roles/docker/tasks/RedHat.yml @@ -0,0 +1,20 @@ +# Copyright (c) 2017 Intel Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +--- + - name: bootstrap docker project repos + template: "src={{ ansible_os_family }}-repos.j2 dest=/etc/yum.repos.d/docker.repo" + + - name: ensure correct docker version + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" + with_items: "{{ docker_packages[ansible_os_family] }}" diff --git a/ansible/roles/docker/tasks/main.yml b/ansible/roles/docker/tasks/main.yml new file mode 100644 index 000000000..bbec371a8 --- /dev/null +++ b/ansible/roles/docker/tasks/main.yml @@ -0,0 +1,31 @@ +# Copyright (c) 2017 Intel Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +--- + - name: Install docker + include: "{{ ansible_os_family }}.yml" + + - name: create docker service config dir + file: path=/etc/systemd/system/docker.service.d state=directory + + - name: create docker proxy config + template: src=http-proxy-conf.j2 dest=/etc/systemd/system/docker.service.d/http-proxy.conf owner=root mode=0644 + when: 'proxy_env is defined and "http_proxy" in proxy_env or "https_proxy" in proxy_env' + notify: + - systemd daemon reload + - restart docker + + - name: start docker service + service: name=docker state=started enabled=yes + + - meta: flush_handlers diff --git a/ansible/roles/docker/templates/RedHat-repos.j2 b/ansible/roles/docker/templates/RedHat-repos.j2 new file mode 100644 index 000000000..6d367408c --- /dev/null +++ b/ansible/roles/docker/templates/RedHat-repos.j2 @@ -0,0 +1,7 @@ +[dockerrepo] +name=Docker Repository +# $releasever for RHEL 7 is '7Server' so we can't use $releasever +baseurl={{ docker_project_url }}/repo/main/{{ ansible_distribution|lower|regex_replace('redhat', 'centos') }}/{{ ansible_distribution_major_version }}/ +enabled=1 +gpgcheck=1 +gpgkey={{ docker_project_url }}/gpg diff --git a/ansible/roles/docker/templates/http-proxy-conf.j2 b/ansible/roles/docker/templates/http-proxy-conf.j2 new file mode 100644 index 000000000..854ddfe09 --- /dev/null +++ b/ansible/roles/docker/templates/http-proxy-conf.j2 @@ -0,0 +1,2 @@ +[Service] +Environment={% if "http_proxy" in proxy_env %}"HTTP_PROXY={{ proxy_env.http_proxy }}" {% endif %} {% if "https_proxy" in proxy_env %} "HTTPS_PROXY={{ proxy_env.https_proxy }}" {% endif %} {% if "http_proxy" in proxy_env or "https_proxy" in proxy_env %} "NO_PROXY=localhost,127.0.0.0/8" {% endif %} diff --git a/ansible/roles/docker/vars/main.yml b/ansible/roles/docker/vars/main.yml new file mode 100644 index 000000000..8b5077490 --- /dev/null +++ b/ansible/roles/docker/vars/main.yml @@ -0,0 +1,20 @@ +# Copyright (c) 2017 Intel Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+--- +docker_project_url: https://yum.dockerproject.org +docker_packages: + "RedHat": + - docker-engine-1.13.1 + "Debian": + - docker-engine=1.13.1* diff --git a/ansible/roles/install_dependencies/tasks/Debian.yml b/ansible/roles/install_dependencies/tasks/Debian.yml index 0047a5e3b..7a9911ebf 100755 --- a/ansible/roles/install_dependencies/tasks/Debian.yml +++ b/ansible/roles/install_dependencies/tasks/Debian.yml @@ -47,4 +47,3 @@ - uwsgi-plugin-python - supervisor - python-setuptools - diff --git a/ansible/roles/install_image_dependencies/defaults/main.yml b/ansible/roles/install_image_dependencies/defaults/main.yml index b1695e278..1540806cc 100644 --- a/ansible/roles/install_image_dependencies/defaults/main.yml +++ b/ansible/roles/install_image_dependencies/defaults/main.yml @@ -13,9 +13,7 @@ install_dependencies: - iproute2 - linux-tools-common - linux-tools-generic - - lmbench - make - - netperf - patch - perl - rt-tests diff --git a/ansible/ubuntu_server_baremetal_deploy_samplevnfs.yml b/ansible/ubuntu_server_baremetal_deploy_samplevnfs.yml index 3a1fbd08f..14bdd7eea 100644 --- a/ansible/ubuntu_server_baremetal_deploy_samplevnfs.yml +++ b/ansible/ubuntu_server_baremetal_deploy_samplevnfs.yml @@ -40,4 +40,3 @@ vnf_name: FW - role: install_samplevnf vnf_name: CGNATP - diff --git a/ansible/yardstick-install-inventory.ini b/ansible/yardstick-install-inventory.ini new file mode 100644 index 000000000..e2647b033 --- /dev/null +++ b/ansible/yardstick-install-inventory.ini @@ -0,0 +1,4 @@ +# the group of systems on which to install yardstick +# by default just localhost +[yardstick] +localhost ansible_connection=local diff --git a/nsb_setup.sh b/nsb_setup.sh index c11dc1038..025b8f728 100755 --- a/nsb_setup.sh +++ b/nsb_setup.sh @@ -1,5 +1,5 @@ -#! /bin/bash -# Copyright (c) 2016-2017 Intel Corporation +#!/usr/bin/env bash +# Copyright (c) 2017 Intel Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,237 +13,41 @@ # See the License for the specific language governing permissions and # limitations under the License. -# -# Change to yardstick directory ( <current-dir>/.. ), and export it as REPO_DIR -# -cd "$(dirname "${BASH_SOURCE[0]}")" -export REPO_DIR=$PWD -echo "------------------------------------------------------------------------------" -echo " REPO_DIR exported as $REPO_DIR" -echo "------------------------------------------------------------------------------" - -if [ "$(whoami)" != "root" ]; then - echo "Must be root to run $0" - exit 1; -fi - -INSTALL_BIN_PATH="/opt/nsb_bin" -TREX_VERSION="v2.28" -TREX_DOWNLOAD="https://trex-tgn.cisco.com/trex/release/$TREX_VERSION.tar.gz" -DPDK_DOWNLOAD="http://dpdk.org/browse/dpdk/snapshot/dpdk-16.07.zip" -VIRTUAL_VENV="$INSTALL_BIN_PATH/yardstick_venv" - -# -# Install libs needed for NSB -# -install_libs() -{ - echo "Install libs needed to build and run NSB Testing..." - apt-get update > /dev/null 2>&1 - pkg=(git build-essential python-dev virtualenv python-virtualenv virtualenv linux-headers-$(uname -r) unzip python-pip libpcap-dev cmake) - for i in "${pkg[@]}"; do +apt-get update > /dev/null 2>&1 +pkg=(python-pip build-essential libssl-dev libffi-dev python3-dev python-dev) +for i in "${pkg[@]}"; do dpkg-query -W --showformat='${Status}\n' "${i}"|grep "install ok installed" if [ "$?" -eq "1" ]; then apt-get -y install "${i}"; fi - done - echo "Done" -} - -install_yardstick() -{ - echo "Create install directory... 
$INSTALL_BIN_PATH" - mkdir -p $INSTALL_BIN_PATH - echo "Install yardstick dependencies and build Yardstick in venv..." - pushd . - rm -rf $VIRTUAL_VENV - echo $VIRTUAL_VENV - virtualenv $VIRTUAL_VENV - if [ ! -f "$INSTALL_BIN_PATH/yardstick_venv/bin/activate" ]; then - echo "Installation Error. Failed to create yardstick virtual env..." - exit 1 - fi - source $VIRTUAL_VENV/bin/activate - bash ./install.sh - python setup.py install - popd - - pushd . - echo "Copying yardstick sample conf & pod file to /etc/yardstick/nodes" - mkdir -p /etc/yardstick/nodes - cp "$REPO_DIR/etc/yardstick/yardstick.conf.sample" "/etc/yardstick/yardstick.conf" - cp "$REPO_DIR/etc/yardstick/nodes/pod.yaml.nsb.sample" "/etc/yardstick/nodes/" - popd -} - -# -# Install trex for TH setup -# -install_trex() -{ - TREX_DIR=$INSTALL_BIN_PATH/trex/scripts - if [ -d "$TREX_DIR" ]; then - echo "Trex $TREX_VERSION already installed." - else - echo "Build TRex and installing Trex TG in $INSTALL_BIN_PATH/trex" - rm -rf ${TREX_DOWNLOAD##*/} - if [ ! -e ${TREX_DOWNLOAD##*/} ] ; then - wget $TREX_DOWNLOAD - fi - tar zxvf ${TREX_DOWNLOAD##*/} - pushd . - rm -rf trex - mkdir -p trex - mv $TREX_VERSION trex/scripts - rm -rf $TREX_VERSION.tar.gz - cd trex/scripts/ko/src/ - make - make install - ln -s $TREX_DIR/automation/trex_control_plane $INSTALL_BIN_PATH/trex_client - popd - fi - echo "Done." -} - -install_dpdk() -{ - if [ -d "$INSTALL_BIN_PATH/dpdk-16.07" ]; then - echo "DPDK already installed make sure.. igb_uio is loaded." - else - echo "Build DPDK 16.07..." - pushd . - rm -rf ${DPDK_DOWNLOAD##*/} - rm -rf "$REPO_DIR/dpdk-16.07/" - if [ ! -e ${DPDK_DOWNLOAD##*/} ] ; then - wget ${DPDK_DOWNLOAD} - fi - unzip -o ${DPDK_DOWNLOAD##*/} - - cd dpdk-16.07 - make config T=x86_64-native-linuxapp-gcc O=x86_64-native-linuxapp-gcc - cd x86_64-native-linuxapp-gcc - echo "Enable Port Stats..." - sed -i -e 's/CONFIG_RTE_PORT_STATS_COLLECT=n/CONFIG_RTE_PORT_STATS_COLLECT=y/g' .config - sed -i -e 's/CONFIG_RTE_PORT_PCAP=n/CONFIG_RTE_PORT_PCAP=y/g' .config - sed -i -e 's/CONFIG_RTE_TABLE_STATS_COLLECT=n/CONFIG_RTE_TABLE_STATS_COLLECT=y/g' .config - sed -i -e 's/CONFIG_RTE_PIPELINE_STATS_COLLECT=n/CONFIG_RTE_PIPELINE_STATS_COLLECT=y/g' .config - make - - echo "Load DPDK modules and setup hugepages" - modprobe uio - mkdir -p "/lib/modules/$(uname -r)/extra" - cp -r "kmod/igb_uio.ko" "/lib/modules/$(uname -r)/extra" - depmod -a - modprobe igb_uio - sh -c "echo 'uio\nigb_uio\n' > /etc/modules-load.d/nsb.conf" - - HUGEPGSZ=$(cat < /proc/meminfo | grep Hugepagesize | cut -d : -f 2 | tr -d ' ') - Pages=16 - if [[ "$HUGEPGSZ" = "2048kB" ]] ; then - Pages=16384 - fi - grep nr_hugepages /etc/sysctl.conf - if [[ "$?" -eq '1' ]] ; then - sh -c "echo 'vm.nr_hugepages=$Pages' >> /etc/sysctl.conf" - fi - echo "echo $Pages > /sys/kernel/mm/hugepages/hugepages-${HUGEPGSZ}/nr_hugepages" > .echo_tmp - echo "Reserving hugepages" - sudo sh .echo_tmp - rm -f .echo_tmp - - service procps start - echo "Creating /mnt/huge and mounting as hugetlbfs" - sudo mkdir -p /mnt/huge - - grep -s '/mnt/huge' /proc/mounts > /dev/null - if [ $? -ne 0 ] ; then - sudo mount -t hugetlbfs nodev /mnt/huge - fi - popd - mv "$REPO_DIR/dpdk-16.07" "$INSTALL_BIN_PATH" - rm dpdk-16.07.zip - fi - export RTE_SDK="$INSTALL_BIN_PATH/dpdk-16.07" - export RTE_TARGET=x86_64-native-linuxapp-gcc - - if [ ! -f "$INSTALL_BIN_PATH/vPE_vnf" ]; then - pushd . - echo "Building vPE VNF..." 
- cd $INSTALL_BIN_PATH/dpdk-16.07/examples/ip_pipeline/ - make clean - make - cp build/ip_pipeline $INSTALL_BIN_PATH/vPE_vnf - popd - fi - echo "Done" -} - - -push_nsb_binary() -{ - if [ ! -d "$INSTALL_BIN_PATH/trex/scripts" ]; then - cp -fr "$REPO_DIR/trex" "$INSTALL_BIN_PATH" - fi - rm -rf "$REPO_DIR/trex" +done - if [ -d "$INSTALL_BIN_PATH/trex" ]; then - echo "Setup Environment variables for Test Harness...." - PYTHONPATH="$INSTALL_BIN_PATH/trex/scripts/automation/trex_control_plane:$INSTALL_BIN_PATH/trex/scripts/automation/trex_control_plane/stl" - PY_PATH=$(grep PYTHONPATH ~/.bash_profile) - if [ "$PY_PATH" = "" ] ; then - sh -c "echo export PYTHONPATH=$PYTHONPATH >> ~/.bash_profile" > /dev/null - else - echo "Your ~/.bash_profile already contains a PYTHONPATH definition." - echo "Make sure it contains $PYTHONPATH which is required to run TRex" - fi - fi - cp "$REPO_DIR/yardstick/network_services/nfvi/collectd.sh" "$INSTALL_BIN_PATH" - cp "$REPO_DIR/yardstick/network_services/nfvi/collectd.conf" "$INSTALL_BIN_PATH" - cp "$REPO_DIR/nsb_setup.sh" "$INSTALL_BIN_PATH" - - # Get "dpdk-devbind.py" to find the ports for VNF to run - wget http://dpdk.org/browse/dpdk/plain/usertools/dpdk-devbind.py?h=v17.05 -O dpdk-devbind.py - chmod 777 dpdk-devbind.py - mv dpdk-devbind.py "$INSTALL_BIN_PATH" - ln "$INSTALL_BIN_PATH"/dpdk-devbind.py "$INSTALL_BIN_PATH"/dpdk_nic_bind.py - echo "Done" -} +pip install ansible shade docker -check_installed_files() -{ - if [ ! -f "$INSTALL_BIN_PATH/yardstick_venv/bin/activate" ]; then - echo "Installation Error. Failed to create yardstick virtual env..." - exit 1 - fi +if [ $# -eq 1 ]; then + extra_args="-e openrc_file=$1" + OPENRC=$1 + source "${OPENRC}" + CONTROLLER_IP=$(echo ${OS_AUTH_URL} | sed -ne "s/http:\/\/\(.*\):.*/\1/p") + export no_proxy="localhost,127.0.0.1,${CONTROLLER_IP},$no_proxy" +fi - if [ ! -d "$INSTALL_BIN_PATH/dpdk-16.07" ]; then - echo "Installation Error. Failed to download and install dpdk-16.07..." - exit 1 - fi +if [ "$http_proxy" != "" ] || [ "$https_proxy" != "" ]; then + extra_args="${extra_args} -e @/tmp/proxy.yml" - if [ ! -d "$INSTALL_BIN_PATH/trex" ]; then - echo "Installation Error. Failed to download and configure Trex" - exit 1 - fi + cat <<EOF > /tmp/proxy.yml +--- +proxy_env: + http_proxy: $http_proxy + https_proxy: $https_proxy + no_proxy: $no_proxy +EOF +fi - if [ ! -f "$INSTALL_BIN_PATH/vPE_vnf" ]; then - echo "Installation Error. vPE VNF not present in install dir $INSTALL_BIN_PATH" - exit 1 - fi -} +ANSIBLE_SCRIPTS="ansible" -if [ "$1" == "dpdk" ]; then - install_libs - install_dpdk -else - install_libs - install_yardstick - install_dpdk - install_trex - push_nsb_binary - check_installed_files -clear -echo "Installation completed..." -echo "Virtual Environment : $INSTALL_BIN_PATH/yardstick_venv" -echo "Please refer to Chapter 13 of the Yardstick User Guide for how to get started with VNF testing." 
-fi +cd ${ANSIBLE_SCRIPTS} &&\ +ansible-playbook \ + -e img_modify_playbook='ubuntu_server_cloudimg_modify_samplevnfs.yml' \ + -e YARD_IMG_ARCH='amd64' ${extra_args}\ + -i yardstick-install-inventory.ini nsb_setup.yml diff --git a/samples/vnf_samples/nsut/vfw/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_ixia.yaml b/samples/vnf_samples/nsut/vfw/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_ixia.yaml index e07f5f9e9..9808398f2 100644 --- a/samples/vnf_samples/nsut/vfw/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_ixia.yaml +++ b/samples/vnf_samples/nsut/vfw/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_ixia.yaml @@ -35,6 +35,7 @@ scenarios: vnf__1: rules: acl_1rule.yaml vnf_config: {lb_config: 'SW', lb_count: 1, worker_config: '1C/1T', worker_threads: 1} + nfvi_enable: True runner: type: Iteration iterations: 10 diff --git a/tests/unit/benchmark/scenarios/lib/test_create_keypair.py b/tests/unit/benchmark/scenarios/lib/test_create_keypair.py index 99e6b9afa..4b9b72013 100644 --- a/tests/unit/benchmark/scenarios/lib/test_create_keypair.py +++ b/tests/unit/benchmark/scenarios/lib/test_create_keypair.py @@ -8,15 +8,16 @@ ############################################################################## import unittest import mock -import paramiko from yardstick.benchmark.scenarios.lib.create_keypair import CreateKeypair +PREFIX = "yardstick.benchmark.scenarios.lib.create_keypair" -class CreateKeypairTestCase(unittest.TestCase): - @mock.patch('yardstick.common.openstack_utils.create_keypair') - def test_create_keypair(self, mock_create_keypair): +class CreateKeypairTestCase(unittest.TestCase): + @mock.patch('{}.paramiko'.format(PREFIX)) + @mock.patch('{}.op_utils'.format(PREFIX)) + def test_create_keypair(self, mock_op_utils, mock_paramiko): options = { 'key_name': 'yardstick_key', 'key_path': '/tmp/yardstick_key' @@ -24,7 +25,7 @@ class CreateKeypairTestCase(unittest.TestCase): args = {"options": options} obj = CreateKeypair(args, {}) obj.run({}) - self.assertTrue(mock_create_keypair.called) + self.assertTrue(mock_op_utils.create_keypair.called) def main(): diff --git a/tests/unit/network_services/nfvi/test_resource.py b/tests/unit/network_services/nfvi/test_resource.py index 072f06edf..21beba882 100644 --- a/tests/unit/network_services/nfvi/test_resource.py +++ b/tests/unit/network_services/nfvi/test_resource.py @@ -274,6 +274,7 @@ class TestResourceProfile(unittest.TestCase): res = self.resource_profile.parse_collectd_result({}, [0, 1, 2]) expected_result = {'cpu': {}, 'dpdkstat': {}, 'hugepages': {}, 'memory': {}, 'ovs_stats': {}, 'timestamp': '', + 'intel_pmu': {}, 'virt': {}} self.assertDictEqual(res, expected_result) @@ -286,6 +287,7 @@ class TestResourceProfile(unittest.TestCase): res = self.resource_profile.parse_collectd_result(metric, [0, 1, 2]) expected_result = {'cpu': {1: {'ipc': '1234'}}, 'dpdkstat': {}, 'hugepages': {}, 'memory': {}, 'ovs_stats': {}, 'timestamp': '', + 'intel_pmu': {}, 'virt': {}} self.assertDictEqual(res, expected_result) @@ -294,6 +296,7 @@ class TestResourceProfile(unittest.TestCase): res = self.resource_profile.parse_collectd_result(metric, [0, 1, 2]) expected_result = {'cpu': {}, 'dpdkstat': {}, 'hugepages': {}, 'memory': {'bw': '101'}, 'ovs_stats': {}, 'timestamp': '', + 'intel_pmu': {}, 'virt': {}} self.assertDictEqual(res, expected_result) @@ -305,6 +308,7 @@ class TestResourceProfile(unittest.TestCase): expected_result = {'cpu': {}, 'dpdkstat': {}, 'hugepages': {'free': '101'}, 'memory': {}, 'ovs_stats': {}, 'timestamp': '', + 'intel_pmu': {}, 'virt': {}} 
self.assertDictEqual(res, expected_result) @@ -321,6 +325,7 @@ class TestResourceProfile(unittest.TestCase): res = self.resource_profile.parse_collectd_result(metric, [0, 1, 2]) expected_result = {'cpu': {}, 'dpdkstat': {'tx': '101'}, 'hugepages': {}, 'memory': {}, 'ovs_stats': {'tx': '101'}, 'timestamp': '', + 'intel_pmu': {}, 'virt': {'memory': '101'}} self.assertDictEqual(res, expected_result) diff --git a/tests/unit/network_services/vnf_generic/vnf/test_tg_ixload.py b/tests/unit/network_services/vnf_generic/vnf/test_tg_ixload.py index db128a15c..c65c0ab0a 100644 --- a/tests/unit/network_services/vnf_generic/vnf/test_tg_ixload.py +++ b/tests/unit/network_services/vnf_generic/vnf/test_tg_ixload.py @@ -123,7 +123,7 @@ class TestIxLoadTrafficGen(unittest.TestCase): ssh.from_node.return_value = ssh_mock vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0] ixload_traffic_gen = IxLoadTrafficGen(NAME, vnfd) - self.assertIsNone(ixload_traffic_gen.data) + self.assertIsNone(ixload_traffic_gen.resource_helper.data) def test_collect_kpi(self): with mock.patch("yardstick.ssh.SSH") as ssh: diff --git a/yardstick/network_services/nfvi/collectd.conf b/yardstick/network_services/nfvi/collectd.conf index 6d8b73f7f..3928dcbca 100644 --- a/yardstick/network_services/nfvi/collectd.conf +++ b/yardstick/network_services/nfvi/collectd.conf @@ -67,6 +67,14 @@ Interval {interval} Cores "" </Plugin> +<Plugin intel_pmu> + ReportHardwareCacheEvents true + ReportKernelPMUEvents true + ReportSoftwareEvents true + EventList "/root/.cache/pmu-events/GenuineIntel-6-2D-core.json" + HardwareEvents "L2_RQSTS.CODE_RD_HIT,L2_RQSTS.CODE_RD_MISS" "L2_RQSTS.ALL_CODE_RD" +</Plugin> + <Plugin hugepages> ReportPerNodeHP true ReportRootHP true diff --git a/yardstick/network_services/nfvi/collectd.sh b/yardstick/network_services/nfvi/collectd.sh index 7666404e4..296c4a213 100755 --- a/yardstick/network_services/nfvi/collectd.sh +++ b/yardstick/network_services/nfvi/collectd.sh @@ -104,6 +104,24 @@ else popd fi +ls $INSTALL_NSB_BIN/pmu-tools >/dev/null +if [ $? -eq 0 ] +then + echo "DPDK already installed. Done" +else + cd $INSTALL_NSB_BIN + + git clone https://github.com/andikleen/pmu-tools.git + cd pmu-tools + cd jevents + sed -i -e 's/CFLAGS := -g -Wall -O2 -Wno-unused-result/CFLAGS := -g -Wall -O2 -Wno-unused-result -fPIC/g' Makefile + make + sudo make install + cd $INSTALL_NSB_BIN/pmu-tools + python event_download.py +fi + +cd $INSTALL_NSB_BIN which $INSTALL_NSB_BIN/collectd/collectd >/dev/null if [ $? -eq 0 ] then @@ -115,9 +133,8 @@ else git clone https://github.com/collectd/collectd.git pushd collectd git stash - git checkout -b nfvi 47c86ace348a1d7a5352a83d10935209f89aa4f5 ./build.sh - ./configure --with-libpqos=/usr/ --with-libdpdk=/usr --with-libyajl=/usr/local --enable-debug --enable-dpdkstat --enable-virt --enable-ovs_stats + ./configure --with-libpqos=/usr/ --with-libdpdk=/usr --with-libyajl=/usr/local --with-libjevents=/usr/local --enable-debug --enable-dpdkstat --enable-virt --enable-ovs_stats --enable-intel_pmu --prefix=$INSTALL_NSB_BIN/collectd make install > /dev/null popd echo "Done." @@ -126,7 +143,7 @@ fi modprobe msr cp $INSTALL_NSB_BIN/collectd.conf /opt/collectd/etc/ - +sudo service rabbitmq-server restart echo "Check if admin user already created" rabbitmqctl list_users | grep '^admin$' > /dev/null if [ $? 
-eq 0 ]; diff --git a/yardstick/network_services/nfvi/resource.py b/yardstick/network_services/nfvi/resource.py index 2a9a1a1a2..f0ae67616 100644 --- a/yardstick/network_services/nfvi/resource.py +++ b/yardstick/network_services/nfvi/resource.py @@ -35,7 +35,7 @@ CONF = cfg.CONF ZMQ_OVS_PORT = 5567 ZMQ_POLLING_TIME = 12000 LIST_PLUGINS_ENABLED = ["amqp", "cpu", "cpufreq", "intel_rdt", "memory", - "hugepages", "dpdkstat", "virt", "ovs_stats"] + "hugepages", "dpdkstat", "virt", "ovs_stats", "intel_pmu"] class ResourceProfile(object): @@ -109,6 +109,10 @@ class ResourceProfile(object): def parse_ovs_stats(cls, key, value): return cls.parse_simple_resource(key, value) + @classmethod + def parse_intel_pmu_stats(cls, key, value): + return {''.join(key): value.split(":")[1]} + def parse_collectd_result(self, metrics, core_list): """ convert collectd data into json""" result = { @@ -118,6 +122,7 @@ class ResourceProfile(object): "dpdkstat": {}, "virt": {}, "ovs_stats": {}, + "intel_pmu": {}, } testcase = "" @@ -148,6 +153,9 @@ class ResourceProfile(object): elif "ovs_stats" in res_key0: result["ovs_stats"].update(self.parse_ovs_stats(key_split, value)) + elif "intel_pmu-all" in res_key0: + result["intel_pmu"].update(self.parse_intel_pmu_stats(res_key1, value)) + result["timestamp"] = testcase return result @@ -192,7 +200,6 @@ class ResourceProfile(object): "loadplugin": loadplugin, "dpdk_interface": interfaces, } - self._provide_config_file(bin_path, 'collectd.conf', kwargs) def _start_collectd(self, connection, bin_path): diff --git a/yardstick/network_services/vnf_generic/vnf/tg_ixload.py b/yardstick/network_services/vnf_generic/vnf/tg_ixload.py index 6be2b58e1..612799ff5 100644 --- a/yardstick/network_services/vnf_generic/vnf/tg_ixload.py +++ b/yardstick/network_services/vnf_generic/vnf/tg_ixload.py @@ -22,8 +22,6 @@ import shutil from collections import OrderedDict from subprocess import call -import six - from yardstick.common.utils import makedirs from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNFTrafficGen from yardstick.network_services.vnf_generic.vnf.sample_vnf import ClientResourceHelper @@ -79,6 +77,7 @@ class IxLoadResourceHelper(ClientResourceHelper): super(IxLoadResourceHelper, self).__init__(setup_helper) self.result = OrderedDict((key, ResourceDataHelper()) for key in self.KPI_LIST) self.resource_file_name = '' + self.data = None def parse_csv_read(self, reader): for row in reader: @@ -111,6 +110,12 @@ class IxLoadResourceHelper(ClientResourceHelper): return {key_right: self.result[key_left].get_aggregates() for key_left, key_right in self.KPI_LIST.items()} + def collect_kpi(self): + if self.data: + self._result.update(self.data) + LOG.info("Collect {0} KPIs {1}".format(self.RESOURCE_WORD, self._result)) + return self._result + def log(self): for key in self.KPI_LIST: LOG.debug(self.result[key]) @@ -125,7 +130,6 @@ class IxLoadTrafficGen(SampleVNFTrafficGen): super(IxLoadTrafficGen, self).__init__(name, vnfd, setup_env_helper_type, resource_helper_type) self._result = {} - self.data = None def run_traffic(self, traffic_profile): ports = [] @@ -156,16 +160,15 @@ class IxLoadTrafficGen(SampleVNFTrafficGen): with open(self.ssh_helper.join_bin_path("ixLoad_HTTP_Client.csv")) as csv_file: lines = csv_file.readlines()[10:] - with open(self.ssh_helper.join_bin_path("http_result.csv"), 'wb+') as result_file: - result_file.writelines(six.text_type(lines[:-1])) + result_file.writelines(lines[:-1]) result_file.flush() result_file.seek(0) reader = 
csv.DictReader(result_file) self.resource_helper.parse_csv_read(reader) self.resource_helper.log() - self.data = self.resource_helper.make_aggregates() + self.resource_helper.data = self.resource_helper.make_aggregates() def listen_traffic(self, traffic_profile): pass
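
A few notes on the pieces above, each with a small Python sketch; any value marked as a placeholder is illustrative and not taken from the change.

The new docker role writes a systemd drop-in only when proxy_env carries an http_proxy or https_proxy entry, using the one-line http-proxy-conf.j2 template added above. A quick way to preview the resulting /etc/systemd/system/docker.service.d/http-proxy.conf is to render that template against a sample proxy_env (the proxy URL is a placeholder; jinja2 must be available):

# Render the http-proxy-conf.j2 template from the docker role with a
# sample proxy_env to preview the systemd drop-in it produces.
from jinja2 import Template

HTTP_PROXY_CONF_J2 = (
    '[Service]\n'
    'Environment='
    '{% if "http_proxy" in proxy_env %}"HTTP_PROXY={{ proxy_env.http_proxy }}" {% endif %} '
    '{% if "https_proxy" in proxy_env %} "HTTPS_PROXY={{ proxy_env.https_proxy }}" {% endif %} '
    '{% if "http_proxy" in proxy_env or "https_proxy" in proxy_env %} "NO_PROXY=localhost,127.0.0.0/8" {% endif %}'
)

proxy_env = {
    "http_proxy": "http://proxy.example.com:3128",   # placeholder
    "https_proxy": "http://proxy.example.com:3128",  # placeholder
}

print(Template(HTTP_PROXY_CONF_J2).render(proxy_env=proxy_env))

The same proxy_env dictionary is what the rewritten nsb_setup.sh writes to /tmp/proxy.yml and passes with -e @/tmp/proxy.yml, so the role and the wrapper script stay in sync.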
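nsb_setup.sh now sources the supplied openrc, pulls the controller address out of OS_AUTH_URL with sed -ne "s/http:\/\/\(.*\):.*/\1/p", and prepends it to no_proxy before invoking ansible-playbook. The same extraction in Python, against a hypothetical auth URL, for anyone who wants to sanity-check the pattern:

# Equivalent of the sed expression used in nsb_setup.sh to derive
# CONTROLLER_IP from OS_AUTH_URL. The URL below is a documentation
# placeholder, not a real deployment value.
import re

os_auth_url = "http://192.0.2.10:5000/v2.0"   # placeholder OS_AUTH_URL

match = re.match(r"http://(.*):.*", os_auth_url)
controller_ip = match.group(1) if match else ""
# the real script also keeps any pre-existing $no_proxy suffix
no_proxy = "localhost,127.0.0.1," + controller_ip
print(controller_ip)   # 192.0.2.10
print(no_proxy)

As written, the pattern only matches http:// auth URLs.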
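collectd.conf, collectd.sh and resource.py together enable the intel_pmu collectd plugin (built against pmu-tools/jevents) and add a parser for its samples. The classmethod added to ResourceProfile simply keeps whatever follows the first colon of the reported value; a stand-alone paraphrase with a hypothetical event name and value:

# Paraphrase of ResourceProfile.parse_intel_pmu_stats() added in
# yardstick/network_services/nfvi/resource.py. The sample key and
# value are assumptions used only to show the shape of the output.
def parse_intel_pmu_stats(key, value):
    # keep the reading that follows the first ':' in the collectd value
    return {''.join(key): value.split(":")[1]}

sample_key = "L2_RQSTS.CODE_RD_HIT"        # hypothetical PMU event name
sample_value = "1503923101.123:42315"      # hypothetical "<timestamp>:<count>"
print(parse_intel_pmu_stats(sample_key, sample_value))
# {'L2_RQSTS.CODE_RD_HIT': '42315'}

In parse_collectd_result() these entries land under the new top-level "intel_pmu" key, which is why every expected_result in test_resource.py gains 'intel_pmu': {}.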
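In tg_ixload.py the aggregated CSV results move from the traffic generator (self.data) onto IxLoadResourceHelper, which now exposes them through its own collect_kpi(); the updated unit test asserts on resource_helper.data accordingly. A reduced sketch of that ownership, with logging and the real ClientResourceHelper base class left out:

# Reduced sketch of the new data flow in IxLoadResourceHelper: the
# helper caches make_aggregates() output in self.data and folds it
# into the KPI dict on collect_kpi(). The KPI name is a placeholder.
class ResourceHelperSketch:
    def __init__(self):
        self._result = {}
        self.data = None   # set by run_traffic() via make_aggregates()

    def collect_kpi(self):
        if self.data:
            self._result.update(self.data)
        return self._result

helper = ResourceHelperSketch()
helper.data = {"http_throughput": 12345}   # placeholder aggregate
print(helper.collect_kpi())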