20 files changed, 843 insertions(+), 63 deletions(-)
diff --git a/docker/Dockerfile b/docker/Dockerfile index 5813f0245..62ea0d037 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -51,7 +51,7 @@ EXPOSE 5000 5672 ADD http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img ${IMAGE_DIR} ADD http://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img ${IMAGE_DIR} -COPY ./docker/exec_tests.sh /usr/local/bin/ +COPY ./exec_tests.sh /usr/local/bin/ ENV NSB_DIR="/opt/nsb_bin" ENV PYTHONPATH="${PYTHONPATH}:${NSB_DIR}/trex_client:${NSB_DIR}/trex_client/stl" diff --git a/docker/Dockerfile.aarch64.patch b/docker/Dockerfile.aarch64.patch index 720a39970..6c7381284 100644 --- a/docker/Dockerfile.aarch64.patch +++ b/docker/Dockerfile.aarch64.patch @@ -1,15 +1,14 @@ -From: Cristina Pauna <cristina.pauna@enea.com> -Date: Mon, 30 Apr 2018 14:09:00 +0300 -Subject: [PATCH] [PATCH] Patch for Yardstick AARCH64 Docker file +From: ting wu <ting.wu@enea.com> +Date: Tue, 8 May 2018 14:02:52 +0200 +Subject: [PATCH][PATCH] Patch for Yardstick AARCH64 Docker file -Signed-off-by: Cristina Pauna <cristina.pauna@enea.com> -Signed-off-by: Alexandru Nemes <alexandru.nemes@enea.com> +Signed-off-by: ting wu <ting.wu@enea.com> --- - docker/Dockerfile | 12 +++++++----- - 1 file changed, 7 insertions(+), 5 deletions(-) + docker/Dockerfile | 11 ++++++----- + 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/docker/Dockerfile b/docker/Dockerfile -index fed9f9bd..9654b5dc 100644 +index 62ea0d0..f2f41771 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -7,9 +7,9 @@ @@ -24,18 +23,17 @@ index fed9f9bd..9654b5dc 100644 ARG BRANCH=master -@@ -24,7 +24,9 @@ ENV YARDSTICK_REPO_DIR="${REPOS_DIR}/yardstick/" \ +@@ -24,7 +24,8 @@ ENV YARDSTICK_REPO_DIR="${REPOS_DIR}/yardstick/" \ RELENG_REPO_DIR="${REPOS_DIR}/releng" \ STORPERF_REPO_DIR="${REPOS_DIR}/storperf" -RUN apt-get update && apt-get install -y git python python-setuptools python-pip && apt-get -y autoremove && apt-get clean +RUN apt-get update && apt-get install -y git python python-setuptools python-pip && apt-get -y autoremove && \ + apt-get install -y libssl-dev && apt-get -y install libffi-dev && apt-get clean -+ RUN easy_install -U setuptools==30.0.0 RUN pip install appdirs==1.4.0 pyopenssl==17.5.0 python-openstackclient==3.11.0 python-heatclient==1.11.0 ansible==2.4.2 -@@ -45,8 +47,8 @@ RUN echo "daemon off;" >> /etc/nginx/nginx.conf +@@ -48,8 +49,8 @@ RUN echo "daemon off;" >> /etc/nginx/nginx.conf # nginx=5000, rabbitmq=5672 EXPOSE 5000 5672 @@ -44,5 +42,6 @@ index fed9f9bd..9654b5dc 100644 +ADD http://download.cirros-cloud.net/daily/20161201/cirros-d161201-aarch64-disk.img ${IMAGE_DIR} +ADD http://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-arm64-disk1.img ${IMAGE_DIR} - COPY ./docker/exec_tests.sh /usr/local/bin/ + COPY ./exec_tests.sh /usr/local/bin/ + diff --git a/etc/yardstick/nodes/apex_baremetal/pod.yaml b/etc/yardstick/nodes/apex_baremetal/pod.yaml new file mode 100644 index 000000000..4b058c499 --- /dev/null +++ b/etc/yardstick/nodes/apex_baremetal/pod.yaml @@ -0,0 +1,46 @@ +############################################################################## +# Copyright (c) 2018 Intracom Telecom and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +nodes: +- + name: node1 + role: Controller + ip: ip1 + user: heat-admin + key_filename: node_keyfile +- + name: node2 + role: Controller + ip: ip2 + user: heat-admin + key_filename: node_keyfile +- + name: node3 + role: Controller + ip: ip3 + user: heat-admin + key_filename: node_keyfile +- + name: node4 + role: Compute + ip: ip4 + user: heat-admin + key_filename: node_keyfile +- + name: node5 + role: Compute + ip: ip5 + user: heat-admin + key_filename: node_keyfile +- + name: node6 + role: Opendaylight-Cluster-Leader + ip: ip6 + user: heat-admin + key_filename: node_keyfile diff --git a/etc/yardstick/nodes/apex_virtual/pod.yaml b/etc/yardstick/nodes/apex_virtual/pod.yaml new file mode 100644 index 000000000..59b51d224 --- /dev/null +++ b/etc/yardstick/nodes/apex_virtual/pod.yaml @@ -0,0 +1,40 @@ +############################################################################## +# Copyright (c) 2018 Intracom Telecom and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +nodes: +- + name: node1 + role: Controller + ip: 192.0.2.15 + user: heat-admin + key_filename: /root/.ssh/id_rsa +- + name: node2 + role: Controller + ip: 192.0.2.4 + user: heat-admin + key_filename: /root/.ssh/id_rsa +- + name: node3 + role: Controller + ip: 192.0.2.6 + user: heat-admin + key_filename: /root/.ssh/id_rsa +- + name: node4 + role: Compute + ip: 192.0.2.10 + user: heat-admin + key_filename: /root/.ssh/id_rsa +- + name: node5 + role: Compute + ip: 192.0.2.14 + user: heat-admin + key_filename: /root/.ssh/id_rsa diff --git a/tests/ci/prepare_env.sh b/tests/ci/prepare_env.sh index d7c60d48f..8b9f887b2 100755 --- a/tests/ci/prepare_env.sh +++ b/tests/ci/prepare_env.sh @@ -16,6 +16,7 @@ : ${EXTERNAL_NETWORK:='admin_floating_net'} : ${USER_NAME:='ubuntu'} : ${SSH_KEY:='/root/.ssh/id_rsa'} +: ${DEPLOY_SCENARIO:='unknown'} # Extract network name from EXTERNAL_NETWORK # e.g. 
EXTERNAL_NETWORK='ext-net;flat;192.168.0.2;192.168.0.253;192.168.0.1;192.168.0.0/24' @@ -62,7 +63,73 @@ verify_connectivity() { } ssh_options="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no" +if [ "$INSTALLER_TYPE" == "apex" ]; then + # check the connection + verify_connectivity "${INSTALLER_IP}" + + pod_yaml="$YARDSTICK_REPO_DIR/etc/yardstick/nodes/apex_baremetal/pod.yaml" + + # update "ip" according to the CI env + ssh -l root "${INSTALLER_IP}" -i ${SSH_KEY} ${ssh_options} \ + "source /home/stack/stackrc && openstack server list -f yaml" > node_info + + controller_ips=($(awk '/control/{getline; {print $2}}' < node_info | grep -o '[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}')) + compute_ips=($(awk '/compute/{getline; {print $2}}' < node_info | grep -o '[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}')) + odl_ip="" + # Get ODL's cluster default module-shard Leader IP in HA scenario + if [[ ${DEPLOY_SCENARIO} == os-odl-*-ha ]]; then + for ip in "${controller_ips[@]}"; + do + if [[ "$odl_ip" ]]; then + break + fi + for ((i=0; i<${#controller_ips[@]}; i++)); + do + ODL_STATE=$(curl -s -u admin:admin -H "Accept: application/json" -H "Content-Type: application/json" \ + "http://"${ip}":8081/jolokia/read/org.opendaylight.controller:Category=Shards,name=member-"${i}"-shard-default-operational,type=DistributedOperationalDatastore" \ + | grep -o \"RaftState\"\:\"Leader\" | tr ":" "\n" | sed -n '2p' | sed 's/\"//g'); + + if [[ ${ODL_STATE} == "Leader" ]]; then + odl_ip=${ip} + break + fi + done; + done + + if [[ -z "$odl_ip" ]]; then + echo "ERROR: Opendaylight Leader IP is emtpy" + exit 1 + fi + + elif [[ ${DEPLOY_SCENARIO} == *"odl"* ]]; then + odl_ip=${controller_ips[0]} + fi + + if [[ ${controller_ips[0]} ]]; then + sed -i "s|ip1|${controller_ips[0]}|" "${pod_yaml}" + fi + if [[ ${controller_ips[1]} ]]; then + sed -i "s|ip2|${controller_ips[1]}|" "${pod_yaml}" + fi + if [[ ${controller_ips[2]} ]]; then + sed -i "s|ip3|${controller_ips[2]}|" "${pod_yaml}" + fi + if [[ ${compute_ips[0]} ]]; then + sed -i "s|ip4|${compute_ips[0]}|" "${pod_yaml}" + fi + if [[ ${compute_ips[1]} ]]; then + sed -i "s|ip5|${compute_ips[1]}|" "${pod_yaml}" + fi + if [[ ${odl_ip} ]]; then + sed -i "s|ip6|${odl_ip}|" "${pod_yaml}" + fi + + + # update 'key_filename' according to the CI env + sed -i "s|node_keyfile|${SSH_KEY}|" "${pod_yaml}" + +fi if [ "$INSTALLER_TYPE" == "fuel" ]; then # check the connection diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc092.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc092.yaml new file mode 100644 index 000000000..85ec510df --- /dev/null +++ b/tests/opnfv/test_cases/opnfv_yardstick_tc092.yaml @@ -0,0 +1,276 @@ +############################################################################## +## Copyright (c) 2018 Intracom Telecom and others. +## +## All rights reserved. 
This program and the accompanying materials +## are made available under the terms of the Apache License, Version 2.0 +## which accompanies this distribution, and is available at +## http://www.apache.org/licenses/LICENSE-2.0 +############################################################################### +--- + +schema: "yardstick:task:0.1" +description: > + Yardstick TC092 config file; + SDN Controller resilience in HA configuration + +{% set file = file or '/etc/yardstick/pod.yaml' %} +{% set attack_host = attack_host or 'node6' %} + +scenarios: + +- + type: "GeneralHA" + options: + monitors: + - monitor_type: "general-monitor" + monitor_key: "ip-status" + key: "snat" + monitor_time: 50 + host: athena + sla: + max_outage_time: 0 + parameter: + destination_ip: "8.8.8.8" + + - monitor_type: "general-monitor" + monitor_key: "ip-status" + key: "l2" + monitor_time: 50 + host: athena + sla: + max_outage_time: 0 + parameter: + destination_ip: "@private_ip" + + operations: + - operation_type: "general-operation" + key: "get-privateip" + operation_key: "get-privateip" + action_parameter: + server_name: "ares" + return_parameter: + all: "@private_ip" + + + steps: + - actionKey: "get-privateip" + actionType: "operation" + index: 1 + + - actionKey: "l2" + actionType: "monitor" + index: 2 + + - actionKey: "snat" + actionType: "monitor" + index: 3 + + + nodes: + {{attack_host}}: {{attack_host}}.LF + athena: athena.ODLHA1 + runner: + type: Duration + duration: 1 + sla: + action: monitor + +- + type: "GeneralHA" + options: + attackers: + - + fault_type: "kill-process" + process_name: "opendaylight" + key: "kill-process" + host: {{attack_host}} + + monitors: + - monitor_type: "process" + process_name: "opendaylight" + host: {{attack_host}} + key: "monitor-recovery" + monitor_time: 50 + sla: + max_recover_time: 30 + + + - monitor_type: "general-monitor" + monitor_key: "ip-status" + key: "snat" + monitor_time: 70 + host: athena + sla: + max_outage_time: 0 + parameter: + destination_ip: "8.8.8.8" + + - monitor_type: "general-monitor" + monitor_key: "ip-status" + key: "l2" + monitor_time: 70 + host: athena + sla: + max_outage_time: 0 + parameter: + destination_ip: "@private_ip" + + operations: + - operation_type: "general-operation" + key: "start-service" + host: {{attack_host}} + operation_key: "start-service" + action_parameter: + service: "opendaylight" + rollback_parameter: + service: "opendaylight" + + - operation_type: "general-operation" + key: "get-privateip" + operation_key: "get-privateip" + action_parameter: + server_name: "ares" + return_parameter: + all: "@private_ip" + + + + steps: + + - actionKey: "monitor-recovery" + actionType: "monitor" + index: 1 + + - actionKey: "get-privateip" + actionType: "operation" + index: 2 + + - actionKey: "l2" + actionType: "monitor" + index: 3 + + - actionKey: "snat" + actionType: "monitor" + index: 4 + + - actionKey: "kill-process" + actionType: "attacker" + index: 5 + + - actionKey: "start-service" + actionType: "operation" + index: 6 + + + + nodes: + {{attack_host}}: {{attack_host}}.LF + athena: athena.ODLHA1 + runner: + type: Duration + duration: 1 + sla: + action: monitor + +- + type: "GeneralHA" + options: + monitors: + + - monitor_type: "general-monitor" + monitor_key: "ip-status" + key: "l2" + monitor_time: 80 + host: athena + sla: + max_outage_time: 40 + parameter: + destination_ip: "@private_ip" + + operations: + - operation_type: "general-operation" + key: "get-privateip" + operation_key: "get-privateip" + action_parameter: + server_name: "hermes" + 
return_parameter: + all: "@private_ip" + + - operation_type: "general-operation" + key: "nova-create-instance" + operation_key: "nova-create-instance" + action_parameter: + serverconfig: "hermes yardstick-image yardstick-flavor test_one" + rollback_parameter: + serverconfig: "hermes" + + - operation_type: "general-operation" + key: "add-server-to-secgroup" + operation_key: "add-server-to-secgroup" + action_parameter: + serverconfig: "hermes ODLHA1" + rollback_parameter: + serverconfig: "hermes ODLHA1" + + + steps: + - actionKey: "nova-create-instance" + actionType: "operation" + index: 1 + + - actionKey: "add-server-to-secgroup" + actionType: "operation" + index: 2 + + - actionKey: "get-privateip" + actionType: "operation" + index: 3 + + - actionKey: "l2" + actionType: "monitor" + index: 4 + + nodes: + {{attack_host}}: {{attack_host}}.LF + athena: athena.ODLHA1 + runner: + type: Duration + duration: 1 + sla: + action: monitor + + +contexts: + - + type: Node + name: LF + file: {{file}} + - + name: ODLHA1 + image: yardstick-image + flavor: yardstick-flavor + user: ubuntu + host: athena + placement_groups: + pgrp1: + policy: "availability" + servers: + athena: + floating_ip: true + placement: "pgrp1" + network_ports: + test_one: + - ens0 + + ares: + floating_ip: true + placement: "pgrp1" + network_ports: + test_one: + - ens0 + + networks: + test_one: + cidr: '10.0.1.0/24' + router: 'test_router' + diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc093.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc093.yaml new file mode 100644 index 000000000..a034471aa --- /dev/null +++ b/tests/opnfv/test_cases/opnfv_yardstick_tc093.yaml @@ -0,0 +1,313 @@ +############################################################################## +## Copyright (c) 2018 Intracom Telecom and others. +## +## All rights reserved. 
This program and the accompanying materials +## are made available under the terms of the Apache License, Version 2.0 +## which accompanies this distribution, and is available at +## http://www.apache.org/licenses/LICENSE-2.0 +############################################################################### +--- + +schema: "yardstick:task:0.1" +description: > + Yardstick TC093 config file; + SDN Vswitch resilience in non-HA or HA configuration + +{% set file = file or '/etc/yardstick/pod.yaml' %} +{% set attack_host_cmp_one = attack_host_cmp_one or 'node4' %} +{% set attack_host_cmp_two = attack_host_cmp_two or 'node5' %} +{% set systemd_service_name = systemd_service_name or 'openvswitch-switch'%} + +scenarios: + +- + type: "GeneralHA" + options: + monitors: + + - monitor_type: "general-monitor" + monitor_key: "ip-status" + key: "snat" + monitor_time: 50 + host: athena + sla: + max_outage_time: 0 + parameter: + destination_ip: "8.8.8.8" + + - monitor_type: "general-monitor" + monitor_key: "ip-status" + key: "l2" + monitor_time: 50 + host: athena + sla: + max_outage_time: 0 + parameter: + destination_ip: "@private_ip" + + operations: + - operation_type: "general-operation" + key: "get-privateip" + operation_key: "get-privateip" + action_parameter: + server_name: "ares" + return_parameter: + all: "@private_ip" + + + steps: + - actionKey: "get-privateip" + actionType: "operation" + index: 1 + + - actionKey: "l2" + actionType: "monitor" + index: 2 + + - actionKey: "snat" + actionType: "monitor" + index: 3 + + + nodes: + athena: athena.ODLnoHA1 + runner: + type: Duration + duration: 1 + sla: + action: monitor + + +- + type: "GeneralHA" + options: + attackers: + - + fault_type: "kill-process" + process_name: "openvswitch" + key: "kill-process-cmp-one" + host: {{attack_host_cmp_one}} + + - + fault_type: "kill-process" + process_name: "openvswitch" + key: "kill-process-cmp-two" + host: {{attack_host_cmp_two}} + + monitors: + - monitor_type: "process" + process_name: "openvswitch" + host: {{attack_host_cmp_one}} + key: "monitor-recovery-cmp-one" + monitor_time: 50 + sla: + max_recover_time: 30 + + - monitor_type: "process" + process_name: "openvswitch" + host: {{attack_host_cmp_two}} + key: "monitor-recovery-cmp-two" + monitor_time: 50 + sla: + max_recover_time: 30 + + - monitor_type: "general-monitor" + monitor_key: "ip-status" + key: "snat" + monitor_time: 70 + host: athena + sla: + max_outage_time: 20 + parameter: + destination_ip: "8.8.8.8" + + - monitor_type: "general-monitor" + monitor_key: "ip-status" + key: "l2" + monitor_time: 70 + host: athena + sla: + max_outage_time: 20 + parameter: + destination_ip: "@private_ip" + + operations: + - operation_type: "general-operation" + key: "restart-service-cmp-one" + host: {{attack_host_cmp_one}} + operation_key: "start-service" + action_parameter: + service: {{systemd_service_name ~ " restart"}} + rollback_parameter: + service: "openvswitch" + + - operation_type: "general-operation" + key: "restart-service-cmp-two" + host: {{attack_host_cmp_two}} + operation_key: "start-service" + action_parameter: + service: {{systemd_service_name ~ " restart"}} + rollback_parameter: + service: "openvswitch" + + - operation_type: "general-operation" + key: "get-privateip" + operation_key: "get-privateip" + action_parameter: + server_name: "ares" + return_parameter: + all: "@private_ip" + + + + steps: + + - actionKey: "get-privateip" + actionType: "operation" + index: 1 + + - actionKey: "l2" + actionType: "monitor" + index: 2 + + - actionKey: "snat" + actionType: 
"monitor" + index: 3 + + - actionKey: "kill-process-cmp-one" + actionType: "attacker" + index: 4 + + - actionKey: "kill-process-cmp-two" + actionType: "attacker" + index: 5 + + - actionKey: "monitor-recovery-cmp-one" + actionType: "monitor" + index: 6 + + - actionKey: "monitor-recovery-cmp-two" + actionType: "monitor" + index: 7 + + + - actionKey: "restart-service-cmp-one" + actionType: "operation" + index: 8 + + - actionKey: "restart-service-cmp-two" + actionType: "operation" + index: 9 + + + nodes: + {{attack_host_cmp_one}}: {{attack_host_cmp_one}}.LF + {{attack_host_cmp_two}}: {{attack_host_cmp_two}}.LF + athena: athena.ODLnoHA1 + runner: + type: Duration + duration: 1 + sla: + action: monitor + +- + type: "GeneralHA" + options: + monitors: + + - monitor_type: "general-monitor" + monitor_key: "ip-status" + key: "l2" + monitor_time: 80 + host: athena + sla: + max_outage_time: 40 + parameter: + destination_ip: "@private_ip" + + operations: + - operation_type: "general-operation" + key: "get-privateip" + operation_key: "get-privateip" + action_parameter: + server_name: "hermes" + return_parameter: + all: "@private_ip" + + - operation_type: "general-operation" + key: "nova-create-instance" + operation_key: "nova-create-instance" + action_parameter: + serverconfig: "hermes yardstick-image yardstick-flavor test_one" + rollback_parameter: + serverconfig: "hermes" + + - operation_type: "general-operation" + key: "add-server-to-secgroup" + operation_key: "add-server-to-secgroup" + action_parameter: + serverconfig: "hermes ODLnoHA1" + rollback_parameter: + serverconfig: "hermes ODLnoHA1" + + + steps: + - actionKey: "nova-create-instance" + actionType: "operation" + index: 1 + + - actionKey: "add-server-to-secgroup" + actionType: "operation" + index: 2 + + - actionKey: "get-privateip" + actionType: "operation" + index: 3 + + - actionKey: "l2" + actionType: "monitor" + index: 4 + + nodes: + athena: athena.ODLnoHA1 + runner: + type: Duration + duration: 1 + sla: + action: monitor + + +contexts: + - + type: Node + name: LF + file: {{file}} + - + name: ODLnoHA1 + image: yardstick-image + flavor: yardstick-flavor + user: ubuntu + host: athena + placement_groups: + pgrp1: + policy: "availability" + servers: + athena: + floating_ip: true + placement: "pgrp1" + network_ports: + test_one: + - ens0 + + ares: + floating_ip: true + placement: "pgrp1" + network_ports: + test_one: + - ens0 + + networks: + test_one: + cidr: '10.0.1.0/24' + router: 'test_router' + diff --git a/tests/opnfv/test_suites/opnfv_os-odl-nofeature-ha_daily.yaml b/tests/opnfv/test_suites/opnfv_os-odl-nofeature-ha_daily.yaml index 13cc710f3..f174a90e4 100644 --- a/tests/opnfv/test_suites/opnfv_os-odl-nofeature-ha_daily.yaml +++ b/tests/opnfv/test_suites/opnfv_os-odl-nofeature-ha_daily.yaml @@ -62,3 +62,18 @@ test_cases: task_args: huawei-pod1: '{"file": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml", "host": "node1"}' +- + file_name: opnfv_yardstick_tc092.yaml + constraint: + installer: apex + task_args: + default: '{"file": "etc/yardstick/nodes/apex_baremetal/pod.yaml", + "attack_host": "node6"}' +- + file_name: opnfv_yardstick_tc093.yaml + constraint: + installer: apex + task_args: + default: '{"file": "etc/yardstick/nodes/apex_baremetal/pod.yaml", + "attack_host_cmp_one": "node4","attack_host_cmp_two": "node5", + "systemd_service_name": "openvswitch"}' diff --git a/tests/opnfv/test_suites/opnfv_os-odl-nofeature-noha_daily.yaml b/tests/opnfv/test_suites/opnfv_os-odl-nofeature-noha_daily.yaml index 42a170a89..feb8a6631 100644 --- 
a/tests/opnfv/test_suites/opnfv_os-odl-nofeature-noha_daily.yaml +++ b/tests/opnfv/test_suites/opnfv_os-odl-nofeature-noha_daily.yaml @@ -61,3 +61,11 @@ test_cases: task_args: default: '{"file": "etc/yardstick/nodes/fuel_baremetal/pod.yaml", "attack_host": "node6"}' +- + file_name: opnfv_yardstick_tc093.yaml + constraint: + installer: apex + task_args: + default: '{"file": "etc/yardstick/nodes/apex_baremetal/pod.yaml", + "attack_host_cmp_one": "node4","attack_host_cmp_two": "node5", + "systemd_service_name": "openvswitch"}' diff --git a/yardstick/benchmark/scenarios/availability/attacker/attacker_process.py b/yardstick/benchmark/scenarios/availability/attacker/attacker_process.py index cb171eafa..7f1136c08 100644 --- a/yardstick/benchmark/scenarios/availability/attacker/attacker_process.py +++ b/yardstick/benchmark/scenarios/availability/attacker/attacker_process.py @@ -42,29 +42,28 @@ class ProcessAttacker(BaseAttacker): def check(self): with open(self.check_script, "r") as stdin_file: - exit_status, stdout, stderr = self.connection.execute( + _, stdout, stderr = self.connection.execute( "sudo /bin/sh -s {0}".format(self.service_name), stdin=stdin_file) if stdout: - LOG.info("check the environment success!") + LOG.info("Check the environment success!") return int(stdout.strip('\n')) else: - LOG.error( - "the host environment is error, stdout:%s, stderr:%s", - stdout, stderr) + LOG.error("Error checking the host environment, " + "stdout:%s, stderr:%s", stdout, stderr) return False def inject_fault(self): with open(self.inject_script, "r") as stdin_file: - exit_status, stdout, stderr = self.connection.execute( + self.connection.execute( "sudo /bin/sh -s {0}".format(self.service_name), stdin=stdin_file) def recover(self): with open(self.recovery_script, "r") as stdin_file: - exit_status, stdout, stderr = self.connection.execute( + exit_status, _, _ = self.connection.execute( "sudo /bin/bash -s {0} ".format(self.service_name), stdin=stdin_file) if exit_status: - LOG.info("Fail to restart service!") + LOG.info("Failed to restart service: %s", self.recovery_script) diff --git a/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py b/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py index d03d04420..d67a16b98 100644 --- a/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py +++ b/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py @@ -71,7 +71,7 @@ class BaseAttacker(object): for attacker_cls in utils.itersubclasses(BaseAttacker): if attacker_type == attacker_cls.__attacker_type__: return attacker_cls - raise RuntimeError("No such runner_type %s" % attacker_type) + raise RuntimeError("No such runner_type: %s" % attacker_type) def get_script_fullpath(self, path): base_path = os.path.dirname(attacker_conf_path) diff --git a/yardstick/benchmark/scenarios/availability/director.py b/yardstick/benchmark/scenarios/availability/director.py index 71690c135..6cc0cb286 100644 --- a/yardstick/benchmark/scenarios/availability/director.py +++ b/yardstick/benchmark/scenarios/availability/director.py @@ -40,7 +40,7 @@ class Director(object): nodes = self.context_cfg.get("nodes", None) # setup attackers if "attackers" in self.scenario_cfg["options"]: - LOG.debug("start init attackers...") + LOG.debug("Start init attackers...") attacker_cfgs = self.scenario_cfg["options"]["attackers"] self.attackerMgr = baseattacker.AttackerMgr() self.data = self.attackerMgr.init_attackers(attacker_cfgs, @@ -48,19 +48,19 @@ class Director(object): # setup monitors if 
"monitors" in self.scenario_cfg["options"]: - LOG.debug("start init monitors...") + LOG.debug("Start init monitors...") monitor_cfgs = self.scenario_cfg["options"]["monitors"] self.monitorMgr = basemonitor.MonitorMgr(self.data) self.monitorMgr.init_monitors(monitor_cfgs, nodes) # setup operations if "operations" in self.scenario_cfg["options"]: - LOG.debug("start init operations...") + LOG.debug("Start init operations...") operation_cfgs = self.scenario_cfg["options"]["operations"] self.operationMgr = baseoperation.OperationMgr() self.operationMgr.init_operations(operation_cfgs, nodes) # setup result checker if "resultCheckers" in self.scenario_cfg["options"]: - LOG.debug("start init resultCheckers...") + LOG.debug("Start init resultCheckers...") result_check_cfgs = self.scenario_cfg["options"]["resultCheckers"] self.resultCheckerMgr = baseresultchecker.ResultCheckerMgr() self.resultCheckerMgr.init_ResultChecker(result_check_cfgs, nodes) @@ -69,7 +69,7 @@ class Director(object): if intermediate_variables is None: intermediate_variables = {} LOG.debug( - "the type of current action is %s, the key is %s", type, key) + "The type of current action is %s, the key is %s", type, key) if type == ActionType.ATTACKER: return actionplayers.AttackerPlayer(self.attackerMgr[key], intermediate_variables) if type == ActionType.MONITOR: @@ -80,17 +80,17 @@ class Director(object): if type == ActionType.OPERATION: return actionplayers.OperationPlayer(self.operationMgr[key], intermediate_variables) - LOG.debug("something run when creatactionplayer") + LOG.debug("The type is not recognized by createActionPlayer") def createActionRollbacker(self, type, key): LOG.debug( - "the type of current action is %s, the key is %s", type, key) + "The type of current action is %s, the key is %s", type, key) if type == ActionType.ATTACKER: return actionrollbackers.AttackerRollbacker(self.attackerMgr[key]) if type == ActionType.OPERATION: return actionrollbackers.OperationRollbacker( self.operationMgr[key]) - LOG.debug("no rollbacker created for %s", key) + LOG.debug("No rollbacker created for key: %s", key) def verify(self): result = True @@ -99,7 +99,7 @@ class Director(object): if hasattr(self, 'resultCheckerMgr'): result &= self.resultCheckerMgr.verify() if result: - LOG.debug("monitors are passed") + LOG.debug("Monitor results are passed") return result def stopMonitors(self): @@ -107,12 +107,12 @@ class Director(object): self.monitorMgr.wait_monitors() def knockoff(self): - LOG.debug("knock off ....") + LOG.debug("Knock off ....") while self.executionSteps: singleStep = self.executionSteps.pop() singleStep.rollback() def store_result(self, result): - LOG.debug("store result ....") + LOG.debug("Store result ....") if hasattr(self, 'monitorMgr'): self.monitorMgr.store_result(result) diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/start_service.bash b/yardstick/benchmark/scenarios/availability/ha_tools/start_service.bash index 858d86ca0..2388507d7 100755 --- a/yardstick/benchmark/scenarios/availability/ha_tools/start_service.bash +++ b/yardstick/benchmark/scenarios/availability/ha_tools/start_service.bash @@ -9,24 +9,23 @@ # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## -# Start a service and check the service is started +# Start or restart a service and check the service is started set -e service_name=$1 +operation=${2-start} # values are "start" or "restart" -Distributor=$(lsb_release -a | grep "Distributor ID" | awk '{print 
$3}') - -if [ "$Distributor" != "Ubuntu" -a "$service_name" != "keystone" -a "$service_name" != "neutron-server" -a "$service_name" != "haproxy" ]; then +if [ -f /usr/bin/yum -a "$service_name" != "keystone" -a "$service_name" != "neutron-server" -a "$service_name" != "haproxy" -a "$service_name" != "openvswitch" ]; then service_name="openstack-"${service_name} -elif [ "$Distributor" = "Ubuntu" -a "$service_name" = "keystone" ]; then +elif [ -f /usr/bin/apt -a "$service_name" = "keystone" ]; then service_name="apache2" elif [ "$service_name" = "keystone" ]; then service_name="httpd" fi if which systemctl 2>/dev/null; then - systemctl start $service_name + systemctl $operation $service_name else - service $service_name start + service $service_name $operation fi diff --git a/yardstick/benchmark/scenarios/availability/monitor/basemonitor.py b/yardstick/benchmark/scenarios/availability/monitor/basemonitor.py index 50a63f53d..f6004c774 100644 --- a/yardstick/benchmark/scenarios/availability/monitor/basemonitor.py +++ b/yardstick/benchmark/scenarios/availability/monitor/basemonitor.py @@ -103,7 +103,7 @@ class BaseMonitor(multiprocessing.Process): for monitor in utils.itersubclasses(BaseMonitor): if monitor_type == monitor.__monitor_type__: return monitor - raise RuntimeError("No such monitor_type %s" % monitor_type) + raise RuntimeError("No such monitor_type: %s" % monitor_type) def get_script_fullpath(self, path): base_path = os.path.dirname(monitor_conf_path) diff --git a/yardstick/benchmark/scenarios/availability/monitor/monitor_command.py b/yardstick/benchmark/scenarios/availability/monitor/monitor_command.py index d0551bf03..3b36c762d 100644 --- a/yardstick/benchmark/scenarios/availability/monitor/monitor_command.py +++ b/yardstick/benchmark/scenarios/availability/monitor/monitor_command.py @@ -24,7 +24,7 @@ def _execute_shell_command(command): output = [] try: output = subprocess.check_output(command, shell=True) - except Exception: + except Exception: # pylint: disable=broad-except exitcode = -1 LOG.error("exec command '%s' error:\n ", command, exc_info=True) @@ -45,7 +45,7 @@ class MonitorOpenstackCmd(basemonitor.BaseMonitor): self.connection = ssh.SSH.from_node(host, defaults={"user": "root"}) self.connection.wait(timeout=600) - LOG.debug("ssh host success!") + LOG.debug("ssh host (%s) success!", str(host)) self.check_script = self.get_script_fullpath( "ha_tools/check_openstack_cmd.bash") @@ -61,22 +61,20 @@ class MonitorOpenstackCmd(basemonitor.BaseMonitor): self.cmd = self.cmd + " --insecure" def monitor_func(self): - exit_status = 0 exit_status, stdout = _execute_shell_command(self.cmd) - LOG.debug("Execute command '%s' and the stdout is:\n%s", self.cmd, stdout) + LOG.debug("Executed command '%s'. 
" + "The stdout is:\n%s", self.cmd, stdout) if exit_status: return False return True def verify_SLA(self): outage_time = self._result.get('outage_time', None) - LOG.debug("the _result:%s", self._result) max_outage_time = self._config["sla"]["max_outage_time"] if outage_time > max_outage_time: LOG.info("SLA failure: %f > %f", outage_time, max_outage_time) return False else: - LOG.info("the sla is passed") return True @@ -97,7 +95,7 @@ def _test(): # pragma: no cover } monitor_configs.append(config) - p = basemonitor.MonitorMgr() + p = basemonitor.MonitorMgr({}) p.init_monitors(monitor_configs, context) p.start_monitors() p.wait_monitors() diff --git a/yardstick/benchmark/scenarios/availability/monitor/monitor_multi.py b/yardstick/benchmark/scenarios/availability/monitor/monitor_multi.py index dce69f45f..971bae1e9 100644 --- a/yardstick/benchmark/scenarios/availability/monitor/monitor_multi.py +++ b/yardstick/benchmark/scenarios/availability/monitor/monitor_multi.py @@ -29,7 +29,7 @@ class MultiMonitor(basemonitor.BaseMonitor): monitor_cls = basemonitor.BaseMonitor.get_monitor_cls(monitor_type) monitor_number = self._config.get("monitor_number", 1) - for i in range(monitor_number): + for _ in range(monitor_number): monitor_ins = monitor_cls(self._config, self._context, self.monitor_data) self.monitors.append(monitor_ins) @@ -70,7 +70,8 @@ class MultiMonitor(basemonitor.BaseMonitor): elif "max_recover_time" in self._config["sla"]: max_outage_time = self._config["sla"]["max_recover_time"] else: - raise RuntimeError("monitor max_outage_time config is not found") + raise RuntimeError("'max_outage_time' or 'max_recover_time' " + "config is not found") self._result = {"outage_time": outage_time} if outage_time > max_outage_time: diff --git a/yardstick/benchmark/scenarios/availability/monitor/monitor_process.py b/yardstick/benchmark/scenarios/availability/monitor/monitor_process.py index b0f6f8e9d..8d2f2633c 100644 --- a/yardstick/benchmark/scenarios/availability/monitor/monitor_process.py +++ b/yardstick/benchmark/scenarios/availability/monitor/monitor_process.py @@ -25,14 +25,14 @@ class MonitorProcess(basemonitor.BaseMonitor): self.connection = ssh.SSH.from_node(host, defaults={"user": "root"}) self.connection.wait(timeout=600) - LOG.debug("ssh host success!") + LOG.debug("ssh host (%s) success!", str(host)) self.check_script = self.get_script_fullpath( "ha_tools/check_process_python.bash") self.process_name = self._config["process_name"] def monitor_func(self): with open(self.check_script, "r") as stdin_file: - exit_status, stdout, stderr = self.connection.execute( + _, stdout, _ = self.connection.execute( "sudo /bin/sh -s {0}".format(self.process_name), stdin=stdin_file) @@ -45,14 +45,12 @@ class MonitorProcess(basemonitor.BaseMonitor): return True def verify_SLA(self): - LOG.debug("the _result:%s", self._result) outage_time = self._result.get('outage_time', None) max_outage_time = self._config["sla"]["max_recover_time"] if outage_time > max_outage_time: - LOG.error("SLA failure: %f > %f", outage_time, max_outage_time) + LOG.info("SLA failure: %f > %f", outage_time, max_outage_time) return False else: - LOG.info("the sla is passed") return True @@ -73,7 +71,7 @@ def _test(): # pragma: no cover } monitor_configs.append(config) - p = basemonitor.MonitorMgr() + p = basemonitor.MonitorMgr({}) p.init_monitors(monitor_configs, context) p.start_monitors() p.wait_monitors() diff --git a/yardstick/benchmark/scenarios/availability/serviceha.py b/yardstick/benchmark/scenarios/availability/serviceha.py 
index dcd0fe598..42941c6e7 100755
--- a/yardstick/benchmark/scenarios/availability/serviceha.py
+++ b/yardstick/benchmark/scenarios/availability/serviceha.py
@@ -34,7 +34,7 @@ class ServiceHA(base.Scenario):
         """scenario setup"""
         nodes = self.context_cfg.get("nodes", None)
         if nodes is None:
-            LOG.error("the nodes info is none")
+            LOG.error("The nodes info is none")
             return
 
         self.attackers = []
@@ -57,17 +57,17 @@
     def run(self, result):
         """execute the benchmark"""
         if not self.setup_done:
-            LOG.error("The setup not finished!")
+            LOG.error("The setup is not finished!")
             return
 
         self.monitorMgr.start_monitors()
-        LOG.info("HA monitor start!")
+        LOG.info("Monitor '%s' start!", self.__scenario_type__)
 
         for attacker in self.attackers:
             attacker.inject_fault()
 
         self.monitorMgr.wait_monitors()
-        LOG.info("HA monitor stop!")
+        LOG.info("Monitor '%s' stop!", self.__scenario_type__)
 
         sla_pass = self.monitorMgr.verify_SLA()
         for k, v in self.data.items():
diff --git a/yardstick/common/openstack_utils.py b/yardstick/common/openstack_utils.py
index 53f0ccc34..5a83ddb24 100644
--- a/yardstick/common/openstack_utils.py
+++ b/yardstick/common/openstack_utils.py
@@ -784,9 +784,8 @@ def list_images(shade_client=None):
 # *********************************************
 # CINDER
 # *********************************************
-def get_volume_id(volume_name):  # pragma: no cover
-    volumes = get_cinder_client().volumes.list()
-    return next((v.id for v in volumes if v.name == volume_name), None)
+def get_volume_id(shade_client, volume_name):
+    return shade_client.get_volume_id(volume_name)
 
 
 def create_volume(cinder_client, volume_name, volume_size,
diff --git a/yardstick/tests/unit/common/test_openstack_utils.py b/yardstick/tests/unit/common/test_openstack_utils.py
index 67ca826a5..cc19d98b4 100644
--- a/yardstick/tests/unit/common/test_openstack_utils.py
+++ b/yardstick/tests/unit/common/test_openstack_utils.py
@@ -536,3 +536,25 @@ class GetFlavorTestCase(unittest.TestCase):
             'flavor_name_or_id')
         mock_logger.error.assert_called_once()
         self.assertIsNone(output)
+
+# *********************************************
+# CINDER
+# *********************************************
+
+
+class GetVolumeIDTestCase(unittest.TestCase):
+
+    def test_get_volume_id(self):
+        self.mock_shade_client = mock.Mock()
+        _uuid = uuidutils.generate_uuid()
+        self.mock_shade_client.get_volume_id.return_value = _uuid
+        output = openstack_utils.get_volume_id(self.mock_shade_client,
+                                               'volume_name')
+        self.assertEqual(_uuid, output)
+
+    def test_get_volume_id_None(self):
+        self.mock_shade_client = mock.Mock()
+        self.mock_shade_client.get_volume_id.return_value = None
+        output = openstack_utils.get_volume_id(self.mock_shade_client,
+                                               'volume_name')
+        self.assertIsNone(output)
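
The sketches below are editorial illustrations of the techniques this patch uses; none of them are part of the patch itself. First, the apex block added to tests/ci/prepare_env.sh locates the OpenDaylight cluster's default-shard leader by probing each controller's Jolokia endpoint with curl and grepping the reply for RaftState. A minimal Python sketch of the same probe, assuming the endpoint layout the script uses (port 8081, admin:admin credentials); find_odl_leader is a hypothetical helper name:

import requests

# MBean read used by prepare_env.sh for the default operational shard.
SHARD_MBEAN = ("http://{ip}:8081/jolokia/read/org.opendaylight.controller:"
               "Category=Shards,name=member-{member}-shard-default-operational,"
               "type=DistributedOperationalDatastore")

def find_odl_leader(controller_ips):
    """Return the controller IP whose default shard reports RaftState 'Leader'."""
    for ip in controller_ips:
        for member in range(len(controller_ips)):
            try:
                resp = requests.get(SHARD_MBEAN.format(ip=ip, member=member),
                                    auth=("admin", "admin"), timeout=5)
                state = resp.json().get("value", {}).get("RaftState")
            except (requests.RequestException, ValueError):
                continue  # unreachable node or non-JSON reply; keep probing
            if state == "Leader":
                return ip
    return None  # the shell script exits with an error in this case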
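
The same block then rewrites etc/yardstick/nodes/apex_baremetal/pod.yaml in place, substituting the ip1..ip6 and node_keyfile placeholders with sed. A rough Python equivalent of those substitutions; fill_pod_yaml is a hypothetical name, not part of the patch:

def fill_pod_yaml(path, replacements):
    """Replace placeholder tokens in a pod.yaml, skipping empty values
    the way the script guards each sed call with a non-empty test."""
    with open(path) as f:
        text = f.read()
    for placeholder, value in replacements.items():
        if value:
            text = text.replace(placeholder, value)
    with open(path, "w") as f:
        f.write(text)

# Usage mirroring the script:
# fill_pod_yaml(pod_yaml, {"ip1": controller_ips[0], "ip6": odl_ip,
#                          "node_keyfile": ssh_key})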
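
The new TC092/TC093 task files default their template variables with the "{% set x = x or 'default' %}" idiom, which the test suite entries override through task_args. A minimal sketch of that render step using plain jinja2 (Yardstick's task loader adds more machinery around it):

import jinja2

template = jinja2.Template(
    "{% set attack_host = attack_host or 'node6' %}"
    "attack host is {{ attack_host }}")

print(template.render())                     # attack host is node6
print(template.render(attack_host="node4"))  # attack host is node4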
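
start_service.bash now accepts an optional second argument, start or restart; TC093 drives it by passing a service string such as "openvswitch-switch restart", which word-splits into the script's two positional parameters. A Python sketch of the resulting dispatch, under the assumption that systemctl is preferred whenever it exists:

import shutil
import subprocess

def start_service(service_name, operation="start"):
    """Start or restart a service the way start_service.bash does."""
    if operation not in ("start", "restart"):
        raise ValueError("operation must be 'start' or 'restart'")
    if shutil.which("systemctl"):  # same probe as the script's 'which systemctl'
        subprocess.check_call(["systemctl", operation, service_name])
    else:
        subprocess.check_call(["service", service_name, operation])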
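
In monitor_multi.py the SLA check now names both accepted keys when neither is configured. The selection logic it implements, isolated as a sketch:

def sla_threshold(sla):
    """Pick the outage budget the way MultiMonitor.verify_SLA does."""
    if "max_outage_time" in sla:
        return sla["max_outage_time"]
    if "max_recover_time" in sla:
        return sla["max_recover_time"]
    raise RuntimeError("'max_outage_time' or 'max_recover_time' "
                       "config is not found")

# sla_threshold({"max_recover_time": 30}) returns 30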
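
Finally, get_volume_id() now takes a shade client and delegates to shade's own lookup, as the new unit tests above exercise with a mock. A usage sketch; building the client from OS_* environment variables via the 'envvars' cloud name is an assumption about the caller's setup, not something this patch prescribes:

import shade

from yardstick.common import openstack_utils

# Build a shade client from OS_* environment variables.
shade_client = shade.openstack_cloud(cloud='envvars')

# Returns the volume UUID, or None when no such volume exists.
volume_id = openstack_utils.get_volume_id(shade_client, 'my-volume')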