-rw-r--r--  docs/userguide/opnfv_yardstick_tc004.rst                       |  77
-rw-r--r--  docs/userguide/opnfv_yardstick_tc052.rst                       | 141
-rw-r--r--  docs/userguide/opnfv_yardstick_tc074.rst                       | 137
-rw-r--r--  plugin/CI/storperf.yaml                                        |  13
-rw-r--r--  tests/ci/docker/yardstick-ci/Dockerfile                        |   1
-rwxr-xr-x  tests/ci/yardstick-verify                                      |  51
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc004.yaml              |  85
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc052.yaml              |  79
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc074.yaml              |  27
-rw-r--r--  tests/opnfv/test_suites/opnfv_os-nosdn-nofeature-ha_daily.yaml |   8
-rwxr-xr-x  tools/ubuntu-server-cloudimg-dpdk-modify.sh                    |   4
-rw-r--r--  tools/yardstick-img-dpdk-finalize.sh                           |   3
-rw-r--r--  yardstick/cmd/commands/plugin.py                               |  37
13 files changed, 563 insertions, 100 deletions
diff --git a/docs/userguide/opnfv_yardstick_tc004.rst b/docs/userguide/opnfv_yardstick_tc004.rst
new file mode 100644
index 000000000..301286126
--- /dev/null
+++ b/docs/userguide/opnfv_yardstick_tc004.rst
@@ -0,0 +1,77 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International
+.. License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) OPNFV, Huawei Technologies Co.,Ltd and others.
+
+*************************************
+Yardstick Test Case Description TC004
+*************************************
+
+.. _cachestat: https://github.com/brendangregg/perf-tools/tree/master/fs
+
++-----------------------------------------------------------------------------+
+|Cache Utilization                                                            |
+|                                                                             |
++--------------+--------------------------------------------------------------+
+|test case id  | OPNFV_YARDSTICK_TC004_Cache Utilization                      |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|metric        | Cache Utilization                                            |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|test purpose  | To evaluate the IaaS compute capability with regards to      |
+|              | cache utilization. This test case should be run in parallel  |
+|              | with other Yardstick test cases and not run as a stand-alone |
+|              | test case.                                                   |
+|              | Measure the cache usage statistics, including cache hit,     |
+|              | cache miss, hit ratio, buffer size and page cache size.      |
+|              | Both average and maximum values are obtained.                |
+|              | The purpose is also to be able to spot trends.               |
+|              | Test results, graphs and similar shall be stored for         |
+|              | comparison reasons and product evolution understanding       |
+|              | between different OPNFV versions and/or configurations.      |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|configuration | File: cachestat.yaml (in the 'samples' directory)            |
+|              |                                                              |
+|              | * interval: 1 - repeat, pausing every 1 second in-between.   |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|test tool     | cachestat                                                    |
+|              |                                                              |
+|              | cachestat is not always part of a Linux distribution, hence  |
+|              | it needs to be installed.                                    |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|references    | cachestat_                                                   |
+|              |                                                              |
+|              | ETSI-NFV-TST001                                              |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|applicability | Test can be configured with different:                       |
+|              |                                                              |
+|              | * interval;                                                  |
+|              | * runner Duration.                                           |
+|              |                                                              |
+|              | There are default values for each above-mentioned option.    |
+|              | Run in background with other test cases.                     |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|pre-test      | The test case image needs to be installed into Glance        |
+|conditions    | with cachestat included in the image.                        |
+|              |                                                              |
+|              | No POD specific requirements have been identified.           |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|test sequence | description and expected result                              |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 1        | The host is installed as a client. The related TC, or TCs,   |
+|              | is invoked and cachestat logs are produced and stored.       |
+|              |                                                              |
+|              | Result: logs are stored.                                     |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|test verdict  | None. Cache utilization results are fetched and stored.      |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
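For orientation, a minimal background CACHEstat scenario in this style is sketched below. It mirrors the opnfv_yardstick_tc004.yaml task removed later in this diff; the host name comes from that task's context and is illustrative here:

    ---
    # Minimal sketch of a background CACHEstat scenario, assuming a context
    # that defines a server named "demeter.yardstick" (as in the task file
    # removed later in this diff).
    schema: "yardstick:task:0.1"

    scenarios:
    -
      type: CACHEstat          # collects cache hit/miss and page-cache statistics
      run_in_background: true  # runs alongside the foreground test cases
      options:
        interval: 1            # sample once per second
      host: demeter.yardstick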
diff --git a/docs/userguide/opnfv_yardstick_tc052.rst b/docs/userguide/opnfv_yardstick_tc052.rst
new file mode 100644
index 000000000..9514b6819
--- /dev/null
+++ b/docs/userguide/opnfv_yardstick_tc052.rst
@@ -0,0 +1,141 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International
+.. License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) OPNFV, Yin Kanglin and others.
+.. 14_ykl@tongji.edu.cn
+
+*************************************
+Yardstick Test Case Description TC052
+*************************************
+
++-----------------------------------------------------------------------------+
+|OpenStack Controller Node Disk I/O Block High Availability                   |
+|                                                                             |
++--------------+--------------------------------------------------------------+
+|test case id  | OPNFV_YARDSTICK_TC052: OpenStack Controller Node Disk I/O    |
+|              | Block High Availability                                      |
++--------------+--------------------------------------------------------------+
+|test purpose  | This test case verifies the high availability of the control |
+|              | node. When the disk I/O of a specified disk is blocked, the  |
+|              | OpenStack services on this node break down. Read and write   |
+|              | services should still be accessible from other controller    |
+|              | nodes, and the services on the failed controller node should |
+|              | be isolated.                                                 |
++--------------+--------------------------------------------------------------+
+|test method   | This test case blocks the disk I/O of a specified control    |
+|              | node, then checks whether the services that need to read or  |
+|              | write the disk of the control node are OK with some monitor  |
+|              | tools.                                                       |
++--------------+--------------------------------------------------------------+
+|attackers     | In this test case, an attacker called "disk-block" is        |
+|              | needed. This attacker includes two parameters:               |
+|              | 1) fault_type: which is used for finding the attacker's      |
+|              | scripts. It should always be set to "disk-block" in this     |
+|              | test case.                                                   |
+|              | 2) host: which is the name of a control node being attacked. |
+|              | e.g.                                                         |
+|              | -fault_type: "disk-block"                                    |
+|              | -host: node1                                                 |
++--------------+--------------------------------------------------------------+
+|monitors      | In this test case, two kinds of monitor are needed:          |
+|              | 1. the "openstack-cmd" monitor constantly requests a         |
+|              | specific OpenStack command, and needs two parameters:        |
+|              | 1) monitor_type: which is used for finding the monitor class |
+|              | and related scripts. It should always be set to              |
+|              | "openstack-cmd" for this monitor.                            |
+|              | 2) command_name: which is the command name used for request. |
+|              |                                                              |
+|              | e.g.                                                         |
+|              | -monitor_type: "openstack-cmd"                               |
+|              | -command_name: "nova flavor-list"                            |
+|              |                                                              |
+|              | 2. the second monitor verifies the read and write function   |
+|              | by an "operation" and a "result checker".                    |
+|              | the "operation" has two parameters:                          |
+|              | 1) operation_type: which is used for finding the operation   |
+|              | class and related scripts.                                   |
+|              | 2) action_parameter: parameters for the operation.           |
+|              | the "result checker" has three parameters:                   |
+|              | 1) checker_type: which is used for finding the result        |
+|              | checker class and related scripts.                           |
+|              | 2) expectedValue: the expected value for the output of the   |
+|              | checker script.                                              |
+|              | 3) condition: whether the expected value is in the output of |
+|              | the checker script or is exactly the same as the output.     |
+|              |                                                              |
+|              | In this case, the "operation" adds a flavor and the "result  |
+|              | checker" checks whether the flavor is created. Their         |
+|              | parameters are shown as follows:                             |
+|              | operation:                                                   |
+|              | -operation_type: "nova-create-flavor"                        |
+|              | -action_parameter:                                           |
+|              |     flavorconfig: "test-001 test-001 100 1 1"                |
+|              | result checker:                                              |
+|              | -checker_type: "check-flavor"                                |
+|              | -expectedValue: "test-001"                                   |
+|              | -condition: "in"                                             |
++--------------+--------------------------------------------------------------+
+|metrics       | In this test case, there is one metric:                      |
+|              | 1) service_outage_time: which indicates the maximum outage   |
+|              | time (seconds) of the specified OpenStack command request.   |
++--------------+--------------------------------------------------------------+
+|test tool     | Developed by the project. Please see folder:                 |
+|              | "yardstick/benchmark/scenarios/availability/ha_tools"        |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|references    | ETSI NFV REL001                                              |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|configuration | This test case needs two configuration files:                |
+|              | 1) test case file: opnfv_yardstick_tc052.yaml                |
+|              | -Attackers: see above "attackers" description                |
+|              | -waiting_time: which is the time (seconds) from the process  |
+|              | being killed to stopping the monitors                        |
+|              | -Monitors: see above "monitors" description                  |
+|              | -SLA: see above "metrics" description                        |
+|              |                                                              |
+|              | 2) POD file: pod.yaml                                        |
+|              | The POD configuration should be recorded in pod.yaml first.  |
+|              | The "host" item in this test case uses the node name in the  |
+|              | pod.yaml.                                                    |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|test sequence | description and expected result                              |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 1        | do attacker: connect the host through SSH, and then execute  |
+|              | the block-disk-I/O script on the host.                       |
+|              |                                                              |
+|              | Result: The disk I/O of the host will be blocked.            |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 2        | start monitors:                                              |
+|              | each monitor will run in an independent process.             |
+|              |                                                              |
+|              | Result: The monitor info will be collected.                  |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 3        | do operation: add a flavor                                   |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 4        | do result checker: check whether the flavor is created       |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 5        | stop monitors after a period of time specified by            |
+|              | "waiting_time"                                               |
+|              |                                                              |
+|              | Result: The monitor info will be aggregated.                 |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 6        | verify the SLA                                               |
+|              |                                                              |
+|              | Result: The test case is passed or not.                      |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|post-action   | It is the action taken when the test case exits. It executes |
+|              | the release-disk-I/O script to release the blocked I/O.      |
++--------------+--------------------------------------------------------------+
+|test verdict  | Fails if the monitor SLA is not passed or the result checker |
+|              | is not passed, or if there is a test case execution problem. |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
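The monitor and result checker described above map directly onto the task file added later in this diff; a trimmed fragment follows, with values copied from opnfv_yardstick_tc052.yaml:

    # Trimmed fragment of the TC052 scenario options, mirroring
    # opnfv_yardstick_tc052.yaml as added by this change.
    monitors:
    -
      monitor_type: "openstack-cmd"   # poll an OpenStack CLI command
      key: "nova-flavor-list"
      command_name: "nova flavor-list"
      monitor_time: 10                # seconds to keep polling
      sla:
        max_outage_time: 5            # tolerated outage in seconds

    resultCheckers:
    -
      checker_type: "general-result-checker"
      key: "check-flavor"
      host: node1
      checker_key: "nova-flavor-checker"
      expectedValue: "test-001"       # flavor name the checker expects
      condition: "in"                 # expected value must appear in the output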
diff --git a/docs/userguide/opnfv_yardstick_tc074.rst b/docs/userguide/opnfv_yardstick_tc074.rst
new file mode 100644
index 000000000..c938f5dfd
--- /dev/null
+++ b/docs/userguide/opnfv_yardstick_tc074.rst
@@ -0,0 +1,137 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International
+.. License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) OPNFV, Huawei Technologies Co.,Ltd and others.
+
+*************************************
+Yardstick Test Case Description TC074
+*************************************
+
+.. _Storperf: https://wiki.opnfv.org/display/storperf/Storperf
+
++-----------------------------------------------------------------------------+
+|Storperf                                                                     |
+|                                                                             |
++--------------+--------------------------------------------------------------+
+|test case id  | OPNFV_YARDSTICK_TC074_Storperf                               |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|metric        | Storage performance                                          |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|test purpose  | StorPerf integration with Yardstick. The purpose of StorPerf |
+|              | is to provide a tool to measure block and object storage     |
+|              | performance in an NFVI. When complemented with a             |
+|              | characterization of typical VNF storage performance          |
+|              | requirements, it can provide pass/fail thresholds for test,  |
+|              | staging, and production NFVI environments.                   |
+|              |                                                              |
+|              | The benchmarks developed for block and object storage will   |
+|              | be sufficiently varied to provide a good preview of expected |
+|              | storage performance behavior for any type of VNF workload.   |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|configuration | file: opnfv_yardstick_tc074.yaml                             |
+|              |                                                              |
+|              | * agent_count: 1 - the number of VMs to be created           |
+|              | * agent_image: "Ubuntu-14.04" - image used for creating VMs  |
+|              | * public_network: "ext-net" - name of public network         |
+|              | * volume_size: 2 - cinder volume size                        |
+|              | * block_sizes: "4096" - data block size                      |
+|              | * queue_depths: "4"                                          |
+|              | * StorPerf_ip: "192.168.200.2"                               |
+|              | * query_interval: 10 - state query interval                  |
+|              | * timeout: 600 - maximum allowed job time                    |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|test tool     | Storperf                                                     |
+|              |                                                              |
+|              | StorPerf is a tool to measure block and object storage       |
+|              | performance in an NFVI.                                      |
+|              |                                                              |
+|              | StorPerf is delivered as a Docker container from             |
+|              | https://hub.docker.com/r/opnfv/storperf/tags/.               |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|references    | Storperf_                                                    |
+|              |                                                              |
+|              | ETSI-NFV-TST001                                              |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|applicability | Test can be configured with different:                       |
+|              |                                                              |
+|              | * agent_count                                                |
+|              | * volume_size                                                |
+|              | * block_sizes                                                |
+|              | * queue_depths                                               |
+|              | * query_interval                                             |
+|              | * timeout                                                    |
+|              | * target=[device or path]                                    |
+|              |   The path to either an attached storage device              |
+|              |   (/dev/vdb, etc) or a directory path (/opt/storperf) that   |
+|              |   will be used to execute the performance test. In the case  |
+|              |   of a device, the entire device will be used. If not        |
+|              |   specified, the current directory will be used.             |
+|              | * workload=[workload module]                                 |
+|              |   If not specified, the default is to run all workloads. The |
+|              |   workload types are:                                        |
+|              |   - rs: 100% Read, sequential data                           |
+|              |   - ws: 100% Write, sequential data                          |
+|              |   - rr: 100% Read, random access                             |
+|              |   - wr: 100% Write, random access                            |
+|              |   - rw: 70% Read / 30% write, random access                  |
+|              | * nossd: Do not perform SSD-style preconditioning.           |
+|              | * nowarm: Do not perform a warmup prior to measurements.     |
+|              | * report=[job_id]                                            |
+|              |   Query the status of the supplied job_id and report on      |
+|              |   metrics. If a workload is supplied, will report on only    |
+|              |   that subset.                                               |
+|              |                                                              |
+|              | There are default values for each above-mentioned option.    |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|pre-test      | If you do not have an Ubuntu 14.04 image in Glance, you will |
+|conditions    | need to add one. A key pair for launching agents is also     |
+|              | required.                                                    |
+|              |                                                              |
+|              | StorPerf is required to be installed in the environment.     |
+|              | There are two possible methods for StorPerf installation:    |
+|              | Run container on Jump Host                                   |
+|              | Run container in a VM                                        |
+|              |                                                              |
+|              | Running StorPerf on Jump Host                                |
+|              | Requirements:                                                |
+|              | - Docker must be installed                                   |
+|              | - Jump Host must have access to the OpenStack Controller     |
+|              |   API                                                        |
+|              | - Jump Host must have internet connectivity for              |
+|              |   downloading docker image                                   |
+|              | - Enough floating IPs must be available to match your        |
+|              |   agent count                                                |
+|              |                                                              |
+|              | Running StorPerf in a VM                                     |
+|              | Requirements:                                                |
+|              | - VM has docker installed                                    |
+|              | - VM has OpenStack Controller credentials and can            |
+|              |   communicate with the Controller API                        |
+|              | - VM has internet connectivity for downloading the           |
+|              |   docker image                                               |
+|              | - Enough floating IPs must be available to match your        |
+|              |   agent count                                                |
+|              |                                                              |
+|              | No POD specific requirements have been identified.           |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|test sequence | description and expected result                              |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 1        | StorPerf is installed and the Ubuntu 14.04 image is stored   |
+|              | in Glance. TC is invoked and logs are produced and stored.   |
+|              |                                                              |
+|              | Result: Logs are stored.                                     |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|test verdict  | None. Storage performance results are fetched and stored.    |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
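The options above map onto the task file added later in this diff. The core of that opnfv_yardstick_tc074.yaml is sketched here; note that the task file itself uses volume_size: 4 and timeout: 300, while the doc text above lists the older defaults:

    ---
    # Core of the TC074 StorPerf scenario, as added by this change.
    schema: "yardstick:task:0.1"
    scenarios:
    -
      type: StorPerf
      options:
        agent_count: 1               # number of agent VMs to create
        agent_image: "Ubuntu-14.04"  # Glance image used for the agents
        public_network: "ext-net"
        volume_size: 4               # Cinder volume size in GB
        block_sizes: "4096"
        queue_depths: "4"
        StorPerf_ip: "192.168.200.2"
        query_interval: 10           # seconds between job status queries
        timeout: 300                 # maximum allowed job time in seconds
      runner:
        type: Iteration
        iterations: 1

    context:
      type: Dummy                    # StorPerf provisions its own agents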
diff --git a/plugin/CI/storperf.yaml b/plugin/CI/storperf.yaml
new file mode 100644
index 000000000..4407ddf8c
--- /dev/null
+++ b/plugin/CI/storperf.yaml
@@ -0,0 +1,13 @@
+---
+# StorPerf plugin configuration file for huawei-pod1
+# Used for integrating StorPerf into Yardstick as a plugin
+
+schema: "yardstick:plugin:0.1"
+
+plugins:
+  name: storperf
+
+deployment:
+  ip: local
+  user: root
+  password: root
diff --git a/tests/ci/docker/yardstick-ci/Dockerfile b/tests/ci/docker/yardstick-ci/Dockerfile
index cc23073d2..2d59fd69e 100644
--- a/tests/ci/docker/yardstick-ci/Dockerfile
+++ b/tests/ci/docker/yardstick-ci/Dockerfile
@@ -48,5 +48,6 @@ RUN cd ${YARDSTICK_REPO_DIR} && pip install -r tests/ci/requirements.txt
 RUN cd ${YARDSTICK_REPO_DIR} && pip install .
 
 ADD http://download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img /home/opnfv/images/
+ADD http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img /home/opnfv/images/
 
 COPY ./exec_tests.sh /usr/local/bin/
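In the plugin configuration above, "ip: local" is resolved by the plugin.py change at the end of this diff, which falls back to the INSTALLER_IP environment variable. A hypothetical variant targeting a remote host directly would carry an explicit address instead; the address below is a placeholder, not a value from this change:

    ---
    # Hypothetical variant of plugin/CI/storperf.yaml: deploy to an
    # explicit remote host instead of resolving "local" via the
    # INSTALLER_IP environment variable. The address is a placeholder.
    schema: "yardstick:plugin:0.1"

    plugins:
      name: storperf

    deployment:
      ip: 192.168.10.6   # placeholder jump-host address, reachable over SSH
      user: root
      password: root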
diff --git a/tests/ci/yardstick-verify b/tests/ci/yardstick-verify
index c83193574..0e234f54d 100755
--- a/tests/ci/yardstick-verify
+++ b/tests/ci/yardstick-verify
@@ -80,7 +80,7 @@ cleanup()
         return
     fi
 
-    for image in $(glance image-list | grep -e cirros-0.3.3 -e yardstick-trusty-server | awk '{print $2}'); do
+    for image in $(glance image-list | grep -e cirros-0.3.3 -e yardstick-trusty-server -e Ubuntu-14.04 | awk '{print $2}'); do
         echo "Deleting image $image..."
         glance image-delete $image || true
     done
@@ -121,6 +121,20 @@ install_yardstick()
     pip install .
 }
 
+install_storperf()
+{
+    # Install StorPerf on huawei-pod1
+    if [ "$NODE_NAME" == "huawei-pod1" ]; then
+        echo
+        echo "========== Installing storperf =========="
+
+        if ! yardstick plugin install plugin/CI/storperf.yaml; then
+            echo "Install storperf plugin FAILED"
+            exit 1
+        fi
+    fi
+}
+
 build_yardstick_image()
 {
     echo
@@ -174,6 +188,30 @@ load_cirros_image()
     echo "Cirros image id: $CIRROS_IMAGE_ID"
 }
 
+load_ubuntu_image()
+{
+    echo
+    echo "========== Loading ubuntu cloud image =========="
+
+    local ubuntu_image_file=/home/opnfv/images/trusty-server-cloudimg-amd64-disk1.img
+
+    output=$(glance image-create \
+        --name Ubuntu-14.04 \
+        --disk-format qcow2 \
+        --container-format bare \
+        --file $ubuntu_image_file)
+    echo "$output"
+
+    UBUNTU_IMAGE_ID=$(echo "$output" | grep " id " | awk '{print $(NF-1)}')
+
+    if [ -z "$UBUNTU_IMAGE_ID" ]; then
+        echo 'Failed uploading Ubuntu image to cloud.'
+        exit 1
+    fi
+
+    echo "Ubuntu image id: $UBUNTU_IMAGE_ID"
+}
+
 load_yardstick_image()
 {
     echo
@@ -255,13 +293,18 @@
 EOF
     done
 
-    if [ $failed -gt 0 ]; then
+    local scenario_status="success"
+    if [ $failed -gt 0 ]; then
+        scenario_status="failed"
         echo "---------------------------"
         echo "$failed out of ${SUITE_FILES[*]} test suites FAILED"
         echo "---------------------------"
         exit 1
-    fi
+    fi
+    curl -i -H 'content-type: application/json' -X POST -d \
+        "{\"details\":{\"${DEPLOY_SCENARIO}\":\"${scenario_status}\"}}" \
+        http://${DISPATCHER_HTTP_TARGET}:3570
 
     else
 
@@ -361,8 +404,10 @@ main()
     build_yardstick_image
     load_yardstick_image
     load_cirros_image
+    load_ubuntu_image
 
     create_nova_flavor
+    install_storperf
 
     run_test
}
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc004.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc004.yaml
deleted file mode 100644
index 2d10e4073..000000000
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc004.yaml
+++ /dev/null
@@ -1,85 +0,0 @@
----
-# Yardstick TC004 config file
-# Measure cache hit/miss ratio and usage, network throughput and latency.
-# Different amounts of flows are tested with, from 2 up to 1001000.
-# All tests are run 2 times each. First 2 times with the least
-# amount of ports, then 2 times with the next amount of ports,
-# and so on until all packet sizes have been run with.
-#
-# During the measurements cache hit/miss ration, cache usage statistics and
-# network latency are recorded/measured using cachestat and ping, respectively.
-
-schema: "yardstick:task:0.1"
-
-scenarios:
--
-  type: CACHEstat
-  run_in_background: true
-
-  options:
-    interval: 1
-
-  host: demeter.yardstick
--
-  type: CACHEstat
-  run_in_background: true
-
-  options:
-    interval: 1
-
-  host: poseidon.yardstick
--
-  type: Ping
-  run_in_background: true
-
-  options:
-    packetsize: 100
-
-  host: demeter.yardstick
-  target: poseidon.yardstick
-
-  sla:
-    max_rtt: 10
-    action: monitor
-{% for num_ports in [1, 10, 50, 100, 300, 500, 750, 1000] %}
--
-  type: Pktgen
-  options:
-    packetsize: 64
-    number_of_ports: {{num_ports}}
-    duration: 20
-
-  host: demeter.yardstick
-  target: poseidon.yardstick
-
-  runner:
-    type: Iteration
-    iterations: 2
-    interval: 1
-
-  sla:
-    max_ppm: 1000
-    action: monitor
-{% endfor %}
-
-context:
-  name: yardstick
-  image: yardstick-trusty-server
-  flavor: yardstick-flavor
-  user: ubuntu
-
-  placement_groups:
-    pgrp1:
-      policy: "availability"
-
-  servers:
-    demeter:
-      floating_ip: true
-      placement: "pgrp1"
-    poseidon:
-      floating_ip: true
-      placement: "pgrp1"
-
-  networks:
-    test:
-      cidr: '10.0.1.0/24'
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc052.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc052.yaml
new file mode 100644
index 000000000..714306881
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc052.yaml
@@ -0,0 +1,79 @@
+---
+# Test case for TC052: OpenStack Controller Node Disk I/O Block High Availability
+# This test case is written with the new scenario-based HA testing framework
+
+schema: "yardstick:task:0.1"
+scenarios:
+  -
+    type: "GeneralHA"
+    options:
+      attackers:
+        -
+          fault_type: "general-attacker"
+          host: node1
+          key: "block-io"
+          attack_key: "block-io"
+
+      monitors:
+        -
+          monitor_type: "openstack-cmd"
+          key: "nova-flavor-list"
+          command_name: "nova flavor-list"
+          monitor_time: 10
+          sla:
+            max_outage_time: 5
+
+      operations:
+        -
+          operation_type: "general-operation"
+          key: "create-flavor"
+          operation_key: "nova-create-flavor"
+          host: node1
+          action_parameter:
+            flavorconfig: "test-001 test-001 100 1 1"
+          rollback_parameter:
+            flavorid: "test-001"
+
+      resultCheckers:
+        -
+          checker_type: "general-result-checker"
+          key: "check-flavor"
+          host: node1
+          checker_key: "nova-flavor-checker"
+          expectedValue: "test-001"
+          condition: "in"
+
+      steps:
+        -
+          actionKey: "block-io"
+          actionType: "attacker"
+          index: 1
+
+        -
+          actionKey: "nova-flavor-list"
+          actionType: "monitor"
+          index: 2
+
+        -
+          actionKey: "create-flavor"
+          actionType: "operation"
+          index: 3
+
+        -
+          actionKey: "check-flavor"
+          actionType: "resultchecker"
+          index: 4
+
+    nodes:
+      node1: node1.LF
+    runner:
+      type: Duration
+      duration: 1
+    sla:
+      outage_time: 5
+      action: monitor
+
+context:
+  type: Node
+  name: LF
+  file: etc/yardstick/nodes/fuel_virtual/pod.yaml
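The Node context above references a pod.yaml describing the target nodes. A minimal sketch of such a file follows; the node name matches the test case, but the role, address, and key path are illustrative assumptions, not the LF POD's actual values:

    ---
    # Hypothetical minimal pod.yaml for the Node context above; only
    # node1 is shown, and ip/user/key_filename are placeholders.
    nodes:
    -
      name: node1
      role: Controller
      ip: 10.20.0.3                    # placeholder management address
      user: root
      key_filename: /root/.ssh/id_rsa  # SSH key used to reach the node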
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc074.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc074.yaml
new file mode 100644
index 000000000..d506ccc1e
--- /dev/null
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc074.yaml
@@ -0,0 +1,27 @@
+---
+# Test case for TC074: StorPerf benchmark task config file
+# StorPerf is a tool to measure block and object storage performance in an NFVI
+
+schema: "yardstick:task:0.1"
+{% set public_network = public_network or "ext-net" %}
+{% set StorPerf_ip = StorPerf_ip or "192.168.200.2" %}
+scenarios:
+-
+  type: StorPerf
+  options:
+    agent_count: 1
+    agent_image: "Ubuntu-14.04"
+    public_network: {{public_network}}
+    volume_size: 4
+    block_sizes: "4096"
+    queue_depths: "4"
+    StorPerf_ip: {{StorPerf_ip}}
+    query_interval: 10
+    timeout: 300
+
+  runner:
+    type: Iteration
+    iterations: 1
+
+context:
+  type: Dummy
diff --git a/tests/opnfv/test_suites/opnfv_os-nosdn-nofeature-ha_daily.yaml b/tests/opnfv/test_suites/opnfv_os-nosdn-nofeature-ha_daily.yaml
index a8ab42849..3c2ff556a 100644
--- a/tests/opnfv/test_suites/opnfv_os-nosdn-nofeature-ha_daily.yaml
+++ b/tests/opnfv/test_suites/opnfv_os-nosdn-nofeature-ha_daily.yaml
@@ -71,3 +71,11 @@ test_cases:
     file_name: opnfv_yardstick_tc071.yaml
 -
     file_name: opnfv_yardstick_tc072.yaml
+-
+    file_name: opnfv_yardstick_tc074.yaml
+    constraint:
+        installer: compass
+        pod: huawei-pod1
+    task_args:
+        huawei-pod1: '{"public_network": "ext-net",
+                       "StorPerf_ip": "192.168.200.2"}'
diff --git a/tools/ubuntu-server-cloudimg-dpdk-modify.sh b/tools/ubuntu-server-cloudimg-dpdk-modify.sh
index edcbeb61a..aa4e252ea 100755
--- a/tools/ubuntu-server-cloudimg-dpdk-modify.sh
+++ b/tools/ubuntu-server-cloudimg-dpdk-modify.sh
@@ -33,8 +33,8 @@ grep wily /etc/apt/sources.list && \
 # Force apt to use ipv4 due to build problems on LF POD.
 echo 'Acquire::ForceIPv4 "true";' > /etc/apt/apt.conf.d/99force-ipv4
 
-echo 'GRUB_CMDLINE_LINUX="resume=/dev/sda1 default_hugepagesz=1G hugepagesz=1G hugepages=1 iommu=on iommu=pt intel_iommu=on"' >> /etc/default/grub
-echo 'vm.nr_hugepages=512' >> /etc/sysctl.conf
+echo 'GRUB_CMDLINE_LINUX="resume=/dev/sda1 default_hugepagesz=1G hugepagesz=1G hugepages=2 iommu=on iommu=pt intel_iommu=on"' >> /etc/default/grub
+echo 'vm.nr_hugepages=1024' >> /etc/sysctl.conf
 
 echo 'huge /mnt/huge hugetlbfs defaults 0 0' >> vi /etc/fstab
 mkdir /mnt/huge
diff --git a/tools/yardstick-img-dpdk-finalize.sh b/tools/yardstick-img-dpdk-finalize.sh
index b9ea83e11..7a450e269 100644
--- a/tools/yardstick-img-dpdk-finalize.sh
+++ b/tools/yardstick-img-dpdk-finalize.sh
@@ -29,6 +29,9 @@ do
     fi
 done
 
+# workaround: without a wait here, the pktgen file has zero size in the snapshot
+sleep 60
+
 status=$(nova image-create --poll $stackname $new_image_name)
 if [[ "$status" =~ "Finished" ]];then
     echo "$new_image_name finished"
diff --git a/yardstick/cmd/commands/plugin.py b/yardstick/cmd/commands/plugin.py
index 8e3ddb5a5..0ab24fcfc 100644
--- a/yardstick/cmd/commands/plugin.py
+++ b/yardstick/cmd/commands/plugin.py
@@ -9,6 +9,7 @@
 """ Handler for yardstick command 'plugin' """
 
+import os
 import sys
 import yaml
 import time
@@ -80,12 +81,20 @@ class PluginCommands(object):
         deployment_user = deployment.get("user")
         deployment_ip = deployment.get("ip")
-
         deployment_password = deployment.get("password")
-        LOG.debug("user:%s, host:%s", deployment_user, deployment_ip)
-        self.client = ssh.SSH(deployment_user, deployment_ip,
-                              password=deployment_password)
-        self.client.wait(timeout=600)
+
+        if deployment_ip == "local":
+            installer_ip = os.environ.get("INSTALLER_IP", None)
+
+            LOG.debug("user:%s, host:%s", deployment_user, installer_ip)
+            self.client = ssh.SSH(deployment_user, installer_ip,
+                                  password=deployment_password)
+            self.client.wait(timeout=600)
+        else:
+            LOG.debug("user:%s, host:%s", deployment_user, deployment_ip)
+            self.client = ssh.SSH(deployment_user, deployment_ip,
+                                  password=deployment_password)
+            self.client.wait(timeout=600)
 
         # copy script to host
         cmd = "cat > ~/%s.sh" % plugin_name
@@ -99,12 +108,20 @@
         deployment_user = deployment.get("user")
         deployment_ip = deployment.get("ip")
-
         deployment_password = deployment.get("password")
-        LOG.debug("user:%s, host:%s", deployment_user, deployment_ip)
-        self.client = ssh.SSH(deployment_user, deployment_ip,
-                              password=deployment_password)
-        self.client.wait(timeout=600)
+
+        if deployment_ip == "local":
+            installer_ip = os.environ.get("INSTALLER_IP", None)
+
+            LOG.debug("user:%s, host:%s", deployment_user, installer_ip)
+            self.client = ssh.SSH(deployment_user, installer_ip,
+                                  password=deployment_password)
+            self.client.wait(timeout=600)
+        else:
+            LOG.debug("user:%s, host:%s", deployment_user, deployment_ip)
+            self.client = ssh.SSH(deployment_user, deployment_ip,
+                                  password=deployment_password)
+            self.client.wait(timeout=600)
 
         # copy script to host
         cmd = "cat > ~/%s.sh" % plugin_name