path: root/etc/testcase/functest.tempest.multi_node_scheduling.yml
##############################################################################
# Copyright (c) 2019 opnfv.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

---
functest.tempest.multi_node_scheduling:
  name: functest.tempest.multi_node_scheduling
  objective: simple virtual machine resource scheduling on multiple nodes
  validate:
    type: functest
    testcase: tempest_custom
    pre_condition:
      - 'cp /home/opnfv/pre_config/tempest_conf.yaml /usr/lib/python2.7/site-packages/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml'
      - 'cp /home/opnfv/userconfig/tempest_custom_testcases.yaml /usr/lib/python2.7/site-packages/xtesting/ci/testcases.yaml'
      - 'cp /home/opnfv/functest/results/tempest_custom.txt /usr/lib/python2.7/site-packages/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt'
  report:
    source_archive_files:
      - functest.log
      - tempest_custom/rally.log
      - tempest_custom/tempest-report.html
    dest_archive_files:
      - tempest_logs/functest.tempest.multi_node_scheduling.functest.log
      - tempest_logs/functest.tempest.multi_node_scheduling.log
      - tempest_logs/functest.tempest.multi_node_scheduling.html
    check_results_files:
      - 'functest_results.txt'
    sub_testcase_list:
      - tempest.scenario.test_server_multinode.TestServerMultinode.test_schedule_to_all_nodes[compute,id-9cecbe35-b9d4-48da-a37e-7ce70aa43d30,network,smoke]
      - tempest.api.compute.servers.test_server_group.ServerGroupTestJSON.test_create_delete_multiple_server_groups_with_same_name_policy[id-154dc5a4-a2fe-44b5-b99e-f15806a4a113]
      - tempest.api.compute.servers.test_server_group.ServerGroupTestJSON.test_create_delete_server_group_with_affinity_policy[id-5dc57eda-35b7-4af7-9e5f-3c2be3d2d68b]
      - tempest.api.compute.servers.test_server_group.ServerGroupTestJSON.test_create_delete_server_group_with_anti_affinity_policy[id-3645a102-372f-4140-afad-13698d850d23]
      - tempest.api.compute.servers.test_server_group.ServerGroupTestJSON.test_list_server_groups[id-d4874179-27b4-4d7d-80e4-6c560cdfe321]
      - tempest.api.compute.servers.test_server_group.ServerGroupTestJSON.test_show_server_group[id-b3545034-dd78-48f0-bdc2-a4adfa6d0ead]
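
For reference, the last pre_condition above copies a custom test list into
Functest's tempest_custom test case. A minimal sketch of that test_list.txt,
assuming it simply enumerates the sub_testcase_list IDs above one per line,
could look like::

    tempest.scenario.test_server_multinode.TestServerMultinode.test_schedule_to_all_nodes[compute,id-9cecbe35-b9d4-48da-a37e-7ce70aa43d30,network,smoke]
    tempest.api.compute.servers.test_server_group.ServerGroupTestJSON.test_create_delete_server_group_with_affinity_policy[id-5dc57eda-35b7-4af7-9e5f-3c2be3d2d68b]

with the remaining sub_testcase_list entries following in the same format.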
|              | 1) fault_type: which is used for finding the attacker's     |
|              |    scripts. It should always be set to "kill-process" in    |
|              |    this test case.                                           |
|              | 2) process_name: which is the process name of the specified |
|              |    OpenStack service. If multiple processes use the same    |
|              |    name on the host, all of them are killed by this         |
|              |    attacker. In this case, this parameter should always be  |
|              |    set to "neutron-server".                                  |
|              | 3) host: which is the name of a control node being attacked.|
|              |                                                              |
|              | e.g.                                                         |
|              | -fault_type: "kill-process"                                  |
|              | -process_name: "neutron-server"                              |
|              | -host: node1                                                 |
|              |                                                              |
+--------------+--------------------------------------------------------------+
|monitors      | In this test case, two kinds of monitor are needed:          |
|              | 1. the "openstack-cmd" monitor constantly requests a         |
|              |    specific OpenStack command, which needs two parameters:   |
|              | 1) monitor_type: which is used for finding the monitor class |
|              |    and related scripts. It should always be set to           |
|              |    "openstack-cmd" for this monitor.                         |
|              | 2) command_name: which is the command name used for the      |
|              |    request. In this case, the command name should be a       |
|              |    neutron related command.                                  |
|              |                                                              |
|              | 2. the "process" monitor checks whether a process is running |
|              |    on a specific node, which needs three parameters:         |
|              | 1) monitor_type: which is used for finding the monitor class |
|              |    and related scripts. It should always be set to "process" |
|              |    for this monitor.                                         |
|              | 2) process_name: which is the process name to monitor        |
|              | 3) host: which is the name of the node running the process   |
|              |                                                              |
|              | e.g.                                                         |
|              | monitor1:                                                    |
|              | -monitor_type: "openstack-cmd"                               |
|              | -command_name: "neutron agent-list"                          |
|              | monitor2:                                                    |
|              | -monitor_type: "process"                                     |
|              | -process_name: "neutron-server"                              |
|              | -host: node1                                                 |
|              |                                                              |
+--------------+--------------------------------------------------------------+
|metrics       | In this test case, there are two metrics:                    |
|              | 1) service_outage_time: which indicates the maximum outage   |
|              |    time (seconds) of the specified OpenStack command request.|
|              | 2) process_recover_time: which indicates the maximum time    |
|              |    (seconds) from the process being killed to its recovery   |
|              |                                                              |
+--------------+--------------------------------------------------------------+
|test tool     | Developed by the project. Please see folder:                 |
|              | "yardstick/benchmark/scenarios/availability/ha_tools"        |
|              |                                                              |
+--------------+--------------------------------------------------------------+
|references    | ETSI NFV REL001                                              |
|              |                                                              |
+--------------+--------------------------------------------------------------+
|configuration | This test case needs two configuration files:                |
|              | 1) test case file: opnfv_yardstick_tc045.yaml                |
|              |    -Attackers: see above "attackers" description             |
|              |    -waiting_time: which is the time (seconds) from the       |
|              |     process being killed to stopping the monitors            |
|              |    -Monitors: see above "monitors" description               |
|              |    -SLA: see above "metrics" description                     |
|              |    (a YAML sketch of this file follows the table)            |
|              |                                                              |
|              | 2) POD file: pod.yaml                                        |
|              |    The POD configuration should be recorded in pod.yaml      |
|              |    first. The "host" item in this test case will use the     |
|              |    node names in pod.yaml.                                   |
|              |                                                              |
+--------------+--------------------------------------------------------------+
|test sequence | description and expected result                              |
|              |                                                              |
+--------------+--------------------------------------------------------------+
|step 1        | start monitors:                                              |
|              | each monitor will run in an independent process              |
|              |                                                              |
|              | Result: The monitor info will be collected.                  |
|              |                                                              |
+--------------+--------------------------------------------------------------+
|step 2        | do attacker: connect to the host through SSH, and then       |
|              | execute the kill-process script with the parameter value     |
|              | specified by "process_name"                                  |
|              |                                                              |
|              | Result: Process will be killed.                              |
|              |                                                              |
+--------------+--------------------------------------------------------------+
|step 3        | stop monitors after a period of time specified by            |
|              | "waiting_time"                                               |
|              |                                                              |
|              | Result: The monitor info will be aggregated.                 |
|              |                                                              |
+--------------+--------------------------------------------------------------+
|step 4        | verify the SLA                                               |
|              |                                                              |
|              | Result: The test case is passed or not.                      |
|              |                                                              |
+--------------+--------------------------------------------------------------+
|post-action   | It is the action taken when the test case exits. It will     |
|              | check the status of the specified process on the host, and   |
|              | restart the process if it is not running, for the next test  |
|              | cases.                                                       |
|              |                                                              |
|              | Notice: This post-action uses the 'lsb_release' command to   |
|              | check the host Linux distribution and determine the          |
|              | OpenStack service name to restart the process. Lack of       |
|              | 'lsb_release' on the host may cause failure to restart the   |
|              | process.                                                     |
|              |                                                              |
+--------------+--------------------------------------------------------------+
|test verdict  | Fails only if SLA is not passed, or if there is a test case  |
|              | execution problem.                                           |
|              |                                                              |
+--------------+--------------------------------------------------------------+
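
As a concrete illustration of the configuration described above, here is a
minimal sketch of the attacker, monitor and SLA sections of
opnfv_yardstick_tc045.yaml. Only the parameter names called out in the table
(fault_type, process_name, host, monitor_type, command_name, waiting_time,
service_outage_time, process_recover_time) come from the source; the
surrounding nesting and the numeric values are illustrative assumptions, not
the authoritative Yardstick schema::

    # Sketch only: nesting and numeric values are assumptions, not the
    # exact Yardstick schema for this test case.
    attackers:
      - fault_type: "kill-process"
        process_name: "neutron-server"
        host: node1                  # node name as recorded in pod.yaml

    monitors:
      - monitor_type: "openstack-cmd"
        command_name: "neutron agent-list"
      - monitor_type: "process"
        process_name: "neutron-server"
        host: node1

    waiting_time: 30                 # seconds between the kill and stopping monitors

    sla:
      service_outage_time: 5         # max outage (seconds) of the OpenStack command
      process_recover_time: 30       # max time (seconds) for the process to recover

In this layout, the SLA thresholds map directly onto the two metrics in the
table: the openstack-cmd monitor bounds service_outage_time and the process
monitor bounds process_recover_time.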