aboutsummaryrefslogtreecommitdiffstats
path: root/tests/opnfv/test_cases/opnfv_yardstick_tc091.yaml
blob: d952464a1deb6b278a2cd3a0047d6c4c76c35fde (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
##############################################################################
# Copyright (c) 2017 14_ykl@tongji.edu.cn and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---

schema: "yardstick:task:0.1"
description: >
    Yardstick TC091 config file;
    HA test case: Control node Openstack service down - heat-api.

# Template defaults; each may be overridden via task arguments.
{% set file = file or '/etc/yardstick/pod.yaml' %}
{% set attack_host = attack_host or "node1" %}
{% set attack_process = attack_process or "heat-api" %}

scenarios:
-
  type: ServiceHA
  options:
    # Attacker: kill the heat-api process on the target control node.
    attackers:
    - fault_type: "kill-process"
      process_name: "{{ attack_process }}"
      # Quoted so an empty or boolean/number-looking expansion still
      # renders as a YAML string instead of null/bool/int.
      host: "{{ attack_host }}"

    monitors:
    # Service-level monitor: the OpenStack CLI must keep responding.
    - monitor_type: "openstack-cmd"
      command_name: "openstack stack list"
      monitor_time: 10
      monitor_number: 3
      sla:
        max_outage_time: 5
    # Process-level monitor: the killed process must be recovered.
    - monitor_type: "process"
      process_name: "{{ attack_process }}"
      host: "{{ attack_host }}"
      monitor_time: 30
      monitor_number: 3
      sla:
        max_recover_time: 30

  nodes:
    {{attack_host}}: "{{ attack_host }}.LF"

  runner:
    type: Duration
    duration: 1
  sla:
    outage_time: 5
    action: monitor


context:
  type: Node
  name: LF
  file: "{{ file }}"
node_id, 'libvirtTemplate')) check_file_exists(vm_template) disk_path = '%s/%s.raw' % (self.storage_dir, vm_name) self.create_storage(node_id, disk_path, disk_sizes) temp_vm_file = '%s/%s' % (temp_dir, vm_name) exec_cmd('cp %s %s' % (vm_template, temp_vm_file)) vm_definition_overwrite = self.dha.get_vm_definition( self.dea.get_node_main_role(node_id, self.fuel_node_id)) self.define_vm(vm_name, temp_vm_file, disk_path, vm_definition_overwrite) delete(temp_dir) def start_vms(self): for node_id in self.node_ids: self.dha.node_power_on(node_id) def create_networks(self): for net_file in glob.glob('%s/*' % self.network_dir): exec_cmd('virsh net-define %s' % net_file) for net in self.net_names: log('Creating network %s' % net) exec_cmd('virsh net-autostart %s' % net) exec_cmd('virsh net-start %s' % net) def delete_networks(self): for net in self.net_names: log('Deleting network %s' % net) exec_cmd('virsh net-destroy %s' % net, False) exec_cmd('virsh net-undefine %s' % net, False) def get_net_name(self, net_file): with open(net_file) as f: net_xml = etree.parse(f) name_list = net_xml.xpath('/network/name') for name in name_list: net_name = name.text return net_name def collect_net_names(self): net_list = [] for net_file in glob.glob('%s/*' % self.network_dir): name = self.get_net_name(net_file) net_list.append(name) return net_list def delete_vms(self): for node_id in self.node_ids: self.delete_vm(node_id) def setup_environment(self): check_dir_exists(self.network_dir) self.cleanup_environment() self.create_networks() self.create_vms() self.start_vms() def cleanup_environment(self): self.delete_vms() self.delete_networks()