aboutsummaryrefslogtreecommitdiffstats
path: root/samples/cyclictest-node-context.yaml
blob: 6f161a5e829f75335ae333608e4ee0e31d6e8818 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
##############################################################################
# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
# Sample benchmark task config file
# Measure the system's high-resolution timer latency using Cyclictest
#
# This sample is equivalent to running the command below on the test VM
# and reporting the measured latencies back to Yardstick.
#
# sudo bash cyclictest -a 1 -i 1000 -p 99 -l 1000 -t 1 -h 90 -m -n -q
#

schema: "yardstick:task:0.1"

scenarios:
-
  type: Cyclictest
  # Each key below maps to a cyclictest command-line flag
  # (see the sample command in the header comment).
  options:
    affinity: 1       # -a: pin threads to CPU 1
    interval: 1000    # -i: base interval of the thread
    priority: 99      # -p: scheduling priority of the highest-prio thread
    loops: 1000       # -l: number of loops
    threads: 1        # -t: number of test threads
    histogram: 90     # -h: dump a latency histogram (with this upper bound)
  # Target node; presumably "<node>.<context name>", i.e. node "kvm" in
  # the "LF" context declared below — verify against the pod file.
  host: kvm.LF
  runner:
    type: Duration
    duration: 1       # run the scenario for this long
    interval: 1       # interval between iterations
  # Service-level agreement thresholds for the reported min/avg/max
  # latencies; units are not stated here — presumably microseconds
  # (cyclictest's native unit) — TODO confirm.
  sla:
    max_min_latency: 50
    max_avg_latency: 100
    max_max_latency: 1000
    action: monitor   # on SLA violation: keep monitoring (do not abort)
  # Host/guest provisioning: scripts are executed in list order; the
  # literal "reboot" entry presumably triggers a reboot between steps
  # rather than naming a script — verify against the Cyclictest scenario
  # implementation.
  setup_options:
    rpm_dir: "/opt/rpm"
    script_dir: "/opt/scripts"
    image_dir: "/opt/image"
    host_setup_seqs:
    - "host-setup0.sh"
    - "reboot"
    - "host-setup1.sh"
    - "host-run-qemu.sh"
    guest_setup_seqs:
    - "guest-setup0.sh"
    - "reboot"
    - "guest-setup1.sh"

# Bare-metal (Node) test context; the nodes themselves are described in
# the pod descriptor file referenced below.
context:
  name: 'LF'
  type: 'Node'
  file: '/root/yardstick/pod.yaml'
class="l l-Scalar l-Scalar-Plain">cluster.baremetal-mcp-ocata-ovs-dpdk-ha.infra parameters: _param: linux_system_codename: xenial cluster_vip_address: ${_param:infra_kvm_address} cluster_node01_address: ${_param:infra_kvm_node01_address} cluster_node02_address: ${_param:infra_kvm_node02_address} cluster_node03_address: ${_param:infra_kvm_node03_address} keepalived_vip_interface: br-ctl keepalived_vip_virtual_router_id: 69 deploy_nic: ${_param:openstack_primary_nic} salt: control: size: # RAM 4096,8192,16384,32768,65536 # Default production sizing openstack.control: cpu: 4 ram: 12288 disk_profile: small net_profile: default openstack.database: cpu: 4 ram: 6144 disk_profile: large net_profile: default openstack.message_queue: cpu: 4 ram: 2048 disk_profile: small net_profile: default openstack.telemetry: cpu: 2 ram: 3072 disk_profile: xxlarge net_profile: default openstack.proxy: cpu: 2 ram: 2048 disk_profile: small net_profile: default # stacklight.log: # cpu: 2 # ram: 4096 # disk_profile: xxlarge # net_profile: default # stacklight.server: # cpu: 2 # ram: 4096 # disk_profile: small # net_profile: default # stacklight.telemetry: # cpu: 2 # ram: 4096 # disk_profile: xxlarge # net_profile: default cluster: internal: node: mdb01: image: ${_param:salt_control_xenial_image} mdb02: image: ${_param:salt_control_xenial_image} mdb03: image: ${_param:salt_control_xenial_image} ctl01: image: ${_param:salt_control_xenial_image} ctl02: image: ${_param:salt_control_xenial_image} ctl03: image: ${_param:salt_control_xenial_image} dbs01: image: ${_param:salt_control_xenial_image} dbs02: image: ${_param:salt_control_xenial_image} dbs03: image: ${_param:salt_control_xenial_image} msg01: image: ${_param:salt_control_xenial_image} msg02: image: ${_param:salt_control_xenial_image} msg03: image: ${_param:salt_control_xenial_image} prx01: image: ${_param:salt_control_xenial_image} prx02: image: ${_param:salt_control_xenial_image} virt: nic: default: eth1: bridge: br-mgmt model: virtio eth0: 
bridge: br-ctl model: virtio glusterfs: server: volumes: nova_instances: storage: /srv/glusterfs/nova_instances replica: 3 bricks: - ${_param:cluster_node01_address}:/srv/glusterfs/nova_instances - ${_param:cluster_node02_address}:/srv/glusterfs/nova_instances - ${_param:cluster_node03_address}:/srv/glusterfs/nova_instances options: cluster.readdir-optimize: 'On' nfs.disable: 'On' network.remote-dio: 'On' diagnostics.client-log-level: WARNING diagnostics.brick-log-level: WARNING linux: network: interface: deploy: enabled: true type: eth proto: manual address: 0.0.0.0 netmask: 255.255.255.0 name: ${_param:deploy_nic} noifupdown: true br-mgmt: enabled: true proto: dhcp type: bridge name_servers: ${_param:opnfv_name_servers} use_interfaces: - ${_param:deploy_nic} noifupdown: true mgmt-vlan: enabled: true proto: manual type: vlan name: ${_param:deploy_nic}.${_param:opnfv_net_mgmt_vlan} use_interfaces: - ${_param:deploy_nic} br-ctl: enabled: true type: bridge proto: static address: ${_param:single_address} netmask: 255.255.255.0 use_interfaces: - ${_param:deploy_nic}.${_param:opnfv_net_mgmt_vlan}