path: root/puppet/services/pacemaker/haproxy.yaml
heat_template_version: ocata

description: >
  HAProxy service with Pacemaker, configured with Puppet

parameters:
  ServiceNetMap:
    default: {}
    description: Mapping of service_name -> network name. Typically set
                 via parameter_defaults in the resource registry.  This
                 mapping overrides those in ServiceNetMapDefaults.
    type: json
  DefaultPasswords:
    default: {}
    type: json
  EndpointMap:
    default: {}
    description: Mapping of service endpoint -> protocol. Typically set
                 via parameter_defaults in the resource registry.
    type: json

resources:
  LoadbalancerServiceBase:
    type: ../haproxy.yaml
    properties:
      ServiceNetMap: {get_param: ServiceNetMap}
      DefaultPasswords: {get_param: DefaultPasswords}
      EndpointMap: {get_param: EndpointMap}

outputs:
  role_data:
    description: Role data for the HAProxy with Pacemaker role.
    value:
      service_name: haproxy
      monitoring_subscription: {get_attr: [LoadbalancerServiceBase, role_data, monitoring_subscription]}
      config_settings:
        map_merge:
          - get_attr: [LoadbalancerServiceBase, role_data, config_settings]
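          # Pacemaker, not Puppet, manages the haproxy service here, and Galera
          # clustercheck is enabled so HAProxy can health-check the MySQL backend.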
          - tripleo::haproxy::haproxy_service_manage: false
            tripleo::haproxy::mysql_clustercheck: true
      step_config: |
        include ::tripleo::profile::pacemaker::haproxy
      metadata_settings:
        get_attr: [LoadbalancerServiceBase, role_data, metadata_settings]
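
The Pacemaker variant above is normally swapped in for the base HAProxy service
through the resource registry, and ServiceNetMap/EndpointMap values are supplied
via parameter_defaults, as the parameter descriptions note. A minimal environment-file
sketch follows; the resource type name follows the usual TripleO convention, the
relative path depends on where the environment file lives, and the MysqlNetwork
override is purely illustrative:

resource_registry:
  # Use the Pacemaker-managed HAProxy template instead of puppet/services/haproxy.yaml.
  OS::TripleO::Services::HAproxy: ../puppet/services/pacemaker/haproxy.yaml

parameter_defaults:
  ServiceNetMap:
    # Example override of a single ServiceNetMapDefaults entry.
    MysqlNetwork: internal_api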
p">: 64 number_of_ports: {{num_ports}} duration: 20 host: demeter.yardstick-TC072 target: poseidon.yardstick-TC072 runner: type: Iteration iterations: 2 interval: 1 sla: max_ppm: 1000 action: monitor {% endfor %} context: name: yardstick-TC072 image: yardstick-image flavor: yardstick-flavor user: ubuntu placement_groups: pgrp1: policy: "availability" servers: demeter: floating_ip: true placement: "pgrp1" poseidon: floating_ip: true placement: "pgrp1" networks: test: cidr: '10.0.1.0/24'