-rw-r--r--  environments/external-loadbalancer-vip.yaml         |  14
-rw-r--r--  network/ports/ctlplane_vip.yaml                     |   1
-rw-r--r--  network/ports/external.yaml                         |   1
-rw-r--r--  network/ports/internal_api.yaml                     |   1
-rw-r--r--  network/ports/net_vip_map_external.yaml             |  50
-rw-r--r--  network/ports/storage.yaml                          |   2
-rw-r--r--  network/ports/storage_mgmt.yaml                     |   1
-rw-r--r--  network/ports/tenant.yaml                           |   1
-rw-r--r--  network/ports/vip.yaml                              |   1
-rw-r--r--  overcloud-resource-registry-puppet.yaml             |   1
-rw-r--r--  overcloud-resource-registry.yaml                    |   1
-rw-r--r--  overcloud-without-mergepy.yaml                      |  22
-rw-r--r--  puppet/controller.yaml                              |   6
-rw-r--r--  puppet/manifests/overcloud_controller.pp            |  10
-rw-r--r--  puppet/manifests/overcloud_controller_pacemaker.pp  | 290
15 files changed, 246 insertions(+), 156 deletions(-)
diff --git a/environments/external-loadbalancer-vip.yaml b/environments/external-loadbalancer-vip.yaml
new file mode 100644
index 00000000..47d5bd9b
--- /dev/null
+++ b/environments/external-loadbalancer-vip.yaml
@@ -0,0 +1,14 @@
+resource_registry:
+ OS::TripleO::Network::Ports::NetVipMap: ../network/ports/net_vip_map_external.yaml
+
+parameter_defaults:
+ # When using an external loadbalancer set the following in parameter_defaults
+ # to control your VIPs (currently one per network)
+ # NOTE: we will eventually move to one VIP per service
+ #
+ # ControlNetworkVip:
+ # ExternalNetworkVip:
+ # InternalApiNetworkVip:
+ # StorageNetworkVip:
+ # StorageMgmtNetworkVip:
+  EnableLoadBalancer: false
\ No newline at end of file
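
For reference, a filled-in copy of this new environment file might look as follows when pointing the overcloud at an external load balancer. This is a sketch only; the VIP addresses are illustrative placeholders (documentation/test ranges), not values taken from the commit:

    # Sketch: example VIPs, one per network; substitute the addresses
    # actually configured on the external load balancer.
    resource_registry:
      OS::TripleO::Network::Ports::NetVipMap: ../network/ports/net_vip_map_external.yaml

    parameter_defaults:
      ControlNetworkVip: 192.0.2.10
      ExternalNetworkVip: 10.0.0.10
      InternalApiNetworkVip: 172.16.2.10
      StorageNetworkVip: 172.16.1.10
      StorageMgmtNetworkVip: 172.16.3.10
      EnableLoadBalancer: false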
diff --git a/network/ports/ctlplane_vip.yaml b/network/ports/ctlplane_vip.yaml
index 0d2945bc..3e949f41 100644
--- a/network/ports/ctlplane_vip.yaml
+++ b/network/ports/ctlplane_vip.yaml
@@ -15,6 +15,7 @@ parameters:
type: string
ControlPlaneIP: # Here for compatability with noop.yaml
description: IP address on the control plane
+ default: ''
type: string
ControlPlaneNetwork:
description: The name of the undercloud Neutron control plane
diff --git a/network/ports/external.yaml b/network/ports/external.yaml
index 63e3eeb3..1e2fff68 100644
--- a/network/ports/external.yaml
+++ b/network/ports/external.yaml
@@ -15,6 +15,7 @@ parameters:
type: string
ControlPlaneIP: # Here for compatability with noop.yaml
description: IP address on the control plane
+ default: ''
type: string
ControlPlaneNetwork: # Here for compatibility with ctlplane_vip.yaml
description: The name of the undercloud Neutron control plane
diff --git a/network/ports/internal_api.yaml b/network/ports/internal_api.yaml
index da1b1856..b671aa5f 100644
--- a/network/ports/internal_api.yaml
+++ b/network/ports/internal_api.yaml
@@ -14,6 +14,7 @@ parameters:
type: string
ControlPlaneIP: # Here for compatability with noop.yaml
description: IP address on the control plane
+ default: ''
type: string
resources:
diff --git a/network/ports/net_vip_map_external.yaml b/network/ports/net_vip_map_external.yaml
new file mode 100644
index 00000000..36426b32
--- /dev/null
+++ b/network/ports/net_vip_map_external.yaml
@@ -0,0 +1,50 @@
+heat_template_version: 2015-04-30
+
+parameters:
+ # Set these via parameter defaults to configure external VIPs
+ ControlNetworkVip:
+ default: ''
+ type: string
+ ExternalNetworkVip:
+ default: ''
+ type: string
+ InternalApiNetworkVip:
+ default: ''
+ type: string
+ StorageNetworkVip:
+ default: ''
+ type: string
+ StorageMgmtNetworkVip:
+ default: ''
+ type: string
+ # The following are unused in this template
+ ControlPlaneIp:
+ default: ''
+ type: string
+ ExternalIp:
+ default: ''
+ type: string
+ InternalApiIp:
+ default: ''
+ type: string
+ StorageIp:
+ default: ''
+ type: string
+ StorageMgmtIp:
+ default: ''
+ type: string
+ TenantIp:
+ default: ''
+ type: string
+
+outputs:
+ net_ip_map:
+ description: >
+ A Hash containing a mapping of network names to assigned IPs
+ for a specific machine.
+ value:
+ ctlplane: {get_param: ControlNetworkVip}
+ external: {get_param: ExternalNetworkVip}
+ internal_api: {get_param: InternalApiNetworkVip}
+ storage: {get_param: StorageNetworkVip}
+ storage_mgmt: {get_param: StorageMgmtNetworkVip}
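
Given parameter values like those in the sketch above, the net_ip_map output of this template simply echoes the externally managed VIPs back as a hash, along these lines (same placeholder addresses):

    # Sketch of the resolved output value; addresses are the placeholders used above.
    net_ip_map:
      ctlplane: 192.0.2.10
      external: 10.0.0.10
      internal_api: 172.16.2.10
      storage: 172.16.1.10
      storage_mgmt: 172.16.3.10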
diff --git a/network/ports/storage.yaml b/network/ports/storage.yaml
index ecb20b8f..e1ef8e03 100644
--- a/network/ports/storage.yaml
+++ b/network/ports/storage.yaml
@@ -14,6 +14,7 @@ parameters:
type: string
ControlPlaneIP: # Here for compatability with noop.yaml
description: IP address on the control plane
+ default: ''
type: string
resources:
@@ -39,4 +40,3 @@ outputs:
- '/'
- {get_attr: [StoragePort, subnets, 0, cidr, -2]}
- {get_attr: [StoragePort, subnets, 0, cidr, -1]}
-
diff --git a/network/ports/storage_mgmt.yaml b/network/ports/storage_mgmt.yaml
index 2ab39f21..e5b5b884 100644
--- a/network/ports/storage_mgmt.yaml
+++ b/network/ports/storage_mgmt.yaml
@@ -14,6 +14,7 @@ parameters:
type: string
ControlPlaneIP: # Here for compatability with noop.yaml
description: IP address on the control plane
+ default: ''
type: string
resources:
diff --git a/network/ports/tenant.yaml b/network/ports/tenant.yaml
index aae12d46..9a351a24 100644
--- a/network/ports/tenant.yaml
+++ b/network/ports/tenant.yaml
@@ -14,6 +14,7 @@ parameters:
type: string
ControlPlaneIP: # Here for compatability with noop.yaml
description: IP address on the control plane
+ default: ''
type: string
resources:
diff --git a/network/ports/vip.yaml b/network/ports/vip.yaml
index 299579dc..56efc178 100644
--- a/network/ports/vip.yaml
+++ b/network/ports/vip.yaml
@@ -15,6 +15,7 @@ parameters:
type: string
ControlPlaneIP: # Here for compatability with noop.yaml
description: IP address on the control plane
+ default: ''
type: string
ControlPlaneNetwork:
description: The name of the undercloud Neutron control plane
diff --git a/overcloud-resource-registry-puppet.yaml b/overcloud-resource-registry-puppet.yaml
index 18824ace..c072c292 100644
--- a/overcloud-resource-registry-puppet.yaml
+++ b/overcloud-resource-registry-puppet.yaml
@@ -58,6 +58,7 @@ resource_registry:
OS::TripleO::Network::Storage: network/noop.yaml
OS::TripleO::Network::Tenant: network/noop.yaml
+ OS::TripleO::Network::Ports::NetVipMap: network/ports/net_ip_map.yaml
OS::TripleO::Network::Ports::NetIpMap: network/ports/net_ip_map.yaml
OS::TripleO::Network::Ports::NetIpSubnetMap: network/ports/net_ip_subnet_map.yaml
OS::TripleO::Network::Ports::NetIpListMap: network/ports/net_ip_list_map.yaml
diff --git a/overcloud-resource-registry.yaml b/overcloud-resource-registry.yaml
index ed02551b..11a33599 100644
--- a/overcloud-resource-registry.yaml
+++ b/overcloud-resource-registry.yaml
@@ -39,6 +39,7 @@ resource_registry:
OS::TripleO::Network::Storage: network/noop.yaml
OS::TripleO::Network::Tenant: network/noop.yaml
+ OS::TripleO::Network::Ports::NetVipMap: network/ports/net_ip_map.yaml
OS::TripleO::Network::Ports::NetIpMap: network/ports/net_ip_map.yaml
OS::TripleO::Network::Ports::NetIpSubnetMap: network/ports/net_ip_subnet_map.yaml
OS::TripleO::Network::Ports::NetIpListMap: network/ports/net_ip_list_map.yaml
diff --git a/overcloud-without-mergepy.yaml b/overcloud-without-mergepy.yaml
index a0fc7581..10d1f278 100644
--- a/overcloud-without-mergepy.yaml
+++ b/overcloud-without-mergepy.yaml
@@ -882,8 +882,8 @@ resources:
SwiftPartPower: {get_param: SwiftPartPower}
SwiftPassword: {get_param: SwiftPassword}
SwiftReplicas: { get_param: SwiftReplicas}
- VirtualIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]} # deprecated. Use per service VIP settings instead now.
- PublicVirtualIP: {get_attr: [PublicVirtualIP, ip_address]}
+ VirtualIP: {get_attr: [VipMap, net_ip_map, ctlplane]} # deprecated. Use per service VIP settings instead now.
+ PublicVirtualIP: {get_attr: [VipMap, net_ip_map, external]}
ServiceNetMap: {get_param: ServiceNetMap}
EndpointMap: {get_attr: [EndpointMap, endpoint_map]}
CeilometerApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, CeilometerApiNetwork]}]}
@@ -957,7 +957,7 @@ resources:
NovaComputeExtraConfig: {get_param: NovaComputeExtraConfig}
NovaComputeLibvirtType: {get_param: NovaComputeLibvirtType}
NovaEnableRbdBackend: {get_param: NovaEnableRbdBackend}
- NovaPublicIP: {get_attr: [PublicVirtualIP, ip_address]}
+ NovaPublicIP: {get_attr: [VipMap, net_ip_map, external]}
NovaPassword: {get_param: NovaPassword}
NtpServer: {get_param: NtpServer}
RabbitHost: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, RabbitMqNetwork]}]}
@@ -994,7 +994,7 @@ resources:
CinderPassword: {get_param: CinderPassword}
KeyName: {get_param: KeyName}
Flavor: {get_param: OvercloudBlockStorageFlavor}
- VirtualIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
+ VirtualIP: {get_attr: [VipMap, net_ip_map, ctlplane]}
GlanceApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, GlanceApiNetwork]}]}
RabbitPassword: {get_param: RabbitPassword}
RabbitUserName: {get_param: RabbitUserName}
@@ -1172,7 +1172,7 @@ resources:
PortName: storage_management_virtual_ip
VipMap:
- type: OS::TripleO::Network::Ports::NetIpMap
+ type: OS::TripleO::Network::Ports::NetVipMap
properties:
ControlPlaneIp: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
ExternalIp: {get_attr: [PublicVirtualIP, ip_address]}
@@ -1207,11 +1207,11 @@ resources:
mysql_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, MysqlNetwork]}]}
rabbit_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, RabbitMqNetwork]}]}
# direct configuration of Virtual IPs for each network
- control_virtual_ip: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
- public_virtual_ip: {get_attr: [PublicVirtualIP, ip_address]}
- internal_api_virtual_ip: {get_attr: [InternalApiVirtualIP, ip_address]}
- storage_virtual_ip: {get_attr: [StorageVirtualIP, ip_address]}
- storage_mgmt_virtual_ip: {get_attr: [StorageMgmtVirtualIP, ip_address]}
+ control_virtual_ip: {get_attr: [VipMap, net_ip_map, ctlplane]}
+ public_virtual_ip: {get_attr: [VipMap, net_ip_map, external]}
+ internal_api_virtual_ip: {get_attr: [VipMap, net_ip_map, internal_api]}
+ storage_virtual_ip: {get_attr: [VipMap, net_ip_map, storage]}
+ storage_mgmt_virtual_ip: {get_attr: [VipMap, net_ip_map, storage_mgmt]}
ControllerBootstrapNodeConfig:
type: OS::TripleO::BootstrapNode::SoftwareConfig
@@ -1445,7 +1445,7 @@ outputs:
value: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, KeystoneAdminApiNetwork]}]}
PublicVip:
description: Controller VIP for public API endpoints
- value: {get_attr: [PublicVirtualIP, ip_address]}
+ value: {get_attr: [VipMap, net_ip_map, external]}
CeilometerInternalVip:
description: VIP for Ceilometer API internal endpoint
value: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, CeilometerApiNetwork]}]}
diff --git a/puppet/controller.yaml b/puppet/controller.yaml
index 9792e7dc..861b29a8 100644
--- a/puppet/controller.yaml
+++ b/puppet/controller.yaml
@@ -106,6 +106,10 @@ parameters:
default: true
description: Whether to use Galera instead of regular MariaDB.
type: boolean
+ EnableLoadBalancer:
+ default: true
+ description: Whether to deploy a LoadBalancer on the Controller
+ type: boolean
EnableCephStorage:
default: false
description: Whether to deploy Ceph Storage (OSD) on the Controller
@@ -835,6 +839,7 @@ resources:
keystone_ec2_uri: { get_param: [EndpointMap, KeystoneEC2, uri] }
enable_fencing: {get_param: EnableFencing}
enable_galera: {get_param: EnableGalera}
+ enable_load_balancer: {get_param: EnableLoadBalancer}
enable_ceph_storage: {get_param: EnableCephStorage}
enable_swift_storage: {get_param: EnableSwiftStorage}
manage_firewall: {get_param: ManageFirewall}
@@ -1054,6 +1059,7 @@ resources:
# Pacemaker
enable_fencing: {get_input: enable_fencing}
+ enable_load_balancer: {get_input: enable_load_balancer}
hacluster_pwd: {get_input: pcsd_password}
tripleo::fencing::config: {get_input: fencing_config}
diff --git a/puppet/manifests/overcloud_controller.pp b/puppet/manifests/overcloud_controller.pp
index f758c55a..afbdd19b 100644
--- a/puppet/manifests/overcloud_controller.pp
+++ b/puppet/manifests/overcloud_controller.pp
@@ -16,15 +16,19 @@
include ::tripleo::packages
include ::tripleo::firewall
+$enable_load_balancer = hiera('enable_load_balancer', true)
+
if hiera('step') >= 1 {
create_resources(sysctl::value, hiera('sysctl_settings'), {})
$controller_node_ips = split(hiera('controller_node_ips'), ',')
- class { '::tripleo::loadbalancer' :
- controller_hosts => $controller_node_ips,
- manage_vip => true,
+ if $enable_load_balancer {
+ class { '::tripleo::loadbalancer' :
+ controller_hosts => $controller_node_ips,
+ manage_vip => true,
+ }
}
}
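
For context, when the external-loadbalancer environment is applied, the hieradata read by the conditional above would carry something like the following (a sketch showing only the keys this manifest consumes; the controller IPs are placeholders):

    # Sketch: hiera keys referenced by the manifest; values are illustrative.
    enable_load_balancer: false
    controller_node_ips: '192.0.2.21,192.0.2.22,192.0.2.23'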
diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp
index 95b7992c..58b5c80b 100644
--- a/puppet/manifests/overcloud_controller_pacemaker.pp
+++ b/puppet/manifests/overcloud_controller_pacemaker.pp
@@ -30,6 +30,7 @@ if $::hostname == downcase(hiera('bootstrap_nodeid')) {
}
$enable_fencing = str2bool(hiera('enable_fencing', false)) and hiera('step') >= 5
+$enable_load_balancer = hiera('enable_load_balancer', true)
# When to start and enable services which haven't been Pacemakerized
# FIXME: remove when we start all OpenStack services using Pacemaker
@@ -46,12 +47,14 @@ if hiera('step') >= 1 {
$controller_node_ips = split(hiera('controller_node_ips'), ',')
$controller_node_names = split(downcase(hiera('controller_node_names')), ',')
- class { '::tripleo::loadbalancer' :
- controller_hosts => $controller_node_ips,
- controller_hosts_names => $controller_node_names,
- manage_vip => false,
- mysql_clustercheck => true,
- haproxy_service_manage => false,
+ if $enable_load_balancer {
+ class { '::tripleo::loadbalancer' :
+ controller_hosts => $controller_node_ips,
+ controller_hosts_names => $controller_node_names,
+ manage_vip => false,
+ mysql_clustercheck => true,
+ haproxy_service_manage => false,
+ }
}
$pacemaker_cluster_members = downcase(regsubst(hiera('controller_node_names'), ',', ' ', 'G'))
@@ -179,156 +182,160 @@ if hiera('step') >= 2 {
if $pacemaker_master {
- include ::pacemaker::resource_defaults
+ if $enable_load_balancer {
- # FIXME: we should not have to access tripleo::loadbalancer class
- # parameters here to configure pacemaker VIPs. The configuration
- # of pacemaker VIPs could move into puppet-tripleo or we should
- # make use of less specific hiera parameters here for the settings.
- pacemaker::resource::service { 'haproxy':
- clone_params => true,
- }
+ include ::pacemaker::resource_defaults
- $control_vip = hiera('tripleo::loadbalancer::controller_virtual_ip')
- pacemaker::resource::ip { 'control_vip':
- ip_address => $control_vip,
- }
- pacemaker::constraint::base { 'control_vip-then-haproxy':
- constraint_type => 'order',
- first_resource => "ip-${control_vip}",
- second_resource => 'haproxy-clone',
- first_action => 'start',
- second_action => 'start',
- constraint_params => 'kind=Optional',
- require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Ip['control_vip']],
- }
- pacemaker::constraint::colocation { 'control_vip-with-haproxy':
- source => "ip-${control_vip}",
- target => 'haproxy-clone',
- score => 'INFINITY',
- require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Ip['control_vip']],
- }
-
- $public_vip = hiera('tripleo::loadbalancer::public_virtual_ip')
- if $public_vip and $public_vip != $control_vip {
- pacemaker::resource::ip { 'public_vip':
- ip_address => $public_vip,
- }
- pacemaker::constraint::base { 'public_vip-then-haproxy':
- constraint_type => 'order',
- first_resource => "ip-${public_vip}",
- second_resource => 'haproxy-clone',
- first_action => 'start',
- second_action => 'start',
- constraint_params => 'kind=Optional',
- require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Ip['public_vip']],
- }
- pacemaker::constraint::colocation { 'public_vip-with-haproxy':
- source => "ip-${public_vip}",
- target => 'haproxy-clone',
- score => 'INFINITY',
- require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Ip['public_vip']],
+ # FIXME: we should not have to access tripleo::loadbalancer class
+ # parameters here to configure pacemaker VIPs. The configuration
+ # of pacemaker VIPs could move into puppet-tripleo or we should
+ # make use of less specific hiera parameters here for the settings.
+ pacemaker::resource::service { 'haproxy':
+ clone_params => true,
}
- }
- $redis_vip = hiera('redis_vip')
- if $redis_vip and $redis_vip != $control_vip {
- pacemaker::resource::ip { 'redis_vip':
- ip_address => $redis_vip,
+ $control_vip = hiera('tripleo::loadbalancer::controller_virtual_ip')
+ pacemaker::resource::ip { 'control_vip':
+ ip_address => $control_vip,
}
- pacemaker::constraint::base { 'redis_vip-then-haproxy':
+ pacemaker::constraint::base { 'control_vip-then-haproxy':
constraint_type => 'order',
- first_resource => "ip-${redis_vip}",
+ first_resource => "ip-${control_vip}",
second_resource => 'haproxy-clone',
first_action => 'start',
second_action => 'start',
constraint_params => 'kind=Optional',
require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Ip['redis_vip']],
+ Pacemaker::Resource::Ip['control_vip']],
}
- pacemaker::constraint::colocation { 'redis_vip-with-haproxy':
- source => "ip-${redis_vip}",
+ pacemaker::constraint::colocation { 'control_vip-with-haproxy':
+ source => "ip-${control_vip}",
target => 'haproxy-clone',
score => 'INFINITY',
require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Ip['redis_vip']],
+ Pacemaker::Resource::Ip['control_vip']],
}
- }
- $internal_api_vip = hiera('tripleo::loadbalancer::internal_api_virtual_ip')
- if $internal_api_vip and $internal_api_vip != $control_vip {
- pacemaker::resource::ip { 'internal_api_vip':
- ip_address => $internal_api_vip,
- }
- pacemaker::constraint::base { 'internal_api_vip-then-haproxy':
- constraint_type => 'order',
- first_resource => "ip-${internal_api_vip}",
- second_resource => 'haproxy-clone',
- first_action => 'start',
- second_action => 'start',
- constraint_params => 'kind=Optional',
- require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Ip['internal_api_vip']],
- }
- pacemaker::constraint::colocation { 'internal_api_vip-with-haproxy':
- source => "ip-${internal_api_vip}",
- target => 'haproxy-clone',
- score => 'INFINITY',
- require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Ip['internal_api_vip']],
+ $public_vip = hiera('tripleo::loadbalancer::public_virtual_ip')
+ if $public_vip and $public_vip != $control_vip {
+ pacemaker::resource::ip { 'public_vip':
+ ip_address => $public_vip,
+ }
+ pacemaker::constraint::base { 'public_vip-then-haproxy':
+ constraint_type => 'order',
+ first_resource => "ip-${public_vip}",
+ second_resource => 'haproxy-clone',
+ first_action => 'start',
+ second_action => 'start',
+ constraint_params => 'kind=Optional',
+ require => [Pacemaker::Resource::Service['haproxy'],
+ Pacemaker::Resource::Ip['public_vip']],
+ }
+ pacemaker::constraint::colocation { 'public_vip-with-haproxy':
+ source => "ip-${public_vip}",
+ target => 'haproxy-clone',
+ score => 'INFINITY',
+ require => [Pacemaker::Resource::Service['haproxy'],
+ Pacemaker::Resource::Ip['public_vip']],
+ }
}
- }
- $storage_vip = hiera('tripleo::loadbalancer::storage_virtual_ip')
- if $storage_vip and $storage_vip != $control_vip {
- pacemaker::resource::ip { 'storage_vip':
- ip_address => $storage_vip,
- }
- pacemaker::constraint::base { 'storage_vip-then-haproxy':
- constraint_type => 'order',
- first_resource => "ip-${storage_vip}",
- second_resource => 'haproxy-clone',
- first_action => 'start',
- second_action => 'start',
- constraint_params => 'kind=Optional',
- require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Ip['storage_vip']],
- }
- pacemaker::constraint::colocation { 'storage_vip-with-haproxy':
- source => "ip-${storage_vip}",
- target => 'haproxy-clone',
- score => 'INFINITY',
- require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Ip['storage_vip']],
+ $redis_vip = hiera('redis_vip')
+ if $redis_vip and $redis_vip != $control_vip {
+ pacemaker::resource::ip { 'redis_vip':
+ ip_address => $redis_vip,
+ }
+ pacemaker::constraint::base { 'redis_vip-then-haproxy':
+ constraint_type => 'order',
+ first_resource => "ip-${redis_vip}",
+ second_resource => 'haproxy-clone',
+ first_action => 'start',
+ second_action => 'start',
+ constraint_params => 'kind=Optional',
+ require => [Pacemaker::Resource::Service['haproxy'],
+ Pacemaker::Resource::Ip['redis_vip']],
+ }
+ pacemaker::constraint::colocation { 'redis_vip-with-haproxy':
+ source => "ip-${redis_vip}",
+ target => 'haproxy-clone',
+ score => 'INFINITY',
+ require => [Pacemaker::Resource::Service['haproxy'],
+ Pacemaker::Resource::Ip['redis_vip']],
+ }
}
- }
- $storage_mgmt_vip = hiera('tripleo::loadbalancer::storage_mgmt_virtual_ip')
- if $storage_mgmt_vip and $storage_mgmt_vip != $control_vip {
- pacemaker::resource::ip { 'storage_mgmt_vip':
- ip_address => $storage_mgmt_vip,
+ $internal_api_vip = hiera('tripleo::loadbalancer::internal_api_virtual_ip')
+ if $internal_api_vip and $internal_api_vip != $control_vip {
+ pacemaker::resource::ip { 'internal_api_vip':
+ ip_address => $internal_api_vip,
+ }
+ pacemaker::constraint::base { 'internal_api_vip-then-haproxy':
+ constraint_type => 'order',
+ first_resource => "ip-${internal_api_vip}",
+ second_resource => 'haproxy-clone',
+ first_action => 'start',
+ second_action => 'start',
+ constraint_params => 'kind=Optional',
+ require => [Pacemaker::Resource::Service['haproxy'],
+ Pacemaker::Resource::Ip['internal_api_vip']],
+ }
+ pacemaker::constraint::colocation { 'internal_api_vip-with-haproxy':
+ source => "ip-${internal_api_vip}",
+ target => 'haproxy-clone',
+ score => 'INFINITY',
+ require => [Pacemaker::Resource::Service['haproxy'],
+ Pacemaker::Resource::Ip['internal_api_vip']],
+ }
}
- pacemaker::constraint::base { 'storage_mgmt_vip-then-haproxy':
- constraint_type => 'order',
- first_resource => "ip-${storage_mgmt_vip}",
- second_resource => 'haproxy-clone',
- first_action => 'start',
- second_action => 'start',
- constraint_params => 'kind=Optional',
- require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Ip['storage_mgmt_vip']],
+
+ $storage_vip = hiera('tripleo::loadbalancer::storage_virtual_ip')
+ if $storage_vip and $storage_vip != $control_vip {
+ pacemaker::resource::ip { 'storage_vip':
+ ip_address => $storage_vip,
+ }
+ pacemaker::constraint::base { 'storage_vip-then-haproxy':
+ constraint_type => 'order',
+ first_resource => "ip-${storage_vip}",
+ second_resource => 'haproxy-clone',
+ first_action => 'start',
+ second_action => 'start',
+ constraint_params => 'kind=Optional',
+ require => [Pacemaker::Resource::Service['haproxy'],
+ Pacemaker::Resource::Ip['storage_vip']],
+ }
+ pacemaker::constraint::colocation { 'storage_vip-with-haproxy':
+ source => "ip-${storage_vip}",
+ target => 'haproxy-clone',
+ score => 'INFINITY',
+ require => [Pacemaker::Resource::Service['haproxy'],
+ Pacemaker::Resource::Ip['storage_vip']],
+ }
}
- pacemaker::constraint::colocation { 'storage_mgmt_vip-with-haproxy':
- source => "ip-${storage_mgmt_vip}",
- target => 'haproxy-clone',
- score => 'INFINITY',
- require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Ip['storage_mgmt_vip']],
+
+ $storage_mgmt_vip = hiera('tripleo::loadbalancer::storage_mgmt_virtual_ip')
+ if $storage_mgmt_vip and $storage_mgmt_vip != $control_vip {
+ pacemaker::resource::ip { 'storage_mgmt_vip':
+ ip_address => $storage_mgmt_vip,
+ }
+ pacemaker::constraint::base { 'storage_mgmt_vip-then-haproxy':
+ constraint_type => 'order',
+ first_resource => "ip-${storage_mgmt_vip}",
+ second_resource => 'haproxy-clone',
+ first_action => 'start',
+ second_action => 'start',
+ constraint_params => 'kind=Optional',
+ require => [Pacemaker::Resource::Service['haproxy'],
+ Pacemaker::Resource::Ip['storage_mgmt_vip']],
+ }
+ pacemaker::constraint::colocation { 'storage_mgmt_vip-with-haproxy':
+ source => "ip-${storage_mgmt_vip}",
+ target => 'haproxy-clone',
+ score => 'INFINITY',
+ require => [Pacemaker::Resource::Service['haproxy'],
+ Pacemaker::Resource::Ip['storage_mgmt_vip']],
+ }
}
+
}
pacemaker::resource::service { $::memcached::params::service_name :
@@ -924,15 +931,16 @@ if hiera('step') >= 4 {
File['/etc/keystone/ssl/private/signing_key.pem'],
File['/etc/keystone/ssl/certs/signing_cert.pem']],
}
-
- pacemaker::constraint::base { 'haproxy-then-keystone-constraint':
- constraint_type => 'order',
- first_resource => 'haproxy-clone',
- second_resource => "${::keystone::params::service_name}-clone",
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service['haproxy'],
- Pacemaker::Resource::Service[$::keystone::params::service_name]],
+ if $enable_load_balancer {
+ pacemaker::constraint::base { 'haproxy-then-keystone-constraint':
+ constraint_type => 'order',
+ first_resource => 'haproxy-clone',
+ second_resource => "${::keystone::params::service_name}-clone",
+ first_action => 'start',
+ second_action => 'start',
+ require => [Pacemaker::Resource::Service['haproxy'],
+ Pacemaker::Resource::Service[$::keystone::params::service_name]],
+ }
}
pacemaker::constraint::base { 'rabbitmq-then-keystone-constraint':
constraint_type => 'order',