 ceph-storage-post.yaml                              |   3
 ceph-storage.yaml                                   |   3
 cinder-storage-post.yaml                            |   3
 cinder-storage.yaml                                 |   3
 compute-post.yaml                                   |   3
 compute.yaml                                        |   3
 controller-post.yaml                                |   3
 controller.yaml                                     |  39
 environments/network-isolation.yaml                 |   3
 network/config/bond-with-vlans/ceph-storage.yaml    |  10
 network/config/bond-with-vlans/cinder-storage.yaml  |  10
 network/config/bond-with-vlans/compute.yaml         |  10
 network/config/bond-with-vlans/controller.yaml      |  22
 network/config/bond-with-vlans/swift-storage.yaml   |  10
 network/config/single-nic-vlans/controller.yaml     |   8
 network/ports/vip.yaml                              |   4
 overcloud-without-mergepy.yaml                      |  44
 puppet/all-nodes-config.yaml                        |   2
 puppet/ceph-storage-post-puppet.yaml                |   6
 puppet/ceph-storage-puppet.yaml                     |   4
 puppet/cinder-storage-post.yaml                     |   3
 puppet/cinder-storage-puppet.yaml                   |   3
 puppet/compute-post-puppet.yaml                     |   6
 puppet/compute-puppet.yaml                          |   6
 puppet/controller-post-puppet.yaml                  |  20
 puppet/controller-puppet.yaml                       |  43
 puppet/hieradata/RedHat.yaml                        |   7
 puppet/hieradata/controller.yaml                    |   7
 puppet/manifests/overcloud_compute.pp               |   2
 puppet/manifests/overcloud_controller.pp            |   1
 puppet/manifests/overcloud_controller_pacemaker.pp  | 204
 puppet/swift-storage-post.yaml                      |   8
 puppet/swift-storage-puppet.yaml                    |   4
 swift-storage-post.yaml                             |   3
 swift-storage.yaml                                  |   3
 35 files changed, 406 insertions(+), 107 deletions(-)
diff --git a/ceph-storage-post.yaml b/ceph-storage-post.yaml
index 06c2a291..734f90bd 100644
--- a/ceph-storage-post.yaml
+++ b/ceph-storage-post.yaml
@@ -6,6 +6,9 @@ description: 'Ceph Storage Post Deployment'
parameters:
servers:
type: json
+ NodeConfigIdentifiers:
+ type: json
+ description: Value which changes if the node configuration may need to be re-applied
resources:
diff --git a/ceph-storage.yaml b/ceph-storage.yaml
index 2b44642b..15092bae 100644
--- a/ceph-storage.yaml
+++ b/ceph-storage.yaml
@@ -152,3 +152,6 @@ outputs:
storage_mgmt_ip_address:
description: IP address of the server in the storage_mgmt network
value: {get_attr: [StorageMgmtPort, ip_address]}
+ config_identifier:
+ description: identifier which changes if the node configuration may need re-applying
+ value: "None - NO_SIGNAL"
diff --git a/cinder-storage-post.yaml b/cinder-storage-post.yaml
index 8943c502..ad4e0460 100644
--- a/cinder-storage-post.yaml
+++ b/cinder-storage-post.yaml
@@ -6,6 +6,9 @@ description: 'Common Block Storage Post Deployment'
parameters:
servers:
type: json
+ NodeConfigIdentifiers:
+ type: json
+ description: Value which changes if the node configuration may need to be re-applied
resources:
diff --git a/cinder-storage.yaml b/cinder-storage.yaml
index 2b59607e..be088d66 100644
--- a/cinder-storage.yaml
+++ b/cinder-storage.yaml
@@ -222,3 +222,6 @@ outputs:
storage_mgmt_ip_address:
description: IP address of the server in the storage_mgmt network
value: {get_attr: [StorageMgmtPort, ip_address]}
+ config_identifier:
+ description: identifier which changes if the node configuration may need re-applying
+ value: "None - NO_SIGNAL"
diff --git a/compute-post.yaml b/compute-post.yaml
index e568a5a6..695690d4 100644
--- a/compute-post.yaml
+++ b/compute-post.yaml
@@ -6,6 +6,9 @@ description: 'Compute Post Deployment'
parameters:
servers:
type: json
+ NodeConfigIdentifiers:
+ type: json
+ description: Value which changes if the node configuration may need to be re-applied
resources:
diff --git a/compute.yaml b/compute.yaml
index 408e0f31..4a9a92b9 100644
--- a/compute.yaml
+++ b/compute.yaml
@@ -477,3 +477,6 @@ outputs:
description: Heat resource handle for the Nova compute server
value:
{get_resource: NovaCompute}
+ config_identifier:
+ description: identifier which changes if the node configuration may need re-applying
+ value: "None - NO_SIGNAL"
diff --git a/controller-post.yaml b/controller-post.yaml
index 926adeae..aac96357 100644
--- a/controller-post.yaml
+++ b/controller-post.yaml
@@ -6,6 +6,9 @@ description: 'Controller Post Deployment'
parameters:
servers:
type: json
+ NodeConfigIdentifiers:
+ type: json
+ description: Value which changes if the node configuration may need to be re-applied
resources:
diff --git a/controller.yaml b/controller.yaml
index 36bd77bc..5596591f 100644
--- a/controller.yaml
+++ b/controller.yaml
@@ -67,6 +67,10 @@ parameters:
default: ''
description: Set to True to enable debugging on all services.
type: string
+ EnableFencing:
+ default: false
+ description: Whether to enable fencing in Pacemaker or not.
+ type: boolean
EnableGalera:
default: true
description: Whether to use Galera instead of regular MariaDB.
@@ -117,6 +121,38 @@ parameters:
}
}
type: json
+ FencingConfig:
+ default: {}
+ description: |
+ Pacemaker fencing configuration. The JSON should have
+ the following structure:
+ {
+ "devices": [
+ {
+ "agent": "AGENT_NAME",
+ "host_mac": "HOST_MAC_ADDRESS",
+ "params": {"PARAM_NAME": "PARAM_VALUE"}
+ }
+ ]
+ }
+ For instance:
+ {
+ "devices": [
+ {
+ "agent": "fence_xvm",
+ "host_mac": "52:54:00:aa:bb:cc",
+ "params": {
+ "multicast_address": "225.0.0.12",
+ "port": "baremetal_0",
+ "manage_fw": true,
+ "manage_key_file": true,
+ "key_file": "/etc/fence_xvm.key",
+ "key_file_password": "abcdef"
+ }
+ }
+ ]
+ }
+ type: json
Flavor:
description: Flavor for control nodes to request when deploying.
type: string
@@ -1046,3 +1082,6 @@ outputs:
template: "IP:11211"
params:
IP: {get_attr: [Controller, networks, ctlplane, 0]}
+ config_identifier:
+ description: identifier which changes if the node configuration may need re-applying
+ value: "None - NO_SIGNAL"
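For reference, the EnableFencing and FencingConfig parameters added above could be supplied at deploy time through a Heat environment file along the following lines. This is only a minimal sketch: the file name is hypothetical and the fence_xvm values simply repeat the illustrative example from the parameter description.

# fencing-environment.yaml (hypothetical)
parameter_defaults:
  EnableFencing: true
  FencingConfig:
    devices:
      - agent: fence_xvm
        host_mac: "52:54:00:aa:bb:cc"
        params:
          multicast_address: "225.0.0.12"
          port: baremetal_0
          manage_fw: true
          manage_key_file: true
          key_file: /etc/fence_xvm.key
          key_file_password: abcdef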
diff --git a/environments/network-isolation.yaml b/environments/network-isolation.yaml
index 5eb2b2da..937931d1 100644
--- a/environments/network-isolation.yaml
+++ b/environments/network-isolation.yaml
@@ -33,3 +33,6 @@ resource_registry:
OS::TripleO::BlockStorage::Ports::InternalApiPort: ../network/ports/internal_api.yaml
OS::TripleO::BlockStorage::Ports::StoragePort: ../network/ports/storage.yaml
OS::TripleO::BlockStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
+
+ # Port assignments for service virtual IPs for the controller role
+ OS::TripleO::Controller::Ports::RedisVipPort: ../network/ports/vip.yaml
diff --git a/network/config/bond-with-vlans/ceph-storage.yaml b/network/config/bond-with-vlans/ceph-storage.yaml
index ed9fff7a..cd70cbef 100644
--- a/network/config/bond-with-vlans/ceph-storage.yaml
+++ b/network/config/bond-with-vlans/ceph-storage.yaml
@@ -50,16 +50,6 @@ resources:
network_config:
-
type: ovs_bridge
- name: br-storage
- use_dhcp: true
- members:
- -
- type: interface
- name: nic1
- # force the MAC address of the bridge to this interface
- primary: true
- -
- type: ovs_bridge
name: br-bond
members:
-
diff --git a/network/config/bond-with-vlans/cinder-storage.yaml b/network/config/bond-with-vlans/cinder-storage.yaml
index 95733fca..866112cb 100644
--- a/network/config/bond-with-vlans/cinder-storage.yaml
+++ b/network/config/bond-with-vlans/cinder-storage.yaml
@@ -53,16 +53,6 @@ resources:
network_config:
-
type: ovs_bridge
- name: br-storage
- use_dhcp: true
- members:
- -
- type: interface
- name: nic1
- # force the MAC address of the bridge to this interface
- primary: true
- -
- type: ovs_bridge
name: br-bond
members:
-
diff --git a/network/config/bond-with-vlans/compute.yaml b/network/config/bond-with-vlans/compute.yaml
index 709b3b5b..5105ee14 100644
--- a/network/config/bond-with-vlans/compute.yaml
+++ b/network/config/bond-with-vlans/compute.yaml
@@ -53,16 +53,6 @@ resources:
network_config:
-
type: ovs_bridge
- name: {get_input: bridge_name}
- use_dhcp: true
- members:
- -
- type: interface
- name: nic1
- # force the MAC address of the bridge to this interface
- primary: true
- -
- type: ovs_bridge
name: br-bond
members:
-
diff --git a/network/config/bond-with-vlans/controller.yaml b/network/config/bond-with-vlans/controller.yaml
index 16a0dec2..9d6a6810 100644
--- a/network/config/bond-with-vlans/controller.yaml
+++ b/network/config/bond-with-vlans/controller.yaml
@@ -50,6 +50,10 @@ parameters:
default: 50
description: Vlan ID for the tenant network traffic.
type: number
+ ExternalInterfaceDefaultRoute:
+ default: '10.0.0.1'
+ description: default route for the external network
+ type: string
resources:
OsNetConfigImpl:
@@ -62,16 +66,6 @@ resources:
-
type: ovs_bridge
name: {get_input: bridge_name}
- use_dhcp: true
- members:
- -
- type: interface
- name: nic1
- # force the MAC address of the bridge to this interface
- primary: true
- -
- type: ovs_bridge
- name: br-bond
members:
-
type: ovs_bond
@@ -90,8 +84,12 @@ resources:
device: bond1
vlan_id: {get_param: ExternalNetworkVlanID}
addresses:
- -
- ip_netmask: {get_param: ExternalIpSubnet}
+ -
+ ip_netmask: {get_param: ExternalIpSubnet}
+ routes:
+ -
+ ip_netmask: 0.0.0.0/0
+ next_hop: {get_param: ExternalInterfaceDefaultRoute}
-
type: vlan
device: bond1
diff --git a/network/config/bond-with-vlans/swift-storage.yaml b/network/config/bond-with-vlans/swift-storage.yaml
index 6549ce7a..f31ed0e7 100644
--- a/network/config/bond-with-vlans/swift-storage.yaml
+++ b/network/config/bond-with-vlans/swift-storage.yaml
@@ -53,16 +53,6 @@ resources:
network_config:
-
type: ovs_bridge
- name: br-storage
- use_dhcp: true
- members:
- -
- type: interface
- name: nic1
- # force the MAC address of the bridge to this interface
- primary: true
- -
- type: ovs_bridge
name: br-bond
members:
-
diff --git a/network/config/single-nic-vlans/controller.yaml b/network/config/single-nic-vlans/controller.yaml
index ca0cbcad..4cfa1317 100644
--- a/network/config/single-nic-vlans/controller.yaml
+++ b/network/config/single-nic-vlans/controller.yaml
@@ -45,6 +45,10 @@ parameters:
default: 50
description: Vlan ID for the tenant network traffic.
type: number
+ ExternalInterfaceDefaultRoute:
+ default: '10.0.0.1'
+ description: default route for the external network
+ type: string
resources:
OsNetConfigImpl:
@@ -70,6 +74,10 @@ resources:
addresses:
-
ip_netmask: {get_param: ExternalIpSubnet}
+ routes:
+ -
+ ip_netmask: 0.0.0.0/0
+ next_hop: {get_param: ExternalInterfaceDefaultRoute}
-
type: vlan
vlan_id: {get_param: InternalApiNetworkVlanID}
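Both NIC config templates now default the external gateway to 10.0.0.1; a deployment whose external network uses a different gateway would override the new parameter, for instance with an environment entry like this (a sketch; the address is illustrative):

parameter_defaults:
  ExternalInterfaceDefaultRoute: 192.0.2.254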
diff --git a/network/ports/vip.yaml b/network/ports/vip.yaml
index b957e132..ab72083d 100644
--- a/network/ports/vip.yaml
+++ b/network/ports/vip.yaml
@@ -15,6 +15,10 @@ parameters:
ControlPlaneIP: # Here for compatibility with noop.yaml
description: IP address on the control plane
type: string
+ ControlPlaneNetwork:
+ description: The name of the undercloud Neutron control plane
+ default: ctlplane
+ type: string
resources:
diff --git a/overcloud-without-mergepy.yaml b/overcloud-without-mergepy.yaml
index e13b9acb..14e2ac24 100644
--- a/overcloud-without-mergepy.yaml
+++ b/overcloud-without-mergepy.yaml
@@ -257,6 +257,10 @@ parameters:
default: 'br-ex'
description: Interface where virtual ip will be assigned.
type: string
+ EnableFencing:
+ default: false
+ description: Whether to enable fencing in Pacemaker or not.
+ type: boolean
EnableGalera:
default: true
description: Whether to use Galera instead of regular MariaDB.
@@ -307,6 +311,38 @@ parameters:
}
}
type: json
+ FencingConfig:
+ default: {}
+ description: |
+ Pacemaker fencing configuration. The JSON should have
+ the following structure:
+ {
+ "devices": [
+ {
+ "agent": "AGENT_NAME",
+ "host_mac": "HOST_MAC_ADDRESS",
+ "params": {"PARAM_NAME": "PARAM_VALUE"}
+ }
+ ]
+ }
+ For instance:
+ {
+ "devices": [
+ {
+ "agent": "fence_xvm",
+ "host_mac": "52:54:00:aa:bb:cc",
+ "params": {
+ "multicast_address": "225.0.0.12",
+ "port": "baremetal_0",
+ "manage_fw": true,
+ "manage_key_file": true,
+ "key_file": "/etc/fence_xvm.key",
+ "key_file_password": "abcdef"
+ }
+ }
+ ]
+ }
+ type: json
GlanceLogFile:
description: The filepath of the file to use for logging messages from Glance.
type: string
@@ -624,10 +660,12 @@ resources:
ControlVirtualInterface: {get_param: ControlVirtualInterface}
ControllerExtraConfig: {get_param: controllerExtraConfig}
Debug: {get_param: Debug}
+ EnableFencing: {get_param: EnableFencing}
EnableGalera: {get_param: EnableGalera}
EnableCephStorage: {get_param: ControllerEnableCephStorage}
EnableSwiftStorage: {get_param: ControllerEnableSwiftStorage}
ExtraConfig: {get_param: ExtraConfig}
+ FencingConfig: {get_param: FencingConfig}
Flavor: {get_param: OvercloudControlFlavor}
GlancePort: {get_param: GlancePort}
GlanceProtocol: {get_param: GlanceProtocol}
@@ -1110,30 +1148,36 @@ resources:
depends_on: [ControllerBootstrapNodeDeployment, ControllerAllNodesDeployment, ControllerSwiftDeployment, ControllerCephDeployment]
properties:
servers: {get_attr: [Controller, attributes, nova_server_resource]}
+ NodeConfigIdentifiers: {get_attr: [Controller, attributes, config_identifier]}
ComputeNodesPostDeployment:
type: OS::TripleO::ComputePostDeployment
depends_on: [ComputeAllNodesDeployment, ComputeCephDeployment]
properties:
servers: {get_attr: [Compute, attributes, nova_server_resource]}
+ NodeConfigIdentifiers: {get_attr: [Compute, attributes, config_identifier]}
ObjectStorageNodesPostDeployment:
type: OS::TripleO::ObjectStoragePostDeployment
depends_on: [ObjectStorageSwiftDeployment, ObjectStorageAllNodesDeployment]
properties:
servers: {get_attr: [ObjectStorage, attributes, nova_server_resource]}
+ NodeConfigIdentifiers: {get_attr: [ObjectStorage, attributes, config_identifier]}
+
BlockStorageNodesPostDeployment:
type: OS::TripleO::BlockStoragePostDeployment
depends_on: [ControllerNodesPostDeployment, BlockStorageAllNodesDeployment]
properties:
servers: {get_attr: [BlockStorage, attributes, nova_server_resource]}
+ NodeConfigIdentifiers: {get_attr: [BlockStorage, attributes, config_identifier]}
CephStorageNodesPostDeployment:
type: OS::TripleO::CephStoragePostDeployment
depends_on: [ControllerNodesPostDeployment, CephStorageCephDeployment, CephStorageAllNodesDeployment]
properties:
servers: {get_attr: [CephStorage, attributes, nova_server_resource]}
+ NodeConfigIdentifiers: {get_attr: [CephStorage, attributes, config_identifier]}
outputs:
KeystoneURL:
diff --git a/puppet/all-nodes-config.yaml b/puppet/all-nodes-config.yaml
index c50d6820..060f4c81 100644
--- a/puppet/all-nodes-config.yaml
+++ b/puppet/all-nodes-config.yaml
@@ -78,6 +78,8 @@ resources:
- {get_param: ceph_storage_hosts}
hiera:
datafiles:
+ RedHat:
+ raw_data: {get_file: hieradata/RedHat.yaml}
all_nodes:
mapped_data:
controller_node_ips:
diff --git a/puppet/ceph-storage-post-puppet.yaml b/puppet/ceph-storage-post-puppet.yaml
index eb62ba1e..1b5b944d 100644
--- a/puppet/ceph-storage-post-puppet.yaml
+++ b/puppet/ceph-storage-post-puppet.yaml
@@ -6,6 +6,10 @@ description: >
parameters:
servers:
type: json
+ NodeConfigIdentifiers:
+ type: json
+ description: Value which changes if the node configuration may need to be re-applied
+
resources:
CephStoragePuppetConfig:
@@ -22,6 +26,8 @@ resources:
properties:
servers: {get_param: servers}
config: {get_resource: CephStoragePuppetConfig}
+ input_values:
+ update_identifier: {get_param: NodeConfigIdentifiers}
# Note, this should come last, so use depends_on to ensure
# this is created after any other resources.
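The same wiring is repeated for every role below: each node template exposes a config_identifier output, the aggregated values are passed into the post-deployment template as NodeConfigIdentifiers, and the post template feeds them to its deployments as an update_identifier input. Because a change in any node's configuration data changes the deployment's input_values, Heat re-triggers the configuration on stack update. A stripped-down sketch of the pattern (resource names are illustrative, not taken from the patch):

resources:
  ExamplePuppetConfig:
    type: OS::Heat::SoftwareConfig
    properties:
      group: puppet
      inputs:
        - name: update_identifier   # declared so the value is visible as a deployment input
      config: ''
  ExamplePuppetDeployment:
    type: OS::Heat::StructuredDeployments
    properties:
      servers: {get_param: servers}
      config: {get_resource: ExamplePuppetConfig}
      input_values:
        update_identifier: {get_param: NodeConfigIdentifiers}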
diff --git a/puppet/ceph-storage-puppet.yaml b/puppet/ceph-storage-puppet.yaml
index 2250f429..245d8ebb 100644
--- a/puppet/ceph-storage-puppet.yaml
+++ b/puppet/ceph-storage-puppet.yaml
@@ -157,3 +157,7 @@ outputs:
storage_mgmt_ip_address:
description: IP address of the server in the storage_mgmt network
value: {get_attr: [StorageMgmtPort, ip_address]}
+ config_identifier:
+ description: identifier which changes if the node configuration may need re-applying
+ value: {get_attr: [CephStorageDeployment, deploy_stdout]}
+
diff --git a/puppet/cinder-storage-post.yaml b/puppet/cinder-storage-post.yaml
index f17a1567..24d2b8a3 100644
--- a/puppet/cinder-storage-post.yaml
+++ b/puppet/cinder-storage-post.yaml
@@ -4,6 +4,9 @@ description: 'OpenStack cinder storage post deployment for Puppet'
parameters:
servers:
type: json
+ NodeConfigIdentifiers:
+ type: json
+ description: Value which changes if the node configuration may need to be re-applied
resources:
diff --git a/puppet/cinder-storage-puppet.yaml b/puppet/cinder-storage-puppet.yaml
index a368ffd1..cc8d17c4 100644
--- a/puppet/cinder-storage-puppet.yaml
+++ b/puppet/cinder-storage-puppet.yaml
@@ -295,3 +295,6 @@ outputs:
storage_mgmt_ip_address:
description: IP address of the server in the storage_mgmt network
value: {get_attr: [StorageMgmtPort, ip_address]}
+ config_identifier:
+ description: identifier which changes if the node configuration may need re-applying
+ value: {get_attr: [BlockStorageDeployment, deploy_stdout]}
diff --git a/puppet/compute-post-puppet.yaml b/puppet/compute-post-puppet.yaml
index ca5eb649..b4a6126b 100644
--- a/puppet/compute-post-puppet.yaml
+++ b/puppet/compute-post-puppet.yaml
@@ -6,6 +6,10 @@ description: >
parameters:
servers:
type: json
+ NodeConfigIdentifiers:
+ type: json
+ description: Value which changes if the node configuration may need to be re-applied
+
resources:
@@ -23,6 +27,8 @@ resources:
properties:
servers: {get_param: servers}
config: {get_resource: ComputePuppetConfig}
+ input_values:
+ update_identifier: {get_param: NodeConfigIdentifiers}
# Note, this should come last, so use depends_on to ensure
# this is created after any other resources.
diff --git a/puppet/compute-puppet.yaml b/puppet/compute-puppet.yaml
index b34e7a6f..afe85d18 100644
--- a/puppet/compute-puppet.yaml
+++ b/puppet/compute-puppet.yaml
@@ -351,7 +351,7 @@ resources:
nova::compute::libvirt::libvirt_virt_type: {get_input: nova_compute_libvirt_type}
nova_api_host: {get_input: nova_api_host}
nova::compute::vncproxy_host: {get_input: nova_public_ip}
- nova_enable_rbd_backend: {get_input: nova_enable_rbd_backend}
+ nova::compute::rbd::ephemeral_storage: {get_input: nova_enable_rbd_backend}
nova_password: {get_input: nova_password}
nova::compute::vncserver_proxyclient_address: {get_input: nova_vnc_proxyclient_address}
ceilometer::debug: {get_input: debug}
@@ -516,3 +516,7 @@ outputs:
description: Heat resource handle for the Nova compute server
value:
{get_resource: NovaCompute}
+ config_identifier:
+ description: identifier which changes if the node configuration may need re-applying
+ value: {get_attr: [NovaComputeDeployment, deploy_stdout]}
+
diff --git a/puppet/controller-post-puppet.yaml b/puppet/controller-post-puppet.yaml
index 373daba2..49cbe1e2 100644
--- a/puppet/controller-post-puppet.yaml
+++ b/puppet/controller-post-puppet.yaml
@@ -6,6 +6,10 @@ description: >
parameters:
servers:
type: json
+ NodeConfigIdentifiers:
+ type: json
+ description: Value which changes if the node configuration may need to be re-applied
+
resources:
@@ -23,6 +27,7 @@ resources:
config: {get_resource: ControllerPuppetConfig}
input_values:
step: 1
+ update_identifier: {get_param: NodeConfigIdentifiers}
actions: ['CREATE'] # no need for two passes on an UPDATE
ControllerServicesBaseDeployment_Step2:
@@ -33,6 +38,7 @@ resources:
config: {get_resource: ControllerPuppetConfig}
input_values:
step: 2
+ update_identifier: {get_param: NodeConfigIdentifiers}
actions: ['CREATE'] # no need for two passes on an UPDATE
ControllerRingbuilderPuppetConfig:
@@ -54,6 +60,8 @@ resources:
properties:
servers: {get_param: servers}
config: {get_resource: ControllerRingbuilderPuppetConfig}
+ input_values:
+ update_identifier: {get_param: NodeConfigIdentifiers}
ControllerOvercloudServicesDeployment_Step4:
type: OS::Heat::StructuredDeployments
@@ -63,6 +71,7 @@ resources:
config: {get_resource: ControllerPuppetConfig}
input_values:
step: 3
+ update_identifier: {get_param: NodeConfigIdentifiers}
ControllerOvercloudServicesDeployment_Step5:
type: OS::Heat::StructuredDeployments
@@ -72,6 +81,17 @@ resources:
config: {get_resource: ControllerPuppetConfig}
input_values:
step: 4
+ update_identifier: {get_param: NodeConfigIdentifiers}
+
+ ControllerOvercloudServicesDeployment_Step6:
+ type: OS::Heat::StructuredDeployments
+ depends_on: ControllerOvercloudServicesDeployment_Step5
+ properties:
+ servers: {get_param: servers}
+ config: {get_resource: ControllerPuppetConfig}
+ input_values:
+ step: 5
+ update_identifier: {get_param: NodeConfigIdentifiers}
# Note, this should come last, so use depends_on to ensure
# this is created after any other resources.
diff --git a/puppet/controller-puppet.yaml b/puppet/controller-puppet.yaml
index 3d7ecd58..5a356bcc 100644
--- a/puppet/controller-puppet.yaml
+++ b/puppet/controller-puppet.yaml
@@ -72,6 +72,10 @@ parameters:
default: ''
description: Set to True to enable debugging on all services.
type: string
+ EnableFencing:
+ default: false
+ description: Whether to enable fencing in Pacemaker or not.
+ type: boolean
EnableGalera:
default: true
description: Whether to use Galera instead of regular MariaDB.
@@ -122,6 +126,38 @@ parameters:
}
}
type: json
+ FencingConfig:
+ default: {}
+ description: |
+ Pacemaker fencing configuration. The JSON should have
+ the following structure:
+ {
+ "devices": [
+ {
+ "agent": "AGENT_NAME",
+ "host_mac": "HOST_MAC_ADDRESS",
+ "params": {"PARAM_NAME": "PARAM_VALUE"}
+ }
+ ]
+ }
+ For instance:
+ {
+ "devices": [
+ {
+ "agent": "fence_xvm",
+ "host_mac": "52:54:00:aa:bb:cc",
+ "params": {
+ "multicast_address": "225.0.0.12",
+ "port": "baremetal_0",
+ "manage_fw": true,
+ "manage_key_file": true,
+ "key_file": "/etc/fence_xvm.key",
+ "key_file_password": "abcdef"
+ }
+ }
+ ]
+ }
+ type: json
Flavor:
description: Flavor for control nodes to request when deploying.
type: string
@@ -634,6 +670,7 @@ resources:
- - 'http://'
- {get_param: KeystonePublicApiVirtualIP}
- ':5000/v2.0/'
+ enable_fencing: {get_param: EnableFencing}
enable_galera: {get_param: EnableGalera}
enable_ceph_storage: {get_param: EnableCephStorage}
enable_swift_storage: {get_param: EnableSwiftStorage}
@@ -714,6 +751,7 @@ resources:
- '@'
- {get_param: MysqlVirtualIP}
- '/nova'
+ fencing_config: {get_param: FencingConfig}
pcsd_password: {get_param: PcsdPassword}
rabbit_username: {get_param: RabbitUserName}
rabbit_password: {get_param: RabbitPassword}
@@ -803,7 +841,9 @@ resources:
bootstack_nodeid: {get_input: bootstack_nodeid}
# Pacemaker
+ enable_fencing: {get_input: enable_fencing}
hacluster_pwd: {get_input: pcsd_password}
+ tripleo::fencing::config: {get_input: fencing_config}
# Swift
swift::proxy::proxy_local_net_ip: {get_input: swift_proxy_network}
@@ -1070,3 +1110,6 @@ outputs:
template: "IP:11211"
params:
IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, MemcachedNetwork]}]}
+ config_identifier:
+ description: identifier which changes if the controller configuration may need re-applying
+ value: {get_attr: [ControllerDeployment, deploy_stdout]}
diff --git a/puppet/hieradata/RedHat.yaml b/puppet/hieradata/RedHat.yaml
index f42c7159..25902828 100644
--- a/puppet/hieradata/RedHat.yaml
+++ b/puppet/hieradata/RedHat.yaml
@@ -1,2 +1,9 @@
# RedHat specific overrides go here
rabbitmq::package_provider: 'yum'
+
+# The Galera package should work in cluster and
+# non-cluster modes based on the config file.
+# We set the package name here explicitly so
+# that it matches what we pre-install
+# in tripleo-puppet-elements.
+mysql::server::package_name: 'mariadb-galera-server'
diff --git a/puppet/hieradata/controller.yaml b/puppet/hieradata/controller.yaml
index d0cbe890..3de9bd91 100644
--- a/puppet/hieradata/controller.yaml
+++ b/puppet/hieradata/controller.yaml
@@ -48,7 +48,6 @@ swift::proxy::pipeline:
- 'tempurl'
- 'formpost'
- 'staticweb'
- - 'ceilometer'
- 'authtoken'
- 'keystone'
- 'proxy-logging'
@@ -90,13 +89,9 @@ pacemaker::corosync::manage_fw: false
horizon::allowed_hosts: '*'
horizon::django_session_engine: 'django.contrib.sessions.backends.cache'
-
+# mysql
mysql::server::manage_config_file: true
-mysql::server::package_name: mariadb-galera-server
-
-tripleo::loadbalancer::galera_master_ip: "%{hiera('bootstrap_nodeid_ip')}"
-tripleo::loadbalancer::galera_master_hostname: "%{hiera('bootstrap_nodeid')}"
tripleo::loadbalancer::keystone_admin: true
tripleo::loadbalancer::keystone_public: true
diff --git a/puppet/manifests/overcloud_compute.pp b/puppet/manifests/overcloud_compute.pp
index 00bab7f6..cc26f851 100644
--- a/puppet/manifests/overcloud_compute.pp
+++ b/puppet/manifests/overcloud_compute.pp
@@ -51,7 +51,7 @@ nova_config {
'DEFAULT/linuxnet_interface_driver': value => 'nova.network.linux_net.LinuxOVSInterfaceDriver';
}
-$nova_enable_rbd_backend = hiera('nova_enable_rbd_backend', false)
+$nova_enable_rbd_backend = hiera('nova::compute::rbd::ephemeral_storage', false)
if $nova_enable_rbd_backend {
include ::ceph::profile::client
diff --git a/puppet/manifests/overcloud_controller.pp b/puppet/manifests/overcloud_controller.pp
index 7312c774..896545c3 100644
--- a/puppet/manifests/overcloud_controller.pp
+++ b/puppet/manifests/overcloud_controller.pp
@@ -366,7 +366,6 @@ if hiera('step') >= 3 {
include ::swift::proxy::keystone
include ::swift::proxy::authtoken
include ::swift::proxy::staticweb
- include ::swift::proxy::ceilometer
include ::swift::proxy::ratelimit
include ::swift::proxy::catch_errors
include ::swift::proxy::tempurl
diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp
index 3c986e2f..3b5b8fe4 100644
--- a/puppet/manifests/overcloud_controller_pacemaker.pp
+++ b/puppet/manifests/overcloud_controller_pacemaker.pp
@@ -37,6 +37,8 @@ if $::hostname == downcase(hiera('bootstrap_nodeid')) {
$sync_db = false
}
+$enable_fencing = str2bool(hiera('enable_fencing', 'false')) and hiera('step') >= 5
+
# When to start and enable services which haven't been Pacemakerized
# FIXME: remove when we start all OpenStack services using Pacemaker
# (occurrences of this variable will be gradually replaced with false)
@@ -56,6 +58,7 @@ if hiera('step') >= 1 {
controller_hosts => $controller_node_ips,
controller_hosts_names => $controller_node_names,
manage_vip => false,
+ mysql_clustercheck => true,
haproxy_service_manage => false,
}
@@ -71,7 +74,13 @@ if hiera('step') >= 1 {
setup_cluster => $pacemaker_master,
}
class { '::pacemaker::stonith':
- disable => true,
+ disable => !$enable_fencing,
+ }
+ if $enable_fencing {
+ include tripleo::fencing
+
+ # enable stonith after all fencing devices have been created
+ Class['tripleo::fencing'] -> Class['pacemaker::stonith']
}
# Only configure RabbitMQ in this step, don't start it yet to
@@ -156,6 +165,7 @@ if hiera('step') >= 1 {
config_file => $mysql_config_file,
override_options => $mysqld_options,
service_manage => false,
+ service_enabled => false,
}
}
@@ -173,13 +183,78 @@ if hiera('step') >= 2 {
# parameters here to configure pacemaker VIPs. The configuration
# of pacemaker VIPs could move into puppet-tripleo or we should
# make use of less specific hiera parameters here for the settings.
+ pacemaker::resource::service { 'haproxy':
+ clone_params => true,
+ }
+
$control_vip = hiera('tripleo::loadbalancer::controller_virtual_ip')
pacemaker::resource::ip { 'control_vip':
ip_address => $control_vip,
}
+ pacemaker::constraint::base { 'control_vip-then-haproxy':
+ constraint_type => 'order',
+ first_resource => "ip-${control_vip}",
+ second_resource => 'haproxy-clone',
+ first_action => 'start',
+ second_action => 'start',
+ constraint_params => 'kind=Optional',
+ require => [Pacemaker::Resource::Service['haproxy'],
+ Pacemaker::Resource::Ip['control_vip']],
+ }
+ pacemaker::constraint::colocation { 'control_vip-with-haproxy':
+ source => "ip-${control_vip}",
+ target => 'haproxy-clone',
+ score => 'INFINITY',
+ require => [Pacemaker::Resource::Service['haproxy'],
+ Pacemaker::Resource::Ip['control_vip']],
+ }
+
$public_vip = hiera('tripleo::loadbalancer::public_virtual_ip')
- pacemaker::resource::ip { 'public_vip':
- ip_address => $public_vip,
+ if $public_vip and $public_vip != $control_vip {
+ pacemaker::resource::ip { 'public_vip':
+ ip_address => $public_vip,
+ }
+ pacemaker::constraint::base { 'public_vip-then-haproxy':
+ constraint_type => 'order',
+ first_resource => "ip-${public_vip}",
+ second_resource => 'haproxy-clone',
+ first_action => 'start',
+ second_action => 'start',
+ constraint_params => 'kind=Optional',
+ require => [Pacemaker::Resource::Service['haproxy'],
+ Pacemaker::Resource::Ip['public_vip']],
+ }
+ pacemaker::constraint::colocation { 'public_vip-with-haproxy':
+ source => "ip-${public_vip}",
+ target => 'haproxy-clone',
+ score => 'INFINITY',
+ require => [Pacemaker::Resource::Service['haproxy'],
+ Pacemaker::Resource::Ip['public_vip']],
+ }
+ }
+
+ $redis_vip = hiera('redis_vip')
+ if $redis_vip and $redis_vip != $control_vip {
+ pacemaker::resource::ip { 'redis_vip':
+ ip_address => $redis_vip,
+ }
+ pacemaker::constraint::base { 'redis_vip-then-haproxy':
+ constraint_type => 'order',
+ first_resource => "ip-${redis_vip}",
+ second_resource => 'haproxy-clone',
+ first_action => 'start',
+ second_action => 'start',
+ constraint_params => 'kind=Optional',
+ require => [Pacemaker::Resource::Service['haproxy'],
+ Pacemaker::Resource::Ip['redis_vip']],
+ }
+ pacemaker::constraint::colocation { 'redis_vip-with-haproxy':
+ source => "ip-${redis_vip}",
+ target => 'haproxy-clone',
+ score => 'INFINITY',
+ require => [Pacemaker::Resource::Service['haproxy'],
+ Pacemaker::Resource::Ip['redis_vip']],
+ }
}
$internal_api_vip = hiera('tripleo::loadbalancer::internal_api_virtual_ip')
@@ -187,6 +262,23 @@ if hiera('step') >= 2 {
pacemaker::resource::ip { 'internal_api_vip':
ip_address => $internal_api_vip,
}
+ pacemaker::constraint::base { 'internal_api_vip-then-haproxy':
+ constraint_type => 'order',
+ first_resource => "ip-${internal_api_vip}",
+ second_resource => 'haproxy-clone',
+ first_action => 'start',
+ second_action => 'start',
+ constraint_params => 'kind=Optional',
+ require => [Pacemaker::Resource::Service['haproxy'],
+ Pacemaker::Resource::Ip['internal_api_vip']],
+ }
+ pacemaker::constraint::colocation { 'internal_api_vip-with-haproxy':
+ source => "ip-${internal_api_vip}",
+ target => 'haproxy-clone',
+ score => 'INFINITY',
+ require => [Pacemaker::Resource::Service['haproxy'],
+ Pacemaker::Resource::Ip['internal_api_vip']],
+ }
}
$storage_vip = hiera('tripleo::loadbalancer::storage_virtual_ip')
@@ -194,6 +286,23 @@ if hiera('step') >= 2 {
pacemaker::resource::ip { 'storage_vip':
ip_address => $storage_vip,
}
+ pacemaker::constraint::base { 'storage_vip-then-haproxy':
+ constraint_type => 'order',
+ first_resource => "ip-${storage_vip}",
+ second_resource => 'haproxy-clone',
+ first_action => 'start',
+ second_action => 'start',
+ constraint_params => 'kind=Optional',
+ require => [Pacemaker::Resource::Service['haproxy'],
+ Pacemaker::Resource::Ip['storage_vip']],
+ }
+ pacemaker::constraint::colocation { 'storage_vip-with-haproxy':
+ source => "ip-${storage_vip}",
+ target => 'haproxy-clone',
+ score => 'INFINITY',
+ require => [Pacemaker::Resource::Service['haproxy'],
+ Pacemaker::Resource::Ip['storage_vip']],
+ }
}
$storage_mgmt_vip = hiera('tripleo::loadbalancer::storage_mgmt_virtual_ip')
@@ -201,11 +310,25 @@ if hiera('step') >= 2 {
pacemaker::resource::ip { 'storage_mgmt_vip':
ip_address => $storage_mgmt_vip,
}
+ pacemaker::constraint::base { 'storage_mgmt_vip-then-haproxy':
+ constraint_type => 'order',
+ first_resource => "ip-${storage_mgmt_vip}",
+ second_resource => 'haproxy-clone',
+ first_action => 'start',
+ second_action => 'start',
+ constraint_params => 'kind=Optional',
+ require => [Pacemaker::Resource::Service['haproxy'],
+ Pacemaker::Resource::Ip['storage_mgmt_vip']],
+ }
+ pacemaker::constraint::colocation { 'storage_mgmt_vip-with-haproxy':
+ source => "ip-${storage_mgmt_vip}",
+ target => 'haproxy-clone',
+ score => 'INFINITY',
+ require => [Pacemaker::Resource::Service['haproxy'],
+ Pacemaker::Resource::Ip['storage_mgmt_vip']],
+ }
}
- pacemaker::resource::service { 'haproxy':
- clone_params => true,
- }
pacemaker::resource::service { $::memcached::params::service_name :
clone_params => true,
require => Class['::memcached'],
@@ -227,6 +350,7 @@ if hiera('step') >= 2 {
# NOTE (spredzy) : The replset can only be run
# once all the nodes have joined the cluster.
mongodb_conn_validator { $mongo_node_ips_with_port :
+ timeout => '600',
require => Pacemaker::Resource::Service[$::mongodb::params::service_name],
before => Mongodb_replset[$mongodb_replset],
}
@@ -252,28 +376,6 @@ if hiera('step') >= 2 {
resource_params => 'wait_last_known_master=true',
require => Class['::redis'],
}
- $redis_vip = hiera('redis_vip')
- if $redis_vip and $redis_vip != $control_vip {
- pacemaker::resource::ip { 'vip-redis':
- ip_address => $redis_vip,
- }
- }
- pacemaker::constraint::base { 'redis-master-then-vip-redis':
- constraint_type => 'order',
- first_resource => 'redis-master',
- second_resource => "ip-${redis_vip}",
- first_action => 'promote',
- second_action => 'start',
- require => [Pacemaker::Resource::Ocf['redis'],
- Pacemaker::Resource::Ip['vip-redis']],
- }
- pacemaker::constraint::colocation { 'vip-redis-with-redis-master':
- source => "ip-${redis_vip}",
- target => 'redis-master',
- score => 'INFINITY',
- require => [Pacemaker::Resource::Ocf['redis'],
- Pacemaker::Resource::Ip['vip-redis']],
- }
}
@@ -841,6 +943,16 @@ if hiera('step') >= 4 {
}
# Neutron
+ # NOTE(gfidente): Neutron will try to populate the database with some data
+ # as soon as neutron-server is started; to avoid races we want to make this
+ # happen only on one node, before normal Pacemaker initialization
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1233061
+ exec { 'neutron-server-start-wait-stop' :
+ command => "systemctl start neutron-server && \
+ sleep 5s && \
+ systemctl stop neutron-server",
+ path => ["/usr/bin", "/usr/sbin"],
+ } ->
pacemaker::resource::service { $::neutron::params::server_service:
op_params => "start timeout=90",
clone_params => "interleave=true",
@@ -1111,6 +1223,15 @@ if hiera('step') >= 4 {
clone_params => 'interleave=true',
resource_params => 'startdelay=10',
}
+ pacemaker::constraint::base { 'keystone-then-ceilometer-central-constraint':
+ constraint_type => 'order',
+ first_resource => "${::keystone::params::service_name}-clone",
+ second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
+ first_action => 'start',
+ second_action => 'start',
+ require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
+ Pacemaker::Resource::Service[$::keystone::params::service_name]],
+ }
pacemaker::constraint::base { 'ceilometer-central-then-ceilometer-collector-constraint':
constraint_type => 'order',
first_resource => "${::ceilometer::params::agent_central_service_name}-clone",
@@ -1211,24 +1332,6 @@ if hiera('step') >= 4 {
Pacemaker::Resource::Service[$::mongodb::params::service_name]],
}
}
- pacemaker::constraint::base { 'vip-redis-then-ceilometer-central':
- constraint_type => 'order',
- first_resource => "ip-${redis_vip}",
- second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
- Pacemaker::Resource::Ip['vip-redis']],
- }
- pacemaker::constraint::base { 'keystone-then-ceilometer-central-constraint':
- constraint_type => 'order',
- first_resource => "${::keystone::params::service_name}-clone",
- second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
- Pacemaker::Resource::Service[$::keystone::params::service_name]],
- }
# Heat
pacemaker::resource::service { $::heat::params::api_service_name :
@@ -1243,6 +1346,15 @@ if hiera('step') >= 4 {
pacemaker::resource::service { $::heat::params::engine_service_name :
clone_params => 'interleave=true',
}
+ pacemaker::constraint::base { 'keystone-then-heat-api-constraint':
+ constraint_type => 'order',
+ first_resource => "${::keystone::params::service_name}-clone",
+ second_resource => "${::heat::params::api_service_name}-clone",
+ first_action => 'start',
+ second_action => 'start',
+ require => [Pacemaker::Resource::Service[$::heat::params::api_service_name],
+ Pacemaker::Resource::Service[$::keystone::params::service_name]],
+ }
pacemaker::constraint::base { 'heat-api-then-heat-api-cfn-constraint':
constraint_type => 'order',
first_resource => "${::heat::params::api_service_name}-clone",
diff --git a/puppet/swift-storage-post.yaml b/puppet/swift-storage-post.yaml
index 7a3c1abb..ee50c86a 100644
--- a/puppet/swift-storage-post.yaml
+++ b/puppet/swift-storage-post.yaml
@@ -4,6 +4,10 @@ description: 'OpenStack swift storage node post deployment for Puppet'
parameters:
servers:
type: json
+ NodeConfigIdentifiers:
+ type: json
+ description: Value which changes if the node configuration may need to be re-applied
+
resources:
@@ -21,6 +25,8 @@ resources:
properties:
servers: {get_param: servers}
config: {get_resource: StoragePuppetConfig}
+ input_values:
+ update_identifier: {get_param: NodeConfigIdentifiers}
StorageRingbuilderPuppetConfig:
type: OS::Heat::SoftwareConfig
@@ -37,6 +43,8 @@ resources:
properties:
servers: {get_param: servers}
config: {get_resource: StorageRingbuilderPuppetConfig}
+ input_values:
+ update_identifier: {get_param: NodeConfigIdentifiers}
# Note, this should come last, so use depends_on to ensure
# this is created after any other resources.
diff --git a/puppet/swift-storage-puppet.yaml b/puppet/swift-storage-puppet.yaml
index 15481032..82922a87 100644
--- a/puppet/swift-storage-puppet.yaml
+++ b/puppet/swift-storage-puppet.yaml
@@ -216,3 +216,7 @@ outputs:
storage_mgmt_ip_address:
description: IP address of the server in the storage_mgmt network
value: {get_attr: [StorageMgmtPort, ip_address]}
+ config_identifier:
+ description: identifier which changes if the node configuration may need re-applying
+ value: {get_attr: [SwiftStorageHieraDeploy, deploy_stdout]}
+
diff --git a/swift-storage-post.yaml b/swift-storage-post.yaml
index 53490d71..1b1c406d 100644
--- a/swift-storage-post.yaml
+++ b/swift-storage-post.yaml
@@ -6,6 +6,9 @@ description: 'Swift Storage Post Deployment'
parameters:
servers:
type: json
+ NodeConfigIdentifiers:
+ type: json
+ description: Value which changes if the node configuration may need to be re-applied
resources:
diff --git a/swift-storage.yaml b/swift-storage.yaml
index 466f2fbb..e4cacf3c 100644
--- a/swift-storage.yaml
+++ b/swift-storage.yaml
@@ -198,3 +198,6 @@ outputs:
storage_mgmt_ip_address:
description: IP address of the server in the storage_mgmt network
value: {get_attr: [StorageMgmtPort, ip_address]}
+ config_identifier:
+ description: identifier which changes if the node configuration may need re-applying
+ value: "None - NO_SIGNAL"