Diffstat (limited to 'puppet')
21 files changed, 442 insertions, 68 deletions
diff --git a/puppet/ceph-storage-puppet.yaml b/puppet/ceph-storage-puppet.yaml index 245d8ebb..2d089419 100644 --- a/puppet/ceph-storage-puppet.yaml +++ b/puppet/ceph-storage-puppet.yaml @@ -76,6 +76,12 @@ resources: StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]} StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]} + NetIpMap: + type: OS::TripleO::Network::Ports::NetIpMap + properties: + StorageIp: {get_attr: [StoragePort, ip_address]} + StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]} + NetIpSubnetMap: type: OS::TripleO::Network::Ports::NetIpMap properties: @@ -145,7 +151,7 @@ outputs: str_replace: template: "IP HOST.localdomain HOST" params: - IP: {get_attr: [CephStorage, networks, ctlplane, 0]} + IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CephStorageHostnameResolveNetwork]}]} HOST: {get_attr: [CephStorage, name]} nova_server_resource: description: Heat resource handle for the ceph storage server @@ -160,4 +166,3 @@ outputs: config_identifier: description: identifier which changes if the node configuration may need re-applying value: {get_attr: [CephStorageDeployment, deploy_stdout]} - diff --git a/puppet/cinder-storage-puppet.yaml b/puppet/cinder-storage-puppet.yaml index cc8d17c4..94a0a5c4 100644 --- a/puppet/cinder-storage-puppet.yaml +++ b/puppet/cinder-storage-puppet.yaml @@ -280,7 +280,7 @@ outputs: str_replace: template: "IP HOST.localdomain HOST" params: - IP: {get_attr: [BlockStorage, networks, ctlplane, 0]} + IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, BlockStorageHostnameResolveNetwork]}]} HOST: {get_attr: [BlockStorage, name]} nova_server_resource: description: Heat resource handle for the block storage server diff --git a/puppet/compute-puppet.yaml b/puppet/compute-puppet.yaml index 7e49bc22..357a097d 100644 --- a/puppet/compute-puppet.yaml +++ b/puppet/compute-puppet.yaml @@ -351,7 +351,7 @@ resources: nova::compute::libvirt::libvirt_virt_type: {get_input: nova_compute_libvirt_type} nova_api_host: {get_input: nova_api_host} nova::compute::vncproxy_host: {get_input: nova_public_ip} - nova_enable_rbd_backend: {get_input: nova_enable_rbd_backend} + nova::compute::rbd::ephemeral_storage: {get_input: nova_enable_rbd_backend} nova_password: {get_input: nova_password} nova::compute::vncserver_proxyclient_address: {get_input: nova_vnc_proxyclient_address} ceilometer::debug: {get_input: debug} @@ -510,7 +510,7 @@ outputs: str_replace: template: "IP HOST.localdomain HOST" params: - IP: {get_attr: [NovaCompute, networks, ctlplane, 0]} + IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ComputeHostnameResolveNetwork]}]} HOST: {get_attr: [NovaCompute, name]} nova_server_resource: description: Heat resource handle for the Nova compute server @@ -519,4 +519,3 @@ outputs: config_identifier: description: identifier which changes if the node configuration may need re-applying value: {get_attr: [NovaComputeDeployment, deploy_stdout]} - diff --git a/puppet/controller-post-puppet.yaml b/puppet/controller-post-puppet.yaml index e88561e6..49cbe1e2 100644 --- a/puppet/controller-post-puppet.yaml +++ b/puppet/controller-post-puppet.yaml @@ -83,6 +83,16 @@ resources: step: 4 update_identifier: {get_param: NodeConfigIdentifiers} + ControllerOvercloudServicesDeployment_Step6: + type: OS::Heat::StructuredDeployments + depends_on: ControllerOvercloudServicesDeployment_Step5 + properties: + servers: {get_param: servers} + config: {get_resource: ControllerPuppetConfig} + input_values: + step: 5 + update_identifier: 
{get_param: NodeConfigIdentifiers} + # Note, this should come last, so use depends_on to ensure # this is created after any other resources. ExtraConfig: diff --git a/puppet/controller-puppet.yaml b/puppet/controller-puppet.yaml index 1e563331..3f01b094 100644 --- a/puppet/controller-puppet.yaml +++ b/puppet/controller-puppet.yaml @@ -72,6 +72,10 @@ parameters: default: '' description: Set to True to enable debugging on all services. type: string + EnableFencing: + default: false + description: Whether to enable fencing in Pacemaker or not. + type: boolean EnableGalera: default: true description: Whether to use Galera instead of regular MariaDB. @@ -122,6 +126,38 @@ parameters: } } type: json + FencingConfig: + default: {} + description: | + Pacemaker fencing configuration. The JSON should have + the following structure: + { + "devices": [ + { + "agent": "AGENT_NAME", + "host_mac": "HOST_MAC_ADDRESS", + "params": {"PARAM_NAME": "PARAM_VALUE"} + } + ] + } + For instance: + { + "devices": [ + { + "agent": "fence_xvm", + "host_mac": "52:54:00:aa:bb:cc", + "params": { + "multicast_address": "225.0.0.12", + "port": "baremetal_0", + "manage_fw": true, + "manage_key_file": true, + "key_file": "/etc/fence_xvm.key", + "key_file_password": "abcdef" + } + } + ] + } + type: json Flavor: description: Flavor for control nodes to request when deploying. type: string @@ -222,10 +258,18 @@ parameters: lower level default. type: number default: 0 + MysqlMaxConnections: + description: Configures MySQL max_connections config setting + type: number + default: 1024 MysqlRootPassword: type: string hidden: true default: '' # Has to be here because of the ignored empty value bug + NeutronExternalNetworkBridge: + description: Name of bridge used for external network traffic. + type: string + default: 'br-ex' NeutronBridgeMappings: description: > The OVS logical->physical bridge mappings to use. 
See the Neutron @@ -248,6 +292,10 @@ parameters: default: 'False' description: Whether to enable l3-agent HA type: string + NeutronDhcpAgentsPerNetwork: + type: number + default: 3 + description: The number of neutron dhcp agents to schedule per network NeutronDVR: default: 'False' description: Whether to configure Neutron Distributed Virtual Routers @@ -634,10 +682,12 @@ resources: - - 'http://' - {get_param: KeystonePublicApiVirtualIP} - ':5000/v2.0/' + enable_fencing: {get_param: EnableFencing} enable_galera: {get_param: EnableGalera} enable_ceph_storage: {get_param: EnableCephStorage} enable_swift_storage: {get_param: EnableSwiftStorage} mysql_innodb_buffer_pool_size: {get_param: MysqlInnodbBufferPoolSize} + mysql_max_connections: {get_param: MysqlMaxConnections} mysql_root_password: {get_param: MysqlRootPassword} mysql_cluster_name: str_replace: @@ -651,6 +701,7 @@ resources: neutron_mechanism_drivers: {get_param: NeutronMechanismDrivers} neutron_allow_l3agent_failover: {get_param: NeutronAllowL3AgentFailover} neutron_l3_ha: {get_param: NeutronL3HA} + neutron_dhcp_agents_per_network: {get_param: NeutronDhcpAgentsPerNetwork} neutron_network_vlan_ranges: str_replace: template: "['RANGES']" @@ -660,6 +711,7 @@ resources: - "','" - {get_param: NeutronNetworkVLANRanges} neutron_bridge_mappings: {get_param: NeutronBridgeMappings} + neutron_external_network_bridge: {get_param: NeutronExternalNetworkBridge} neutron_public_interface: {get_param: NeutronPublicInterface} neutron_public_interface_raw_device: {get_param: NeutronPublicInterfaceRawDevice} neutron_public_interface_default_route: {get_param: NeutronPublicInterfaceDefaultRoute} @@ -714,6 +766,7 @@ resources: - '@' - {get_param: MysqlVirtualIP} - '/nova' + fencing_config: {get_param: FencingConfig} pcsd_password: {get_param: PcsdPassword} rabbit_username: {get_param: RabbitUserName} rabbit_password: {get_param: RabbitPassword} @@ -786,6 +839,7 @@ resources: - vip_data # provided by vip-config - '"%{::osfamily}"' - common + - cinder_netapp_data # Optionally provided by ControllerExtraConfigPre datafiles: common: raw_data: {get_file: hieradata/common.yaml} @@ -803,7 +857,9 @@ resources: bootstack_nodeid: {get_input: bootstack_nodeid} # Pacemaker + enable_fencing: {get_input: enable_fencing} hacluster_pwd: {get_input: pcsd_password} + tripleo::fencing::config: {get_input: fencing_config} # Swift swift::proxy::proxy_local_net_ip: {get_input: swift_proxy_network} @@ -902,6 +958,7 @@ resources: enable_ceph_storage: {get_input: enable_ceph_storage} enable_swift_storage: {get_input: enable_swift_storage} mysql_innodb_buffer_pool_size: {get_input: mysql_innodb_buffer_pool_size} + mysql_max_connections: {get_input: mysql_max_connections} mysql::server::root_password: {get_input: mysql_root_password} mysql_cluster_name: {get_input: mysql_cluster_name} mysql_bind_host: {get_input: mysql_network} @@ -916,6 +973,7 @@ resources: neutron::server::auth_uri: {get_input: keystone_auth_uri} neutron::server::identity_uri: {get_input: keystone_identity_uri} neutron::server::database_connection: {get_input: neutron_dsn} + neutron::agents::l3::external_network_bridge: {get_input: neutron_external_network_bridge} neutron::agents::ml2::ovs::enable_tunneling: {get_input: neutron_enable_tunneling} neutron::agents::ml2::ovs::local_ip: {get_input: neutron_local_ip} neutron_flat_networks: {get_input: neutron_flat_networks} @@ -926,6 +984,7 @@ resources: neutron_mechanism_drivers: {get_input: neutron_mechanism_drivers} 
neutron::server::allow_automatic_l3agent_failover: {get_input: neutron_allow_l3agent_failover} neutron::server::l3_ha: {get_input: neutron_l3_ha} + neutron::dhcp_agents_per_network: {get_input: neutron_dhcp_agents_per_network} neutron::plugins::ml2::network_vlan_ranges: {get_input: neutron_network_vlan_ranges} neutron_bridge_mappings: {get_input: neutron_bridge_mappings} neutron_public_interface: {get_input: neutron_public_interface} @@ -1001,6 +1060,13 @@ resources: tripleo::loadbalancer::public_virtual_interface: {get_input: public_virtual_interface} enable_package_install: {get_input: enable_package_install} + # Hook for site-specific additional pre-deployment config, e.g extra hieradata + ControllerExtraConfigPre: + depends_on: ControllerDeployment + type: OS::TripleO::ControllerExtraConfigPre + properties: + server: {get_resource: Controller} + UpdateConfig: type: OS::TripleO::Tasks::PackageUpdate @@ -1049,7 +1115,7 @@ outputs: str_replace: template: IP HOST.localdomain HOST CLOUDNAME params: - IP: {get_attr: [Controller, networks, ctlplane, 0]} + IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ControllerHostnameResolveNetwork]}]} HOST: {get_attr: [Controller, name]} CLOUDNAME: {get_param: CloudName} nova_server_resource: @@ -1072,4 +1138,8 @@ outputs: IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, MemcachedNetwork]}]} config_identifier: description: identifier which changes if the controller configuration may need re-applying - value: {get_attr: [ControllerDeployment, deploy_stdout]} + value: + list_join: + - ',' + - - {get_attr: [ControllerDeployment, deploy_stdout]} + - {get_attr: [ControllerExtraConfigPre, deploy_stdout]} diff --git a/puppet/extraconfig/pre_deploy/README b/puppet/extraconfig/pre_deploy/README new file mode 100644 index 00000000..51fc3406 --- /dev/null +++ b/puppet/extraconfig/pre_deploy/README @@ -0,0 +1,12 @@ +This tree contains additional configuration which happens "pre deployment", +e.g before the OpenStack services themselves are configured but after the +nodes themselves have been provisioned and initially configured. + +Typically for puppet deployments these additional configs will put in place +hieradata which is then consumed by the subsequent puppet configuration +which occurs during the post-deployment phase. 
+ +If you need to specify multiple configs, you can chain them together in a +template, see the multiple.yaml example: + + OS::TripleO::ControllerExtraConfigPre: puppet/extraconfig/pre_deploy/controller/multiple.yaml diff --git a/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml b/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml new file mode 100644 index 00000000..1d982dff --- /dev/null +++ b/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml @@ -0,0 +1,145 @@ +heat_template_version: 2015-04-30 + +description: Configure hieradata for Cinder Netapp configuration + +parameters: + server: + description: ID of the controller node to apply this config to + type: string + + # Config specific parameters, to be provided via parameter_defaults + CinderEnableNetappBackend: + type: string + default: false + CinderNetappBackendName: + type: string + default: 'tripleo_netapp' + CinderNetappLogin: + type: string + CinderNetappPassword: + type: string + CinderNetappServerHostname: + type: string + CinderNetappServerPort: + type: string + default: '80' + CinderNetappSizeMultiplier: + type: string + default: '1.2' + CinderNetappStorageFamily: + type: string + default: 'ontap_cluster' + CinderNetappStorageProtocol: + type: string + default: 'nfs' + CinderNetappTransportType: + type: string + default: 'http' + CinderNetappVfiler: + type: string + default: '' + CinderNetappVolumeList: + type: string + default: '' + CinderNetappVserver: + type: string + default: '' + CinderNetappPartnerBackendName: + type: string + default: '' + CinderNetappNfsShares: + type: string + default: '' + CinderNetappNfsSharesConfig: + type: string + default: '/etc/cinder/shares.conf' + CinderNetappNfsMountOptions: + type: string + default: '' + CinderNetappCopyOffloadToolPath: + type: string + default: '' + CinderNetappControllerIps: + type: string + default: '' + CinderNetappSaPassword: + type: string + default: '' + CinderNetappStoragePools: + type: string + default: '' + CinderNetappEseriesHostType: + type: string + default: 'linux_dm_mp' + CinderNetappWebservicePath: + type: string + default: '/devmgr/v2' + +resources: + CinderNetappConfig: + type: OS::Heat::StructuredConfig + properties: + group: os-apply-config + config: + hiera: + datafiles: + cinder_netapp_data: + mapped_data: + cinder_enable_netapp_backend: {get_input: EnableNetappBackend} + cinder::backend::netapp::title: {get_input: NetappBackendName} + cinder::backend::netapp::netapp_login: {get_input: NetappLogin} + cinder::backend::netapp::netapp_password: {get_input: NetappPassword} + cinder::backend::netapp::netapp_hostname: {get_input: NetappServerHostname} + cinder::backend::netapp::netapp_server_port: {get_input: NetappServerPort} + cinder::backend::netapp::netapp_size_multiplier: {get_input: NetappSizeMultiplier} + cinder::backend::netapp::netapp_storage_family: {get_input: NetappStorageFamily} + cinder::backend::netapp::netapp_storage_protocol: {get_input: NetappStorageProtocol} + cinder::backend::netapp::netapp_transport_type: {get_input: NetappTransportType} + cinder::backend::netapp::netapp_vfiler: {get_input: NetappVfiler} + cinder::backend::netapp::netapp_volume_list: {get_input: NetappVolumeList} + cinder::backend::netapp::netapp_vserver: {get_input: NetappVserver} + cinder::backend::netapp::netapp_partner_backend_name: {get_input: NetappPartnerBackendName} + cinder::backend::netapp::nfs_shares: {get_input: NetappNfsShares} + cinder::backend::netapp::nfs_shares_config: {get_input: NetappNfsSharesConfig} + 
cinder::backend::netapp::nfs_mount_options: {get_input: NetappNfsMountOptions} + cinder::backend::netapp::netapp_copyoffload_tool_path: {get_input: NetappCopyOffloadToolPath} + cinder::backend::netapp::netapp_controller_ips: {get_input: NetappControllerIps} + cinder::backend::netapp::netapp_sa_password: {get_input: NetappSaPassword} + cinder::backend::netapp::netapp_storage_pools: {get_input: NetappStoragePools} + cinder::backend::netapp::netapp_eseries_host_type: {get_input: NetappEseriesHostType} + cinder::backend::netapp::netapp_webservice_path: {get_input: NetappWebservicePath} + + CinderNetappDeployment: + type: OS::Heat::StructuredDeployment + properties: + config: {get_resource: CinderNetappConfig} + server: {get_param: server} + input_values: + EnableNetappBackend: {get_param: CinderEnableNetappBackend} + NetappBackendName: {get_param: CinderNetappBackendName} + NetappLogin: {get_param: CinderNetappLogin} + NetappPassword: {get_param: CinderNetappPassword} + NetappServerHostname: {get_param: CinderNetappServerHostname} + NetappServerPort: {get_param: CinderNetappServerPort} + NetappSizeMultiplier: {get_param: CinderNetappSizeMultiplier} + NetappStorageFamily: {get_param: CinderNetappStorageFamily} + NetappStorageProtocol: {get_param: CinderNetappStorageProtocol} + NetappTransportType: {get_param: CinderNetappTransportType} + NetappVfiler: {get_param: CinderNetappVfiler} + NetappVolumeList: {get_param: CinderNetappVolumeList} + NetappVserver: {get_param: CinderNetappVserver} + NetappPartnerBackendName: {get_param: CinderNetappPartnerBackendName} + NetappNfsShares: {get_param: CinderNetappNfsShares} + NetappNfsSharesConfig: {get_param: CinderNetappNfsSharesConfig} + NetappNfsMountOptions: {get_param: CinderNetappNfsMountOptions} + NetappCopyOffloadToolPath: {get_param: CinderNetappCopyOffloadToolPath} + NetappControllerIps: {get_param: CinderNetappControllerIps} + NetappSaPassword: {get_param: CinderNetappSaPassword} + NetappStoragePools: {get_param: CinderNetappStoragePools} + NetappEseriesHostType: {get_param: CinderNetappEseriesHostType} + NetappWebservicePath: {get_param: CinderNetappWebservicePath} + +outputs: + deploy_stdout: + description: Deployment reference, used to trigger puppet apply on changes + value: {get_attr: [CinderNetappDeployment, deploy_stdout]} diff --git a/puppet/extraconfig/pre_deploy/controller/multiple.yaml b/puppet/extraconfig/pre_deploy/controller/multiple.yaml new file mode 100644 index 00000000..f949a397 --- /dev/null +++ b/puppet/extraconfig/pre_deploy/controller/multiple.yaml @@ -0,0 +1,18 @@ +heat_template_version: 2014-10-16 +description: 'Extra Pre-Deployment Config, multiple' +parameters: + server: + type: string + +resources: + + CinderNetappConfig: + type: cinder-netapp.yaml + properties: + server: {get_param: server} + + # Note depends_on may be used for serialization if ordering is important + OtherConfig: + type: other.yaml + properties: + server: {get_param: server} diff --git a/puppet/extraconfig/pre_deploy/default.yaml b/puppet/extraconfig/pre_deploy/default.yaml new file mode 100644 index 00000000..dcbc6811 --- /dev/null +++ b/puppet/extraconfig/pre_deploy/default.yaml @@ -0,0 +1,8 @@ +heat_template_version: 2014-10-16 +description: 'Noop Extra Pre-Deployment Config' +parameters: + server: + type: string +outputs: + deploy_stdout: + value: "None" diff --git a/puppet/hieradata/ceph.yaml b/puppet/hieradata/ceph.yaml index 280457df..6eb0e671 100644 --- a/puppet/hieradata/ceph.yaml +++ b/puppet/hieradata/ceph.yaml @@ -11,3 +11,5 @@ 
ceph_pools: - volumes - vms - images + +ceph_classes: []
\ No newline at end of file diff --git a/puppet/hieradata/compute.yaml b/puppet/hieradata/compute.yaml index 4915d3c8..63a3473d 100644 --- a/puppet/hieradata/compute.yaml +++ b/puppet/hieradata/compute.yaml @@ -16,3 +16,5 @@ nova::compute::rbd::libvirt_images_rbd_pool: 'vms' nova::compute::rbd::libvirt_rbd_secret_uuid: "%{hiera('ceph::profile::params::fsid')}" ceilometer::agent::auth::auth_tenant_name: 'service' + +compute_classes: []
\ No newline at end of file diff --git a/puppet/hieradata/controller.yaml b/puppet/hieradata/controller.yaml index 3de9bd91..72c10c26 100644 --- a/puppet/hieradata/controller.yaml +++ b/puppet/hieradata/controller.yaml @@ -65,7 +65,6 @@ glance::backend::rbd::rbd_store_user: 'openstack' neutron::core_plugin: 'ml2' neutron::service_plugins: - 'neutron.services.l3_router.l3_router_plugin.L3RouterPlugin' -neutron::dhcp_agents_per_network: 2 neutron::server::sync_db: true neutron::agents::dhcp::dnsmasq_config_file: /etc/neutron/dnsmasq-neutron.conf @@ -77,7 +76,6 @@ nova::api::osapi_v3: true cinder::scheduler::scheduler_driver: cinder.scheduler.filter_scheduler.FilterScheduler # heat -heat::engine::heat_stack_user_role: '' heat::engine::configure_delegated_roles: false heat::engine::trusts_delegated_roles: [] @@ -111,3 +109,5 @@ tripleo::loadbalancer::heat_api: true tripleo::loadbalancer::heat_cloudwatch: true tripleo::loadbalancer::heat_cfn: true tripleo::loadbalancer::horizon: true + +controller_classes: [] diff --git a/puppet/hieradata/object.yaml b/puppet/hieradata/object.yaml index 59a8b1cf..3a379035 100644 --- a/puppet/hieradata/object.yaml +++ b/puppet/hieradata/object.yaml @@ -14,3 +14,5 @@ swift::proxy::keystone::operator_roles: - admin - swiftoperator - ResellerAdmin + +object_classes: []
\ No newline at end of file diff --git a/puppet/hieradata/volume.yaml b/puppet/hieradata/volume.yaml index ad9e2c2a..9f3907ef 100644 --- a/puppet/hieradata/volume.yaml +++ b/puppet/hieradata/volume.yaml @@ -2,3 +2,5 @@ # cinder cinder::scheduler::scheduler_driver: cinder.scheduler.filter_scheduler.FilterScheduler + +volume_classes: []
\ No newline at end of file diff --git a/puppet/manifests/overcloud_cephstorage.pp b/puppet/manifests/overcloud_cephstorage.pp index b645f9fe..21fd5f98 100644 --- a/puppet/manifests/overcloud_cephstorage.pp +++ b/puppet/manifests/overcloud_cephstorage.pp @@ -31,4 +31,6 @@ if count(hiera('ntp::servers')) > 0 { } include ::ceph::profile::client -include ::ceph::profile::osd
\ No newline at end of file +include ::ceph::profile::osd + +hiera_include('ceph_classes')
\ No newline at end of file diff --git a/puppet/manifests/overcloud_compute.pp b/puppet/manifests/overcloud_compute.pp index 00bab7f6..67a73dda 100644 --- a/puppet/manifests/overcloud_compute.pp +++ b/puppet/manifests/overcloud_compute.pp @@ -51,7 +51,7 @@ nova_config { 'DEFAULT/linuxnet_interface_driver': value => 'nova.network.linux_net.LinuxOVSInterfaceDriver'; } -$nova_enable_rbd_backend = hiera('nova_enable_rbd_backend', false) +$nova_enable_rbd_backend = hiera('nova::compute::rbd::ephemeral_storage', false) if $nova_enable_rbd_backend { include ::ceph::profile::client @@ -88,3 +88,5 @@ class { 'snmp': agentaddress => ['udp:161','udp6:[::1]:161'], snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ], } + +hiera_include('compute_classes') diff --git a/puppet/manifests/overcloud_controller.pp b/puppet/manifests/overcloud_controller.pp index bc20bad5..f17dc831 100644 --- a/puppet/manifests/overcloud_controller.pp +++ b/puppet/manifests/overcloud_controller.pp @@ -90,7 +90,7 @@ if hiera('step') >= 2 { override_options => { 'mysqld' => { 'bind-address' => hiera('mysql_bind_host'), - 'max_connections' => '1024', + 'max_connections' => hiera('mysql_max_connections'), 'open_files_limit' => '-1', }, } @@ -237,16 +237,18 @@ if hiera('step') >= 3 { $glance_backend = downcase(hiera('glance_backend', 'swift')) case $glance_backend { - swift: { $glance_store = 'glance.store.swift.Store' } - file: { $glance_store = 'glance.store.filesystem.Store' } - rbd: { $glance_store = 'glance.store.rbd.Store' } + swift: { $backend_store = 'glance.store.swift.Store' } + file: { $backend_store = 'glance.store.filesystem.Store' } + rbd: { $backend_store = 'glance.store.rbd.Store' } default: { fail('Unrecognized glance_backend parameter.') } } + $http_store = ['glance.store.http.Store'] + $glance_store = concat($http_store, $backend_store) # TODO: notifications, scrubber, etc. 
include ::glance class { 'glance::api': - known_stores => [$glance_store] + known_stores => $glance_store } include ::glance::registry include join(['::glance::backend::', $glance_backend]) @@ -436,4 +438,6 @@ if hiera('step') >= 3 { snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ], } + hiera_include('controller_classes') + } #END STEP 3 diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp index 045702d8..83d0d158 100644 --- a/puppet/manifests/overcloud_controller_pacemaker.pp +++ b/puppet/manifests/overcloud_controller_pacemaker.pp @@ -37,6 +37,8 @@ if $::hostname == downcase(hiera('bootstrap_nodeid')) { $sync_db = false } +$enable_fencing = str2bool(hiera('enable_fencing', 'false')) and hiera('step') >= 5 + # When to start and enable services which haven't been Pacemakerized # FIXME: remove when we start all OpenStack services using Pacemaker # (occurences of this variable will be gradually replaced with false) @@ -72,7 +74,13 @@ if hiera('step') >= 1 { setup_cluster => $pacemaker_master, } class { '::pacemaker::stonith': - disable => true, + disable => !$enable_fencing, + } + if $enable_fencing { + include tripleo::fencing + + # enable stonith after all fencing devices have been created + Class['tripleo::fencing'] -> Class['pacemaker::stonith'] } # Only configure RabbitMQ in this step, don't start it yet to @@ -132,7 +140,7 @@ if hiera('step') >= 1 { 'query_cache_size' => '0', 'query_cache_type' => '0', 'bind-address' => hiera('mysql_bind_host'), - 'max_connections' => '1024', + 'max_connections' => hiera('mysql_max_connections'), 'open_files_limit' => '-1', 'wsrep_provider' => '/usr/lib64/galera/libgalera_smm.so', 'wsrep_cluster_name' => 'galera_cluster', @@ -175,13 +183,78 @@ if hiera('step') >= 2 { # parameters here to configure pacemaker VIPs. The configuration # of pacemaker VIPs could move into puppet-tripleo or we should # make use of less specific hiera parameters here for the settings. 
+ pacemaker::resource::service { 'haproxy': + clone_params => true, + } + $control_vip = hiera('tripleo::loadbalancer::controller_virtual_ip') pacemaker::resource::ip { 'control_vip': ip_address => $control_vip, } + pacemaker::constraint::base { 'control_vip-then-haproxy': + constraint_type => 'order', + first_resource => "ip-${control_vip}", + second_resource => 'haproxy-clone', + first_action => 'start', + second_action => 'start', + constraint_params => 'kind=Optional', + require => [Pacemaker::Resource::Service['haproxy'], + Pacemaker::Resource::Ip['control_vip']], + } + pacemaker::constraint::colocation { 'control_vip-with-haproxy': + source => "ip-${control_vip}", + target => 'haproxy-clone', + score => 'INFINITY', + require => [Pacemaker::Resource::Service['haproxy'], + Pacemaker::Resource::Ip['control_vip']], + } + $public_vip = hiera('tripleo::loadbalancer::public_virtual_ip') - pacemaker::resource::ip { 'public_vip': - ip_address => $public_vip, + if $public_vip and $public_vip != $control_vip { + pacemaker::resource::ip { 'public_vip': + ip_address => $public_vip, + } + pacemaker::constraint::base { 'public_vip-then-haproxy': + constraint_type => 'order', + first_resource => "ip-${public_vip}", + second_resource => 'haproxy-clone', + first_action => 'start', + second_action => 'start', + constraint_params => 'kind=Optional', + require => [Pacemaker::Resource::Service['haproxy'], + Pacemaker::Resource::Ip['public_vip']], + } + pacemaker::constraint::colocation { 'public_vip-with-haproxy': + source => "ip-${public_vip}", + target => 'haproxy-clone', + score => 'INFINITY', + require => [Pacemaker::Resource::Service['haproxy'], + Pacemaker::Resource::Ip['public_vip']], + } + } + + $redis_vip = hiera('redis_vip') + if $redis_vip and $redis_vip != $control_vip { + pacemaker::resource::ip { 'redis_vip': + ip_address => $redis_vip, + } + pacemaker::constraint::base { 'redis_vip-then-haproxy': + constraint_type => 'order', + first_resource => "ip-${redis_vip}", + second_resource => 'haproxy-clone', + first_action => 'start', + second_action => 'start', + constraint_params => 'kind=Optional', + require => [Pacemaker::Resource::Service['haproxy'], + Pacemaker::Resource::Ip['redis_vip']], + } + pacemaker::constraint::colocation { 'redis_vip-with-haproxy': + source => "ip-${redis_vip}", + target => 'haproxy-clone', + score => 'INFINITY', + require => [Pacemaker::Resource::Service['haproxy'], + Pacemaker::Resource::Ip['redis_vip']], + } } $internal_api_vip = hiera('tripleo::loadbalancer::internal_api_virtual_ip') @@ -189,6 +262,23 @@ if hiera('step') >= 2 { pacemaker::resource::ip { 'internal_api_vip': ip_address => $internal_api_vip, } + pacemaker::constraint::base { 'internal_api_vip-then-haproxy': + constraint_type => 'order', + first_resource => "ip-${internal_api_vip}", + second_resource => 'haproxy-clone', + first_action => 'start', + second_action => 'start', + constraint_params => 'kind=Optional', + require => [Pacemaker::Resource::Service['haproxy'], + Pacemaker::Resource::Ip['internal_api_vip']], + } + pacemaker::constraint::colocation { 'internal_api_vip-with-haproxy': + source => "ip-${internal_api_vip}", + target => 'haproxy-clone', + score => 'INFINITY', + require => [Pacemaker::Resource::Service['haproxy'], + Pacemaker::Resource::Ip['internal_api_vip']], + } } $storage_vip = hiera('tripleo::loadbalancer::storage_virtual_ip') @@ -196,6 +286,23 @@ if hiera('step') >= 2 { pacemaker::resource::ip { 'storage_vip': ip_address => $storage_vip, } + pacemaker::constraint::base { 
'storage_vip-then-haproxy': + constraint_type => 'order', + first_resource => "ip-${storage_vip}", + second_resource => 'haproxy-clone', + first_action => 'start', + second_action => 'start', + constraint_params => 'kind=Optional', + require => [Pacemaker::Resource::Service['haproxy'], + Pacemaker::Resource::Ip['storage_vip']], + } + pacemaker::constraint::colocation { 'storage_vip-with-haproxy': + source => "ip-${storage_vip}", + target => 'haproxy-clone', + score => 'INFINITY', + require => [Pacemaker::Resource::Service['haproxy'], + Pacemaker::Resource::Ip['storage_vip']], + } } $storage_mgmt_vip = hiera('tripleo::loadbalancer::storage_mgmt_virtual_ip') @@ -203,11 +310,25 @@ if hiera('step') >= 2 { pacemaker::resource::ip { 'storage_mgmt_vip': ip_address => $storage_mgmt_vip, } + pacemaker::constraint::base { 'storage_mgmt_vip-then-haproxy': + constraint_type => 'order', + first_resource => "ip-${storage_mgmt_vip}", + second_resource => 'haproxy-clone', + first_action => 'start', + second_action => 'start', + constraint_params => 'kind=Optional', + require => [Pacemaker::Resource::Service['haproxy'], + Pacemaker::Resource::Ip['storage_mgmt_vip']], + } + pacemaker::constraint::colocation { 'storage_mgmt_vip-with-haproxy': + source => "ip-${storage_mgmt_vip}", + target => 'haproxy-clone', + score => 'INFINITY', + require => [Pacemaker::Resource::Service['haproxy'], + Pacemaker::Resource::Ip['storage_mgmt_vip']], + } } - pacemaker::resource::service { 'haproxy': - clone_params => true, - } pacemaker::resource::service { $::memcached::params::service_name : clone_params => true, require => Class['::memcached'], @@ -255,28 +376,6 @@ if hiera('step') >= 2 { resource_params => 'wait_last_known_master=true', require => Class['::redis'], } - $redis_vip = hiera('redis_vip') - if $redis_vip and $redis_vip != $control_vip { - pacemaker::resource::ip { 'vip-redis': - ip_address => $redis_vip, - } - } - pacemaker::constraint::base { 'redis-master-then-vip-redis': - constraint_type => 'order', - first_resource => 'redis-master', - second_resource => "ip-${redis_vip}", - first_action => 'promote', - second_action => 'start', - require => [Pacemaker::Resource::Ocf['redis'], - Pacemaker::Resource::Ip['vip-redis']], - } - pacemaker::constraint::colocation { 'vip-redis-with-redis-master': - source => "ip-${redis_vip}", - target => 'redis-master', - score => 'INFINITY', - require => [Pacemaker::Resource::Ocf['redis'], - Pacemaker::Resource::Ip['vip-redis']], - } } @@ -444,16 +543,18 @@ if hiera('step') >= 3 { $glance_backend = downcase(hiera('glance_backend', 'swift')) case $glance_backend { - swift: { $glance_store = 'glance.store.swift.Store' } - file: { $glance_store = 'glance.store.filesystem.Store' } - rbd: { $glance_store = 'glance.store.rbd.Store' } + swift: { $backend_store = 'glance.store.swift.Store' } + file: { $backend_store = 'glance.store.filesystem.Store' } + rbd: { $backend_store = 'glance.store.rbd.Store' } default: { fail('Unrecognized glance_backend parameter.') } } + $http_store = ['glance.store.http.Store'] + $glance_store = concat($http_store, $backend_store) # TODO: notifications, scrubber, etc. 
include ::glance class { 'glance::api': - known_stores => [$glance_store], + known_stores => $glance_store, manage_service => false, enabled => false, } @@ -618,7 +719,6 @@ if hiera('step') >= 3 { include ::swift::proxy::keystone include ::swift::proxy::authtoken include ::swift::proxy::staticweb - include ::swift::proxy::ceilometer include ::swift::proxy::ratelimit include ::swift::proxy::catch_errors include ::swift::proxy::tempurl @@ -744,6 +844,8 @@ if hiera('step') >= 3 { snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ], } + hiera_include('controller_classes') + } #END STEP 3 if hiera('step') >= 4 { @@ -846,12 +948,7 @@ if hiera('step') >= 4 { # as soon as neutron-server is started; to avoid races we want to make this # happen only on one node, before normal Pacemaker initialization # https://bugzilla.redhat.com/show_bug.cgi?id=1233061 - exec { 'neutron-server-start-wait-stop' : - command => "systemctl start neutron-server && \ - sleep 5s && \ - systemctl stop neutron-server", - path => ["/usr/bin", "/usr/sbin"], - } -> + exec { '/usr/bin/systemctl start neutron-server && /usr/bin/sleep 5' : } -> pacemaker::resource::service { $::neutron::params::server_service: op_params => "start timeout=90", clone_params => "interleave=true", @@ -1211,15 +1308,6 @@ if hiera('step') >= 4 { Pacemaker::Resource::Service[$::mongodb::params::service_name]], } } - pacemaker::constraint::base { 'vip-redis-then-ceilometer-central': - constraint_type => 'order', - first_resource => "ip-${redis_vip}", - second_resource => "${::ceilometer::params::agent_central_service_name}-clone", - first_action => 'start', - second_action => 'start', - require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name], - Pacemaker::Resource::Ip['vip-redis']], - } # Heat pacemaker::resource::service { $::heat::params::api_service_name : diff --git a/puppet/manifests/overcloud_object.pp b/puppet/manifests/overcloud_object.pp index 24799c8c..ed2ca7c0 100644 --- a/puppet/manifests/overcloud_object.pp +++ b/puppet/manifests/overcloud_object.pp @@ -56,3 +56,5 @@ class { 'snmp': agentaddress => ['udp:161','udp6:[::1]:161'], snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ], } + +hiera_include('object_classes')
\ No newline at end of file diff --git a/puppet/manifests/overcloud_volume.pp b/puppet/manifests/overcloud_volume.pp index edfeaeca..2ef0884b 100644 --- a/puppet/manifests/overcloud_volume.pp +++ b/puppet/manifests/overcloud_volume.pp @@ -59,3 +59,5 @@ class { 'snmp': agentaddress => ['udp:161','udp6:[::1]:161'], snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ], } + +hiera_include('volume_classes')
\ No newline at end of file diff --git a/puppet/swift-storage-puppet.yaml b/puppet/swift-storage-puppet.yaml index 82922a87..fb1756b8 100644 --- a/puppet/swift-storage-puppet.yaml +++ b/puppet/swift-storage-puppet.yaml @@ -194,7 +194,7 @@ outputs: str_replace: template: "IP HOST.localdomain HOST" params: - IP: {get_attr: [SwiftStorage, networks, ctlplane, 0]} + IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ObjectStorageHostnameResolveNetwork]}]} HOST: {get_attr: [SwiftStorage, name]} nova_server_resource: description: Heat resource handle for the swift storage server @@ -219,4 +219,3 @@ outputs: config_identifier: description: identifier which changes if the node configuration may need re-applying value: {get_attr: [SwiftStorageHieraDeploy, deploy_stdout]} -
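
Usage sketch for the new pre-deployment hook: controller-puppet.yaml now instantiates OS::TripleO::ControllerExtraConfigPre (defaulting to the no-op default.yaml), and the new cinder-netapp.yaml template notes that its parameters are meant to arrive via parameter_defaults. A minimal environment file wiring the two together might look as follows; the credentials, the hostname and the assumption that the relative template path resolves from the environment file's directory are placeholders, not part of this change:

    resource_registry:
      OS::TripleO::ControllerExtraConfigPre: puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml

    parameter_defaults:
      CinderEnableNetappBackend: true
      CinderNetappLogin: admin                        # placeholder credentials
      CinderNetappPassword: secret
      CinderNetappServerHostname: netapp.example.com  # placeholder hostname
      CinderNetappStorageFamily: ontap_cluster
      CinderNetappStorageProtocol: nfs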
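
Each role's hieradata now ends with an empty list (ceph_classes, compute_classes, controller_classes, object_classes, volume_classes) and the matching overcloud_*.pp manifest gains a hiera_include() of that key, so any class names supplied through higher-priority hieradata get pulled into the catalog. A sketch of what an operator-provided hieradata fragment could contain, for instance written by a pre_deploy extraconfig template in the same way cinder-netapp.yaml writes cinder_netapp_data; the class name is hypothetical:

    # hieradata fragment consumed by hiera_include('controller_classes')
    controller_classes:
      - '::mymodule::profile::monitoring'   # hypothetical extra class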
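
Fencing can now be driven from the templates: controller-puppet.yaml exposes EnableFencing and FencingConfig, controller-post-puppet.yaml adds a sixth deployment pass running step 5, and overcloud_controller_pacemaker.pp only applies tripleo::fencing and enables stonith once that step runs with fencing enabled. An environment-file sketch following the fence_xvm example from the FencingConfig parameter description; the MAC address, port and key file values are the illustrative ones from that description, not real hardware:

    parameter_defaults:
      EnableFencing: true
      FencingConfig:
        devices:
          - agent: fence_xvm
            host_mac: '52:54:00:aa:bb:cc'    # illustrative values from the parameter description
            params:
              multicast_address: '225.0.0.12'
              port: 'baremetal_0'
              manage_fw: true
              manage_key_file: true
              key_file: '/etc/fence_xvm.key'
              key_file_password: 'abcdef'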
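
The hosts_entry outputs of all five role templates now resolve the /etc/hosts IP through NetIpMap using a per-role ServiceNetMap key (CephStorageHostnameResolveNetwork, BlockStorageHostnameResolveNetwork, ComputeHostnameResolveNetwork, ControllerHostnameResolveNetwork, ObjectStorageHostnameResolveNetwork) instead of the ctlplane address. Assuming those keys are given defaults in the top-level ServiceNetMap parameter (not part of this diff), name resolution could be steered per role from an environment file; note that overriding a json parameter replaces the whole map, so a real override would have to carry every key, only the relevant ones are shown:

    parameter_defaults:
      ServiceNetMap:
        ControllerHostnameResolveNetwork: internal_api   # assumed network names
        ComputeHostnameResolveNetwork: internal_api
        CephStorageHostnameResolveNetwork: storage
        ObjectStorageHostnameResolveNetwork: storage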
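
Three smaller controller tunables also become parameters: MysqlMaxConnections replaces the hard-coded max_connections of 1024 in both overcloud_controller.pp and overcloud_controller_pacemaker.pp, NeutronDhcpAgentsPerNetwork (default 3) replaces the fixed neutron::dhcp_agents_per_network: 2 hieradata, and NeutronExternalNetworkBridge is passed through to neutron::agents::l3::external_network_bridge. A sketch of overriding them together; the values are illustrative only:

    parameter_defaults:
      MysqlMaxConnections: 2048
      NeutronDhcpAgentsPerNetwork: 2
      NeutronExternalNetworkBridge: 'br-float'   # hypothetical bridge name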