-rw-r--r--  docker/firstboot/start_docker_agents.sh            |   5
-rw-r--r--  environments/puppet-ceph-external.yaml             |   4
-rwxr-xr-x  extraconfig/tasks/yum_update.sh                    | 102
-rw-r--r--  network/config/bond-with-vlans/controller.yaml     |   3
-rw-r--r--  os-apply-config/ceph-cluster-config.yaml           |   5
-rw-r--r--  overcloud-without-mergepy.yaml                     |  14
-rw-r--r--  puppet/ceph-cluster-config.yaml                    |  16
-rw-r--r--  puppet/ceph-storage.yaml                           |   9
-rw-r--r--  puppet/cinder-storage.yaml                         |   9
-rw-r--r--  puppet/compute.yaml                                |  11
-rw-r--r--  puppet/controller.yaml                             |  13
-rw-r--r--  puppet/extraconfig/ceph/ceph-external-config.yaml  |  14
-rw-r--r--  puppet/hieradata/compute.yaml                      |   2
-rw-r--r--  puppet/manifests/overcloud_compute.pp              |   3
-rw-r--r--  puppet/manifests/overcloud_controller.pp           |   4
-rw-r--r--  puppet/manifests/overcloud_controller_pacemaker.pp |  20
-rw-r--r--  puppet/swift-storage.yaml                          |   9
17 files changed, 186 insertions(+), 57 deletions(-)
diff --git a/docker/firstboot/start_docker_agents.sh b/docker/firstboot/start_docker_agents.sh
index caf511bd..88759a5d 100644
--- a/docker/firstboot/start_docker_agents.sh
+++ b/docker/firstboot/start_docker_agents.sh
@@ -52,9 +52,10 @@ echo nameserver 8.8.8.8 > /etc/resolv.conf
 HOSTNAME=$(hostname)
 echo "127.0.0.1 $HOSTNAME.localdomain $HOSTNAME" >> /etc/hosts
 
-# Another hack.. we need latest docker..
+# Another hack.. we need a different docker version
+# (should obviously be dropped once the atomic image contains docker 1.8.2)
 /usr/bin/systemctl stop docker.service
-/bin/curl -o /tmp/docker https://get.docker.com/builds/Linux/x86_64/docker-latest
+/bin/curl -o /tmp/docker https://get.docker.com/builds/Linux/x86_64/docker-1.8.2
 /bin/mount -o remount,rw /usr
 /bin/rm /bin/docker
 /bin/cp /tmp/docker /bin/docker
diff --git a/environments/puppet-ceph-external.yaml b/environments/puppet-ceph-external.yaml
index f22967f4..7f5b5080 100644
--- a/environments/puppet-ceph-external.yaml
+++ b/environments/puppet-ceph-external.yaml
@@ -14,10 +14,12 @@ parameter_defaults:
   CinderEnableRbdBackend: true
   GlanceBackend: rbd
   # If the Ceph pools which host VMs, Volumes and Images do not match these
-  # names, edit the following as needed.
+  # names OR the client keyring to use is not named 'openstack', edit the
+  # following as needed.
   NovaRbdPoolName: vms
   CinderRbdPoolName: volumes
   GlanceRbdPoolName: images
+  CephClientUserName: openstack
 
   # finally we disable the Cinder LVM backend
   CinderEnableIscsiBackend: false
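The new CephClientUserName parameter only needs to be overridden when the client keyring on the external Ceph cluster is not named 'openstack'. A usage sketch (the file name, user name and deploy invocation below are illustrative, not part of this change):

    # Hypothetical override file for an external cluster whose client user
    # is called "ceph-cinder" rather than the default "openstack".
    cat > ~/ceph-overrides.yaml <<'EOF'
    parameter_defaults:
      NovaRbdPoolName: vms
      CinderRbdPoolName: volumes
      GlanceRbdPoolName: images
      CephClientUserName: ceph-cinder
    EOF
    openstack overcloud deploy --templates \
      -e environments/puppet-ceph-external.yaml \
      -e ~/ceph-overrides.yaml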
diff --git a/extraconfig/tasks/yum_update.sh b/extraconfig/tasks/yum_update.sh
index 9125ca07..3ba13f23 100755
--- a/extraconfig/tasks/yum_update.sh
+++ b/extraconfig/tasks/yum_update.sh
@@ -43,6 +43,108 @@ fi
 pacemaker_status=$(systemctl is-active pacemaker)
 
 if [[ "$pacemaker_status" == "active" ]] ; then
+    echo "Checking for and adding missing constraints"
+
+    if ! pcs constraint order show | grep "start openstack-nova-novncproxy-clone then start openstack-nova-api-clone"; then
+        pcs constraint order start openstack-nova-novncproxy-clone then openstack-nova-api-clone
+    fi
+
+    if ! pcs constraint order show | grep "start rabbitmq-clone then start openstack-keystone-clone"; then
+        pcs constraint order start rabbitmq-clone then openstack-keystone-clone
+    fi
+
+    if ! pcs constraint order show | grep "promote galera-master then start openstack-keystone-clone"; then
+        pcs constraint order promote galera-master then openstack-keystone-clone
+    fi
+
+    if ! pcs constraint order show | grep "start haproxy-clone then start openstack-keystone-clone"; then
+        pcs constraint order start haproxy-clone then openstack-keystone-clone
+    fi
+
+    if ! pcs constraint order show | grep "start memcached-clone then start openstack-keystone-clone"; then
+        pcs constraint order start memcached-clone then openstack-keystone-clone
+    fi
+
+    if ! pcs constraint order show | grep "promote redis-master then start openstack-ceilometer-central-clone"; then
+        pcs constraint order promote redis-master then start openstack-ceilometer-central-clone require-all=false
+    fi
+
+    if ! pcs resource defaults | grep "resource-stickiness: INFINITY"; then
+        pcs resource defaults resource-stickiness=INFINITY
+    fi
+
+    echo "Setting resource start/stop timeouts"
+
+    # timeouts for non-openstack services and special cases
+    pcs resource update haproxy op start timeout=100s
+    pcs resource update haproxy op stop timeout=100s
+    # mongod start timeout is also higher, setting only stop timeout
+    pcs resource update mongod op stop timeout=100s
+    # rabbit start timeout is already 100s
+    pcs resource update rabbitmq op stop timeout=100s
+    pcs resource update memcached op start timeout=100s
+    pcs resource update memcached op stop timeout=100s
+    pcs resource update httpd op start timeout=100s
+    pcs resource update httpd op stop timeout=100s
+    # neutron-netns-cleanup stop timeout is 300s, setting only start timeout
+    pcs resource update neutron-netns-cleanup op start timeout=100s
+    # neutron-ovs-cleanup stop timeout is 300s, setting only start timeout
+    pcs resource update neutron-ovs-cleanup op start timeout=100s
+
+    # timeouts for openstack services
+    pcs resource update neutron-dhcp-agent op start timeout=100s
+    pcs resource update neutron-dhcp-agent op stop timeout=100s
+    pcs resource update neutron-l3-agent op start timeout=100s
+    pcs resource update neutron-l3-agent op stop timeout=100s
+    pcs resource update neutron-metadata-agent op start timeout=100s
+    pcs resource update neutron-metadata-agent op stop timeout=100s
+    pcs resource update neutron-openvswitch-agent op start timeout=100s
+    pcs resource update neutron-openvswitch-agent op stop timeout=100s
+    pcs resource update neutron-server op start timeout=100s
+    pcs resource update neutron-server op stop timeout=100s
+    pcs resource update openstack-ceilometer-alarm-evaluator op start timeout=100s
+    pcs resource update openstack-ceilometer-alarm-evaluator op stop timeout=100s
+    pcs resource update openstack-ceilometer-alarm-notifier op start timeout=100s
+    pcs resource update openstack-ceilometer-alarm-notifier op stop timeout=100s
+    pcs resource update openstack-ceilometer-api op start timeout=100s
+    pcs resource update openstack-ceilometer-api op stop timeout=100s
+    pcs resource update openstack-ceilometer-central op start timeout=100s
+    pcs resource update openstack-ceilometer-central op stop timeout=100s
+    pcs resource update openstack-ceilometer-collector op start timeout=100s
+    pcs resource update openstack-ceilometer-collector op stop timeout=100s
+    pcs resource update openstack-ceilometer-notification op start timeout=100s
+    pcs resource update openstack-ceilometer-notification op stop timeout=100s
+    pcs resource update openstack-cinder-api op start timeout=100s
+    pcs resource update openstack-cinder-api op stop timeout=100s
+    pcs resource update openstack-cinder-scheduler op start timeout=100s
+    pcs resource update openstack-cinder-scheduler op stop timeout=100s
+    pcs resource update openstack-cinder-volume op start timeout=100s
+    pcs resource update openstack-cinder-volume op stop timeout=100s
+    pcs resource update openstack-glance-api op start timeout=100s
+    pcs resource update openstack-glance-api op stop timeout=100s
+    pcs resource update openstack-glance-registry op start timeout=100s
+    pcs resource update openstack-glance-registry op stop timeout=100s
+    pcs resource update openstack-heat-api op start timeout=100s
+    pcs resource update openstack-heat-api op stop timeout=100s
+    pcs resource update openstack-heat-api-cfn op start timeout=100s
+    pcs resource update openstack-heat-api-cfn op stop timeout=100s
+    pcs resource update openstack-heat-api-cloudwatch op start timeout=100s
+    pcs resource update openstack-heat-api-cloudwatch op stop timeout=100s
+    pcs resource update openstack-heat-engine op start timeout=100s
+    pcs resource update openstack-heat-engine op stop timeout=100s
+    pcs resource update openstack-keystone op start timeout=100s
+    pcs resource update openstack-keystone op stop timeout=100s
+    pcs resource update openstack-nova-api op start timeout=100s
+    pcs resource update openstack-nova-api op stop timeout=100s
+    pcs resource update openstack-nova-conductor op start timeout=100s
+    pcs resource update openstack-nova-conductor op stop timeout=100s
+    pcs resource update openstack-nova-consoleauth op start timeout=100s
+    pcs resource update openstack-nova-consoleauth op stop timeout=100s
+    pcs resource update openstack-nova-novncproxy op start timeout=100s
+    pcs resource update openstack-nova-novncproxy op stop timeout=100s
+    pcs resource update openstack-nova-scheduler op start timeout=100s
+    pcs resource update openstack-nova-scheduler op stop timeout=100s
+
     echo "Pacemaker running, stopping cluster node and doing full package update"
     node_count=$(pcs status xml | grep -o "<nodes_configured.*/>" | grep -o 'number="[0-9]*"' | grep -o "[0-9]*")
     if [[ "$node_count" == "1" ]] ; then
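The grep-before-create pattern keeps the constraint section idempotent across repeated update runs, and the long block of pcs resource update calls raises every start/stop timeout to 100s so slow service shutdowns no longer flip resources into a failed state mid-update. A hypothetical spot-check after the script has run (resource name and output format depend on the pcs version in use):

    # Confirm the new op timeouts and the stickiness default took effect.
    pcs resource show openstack-nova-api | grep -E 'start|stop'
    pcs resource defaults | grep resource-stickiness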
diff --git a/network/config/bond-with-vlans/controller.yaml b/network/config/bond-with-vlans/controller.yaml
index 4290be20..eb4399ea 100644
--- a/network/config/bond-with-vlans/controller.yaml
+++ b/network/config/bond-with-vlans/controller.yaml
@@ -30,10 +30,9 @@ parameters:
     description: IP address/subnet on the tenant network
     type: string
   BondInterfaceOvsOptions:
-    default: 'bond_mode=balance-tcp lacp=active other-config:lacp-fallback-ab=true'
+    default: 'bond_mode=active-backup'
     description: The ovs_options string for the bond interface. Set things like
                  lacp=active and/or bond_mode=balance-slb using this option.
-                 Default wil attempt LACP, but will fall back to active-backup.
     type: string
   ExternalNetworkVlanID:
     default: 10
diff --git a/os-apply-config/ceph-cluster-config.yaml b/os-apply-config/ceph-cluster-config.yaml
index 4e435ffb..115de085 100644
--- a/os-apply-config/ceph-cluster-config.yaml
+++ b/os-apply-config/ceph-cluster-config.yaml
@@ -13,7 +13,7 @@ parameters:
   ceph_client_key:
     default: ''
     type: string
-    description: Ceph key used to create the 'openstack' user keyring.
+    description: Ceph key used to create the client user keyring.
   ceph_fsid:
     default: ''
     type: string
@@ -36,6 +36,9 @@ parameters:
   GlanceRbdPoolName:
     default: images
     type: string
+  CephClientUserName:
+    default: openstack
+    type: string
 
 resources:
   CephClusterConfigImpl:
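Dropping LACP from the default bond options avoids failed deployments on switches that are not configured for it; environments whose switches do support LACP can put the old value back. A sketch of such an override (file name is illustrative, and whether parameter_defaults reaches this template parameter depends on how the NIC config templates are wired into the deployment):

    # Hypothetical environment restoring the previous LACP-first behaviour.
    cat > ~/bond-options.yaml <<'EOF'
    parameter_defaults:
      BondInterfaceOvsOptions: 'bond_mode=balance-tcp lacp=active other-config:lacp-fallback-ab=true'
    EOF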
diff --git a/overcloud-without-mergepy.yaml b/overcloud-without-mergepy.yaml
index 78886c61..3e09cc5d 100644
--- a/overcloud-without-mergepy.yaml
+++ b/overcloud-without-mergepy.yaml
@@ -223,8 +223,9 @@ parameters:
     type: string
     hidden: true
   NtpServer:
-    type: string
     default: ''
+    description: Comma-separated list of ntp servers
+    type: comma_delimited_list
   MongoDbNoJournal:
     default: false
     description: Should MongoDb journaling be disabled
@@ -700,6 +701,12 @@ parameters:
     description: >
       Setting to a previously unused value during stack-update will trigger
       package update on all nodes
+  DeployIdentifier:
+    default: ''
+    type: string
+    description: >
+      Setting this to a unique value will re-run any deployment tasks which
+      perform configuration on a Heat stack-update.
 
   # If you want to remove a specific node from a resource group, you can pass
   # the node name or id as a <Group>RemovalPolicies parameter, for example:
@@ -1380,6 +1387,7 @@ resources:
       NodeConfigIdentifiers:
         allnodes_extra: {get_attr: [AllNodesExtraConfig, config_identifier]}
         controller_config: {get_attr: [Controller, attributes, config_identifier]}
+        deployment_identifier: {get_param: DeployIdentifier}
 
   ComputeNodesPostDeployment:
     type: OS::TripleO::ComputePostDeployment
@@ -1389,6 +1397,7 @@ resources:
       NodeConfigIdentifiers:
         allnodes_extra: {get_attr: [AllNodesExtraConfig, config_identifier]}
         compute_config: {get_attr: [Compute, attributes, config_identifier]}
+        deployment_identifier: {get_param: DeployIdentifier}
 
   ObjectStorageNodesPostDeployment:
     type: OS::TripleO::ObjectStoragePostDeployment
@@ -1398,6 +1407,7 @@ resources:
      NodeConfigIdentifiers:
        allnodes_extra: {get_attr: [AllNodesExtraConfig, config_identifier]}
        objectstorage_config: {get_attr: [ObjectStorage, attributes, config_identifier]}
+       deployment_identifier: {get_param: DeployIdentifier}
 
   BlockStorageNodesPostDeployment:
     type: OS::TripleO::BlockStoragePostDeployment
@@ -1407,6 +1417,7 @@ resources:
      NodeConfigIdentifiers:
        allnodes_extra: {get_attr: [AllNodesExtraConfig, config_identifier]}
        blockstorage_config: {get_attr: [BlockStorage, attributes, config_identifier]}
+       deployment_identifier: {get_param: DeployIdentifier}
 
   CephStorageNodesPostDeployment:
     type: OS::TripleO::CephStoragePostDeployment
@@ -1416,6 +1427,7 @@ resources:
      NodeConfigIdentifiers:
        allnodes_extra: {get_attr: [AllNodesExtraConfig, config_identifier]}
        cephstorage_config: {get_attr: [CephStorage, attributes, config_identifier]}
+       deployment_identifier: {get_param: DeployIdentifier}
 
 outputs:
   KeystoneURL:
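Because DeployIdentifier is folded into every role's NodeConfigIdentifiers, changing it changes the post-deploy config identifier and therefore forces the configuration deployments to re-run even when nothing else in the stack changed. A sketch of how an operator would use both new parameters on a stack-update (file name and timestamp scheme are illustrative):

    # Any previously unused value works; a timestamp is a convenient choice.
    cat > ~/update-env.yaml <<EOF
    parameter_defaults:
      DeployIdentifier: "$(date +%s)"
      NtpServer: 0.pool.ntp.org,1.pool.ntp.org
    EOF
    openstack overcloud deploy --templates -e ~/update-env.yaml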
diff --git a/puppet/ceph-cluster-config.yaml b/puppet/ceph-cluster-config.yaml
index 5e54a621..96198c3f 100644
--- a/puppet/ceph-cluster-config.yaml
+++ b/puppet/ceph-cluster-config.yaml
@@ -13,7 +13,7 @@ parameters:
   ceph_client_key:
     default: ''
     type: string
-    description: Ceph key used to create the 'openstack' user keyring.
+    description: Ceph key used to create the client user keyring.
   ceph_fsid:
     default: ''
     type: string
@@ -36,6 +36,9 @@ parameters:
   GlanceRbdPoolName:
     default: images
     type: string
+  CephClientUserName:
+    default: openstack
+    type: string
 
 resources:
   CephClusterConfigImpl:
@@ -74,7 +77,7 @@ resources:
                 keyring_path: '/var/lib/ceph/bootstrap-osd/ceph.keyring',
                 cap_mon: 'allow profile bootstrap-osd'
               },
-              client.openstack: {
+              client.CLIENT_USER: {
                 secret: 'ADMIN_KEY',
                 mode: '0644',
                 cap_mon: 'allow r',
@@ -82,6 +85,7 @@ resources:
               }
             }"
           params:
+            CLIENT_USER: {get_param: CephClientUserName}
            ADMIN_KEY: {get_param: ceph_admin_key}
            NOVA_POOL: {get_param: NovaRbdPoolName}
            CINDER_POOL: {get_param: CinderRbdPoolName}
@@ -89,6 +93,14 @@ resources:
            nova::compute::rbd::libvirt_images_rbd_pool: {get_param: NovaRbdPoolName}
            cinder_rbd_pool_name: {get_param: CinderRbdPoolName}
            glance::backend::rbd::rbd_store_pool: {get_param: GlanceRbdPoolName}
+           nova::compute::rbd::libvirt_rbd_user: {get_param: CephClientUserName}
+           glance::backend::rbd::rbd_store_user: {get_param: CephClientUserName}
+           nova::compute::rbd::rbd_keyring:
+             list_join:
+             - '.'
+             - - 'client'
+               - {get_param: CephClientUserName}
+           ceph_client_user_name: {get_param: CephClientUserName}
            ceph_pools:
              - {get_param: CinderRbdPoolName}
              - {get_param: NovaRbdPoolName}
diff --git a/puppet/ceph-storage.yaml b/puppet/ceph-storage.yaml
index 75294599..0d968504 100644
--- a/puppet/ceph-storage.yaml
+++ b/puppet/ceph-storage.yaml
@@ -22,8 +22,9 @@ parameters:
     constraints:
     - custom_constraint: nova.keypair
   NtpServer:
-    type: string
     default: ''
+    description: Comma-separated list of ntp servers
+    type: comma_delimited_list
   EnablePackageInstall:
     default: 'false'
     description: Set to true to enable package installation via Puppet
@@ -133,11 +134,7 @@ resources:
       config: {get_resource: CephStorageConfig}
       server: {get_resource: CephStorage}
       input_values:
-        ntp_servers:
-          str_replace:
-            template: '["server"]'
-            params:
-              server: {get_param: NtpServer}
+        ntp_servers: {get_param: NtpServer}
         enable_package_install: {get_param: EnablePackageInstall}
         enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]}
         ceph_cluster_network: {get_attr: [NetIpSubnetMap, net_ip_subnet_map, {get_param: [ServiceNetMap, CephClusterNetwork]}]}
diff --git a/puppet/cinder-storage.yaml b/puppet/cinder-storage.yaml
index bee2e567..b536418d 100644
--- a/puppet/cinder-storage.yaml
+++ b/puppet/cinder-storage.yaml
@@ -75,8 +75,9 @@ parameters:
     type: string
     hidden: true
   NtpServer:
-    type: string
     default: ''
+    description: Comma-separated list of ntp servers
+    type: comma_delimited_list
   EnablePackageInstall:
     default: 'false'
     description: Set to true to enable package installation via Puppet
@@ -202,11 +203,7 @@ resources:
         rabbit_password: {get_param: RabbitPassword}
         rabbit_client_use_ssl: {get_param: RabbitClientUseSSL}
         rabbit_client_port: {get_param: RabbitClientPort}
-        ntp_servers:
-          str_replace:
-            template: '["server"]'
-            params:
-              server: {get_param: NtpServer}
+        ntp_servers: {get_param: NtpServer}
         enable_package_install: {get_param: EnablePackageInstall}
         enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]}
diff --git a/puppet/compute.yaml b/puppet/compute.yaml
index 70c74037..18547732 100644
--- a/puppet/compute.yaml
+++ b/puppet/compute.yaml
@@ -211,8 +211,9 @@ parameters:
     type: string
     default: ''  # Has to be here because of the ignored empty value bug
   NtpServer:
-    type: string
     default: ''
+    description: Comma-separated list of ntp servers
+    type: comma_delimited_list
   RabbitHost:
     type: string
     default: ''  # Has to be here because of the ignored empty value bug
@@ -403,7 +404,7 @@ resources:
             nova::glance_api_servers: {get_input: glance_api_servers}
             neutron::debug: {get_input: debug}
             neutron::rabbit_password: {get_input: rabbit_password}
-            neutron::rabbit_user: {get_input: rabbit_user}
+            neutron::rabbit_user: {get_input: rabbit_username}
             neutron::rabbit_use_ssl: {get_input: rabbit_client_use_ssl}
             neutron::rabbit_port: {get_input: rabbit_client_port}
             neutron_flat_networks: {get_input: neutron_flat_networks}
@@ -522,11 +523,7 @@ resources:
         rabbit_password: {get_param: RabbitPassword}
         rabbit_client_use_ssl: {get_param: RabbitClientUseSSL}
         rabbit_client_port: {get_param: RabbitClientPort}
-        ntp_servers:
-          str_replace:
-            template: '["server"]'
-            params:
-              server: {get_param: NtpServer}
+        ntp_servers: {get_param: NtpServer}
         enable_package_install: {get_param: EnablePackageInstall}
        enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]}
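The list_join above only concatenates 'client' and the configured user name with a dot, so for the default user it reproduces the previously hard-coded 'client.openstack' keyring entry. An illustrative check on a deployed node (the keyring path is shown for illustration; it follows the usual /etc/ceph naming convention):

    # With CephClientUserName=openstack the derived entry is client.openstack:
    ls /etc/ceph/ceph.client.openstack.keyring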
diff --git a/puppet/controller.yaml b/puppet/controller.yaml
index dc381499..ae2b66e3 100644
--- a/puppet/controller.yaml
+++ b/puppet/controller.yaml
@@ -450,8 +450,9 @@ parameters:
     description: Should MongoDb journaling be disabled
     type: boolean
   NtpServer:
-    type: string
     default: ''
+    description: Comma-separated list of ntp servers
+    type: comma_delimited_list
   PcsdPassword:
     type: string
     description: The password for the 'pcsd' user.
@@ -930,11 +931,7 @@ resources:
             template: "'LIMIT'"
             params:
               LIMIT: {get_param: RabbitFDLimit}
-        ntp_servers:
-          str_replace:
-            template: '["server"]'
-            params:
-              server: {get_param: NtpServer}
+        ntp_servers: {get_param: NtpServer}
         control_virtual_interface: {get_param: ControlVirtualInterface}
         public_virtual_interface: {get_param: PublicVirtualInterface}
         swift_hash_suffix: {get_param: SwiftHashSuffix}
@@ -1158,7 +1155,7 @@ resources:
             # Neutron
             neutron::bind_host: {get_input: neutron_api_network}
             neutron::rabbit_password: {get_input: rabbit_password}
-            neutron::rabbit_user: {get_input: rabbit_user}
+            neutron::rabbit_user: {get_input: rabbit_username}
             neutron::rabbit_use_ssl: {get_input: rabbit_client_use_ssl}
             neutron::rabbit_port: {get_input: rabbit_client_port}
             neutron::debug: {get_input: debug}
@@ -1249,6 +1246,8 @@ resources:
             rabbitmq::node_ip_address: {get_input: rabbitmq_network}
             rabbitmq::erlang_cookie: {get_input: rabbit_cookie}
             rabbitmq::file_limit: {get_input: rabbit_fd_limit}
+            rabbitmq::default_user: {get_input: rabbit_username}
+            rabbitmq::default_pass: {get_input: rabbit_password}
             # Redis
             redis::bind: {get_input: redis_network}
             redis_vip: {get_input: redis_vip}
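Besides fixing the rabbit_user/rabbit_username input-name mismatch (the deployment wires the value up as rabbit_username, so neutron had been reading an undefined input), the controller now also seeds RabbitMQ's default user from the same credentials instead of leaving the package default. A hypothetical post-deploy check on a controller node:

    # The configured RabbitUserName should be listed rather than "guest",
    # and neutron should point at the same user.
    rabbitmqctl list_users
    grep rabbit_userid /etc/neutron/neutron.conf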
diff --git a/puppet/extraconfig/ceph/ceph-external-config.yaml b/puppet/extraconfig/ceph/ceph-external-config.yaml
index fadc8a00..7cefc24b 100644
--- a/puppet/extraconfig/ceph/ceph-external-config.yaml
+++ b/puppet/extraconfig/ceph/ceph-external-config.yaml
@@ -38,6 +38,9 @@ parameters:
   GlanceRbdPoolName:
     default: images
     type: string
+  CephClientUserName:
+    default: openstack
+    type: string
 
 resources:
   CephClusterConfigImpl:
@@ -56,7 +59,7 @@ resources:
             ceph::profile::params::client_keys:
               str_replace:
                 template: "{
-                  client.openstack: {
+                  client.CLIENT_USER: {
                     secret: 'CLIENT_KEY',
                     mode: '0644',
                     cap_mon: 'allow r',
@@ -64,6 +67,7 @@ resources:
                   }
                 }"
               params:
+                CLIENT_USER: {get_param: CephClientUserName}
                 CLIENT_KEY: {get_param: ceph_client_key}
                 NOVA_POOL: {get_param: NovaRbdPoolName}
                 CINDER_POOL: {get_param: CinderRbdPoolName}
@@ -71,6 +75,14 @@ resources:
            nova::compute::rbd::libvirt_images_rbd_pool: {get_param: NovaRbdPoolName}
            cinder_rbd_pool_name: {get_param: CinderRbdPoolName}
            glance::backend::rbd::rbd_store_pool: {get_param: GlanceRbdPoolName}
+           nova::compute::rbd::libvirt_rbd_user: {get_param: CephClientUserName}
+           glance::backend::rbd::rbd_store_user: {get_param: CephClientUserName}
+           nova::compute::rbd::rbd_keyring:
+             list_join:
+             - '.'
+             - - 'client'
+               - {get_param: CephClientUserName}
+           ceph_client_user_name: {get_param: CephClientUserName}
            ceph_pools:
              - {get_param: CinderRbdPoolName}
              - {get_param: NovaRbdPoolName}
diff --git a/puppet/hieradata/compute.yaml b/puppet/hieradata/compute.yaml
index 659008a5..173020f8 100644
--- a/puppet/hieradata/compute.yaml
+++ b/puppet/hieradata/compute.yaml
@@ -10,8 +10,6 @@ nova::compute::vnc_enabled: true
 nova::compute::libvirt::vncserver_listen: '0.0.0.0'
 nova::compute::libvirt::migration_support: true
 
-nova::compute::rbd::libvirt_rbd_user: 'openstack'
-nova::compute::rbd::rbd_keyring: 'client.openstack'
 nova::compute::rbd::libvirt_rbd_secret_uuid: "%{hiera('ceph::profile::params::fsid')}"
 
 ceilometer::agent::auth::auth_tenant_name: 'service'
diff --git a/puppet/manifests/overcloud_compute.pp b/puppet/manifests/overcloud_compute.pp
index 4c927569..cd41cc79 100644
--- a/puppet/manifests/overcloud_compute.pp
+++ b/puppet/manifests/overcloud_compute.pp
@@ -49,8 +49,9 @@ if $rbd_ephemeral_storage or $rbd_persistent_storage {
   include ::ceph::profile::client
 
   $client_keys = hiera('ceph::profile::params::client_keys')
+  $client_user = join(['client.', hiera('ceph_client_user_name')])
   class { '::nova::compute::rbd':
-    libvirt_rbd_secret_key => $client_keys['client.openstack']['secret'],
+    libvirt_rbd_secret_key => $client_keys[$client_user]['secret'],
   }
 }
diff --git a/puppet/manifests/overcloud_controller.pp b/puppet/manifests/overcloud_controller.pp
index 695cb519..34be39f3 100644
--- a/puppet/manifests/overcloud_controller.pp
+++ b/puppet/manifests/overcloud_controller.pp
@@ -327,7 +327,7 @@ if hiera('step') >= 3 {
 
     cinder::backend::rbd { $cinder_rbd_backend :
       rbd_pool        => hiera('cinder_rbd_pool_name'),
-      rbd_user        => 'openstack',
+      rbd_user        => hiera('ceph_client_user_name'),
       rbd_secret_uuid => hiera('ceph::profile::params::fsid'),
       require         => $cinder_pool_requires,
     }
@@ -381,7 +381,7 @@ if hiera('step') >= 3 {
     package {'nfs-utils': } ->
     cinder::backend::nfs { $cinder_nfs_backend :
       nfs_servers       => hiera('cinder_nfs_servers'),
-      nfs_mount_options => hiera('cinder_nfs_mount_options'),
+      nfs_mount_options => hiera('cinder_nfs_mount_options',''),
       nfs_shares_config => '/etc/cinder/shares-nfs.conf',
     }
   }
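On the compute side the keyring entry is now looked up dynamically: the manifest joins 'client.' with the ceph_client_user_name hiera value and indexes client_keys with the result. Roughly what that lookup resolves to on a node (the hiera config path is an assumption for illustration):

    # Prints the configured client user; "openstack" for the default.
    hiera -c /etc/puppet/hiera.yaml ceph_client_user_name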
diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp
index 91bc1b14..b9623714 100644
--- a/puppet/manifests/overcloud_controller_pacemaker.pp
+++ b/puppet/manifests/overcloud_controller_pacemaker.pp
@@ -74,11 +74,11 @@ if hiera('step') >= 1 {
     Class['tripleo::fencing'] -> Class['pacemaker::stonith']
   }
 
-  # FIXME(gfidente): sets 90secs as default start timeout op
+  # FIXME(gfidente): sets 100secs as default start timeout op
   # param; until we can use pcmk global defaults we'll still
   # need to add it to every resource which redefines op params
   Pacemaker::Resource::Service {
-    op_params => 'start timeout=90s',
+    op_params => 'start timeout=100s stop timeout=100s',
   }
 
   # Only configure RabbitMQ in this step, don't start it yet to
@@ -344,7 +344,7 @@ if hiera('step') >= 2 {
 
   if downcase(hiera('ceilometer_backend')) == 'mongodb' {
     pacemaker::resource::service { $::mongodb::params::service_name :
-      op_params    => 'start timeout=120s',
+      op_params    => 'start timeout=120s stop timeout=100s',
       clone_params => true,
       require      => Class['::mongodb::server'],
     }
@@ -703,7 +703,7 @@ if hiera('step') >= 3 {
 
     cinder::backend::rbd { $cinder_rbd_backend :
       rbd_pool        => hiera('cinder_rbd_pool_name'),
-      rbd_user        => 'openstack',
+      rbd_user        => hiera('ceph_client_user_name'),
       rbd_secret_uuid => hiera('ceph::profile::params::fsid'),
       require         => $cinder_pool_requires,
     }
@@ -757,7 +757,7 @@ if hiera('step') >= 3 {
     package { 'nfs-utils': } ->
     cinder::backend::nfs { $cinder_nfs_backend:
       nfs_servers       => hiera('cinder_nfs_servers'),
-      nfs_mount_options => hiera('cinder_nfs_mount_options'),
+      nfs_mount_options => hiera('cinder_nfs_mount_options',''),
       nfs_shares_config => '/etc/cinder/shares-nfs.conf',
     }
   }
@@ -1186,24 +1186,24 @@ if hiera('step') >= 4 {
   # Nova
   pacemaker::resource::service { $::nova::params::api_service_name :
     clone_params => 'interleave=true',
-    op_params    => 'start timeout=90s monitor start-delay=10s',
+    op_params    => 'start timeout=100s stop timeout=100s monitor start-delay=10s',
   }
   pacemaker::resource::service { $::nova::params::conductor_service_name :
     clone_params => 'interleave=true',
-    op_params    => 'start timeout=90s monitor start-delay=10s',
+    op_params    => 'start timeout=100s stop timeout=100s monitor start-delay=10s',
   }
   pacemaker::resource::service { $::nova::params::consoleauth_service_name :
     clone_params => 'interleave=true',
-    op_params    => 'start timeout=90s monitor start-delay=10s',
+    op_params    => 'start timeout=100s stop timeout=100s monitor start-delay=10s',
     require      => Pacemaker::Resource::Service[$::keystone::params::service_name],
   }
   pacemaker::resource::service { $::nova::params::vncproxy_service_name :
     clone_params => 'interleave=true',
-    op_params    => 'start timeout=90s monitor start-delay=10s',
+    op_params    => 'start timeout=100s stop timeout=100s monitor start-delay=10s',
   }
   pacemaker::resource::service { $::nova::params::scheduler_service_name :
     clone_params => 'interleave=true',
-    op_params    => 'start timeout=90s monitor start-delay=10s',
+    op_params    => 'start timeout=100s stop timeout=100s monitor start-delay=10s',
   }
 
   pacemaker::constraint::base { 'keystone-then-nova-consoleauth-constraint':
diff --git a/puppet/swift-storage.yaml b/puppet/swift-storage.yaml
index 22ec6096..3d9b9018 100644
--- a/puppet/swift-storage.yaml
+++ b/puppet/swift-storage.yaml
@@ -45,8 +45,9 @@ parameters:
     type: string
     hidden: true
   NtpServer:
-    type: string
     default: ''
+    description: Comma-separated list of ntp servers
+    type: comma_delimited_list
   EnablePackageInstall:
     default: 'false'
     description: Set to true to enable package installation via Puppet
@@ -207,11 +208,7 @@ resources:
         swift_min_part_hours: {get_param: MinPartHours}
         swift_part_power: {get_param: PartPower}
         swift_replicas: { get_param: Replicas}
-        ntp_servers:
-          str_replace:
-            template: '["server"]'
-            params:
-              server: {get_param: NtpServer}
+        ntp_servers: {get_param: NtpServer}
         enable_package_install: {get_param: EnablePackageInstall}
         enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]}
         swift_management_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SwiftMgmtNetwork]}]}
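With NtpServer now a comma_delimited_list passed straight through as ntp_servers, the old str_replace wrapping of a single server into a one-element JSON list is gone and each entry should be rendered as its own server line by the ntp configuration. A quick, illustrative sanity check on any deployed node:

    # One line per configured NTP server is expected here.
    grep '^server' /etc/ntp.conf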