From cdfe03566354b938e94c6863b0d4b1c3e64cc10c Mon Sep 17 00:00:00 2001
From: Keith Schincke
Date: Fri, 12 May 2017 08:12:56 -0400
Subject: Remove osd_pool_default_min_size to allow the Ceph cluster to do the
 right thing by default

The default value is 0, which causes min_size to be calculated from the
replica count in osd_pool_default_size. The default replica count is 3,
and the calculated min_size is 2. If the replica count is 1, then the
min_size is 1; i.e. min_size = replica - (replica/2), using integer
division.

Add a CephPoolDefaultSize parameter to ceph-mon.yaml. This parameter
defaults to 3 but can be overridden. See puppet-ceph-devel.yaml for an
example.

Change-Id: Ie9bdd9b16bcb9f11107ece614b010e87d3ae98a9
---
 ci/environments/scenario001-multinode.yaml                  |  1 +
 ci/environments/scenario004-multinode.yaml                  |  1 +
 environments/puppet-ceph-devel.yaml                         |  2 ++
 puppet/services/ceph-base.yaml                              |  1 -
 puppet/services/ceph-mon.yaml                               |  6 +++++-
 .../notes/unset-ceph-default-min-size-0297620ed99dab5b.yaml | 12 ++++++++++++
 6 files changed, 21 insertions(+), 2 deletions(-)
 create mode 100644 releasenotes/notes/unset-ceph-default-min-size-0297620ed99dab5b.yaml

diff --git a/ci/environments/scenario001-multinode.yaml b/ci/environments/scenario001-multinode.yaml
index eee6f1ce..473beb09 100644
--- a/ci/environments/scenario001-multinode.yaml
+++ b/ci/environments/scenario001-multinode.yaml
@@ -102,6 +102,7 @@ parameter_defaults:
   CephMonKey: 'AQC+Ox1VmEr3BxAALZejqeHj50Nj6wJDvs96OQ=='
   CephAdminKey: 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ=='
   CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw=='
+  CephPoolDefaultSize: 1
   NovaEnableRbdBackend: true
   CinderEnableRbdBackend: true
   CinderBackupBackend: ceph
diff --git a/ci/environments/scenario004-multinode.yaml b/ci/environments/scenario004-multinode.yaml
index 24fb2bf4..14f181c4 100644
--- a/ci/environments/scenario004-multinode.yaml
+++ b/ci/environments/scenario004-multinode.yaml
@@ -86,6 +86,7 @@ parameter_defaults:
   CephMonKey: 'AQC+Ox1VmEr3BxAALZejqeHj50Nj6wJDvs96OQ=='
   CephAdminKey: 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ=='
   CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw=='
+  CephPoolDefaultSize: 1
   SwiftCeilometerPipelineEnabled: false
   NeutronServicePlugins: 'router, networking_bgpvpn.neutron.services.plugin.BGPVPNPlugin'
   BgpvpnServiceProvider: 'BGPVPN:Dummy:networking_bgpvpn.neutron.services.service_drivers.driver_api.BGPVPNDriver:default'
diff --git a/environments/puppet-ceph-devel.yaml b/environments/puppet-ceph-devel.yaml
index 9c8abbb4..8fc4bf29 100644
--- a/environments/puppet-ceph-devel.yaml
+++ b/environments/puppet-ceph-devel.yaml
@@ -20,3 +20,5 @@ parameter_defaults:
   GlanceBackend: rbd
   GnocchiBackend: rbd
   CinderEnableIscsiBackend: false
+  CephPoolDefaultSize: 1
+
diff --git a/puppet/services/ceph-base.yaml b/puppet/services/ceph-base.yaml
index 033d3f77..1eea3dc7 100644
--- a/puppet/services/ceph-base.yaml
+++ b/puppet/services/ceph-base.yaml
@@ -91,7 +91,6 @@ outputs:
     service_name: ceph_base
     config_settings:
       tripleo::profile::base::ceph::enable_ceph_storage: {get_param: ControllerEnableCephStorage}
-      ceph::profile::params::osd_pool_default_min_size: 1
       ceph::profile::params::osds: {/srv/data: {}}
       ceph::profile::params::manage_repo: false
       ceph::profile::params::authentication_type: cephx
diff --git a/puppet/services/ceph-mon.yaml b/puppet/services/ceph-mon.yaml
index d589ef89..0f72eb73 100644
--- a/puppet/services/ceph-mon.yaml
+++ b/puppet/services/ceph-mon.yaml
@@ -70,6 +70,10 @@ parameters:
   MonitoringSubscriptionCephMon:
     default: 'overcloud-ceph-mon'
     type: string
+  CephPoolDefaultSize:
+    description: default replica count for Ceph pools
+    type: number
+    default: 3

 resources:
   CephBase:
@@ -92,7 +96,7 @@ outputs:
       ceph::profile::params::mon_key: {get_param: CephMonKey}
       ceph::profile::params::osd_pool_default_pg_num: 32
       ceph::profile::params::osd_pool_default_pgp_num: 32
-      ceph::profile::params::osd_pool_default_size: 3
+      ceph::profile::params::osd_pool_default_size: {get_param: CephPoolDefaultSize}
       # repeat returns items in a list, so we need to map_merge twice
       tripleo::profile::base::ceph::mon::ceph_pools:
         map_merge:
diff --git a/releasenotes/notes/unset-ceph-default-min-size-0297620ed99dab5b.yaml b/releasenotes/notes/unset-ceph-default-min-size-0297620ed99dab5b.yaml
new file mode 100644
index 00000000..fc2cb48a
--- /dev/null
+++ b/releasenotes/notes/unset-ceph-default-min-size-0297620ed99dab5b.yaml
@@ -0,0 +1,12 @@
+---
+fixes:
+  - |
+    Removed the hard coding of osd_pool_default_min_size. Setting this value
+    to 1 can result in data loss in production deployments. Not setting
+    this value (or setting it to 0) allows Ceph to calculate the value
+    based on the current setting of osd_pool_default_size. If the
+    replication count is 3, then the calculated min_size is 2. If the
+    replication count is 1, then the calculated min_size is 1. For POC
+    deployments using a single OSD, set osd_pool_default_size = 1. See the
+    description at http://docs.ceph.com/docs/master/rados/configuration/pool-pg-config-ref/
+    Added CephPoolDefaultSize to set the default replication size. Default value is 3.
-- 
cgit 1.2.3-korg
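
For reference, a minimal sketch of an operator environment file that
overrides the new parameter. The file name is hypothetical; the
parameter itself and its wiring to osd_pool_default_size come from the
ceph-mon.yaml change above, and passing extra environment files with -e
is the usual overcloud deploy convention.

    # poc-ceph.yaml (hypothetical name), passed as e.g.:
    #   openstack overcloud deploy --templates -e poc-ceph.yaml
    parameter_defaults:
      # Feeds ceph::profile::params::osd_pool_default_size via ceph-mon.yaml.
      # A size of 1 is only suitable for single-OSD POC deployments.
      CephPoolDefaultSize: 1
      # osd_pool_default_min_size is intentionally left unset (0), so Ceph
      # derives min_size = size - size/2, i.e. 1 here (integer division).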