 ci/environments/scenario001-multinode.yaml                           |  1 +
 ci/environments/scenario004-multinode.yaml                           |  1 +
 environments/puppet-ceph-devel.yaml                                  |  2 ++
 puppet/services/ceph-base.yaml                                       |  1 -
 puppet/services/ceph-mon.yaml                                        |  6 +++++-
 releasenotes/notes/unset-ceph-default-min-size-0297620ed99dab5b.yaml | 12 ++++++++++++
 6 files changed, 21 insertions(+), 2 deletions(-)
diff --git a/ci/environments/scenario001-multinode.yaml b/ci/environments/scenario001-multinode.yaml
index 437d7c37..ad4fa10f 100644
--- a/ci/environments/scenario001-multinode.yaml
+++ b/ci/environments/scenario001-multinode.yaml
@@ -101,6 +101,7 @@ parameter_defaults:
CephMonKey: 'AQC+Ox1VmEr3BxAALZejqeHj50Nj6wJDvs96OQ=='
CephAdminKey: 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ=='
CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw=='
+ CephPoolDefaultSize: 1
NovaEnableRbdBackend: true
CinderEnableRbdBackend: true
CinderBackupBackend: ceph
diff --git a/ci/environments/scenario004-multinode.yaml b/ci/environments/scenario004-multinode.yaml
index a5c6fa31..e473d0bb 100644
--- a/ci/environments/scenario004-multinode.yaml
+++ b/ci/environments/scenario004-multinode.yaml
@@ -90,6 +90,7 @@ parameter_defaults:
CephMonKey: 'AQC+Ox1VmEr3BxAALZejqeHj50Nj6wJDvs96OQ=='
CephAdminKey: 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ=='
CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw=='
+ CephPoolDefaultSize: 1
SwiftCeilometerPipelineEnabled: false
NeutronServicePlugins: 'router, networking_bgpvpn.neutron.services.plugin.BGPVPNPlugin, networking_l2gw.services.l2gateway.plugin.L2GatewayPlugin'
BgpvpnServiceProvider: 'BGPVPN:Dummy:networking_bgpvpn.neutron.services.service_drivers.driver_api.BGPVPNDriver:default'
diff --git a/environments/puppet-ceph-devel.yaml b/environments/puppet-ceph-devel.yaml
index 9c8abbb4..8fc4bf29 100644
--- a/environments/puppet-ceph-devel.yaml
+++ b/environments/puppet-ceph-devel.yaml
@@ -20,3 +20,5 @@ parameter_defaults:
GlanceBackend: rbd
GnocchiBackend: rbd
CinderEnableIscsiBackend: false
+ CephPoolDefaultSize: 1
+
diff --git a/puppet/services/ceph-base.yaml b/puppet/services/ceph-base.yaml
index 5f19af68..e12c55eb 100644
--- a/puppet/services/ceph-base.yaml
+++ b/puppet/services/ceph-base.yaml
@@ -99,7 +99,6 @@ outputs:
service_name: ceph_base
config_settings:
tripleo::profile::base::ceph::enable_ceph_storage: {get_param: ControllerEnableCephStorage}
- ceph::profile::params::osd_pool_default_min_size: 1
ceph::profile::params::osds: {/srv/data: {}}
ceph::profile::params::manage_repo: false
ceph::profile::params::authentication_type: cephx
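
With the hard-coded ceph::profile::params::osd_pool_default_min_size removed from ceph-base.yaml, the templates no longer pin min_size to 1 and Ceph derives it from the pool size instead. An operator who still wants the old behaviour on a disposable single-OSD lab could push the hiera key directly; below is a minimal sketch using the generic ExtraConfig hiera hook (the file name, and the assumption that the deployed puppet-ceph module still honours this key, are illustrative):

    # min-size-override.yaml -- hypothetical operator environment file
    parameter_defaults:
      ExtraConfig:
        # re-pin min_size to 1; only sensible on a throwaway single-OSD lab,
        # since accepting writes with a single replica risks data loss
        ceph::profile::params::osd_pool_default_min_size: 1
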
diff --git a/puppet/services/ceph-mon.yaml b/puppet/services/ceph-mon.yaml
index c36f0537..28552301 100644
--- a/puppet/services/ceph-mon.yaml
+++ b/puppet/services/ceph-mon.yaml
@@ -78,6 +78,10 @@ parameters:
MonitoringSubscriptionCephMon:
default: 'overcloud-ceph-mon'
type: string
+ CephPoolDefaultSize:
+ description: default replication count (number of object copies) for Ceph pools
+ type: number
+ default: 3

resources:
CephBase:
@@ -102,7 +106,7 @@ outputs:
ceph::profile::params::mon_key: {get_param: CephMonKey}
ceph::profile::params::osd_pool_default_pg_num: 32
ceph::profile::params::osd_pool_default_pgp_num: 32
- ceph::profile::params::osd_pool_default_size: 3
+ ceph::profile::params::osd_pool_default_size: {get_param: CephPoolDefaultSize}
# repeat returns items in a list, so we need to map_merge twice
tripleo::profile::base::ceph::mon::ceph_pools:
map_merge:
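
The new CephPoolDefaultSize parameter feeds straight into ceph::profile::params::osd_pool_default_size, so changing the replica count becomes an environment-file override passed to the deploy command with -e rather than a template edit. A minimal sketch of such an override (the file name and the chosen value are illustrative):

    # replica-count.yaml -- hypothetical override for a small dev cluster
    parameter_defaults:
      # keep two copies of each object instead of the default three;
      # Ceph then derives min_size from this value instead of a hard-coded 1
      CephPoolDefaultSize: 2
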
diff --git a/releasenotes/notes/unset-ceph-default-min-size-0297620ed99dab5b.yaml b/releasenotes/notes/unset-ceph-default-min-size-0297620ed99dab5b.yaml
new file mode 100644
index 00000000..fc2cb48a
--- /dev/null
+++ b/releasenotes/notes/unset-ceph-default-min-size-0297620ed99dab5b.yaml
@@ -0,0 +1,12 @@
+---
+fixes:
+  - |
+    Removed the hard-coded osd_pool_default_min_size setting. Setting this
+    value to 1 can result in data loss in production deployments. Leaving it
+    unset (or setting it to 0) lets Ceph calculate the value from the current
+    osd_pool_default_size: a replication count of 3 yields a calculated
+    min_size of 2, and a replication count of 1 yields a min_size of 1. For a
+    POC deployment using a single OSD, set osd_pool_default_size = 1. See
+    http://docs.ceph.com/docs/master/rados/configuration/pool-pg-config-ref/
+    Added the CephPoolDefaultSize parameter to set the default replication
+    size. Its default value is 3.
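
As the note's last sentence suggests, a single-OSD proof of concept is the one case where a replica count of 1 still makes sense; a sketch of such an environment file using the new parameter (the file name is illustrative):

    # poc-single-osd.yaml -- hypothetical single-OSD proof-of-concept override
    parameter_defaults:
      # one copy only, so pools stay writable with a single OSD;
      # never use this outside a disposable test deployment
      CephPoolDefaultSize: 1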