author    Qiaowei Ren <qiaowei.ren@intel.com>    2018-01-04 13:43:33 +0800
committer Qiaowei Ren <qiaowei.ren@intel.com>    2018-01-05 11:59:39 +0800
commit    812ff6ca9fcd3e629e49d4328905f33eee8ca3f5 (patch)
tree      04ece7b4da00d9d2f98093774594f4057ae561d4 /src/ceph/doc/rados/configuration/pool-pg.conf
parent    15280273faafb77777eab341909a3f495cf248d9 (diff)
initial code repo
This patch creates the initial code repo. For Ceph, the Luminous stable release will be used as the base code, and subsequent changes and optimizations for Ceph will be added on top of it. For OpenSDS, any changes can currently be upstreamed into the original opensds repo (https://github.com/opensds/opensds), so stor4nfv will clone the opensds code directly to deploy the stor4nfv environment. The deployment scripts based on Ceph and OpenSDS will be placed in the 'ci' directory.

Change-Id: I46a32218884c75dda2936337604ff03c554648e4
Signed-off-by: Qiaowei Ren <qiaowei.ren@intel.com>
Diffstat (limited to 'src/ceph/doc/rados/configuration/pool-pg.conf')
-rw-r--r--  src/ceph/doc/rados/configuration/pool-pg.conf  20
1 file changed, 20 insertions, 0 deletions
diff --git a/src/ceph/doc/rados/configuration/pool-pg.conf b/src/ceph/doc/rados/configuration/pool-pg.conf
new file mode 100644
index 0000000..5f1b3b7
--- /dev/null
+++ b/src/ceph/doc/rados/configuration/pool-pg.conf
@@ -0,0 +1,20 @@
+[global]
+
+ # By default, Ceph makes 3 replicas of objects. If you want to make four
+ # copies of an object the default--a primary copy and three replica
+ # copies--reset the default value as shown in 'osd pool default size'.
+ # If you want to allow Ceph to accept writes to fewer copies while the
+ # cluster is in a degraded state, set 'osd pool default min size' to a
+ # number less than the 'osd pool default size' value.
+
+ osd pool default size = 4 # Write an object 4 times.
+ osd pool default min size = 1 # Allow writing one copy in a degraded state.
+
+ # Ensure you have a realistic number of placement groups. We recommend
+ # approximately 100 per OSD. E.g., total number of OSDs multiplied by 100
+ # divided by the number of replicas (i.e., osd pool default size). So for
+ # 10 OSDs and osd pool default size = 4, we'd recommend approximately
+ # (100 * 10) / 4 = 250.
+
+ osd pool default pg num = 250
+ osd pool default pgp num = 250
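
As a side note (not part of the patch), the placement-group rule of thumb in the comment above -- total OSDs multiplied by roughly 100 PGs per OSD, divided by the replica count -- can be sanity-checked with a short script. The sketch below is a minimal illustration only: the function name recommended_pg_num, its parameters, and the default of 100 PGs per OSD are assumptions chosen here to mirror the comment, not an API defined by Ceph.

    # Minimal sketch: reproduces the PG-count rule of thumb from the comment
    # above, i.e. (target PGs per OSD * number of OSDs) / pool size.
    def recommended_pg_num(num_osds: int, pool_size: int,
                           target_pgs_per_osd: int = 100) -> int:
        """Return the recommended total number of placement groups."""
        if num_osds <= 0 or pool_size <= 0:
            raise ValueError("num_osds and pool_size must be positive")
        return (target_pgs_per_osd * num_osds) // pool_size

    if __name__ == "__main__":
        # Example from the config comment: 10 OSDs, osd pool default size = 4.
        print(recommended_pg_num(num_osds=10, pool_size=4))  # -> 250

The result matches the value used for 'osd pool default pg num' and 'osd pool default pgp num' above; Ceph's placement-group guidance also suggests rounding pg_num toward a nearby power of two, but the comment's example value of 250 is kept here verbatim.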