summaryrefslogtreecommitdiffstats
path: root/type/cntt/software/charts/osh
diff options
context:
space:
mode:
authorKaspars Skels <kaspars.skels@att.com>2019-09-04 12:12:49 -0500
committerKaspars Skels <kaspars.skels@att.com>2019-09-04 13:45:29 -0500
commit18ca21eac0c569868250fdc9e652e41f76ae2fbd (patch)
tree09af5b178c07eba62ec4526a7389e43759017bca /type/cntt/software/charts/osh
parent8aee7b343082db8f65319e3358deb48528294bda (diff)
Move genesis from pod17-jump to pod17-node1
Adjust installation to comply with Pharos specification http://artifacts.opnfv.org/pharos/colorado/docs/specification/index.html The jump host is intended to serve as a node where the installer runs. This is then used as a worker node for Jenkins. This patchset re-configures genesis to move to pod17-node1, and reduces compute nodes from 3 to 2. Change-Id: Ie5f65ad7cc0e6f688c913705babab6dc25925b4e Signed-off-by: Kaspars Skels <kaspars.skels@att.com>
Diffstat (limited to 'type/cntt/software/charts/osh')
-rw-r--r--type/cntt/software/charts/osh/openstack-tenant-ceph/ceph-client.yaml82
1 file changed, 81 insertions, 1 deletion
diff --git a/type/cntt/software/charts/osh/openstack-tenant-ceph/ceph-client.yaml b/type/cntt/software/charts/osh/openstack-tenant-ceph/ceph-client.yaml
index 3f5bfba..18eeaad 100644
--- a/type/cntt/software/charts/osh/openstack-tenant-ceph/ceph-client.yaml
+++ b/type/cntt/software/charts/osh/openstack-tenant-ceph/ceph-client.yaml
@@ -17,7 +17,87 @@ metadata:
data:
values:
conf:
+ ceph:
+ global:
+ osd_pool_default_size: 1
+
+ # todo: add docs explaining reduced redundancy
+ # we only have 2 computes, e.g. 2 OSDs
pool:
target:
- osd: 3
+ osd: 2
+ default:
+ crush_rule: same_host
+ spec:
+ # RBD pool
+ - name: rbd
+ application: rbd
+ replication: 1
+ percent_total_data: 10
+ # Cinder volumes pool
+ - name: cinder.volumes
+ application: cinder-volume
+ replication: 1
+ percent_total_data: 40
+ # RadosGW pools
+ - name: .rgw.root
+ application: rgw
+ replication: 1
+ percent_total_data: 0.1
+ - name: default.rgw.control
+ application: rgw
+ replication: 1
+ percent_total_data: 0.1
+ - name: default.rgw.data.root
+ application: rgw
+ replication: 1
+ percent_total_data: 0.1
+ - name: default.rgw.gc
+ application: rgw
+ replication: 1
+ percent_total_data: 0.1
+ - name: default.rgw.log
+ application: rgw
+ replication: 1
+ percent_total_data: 0.1
+ - name: default.rgw.intent-log
+ application: rgw
+ replication: 1
+ percent_total_data: 0.1
+ - name: default.rgw.meta
+ application: rgw
+ replication: 1
+ percent_total_data: 0.1
+ - name: default.rgw.usage
+ application: rgw
+ replication: 1
+ percent_total_data: 0.1
+ - name: default.rgw.users.keys
+ application: rgw
+ replication: 1
+ percent_total_data: 0.1
+ - name: default.rgw.users.email
+ application: rgw
+ replication: 1
+ percent_total_data: 0.1
+ - name: default.rgw.users.swift
+ application: rgw
+ replication: 1
+ percent_total_data: 0.1
+ - name: default.rgw.users.uid
+ application: rgw
+ replication: 1
+ percent_total_data: 0.1
+ - name: default.rgw.buckets.extra
+ application: rgw
+ replication: 1
+ percent_total_data: 0.1
+ - name: default.rgw.buckets.index
+ application: rgw
+ replication: 1
+ percent_total_data: 3
+ - name: default.rgw.buckets.data
+ application: rgw
+ replication: 1
+ percent_total_data: 30
...