From 799182f0c40730e41253dc5c861857d219291c3b Mon Sep 17 00:00:00 2001
From: "Sridhar K. N. Rao"
Date: Fri, 18 Oct 2019 14:35:14 +0530
Subject: Site definition for Intel Pod-10

This patch adds the site definition for Intel Pod-10.

Updated public keys of luc and trevor
Updated site-definition
Updated divingbell
Modifying common parts - FOR TESTING ONLY - Will be removed
Trying with only 1 disk (bootdisk)
Trying with 2 disks - /dev/sda as bootdisk, /dev/sdb as datadisk
Change ceph config from directory to /dev/sdb (OSD-data only)
Change ceph config from directory to /dev/sdb (OSD-Journal too)
Reduce footprint of osh-infra (reduce disk pressure)
Move ceph to site-specific manifests
Fix pod10 host/hardware profiles to be site-local
Fix Nova/Neutron parts to be site-local
Fix glance cirros image pull
Fix type to site layer names for moved files
Rename pod10 hardware/host profiles
Move ceph fully to /dev/sdb
Disable SR-IOV configuration
Optimize disk storage for Nova VMs (use root disk or 3T)

Signed-off-by: Sridhar K. N. Rao
Change-Id: I2160e56744917510d4627cefca32031904188f77
---
 .../software/charts/ucp/ceph/ceph-client.yaml | 100 +++++++++++++++++++++
 1 file changed, 100 insertions(+)
 create mode 100644 site/intel-pod10/software/charts/ucp/ceph/ceph-client.yaml

diff --git a/site/intel-pod10/software/charts/ucp/ceph/ceph-client.yaml b/site/intel-pod10/software/charts/ucp/ceph/ceph-client.yaml
new file mode 100644
index 0000000..e1e8ecf
--- /dev/null
+++ b/site/intel-pod10/software/charts/ucp/ceph/ceph-client.yaml
@@ -0,0 +1,100 @@
+---
+# The purpose of this file is to define environment-specific parameters for the
+# ceph client
+schema: armada/Chart/v1
+metadata:
+  schema: metadata/Document/v1
+  name: ucp-ceph-client
+  layeringDefinition:
+    abstract: false
+    layer: site
+    parentSelector:
+      name: ucp-ceph-client-global
+    actions:
+      - method: merge
+        path: .
+  storagePolicy: cleartext
+data:
+  values:
+    conf:
+      pool:
+        target:
+          # NEWSITE-CHANGEME: The number of OSDs per ceph node. Does not need to
+          # change if your deployment HW matches this site's HW.
+          osd: 1
+        spec:
+          # RBD pool
+          - name: rbd
+            application: rbd
+            replication: 1
+            percent_total_data: 40
+          - name: cephfs_metadata
+            application: cephfs
+            replication: 1
+            percent_total_data: 5
+          - name: cephfs_data
+            application: cephfs
+            replication: 1
+            percent_total_data: 10
+          # RadosGW pools
+          - name: .rgw.root
+            application: rgw
+            replication: 1
+            percent_total_data: 0.1
+          - name: default.rgw.control
+            application: rgw
+            replication: 1
+            percent_total_data: 0.1
+          - name: default.rgw.data.root
+            application: rgw
+            replication: 1
+            percent_total_data: 0.1
+          - name: default.rgw.gc
+            application: rgw
+            replication: 1
+            percent_total_data: 0.1
+          - name: default.rgw.log
+            application: rgw
+            replication: 1
+            percent_total_data: 0.1
+          - name: default.rgw.intent-log
+            application: rgw
+            replication: 1
+            percent_total_data: 0.1
+          - name: default.rgw.meta
+            application: rgw
+            replication: 1
+            percent_total_data: 0.1
+          - name: default.rgw.usage
+            application: rgw
+            replication: 1
+            percent_total_data: 0.1
+          - name: default.rgw.users.keys
+            application: rgw
+            replication: 1
+            percent_total_data: 0.1
+          - name: default.rgw.users.email
+            application: rgw
+            replication: 1
+            percent_total_data: 0.1
+          - name: default.rgw.users.swift
+            application: rgw
+            replication: 1
+            percent_total_data: 0.1
+          - name: default.rgw.users.uid
+            application: rgw
+            replication: 1
+            percent_total_data: 0.1
+          - name: default.rgw.buckets.extra
+            application: rgw
+            replication: 1
+            percent_total_data: 0.1
+          - name: default.rgw.buckets.index
+            application: rgw
+            replication: 1
+            percent_total_data: 3
+          - name: default.rgw.buckets.data
+            application: rgw
+            replication: 1
+            percent_total_data: 34.8
+...
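
Note on the override mechanism: the document above is a Deckhand site-layer override. It declares `parentSelector: name: ucp-ceph-client-global` with a `merge` action, so only the values it sets (the per-node OSD target and the pool replication/percent_total_data figures) replace or extend the global ucp-ceph-client chart; everything else is inherited. Because Pod-10 runs a single OSD per node on /dev/sdb, every pool uses replication: 1. As a minimal sketch only (the osd and replication numbers below are illustrative assumptions, not part of this patch), a site with more OSDs could override the same paths like this:

---
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: ucp-ceph-client
  layeringDefinition:
    abstract: false
    layer: site
    parentSelector:
      name: ucp-ceph-client-global
    actions:
      - method: merge
        path: .
  storagePolicy: cleartext
data:
  values:
    conf:
      pool:
        target:
          # Hypothetical: three OSDs per ceph node instead of one.
          osd: 3
        spec:
          # With multiple OSDs available, pools can carry redundant copies;
          # replication: 3 is the conventional Ceph default.
          - name: rbd
            application: rbd
            replication: 3
            percent_total_data: 40
...

The rest of the pool list would follow the same pattern, with replication raised uniformly and percent_total_data left as in the patch above.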