author     leonwang <wanghui71@huawei.com>    2018-01-10 03:44:46 +0000
committer  leonwang <wanghui71@huawei.com>    2018-01-10 08:43:38 +0000
commit     64df7bc3bc70d49153409436b411fb327691a4d5 (patch)
tree       c078dda45831938f0268e66f774390b4079309c7 /ci/ansible/group_vars/ceph/osds.yml
parent     0786fde30eba926b097617dea9ca4683ac2fa1b7 (diff)
Push zealand version of opensds ansible as base-code of Stor4NFV
As we discussed in the last meeting, the installer script of stor4nfv
will be based on opensds ansible, so in this patch I download the
first release (zealand) of opensds code and push the ansible script
into the stor4nfv repo so that we don't need to modify opensds code.
Please feel free to ask if you have any questions.
Change-Id: I7b50729977b195fa64e8d9a09f415d9f3329d71f
Signed-off-by: leonwang <wanghui71@huawei.com>
Diffstat (limited to 'ci/ansible/group_vars/ceph/osds.yml')
-rw-r--r-- | ci/ansible/group_vars/ceph/osds.yml | 259
1 file changed, 259 insertions, 0 deletions
diff --git a/ci/ansible/group_vars/ceph/osds.yml b/ci/ansible/group_vars/ceph/osds.yml
new file mode 100644
index 0000000..57cf581
--- /dev/null
+++ b/ci/ansible/group_vars/ceph/osds.yml
@@ -0,0 +1,259 @@
+---
+# Variables here are applicable to all host groups NOT roles
+
+# This sample file generated by generate_group_vars_sample.sh
+
+# Dummy variable to avoid an error, because ansible does not recognize the
+# file as a good configuration file when there is no variable in it.
+dummy:
+
+# You can override default vars defined in defaults/main.yml here,
+# but I would advise using host or group vars instead
+
+#raw_journal_devices: "{{ dedicated_devices }}" # backward compatibility with stable-2.2, will disappear in stable 3.1
+#journal_collocation: False # backward compatibility with stable-2.2, will disappear in stable 3.1
+#raw_multi_journal: False # backward compatibility with stable-2.2, will disappear in stable 3.1
+#dmcrytpt_journal_collocation: False # backward compatibility with stable-2.2, will disappear in stable 3.1
+#dmcrypt_dedicated_journal: False # backward compatibility with stable-2.2, will disappear in stable 3.1
+
+
+###########
+# GENERAL #
+###########
+
+# Even though OSD nodes should not have the admin key
+# at their disposal, some people might want to have it
+# distributed on OSD nodes. Setting 'copy_admin_key' to 'true'
+# will copy the admin key to the /etc/ceph/ directory
+#copy_admin_key: false
+
+
+####################
+# OSD CRUSH LOCATION
+####################
+
+# /!\
+#
+# BE EXTREMELY CAREFUL WITH THIS OPTION
+# DO NOT USE IT UNLESS YOU KNOW WHAT YOU ARE DOING
+#
+# /!\
+#
+# It is probably best to keep this option set to 'false', as the default
+# suggests. This option should only be used when building a complex
+# CRUSH map. It allows you to force a specific location for a set of OSDs.
+#
+# The following options will build a ceph.conf with OSD sections
+# Example:
+# [osd.X]
+# osd crush location = "root=location"
+#
+# This works together with your inventory file.
+# To match the 'osd_crush_location' option below, the inventory must look like:
+#
+# [osds]
+# osd0 ceph_crush_root=foo ceph_crush_rack=bar
+
+#crush_location: false
+#osd_crush_location: "\"root={{ ceph_crush_root }} rack={{ ceph_crush_rack }} host={{ ansible_hostname }}\""
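+# A hedged sketch of enabling this feature (illustrative only, not an
+# upstream default; it assumes the inventory defines 'ceph_crush_root'
+# and 'ceph_crush_rack' per host as shown above):
+# crush_location: true
+# osd_crush_location: "\"root={{ ceph_crush_root }} rack={{ ceph_crush_rack }} host={{ ansible_hostname }}\""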
+
+
+##############
+# CEPH OPTIONS
+##############
+
+# Devices to be used as OSDs
+# You can pre-provision disks that are not present yet.
+# Ansible will just skip them. Newly added disks will be
+# automatically configured during the next run.
+#
+
+
+# Declare devices to be used as OSDs
+# All scenarios (except the 3rd) inherit from the following device declaration
+
+devices:
+# - /dev/sda
+# - /dev/sdc
+# - /dev/sdd
+# - /dev/sde
+
+#devices: []
+
+
+# 'osd_auto_discovery' mode means you do not fill out the 'devices' variable above.
+# You can use this option with the first, fourth and fifth OSD scenarios.
+# Device discovery is based on the Ansible fact 'ansible_devices',
+# which reports all the devices on a system. If enabled, all the disks
+# found will be passed to ceph-disk. You should not worry about using
+# this option, since ceph-disk has a built-in check that looks for empty devices;
+# devices with existing partition tables will not be used.
+#
+#osd_auto_discovery: false
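+# Hedged example (an assumption, not an upstream default): to rely on
+# auto discovery instead of an explicit list, leave 'devices' empty and
+# enable the flag:
+# devices: []
+# osd_auto_discovery: true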
+
+# Encrypt your OSD device using dmcrypt
+# If set to True, the data will be encrypted no matter which osd_objectstore and osd_scenario you use
+#dmcrypt: "{{ True if dmcrytpt_journal_collocation or dmcrypt_dedicated_journal else False }}" # backward compatibility with stable-2.2, will disappear in stable 3.1
+
+
+# I. First scenario: collocated
+#
+# To enable this scenario do: osd_scenario: collocated
+#
+#
+# If osd_objectstore: filestore is enabled both 'ceph data' and 'ceph journal' partitions
+# will be stored on the same device.
+#
+# If osd_objectstore: bluestore is enabled 'ceph data', 'ceph block', 'ceph block.db', 'ceph block.wal' will be stored
+# on the same device. The device will get 2 partitions:
+# - One for 'data', called 'ceph data'
+# - One for 'ceph block', 'ceph block.db', 'ceph block.wal' called 'ceph block'
+#
+# Example of what you will get:
+# [root@ceph-osd0 ~]# blkid /dev/sda*
+# /dev/sda: PTTYPE="gpt"
+# /dev/sda1: UUID="9c43e346-dd6e-431f-92d8-cbed4ccb25f6" TYPE="xfs" PARTLABEL="ceph data" PARTUUID="749c71c9-ed8f-4930-82a7-a48a3bcdb1c7"
+# /dev/sda2: PARTLABEL="ceph block" PARTUUID="e6ca3e1d-4702-4569-abfa-e285de328e9d"
+#
+
+#osd_scenario: "{{ 'collocated' if journal_collocation or dmcrytpt_journal_collocation else 'non-collocated' if raw_multi_journal or dmcrypt_dedicated_journal else 'dummy' }}" # backward compatibility with stable-2.2, will disappear in stable 3.1
+#valid_osd_scenarios:
+# - collocated
+# - non-collocated
+# - lvm
+osd_scenario: collocated
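+# Hedged sketch of a minimal collocated setup (illustrative only; the
+# 'osd_objectstore' value and device names below are assumptions):
+# osd_objectstore: filestore
+# osd_scenario: collocated
+# devices:
+#   - /dev/sdb
+#   - /dev/sdc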
+
+# II. Second scenario: non-collocated
+#
+# To enable this scenario do: osd_scenario: non-collocated
+#
+# If osd_objectstore: filestore is enabled 'ceph data' and 'ceph journal' partitions
+# will be stored on different devices:
+# - 'ceph data' will be stored on the device listed in 'devices'
+# - 'ceph journal' will be stored on the device listed in 'dedicated_devices'
+#
+# Let's take an example: imagine 'devices' was declared like this:
+#
+# devices:
+# - /dev/sda
+# - /dev/sdb
+# - /dev/sdc
+# - /dev/sdd
+#
+# And 'dedicated_devices' was declared like this:
+#
+# dedicated_devices:
+# - /dev/sdf
+# - /dev/sdf
+# - /dev/sdg
+# - /dev/sdg
+#
+# This will result in the following mapping:
+# - /dev/sda will have /dev/sdf1 as a journal
+# - /dev/sdb will have /dev/sdf2 as a journal
+# - /dev/sdc will have /dev/sdg1 as a journal
+# - /dev/sdd will have /dev/sdg2 as a journal
+#
+#
+# If osd_objectstore: bluestore is enabled, both 'ceph block.db' and 'ceph block.wal' partitions will be stored
+# on a dedicated device.
+#
+# So the following will happen:
+# - The devices listed in 'devices' will get 2 partitions, one for 'block' and one for 'data'.
+#   'data' is only 100MB and does not store any of your data; it is just a bunch of Ceph metadata.
+# 'block' will store all your actual data.
+# - The devices in 'dedicated_devices' will get 1 partition for RocksDB DB, called 'block.db'
+# and one for RocksDB WAL, called 'block.wal'
+#
+# By default dedicated_devices will represent block.db
+#
+# Example of what you will get:
+# [root@ceph-osd0 ~]# blkid /dev/sd*
+# /dev/sda: PTTYPE="gpt"
+# /dev/sda1: UUID="c6821801-2f21-4980-add0-b7fc8bd424d5" TYPE="xfs" PARTLABEL="ceph data" PARTUUID="f2cc6fa8-5b41-4428-8d3f-6187453464d0"
+# /dev/sda2: PARTLABEL="ceph block" PARTUUID="ea454807-983a-4cf2-899e-b2680643bc1c"
+# /dev/sdb: PTTYPE="gpt"
+# /dev/sdb1: PARTLABEL="ceph block.db" PARTUUID="af5b2d74-4c08-42cf-be57-7248c739e217"
+# /dev/sdb2: PARTLABEL="ceph block.wal" PARTUUID="af3f8327-9aa9-4c2b-a497-cf0fe96d126a"
+#dedicated_devices: []
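+# Hedged sketch of a non-collocated layout matching the journal mapping
+# described above (illustrative only; device names are assumptions):
+# osd_scenario: non-collocated
+# devices:
+#   - /dev/sda
+#   - /dev/sdb
+# dedicated_devices:
+#   - /dev/sdf
+#   - /dev/sdf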
+
+
+# More device granularity for Bluestore
+#
+# ONLY if osd_objectstore: bluestore is enabled.
+#
+# By default, if 'bluestore_wal_devices' is empty, it will get the content of 'dedicated_devices'.
+# If set, then you will have a dedicated partition on a specific device for block.wal.
+#
+# Example of what you will get:
+# [root@ceph-osd0 ~]# blkid /dev/sd*
+# /dev/sda: PTTYPE="gpt"
+# /dev/sda1: UUID="39241ae9-d119-4335-96b3-0898da8f45ce" TYPE="xfs" PARTLABEL="ceph data" PARTUUID="961e7313-bdb7-49e7-9ae7-077d65c4c669"
+# /dev/sda2: PARTLABEL="ceph block" PARTUUID="bff8e54e-b780-4ece-aa16-3b2f2b8eb699"
+# /dev/sdb: PTTYPE="gpt"
+# /dev/sdb1: PARTLABEL="ceph block.db" PARTUUID="0734f6b6-cc94-49e9-93de-ba7e1d5b79e3"
+# /dev/sdc: PTTYPE="gpt"
+# /dev/sdc1: PARTLABEL="ceph block.wal" PARTUUID="824b84ba-6777-4272-bbbd-bfe2a25cecf3"
+#bluestore_wal_devices: "{{ dedicated_devices }}"
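+# Hedged example of placing block.wal on its own device (illustrative
+# only; the device name is an assumption):
+# bluestore_wal_devices:
+#   - /dev/sdc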
+
+# III. Use ceph-volume to create OSDs from logical volumes.
+# Use 'osd_scenario: lvm' to enable this scenario. Currently we only support dedicated journals
+# when using lvm, not collocated journals.
+# lvm_volumes is a list of dictionaries. Each dictionary must contain data, journal and data_vg
+# keys. Any logical volume or volume group used must be a name and not a path.
+# data must be a logical volume
+# journal can be either an lv, a device or a partition. You cannot use the same journal for multiple data lvs.
+# data_vg must be the volume group name of the data lv
+# journal_vg is optional and must be the volume group name of the journal lv, if applicable
+# For example:
+# lvm_volumes:
+# - data: data-lv1
+# data_vg: vg1
+# journal: journal-lv1
+# journal_vg: vg2
+# - data: data-lv2
+# journal: /dev/sda
+# data_vg: vg1
+# - data: data-lv3
+# journal: /dev/sdb1
+# data_vg: vg2
+#lvm_volumes: []
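+# Hedged sketch of enabling the lvm scenario with the example volumes
+# above (illustrative only; LV/VG names are assumptions):
+# osd_scenario: lvm
+# lvm_volumes:
+#   - data: data-lv1
+#     data_vg: vg1
+#     journal: journal-lv1
+#     journal_vg: vg2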
+
+
+##########
+# DOCKER #
+##########
+
+#ceph_config_keys: [] # DON'T TOUCH ME
+
+# Resource limitation
+# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
+# Default values are based on: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
+# These options can be passed using the 'ceph_osd_docker_extra_env' variable.
+#ceph_osd_docker_memory_limit: 1g
+#ceph_osd_docker_cpu_limit: 1
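+# Hedged example of raising the limits on a larger OSD node (values are
+# assumptions, not recommendations):
+# ceph_osd_docker_memory_limit: 2g
+# ceph_osd_docker_cpu_limit: 2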
+
+# PREPARE DEVICE
+#
+# WARNING /!\ DMCRYPT scenario ONLY works with Docker version 1.12.5 and above
+#
+#ceph_osd_docker_devices: "{{ devices }}"
+#ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }}
+
+# ACTIVATE DEVICE
+#
+#ceph_osd_docker_extra_env:
+#ceph_osd_docker_run_script_path: "/usr/share" # script called by systemd to run the docker command
+
+
+###########
+# SYSTEMD #
+###########
+
+# ceph_osd_systemd_overrides will override the systemd settings
+# for the ceph-osd services.
+# For example, to set "PrivateDevices=false" you can specify:
+#ceph_osd_systemd_overrides:
+# Service:
+# PrivateDevices: False
+