Diffstat (limited to 'ci/ansible/group_vars')
-rw-r--r--[-rwxr-xr-x]  ci/ansible/group_vars/ceph/all.yml        | 1002
-rw-r--r--[-rwxr-xr-x]  ci/ansible/group_vars/ceph/ceph.hosts     |   16
-rw-r--r--[-rwxr-xr-x]  ci/ansible/group_vars/ceph/ceph.yaml      |   13
-rw-r--r--[-rwxr-xr-x]  ci/ansible/group_vars/ceph/osds.yml       |  518
-rw-r--r--              ci/ansible/group_vars/cinder/cinder.yaml  |   31
-rw-r--r--[-rwxr-xr-x]  ci/ansible/group_vars/common.yml          |   73
-rw-r--r--[-rwxr-xr-x]  ci/ansible/group_vars/lvm/lvm.yaml        |   13
-rw-r--r--[-rwxr-xr-x]  ci/ansible/group_vars/osdsdb.yml          |   68
-rw-r--r--[-rwxr-xr-x]  ci/ansible/group_vars/osdsdock.yml        |  157
-rw-r--r--[-rwxr-xr-x]  ci/ansible/group_vars/osdslet.yml         |   39
10 files changed, 976 insertions, 954 deletions
diff --git a/ci/ansible/group_vars/ceph/all.yml b/ci/ansible/group_vars/ceph/all.yml
index 1d49e6c..9594d33 100755..100644
--- a/ci/ansible/group_vars/ceph/all.yml
+++ b/ci/ansible/group_vars/ceph/all.yml
@@ -1,501 +1,501 @@
+---
+# Variables here are applicable to all host groups NOT roles
+
+# This sample file generated by generate_group_vars_sample.sh
+
+# Dummy variable to avoid an error, because Ansible does not recognize
+# the file as a valid configuration file when it contains no variables.
+dummy:
+
+# You can override vars by using host or group vars
+
+###########
+# GENERAL #
+###########
+
+######################################
+# Releases name to number dictionary #
+######################################
+#ceph_release_num:
+# dumpling: 0.67
+# emperor: 0.72
+# firefly: 0.80
+# giant: 0.87
+# hammer: 0.94
+# infernalis: 9
+# jewel: 10
+# kraken: 11
+# luminous: 12
+# mimic: 13
+
+# Directory to fetch cluster fsid, keys etc...
+#fetch_directory: fetch/
+
+# The 'cluster' variable determines the name of the cluster.
+# Changing the default value to something else means that you will
+# need to change all the command line calls as well, for example if
+# your cluster name is 'foo':
+# "ceph health" will become "ceph --cluster foo health"
+#
+# An easier way to handle this is to use the environment variable CEPH_ARGS
+# So run: export CEPH_ARGS="--cluster foo"
+# With that you will be able to run "ceph health" normally
+#cluster: ceph
+
+# Inventory host group variables
+#mon_group_name: mons
+#osd_group_name: osds
+#rgw_group_name: rgws
+#mds_group_name: mdss
+#nfs_group_name: nfss
+#restapi_group_name: restapis
+#rbdmirror_group_name: rbdmirrors
+#client_group_name: clients
+#iscsi_gw_group_name: iscsi-gws
+#mgr_group_name: mgrs
+
+# If check_firewall is true, then ansible will try to determine if the
+# Ceph ports are blocked by a firewall. If the machine running ansible
+# cannot reach the Ceph ports for some other reason, you may need or
+# want to set this to False to skip those checks.
+#check_firewall: False
+
+
+############
+# PACKAGES #
+############
+#debian_package_dependencies:
+# - python-pycurl
+# - hdparm
+
+#centos_package_dependencies:
+# - python-pycurl
+# - hdparm
+# - epel-release
+# - python-setuptools
+# - libselinux-python
+
+#redhat_package_dependencies:
+# - python-pycurl
+# - hdparm
+# - python-setuptools
+
+# Whether or not to install the ceph-test package.
+#ceph_test: false
+
+# Enable the ntp service by default to avoid clock skew on
+# ceph nodes
+#ntp_service_enabled: true
+
+# Set uid/gid to default '64045' for bootstrap directories.
+# '64045' is used for debian based distros. It must be set to 167 in case of rhel based distros.
+# These values have to be set according to the base OS used by the container image, NOT the host.
+#bootstrap_dirs_owner: "64045"
+#bootstrap_dirs_group: "64045"
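+# For example, for a container image based on a RHEL-family distro (per the
+# note above):
+# bootstrap_dirs_owner: "167"
+# bootstrap_dirs_group: "167"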
+
+# This variable determines if ceph packages can be updated. If False, the
+# package resources will use "state=present". If True, they will use
+# "state=latest".
+#upgrade_ceph_packages: False
+
+#ceph_use_distro_backports: false # DEBIAN ONLY
+
+
+###########
+# INSTALL #
+###########
+#ceph_rhcs_cdn_install: False # backward compatibility with stable-2.2, will disappear in stable 3.1
+#ceph_repository_type: "{{ 'cdn' if ceph_rhcs_cdn_install else 'iso' if ceph_rhcs_iso_install else 'dummy' }}" # backward compatibility with stable-2.2, will disappear in stable 3.1
+#ceph_rhcs_iso_install: False # backward compatibility with stable-2.2, will disappear in stable 3.1
+#ceph_rhcs: False # backward compatibility with stable-2.2, will disappear in stable 3.1
+#ceph_stable: False # backward compatibility with stable-2.2, will disappear in stable 3.1
+#ceph_dev: False # backward compatibility with stable-2.2, will disappear in stable 3.1
+#ceph_stable_uca: False # backward compatibility with stable-2.2, will disappear in stable 3.1
+#ceph_custom: False # backward compatibility with stable-2.2, will disappear in stable 3.1
+
+# ORIGIN SOURCE
+#
+# Choose between:
+# - 'repository' means that you will get ceph installed through a new repository. Later below choose between 'community', 'rhcs' or 'dev'
+# - 'distro' means that no separate repo file will be added;
+#   you will get whatever version of Ceph is included in your Linux distro.
+# - 'local' means that the ceph binaries will be copied over from the local machine
+#ceph_origin: "{{ 'repository' if ceph_rhcs or ceph_stable or ceph_dev or ceph_stable_uca or ceph_custom else 'dummy' }}" # backward compatibility with stable-2.2, will disappear in stable 3.1
+#valid_ceph_origins:
+# - repository
+# - distro
+# - local
+ceph_origin: repository
+ceph_repository: community
+
+#ceph_repository: "{{ 'community' if ceph_stable else 'rhcs' if ceph_rhcs else 'dev' if ceph_dev else 'uca' if ceph_stable_uca else 'custom' if ceph_custom else 'dummy' }}" # backward compatibility with stable-2.2, will disappear in stable 3.1
+#valid_ceph_repository:
+# - community
+# - rhcs
+# - dev
+# - uca
+# - custom
+
+
+# REPOSITORY: COMMUNITY VERSION
+#
+# Enabled when ceph_repository == 'community'
+#
+#ceph_mirror: http://download.ceph.com
+#ceph_stable_key: https://download.ceph.com/keys/release.asc
+ceph_stable_release: luminous
+#ceph_stable_repo: "{{ ceph_mirror }}/debian-{{ ceph_stable_release }}"
+
+#nfs_ganesha_stable: true # use stable repos for nfs-ganesha
+#nfs_ganesha_stable_branch: V2.5-stable
+#nfs_ganesha_stable_deb_repo: "{{ ceph_mirror }}/nfs-ganesha/deb-{{ nfs_ganesha_stable_branch }}/{{ ceph_stable_release }}"
+
+
+# Use the option below to specify your applicable package tree, e.g. when using non-LTS Ubuntu versions.
+# For a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/
+# for more info read: https://github.com/ceph/ceph-ansible/issues/305
+#ceph_stable_distro_source: "{{ ansible_lsb.codename }}"
+
+# This option is needed for _both_ stable and dev versions, so please always fill in the right version.
+# For supported distros, see http://download.ceph.com/rpm-{{ ceph_stable_release }}/
+#ceph_stable_redhat_distro: el7
+
+
+# REPOSITORY: RHCS VERSION RED HAT STORAGE (from 1.3)
+#
+# Enabled when ceph_repository == 'rhcs'
+#
+# This version is only supported on RHEL >= 7.1
+# As of RHEL 7.1, libceph.ko and rbd.ko are now included in Red Hat's kernel
+# packages natively. The RHEL 7.1 kernel packages are more stable and secure than
+# using these 3rd-party kmods with RHEL 7.0. Please update your systems to RHEL
+# 7.1 or later if you want to use the kernel RBD client.
+#
+# The CephFS kernel client is undergoing rapid development upstream, and we do
+# not recommend running the CephFS kernel module on RHEL 7's 3.10 kernel at this
+# time. Please use ELRepo's latest upstream 4.x kernels if you want to run CephFS
+# on RHEL 7.
+#
+#
+#ceph_rhcs_version: "{{ ceph_stable_rh_storage_version | default(2) }}"
+#valid_ceph_repository_type:
+# - cdn
+# - iso
+#ceph_rhcs_iso_path: "{{ ceph_stable_rh_storage_iso_path | default('') }}"
+#ceph_rhcs_mount_path: "{{ ceph_stable_rh_storage_mount_path | default('/tmp/rh-storage-mount') }}"
+#ceph_rhcs_repository_path: "{{ ceph_stable_rh_storage_repository_path | default('/tmp/rh-storage-repo') }}" # where to copy iso's content
+
+# RHCS installation in Debian systems
+#ceph_rhcs_cdn_debian_repo: https://customername:customerpasswd@rhcs.download.redhat.com
+#ceph_rhcs_cdn_debian_repo_version: "/3-release/" # for GA, later for updates use /3-updates/
+
+
+# REPOSITORY: UBUNTU CLOUD ARCHIVE
+#
+# Enabled when ceph_repository == 'uca'
+#
+# This allows the install of Ceph from the Ubuntu Cloud Archive. The Ubuntu Cloud Archive
+# usually has newer Ceph releases than the normal distro repository.
+#
+#
+#ceph_stable_repo_uca: "http://ubuntu-cloud.archive.canonical.com/ubuntu"
+#ceph_stable_openstack_release_uca: liberty
+#ceph_stable_release_uca: "{{ansible_lsb.codename}}-updates/{{ceph_stable_openstack_release_uca}}"
+
+
+# REPOSITORY: DEV
+#
+# Enabled when ceph_repository == 'dev'
+#
+#ceph_dev_branch: master # development branch you would like to use e.g: master, wip-hack
+#ceph_dev_sha1: latest # distinct sha1 to use, defaults to 'latest' (as in latest built)
+
+#nfs_ganesha_dev: false # use development repos for nfs-ganesha
+
+# Set this to choose the version of ceph dev libraries used in the nfs-ganesha packages from shaman
+# flavors so far include: ceph_master, ceph_jewel, ceph_kraken, ceph_luminous
+#nfs_ganesha_flavor: "ceph_master"
+
+#ceph_iscsi_config_dev: true # special repo for deploying iSCSI gateways
+
+
+# REPOSITORY: CUSTOM
+#
+# Enabled when ceph_repository == 'custom'
+#
+# Use a custom repository to install ceph. For RPM, ceph_custom_repo should be
+# a URL to the .repo file to be installed on the targets. For deb,
+# ceph_custom_repo should be the URL to the repo base.
+#
+#ceph_custom_repo: https://server.domain.com/ceph-custom-repo
+
+
+# ORIGIN: LOCAL CEPH INSTALLATION
+#
+# Enabled when ceph_repository == 'local'
+#
+# Path to DESTDIR of the ceph install
+#ceph_installation_dir: "/path/to/ceph_installation/"
+# Whether or not to use installer script rundep_installer.sh
+# This script takes in rundep and installs the packages line by line onto the machine.
+# If this is set to false, then it is assumed that the machine Ceph is being copied onto already has
+# all runtime dependencies installed.
+#use_installer: false
+# Root directory for ceph-ansible
+#ansible_dir: "/path/to/ceph-ansible"
+
+
+######################
+# CEPH CONFIGURATION #
+######################
+
+## Ceph options
+#
+# Each cluster requires a unique, consistent filesystem ID. By
+# default, the playbook generates one for you and stores it in a file
+# in `fetch_directory`. If you want to customize how the fsid is
+# generated, you may find it useful to disable fsid generation to
+# avoid cluttering up your ansible repo. If you set `generate_fsid` to
+# false, you *must* generate `fsid` in another way.
+# ACTIVATE THE FSID VARIABLE FOR NON-VAGRANT DEPLOYMENT
+#fsid: "{{ cluster_uuid.stdout }}"
+#generate_fsid: true
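+# For example (the UUID below is purely illustrative), you could generate a
+# value once with `uuidgen` and pin it here:
+# fsid: 4a158d27-f750-41d5-9e7f-26ce4c9d2d45
+# generate_fsid: false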
+
+#ceph_conf_key_directory: /etc/ceph
+
+#cephx: true
+
+## Client options
+#
+#rbd_cache: "true"
+#rbd_cache_writethrough_until_flush: "true"
+#rbd_concurrent_management_ops: 20
+
+#rbd_client_directories: true # this will create rbd_client_log_path and rbd_client_admin_socket_path directories with proper permissions
+
+# Permissions for the rbd_client_log_path and
+# rbd_client_admin_socket_path. Depending on your use case for Ceph
+# you may want to change these values. The default, which is used if
+# any of the variables are unset or set to a false value (like `null`
+# or `false`) is to automatically determine what is appropriate for
+# the Ceph version with non-OpenStack workloads -- ceph:ceph and 0770
+# for infernalis releases, and root:root and 1777 for pre-infernalis
+# releases.
+#
+# For other use cases, including running Ceph with OpenStack, you'll
+# want to set these differently:
+#
+# For OpenStack on RHEL, you'll want:
+# rbd_client_directory_owner: "qemu"
+# rbd_client_directory_group: "libvirtd" (or "libvirt", depending on your version of libvirt)
+# rbd_client_directory_mode: "0755"
+#
+# For OpenStack on Ubuntu or Debian, set:
+# rbd_client_directory_owner: "libvirt-qemu"
+# rbd_client_directory_group: "kvm"
+# rbd_client_directory_mode: "0755"
+#
+# If you set rbd_client_directory_mode, you must use a string (e.g.,
+# 'rbd_client_directory_mode: "0755"', *not*
+# 'rbd_client_directory_mode: 0755', or Ansible will complain: mode
+# must be in octal or symbolic form
+#rbd_client_directory_owner: null
+#rbd_client_directory_group: null
+#rbd_client_directory_mode: null
+
+#rbd_client_log_path: /var/log/ceph
+#rbd_client_log_file: "{{ rbd_client_log_path }}/qemu-guest-$pid.log" # must be writable by QEMU and allowed by SELinux or AppArmor
+#rbd_client_admin_socket_path: /var/run/ceph # must be writable by QEMU and allowed by SELinux or AppArmor
+
+## Monitor options
+#
+# You must define either monitor_interface, monitor_address or monitor_address_block.
+# These variables must be defined at least in all.yml and overridden if needed (inventory host file or group_vars/*.yml).
+# E.g. if you want to specify for each monitor which address the monitor will bind to, you can set it in your **inventory host file** by using the 'monitor_address' variable.
+# Preference will go to monitor_address if both monitor_address and monitor_interface are defined.
+# To use an IPv6 address, use the monitor_address setting instead (and set ip_version to ipv6)
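+#
+# For instance (hostnames and addresses below are illustrative), a per-host
+# override in the inventory host file would look like:
+# [mons]
+# mon0 monitor_address=192.168.1.10
+# mon1 monitor_address=192.168.1.11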
+monitor_interface: ens3
+#monitor_address: 0.0.0.0
+#monitor_address_block: subnet
+# set to either ipv4 or ipv6, whichever your network is using
+#ip_version: ipv4
+#mon_use_fqdn: false # if set to true, the MON name used will be the fqdn in the ceph.conf
+
+## OSD options
+#
+journal_size: 100 # OSD journal size in MB
+public_network: 100.64.128.40/24
+cluster_network: "{{ public_network }}"
+#osd_mkfs_type: xfs
+#osd_mkfs_options_xfs: -f -i size=2048
+#osd_mount_options_xfs: noatime,largeio,inode64,swalloc
+#osd_objectstore: filestore
+
+# xattrs. by default, 'filestore xattr use omap' is set to 'true' if
+# 'osd_mkfs_type' is set to 'ext4'; otherwise it isn't set. This can
+# be set to 'true' or 'false' to explicitly override those
+# defaults. Leave it 'null' to use the default for your chosen mkfs
+# type.
+#filestore_xattr_use_omap: null
+
+## MDS options
+#
+#mds_use_fqdn: false # if set to true, the MDS name used will be the fqdn in the ceph.conf
+#mds_allow_multimds: false
+#mds_max_mds: 3
+
+## Rados Gateway options
+#
+#radosgw_dns_name: your.subdomain.tld # subdomains used by radosgw. See http://ceph.com/docs/master/radosgw/config/#enabling-subdomain-s3-calls
+#radosgw_resolve_cname: false # enable for radosgw to resolve DNS CNAME based bucket names
+#radosgw_civetweb_port: 8080
+#radosgw_civetweb_num_threads: 100
+# For additional civetweb configuration options available such as SSL, logging,
+# keepalive, and timeout settings, please see the civetweb docs at
+# https://github.com/civetweb/civetweb/blob/master/docs/UserManual.md
+#radosgw_civetweb_options: "num_threads={{ radosgw_civetweb_num_threads }}"
+# You must define either radosgw_interface or radosgw_address.
+# These variables must be defined at least in all.yml and overridden if needed (inventory host file or group_vars/*.yml).
+# E.g. if you want to specify for each radosgw node which address the radosgw will bind to, you can set it in your **inventory host file** by using the 'radosgw_address' variable.
+# Preference will go to radosgw_address if both radosgw_address and radosgw_interface are defined.
+# To use an IPv6 address, use the radosgw_address setting instead (and set ip_version to ipv6)
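+#
+# For instance (hostname and address below are illustrative), a per-host
+# override in the inventory host file would look like:
+# [rgws]
+# rgw0 radosgw_address=192.168.1.20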
+#radosgw_interface: interface
+#radosgw_address: "{{ '0.0.0.0' if rgw_containerized_deployment else 'address' }}" # backward compatibility with stable-2.2, will disappear in stable 3.1
+#radosgw_address_block: subnet
+#radosgw_keystone: false # activate OpenStack Keystone options full detail here: http://ceph.com/docs/master/radosgw/keystone/
+# Rados Gateway options
+#email_address: foo@bar.com
+
+## REST API options
+#
+#restapi_interface: "{{ monitor_interface }}"
+#restapi_address: "{{ monitor_address }}"
+#restapi_port: 5000
+
+## Testing mode
+# enable this mode _only_ when you have a single node
+# if you don't want it keep the option commented
+#common_single_host_mode: true
+
+## Handlers - restarting daemons after a config change
+# If for whatever reason the content of your ceph configuration changes,
+# ceph daemons will be restarted as well. At the moment, we cannot detect
+# which config option changed, so all the daemons will be restarted. Although
+# this restart will be serialized for each node, in between a health check
+# will be performed so we make sure we don't move on to the next node until
+# ceph is healthy again.
+# Obviously, between the checks (for monitors to be in quorum and for OSDs' PGs
+# to be clean) we have to wait. These retries and delays are configurable
+# for both monitors and osds.
+#
+# Monitor handler checks
+#handler_health_mon_check_retries: 5
+#handler_health_mon_check_delay: 10
+#
+# OSD handler checks
+#handler_health_osd_check_retries: 40
+#handler_health_osd_check_delay: 30
+#handler_health_osd_check: true
+#
+# MDS handler checks
+#handler_health_mds_check_retries: 5
+#handler_health_mds_check_delay: 10
+#
+# RGW handler checks
+#handler_health_rgw_check_retries: 5
+#handler_health_rgw_check_delay: 10
+
+# NFS handler checks
+#handler_health_nfs_check_retries: 5
+#handler_health_nfs_check_delay: 10
+
+# RBD MIRROR handler checks
+#handler_health_rbd_mirror_check_retries: 5
+#handler_health_rbd_mirror_check_delay: 10
+
+# MGR handler checks
+#handler_health_mgr_check_retries: 5
+#handler_health_mgr_check_delay: 10
+
+###############
+# NFS-GANESHA #
+###############
+
+# Configure the type of NFS gateway access. At least one must be enabled for an
+# NFS role to be useful.
+#
+# Set this to true to enable File access via NFS. Requires an MDS role.
+#nfs_file_gw: false
+# Set this to true to enable Object access via NFS. Requires an RGW role.
+#nfs_obj_gw: true
+
+###################
+# CONFIG OVERRIDE #
+###################
+
+# Ceph configuration file override.
+# This allows you to specify more configuration options
+# using an INI style format.
+# The following sections are supported: [global], [mon], [osd], [mds], [rgw]
+#
+# Example:
+# ceph_conf_overrides:
+# global:
+# foo: 1234
+# bar: 5678
+#
+#ceph_conf_overrides: {}
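+#
+# A more concrete (illustrative) override, setting a real Ceph option in the
+# [global] section:
+# ceph_conf_overrides:
+#   global:
+#     osd pool default size: 2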
+
+
+#############
+# OS TUNING #
+#############
+
+#disable_transparent_hugepage: true
+#os_tuning_params:
+# - { name: kernel.pid_max, value: 4194303 }
+# - { name: fs.file-max, value: 26234859 }
+# - { name: vm.zone_reclaim_mode, value: 0 }
+# - { name: vm.swappiness, value: 10 }
+# - { name: vm.min_free_kbytes, value: "{{ vm_min_free_kbytes }}" }
+
+# For Debian & Red Hat/CentOS installs set TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES
+# Set this to a byte value (e.g. 134217728)
+# A value of 0 will leave the package default.
+#ceph_tcmalloc_max_total_thread_cache: 0
+
+
+##########
+# DOCKER #
+##########
+#docker_exec_cmd:
+#docker: false
+#ceph_docker_image: "ceph/daemon"
+#ceph_docker_image_tag: latest
+#ceph_docker_registry: docker.io
+#ceph_docker_enable_centos_extra_repo: false
+#ceph_docker_on_openstack: false
+#ceph_mon_docker_interface: "{{ monitor_interface }}" # backward compatibility with stable-2.2, will disappear in stable 3.1
+#ceph_mon_docker_subnet: "{{ public_network }}" # backward compatibility with stable-2.2, will disappear in stable 3.1
+#mon_containerized_deployment: False # backward compatibility with stable-2.2, will disappear in stable 3.1
+#osd_containerized_deployment: False # backward compatibility with stable-2.2, will disappear in stable 3.1
+#mds_containerized_deployment: False # backward compatibility with stable-2.2, will disappear in stable 3.1
+#rgw_containerized_deployment: False # backward compatibility with stable-2.2, will disappear in stable 3.1
+#containerized_deployment: "{{ True if mon_containerized_deployment or osd_containerized_deployment or mds_containerized_deployment or rgw_containerized_deployment else False }}" # backward compatibility with stable-2.2, will disappear in stable 3.1
+
+
+############
+# KV store #
+############
+#containerized_deployment_with_kv: false
+#mon_containerized_default_ceph_conf_with_kv: false
+#kv_type: etcd
+#kv_endpoint: 127.0.0.1
+#kv_port: 2379
+
+
+# this is only here for usage with the rolling_update.yml playbook
+# do not ever change this here
+#rolling_update: false
+
+
diff --git a/ci/ansible/group_vars/ceph/ceph.hosts b/ci/ansible/group_vars/ceph/ceph.hosts
index 42f5da8..34a7b26 100755..100644
--- a/ci/ansible/group_vars/ceph/ceph.hosts
+++ b/ci/ansible/group_vars/ceph/ceph.hosts
@@ -1,8 +1,8 @@
-[mons]
-localhost ansible_connection=local
-
-[osds]
-localhost ansible_connection=local
-
-[mgrs]
-localhost ansible_connection=local
+[mons]
+localhost ansible_connection=local
+
+[osds]
+localhost ansible_connection=local
+
+[mgrs]
+localhost ansible_connection=local
diff --git a/ci/ansible/group_vars/ceph/ceph.yaml b/ci/ansible/group_vars/ceph/ceph.yaml
index 8272cd1..5e70724 100755..100644
--- a/ci/ansible/group_vars/ceph/ceph.yaml
+++ b/ci/ansible/group_vars/ceph/ceph.yaml
@@ -1,5 +1,8 @@
-configFile: /etc/ceph/ceph.conf
-pool:
- "rbd": # change pool name same to ceph pool, but don't change it if you choose lvm backend
- diskType: SSD
- AZ: default
\ No newline at end of file
+configFile: /etc/ceph/ceph.conf
+pool:
+ "rbd": # change pool name same to ceph pool, but don't change it if you choose lvm backend
+ diskType: SSD
+ AZ: default
+ accessProtocol: rbd
+ thinProvisioned: true
+ compressed: false
diff --git a/ci/ansible/group_vars/ceph/osds.yml b/ci/ansible/group_vars/ceph/osds.yml
index 1f12204..57cf581 100755..100644
--- a/ci/ansible/group_vars/ceph/osds.yml
+++ b/ci/ansible/group_vars/ceph/osds.yml
@@ -1,259 +1,259 @@
+---
+# Variables here are applicable to all host groups NOT roles
+
+# This sample file generated by generate_group_vars_sample.sh
+
+# Dummy variable to avoid an error, because Ansible does not recognize
+# the file as a valid configuration file when it contains no variables.
+dummy:
+
+# You can override default vars defined in defaults/main.yml here,
+# but I would advise using host or group vars instead
+
+#raw_journal_devices: "{{ dedicated_devices }}" # backward compatibility with stable-2.2, will disappear in stable 3.1
+#journal_collocation: False # backward compatibility with stable-2.2, will disappear in stable 3.1
+#raw_multi_journal: False # backward compatibility with stable-2.2, will disappear in stable 3.1
+#dmcrytpt_journal_collocation: False # backward compatibility with stable-2.2, will disappear in stable 3.1
+#dmcrypt_dedicated_journal: False # backward compatibility with stable-2.2, will disappear in stable 3.1
+
+
+###########
+# GENERAL #
+###########
+
+# Even though OSD nodes should not have the admin key
+# at their disposal, some people might want to have it
+# distributed on OSD nodes. Setting 'copy_admin_key' to 'true'
+# will copy the admin key to the /etc/ceph/ directory
+#copy_admin_key: false
+
+
+####################
+# OSD CRUSH LOCATION
+####################
+
+# /!\
+#
+# BE EXTREMELY CAREFUL WITH THIS OPTION
+# DO NOT USE IT UNLESS YOU KNOW WHAT YOU ARE DOING
+#
+# /!\
+#
+# It is probably best to keep this option set to 'false', as the default
+# suggests. This option should only be used when building a complex
+# CRUSH map. It allows you to force a specific location for a set of OSDs.
+#
+# The following options will build a ceph.conf with OSD sections
+# Example:
+# [osd.X]
+# osd crush location = "root=location"
+#
+# This works with your inventory file
+# To match the following 'osd_crush_location' option the inventory must look like:
+#
+# [osds]
+# osd0 ceph_crush_root=foo ceph_crush_rack=bar
+
+#crush_location: false
+#osd_crush_location: "\"root={{ ceph_crush_root }} rack={{ ceph_crush_rack }} host={{ ansible_hostname }}\""
+
+
+##############
+# CEPH OPTIONS
+##############
+
+# Devices to be used as OSDs
+# You can pre-provision disks that are not present yet.
+# Ansible will just skip them. Newly added disks will be
+# automatically configured during the next run.
+#
+
+
+# Declare devices to be used as OSDs
+# All scenarios (except the 3rd) inherit from the following device declaration
+
+devices:
+# - /dev/sda
+# - /dev/sdc
+# - /dev/sdd
+# - /dev/sde
+
+#devices: []
+
+
+# 'osd_auto_discovery' mode prevents you from filling out the 'devices' variable above.
+# You can use this option with the first, fourth and fifth OSD scenarios.
+# Device discovery is based on the Ansible fact 'ansible_devices',
+# which reports all the devices on a system. If chosen, all the disks
+# found will be passed to ceph-disk. You should not be worried about using
+# this option, since ceph-disk has a built-in check which looks for empty devices.
+# Thus devices with existing partition tables will not be used.
+#
+#osd_auto_discovery: false
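+#
+# For instance (an illustrative toggle), to have Ansible discover empty disks
+# instead of listing them in 'devices':
+# osd_auto_discovery: true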
+
+# Encrypt your OSD device using dmcrypt
+# If set to True, no matter which osd_objectstore and osd_scenario you use, the data will be encrypted
+#dmcrypt: "{{ True if dmcrytpt_journal_collocation or dmcrypt_dedicated_journal else False }}" # backward compatibility with stable-2.2, will disappear in stable 3.1
+
+
+# I. First scenario: collocated
+#
+# To enable this scenario do: osd_scenario: collocated
+#
+#
+# If osd_objectstore: filestore is enabled both 'ceph data' and 'ceph journal' partitions
+# will be stored on the same device.
+#
+# If osd_objectstore: bluestore is enabled 'ceph data', 'ceph block', 'ceph block.db', 'ceph block.wal' will be stored
+# on the same device. The device will get 2 partitions:
+# - One for 'data', called 'ceph data'
+# - One for 'ceph block', 'ceph block.db', 'ceph block.wal' called 'ceph block'
+#
+# Example of what you will get:
+# [root@ceph-osd0 ~]# blkid /dev/sda*
+# /dev/sda: PTTYPE="gpt"
+# /dev/sda1: UUID="9c43e346-dd6e-431f-92d8-cbed4ccb25f6" TYPE="xfs" PARTLABEL="ceph data" PARTUUID="749c71c9-ed8f-4930-82a7-a48a3bcdb1c7"
+# /dev/sda2: PARTLABEL="ceph block" PARTUUID="e6ca3e1d-4702-4569-abfa-e285de328e9d"
+#
+
+#osd_scenario: "{{ 'collocated' if journal_collocation or dmcrytpt_journal_collocation else 'non-collocated' if raw_multi_journal or dmcrypt_dedicated_journal else 'dummy' }}" # backward compatibility with stable-2.2, will disappear in stable 3.1
+#valid_osd_scenarios:
+# - collocated
+# - non-collocated
+# - lvm
+osd_scenario: collocated
+
+# II. Second scenario: non-collocated
+#
+# To enable this scenario do: osd_scenario: non-collocated
+#
+# If osd_objectstore: filestore is enabled 'ceph data' and 'ceph journal' partitions
+# will be stored on different devices:
+# - 'ceph data' will be stored on the device listed in 'devices'
+# - 'ceph journal' will be stored on the device listed in 'dedicated_devices'
+#
+# Let's take an example, imagine 'devices' was declared like this:
+#
+# devices:
+# - /dev/sda
+# - /dev/sdb
+# - /dev/sdc
+# - /dev/sdd
+#
+# And 'dedicated_devices' was declared like this:
+#
+# dedicated_devices:
+# - /dev/sdf
+# - /dev/sdf
+# - /dev/sdg
+# - /dev/sdg
+#
+# This will result in the following mapping:
+# - /dev/sda will have /dev/sdf1 as journal
+# - /dev/sdb will have /dev/sdf2 as a journal
+# - /dev/sdc will have /dev/sdg1 as a journal
+# - /dev/sdd will have /dev/sdg2 as a journal
+#
+#
+# If osd_objectstore: bluestore is enabled, both 'ceph block.db' and 'ceph block.wal' partitions will be stored
+# on a dedicated device.
+#
+# So the following will happen:
+# - The devices listed in 'devices' will get 2 partitions, one for 'block' and one for 'data'.
+#   'data' is only 100MB and does not store any of your data; it is just a bunch of Ceph metadata.
+#   'block' will store all your actual data.
+# - The devices in 'dedicated_devices' will get 1 partition for RocksDB DB, called 'block.db'
+# and one for RocksDB WAL, called 'block.wal'
+#
+# By default, dedicated_devices will represent block.db
+#
+# Example of what you will get:
+# [root@ceph-osd0 ~]# blkid /dev/sd*
+# /dev/sda: PTTYPE="gpt"
+# /dev/sda1: UUID="c6821801-2f21-4980-add0-b7fc8bd424d5" TYPE="xfs" PARTLABEL="ceph data" PARTUUID="f2cc6fa8-5b41-4428-8d3f-6187453464d0"
+# /dev/sda2: PARTLABEL="ceph block" PARTUUID="ea454807-983a-4cf2-899e-b2680643bc1c"
+# /dev/sdb: PTTYPE="gpt"
+# /dev/sdb1: PARTLABEL="ceph block.db" PARTUUID="af5b2d74-4c08-42cf-be57-7248c739e217"
+# /dev/sdb2: PARTLABEL="ceph block.wal" PARTUUID="af3f8327-9aa9-4c2b-a497-cf0fe96d126a"
+#dedicated_devices: []
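+#
+# Putting the example above together (device names are illustrative):
+#osd_scenario: non-collocated
+#devices:
+#  - /dev/sda
+#  - /dev/sdb
+#dedicated_devices:
+#  - /dev/sdf
+#  - /dev/sdf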
+
+
+# More device granularity for Bluestore
+#
+# ONLY if osd_objectstore: bluestore is enabled.
+#
+# By default, if 'bluestore_wal_devices' is empty, it will get the content of 'dedicated_devices'.
+# If set, then you will have a dedicated partition on a specific device for block.wal.
+#
+# Example of what you will get:
+# [root@ceph-osd0 ~]# blkid /dev/sd*
+# /dev/sda: PTTYPE="gpt"
+# /dev/sda1: UUID="39241ae9-d119-4335-96b3-0898da8f45ce" TYPE="xfs" PARTLABEL="ceph data" PARTUUID="961e7313-bdb7-49e7-9ae7-077d65c4c669"
+# /dev/sda2: PARTLABEL="ceph block" PARTUUID="bff8e54e-b780-4ece-aa16-3b2f2b8eb699"
+# /dev/sdb: PTTYPE="gpt"
+# /dev/sdb1: PARTLABEL="ceph block.db" PARTUUID="0734f6b6-cc94-49e9-93de-ba7e1d5b79e3"
+# /dev/sdc: PTTYPE="gpt"
+# /dev/sdc1: PARTLABEL="ceph block.wal" PARTUUID="824b84ba-6777-4272-bbbd-bfe2a25cecf3"
+#bluestore_wal_devices: "{{ dedicated_devices }}"
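+#
+# For instance (the device name is hypothetical), to place block.wal on a
+# faster dedicated device while block.db stays on 'dedicated_devices':
+#bluestore_wal_devices:
+#  - /dev/nvme0n1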
+
+# III. Use ceph-volume to create OSDs from logical volumes.
+# Use 'osd_scenario: lvm' to enable this scenario. Currently we only support dedicated journals
+# when using lvm, not collocated journals.
+# lvm_volumes is a list of dictionaries. Each dictionary must contain a data, journal and data_vg
+# key. Any logical volume or volume group used must be a name, not a path.
+# data must be a logical volume
+# journal can be an lv, a device, or a partition. You cannot use the same journal for multiple data lvs.
+# data_vg must be the volume group name of the data lv
+# journal_vg is optional and must be the volume group name of the journal lv, if applicable
+# For example:
+# lvm_volumes:
+# - data: data-lv1
+# data_vg: vg1
+# journal: journal-lv1
+# journal_vg: vg2
+# - data: data-lv2
+# journal: /dev/sda
+# data_vg: vg1
+# - data: data-lv3
+# journal: /dev/sdb1
+# data_vg: vg2
+#lvm_volumes: []
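+#
+# A hedged sketch (a separate task file, not part of these group_vars) of
+# pre-creating the volume group and data lv from the example above with
+# Ansible's lvg/lvol modules; the physical volume /dev/sdx is an assumption:
+#- name: create vg1 on a spare disk
+#  lvg:
+#    vg: vg1
+#    pvs: /dev/sdx
+#- name: create data-lv1 inside vg1
+#  lvol:
+#    vg: vg1
+#    lv: data-lv1
+#    size: 100%FREE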
+
+
+##########
+# DOCKER #
+##########
+
+#ceph_config_keys: [] # DON'T TOUCH ME
+
+# Resource limitation
+# For the whole list of limits you can apply, see: docs.docker.com/engine/admin/resource_constraints
+# Default values are based on: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
+# These options can be passed using the 'ceph_osd_docker_extra_env' variable.
+#ceph_osd_docker_memory_limit: 1g
+#ceph_osd_docker_cpu_limit: 1
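+#
+# e.g. (values are illustrative, not recommendations) to raise both limits:
+#ceph_osd_docker_memory_limit: 4g
+#ceph_osd_docker_cpu_limit: 2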
+
+# PREPARE DEVICE
+#
+# WARNING /!\ DMCRYPT scenario ONLY works with Docker version 1.12.5 and above
+#
+#ceph_osd_docker_devices: "{{ devices }}"
+#ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }}
+
+# ACTIVATE DEVICE
+#
+#ceph_osd_docker_extra_env:
+#ceph_osd_docker_run_script_path: "/usr/share" # script called by systemd to run the docker command
+
+
+###########
+# SYSTEMD #
+###########
+
+# ceph_osd_systemd_overrides will override the systemd settings
+# for the ceph-osd services.
+# For example, to set "PrivateDevices=false" you can specify:
+#ceph_osd_systemd_overrides:
+# Service:
+# PrivateDevices: False
+
diff --git a/ci/ansible/group_vars/cinder/cinder.yaml b/ci/ansible/group_vars/cinder/cinder.yaml
index bfb1d85..e7971d0 100644
--- a/ci/ansible/group_vars/cinder/cinder.yaml
+++ b/ci/ansible/group_vars/cinder/cinder.yaml
@@ -1,14 +1,17 @@
-authOptions:
- noAuth: true
- endpoint: "http://127.0.0.1/identity"
- cinderEndpoint: "http://127.0.0.1:8776/v2"
- domainId: "Default"
- domainName: "Default"
- username: ""
- password: ""
- tenantId: "myproject"
- tenantName: "myproject"
-pool:
- "cinder-lvm@lvm#lvm":
- AZ: nova
- thin: true
+authOptions:
+ noAuth: true
+ endpoint: "http://127.0.0.1/identity"
+ cinderEndpoint: "http://127.0.0.1:8776/v2"
+ domainId: "Default"
+ domainName: "Default"
+ username: ""
+ password: ""
+ tenantId: "myproject"
+ tenantName: "myproject"
+pool:
+ "cinder-lvm@lvm#lvm":
+ AZ: nova
+ thin: true
+ accessProtocol: iscsi
+ thinProvisioned: true
+ compressed: true
diff --git a/ci/ansible/group_vars/common.yml b/ci/ansible/group_vars/common.yml
index 734d2e3..cbdaaf6 100755..100644
--- a/ci/ansible/group_vars/common.yml
+++ b/ci/ansible/group_vars/common.yml
@@ -1,34 +1,39 @@
----
-# Dummy variable to avoid error because ansible does not recognize the
-# file as a good configuration file when no variable in it.
-dummy:
-
-
-###########
-# GENERAL #
-###########
-
-workplace: /home/krej # Change this field according to your username, use '/root' if you login as root.
-
-# These fields are NOT suggested to be modified
-remote_url: https://github.com/opensds/opensds.git
-opensds_root_dir: "{{ workplace }}/gopath/src/github.com/opensds/opensds"
-opensds_build_dir: "{{ opensds_root_dir }}/build"
-opensds_config_dir: /etc/opensds
-opensds_log_dir: /var/log/opensds
-
-###########
-# GOLANG #
-###########
-
-golang_release: 1.9.2
-
-# These fields are NOT suggested to be modified
-golang_tarball: go{{ golang_release }}.linux-amd64.tar.gz
-golang_download_url: https://storage.googleapis.com/golang/{{ golang_tarball }}
-
-###########
-#CONTAINER#
-###########
-
-container_enabled: false
+---
+# Dummy variable to avoid error because ansible does not recognize the
+# file as a good configuration file when no variable in it.
+dummy:
+
+
+###########
+# GENERAL #
+###########
+
+opensds_release: v0.1.4 # The version should be at least v0.1.4.
+nbp_release: v0.1.0 # The version should be at least v0.1.0.
+
+# These fields are not suggested to be modified
+opensds_download_url: https://github.com/opensds/opensds/releases/download/{{ opensds_release }}/opensds-{{ opensds_release }}-linux-amd64.tar.gz
+opensds_tarball_url: /opt/opensds-{{ opensds_release }}-linux-amd64.tar.gz
+opensds_dir: /opt/opensds-{{ opensds_release }}-linux-amd64
+nbp_download_url: https://github.com/opensds/nbp/releases/download/{{ nbp_release }}/opensds-k8s-{{ nbp_release }}-linux-amd64.tar.gz
+nbp_tarball_url: /opt/opensds-k8s-{{ nbp_release }}-linux-amd64.tar.gz
+nbp_dir: /opt/opensds-k8s-{{ nbp_release }}-linux-amd64
+
+opensds_config_dir: /etc/opensds
+opensds_log_dir: /var/log/opensds
+
+
+###########
+# PLUGIN #
+###########
+
+nbp_plugin_type: standalone # standalone is the default integration method; you can change it to 'csi' or 'flexvolume'
+
+flexvolume_plugin_dir: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/opensds.io~opensds
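+
+# For example (a sketch), switching the integration to CSI is a one-line change:
+#nbp_plugin_type: csi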
+
+
+###########
+#CONTAINER#
+###########
+
+container_enabled: false
diff --git a/ci/ansible/group_vars/lvm/lvm.yaml b/ci/ansible/group_vars/lvm/lvm.yaml
index a5aecb8..a360891 100755..100644
--- a/ci/ansible/group_vars/lvm/lvm.yaml
+++ b/ci/ansible/group_vars/lvm/lvm.yaml
@@ -1,5 +1,8 @@
-tgtBindIp: 127.0.0.1
-pool:
- "vg001": # change pool name same to vg_name, but don't change it if you choose ceph backend
- diskType: SSD
- AZ: default
\ No newline at end of file
+tgtBindIp: 127.0.0.1
+pool:
+ "vg001": # change pool name same to vg_name, but don't change it if you choose ceph backend
+ diskType: SSD
+ AZ: default
+ accessProtocol: iscsi
+ thinProvisioned: false
+ compressed: false
diff --git a/ci/ansible/group_vars/osdsdb.yml b/ci/ansible/group_vars/osdsdb.yml
index c8ef864..1b6b812 100755..100644
--- a/ci/ansible/group_vars/osdsdb.yml
+++ b/ci/ansible/group_vars/osdsdb.yml
@@ -1,33 +1,35 @@
----
-# Dummy variable to avoid error because ansible does not recognize the
-# file as a good configuration file when no variable in it.
-dummy:
-
-
-###########
-# GENERAL #
-###########
-
-db_driver: etcd
-db_endpoint: localhost:2379,localhost:2380
-#db_credential: opensds:password@127.0.0.1:3306/dbname
-
-###########
-# ETCD #
-###########
-
-etcd_release: v3.2.0
-etcd_host: 127.0.0.1
-etcd_port: 2379
-etcd_peer_port: 2380
-
-# These fields are not suggested to be modified
-etcd_tarball: etcd-{{ etcd_release }}-linux-amd64.tar.gz
-etcd_download_url: https://github.com/coreos/etcd/releases/download/{{ etcd_release }}/{{ etcd_tarball }}
-etcd_dir: /opt/etcd-{{ etcd_release }}-linux-amd64
-
-###########
-# DOCKER #
-###########
-
-etcd_docker_image: quay.io/coreos/etcd:latest
+---
+# Dummy variable to avoid error because ansible does not recognize the
+# file as a good configuration file when no variable in it.
+dummy:
+
+
+###########
+# GENERAL #
+###########
+
+db_driver: etcd
+db_endpoint: localhost:2379,localhost:2380
+#db_credential: opensds:password@127.0.0.1:3306/dbname
+
+
+###########
+# ETCD #
+###########
+
+etcd_release: v3.2.0
+etcd_host: 127.0.0.1
+etcd_port: 2379
+etcd_peer_port: 2380
+
+# These fields are not suggested to be modified
+etcd_tarball: etcd-{{ etcd_release }}-linux-amd64.tar.gz
+etcd_download_url: https://github.com/coreos/etcd/releases/download/{{ etcd_release }}/{{ etcd_tarball }}
+etcd_dir: /opt/etcd-{{ etcd_release }}-linux-amd64
+
+
+###########
+# DOCKER #
+###########
+
+etcd_docker_image: quay.io/coreos/etcd:latest
diff --git a/ci/ansible/group_vars/osdsdock.yml b/ci/ansible/group_vars/osdsdock.yml
index a8c4ce9..1544c65 100755..100644
--- a/ci/ansible/group_vars/osdsdock.yml
+++ b/ci/ansible/group_vars/osdsdock.yml
@@ -1,76 +1,81 @@
----
-# Dummy variable to avoid error because ansible does not recognize the
-# file as a good configuration file when no variable in it.
-dummy:
-
-
-###########
-# GENERAL #
-###########
-
-# Change it according to your backend, currently support 'lvm', 'ceph', 'cinder'
-enabled_backend: lvm
-
-# These fields are NOT suggested to be modified
-dock_endpoint: localhost:50050
-dock_log_file: "{{ opensds_log_dir }}/osdsdock.log"
-
-###########
-# LVM #
-###########
-
-pv_device: /dev/sdc # Specify a block device and ensure it existed if you choose lvm
-vg_name: vg001 # Specify a name randomly
-
-# These fields are NOT suggested to be modified
-lvm_name: lvm backend
-lvm_description: This is a lvm backend service
-lvm_driver_name: lvm
-lvm_config_path: "{{ opensds_config_dir }}/driver/lvm.yaml"
-
-###########
-# CEPH #
-###########
-
-ceph_pool_name: rbd # Specify a name randomly
-
-# These fields are NOT suggested to be modified
-ceph_name: ceph backend
-ceph_description: This is a ceph backend service
-ceph_driver_name: ceph
-ceph_config_path: "{{ opensds_config_dir }}/driver/ceph.yaml"
-
-###########
-# CINDER #
-###########
-
-# Use block-box install cinder_standalone if true, see details in:
-# https://github.com/openstack/cinder/tree/master/contrib/block-box
-use_cinder_standalone: true
-# If true, you can configure cinder_container_platform, cinder_image_tag,
-# cinder_volume_group.
-
-# Default: debian:stretch, and ubuntu:xenial, centos:7 is also supported.
-cinder_container_platform: debian:stretch
-# The image tag can be arbitrarily modified, as long as follow the image naming
-# conventions, default: debian-cinder
-cinder_image_tag: debian-cinder
-# The cinder standalone use lvm driver as default driver, therefore `volume_group`
-# should be configured, the default is: cinder-volumes. The volume group will be
-# removed when use ansible script clean environment.
-cinder_volume_group: cinder-volumes
-# All source code and volume group file will be placed in the cinder_data_dir:
-cinder_data_dir: "{{ workplace }}/cinder_data_dir"
-
-
-# These fields are not suggested to be modified
-cinder_name: cinder backend
-cinder_description: This is a cinder backend service
-cinder_driver_name: cinder
-cinder_config_path: "{{ opensds_config_dir }}/driver/cinder.yaml"
-
-###########
-# DOCKER #
-###########
-
-dock_docker_image: opensdsio/opensds-dock:latest
+---
+# Dummy variable to avoid error because ansible does not recognize the
+# file as a good configuration file when no variable in it.
+dummy:
+
+
+###########
+# GENERAL #
+###########
+
+# Change it according to your backend; currently supported backends are 'lvm', 'ceph' and 'cinder'
+enabled_backend: lvm
+
+# These fields are NOT suggested to be modified
+dock_endpoint: localhost:50050
+dock_log_file: "{{ opensds_log_dir }}/osdsdock.log"
+
+###########
+# LVM #
+###########
+
+pv_devices: # Specify block devices and ensure they exist if you choose lvm
+ #- /dev/sdc
+ #- /dev/sdd
+vg_name: vg001 # Specify an arbitrary name
+
+# These fields are NOT suggested to be modified
+lvm_name: lvm backend
+lvm_description: This is a lvm backend service
+lvm_driver_name: lvm
+lvm_config_path: "{{ opensds_config_dir }}/driver/lvm.yaml"
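+
+# A hedged sketch (the playbook may already handle this for you) of building
+# vg001 from the pv_devices above with Ansible's lvg module; the device names
+# are illustrative:
+#- name: create the backing volume group for the lvm driver
+#  lvg:
+#    vg: "{{ vg_name }}"
+#    pvs: /dev/sdc,/dev/sdd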
+
+###########
+# CEPH #
+###########
+
+ceph_pools: # Specify arbitrary pool names
+ - rbd
+ #- ssd
+ #- sas
+
+# These fields are NOT suggested to be modified
+ceph_name: ceph backend
+ceph_description: This is a ceph backend service
+ceph_driver_name: ceph
+ceph_config_path: "{{ opensds_config_dir }}/driver/ceph.yaml"
+
+###########
+# CINDER #
+###########
+
+# Use block-box to install cinder standalone if true; see details in:
+# https://github.com/openstack/cinder/tree/master/contrib/block-box
+use_cinder_standalone: true
+# If true, you can configure cinder_container_platform, cinder_image_tag,
+# cinder_volume_group.
+
+# Default: debian:stretch; ubuntu:xenial and centos:7 are also supported.
+cinder_container_platform: debian:stretch
+# The image tag can be arbitrarily modified, as long as it follows the image naming
+# conventions; default: debian-cinder
+cinder_image_tag: debian-cinder
+# Cinder standalone uses the lvm driver as its default driver, so `volume_group`
+# should be configured; the default is cinder-volumes. The volume group will be
+# removed when the ansible script cleans the environment.
+cinder_volume_group: cinder-volumes
+# All source code and the volume group file will be placed in cinder_data_dir:
+cinder_data_dir: "{{ workplace }}/cinder_data_dir"
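+
+# An illustrative override set (the centos tag name is an assumption, chosen to
+# follow the naming convention mentioned above):
+#cinder_container_platform: centos:7
+#cinder_image_tag: centos-cinder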
+
+
+# These fields are not suggested to be modified
+cinder_name: cinder backend
+cinder_description: This is a cinder backend service
+cinder_driver_name: cinder
+cinder_config_path: "{{ opensds_config_dir }}/driver/cinder.yaml"
+
+###########
+# DOCKER #
+###########
+
+dock_docker_image: opensdsio/opensds-dock:latest
diff --git a/ci/ansible/group_vars/osdslet.yml b/ci/ansible/group_vars/osdslet.yml
index f9be9de..a872449 100755..100644
--- a/ci/ansible/group_vars/osdslet.yml
+++ b/ci/ansible/group_vars/osdslet.yml
@@ -1,19 +1,20 @@
----
-# Dummy variable to avoid error because ansible does not recognize the
-# file as a good configuration file when no variable in it.
-dummy:
-
-
-###########
-# GENERAL #
-###########
-
-# These fields are NOT suggested to be modified
-controller_endpoint: 0.0.0.0:50040
-controller_log_file: "{{ opensds_log_dir }}/osdslet.log"
-
-###########
-# DOCKER #
-###########
-
-controller_docker_image: opensdsio/opensds-controller:latest
+---
+# Dummy variable to avoid error because ansible does not recognize the
+# file as a good configuration file when no variable in it.
+dummy:
+
+
+###########
+# GENERAL #
+###########
+
+# These fields are NOT suggested to be modified
+controller_endpoint: 0.0.0.0:50040
+controller_log_file: "{{ opensds_log_dir }}/osdslet.log"
+
+
+###########
+# DOCKER #
+###########
+
+controller_docker_image: opensdsio/opensds-controller:latest