From 6b304d2e2d9addcf33ad7e7ce5481d37a6b8ee4e Mon Sep 17 00:00:00 2001
From: Tim Rozet
Date: Mon, 18 Jun 2018 14:00:38 -0400
Subject: Fixes Ceph PG calculation

Baremetal deployments were failing because the Ceph PG count was
exceeding the maximum allowed. Virtual deployments still worked because
we lower the number of pools and PGs per OSD there. This patch changes
the values to ones that should work for both virtual and baremetal. It
also adds the controllers back as OSDs and includes a few other cleanup
fixes.

JIRA: APEX-614
JIRA: APEX-569

Change-Id: I2ad65727ecdcaa0454eb53d25e32b7f1a53cd3a4
Signed-off-by: Tim Rozet
---
 apex/overcloud/deploy.py                 | 18 ++++++++----------
 apex/settings/deploy_settings.py         |  6 ++++--
 apex/tests/test_apex_overcloud_deploy.py |  2 +-
 3 files changed, 13 insertions(+), 13 deletions(-)

(limited to 'apex')

diff --git a/apex/overcloud/deploy.py b/apex/overcloud/deploy.py
index 8367ffa2..a45b3a9b 100644
--- a/apex/overcloud/deploy.py
+++ b/apex/overcloud/deploy.py
@@ -423,7 +423,7 @@ def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
                               docker_tag=docker_tag))
     # if containers with ceph, and no ceph device we need to use a
     # persistent loop device for Ceph OSDs
-    if docker_tag and not ds_opts.get('ceph_device', None):
+    if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
         tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
         with open(tmp_losetup, 'w') as fh:
             fh.write(LOSETUP_SERVICE)
@@ -685,28 +685,26 @@ def prep_storage_env(ds, ns, virtual, tmp_dir):
         ceph_params = {
             'DockerCephDaemonImage': docker_image,
         }
 
-        if not ds['global_params']['ha_enabled']:
-            ceph_params['CephPoolDefaultSize'] = 1
+        # max pgs allowed are calculated as num_mons * 200. Therefore we
+        # set number of pgs and pools so that the total will be less:
+        # num_pgs * num_pools * num_osds
+        ceph_params['CephPoolDefaultSize'] = 2
+        ceph_params['CephPoolDefaultPgNum'] = 32
         if virtual:
             ceph_params['CephAnsibleExtraConfig'] = {
                 'centos_package_dependencies': [],
                 'ceph_osd_docker_memory_limit': '1g',
                 'ceph_mds_docker_memory_limit': '1g',
             }
-            ceph_params['CephPoolDefaultPgNum'] = 32
-        if 'ceph_device' in ds_opts and ds_opts['ceph_device']:
-            ceph_device = ds_opts['ceph_device']
-        else:
-            # TODO(trozet): make this DS default after Fraser
-            ceph_device = '/dev/loop3'
-
+        ceph_device = ds_opts['ceph_device']
         ceph_params['CephAnsibleDisksConfig'] = {
             'devices': [ceph_device],
             'journal_size': 512,
             'osd_scenario': 'collocated'
         }
         utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
+    # TODO(trozet): remove following block as we only support containers now
     elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
         with open(storage_file, 'a') as fh:
             fh.write('  ExtraConfig:\n')
diff --git a/apex/settings/deploy_settings.py b/apex/settings/deploy_settings.py
index 4f887ed0..29fe64fb 100644
--- a/apex/settings/deploy_settings.py
+++ b/apex/settings/deploy_settings.py
@@ -26,11 +26,11 @@ REQ_DEPLOY_SETTINGS = ['sdn_controller',
                        'os_version',
                        'l2gw',
                        'sriov',
-                       'containers']
+                       'containers',
+                       'ceph_device']
 
 OPT_DEPLOY_SETTINGS = ['performance',
                        'vsperf',
-                       'ceph_device',
                        'yardstick',
                        'dovetail',
                        'odl_vpp_routing_node',
@@ -105,6 +105,8 @@ class DeploySettings(dict):
                     self['deploy_options'][req_set] = 'ovs'
                 elif req_set == 'ceph':
                     self['deploy_options'][req_set] = True
+                elif req_set == 'ceph_device':
+                    self['deploy_options'][req_set] = '/dev/loop3'
                 elif req_set == 'odl_version':
                     self['deploy_options'][req_set] = \
                         constants.DEFAULT_ODL_VERSION
diff --git a/apex/tests/test_apex_overcloud_deploy.py b/apex/tests/test_apex_overcloud_deploy.py
index b69c44d0..f1db91ad 100644
--- a/apex/tests/test_apex_overcloud_deploy.py
+++ b/apex/tests/test_apex_overcloud_deploy.py
@@ -499,7 +499,7 @@ class TestOvercloudDeploy(unittest.TestCase):
             'DockerCephDaemonImage':
                 '192.0.2.1:8787/ceph/daemon:tag-build-master-luminous-centos'
                 '-7',
-            'CephPoolDefaultSize': 1,
+            'CephPoolDefaultSize': 2,
             'CephAnsibleExtraConfig': {
                 'centos_package_dependencies': [],
                 'ceph_osd_docker_memory_limit': '1g',
--
cgit 1.2.3-korg
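The in-code comment added to prep_storage_env() keeps the PG budget,
num_pgs * num_pools * num_osds, below the limit it cites of
num_mons * 200. The short sketch below only mirrors that arithmetic so
the chosen default of CephPoolDefaultPgNum = 32 can be sanity-checked;
it is not part of the patch or the Apex codebase, and the monitor, OSD,
and pool counts in the example are hypothetical values, not ones read
from a deployment:

# pg_budget_check.py -- illustrative sketch only; mirrors the formula in
# the comment added to prep_storage_env(), not an actual Ceph or Apex API.

MAX_PG_PER_MON = 200  # per-monitor limit cited by the patch comment


def pg_budget(num_mons, num_osds, num_pools, pg_num=32):
    """Return (requested, allowed) PG totals per the patch comment.

    requested = pg_num * num_pools * num_osds
    allowed   = num_mons * MAX_PG_PER_MON
    """
    requested = pg_num * num_pools * num_osds
    allowed = num_mons * MAX_PG_PER_MON
    return requested, allowed


if __name__ == '__main__':
    # Hypothetical small deployment: 3 monitors, 3 OSDs, 6 Ceph pools.
    requested, allowed = pg_budget(num_mons=3, num_osds=3, num_pools=6)
    print('requested={} allowed={} within budget: {}'.format(
        requested, allowed, requested < allowed))  # 576 < 600 -> True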