Diffstat (limited to 'apex/overcloud/deploy.py')
-rw-r--r--  apex/overcloud/deploy.py | 59
1 file changed, 14 insertions(+), 45 deletions(-)
diff --git a/apex/overcloud/deploy.py b/apex/overcloud/deploy.py
index 90c5cd4b..c7a8e407 100644
--- a/apex/overcloud/deploy.py
+++ b/apex/overcloud/deploy.py
@@ -144,15 +144,16 @@ def get_docker_sdn_file(ds_opts):
"""
# FIXME(trozet): We assume right now there is only one docker SDN file
docker_services = con.VALID_DOCKER_SERVICES
+ tht_dir = con.THT_DOCKER_ENV_DIR[ds_opts['os_version']]
sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
for sdn_file in sdn_env_list:
sdn_base = os.path.basename(sdn_file)
if sdn_base in docker_services:
if docker_services[sdn_base] is not None:
- return os.path.join(con.THT_DOCKER_ENV_DIR,
+ return os.path.join(tht_dir,
docker_services[sdn_base])
else:
- return os.path.join(con.THT_DOCKER_ENV_DIR, sdn_base)
+ return os.path.join(tht_dir, sdn_base)
def create_deploy_cmd(ds, ns, inv, tmp_dir,
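The hunk above turns con.THT_DOCKER_ENV_DIR into a per-release lookup keyed by the deploy settings' os_version. A minimal sketch of the assumed shape (the real constant lives in apex.common.constants; the keys and paths below are hypothetical):

    # Hypothetical shape of the constant; the real values live in
    # apex.common.constants and may differ.
    THT_DOCKER_ENV_DIR = {
        'queens': '/usr/share/openstack-tripleo-heat-templates/environments/services-docker',
        'master': '/usr/share/openstack-tripleo-heat-templates/environments/services',
    }

    # get_docker_sdn_file() then resolves the directory once per deployment:
    tht_dir = THT_DOCKER_ENV_DIR[ds_opts['os_version']]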
@@ -196,7 +197,7 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir,
else:
deploy_options.append(os.path.join(con.THT_ENV_DIR, v))
- if ds_opts['ceph']:
+ if ds_opts['ceph'] and 'csit' not in env_file:
prep_storage_env(ds, ns, virtual, tmp_dir)
deploy_options.append(os.path.join(con.THT_ENV_DIR,
'storage-environment.yaml'))
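The added guard means Ceph storage prep is skipped for CSIT scenario files even when ds_opts['ceph'] is true. A quick illustration of the substring check, assuming env_file is the environment file name argument of create_deploy_cmd (the file name below is hypothetical):

    # Illustrative only: 'csit' is matched as a plain substring of the
    # deploy settings file name.
    env_file = 'os-odl-csit-noha.yaml'   # hypothetical scenario file
    skip_storage = 'csit' in env_file    # True -> prep_storage_env() not called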
@@ -249,7 +250,7 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir,
def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
- patches=None, upstream=False):
+ patches=None):
"""
Locates sdn image and preps for deployment.
:param ds: deploy settings
@@ -259,7 +260,6 @@ def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
:param root_pw: password to configure for overcloud image
:param docker_tag: Docker image tag for RDO version (default None)
:param patches: List of patches to apply to overcloud image
- :param upstream: (boolean) Indicates if upstream deployment or not
:return: None
"""
# TODO(trozet): Come up with a better way to organize this logic in this
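With the upstream flag removed, prep_image() has a single code path for every deployment. A hypothetical call site under the new signature (paths and tag are illustrative, not from the source):

    # Hypothetical call; arguments follow the new signature above.
    prep_image(ds, ns,
               img='/root/overcloud-full.qcow2',  # illustrative path
               tmp_dir='/tmp/apex',               # illustrative path
               root_pw=None,
               docker_tag='latest',               # illustrative tag
               patches=None)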
@@ -366,35 +366,7 @@ def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
logging.debug("Temporary overcloud image stored as: {}".format(
tmp_oc_image))
- # TODO (trozet): remove this if block after Fraser
- if sdn == 'opendaylight' and not upstream:
- if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
- virt_cmds.extend([
- {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
- {con.VIRT_RUN_CMD: "rm -rf /etc/puppet/modules/opendaylight"},
- {con.VIRT_RUN_CMD: "cd /etc/puppet/modules && tar xzf "
- "/root/puppet-opendaylight-"
- "{}.tar.gz".format(ds_opts['odl_version'])}
- ])
- if ds_opts['odl_version'] == 'master':
- virt_cmds.extend([
- {con.VIRT_RUN_CMD: "rpm -ivh --nodeps /root/{}/*".format(
- ds_opts['odl_version'])}
- ])
- else:
- virt_cmds.extend([
- {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
- ds_opts['odl_version'])}
- ])
-
- elif sdn == 'opendaylight' and 'odl_vpp_netvirt' in ds_opts \
- and ds_opts['odl_vpp_netvirt']:
- virt_cmds.extend([
- {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
- {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
- ODL_NETVIRT_VPP_RPM)}
- ])
- elif sdn == 'opendaylight':
+ if sdn == 'opendaylight':
undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
'installer_vm']['ip']
oc_builder.inject_opendaylight(
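Everything deleted above was expressed as virt_cmds entries: one-key dicts mapping an operation constant to its argument, replayed in order against the overcloud image (by virt-customize, as the VIRT_* names suggest). A minimal sketch of the assumed contract, using the two constants that appear in this diff:

    # Assumed contract: each dict maps one operation constant to one
    # argument and is applied to the image in list order.
    virt_cmds = [
        # run a shell command inside the image
        {con.VIRT_RUN_CMD: 'yum -y remove opendaylight'},
        # copy host:path into the image (host path is hypothetical)
        {con.VIRT_UPLOAD: '/tmp/losetup.service:/usr/lib/systemd/system/'},
    ]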
@@ -422,7 +394,7 @@ def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
docker_tag=docker_tag))
# if containers with ceph, and no ceph device we need to use a
# persistent loop device for Ceph OSDs
- if docker_tag and not ds_opts.get('ceph_device', None):
+ if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
with open(tmp_losetup, 'w') as fh:
fh.write(LOSETUP_SERVICE)
@@ -430,7 +402,6 @@ def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
{con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(tmp_losetup)
},
{con.VIRT_RUN_CMD: 'truncate /srv/data.img --size 10G'},
- {con.VIRT_RUN_CMD: 'mkfs.ext4 -F /srv/data.img'},
{con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
{con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
])
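Two things change in this hunk: the loop-device path now triggers only when ceph_device is explicitly the default /dev/loop3, and the sparse file is no longer formatted with ext4, presumably because the containerized Ceph OSD prep expects a raw, unformatted device. The uploaded unit text comes from LOSETUP_SERVICE, a constant defined elsewhere in this module; a hypothetical sketch of what such a unit might contain:

    # Hypothetical shape of LOSETUP_SERVICE; the real unit text is a
    # module constant and may differ.
    LOSETUP_SERVICE = """[Unit]
    Description=Setup loop devices
    Before=network.target

    [Service]
    Type=oneshot
    ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
    RemainAfterExit=yes

    [Install]
    WantedBy=multi-user.target
    """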
@@ -690,28 +661,26 @@ def prep_storage_env(ds, ns, virtual, tmp_dir):
ceph_params = {
'DockerCephDaemonImage': docker_image,
}
- if not ds['global_params']['ha_enabled']:
- ceph_params['CephPoolDefaultSize'] = 1
+ # max pgs allowed are calculated as num_mons * 200. Therefore we
+ # set number of pgs and pools so that the total will be less:
+ # num_pgs * num_pools * num_osds
+ ceph_params['CephPoolDefaultSize'] = 2
+ ceph_params['CephPoolDefaultPgNum'] = 32
if virtual:
ceph_params['CephAnsibleExtraConfig'] = {
'centos_package_dependencies': [],
'ceph_osd_docker_memory_limit': '1g',
'ceph_mds_docker_memory_limit': '1g',
}
- ceph_params['CephPoolDefaultPgNum'] = 32
- if 'ceph_device' in ds_opts and ds_opts['ceph_device']:
- ceph_device = ds_opts['ceph_device']
- else:
- # TODO(trozet): make this DS default after Fraser
- ceph_device = '/dev/loop3'
-
+ ceph_device = ds_opts['ceph_device']
ceph_params['CephAnsibleDisksConfig'] = {
'devices': [ceph_device],
'journal_size': 512,
'osd_scenario': 'collocated'
}
utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
+ # TODO(trozet): remove following block as we only support containers now
elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
with open(storage_file, 'a') as fh:
fh.write(' ExtraConfig:\n')
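The new PG settings can be sanity-checked against the budget described in the comment added above: with these defaults, the product stays under num_mons * 200. A worked check (monitor, pool, and OSD counts are assumptions for a minimal virtual deploy):

    # Worked check of the budget from the comment above; counts are
    # hypothetical for a one-monitor, one-OSD virtual deploy.
    num_mons = 1
    num_osds = 1              # single loop device
    num_pools = 3             # e.g. images, vms, volumes (assumed)
    pg_num = 32               # CephPoolDefaultPgNum set above
    max_pgs = num_mons * 200  # limit per the comment's formula
    total = pg_num * num_pools * num_osds
    assert total < max_pgs    # 96 < 200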