Diffstat (limited to 'apex/overcloud/deploy.py')
-rw-r--r--  apex/overcloud/deploy.py  492
1 file changed, 415 insertions(+), 77 deletions(-)
diff --git a/apex/overcloud/deploy.py b/apex/overcloud/deploy.py
index 809afc1..2726374 100644
--- a/apex/overcloud/deploy.py
+++ b/apex/overcloud/deploy.py
@@ -11,14 +11,20 @@ import base64
import fileinput
import logging
import os
+import platform
+import pprint
import shutil
import uuid
import struct
import time
+import yaml
+import apex.builders.overcloud_builder as oc_builder
+import apex.builders.common_builder as c_builder
from apex.common import constants as con
from apex.common.exceptions import ApexDeployException
from apex.common import parsers
+from apex.common import utils
from apex.virtual import utils as virt_utils
from cryptography.hazmat.primitives import serialization as \
crypto_serialization
@@ -37,6 +43,8 @@ SDN_FILE_MAP = {
'dvr': 'neutron-opendaylight-fdio-dvr.yaml',
'default': 'neutron-opendaylight-honeycomb.yaml'
},
+ 'l2gw': 'neutron-l2gw-opendaylight.yaml',
+ 'sriov': 'neutron-opendaylight-sriov.yaml',
'default': 'neutron-opendaylight.yaml',
},
'onos': {
@@ -64,24 +72,75 @@ OVS_PERF_MAP = {
'NeutronDpdkMemoryChannels': 'memory_channels'
}
-OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm"
-OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
".noarch.rpm"
+LOOP_DEVICE_SIZE = "10G"
+
+LOSETUP_SERVICE = """[Unit]
+Description=Setup loop devices
+Before=network.target
+
+[Service]
+Type=oneshot
+ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
+ExecStop=/sbin/losetup -d /dev/loop3
+TimeoutSec=60
+RemainAfterExit=yes
+
+[Install]
+WantedBy=multi-user.target
+"""
+
+DUPLICATE_COMPUTE_SERVICES = [
+ 'OS::TripleO::Services::ComputeNeutronCorePlugin',
+ 'OS::TripleO::Services::ComputeNeutronMetadataAgent',
+ 'OS::TripleO::Services::ComputeNeutronOvsAgent',
+ 'OS::TripleO::Services::ComputeNeutronL3Agent'
+]
+
+NFS_VARS = [
+ 'NovaNfsEnabled',
+ 'GlanceNfsEnabled',
+ 'CinderNfsEnabledBackend'
+]
+
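A note on the constants above: LOSETUP_SERVICE plus a sparse backing file is how Ceph gets a persistent block device on nodes without a spare disk. A minimal local sketch of the equivalent setup, assuming the same /srv/data.img path and /dev/loop3 device used here:

    import subprocess

    def setup_loop_osd(backing='/srv/data.img', device='/dev/loop3',
                       size='10G'):
        # truncate creates a sparse file, so no space is consumed up front
        subprocess.check_call(['truncate', '--size', size, backing])
        # attach the file; the oneshot unit above re-runs this at boot so
        # the device persists and Ceph can treat it as a raw OSD disk
        subprocess.check_call(['losetup', device, backing])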
def build_sdn_env_list(ds, sdn_map, env_list=None):
+ """
+ Builds a list of SDN environment files to be used in the deploy cmd.
+
+ This function recursively searches an sdn_map. First the SDN controller is
+ matched, and then the function looks for enabled features of that
+ controller to determine which environment files should be used. By
+ default, a feature is added to the list when its value in the deploy
+ settings is true. If a feature's value is not a boolean, it is stored as a
+ tuple of (value, env_file) and the env file is added when the value matches.
+
+ :param ds: deploy settings
+ :param sdn_map: SDN map to recursively search
+ :param env_list: recursive var to hold previously found env_list
+ :return: A list of env files
+ """
if env_list is None:
env_list = list()
for k, v in sdn_map.items():
- if ds['sdn_controller'] == k or (k in ds and ds[k] is True):
+ if ds['sdn_controller'] == k or (k in ds and ds[k]):
if isinstance(v, dict):
+ # Append default SDN env file first
+ # The assumption is that feature-enabled SDN env files
+ # override and do not conflict with previously set default
+ # settings
+ if ds['sdn_controller'] == k and 'default' in v:
+ env_list.append(os.path.join(con.THT_ENV_DIR,
+ v['default']))
env_list.extend(build_sdn_env_list(ds, v))
+ # non-boolean feature values are stored as a (value, env_file) tuple
+ elif isinstance(v, tuple):
+ if ds[k] == v[0]:
+ env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
else:
env_list.append(os.path.join(con.THT_ENV_DIR, v))
- elif isinstance(v, tuple):
- if ds[k] == v[0]:
- env_list.append(os.path.join(con.THT_ENV_DIR, v[1]))
if len(env_list) == 0:
try:
env_list.append(os.path.join(
@@ -92,6 +151,26 @@ def build_sdn_env_list(ds, sdn_map, env_list=None):
return env_list
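To make the matching rules concrete, a toy run against a trimmed map (deploy settings here are hypothetical; file names follow SDN_FILE_MAP):

    ds = {'sdn_controller': 'opendaylight', 'sfc': True}  # hypothetical
    sdn_map = {
        'opendaylight': {
            'sfc': 'neutron-sfc-opendaylight.yaml',  # boolean feature
            'default': 'neutron-opendaylight.yaml',
        },
    }
    # build_sdn_env_list(ds, sdn_map) returns, in order:
    #   [<THT_ENV_DIR>/neutron-opendaylight.yaml,      (controller default)
    #    <THT_ENV_DIR>/neutron-sfc-opendaylight.yaml]  (enabled feature)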
+def get_docker_sdn_files(ds_opts):
+ """
+ Returns the docker THT env files for the detected SDN
+ :param ds_opts: deploy options
+ :return: list of docker THT env files for an SDN
+ """
+ docker_services = con.VALID_DOCKER_SERVICES
+ tht_dir = con.THT_DOCKER_ENV_DIR
+ sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
+ for i, sdn_file in enumerate(sdn_env_list):
+ sdn_base = os.path.basename(sdn_file)
+ if sdn_base in docker_services:
+ if docker_services[sdn_base] is not None:
+ sdn_env_list[i] = \
+ os.path.join(tht_dir, docker_services[sdn_base])
+ else:
+ sdn_env_list[i] = os.path.join(tht_dir, sdn_base)
+ return sdn_env_list
+
+
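The substitution performed here assumes VALID_DOCKER_SERVICES maps an env file basename to a docker-specific replacement, or to None when the same basename exists under THT_DOCKER_ENV_DIR. The mapping below is illustrative only:

    docker_services = {
        'neutron-opendaylight.yaml': None,
        'neutron-opendaylight-sriov.yaml': 'neutron-odl-sriov-docker.yaml',
    }
    # <THT_ENV_DIR>/neutron-opendaylight.yaml
    #     -> <THT_DOCKER_ENV_DIR>/neutron-opendaylight.yaml
    # <THT_ENV_DIR>/neutron-opendaylight-sriov.yaml
    #     -> <THT_DOCKER_ENV_DIR>/neutron-odl-sriov-docker.yaml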
def create_deploy_cmd(ds, ns, inv, tmp_dir,
virtual, env_file='opnfv-environment.yaml',
net_data=False):
@@ -99,22 +178,52 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir,
logging.info("Creating deployment command")
deploy_options = ['network-environment.yaml']
+ ds_opts = ds['deploy_options']
+
+ if ds_opts['containers']:
+ deploy_options.append(os.path.join(con.THT_ENV_DIR,
+ 'docker.yaml'))
+
+ if ds['global_params']['ha_enabled']:
+ if ds_opts['containers']:
+ deploy_options.append(os.path.join(con.THT_ENV_DIR,
+ 'docker-ha.yaml'))
+ else:
+ deploy_options.append(os.path.join(con.THT_ENV_DIR,
+ 'puppet-pacemaker.yaml'))
+
if env_file:
deploy_options.append(env_file)
- ds_opts = ds['deploy_options']
- deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)
+
+ if ds_opts['containers']:
+ deploy_options.append('docker-images.yaml')
+ sdn_docker_files = get_docker_sdn_files(ds_opts)
+ for sdn_docker_file in sdn_docker_files:
+ deploy_options.append(sdn_docker_file)
+ else:
+ deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)
for k, v in OTHER_FILE_MAP.items():
if k in ds_opts and ds_opts[k]:
- deploy_options.append(os.path.join(con.THT_ENV_DIR, v))
+ if ds_opts['containers']:
+ deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR,
+ "{}.yaml".format(k)))
+ else:
+ deploy_options.append(os.path.join(con.THT_ENV_DIR, v))
- if ds_opts['ceph']:
- prep_storage_env(ds, tmp_dir)
+ # TODO(trozet) Fix this check to look for whether ceph is in the
+ # controller services rather than relying on the env file name
+ if ds_opts['ceph'] and 'csit' not in env_file:
+ prep_storage_env(ds, ns, virtual, tmp_dir)
deploy_options.append(os.path.join(con.THT_ENV_DIR,
'storage-environment.yaml'))
- if ds['global_params']['ha_enabled']:
- deploy_options.append(os.path.join(con.THT_ENV_DIR,
- 'puppet-pacemaker.yaml'))
+ if ds_opts['sriov']:
+ prep_sriov_env(ds, tmp_dir)
+
+ # Check for 'k8s' here intentionally, as we may support other values
+ # such as openstack/openshift for the 'vim' option.
+ if ds_opts['vim'] == 'k8s':
+ deploy_options.append('kubernetes-environment.yaml')
if virtual:
deploy_options.append('virtual-environment.yaml')
@@ -122,12 +231,16 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir,
deploy_options.append('baremetal-environment.yaml')
num_control, num_compute = inv.get_node_counts()
- if num_control == 0 or num_compute == 0:
- logging.error("Detected 0 control or compute nodes. Control nodes: "
- "{}, compute nodes{}".format(num_control, num_compute))
- raise ApexDeployException("Invalid number of control or computes")
- elif num_control > 1 and not ds['global_params']['ha_enabled']:
+ if num_control > 1 and not ds['global_params']['ha_enabled']:
num_control = 1
+ if platform.machine() == 'aarch64':
+ # aarch64 deploys were not completing in the default 90 mins.
+ # It is unclear whether this is related to the hardware the OOO
+ # support was developed on or to the virtualization support in
+ # CentOS. Either way it will likely improve over time as aarch64
+ # support matures in CentOS, so deploy time should be re-tested in
+ # the future and this multiplier removed.
+ con.DEPLOY_TIMEOUT *= 2
cmd = "openstack overcloud deploy --templates --timeout {} " \
.format(con.DEPLOY_TIMEOUT)
# build cmd env args
@@ -140,12 +253,16 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir,
if net_data:
cmd += ' --networks-file network_data.yaml'
libvirt_type = 'kvm'
- if virtual:
+ if virtual and (platform.machine() != 'aarch64'):
with open('/sys/module/kvm_intel/parameters/nested') as f:
nested_kvm = f.read().strip()
if nested_kvm != 'Y':
libvirt_type = 'qemu'
+ elif virtual and (platform.machine() == 'aarch64'):
+ libvirt_type = 'qemu'
cmd += ' --libvirt-type {}'.format(libvirt_type)
+ if platform.machine() == 'aarch64':
+ cmd += ' --override-ansible-cfg /home/stack/ansible.cfg '
logging.info("Deploy command set: {}".format(cmd))
with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
@@ -153,13 +270,17 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir,
return cmd
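The libvirt-type logic above, as a standalone sketch (same sysfs path as the diff; note it only checks kvm_intel, so AMD hosts would need the analogous kvm_amd parameter):

    import platform

    def choose_libvirt_type(virtual):
        # kvm requires nested virt on virtual deploys; aarch64 always
        # falls back to plain qemu emulation
        if not virtual:
            return 'kvm'
        if platform.machine() == 'aarch64':
            return 'qemu'
        with open('/sys/module/kvm_intel/parameters/nested') as f:
            return 'kvm' if f.read().strip() == 'Y' else 'qemu'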
-def prep_image(ds, img, tmp_dir, root_pw=None):
+def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
+ patches=None):
"""
Locates sdn image and preps for deployment.
:param ds: deploy settings
+ :param ns: network settings
:param img: sdn image
:param tmp_dir: dir to store modified sdn image
:param root_pw: password to configure for overcloud image
+ :param docker_tag: Docker image tag for RDO version (default None)
+ :param patches: List of patches to apply to overcloud image
:return: None
"""
# TODO(trozet): Come up with a better way to organize this logic in this
@@ -172,6 +293,7 @@ def prep_image(ds, img, tmp_dir, root_pw=None):
ds_opts = ds['deploy_options']
virt_cmds = list()
sdn = ds_opts['sdn_controller']
+ patched_containers = set()
# we need this due to rhbz #1436021
# fixed in systemd-219-37.el7
if sdn is not False:
@@ -186,7 +308,25 @@ def prep_image(ds, img, tmp_dir, root_pw=None):
".service"
}])
+ if ns.get('http_proxy', ''):
+ virt_cmds.append({
+ con.VIRT_RUN_CMD:
+ "echo 'http_proxy={}' >> /etc/environment".format(
+ ns['http_proxy'])})
+
+ if ns.get('https_proxy', ''):
+ virt_cmds.append({
+ con.VIRT_RUN_CMD:
+ "echo 'https_proxy={}' >> /etc/environment".format(
+ ns['https_proxy'])})
+
+ tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
+ shutil.copyfile(img, tmp_oc_image)
+ logging.debug("Temporary overcloud image stored as: {}".format(
+ tmp_oc_image))
+
if ds_opts['vpn']:
+ oc_builder.inject_quagga(tmp_oc_image, tmp_dir)
virt_cmds.append({con.VIRT_RUN_CMD: "chmod +x /etc/rc.d/rc.local"})
virt_cmds.append({
con.VIRT_RUN_CMD:
@@ -226,15 +366,13 @@ def prep_image(ds, img, tmp_dir, root_pw=None):
if root_pw:
pw_op = "password:{}".format(root_pw)
virt_cmds.append({con.VIRT_PW: pw_op})
- if ds_opts['sfc'] and dataplane == 'ovs':
- virt_cmds.extend([
- {con.VIRT_RUN_CMD: "yum -y install "
- "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
- "{}".format(OVS_NSH_KMOD_RPM)},
- {con.VIRT_RUN_CMD: "yum downgrade -y "
- "/root/ovs/rpm/rpmbuild/RPMS/x86_64/"
- "{}".format(OVS_NSH_RPM)}
- ])
+
+ if dataplane == 'ovs':
+ # FIXME(trozet) remove this after RDO is updated with fix for
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1544892
+ # https://review.rdoproject.org/r/#/c/13839/
+ oc_builder.inject_ovs_nsh(tmp_oc_image, tmp_dir)
+
if dataplane == 'fdio':
# Patch neutron with using OVS external interface for router
# and add generic linux NS interface driver
@@ -248,48 +386,77 @@ def prep_image(ds, img, tmp_dir, root_pw=None):
"/root/nosdn_vpp_rpms/*.rpm"}
])
+ undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
+ 'installer_vm']['ip']
if sdn == 'opendaylight':
- if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
- virt_cmds.extend([
- {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
- {con.VIRT_RUN_CMD: "rm -rf /etc/puppet/modules/opendaylight"},
- {con.VIRT_RUN_CMD: "cd /etc/puppet/modules && tar xzf "
- "/root/puppet-opendaylight-"
- "{}.tar.gz".format(ds_opts['odl_version'])}
- ])
- if ds_opts['odl_version'] == 'master':
- virt_cmds.extend([
- {con.VIRT_RUN_CMD: "rpm -ivh --nodeps /root/{}/*".format(
- ds_opts['odl_version'])}
- ])
- else:
- virt_cmds.extend([
- {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
- ds_opts['odl_version'])}
- ])
-
- elif sdn == 'opendaylight' and 'odl_vpp_netvirt' in ds_opts \
- and ds_opts['odl_vpp_netvirt']:
- virt_cmds.extend([
- {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
- {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
- ODL_NETVIRT_VPP_RPM)}
- ])
-
- if sdn == 'ovn':
+ oc_builder.inject_opendaylight(
+ odl_version=ds_opts['odl_version'],
+ image=tmp_oc_image,
+ tmp_dir=tmp_dir,
+ uc_ip=undercloud_admin_ip,
+ os_version=ds_opts['os_version'],
+ docker_tag=docker_tag,
+ )
+ if docker_tag:
+ patched_containers = patched_containers.union({'opendaylight'})
+
+ if patches:
+ if ds_opts['os_version'] == 'master':
+ branch = ds_opts['os_version']
+ else:
+ branch = "stable/{}".format(ds_opts['os_version'])
+ logging.info('Adding patches to overcloud')
+ patched_containers = patched_containers.union(
+ c_builder.add_upstream_patches(patches,
+ tmp_oc_image, tmp_dir,
+ branch,
+ uc_ip=undercloud_admin_ip,
+ docker_tag=docker_tag))
+ # If deploying containers with Ceph and no dedicated Ceph device,
+ # use a persistent loop device for the Ceph OSDs
+ if docker_tag and ds_opts['ceph_device'] == '/dev/loop3':
+ tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
+ with open(tmp_losetup, 'w') as fh:
+ fh.write(LOSETUP_SERVICE)
virt_cmds.extend([
- {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum update -y "
- "*openvswitch*"},
- {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum downgrade -y "
- "*openvswitch*"}
+ {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(tmp_losetup)
+ },
+ {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size {}'
+ .format(LOOP_DEVICE_SIZE)},
+ {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
+ {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
])
-
- tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
- shutil.copyfile(img, tmp_oc_image)
- logging.debug("Temporary overcloud image stored as: {}".format(
- tmp_oc_image))
+ # TODO(trozet) remove this after LP#173474 is fixed
+ dhcp_unit = '/usr/lib/systemd/system/dhcp-interface@.service'
+ virt_cmds.append(
+ {con.VIRT_RUN_CMD: "crudini --del {} Unit "
+ "ConditionPathExists".format(dhcp_unit)})
+ # Prep for NFS
+ virt_cmds.extend([
+ {con.VIRT_INSTALL: "nfs-utils"},
+ {con.VIRT_RUN_CMD: "ln -s /usr/lib/systemd/system/nfs-server.service "
+ "/etc/systemd/system/multi-user.target.wants/"
+ "nfs-server.service"},
+ {con.VIRT_RUN_CMD: "mkdir -p /glance"},
+ {con.VIRT_RUN_CMD: "mkdir -p /cinder"},
+ {con.VIRT_RUN_CMD: "mkdir -p /nova"},
+ {con.VIRT_RUN_CMD: "chmod 777 /glance"},
+ {con.VIRT_RUN_CMD: "chmod 777 /cinder"},
+ {con.VIRT_RUN_CMD: "chmod 777 /nova"},
+ {con.VIRT_RUN_CMD: "chown nfsnobody:nfsnobody /glance"},
+ {con.VIRT_RUN_CMD: "chown nfsnobody:nfsnobody /cinder"},
+ {con.VIRT_RUN_CMD: "chown nfsnobody:nfsnobody /nova"},
+ {con.VIRT_RUN_CMD: "echo '/glance *(rw,sync,"
+ "no_root_squash,no_acl)' > /etc/exports"},
+ {con.VIRT_RUN_CMD: "echo '/cinder *(rw,sync,"
+ "no_root_squash,no_acl)' >> /etc/exports"},
+ {con.VIRT_RUN_CMD: "echo '/nova *(rw,sync,"
+ "no_root_squash,no_acl)' >> /etc/exports"},
+ {con.VIRT_RUN_CMD: "exportfs -avr"},
+ ])
virt_utils.virt_customize(virt_cmds, tmp_oc_image)
logging.info("Overcloud image customization complete")
+ return patched_containers
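The NFS prep repeats the same four commands per export; a hypothetical refactor (not the code as merged) makes the pattern explicit, the one difference being that the merged code truncates /etc/exports on the first echo:

    nfs_cmds = [{con.VIRT_INSTALL: "nfs-utils"}]
    for export in ('/glance', '/cinder', '/nova'):
        nfs_cmds.extend([
            {con.VIRT_RUN_CMD: "mkdir -p {}".format(export)},
            {con.VIRT_RUN_CMD: "chmod 777 {}".format(export)},
            {con.VIRT_RUN_CMD: "chown nfsnobody:nfsnobody {}".format(export)},
            {con.VIRT_RUN_CMD: "echo '{} *(rw,sync,no_root_squash,no_acl)'"
                               " >> /etc/exports".format(export)},
        ])
    nfs_cmds.append({con.VIRT_RUN_CMD: "exportfs -avr"})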
def make_ssh_key():
@@ -341,6 +508,10 @@ def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
# SSH keys
private_key, public_key = make_ssh_key()
+ num_control, num_compute = inv.get_node_counts()
+ if num_control > 1 and not ds['global_params']['ha_enabled']:
+ num_control = 1
+
# Make easier/faster variables to index in the file editor
if 'performance' in ds_opts:
perf = True
@@ -368,6 +539,10 @@ def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
else:
perf = False
+ tenant_settings = ns['networks']['tenant']
+ tenant_vlan_enabled = 'tenant' in ns.enabled_network_list and \
+ tenant_settings.get('segmentation_type') == 'vlan'
+
# Modify OPNFV environment
# TODO: Change to build a dict and outputting yaml rather than parsing
for line in fileinput.input(tmp_opnfv_env, inplace=True):
@@ -391,6 +566,46 @@ def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
ds_opts['dataplane'] == 'ovs_dpdk':
output_line = ' OS::TripleO::ComputeExtraConfigPre: ' \
'./ovs-dpdk-preconfig.yaml'
+ elif 'NeutronNetworkVLANRanges' in line:
+ vlan_setting = ''
+ if tenant_vlan_enabled:
+ if ns['networks']['tenant']['overlay_id_range']:
+ vlan_setting = ns['networks']['tenant']['overlay_id_range']
+ if 'datacentre' not in vlan_setting:
+ vlan_setting += ',datacentre:1:1000'
+ # SRIOV networks are VLAN-based provider networks. In order to
+ # simplify the deployment, nfv_sriov will be the default physnet.
+ # VLANs are not needed in advance, and the user will have to create
+ # the network specifying the segmentation-id.
+ if ds_opts['sriov']:
+ if vlan_setting:
+ vlan_setting += ",nfv_sriov"
+ else:
+ vlan_setting = "datacentre:1:1000,nfv_sriov"
+ if vlan_setting:
+ output_line = " NeutronNetworkVLANRanges: " + vlan_setting
+ elif 'NeutronBridgeMappings' in line and tenant_vlan_enabled:
+ if tenant_settings['overlay_id_range']:
+ physnets = tenant_settings['overlay_id_range'].split(',')
+ output_line = " NeutronBridgeMappings: "
+ for physnet in physnets:
+ physnet_name = physnet.split(':')[0]
+ if physnet_name != 'datacentre':
+ output_line += "{}:br-vlan,".format(physnet_name)
+ output_line += "datacentre:br-ex"
+ elif 'OpenDaylightProviderMappings' in line and tenant_vlan_enabled \
+ and ds_opts['sdn_controller'] == 'opendaylight':
+ if tenant_settings['overlay_id_range']:
+ physnets = tenant_settings['overlay_id_range'].split(',')
+ output_line = " OpenDaylightProviderMappings: "
+ for physnet in physnets:
+ physnet_name = physnet.split(':')[0]
+ if physnet_name != 'datacentre':
+ output_line += "{}:br-vlan,".format(physnet_name)
+ output_line += "datacentre:br-ex"
+ elif 'NeutronNetworkType' in line and tenant_vlan_enabled:
+ output_line = " NeutronNetworkType: vlan\n" \
+ " NeutronTunnelTypes: ''"
if ds_opts['sdn_controller'] == 'opendaylight' and \
'odl_vpp_routing_node' in ds_opts:
@@ -400,16 +615,22 @@ def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
ns['domain_name']))
elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio':
if 'NeutronVPPAgentPhysnets' in line:
- output_line = (" NeutronVPPAgentPhysnets: 'datacentre:{}'".
- format(tenant_nic['Controller']))
+ # VPP interface tap0 will be used for external network
+ # connectivity.
+ output_line = (" NeutronVPPAgentPhysnets: "
+ "'datacentre:{},external:tap0'"
+ .format(tenant_nic['Controller']))
elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get(
'dvr') is True:
if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
output_line = ''
elif 'NeutronDhcpAgentsPerNetwork' in line:
- num_control, num_compute = inv.get_node_counts()
+ if num_compute == 0:
+ num_dhcp_agents = num_control
+ else:
+ num_dhcp_agents = num_compute
output_line = (" NeutronDhcpAgentsPerNetwork: {}"
- .format(num_compute))
+ .format(num_dhcp_agents))
elif 'ComputeServices' in line:
output_line = (" ComputeServices:\n"
" - OS::TripleO::Services::NeutronDhcpAgent")
@@ -475,7 +696,50 @@ def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
print(output_line)
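A worked example of the VLAN handling above, with a hypothetical overlay_id_range of 'nfv:500:600,datacentre:1:1000':

    physnets = 'nfv:500:600,datacentre:1:1000'.split(',')
    bridge_maps = ''.join('{}:br-vlan,'.format(p.split(':')[0])
                          for p in physnets
                          if p.split(':')[0] != 'datacentre')
    print(' NeutronBridgeMappings: ' + bridge_maps + 'datacentre:br-ex')
    # -> NeutronBridgeMappings: nfv:br-vlan,datacentre:br-ex
    # NeutronNetworkVLANRanges keeps the range string as-is, and the same
    # physnet list feeds OpenDaylightProviderMappings when ODL is the SDN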
+ # Merge compute services into controller services if this is a
+ # single-node (all-in-one) deployment
+ if num_compute == 0:
+ with open(tmp_opnfv_env, 'r') as fh:
+ data = yaml.safe_load(fh)
+ param_data = data['parameter_defaults']
+ logging.info("All in one deployment detected")
+ logging.info("Disabling NFS in env file")
+ # Check to see if any parameters are set for Compute
+ for param in param_data.keys():
+ if param != 'ComputeServices' and param.startswith('Compute'):
+ logging.warning("Compute parameter set, but will not be used "
+ "in deployment: {}. Please use Controller "
+ "based parameters when using All-in-one "
+ "deployments".format(param))
+ if param in NFS_VARS:
+ param_data[param] = False
+ logging.info("Checking if service merging required into "
+ "control services")
+ if ('ControllerServices' in param_data and 'ComputeServices' in
+ param_data):
+ logging.info("Services detected in environment file. Merging...")
+ ctrl_services = param_data['ControllerServices']
+ cmp_services = param_data['ComputeServices']
+ param_data['ControllerServices'] = list(set().union(
+ ctrl_services, cmp_services))
+ for dup_service in DUPLICATE_COMPUTE_SERVICES:
+ if dup_service in param_data['ControllerServices']:
+ param_data['ControllerServices'].remove(dup_service)
+ param_data.pop('ComputeServices')
+ logging.debug("Merged controller services: {}".format(
+ pprint.pformat(param_data['ControllerServices'])
+ ))
+ else:
+ logging.info("No services detected in env file, not merging "
+ "services")
+ with open(tmp_opnfv_env, 'w') as fh:
+ yaml.safe_dump(data, fh, default_flow_style=False)
+
logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
+ with open(tmp_opnfv_env, 'r') as fh:
+ logging.debug("opnfv-environment content is : {}".format(
+ pprint.pformat(yaml.safe_load(fh.read()))
+ ))
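A toy run of the all-in-one service merge (one real duplicate entry, other names shortened for illustration):

    ctrl = ['OS::TripleO::Services::NeutronOvsAgent']
    comp = ['OS::TripleO::Services::NovaCompute',
            'OS::TripleO::Services::ComputeNeutronOvsAgent']
    merged = list(set().union(ctrl, comp))
    for dup in DUPLICATE_COMPUTE_SERVICES:
        if dup in merged:
            merged.remove(dup)
    # -> NovaCompute and NeutronOvsAgent survive; the compute-side
    #    ComputeNeutronOvsAgent is dropped as a duplicate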
def generate_ceph_key():
@@ -484,11 +748,13 @@ def generate_ceph_key():
return base64.b64encode(header + key)
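The body of generate_ceph_key is elided by this hunk; a plausible sketch consistent with the imports (struct, time, base64) and the return line, offered only to illustrate the cephx secret format (a random key behind a small binary header):

    import base64
    import os
    import struct
    import time

    def generate_ceph_key_sketch():
        key = os.urandom(16)
        # assumed header layout: type, creation secs/nsecs, key length
        header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
        return base64.b64encode(header + key)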
-def prep_storage_env(ds, tmp_dir):
+def prep_storage_env(ds, ns, virtual, tmp_dir):
"""
Creates storage environment file for deployment. Source file is copied by
undercloud playbook to host.
:param ds:
+ :param ns:
+ :param virtual:
:param tmp_dir:
:return:
"""
@@ -510,9 +776,35 @@ def prep_storage_env(ds, tmp_dir):
elif 'CephAdminKey' in line:
print(" CephAdminKey: {}".format(generate_ceph_key().decode(
'utf-8')))
+ elif 'CephClientKey' in line:
+ print(" CephClientKey: {}".format(generate_ceph_key().decode(
+ 'utf-8')))
else:
print(line)
- if 'ceph_device' in ds_opts and ds_opts['ceph_device']:
+
+ if ds_opts['containers']:
+ ceph_params = {}
+
+ # Ceph allows at most mon_max_pg_per_osd (200) * num_osds placement
+ # group instances. Set the default pool size and pg num so that
+ # num_pools * num_pgs * pool_size stays below that limit
+ ceph_params['CephPoolDefaultSize'] = 2
+ ceph_params['CephPoolDefaultPgNum'] = 32
+ if virtual:
+ ceph_params['CephAnsibleExtraConfig'] = {
+ 'centos_package_dependencies': [],
+ 'ceph_osd_docker_memory_limit': '1g',
+ 'ceph_mds_docker_memory_limit': '1g',
+ }
+ ceph_device = ds_opts['ceph_device']
+ ceph_params['CephAnsibleDisksConfig'] = {
+ 'devices': [ceph_device],
+ 'journal_size': 512,
+ 'osd_scenario': 'collocated'
+ }
+ utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
+ # TODO(trozet): remove following block as we only support containers now
+ elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
with open(storage_file, 'a') as fh:
fh.write(' ExtraConfig:\n')
fh.write(" ceph::profile::params::osds:{{{}:{{}}}}\n".format(
@@ -520,12 +812,58 @@ def prep_storage_env(ds, tmp_dir):
))
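utils.edit_tht_env is defined elsewhere in apex; the behavior assumed here is a read-modify-write of one section of a THT env file, roughly:

    import yaml

    def edit_tht_env(env_file, section, settings):
        # sketch of apex.common.utils.edit_tht_env, for illustration only
        with open(env_file) as fh:
            data = yaml.safe_load(fh) or {}
        data.setdefault(section, {}).update(settings)
        with open(env_file, 'w') as fh:
            yaml.safe_dump(data, fh, default_flow_style=False)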
-def external_network_cmds(ns):
+def prep_sriov_env(ds, tmp_dir):
+ """
+ Creates SRIOV environment file for deployment. Source file is copied by
+ undercloud playbook to host.
+ :param ds:
+ :param tmp_dir:
+ :return:
+ """
+ ds_opts = ds['deploy_options']
+ sriov_iface = ds_opts['sriov']
+ sriov_file = os.path.join(tmp_dir, 'neutron-opendaylight-sriov.yaml')
+ if not os.path.isfile(sriov_file):
+ logging.error("sriov-environment file is not in tmp directory: {}. "
+ "Check if file was copied from "
+ "undercloud".format(tmp_dir))
+ raise ApexDeployException("sriov-environment file not copied from "
+ "undercloud")
+ # TODO(rnoriega): Instead of line editing, refactor this code to load
+ # yaml file into a dict, edit it and write the file back.
+ for line in fileinput.input(sriov_file, inplace=True):
+ line = line.strip('\n')
+ if 'NovaSchedulerDefaultFilters' in line:
+ print(" {}".format(line[3:]))
+ elif 'NovaSchedulerAvailableFilters' in line:
+ print(" {}".format(line[3:]))
+ elif 'NeutronPhysicalDevMappings' in line:
+ print(" NeutronPhysicalDevMappings: \"nfv_sriov:{}\""
+ .format(sriov_iface))
+ elif 'NeutronSriovNumVFs' in line:
+ print(" NeutronSriovNumVFs: \"{}:8\"".format(sriov_iface))
+ elif 'NovaPCIPassthrough' in line:
+ print(" NovaPCIPassthrough:")
+ elif 'devname' in line:
+ print(" - devname: \"{}\"".format(sriov_iface))
+ elif 'physical_network' in line:
+ print(" physical_network: \"nfv_sriov\"")
+ else:
+ print(line)
+
+
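The TODO(rnoriega) above suggests loading the file as YAML instead of line editing; a minimal sketch of that refactor, producing the same keys the line editor sets (the commented-out scheduler filters would simply be written as parameters here):

    import yaml

    def prep_sriov_env_yaml(sriov_file, sriov_iface):
        # hypothetical refactor of the fileinput loop above
        with open(sriov_file) as fh:
            data = yaml.safe_load(fh)
        params = data.setdefault('parameter_defaults', {})
        params['NeutronPhysicalDevMappings'] = \
            'nfv_sriov:{}'.format(sriov_iface)
        params['NeutronSriovNumVFs'] = '{}:8'.format(sriov_iface)
        params['NovaPCIPassthrough'] = [{'devname': sriov_iface,
                                         'physical_network': 'nfv_sriov'}]
        with open(sriov_file, 'w') as fh:
            yaml.safe_dump(data, fh, default_flow_style=False)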
+def external_network_cmds(ns, ds):
"""
Generates external network openstack commands
:param ns: network settings
+ :param ds: deploy settings
:return: list of commands to configure external network
"""
+ ds_opts = ds['deploy_options']
+ external_physnet = 'datacentre'
+ if ds_opts['dataplane'] == 'fdio' and \
+ ds_opts['sdn_controller'] != 'opendaylight':
+ external_physnet = 'external'
if 'external' in ns.enabled_network_list:
net_config = ns['networks']['external'][0]
external = True
@@ -546,7 +884,8 @@ def external_network_cmds(ns):
'compute']['vlan'])
cmds.append("openstack network create external --project service "
"--external --provider-network-type {} "
- "--provider-physical-network datacentre".format(ext_type))
+ "--provider-physical-network {}"
+ .format(ext_type, external_physnet))
# create subnet command
cidr = net_config['cidr']
subnet_cmd = "openstack subnet create external-subnet --project " \
@@ -554,8 +893,7 @@ def external_network_cmds(ns):
"--allocation-pool start={},end={} --subnet-range " \
"{}".format(gateway, pool_start, pool_end, str(cidr))
if external and cidr.version == 6:
- subnet_cmd += ' --ip-version 6 --ipv6-ra-mode slaac ' \
- '--ipv6-address-mode slaac'
+ subnet_cmd += ' --ip-version 6'
cmds.append(subnet_cmd)
logging.debug("Neutron external network commands determined "
"as: {}".format(cmds))