From d158a48f4f10e9abb1403b1178b8c2f31b36e992 Mon Sep 17 00:00:00 2001 From: Dan Radez Date: Mon, 2 Oct 2017 12:02:22 -0400 Subject: Adding apex/overcloud/* unittests Change-Id: I02cd512ba1ddaee2538bee7739e27b136112a0c6 Signed-off-by: Dan Radez --- apex/deploy.py | 20 +- apex/overcloud/deploy.py | 565 +++++++++++++++++++++++++++++++ apex/overcloud/overcloud_deploy.py | 565 ------------------------------- apex/tests/test_apex_overcloud_config.py | 80 +++++ apex/tests/test_apex_overcloud_deploy.py | 403 ++++++++++++++++++++++ tox.ini | 2 +- 6 files changed, 1059 insertions(+), 576 deletions(-) create mode 100644 apex/overcloud/deploy.py delete mode 100644 apex/overcloud/overcloud_deploy.py create mode 100644 apex/tests/test_apex_overcloud_config.py create mode 100644 apex/tests/test_apex_overcloud_deploy.py diff --git a/apex/deploy.py b/apex/deploy.py index 5ec0f7fa..4b1ef855 100644 --- a/apex/deploy.py +++ b/apex/deploy.py @@ -32,7 +32,7 @@ from apex.common.exceptions import ApexDeployException from apex.network import jumphost from apex.undercloud import undercloud as uc_lib from apex.overcloud import config as oc_cfg -from apex.overcloud import overcloud_deploy +from apex.overcloud import deploy as oc_deploy APEX_TEMP_DIR = tempfile.mkdtemp(prefix='apex_tmp') ANSIBLE_PATH = 'ansible/playbooks' @@ -341,14 +341,14 @@ def main(): # Prepare overcloud-full.qcow2 logging.info("Preparing Overcloud for deployment...") sdn_image = os.path.join(args.image_dir, SDN_IMAGE) - overcloud_deploy.prep_image(deploy_settings, sdn_image, APEX_TEMP_DIR, - root_pw=root_pw) + oc_deploy.prep_image(deploy_settings, sdn_image, APEX_TEMP_DIR, + root_pw=root_pw) opnfv_env = os.path.join(args.deploy_dir, args.env_file) - overcloud_deploy.prep_env(deploy_settings, net_settings, inventory, - opnfv_env, net_env_target, APEX_TEMP_DIR) - overcloud_deploy.create_deploy_cmd(deploy_settings, net_settings, - inventory, APEX_TEMP_DIR, - args.virtual, args.env_file) + oc_deploy.prep_env(deploy_settings, net_settings, inventory, + opnfv_env, net_env_target, APEX_TEMP_DIR) + oc_deploy.create_deploy_cmd(deploy_settings, net_settings, + inventory, APEX_TEMP_DIR, + args.virtual, args.env_file) deploy_playbook = os.path.join(args.lib_dir, ANSIBLE_PATH, 'deploy_overcloud.yml') virt_env = 'virtual-environment.yaml' @@ -391,7 +391,7 @@ def main(): 'UserKnownHostsFile=/dev/null -o ' \ 'LogLevel=error' deploy_vars['external_network_cmds'] = \ - overcloud_deploy.external_network_cmds(net_settings) + oc_deploy.external_network_cmds(net_settings) # TODO(trozet): just parse all ds_opts as deploy vars one time ds_opts = deploy_settings['deploy_options'] deploy_vars['gluon'] = ds_opts['gluon'] @@ -405,7 +405,7 @@ def main(): overcloudrc = os.path.join(APEX_TEMP_DIR, 'overcloudrc') if ds_opts['congress']: deploy_vars['congress_datasources'] = \ - overcloud_deploy.create_congress_cmds(overcloudrc) + oc_deploy.create_congress_cmds(overcloudrc) deploy_vars['congress'] = True else: deploy_vars['congress'] = False diff --git a/apex/overcloud/deploy.py b/apex/overcloud/deploy.py new file mode 100644 index 00000000..ef916a43 --- /dev/null +++ b/apex/overcloud/deploy.py @@ -0,0 +1,565 @@ +############################################################################## +# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +import base64 +import fileinput +import logging +import os +import re +import shutil +import uuid +import struct +import time + +from apex.common import constants as con +from apex.common.exceptions import ApexDeployException +from apex.common import parsers +from apex.virtual import utils as virt_utils +from cryptography.hazmat.primitives import serialization as \ + crypto_serialization +from cryptography.hazmat.primitives.asymmetric import rsa +from cryptography.hazmat.backends import default_backend as \ + crypto_default_backend + + +SDN_FILE_MAP = { + 'opendaylight': { + 'sfc': 'neutron-sfc-opendaylight.yaml', + 'vpn': 'neutron-bgpvpn-opendaylight.yaml', + 'gluon': 'gluon.yaml', + 'vpp': { + 'odl_vpp_netvirt': 'neutron-opendaylight-netvirt-vpp.yaml', + 'dvr': 'neutron-opendaylight-fdio-dvr.yaml', + 'default': 'neutron-opendaylight-honeycomb.yaml' + }, + 'default': 'neutron-opendaylight.yaml', + }, + 'onos': { + 'sfc': 'neutron-onos-sfc.yaml', + 'default': 'neutron-onos.yaml' + }, + 'ovn': 'neutron-ml2-ovn.yaml', + False: { + 'vpp': 'neutron-ml2-vpp.yaml', + 'dataplane': ('ovs_dpdk', 'neutron-ovs-dpdk.yaml') + } +} + +OTHER_FILE_MAP = { + 'tacker': 'enable_tacker.yaml', + 'congress': 'enable_congress.yaml', + 'barometer': 'enable_barometer.yaml', + 'rt_kvm': 'enable_rt_kvm.yaml' +} + +OVS_PERF_MAP = { + 'HostCpusList': 'dpdk_cores', + 'NeutronDpdkCoreList': 'pmd_cores', + 'NeutronDpdkSocketMemory': 'socket_memory', + 'NeutronDpdkMemoryChannels': 'memory_channels' +} + +OVS_NSH_KMOD_RPM = "openvswitch-kmod-2.6.1-1.el7.centos.x86_64.rpm" +OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm" +ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \ + ".noarch.rpm" + + +def build_sdn_env_list(ds, sdn_map, env_list=None): + if env_list is None: + env_list = list() + for k, v in sdn_map.items(): + if ds['sdn_controller'] == k or (k in ds and ds[k] is True): + if isinstance(v, dict): + env_list.extend(build_sdn_env_list(ds, v)) + else: + env_list.append(os.path.join(con.THT_ENV_DIR, v)) + elif isinstance(v, tuple): + if ds[k] == v[0]: + env_list.append(os.path.join(con.THT_ENV_DIR, v[1])) + if len(env_list) == 0: + try: + env_list.append(os.path.join( + con.THT_ENV_DIR, sdn_map['default'])) + except KeyError: + logging.warning("Unable to find default file for SDN") + + return env_list + + +def create_deploy_cmd(ds, ns, inv, tmp_dir, + virtual, env_file='opnfv-environment.yaml'): + + logging.info("Creating deployment command") + deploy_options = [env_file, 'network-environment.yaml'] + ds_opts = ds['deploy_options'] + deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP) + + for k, v in OTHER_FILE_MAP.items(): + if k in ds_opts and ds_opts[k]: + deploy_options.append(os.path.join(con.THT_ENV_DIR, v)) + + if ds_opts['ceph']: + prep_storage_env(ds, tmp_dir) + deploy_options.append(os.path.join(con.THT_ENV_DIR, + 'storage-environment.yaml')) + if ds['global_params']['ha_enabled']: + deploy_options.append(os.path.join(con.THT_ENV_DIR, + 'puppet-pacemaker.yaml')) + + if virtual: + deploy_options.append('virtual-environment.yaml') + else: + deploy_options.append('baremetal-environment.yaml') + + num_control, num_compute = inv.get_node_counts() + if num_control == 0 or 
num_compute == 0: + logging.error("Detected 0 control or compute nodes. Control nodes: " + "{}, compute nodes{}".format(num_control, num_compute)) + raise ApexDeployException("Invalid number of control or computes") + elif num_control > 1 and not ds['global_params']['ha_enabled']: + num_control = 1 + cmd = "openstack overcloud deploy --templates --timeout {} " \ + "--libvirt-type kvm".format(con.DEPLOY_TIMEOUT) + # build cmd env args + for option in deploy_options: + cmd += " -e {}".format(option) + cmd += " --ntp-server {}".format(ns['ntp'][0]) + cmd += " --control-scale {}".format(num_control) + cmd += " --compute-scale {}".format(num_compute) + cmd += ' --control-flavor control --compute-flavor compute' + logging.info("Deploy command set: {}".format(cmd)) + + with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh: + fh.write(cmd) + return cmd + + +def prep_image(ds, img, tmp_dir, root_pw=None): + """ + Locates sdn image and preps for deployment. + :param ds: deploy settings + :param img: sdn image + :param tmp_dir: dir to store modified sdn image + :param root_pw: password to configure for overcloud image + :return: None + """ + # TODO(trozet): Come up with a better way to organize this logic in this + # function + logging.info("Preparing image: {} for deployment".format(img)) + if not os.path.isfile(img): + logging.error("Missing SDN image {}".format(img)) + raise ApexDeployException("Missing SDN image file: {}".format(img)) + + ds_opts = ds['deploy_options'] + virt_cmds = list() + sdn = ds_opts['sdn_controller'] + # we need this due to rhbz #1436021 + # fixed in systemd-219-37.el7 + if sdn is not False: + logging.info("Neutron openvswitch-agent disabled") + virt_cmds.extend([{ + con.VIRT_RUN_CMD: + "rm -f /etc/systemd/system/multi-user.target.wants/" + "neutron-openvswitch-agent.service"}, + { + con.VIRT_RUN_CMD: + "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent" + ".service" + }]) + + if ds_opts['vpn']: + virt_cmds.append({con.VIRT_RUN_CMD: "systemctl enable zrpcd"}) + logging.info("ZRPC and Quagga enabled") + + dataplane = ds_opts['dataplane'] + if dataplane == 'ovs_dpdk' or dataplane == 'fdio': + logging.info("Enabling kernel modules for dpdk") + # file to module mapping + uio_types = { + os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci', + os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic' + } + for mod_file, mod in uio_types.items(): + with open(mod_file, 'w') as fh: + fh.write('#!/bin/bash\n') + fh.write('exec /sbin/modprobe {}'.format(mod)) + fh.close() + + virt_cmds.extend([ + {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format( + mod_file)}, + {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/" + "{}".format(os.path.basename(mod_file))} + ]) + if root_pw: + pw_op = "password:{}".format(root_pw) + virt_cmds.append({con.VIRT_PW: pw_op}) + if ds_opts['sfc'] and dataplane == 'ovs': + virt_cmds.extend([ + {con.VIRT_RUN_CMD: "yum -y install " + "/root/ovs/rpm/rpmbuild/RPMS/x86_64/" + "{}".format(OVS_NSH_KMOD_RPM)}, + {con.VIRT_RUN_CMD: "yum downgrade -y " + "/root/ovs/rpm/rpmbuild/RPMS/x86_64/" + "{}".format(OVS_NSH_RPM)} + ]) + if dataplane == 'fdio': + # Patch neutron with using OVS external interface for router + # and add generic linux NS interface driver + virt_cmds.append( + {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch " + "-p1 < neutron-patch-NSDriver.patch"}) + if sdn is False: + virt_cmds.extend([ + {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"}, + {con.VIRT_RUN_CMD: "yum install -y " + 
"/root/nosdn_vpp_rpms/*.rpm"} + ]) + + if sdn == 'opendaylight': + if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION: + virt_cmds.extend([ + {con.VIRT_RUN_CMD: "yum -y remove opendaylight"}, + {con.VIRT_RUN_CMD: "rm -rf /etc/puppet/modules/opendaylight"}, + {con.VIRT_RUN_CMD: "cd /etc/puppet/modules && tar xzf " + "/root/puppet-opendaylight-" + "{}.tar.gz".format(ds_opts['odl_version'])} + ]) + if ds_opts['odl_version'] == 'master': + virt_cmds.extend([ + {con.VIRT_RUN_CMD: "rpm -ivh --nodeps /root/{}/*".format( + ds_opts['odl_version'])} + ]) + else: + virt_cmds.extend([ + {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format( + ds_opts['odl_version'])} + ]) + + elif sdn == 'opendaylight' and 'odl_vpp_netvirt' in ds_opts \ + and ds_opts['odl_vpp_netvirt']: + virt_cmds.extend([ + {con.VIRT_RUN_CMD: "yum -y remove opendaylight"}, + {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format( + ODL_NETVIRT_VPP_RPM)} + ]) + + if sdn == 'ovn': + virt_cmds.extend([ + {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum update -y " + "*openvswitch*"}, + {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum downgrade -y " + "*openvswitch*"} + ]) + + tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2') + shutil.copyfile(img, tmp_oc_image) + logging.debug("Temporary overcloud image stored as: {}".format( + tmp_oc_image)) + virt_utils.virt_customize(virt_cmds, tmp_oc_image) + logging.info("Overcloud image customization complete") + + +def make_ssh_key(): + """ + Creates public and private ssh keys with 1024 bit RSA encryption + :return: private, public key + """ + key = rsa.generate_private_key( + backend=crypto_default_backend(), + public_exponent=65537, + key_size=1024 + ) + + private_key = key.private_bytes( + crypto_serialization.Encoding.PEM, + crypto_serialization.PrivateFormat.PKCS8, + crypto_serialization.NoEncryption()) + public_key = key.public_key().public_bytes( + crypto_serialization.Encoding.OpenSSH, + crypto_serialization.PublicFormat.OpenSSH + ) + return private_key.decode('utf-8'), public_key.decode('utf-8') + + +def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir): + """ + Creates modified opnfv/network environments for deployment + :param ds: deploy settings + :param ns: network settings + :param inv: node inventory + :param opnfv_env: file path for opnfv-environment file + :param net_env: file path for network-environment file + :param tmp_dir: Apex tmp dir + :return: + """ + + logging.info("Preparing opnfv-environment and network-environment files") + ds_opts = ds['deploy_options'] + tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env)) + shutil.copyfile(opnfv_env, tmp_opnfv_env) + tenant_nic_map = ns['networks']['tenant']['nic_mapping'] + tenant_ctrl_nic = tenant_nic_map['controller']['members'][0] + tenant_comp_nic = tenant_nic_map['compute']['members'][0] + + # SSH keys + private_key, public_key = make_ssh_key() + + # Make easier/faster variables to index in the file editor + if 'performance' in ds_opts: + perf = True + # vpp + if 'vpp' in ds_opts['performance']['Compute']: + perf_vpp_comp = ds_opts['performance']['Compute']['vpp'] + else: + perf_vpp_comp = None + if 'vpp' in ds_opts['performance']['Controller']: + perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp'] + else: + perf_vpp_ctrl = None + + # ovs + if 'ovs' in ds_opts['performance']['Compute']: + perf_ovs_comp = ds_opts['performance']['Compute']['ovs'] + else: + perf_ovs_comp = None + + # kernel + if 'kernel' in ds_opts['performance']['Compute']: + perf_kern_comp = ds_opts['performance']['Compute']['kernel'] + 
else: + perf_kern_comp = None + else: + perf = False + + # Modify OPNFV environment + # TODO: Change to build a dict and outputing yaml rather than parsing + for line in fileinput.input(tmp_opnfv_env, inplace=True): + line = line.strip('\n') + output_line = line + if 'CloudDomain' in line: + output_line = " CloudDomain: {}".format(ns['domain_name']) + elif 'replace_private_key' in line: + output_line = " private_key: |\n" + key_out = '' + for line in private_key.splitlines(): + key_out += " {}\n".format(line) + output_line += key_out + elif 'replace_public_key' in line: + output_line = " public_key: '{}'".format(public_key) + + if ds_opts['sdn_controller'] == 'opendaylight' and \ + 'odl_vpp_routing_node' in ds_opts: + if 'opendaylight::vpp_routing_node' in line: + output_line = (" opendaylight::vpp_routing_node: {}.{}" + .format(ds_opts['odl_vpp_routing_node'], + ns['domain_name'])) + elif 'ControllerExtraConfig' in line: + output_line = (" ControllerExtraConfig:\n " + "tripleo::profile::base::neutron::agents::" + "honeycomb::interface_role_mapping:" + " ['{}:tenant-interface]'" + .format(tenant_ctrl_nic)) + elif 'NovaComputeExtraConfig' in line: + output_line = (" NovaComputeExtraConfig:\n " + "tripleo::profile::base::neutron::agents::" + "honeycomb::interface_role_mapping:" + " ['{}:tenant-interface]'" + .format(tenant_comp_nic)) + elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio': + if 'NeutronVPPAgentPhysnets' in line: + output_line = (" NeutronVPPAgentPhysnets: 'datacentre:{}'". + format(tenant_ctrl_nic)) + elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get( + 'dvr') is True: + if 'OS::TripleO::Services::NeutronDhcpAgent' in line: + output_line = '' + elif 'NeutronDhcpAgentsPerNetwork' in line: + num_control, num_compute = inv.get_node_counts() + output_line = (" NeutronDhcpAgentsPerNetwork: {}" + .format(num_compute)) + elif 'ComputeServices' in line: + output_line = (" ComputeServices:\n" + " - OS::TripleO::Services::NeutronDhcpAgent") + + if perf: + for role in 'NovaCompute', 'Controller': + if role == 'NovaCompute': + perf_opts = perf_vpp_comp + else: + perf_opts = perf_vpp_ctrl + cfg = "{}ExtraConfig".format(role) + if cfg in line and perf_opts: + perf_line = '' + if 'main-core' in perf_opts: + perf_line += ("\n fdio::vpp_cpu_main_core: '{}'" + .format(perf_opts['main-core'])) + if 'corelist-workers' in perf_opts: + perf_line += ("\n " + "fdio::vpp_cpu_corelist_workers: '{}'" + .format(perf_opts['corelist-workers'])) + if perf_line: + output_line = (" {}:{}".format(cfg, perf_line)) + + # kernel args + # (FIXME) use compute's kernel settings for all nodes for now. 
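As an aside on mechanics: the rewriting in prep_env above (and the network-environment pass further down) relies on the standard-library fileinput in-place idiom, where inplace=True redirects print() into the file being iterated, so any line that is not printed back is dropped from the file. A minimal, standalone illustration follows; the file name and replacement value are made up for the example and are not taken from this patch.

import fileinput

# Rewrite sample.yaml in place: while inplace=True, print() writes into
# sample.yaml instead of stdout, so unmatched lines must be echoed back.
for line in fileinput.input('sample.yaml', inplace=True):
    line = line.rstrip('\n')
    if 'CloudDomain' in line:
        print('  CloudDomain: opnfv.example.com')
    else:
        print(line)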
+ if 'ComputeKernelArgs' in line and perf_kern_comp: + kernel_args = '' + for k, v in perf_kern_comp.items(): + kernel_args += "{}={} ".format(k, v) + if kernel_args: + output_line = " ComputeKernelArgs: '{}'".\ + format(kernel_args) + if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp: + for k, v in OVS_PERF_MAP.items(): + if k in line and v in perf_ovs_comp: + output_line = " {}: '{}'".format(k, perf_ovs_comp[v]) + + print(output_line) + + logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env)) + + # Modify Network environment + for line in fileinput.input(net_env, inplace=True): + line = line.strip('\n') + if 'ComputeExtraConfigPre' in line and \ + ds_opts['dataplane'] == 'ovs_dpdk': + print(' OS::TripleO::ComputeExtraConfigPre: ' + './ovs-dpdk-preconfig.yaml') + elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \ + 'resource_registry' in line: + print("resource_registry:\n" + " OS::TripleO::NodeUserData: first-boot.yaml") + elif perf and perf_kern_comp and \ + 'NovaSchedulerDefaultFilters' in line: + print(" NovaSchedulerDefaultFilters: 'RamFilter," + "ComputeFilter,AvailabilityZoneFilter," + "ComputeCapabilitiesFilter,ImagePropertiesFilter," + "NUMATopologyFilter'") + else: + print(line) + + logging.info("network-environment file written to {}".format(net_env)) + + +def generate_ceph_key(): + key = os.urandom(16) + header = struct.pack('<hiih', 1, int(time.time()), 0, len(key)) + return base64.b64encode(header + key) [...] - elif num_control > 1 and not ds['global_params']['ha_enabled']: - num_control = 1 - cmd = "openstack overcloud deploy --templates --timeout {} " \ - "--libvirt-type kvm".format(con.DEPLOY_TIMEOUT) - # build cmd env args - for option in deploy_options: - cmd += " -e {}".format(option) - cmd += " --ntp-server {}".format(ns['ntp'][0]) - cmd += " --control-scale {}".format(num_control) - cmd += " --compute-scale {}".format(num_compute) - cmd += ' --control-flavor control --compute-flavor compute' - logging.info("Deploy command set: {}".format(cmd)) - - with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh: - fh.write(cmd) - return cmd - - -def prep_image(ds, img, tmp_dir, root_pw=None): - """ - Locates sdn image and preps for deployment.
- :param ds: deploy settings - :param img: sdn image - :param tmp_dir: dir to store modified sdn image - :param root_pw: password to configure for overcloud image - :return: None - """ - # TODO(trozet): Come up with a better way to organize this logic in this - # function - logging.info("Preparing image: {} for deployment".format(img)) - if not os.path.isfile(img): - logging.error("Missing SDN image {}".format(img)) - raise ApexDeployException("Missing SDN image file: {}".format(img)) - - ds_opts = ds['deploy_options'] - virt_cmds = list() - sdn = ds_opts['sdn_controller'] - # we need this due to rhbz #1436021 - # fixed in systemd-219-37.el7 - if sdn is not False: - logging.info("Neutron openvswitch-agent disabled") - virt_cmds.extend([{ - con.VIRT_RUN_CMD: - "rm -f /etc/systemd/system/multi-user.target.wants/" - "neutron-openvswitch-agent.service"}, - { - con.VIRT_RUN_CMD: - "rm -f /usr/lib/systemd/system/neutron-openvswitch-agent" - ".service" - }]) - - if ds_opts['vpn']: - virt_cmds.append({con.VIRT_RUN_CMD: "systemctl enable zrpcd"}) - logging.info("ZRPC and Quagga enabled") - - dataplane = ds_opts['dataplane'] - if dataplane == 'ovs_dpdk' or dataplane == 'fdio': - logging.info("Enabling kernel modules for dpdk") - # file to module mapping - uio_types = { - os.path.join(tmp_dir, 'vfio_pci.modules'): 'vfio_pci', - os.path.join(tmp_dir, 'uio_pci_generic.modules'): 'uio_pci_generic' - } - for mod_file, mod in uio_types.items(): - with open(mod_file, 'w') as fh: - fh.write('#!/bin/bash\n') - fh.write('exec /sbin/modprobe {}'.format(mod)) - fh.close() - - virt_cmds.extend([ - {con.VIRT_UPLOAD: "{}:/etc/sysconfig/modules/".format( - mod_file)}, - {con.VIRT_RUN_CMD: "chmod 0755 /etc/sysconfig/modules/" - "{}".format(os.path.basename(mod_file))} - ]) - if root_pw: - pw_op = "password:{}".format(root_pw) - virt_cmds.append({con.VIRT_PW: pw_op}) - if ds_opts['sfc'] and dataplane == 'ovs': - virt_cmds.extend([ - {con.VIRT_RUN_CMD: "yum -y install " - "/root/ovs/rpm/rpmbuild/RPMS/x86_64/" - "{}".format(OVS_NSH_KMOD_RPM)}, - {con.VIRT_RUN_CMD: "yum downgrade -y " - "/root/ovs/rpm/rpmbuild/RPMS/x86_64/" - "{}".format(OVS_NSH_RPM)} - ]) - if dataplane == 'fdio': - # Patch neutron with using OVS external interface for router - # and add generic linux NS interface driver - virt_cmds.append( - {con.VIRT_RUN_CMD: "cd /usr/lib/python2.7/site-packages && patch " - "-p1 < neutron-patch-NSDriver.patch"}) - if sdn is False: - virt_cmds.extend([ - {con.VIRT_RUN_CMD: "yum remove -y vpp-lib"}, - {con.VIRT_RUN_CMD: "yum install -y " - "/root/nosdn_vpp_rpms/*.rpm"} - ]) - - if sdn == 'opendaylight': - if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION: - virt_cmds.extend([ - {con.VIRT_RUN_CMD: "yum -y remove opendaylight"}, - {con.VIRT_RUN_CMD: "rm -rf /etc/puppet/modules/opendaylight"}, - {con.VIRT_RUN_CMD: "cd /etc/puppet/modules && tar xzf " - "/root/puppet-opendaylight-" - "{}.tar.gz".format(ds_opts['odl_version'])} - ]) - if ds_opts['odl_version'] == 'master': - virt_cmds.extend([ - {con.VIRT_RUN_CMD: "rpm -ivh --nodeps /root/{}/*".format( - ds_opts['odl_version'])} - ]) - else: - virt_cmds.extend([ - {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format( - ds_opts['odl_version'])} - ]) - - elif sdn == 'opendaylight' and 'odl_vpp_netvirt' in ds_opts \ - and ds_opts['odl_vpp_netvirt']: - virt_cmds.extend([ - {con.VIRT_RUN_CMD: "yum -y remove opendaylight"}, - {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format( - ODL_NETVIRT_VPP_RPM)} - ]) - - if sdn == 'ovn': - virt_cmds.extend([ - {con.VIRT_RUN_CMD: "cd 
/root/ovs28 && yum update -y " - "*openvswitch*"}, - {con.VIRT_RUN_CMD: "cd /root/ovs28 && yum downgrade -y " - "*openvswitch*"} - ]) - - tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2') - shutil.copyfile(img, tmp_oc_image) - logging.debug("Temporary overcloud image stored as: {}".format( - tmp_oc_image)) - virt_utils.virt_customize(virt_cmds, tmp_oc_image) - logging.info("Overcloud image customization complete") - - -def make_ssh_key(): - """ - Creates public and private ssh keys with 1024 bit RSA encryption - :return: private, public key - """ - key = rsa.generate_private_key( - backend=crypto_default_backend(), - public_exponent=65537, - key_size=1024 - ) - - private_key = key.private_bytes( - crypto_serialization.Encoding.PEM, - crypto_serialization.PrivateFormat.PKCS8, - crypto_serialization.NoEncryption()) - public_key = key.public_key().public_bytes( - crypto_serialization.Encoding.OpenSSH, - crypto_serialization.PublicFormat.OpenSSH - ) - return private_key.decode('utf-8'), public_key.decode('utf-8') - - -def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir): - """ - Creates modified opnfv/network environments for deployment - :param ds: deploy settings - :param ns: network settings - :param inv: node inventory - :param opnfv_env: file path for opnfv-environment file - :param net_env: file path for network-environment file - :param tmp_dir: Apex tmp dir - :return: - """ - - logging.info("Preparing opnfv-environment and network-environment files") - ds_opts = ds['deploy_options'] - tmp_opnfv_env = os.path.join(tmp_dir, os.path.basename(opnfv_env)) - shutil.copyfile(opnfv_env, tmp_opnfv_env) - tenant_nic_map = ns['networks']['tenant']['nic_mapping'] - tenant_ctrl_nic = tenant_nic_map['controller']['members'][0] - tenant_comp_nic = tenant_nic_map['compute']['members'][0] - - # SSH keys - private_key, public_key = make_ssh_key() - - # Make easier/faster variables to index in the file editor - if 'performance' in ds_opts: - perf = True - # vpp - if 'vpp' in ds_opts['performance']['Compute']: - perf_vpp_comp = ds_opts['performance']['Compute']['vpp'] - else: - perf_vpp_comp = None - if 'vpp' in ds_opts['performance']['Controller']: - perf_vpp_ctrl = ds_opts['performance']['Controller']['vpp'] - else: - perf_vpp_ctrl = None - - # ovs - if 'ovs' in ds_opts['performance']['Compute']: - perf_ovs_comp = ds_opts['performance']['Compute']['ovs'] - else: - perf_ovs_comp = None - - # kernel - if 'kernel' in ds_opts['performance']['Compute']: - perf_kern_comp = ds_opts['performance']['Compute']['kernel'] - else: - perf_kern_comp = None - else: - perf = False - - # Modify OPNFV environment - # TODO: Change to build a dict and outputing yaml rather than parsing - for line in fileinput.input(tmp_opnfv_env, inplace=True): - line = line.strip('\n') - output_line = line - if 'CloudDomain' in line: - output_line = " CloudDomain: {}".format(ns['domain_name']) - elif 'replace_private_key' in line: - output_line = " private_key: |\n" - key_out = '' - for line in private_key.splitlines(): - key_out += " {}\n".format(line) - output_line += key_out - elif 'replace_public_key' in line: - output_line = " public_key: '{}'".format(public_key) - - if ds_opts['sdn_controller'] == 'opendaylight' and \ - 'odl_vpp_routing_node' in ds_opts: - if 'opendaylight::vpp_routing_node' in line: - output_line = (" opendaylight::vpp_routing_node: {}.{}" - .format(ds_opts['odl_vpp_routing_node'], - ns['domain_name'])) - elif 'ControllerExtraConfig' in line: - output_line = (" ControllerExtraConfig:\n " - 
"tripleo::profile::base::neutron::agents::" - "honeycomb::interface_role_mapping:" - " ['{}:tenant-interface]'" - .format(tenant_ctrl_nic)) - elif 'NovaComputeExtraConfig' in line: - output_line = (" NovaComputeExtraConfig:\n " - "tripleo::profile::base::neutron::agents::" - "honeycomb::interface_role_mapping:" - " ['{}:tenant-interface]'" - .format(tenant_comp_nic)) - elif not ds_opts['sdn_controller'] and ds_opts['dataplane'] == 'fdio': - if 'NeutronVPPAgentPhysnets' in line: - output_line = (" NeutronVPPAgentPhysnets: 'datacentre:{}'". - format(tenant_ctrl_nic)) - elif ds_opts['sdn_controller'] == 'opendaylight' and ds_opts.get( - 'dvr') is True: - if 'OS::TripleO::Services::NeutronDhcpAgent' in line: - output_line = '' - elif 'NeutronDhcpAgentsPerNetwork' in line: - num_control, num_compute = inv.get_node_counts() - output_line = (" NeutronDhcpAgentsPerNetwork: {}" - .format(num_compute)) - elif 'ComputeServices' in line: - output_line = (" ComputeServices:\n" - " - OS::TripleO::Services::NeutronDhcpAgent") - - if perf: - for role in 'NovaCompute', 'Controller': - if role == 'NovaCompute': - perf_opts = perf_vpp_comp - else: - perf_opts = perf_vpp_ctrl - cfg = "{}ExtraConfig".format(role) - if cfg in line and perf_opts: - perf_line = '' - if 'main-core' in perf_opts: - perf_line += ("\n fdio::vpp_cpu_main_core: '{}'" - .format(perf_opts['main-core'])) - if 'corelist-workers' in perf_opts: - perf_line += ("\n " - "fdio::vpp_cpu_corelist_workers: '{}'" - .format(perf_opts['corelist-workers'])) - if perf_line: - output_line = (" {}:{}".format(cfg, perf_line)) - - # kernel args - # (FIXME) use compute's kernel settings for all nodes for now. - if 'ComputeKernelArgs' in line and perf_kern_comp: - kernel_args = '' - for k, v in perf_kern_comp.items(): - kernel_args += "{}={} ".format(k, v) - if kernel_args: - output_line = " ComputeKernelArgs: '{}'".\ - format(kernel_args) - if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp: - for k, v in OVS_PERF_MAP.items(): - if k in line and v in perf_ovs_comp: - output_line = " {}: '{}'".format(k, perf_ovs_comp[v]) - - print(output_line) - - logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env)) - - # Modify Network environment - for line in fileinput.input(net_env, inplace=True): - line = line.strip('\n') - if 'ComputeExtraConfigPre' in line and \ - ds_opts['dataplane'] == 'ovs_dpdk': - print(' OS::TripleO::ComputeExtraConfigPre: ' - './ovs-dpdk-preconfig.yaml') - elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \ - 'resource_registry' in line: - print("resource_registry:\n" - " OS::TripleO::NodeUserData: first-boot.yaml") - elif perf and perf_kern_comp and \ - 'NovaSchedulerDefaultFilters' in line: - print(" NovaSchedulerDefaultFilters: 'RamFilter," - "ComputeFilter,AvailabilityZoneFilter," - "ComputeCapabilitiesFilter,ImagePropertiesFilter," - "NUMATopologyFilter'") - else: - print(line) - - logging.info("network-environment file written to {}".format(net_env)) - - -def generate_ceph_key(): - key = os.urandom(16) - header = struct.pack('