From 8f75445a37099590f36ce6044109209c67ef0deb Mon Sep 17 00:00:00 2001 From: Charalampos Kominos Date: Mon, 1 Oct 2018 13:12:46 +0200 Subject: Bring in aarch64 support in apex RDO builds packages which are aarch64 compatible but some configuration is needed to successfully deploy. This change: - Prepares the aarch64 docker.io repo as the source for Kolla Containers - Configures VM sizing for aarch64 undercloud. - Configures VM sizing for aarch64 virtual deploy targets. VMs need to be larger on aarch64 compared to x86 to avoid starvation of resources. (MYSQL) - Configures vda2 as the location of the Linux Kernel in aarch64 in a UEFI system - Configures the vNICs to be on the pci-bus instead of the virtio-mmio bus. This will enable the NICs to come up in the same order as the x86 ones, so the extra configuration in ansible is not needed - Configures apex to use a stable version of the ceph:daemon container - Configure apex for containerized undercloud in Rocky - Add extra ansible.cfg file for aarch64 which increases waiting times in ansible for aarch64 - Provide helper scripts for DIB to create aarch64 UEFI images Known limitations: - Selinux is interfering with DHCP requests in ironic and ssh so it must be disabled before the deploy command is run. - The aarch64 containers are frozen in this commit: https://trunk.rdoproject.org/centos7-rocky/f3/18/f3180de6439333a2813119ad4b00ef897fcd596f_70883030 - The 600s timeout defined in: https://bugs.launchpad.net/tripleo/+bug/1789680 is not enough for aarch64. 
A value of 1200s is recommended JIRA: APEX-619 Change-Id: Ia3f067821e12bba44939bbf8c0e4676f2da70239 Signed-off-by: Charalampos Kominos Signed-off-by: ting wu --- apex/builders/common_builder.py | 22 +++++++++++++++------- apex/common/constants.py | 2 ++ apex/deploy.py | 28 +++++++++++++++++++++++----- apex/overcloud/deploy.py | 6 +++++- apex/tests/test_apex_common_builder.py | 17 ++++++++++++----- apex/undercloud/undercloud.py | 4 ++-- apex/virtual/configure_vm.py | 9 +++++---- 7 files changed, 64 insertions(+), 24 deletions(-) (limited to 'apex') diff --git a/apex/builders/common_builder.py b/apex/builders/common_builder.py index 2934a1d8..7627ae3c 100644 --- a/apex/builders/common_builder.py +++ b/apex/builders/common_builder.py @@ -59,7 +59,7 @@ def project_to_path(project, patch=None): return "/usr/lib/python2.7/site-packages/" -def project_to_docker_image(project): +def project_to_docker_image(project, docker_url): """ Translates OpenStack project to OOO services that are containerized :param project: name of OpenStack project @@ -69,7 +69,8 @@ def project_to_docker_image(project): # based on project hub_output = utils.open_webpage( - urllib.parse.urljoin(con.DOCKERHUB_OOO, '?page_size=1024'), timeout=10) + urllib.parse.urljoin(docker_url, + '?page_size=1024'), timeout=10) try: results = json.loads(hub_output.decode())['results'] except Exception as e: @@ -89,7 +90,7 @@ def project_to_docker_image(project): return docker_images -def is_patch_promoted(change, branch, docker_image=None): +def is_patch_promoted(change, branch, docker_url, docker_image=None): """ Checks to see if a patch that is in merged exists in either the docker container or the promoted tripleo images @@ -122,8 +123,8 @@ def is_patch_promoted(change, branch, docker_image=None): return True else: # must be a docker patch, check docker tag modified time - docker_url = con.DOCKERHUB_OOO.replace('tripleomaster', - "tripleo{}".format(branch)) + docker_url = docker_url.replace('tripleomaster', + 
"tripleo{}".format(branch)) url_path = "{}/tags/{}".format(docker_image, con.DOCKER_TAG) docker_url = urllib.parse.urljoin(docker_url, url_path) logging.debug("docker url is: {}".format(docker_url)) @@ -176,10 +177,15 @@ def add_upstream_patches(patches, image, tmp_dir, # and move the patch into the containers directory. We also assume # this builder call is for overcloud, because we do not support # undercloud containers + if platform.machine() == 'aarch64': + docker_url = con.DOCKERHUB_AARCH64 + else: + docker_url = con.DOCKERHUB_OOO if docker_tag and 'python' in project_path: # Projects map to multiple THT services, need to check which # are supported - ooo_docker_services = project_to_docker_image(patch['project']) + ooo_docker_services = project_to_docker_image(patch['project'], + docker_url) docker_img = ooo_docker_services[0] else: ooo_docker_services = [] @@ -189,6 +195,7 @@ def add_upstream_patches(patches, image, tmp_dir, patch['change-id']) patch_promoted = is_patch_promoted(change, branch.replace('stable/', ''), + docker_url, docker_img) if patch_diff and not patch_promoted: @@ -288,7 +295,8 @@ def prepare_container_images(prep_file, branch='master', neutron_driver=None): p_set['neutron_driver'] = neutron_driver p_set['namespace'] = "docker.io/tripleo{}".format(branch) if platform.machine() == 'aarch64': - p_set['ceph_tag'] = 'master-fafda7d-luminous-centos-7-aarch64' + p_set['namespace'] = "docker.io/armbandapex" + p_set['ceph_tag'] = 'v3.1.0-stable-3.1-luminous-centos-7-aarch64' except KeyError: logging.error("Invalid prep file format: {}".format(prep_file)) diff --git a/apex/common/constants.py b/apex/common/constants.py index e89e7e75..dbdf70d9 100644 --- a/apex/common/constants.py +++ b/apex/common/constants.py @@ -67,6 +67,8 @@ VALID_DOCKER_SERVICES = { } DOCKERHUB_OOO = 'https://registry.hub.docker.com/v2/repositories' \ '/tripleomaster/' +DOCKERHUB_AARCH64 = 'https://registry.hub.docker.com/v2/repositories' \ + '/armbandapex/' KUBESPRAY_URL = 
'https://github.com/kubernetes-incubator/kubespray.git' OPNFV_ARTIFACTS = 'http://storage.googleapis.com/artifacts.opnfv.org' CUSTOM_OVS = '{}/apex/random/openvswitch-2.9.0-9.el7fdn.x86_64.' \ diff --git a/apex/deploy.py b/apex/deploy.py index b74d5292..670fb6bd 100644 --- a/apex/deploy.py +++ b/apex/deploy.py @@ -293,12 +293,24 @@ def main(): 'requires at least 12GB per controller.') logging.info('Increasing RAM per controller to 12GB') elif args.virt_default_ram < 10: - control_ram = 10 - logging.warning('RAM per controller is too low. nosdn ' - 'requires at least 10GB per controller.') - logging.info('Increasing RAM per controller to 10GB') + if platform.machine() == 'aarch64': + control_ram = 16 + logging.warning('RAM per controller is too low for ' + 'aarch64 ') + logging.info('Increasing RAM per controller to 16GB') + else: + control_ram = 10 + logging.warning('RAM per controller is too low. nosdn ' + 'requires at least 10GB per controller.') + logging.info('Increasing RAM per controller to 10GB') else: control_ram = args.virt_default_ram + if platform.machine() == 'aarch64' and args.virt_cpus < 16: + vcpus = 16 + logging.warning('aarch64 requires at least 16 vCPUS per ' + 'target VM. 
Increasing to 16.') + else: + vcpus = args.virt_cpus if ha_enabled and args.virt_compute_nodes < 2: logging.debug( 'HA enabled, bumping number of compute nodes to 2') @@ -307,7 +319,7 @@ def main(): num_computes=args.virt_compute_nodes, controller_ram=control_ram * 1024, compute_ram=compute_ram * 1024, - vcpus=args.virt_cpus + vcpus=vcpus ) inventory = Inventory(args.inventory_file, ha_enabled, args.virtual) logging.info("Inventory is:\n {}".format(pprint.pformat( @@ -435,6 +447,12 @@ def main(): docker_env = 'containers-prepare-parameter.yaml' shutil.copyfile(os.path.join(args.deploy_dir, docker_env), os.path.join(APEX_TEMP_DIR, docker_env)) + # Upload extra ansible.cfg + if platform.machine() == 'aarch64': + ansible_env = 'ansible.cfg' + shutil.copyfile(os.path.join(args.deploy_dir, ansible_env), + os.path.join(APEX_TEMP_DIR, ansible_env)) + c_builder.prepare_container_images( os.path.join(APEX_TEMP_DIR, docker_env), branch=branch.replace('stable/', ''), diff --git a/apex/overcloud/deploy.py b/apex/overcloud/deploy.py index c526a98e..e3177065 100644 --- a/apex/overcloud/deploy.py +++ b/apex/overcloud/deploy.py @@ -245,12 +245,16 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir, if net_data: cmd += ' --networks-file network_data.yaml' libvirt_type = 'kvm' - if virtual: + if virtual and (platform.machine() != 'aarch64'): with open('/sys/module/kvm_intel/parameters/nested') as f: nested_kvm = f.read().strip() if nested_kvm != 'Y': libvirt_type = 'qemu' + elif virtual and (platform.machine() == 'aarch64'): + libvirt_type = 'qemu' cmd += ' --libvirt-type {}'.format(libvirt_type) + if platform.machine() == 'aarch64': + cmd += ' --override-ansible-cfg /home/stack/ansible.cfg ' logging.info("Deploy command set: {}".format(cmd)) with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh: diff --git a/apex/tests/test_apex_common_builder.py b/apex/tests/test_apex_common_builder.py index dc4383b9..3ff95bb5 100644 --- a/apex/tests/test_apex_common_builder.py +++ 
b/apex/tests/test_apex_common_builder.py @@ -57,7 +57,8 @@ class TestCommonBuilder(unittest.TestCase): dummy_change = {'submitted': '2017-06-05 20:23:09.000000000', 'status': 'MERGED'} self.assertTrue(c_builder.is_patch_promoted(dummy_change, - 'master')) + 'master', + con.DOCKERHUB_OOO)) def test_is_patch_promoted_docker(self): dummy_change = {'submitted': '2017-06-05 20:23:09.000000000', @@ -65,13 +66,15 @@ class TestCommonBuilder(unittest.TestCase): dummy_image = 'centos-binary-opendaylight' self.assertTrue(c_builder.is_patch_promoted(dummy_change, 'master', + con.DOCKERHUB_OOO, docker_image=dummy_image)) def test_patch_not_promoted(self): dummy_change = {'submitted': '2900-06-05 20:23:09.000000000', 'status': 'MERGED'} self.assertFalse(c_builder.is_patch_promoted(dummy_change, - 'master')) + 'master', + con.DOCKERHUB_OOO)) def test_patch_not_promoted_docker(self): dummy_change = {'submitted': '2900-06-05 20:23:09.000000000', @@ -79,13 +82,15 @@ class TestCommonBuilder(unittest.TestCase): dummy_image = 'centos-binary-opendaylight' self.assertFalse(c_builder.is_patch_promoted(dummy_change, 'master', + con.DOCKERHUB_OOO, docker_image=dummy_image)) def test_patch_not_promoted_and_not_merged(self): dummy_change = {'submitted': '2900-06-05 20:23:09.000000000', 'status': 'BLAH'} self.assertFalse(c_builder.is_patch_promoted(dummy_change, - 'master')) + 'master', + con.DOCKERHUB_OOO)) @patch('builtins.open', mock_open()) @patch('apex.builders.common_builder.is_patch_promoted') @@ -241,7 +246,8 @@ class TestCommonBuilder(unittest.TestCase): '/dummytmp/dummyrepo.tar') def test_project_to_docker_image(self): - found_services = c_builder.project_to_docker_image(project='nova') + found_services = c_builder.project_to_docker_image('nova', + con.DOCKERHUB_OOO) assert 'nova-api' in found_services @patch('apex.common.utils.open_webpage') @@ -250,7 +256,8 @@ class TestCommonBuilder(unittest.TestCase): mock_open_web.return_value = b'{"blah": "blah"}' 
self.assertRaises(exceptions.ApexCommonBuilderException, c_builder.project_to_docker_image, - 'nova') + 'nova', + con.DOCKERHUB_OOO) def test_get_neutron_driver(self): ds_opts = {'dataplane': 'fdio', diff --git a/apex/undercloud/undercloud.py b/apex/undercloud/undercloud.py index feae43c3..ccdcd168 100644 --- a/apex/undercloud/undercloud.py +++ b/apex/undercloud/undercloud.py @@ -64,7 +64,7 @@ class Undercloud: if self.external_net: networks.append('external') console = 'ttyAMA0' if platform.machine() == 'aarch64' else 'ttyS0' - root = 'vda' if platform.machine() == 'aarch64' else 'sda' + root = 'vda2' if platform.machine() == 'aarch64' else 'sda' self.vm = vm_lib.create_vm(name='undercloud', image=self.volume, @@ -112,7 +112,7 @@ class Undercloud: # give 10 seconds to come up time.sleep(10) # set IP - for x in range(5): + for x in range(10): if self._set_ip(): logging.info("Undercloud started. IP Address: {}".format( self.ip)) diff --git a/apex/virtual/configure_vm.py b/apex/virtual/configure_vm.py index ba0398bb..9d47bf03 100755 --- a/apex/virtual/configure_vm.py +++ b/apex/virtual/configure_vm.py @@ -102,6 +102,10 @@ def create_vm(name, image, diskbus='sata', baremetal_interfaces=['admin'], with open(os.path.join(template_dir, 'domain.xml'), 'r') as f: source_template = f.read() imagefile = os.path.realpath(image) + + if arch == 'aarch64' and diskbus == 'sata': + diskbus = 'virtio' + memory = int(memory) * 1024 params = { 'name': name, @@ -118,9 +122,6 @@ def create_vm(name, image, diskbus='sata', baremetal_interfaces=['admin'], 'user_interface': '', } - # assign virtio as default for aarch64 - if arch == 'aarch64' and diskbus == 'sata': - diskbus = 'virtio' # Configure the bus type for the target disk device params['diskbus'] = diskbus nicparams = { @@ -171,7 +172,7 @@ def create_vm(name, image, diskbus='sata', baremetal_interfaces=['admin'], """ params['user_interface'] = """ -
+
-- cgit 1.2.3-korg