::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
: Copyright (c) 2018 Mirantis Inc., Enea AB and others.
:
: All rights reserved. This program and the accompanying materials
: are made available under the terms of the Apache License, Version 2.0
: which accompanies this distribution, and is available at
: http://www.apache.org/licenses/LICENSE-2.0
::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
From: Alexandru Avadanii
Date: Sun, 24 Jun 2018 20:36:44 +0200
Subject: [PATCH] libvirt xml: pass loader, virt machine, cpu mode

- libvirt xml: pass loader param to vm
  Based on upstream commit [1].
- libvirt xml: pass virt machine type
- libvirt xml: pass cpu mode to vm
- virt module: Allow NVRAM unlinking on DOM undefine
  UEFI-enabled VMs usually have pflash (NVRAM) devices attached,
  which require one additional libvirt flag to be passed at 'undefine'.
  This is usually the case for AArch64 (arm64) VMs, where AAVMF
  (AA64 UEFI) is the only supported guest bootloader.

[1] https://github.com/saltstack/salt/commit/9cace9adb

Signed-off-by: Alexandru Avadanii
---
 README.rst                           |  7 +++++
 _modules/virtng.py                   | 40 +++++++++++++++++++++++++++
 salt/control/virt.sls                |  9 +++++++
 tests/pillar/control_virt_custom.sls |  6 +++++
 4 files changed, 61 insertions(+), 1 deletion(-)

diff --git a/README.rst b/README.rst
index fd15b19..7f8f4a4 100644
--- a/README.rst
+++ b/README.rst
@@ -453,6 +453,13 @@ Control VM provisioning:
                 rate:
                   period: '1800'
                   bytes: '1500'
+              # Custom per-node loader definition (e.g. for AArch64 UEFI)
+              loader:
+                readonly: yes
+                type: pflash
+                path: /usr/share/AAVMF/AAVMF_CODE.fd
+              machine: virt-2.11  # Custom per-node virt machine type
+              cpu_mode: host-passthrough
               mac:
                 nic01: AC:DE:48:AA:AA:AA
                 nic02: AC:DE:48:AA:AA:BB
diff --git a/_modules/virtng.py b/_modules/virtng.py
index ce09508..6abd0eb 100644
--- a/_modules/virtng.py
+++ b/_modules/virtng.py
@@ -530,6 +530,9 @@ def init(name,
          disk='default',
          saltenv='base',
          rng=None,
+         loader=None,
+         machine=None,
+         cpu_mode=None,
          **kwargs):
     '''
     Initialize a new vm
@@ -649,6 +652,37 @@ def init(name,

     xml = _gen_xml(name, cpu, mem, diskp, nicp, hypervisor, **kwargs)

+    # TODO: Remove this code and refactor module, when salt-common would have updated libvirt_domain.jinja template
+    if cpu_mode:
+        xml_doc = minidom.parseString(xml)
+        cpu_xml = xml_doc.createElement("cpu")
+        cpu_xml.setAttribute('mode', cpu_mode)
+        xml_doc.getElementsByTagName("domain")[0].appendChild(cpu_xml)
+        xml = xml_doc.toxml()
+
+    # TODO: Remove this code and refactor module, when salt-common would have updated libvirt_domain.jinja template
+    if machine:
+        xml_doc = minidom.parseString(xml)
+        os_xml = xml_doc.getElementsByTagName("domain")[0].getElementsByTagName("os")[0]
+        os_xml.getElementsByTagName("type")[0].setAttribute('machine', machine)
+        xml = xml_doc.toxml()
+
+    # TODO: Remove this code and refactor module, when salt-common would have updated libvirt_domain.jinja template
+    if loader and 'path' not in loader:
+        log.info('`path` is a required property of `loader`, and cannot be found. Skipping loader configuration')
+        loader = None
+    elif loader:
+        xml_doc = minidom.parseString(xml)
+        loader_xml = xml_doc.createElement("loader")
+        for key, val in loader.items():
+            if key == 'path':
+                continue
+            loader_xml.setAttribute(key, val)
+        loader_path_xml = xml_doc.createTextNode(loader['path'])
+        loader_xml.appendChild(loader_path_xml)
+        xml_doc.getElementsByTagName("domain")[0].getElementsByTagName("os")[0].appendChild(loader_xml)
+        xml = xml_doc.toxml()
+
     # TODO: Remove this code and refactor module, when salt-common would have updated libvirt_domain.jinja template
     for _nic in nicp:
         if _nic['virtualport']:
@@ -1552,7 +1586,11 @@ def undefine(vm_):
         salt '*' virtng.undefine
     '''
     dom = _get_dom(vm_)
-    return dom.undefine() == 0
+    if getattr(libvirt, 'VIR_DOMAIN_UNDEFINE_NVRAM', False):
+        # This one is only in 1.2.8+
+        return dom.undefineFlags(libvirt.VIR_DOMAIN_UNDEFINE_NVRAM) == 0
+    else:
+        return dom.undefine() == 0


 def purge(vm_, dirs=False):
diff --git a/salt/control/virt.sls b/salt/control/virt.sls
index a2e56ff..1bcca95 100644
--- a/salt/control/virt.sls
+++ b/salt/control/virt.sls
@@ -58,6 +58,15 @@ salt_control_virt_{{ cluster_name }}_{{ node_name }}:
   {%- elif rng is defined %}
   - rng: {{ rng }}
   {%- endif %}
+  {%- if node.loader is defined %}
+  - loader: {{ node.loader }}
+  {%- endif %}
+  {%- if node.machine is defined %}
+  - machine: {{ node.machine }}
+  {%- endif %}
+  {%- if node.cpu_mode is defined %}
+  - cpu_mode: {{ node.cpu_mode }}
+  {%- endif %}
   - kwargs:
       seed: True
       serial_type: pty
diff --git a/tests/pillar/control_virt_custom.sls b/tests/pillar/control_virt_custom.sls
index 71cf37f..dcfafbd 100644
--- a/tests/pillar/control_virt_custom.sls
+++ b/tests/pillar/control_virt_custom.sls
@@ -63,11 +63,17 @@ salt:
             image: ubuntu.qcow
             size: medium
             img_dest: /var/lib/libvirt/ssdimages
+            machine: virt-2.11
+            cpu_mode: host-passthrough
           ubuntu2:
             provider: node02.domain.com
             image: bubuntu.qcomw
             size: small
             img_dest: /var/lib/libvirt/hddimages
+            loader:
+              readonly: yes
+              type: pflash
+              path: /usr/share/AAVMF/AAVMF_CODE.fd
           ubuntu3:
             provider: node03.domain.com
             image: meowbuntu.qcom2
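
For reference, a minimal standalone sketch of the domain-XML edits the patched
init() performs when cpu_mode, machine and loader are given. The sample input
XML and the parameter values below are assumptions for illustration only; the
minidom calls mirror the hunk above.

    # Illustrative sketch, not part of the patch.
    from xml.dom import minidom

    # Assumed minimal domain XML, standing in for the output of _gen_xml().
    xml = "<domain type='kvm'><os><type arch='aarch64'>hvm</type></os></domain>"
    cpu_mode = 'host-passthrough'
    machine = 'virt-2.11'
    loader = {'readonly': 'yes', 'type': 'pflash',
              'path': '/usr/share/AAVMF/AAVMF_CODE.fd'}

    xml_doc = minidom.parseString(xml)
    domain = xml_doc.getElementsByTagName('domain')[0]

    # Append <cpu mode='host-passthrough'/> to <domain>.
    cpu_xml = xml_doc.createElement('cpu')
    cpu_xml.setAttribute('mode', cpu_mode)
    domain.appendChild(cpu_xml)

    # Set the machine type on the existing <os><type> element.
    os_xml = domain.getElementsByTagName('os')[0]
    os_xml.getElementsByTagName('type')[0].setAttribute('machine', machine)

    # Add <loader readonly='yes' type='pflash'>...AAVMF_CODE.fd</loader> under <os>.
    loader_xml = xml_doc.createElement('loader')
    for key, val in loader.items():
        if key == 'path':
            continue
        loader_xml.setAttribute(key, val)
    loader_xml.appendChild(xml_doc.createTextNode(loader['path']))
    os_xml.appendChild(loader_xml)

    print(xml_doc.toxml())

Running the sketch prints a domain definition with the machine type on
<os><type>, a <loader> element under <os> and a top-level <cpu mode=...>
element, which is the shape the patched init() hands to libvirt.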
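
Similarly, a sketch of the undefine() fallback, assuming libvirt-python is
installed and a domain named 'ubuntu2' is defined on qemu:///system (both the
URI and the domain name are illustrative):

    import libvirt

    conn = libvirt.open('qemu:///system')
    dom = conn.lookupByName('ubuntu2')

    # VIR_DOMAIN_UNDEFINE_NVRAM is only available in libvirt >= 1.2.8; with it,
    # the pflash/NVRAM file of a UEFI guest is removed together with the domain.
    nvram_flag = getattr(libvirt, 'VIR_DOMAIN_UNDEFINE_NVRAM', None)
    if nvram_flag is not None:
        undefined = dom.undefineFlags(nvram_flag) == 0
    else:
        undefined = dom.undefine() == 0

    conn.close()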