Diffstat (limited to 'mcp')
-rw-r--r--  mcp/patches/salt-formula-salt/0001-libvirt-xml-pass-loader-virt-machine-cpu-mode.patch | 157
-rw-r--r--  mcp/reclass/classes/cluster/mcp-common-ha/infra/kvm.yml.j2                             |  37
-rw-r--r--  mcp/reclass/classes/cluster/mcp-odl-ha/infra/kvm.yml.j2                                 |   9
m---------  mcp/salt-formulas/salt-formula-salt                                                     |   0
4 files changed, 189 insertions(+), 14 deletions(-)
diff --git a/mcp/patches/salt-formula-salt/0001-libvirt-xml-pass-loader-virt-machine-cpu-mode.patch b/mcp/patches/salt-formula-salt/0001-libvirt-xml-pass-loader-virt-machine-cpu-mode.patch
new file mode 100644
index 000000000..effab8743
--- /dev/null
+++ b/mcp/patches/salt-formula-salt/0001-libvirt-xml-pass-loader-virt-machine-cpu-mode.patch
@@ -0,0 +1,157 @@
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+: Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+:
+: All rights reserved. This program and the accompanying materials
+: are made available under the terms of the Apache License, Version 2.0
+: which accompanies this distribution, and is available at
+: http://www.apache.org/licenses/LICENSE-2.0
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+From: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
+Date: Sun, 24 Jun 2018 20:36:44 +0200
+Subject: [PATCH] libvirt xml: pass loader, virt machine, cpu mode
+
+- libvirt xml: pass loader param to vm
+ Based on upstream commit [1].
+- libvirt xml: pass virt machine type
+- libvirt xml: pass cpu mode to vm
+- virt module: Allow NVRAM unlinking on DOM undefine
+ UEFI-enabled VMs usually have pflash (NVRAM) devices attached,
+ which require one additional libvirt flag to be passed at 'undefine'.
+ This is usually the case for AArch64 (arm64) VMs, where AAVMF (AA64
+ UEFI) is the only supported guest bootloader.
+
+[1] https://github.com/saltstack/salt/commit/9cace9adb
+
+Signed-off-by: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
+---
+ README.rst | 7 +++++
+ _modules/virtng.py | 40 +++++++++++++++++++++++++++-
+ salt/control/virt.sls | 9 +++++++
+ tests/pillar/control_virt_custom.sls | 6 +++++
+ 4 files changed, 61 insertions(+), 1 deletion(-)
+
+diff --git a/README.rst b/README.rst
+index fd15b19..7f8f4a4 100644
+--- a/README.rst
++++ b/README.rst
+@@ -453,6 +453,13 @@ Control VM provisioning:
+ rate:
+ period: '1800'
+ bytes: '1500'
++ # Custom per-node loader definition (e.g. for AArch64 UEFI)
++ loader:
++ readonly: yes
++ type: pflash
++ path: /usr/share/AAVMF/AAVMF_CODE.fd
++ machine: virt-2.11 # Custom per-node virt machine type
++ cpu_mode: host-passthrough
+ mac:
+ nic01: AC:DE:48:AA:AA:AA
+ nic02: AC:DE:48:AA:AA:BB
+diff --git a/_modules/virtng.py b/_modules/virtng.py
+index ce09508..6abd0eb 100644
+--- a/_modules/virtng.py
++++ b/_modules/virtng.py
+@@ -530,6 +530,9 @@ def init(name,
+ disk='default',
+ saltenv='base',
+ rng=None,
++ loader=None,
++ machine=None,
++ cpu_mode=None,
+ **kwargs):
+ '''
+ Initialize a new vm
+@@ -649,6 +652,37 @@ def init(name,
+
+ xml = _gen_xml(name, cpu, mem, diskp, nicp, hypervisor, **kwargs)
+
++ # TODO: Remove this code and refactor module, when salt-common would have updated libvirt_domain.jinja template
++ if cpu_mode:
++ xml_doc = minidom.parseString(xml)
++ cpu_xml = xml_doc.createElement("cpu")
++ cpu_xml.setAttribute('mode', cpu_mode)
++ xml_doc.getElementsByTagName("domain")[0].appendChild(cpu_xml)
++ xml = xml_doc.toxml()
++
++ # TODO: Remove this code and refactor module, when salt-common would have updated libvirt_domain.jinja template
++ if machine:
++ xml_doc = minidom.parseString(xml)
++ os_xml = xml_doc.getElementsByTagName("domain")[0].getElementsByTagName("os")[0]
++ os_xml.getElementsByTagName("type")[0].setAttribute('machine', machine)
++ xml = xml_doc.toxml()
++
++ # TODO: Remove this code and refactor module, when salt-common would have updated libvirt_domain.jinja template
++ if loader and 'path' not in loader:
++ log.info('`path` is a required property of `loader`, and cannot be found. Skipping loader configuration')
++ loader = None
++ elif loader:
++ xml_doc = minidom.parseString(xml)
++ loader_xml = xml_doc.createElement("loader")
++ for key, val in loader.items():
++ if key == 'path':
++ continue
++ loader_xml.setAttribute(key, val)
++ loader_path_xml = xml_doc.createTextNode(loader['path'])
++ loader_xml.appendChild(loader_path_xml)
++ xml_doc.getElementsByTagName("domain")[0].getElementsByTagName("os")[0].appendChild(loader_xml)
++ xml = xml_doc.toxml()
++
+ # TODO: Remove this code and refactor module, when salt-common would have updated libvirt_domain.jinja template
+ for _nic in nicp:
+ if _nic['virtualport']:
+@@ -1552,7 +1586,11 @@ def undefine(vm_):
+ salt '*' virtng.undefine <vm name>
+ '''
+ dom = _get_dom(vm_)
+- return dom.undefine() == 0
++ if getattr(libvirt, 'VIR_DOMAIN_UNDEFINE_NVRAM', False):
++ # This one is only in 1.2.8+
++ return dom.undefineFlags(libvirt.VIR_DOMAIN_UNDEFINE_NVRAM) == 0
++ else:
++ return dom.undefine() == 0
+
+
+ def purge(vm_, dirs=False):
+diff --git a/salt/control/virt.sls b/salt/control/virt.sls
+index a2e56ff..1bcca95 100644
+--- a/salt/control/virt.sls
++++ b/salt/control/virt.sls
+@@ -58,6 +58,15 @@ salt_control_virt_{{ cluster_name }}_{{ node_name }}:
+ {%- elif rng is defined %}
+ - rng: {{ rng }}
+ {%- endif %}
++ {%- if node.loader is defined %}
++ - loader: {{ node.loader }}
++ {%- endif %}
++ {%- if node.machine is defined %}
++ - machine: {{ node.machine }}
++ {%- endif %}
++ {%- if node.cpu_mode is defined %}
++ - cpu_mode: {{ node.cpu_mode }}
++ {%- endif %}
+ - kwargs:
+ seed: True
+ serial_type: pty
+diff --git a/tests/pillar/control_virt_custom.sls b/tests/pillar/control_virt_custom.sls
+index 71cf37f..dcfafbd 100644
+--- a/tests/pillar/control_virt_custom.sls
++++ b/tests/pillar/control_virt_custom.sls
+@@ -63,11 +63,17 @@ salt:
+ image: ubuntu.qcow
+ size: medium
+ img_dest: /var/lib/libvirt/ssdimages
++ machine: virt-2.11
++ cpu_mode: host-passthrough
+ ubuntu2:
+ provider: node02.domain.com
+ image: bubuntu.qcomw
+ size: small
+ img_dest: /var/lib/libvirt/hddimages
++ loader:
++ readonly: yes
++ type: pflash
++ path: /usr/share/AAVMF/AAVMF_CODE.fd
+ ubuntu3:
+ provider: node03.domain.com
+ image: meowbuntu.qcom2
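For reference, a minimal standalone sketch (not part of the patch) of the XML manipulation the patched virtng.init() performs: cpu_mode is appended as a <cpu> element under <domain>, machine is set as an attribute on <os>/<type>, and loader becomes a <loader> element whose keys other than path turn into attributes while path becomes the element text. The bare-bones domain XML and the AArch64 values below are illustrative assumptions, not taken from the formula.

from xml.dom import minidom

# Inputs mirroring the per-node pillar values documented above (assumed values)
cpu_mode = 'host-passthrough'
machine = 'virt-2.11'
loader = {'readonly': 'yes', 'type': 'pflash', 'path': '/usr/share/AAVMF/AAVMF_CODE.fd'}

# Bare-bones stand-in for the XML normally produced by _gen_xml()
xml = "<domain type='kvm'><os><type arch='aarch64'>hvm</type></os></domain>"
xml_doc = minidom.parseString(xml)
domain = xml_doc.getElementsByTagName('domain')[0]

# <cpu mode='host-passthrough'/> appended directly under <domain>
cpu_xml = xml_doc.createElement('cpu')
cpu_xml.setAttribute('mode', cpu_mode)
domain.appendChild(cpu_xml)

# machine type set on <os>/<type>
os_xml = domain.getElementsByTagName('os')[0]
os_xml.getElementsByTagName('type')[0].setAttribute('machine', machine)

# <loader>: every key except 'path' becomes an attribute, 'path' becomes the text node
loader_xml = xml_doc.createElement('loader')
for key, val in loader.items():
    if key == 'path':
        continue
    loader_xml.setAttribute(key, val)
loader_xml.appendChild(xml_doc.createTextNode(loader['path']))
os_xml.appendChild(loader_xml)

print(xml_doc.toxml())
# ...<os><type arch="aarch64" machine="virt-2.11">hvm</type>
#    <loader readonly="yes" type="pflash">/usr/share/AAVMF/AAVMF_CODE.fd</loader></os>
# <cpu mode="host-passthrough"/>...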
diff --git a/mcp/reclass/classes/cluster/mcp-common-ha/infra/kvm.yml.j2 b/mcp/reclass/classes/cluster/mcp-common-ha/infra/kvm.yml.j2
index 62af0893a..f1bdf01d0 100644
--- a/mcp/reclass/classes/cluster/mcp-common-ha/infra/kvm.yml.j2
+++ b/mcp/reclass/classes/cluster/mcp-common-ha/infra/kvm.yml.j2
@@ -5,6 +5,7 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+{%- import 'net_map.j2' as nm with context %}
---
classes:
- system.linux.system.repo.glusterfs
@@ -92,34 +93,42 @@ parameters:
cluster:
internal:
node:
- mdb01:
+ mdb01: &salt_control_xenial_image_common_attr
image: ${_param:salt_control_xenial_image}
+{%- if conf.nodes[nm.ctl01.idx].node.arch == 'aarch64' %}
+ machine: virt
+ cpu_mode: host-passthrough
+ loader:
+ readonly: 'yes'
+ type: pflash
+ path: /usr/share/AAVMF/AAVMF_CODE.fd
+{%- endif %}
mdb02:
- image: ${_param:salt_control_xenial_image}
+ <<: *salt_control_xenial_image_common_attr
mdb03:
- image: ${_param:salt_control_xenial_image}
+ <<: *salt_control_xenial_image_common_attr
ctl01:
- image: ${_param:salt_control_xenial_image}
+ <<: *salt_control_xenial_image_common_attr
ctl02:
- image: ${_param:salt_control_xenial_image}
+ <<: *salt_control_xenial_image_common_attr
ctl03:
- image: ${_param:salt_control_xenial_image}
+ <<: *salt_control_xenial_image_common_attr
dbs01:
- image: ${_param:salt_control_xenial_image}
+ <<: *salt_control_xenial_image_common_attr
dbs02:
- image: ${_param:salt_control_xenial_image}
+ <<: *salt_control_xenial_image_common_attr
dbs03:
- image: ${_param:salt_control_xenial_image}
+ <<: *salt_control_xenial_image_common_attr
msg01:
- image: ${_param:salt_control_xenial_image}
+ <<: *salt_control_xenial_image_common_attr
msg02:
- image: ${_param:salt_control_xenial_image}
+ <<: *salt_control_xenial_image_common_attr
msg03:
- image: ${_param:salt_control_xenial_image}
+ <<: *salt_control_xenial_image_common_attr
prx01:
- image: ${_param:salt_control_xenial_image}
+ <<: *salt_control_xenial_image_common_attr
prx02:
- image: ${_param:salt_control_xenial_image}
+ <<: *salt_control_xenial_image_common_attr
provider: kvm03.${_param:cluster_domain}
virt:
nic:
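In the hunk above, mdb01 doubles as a YAML anchor and every other control-plane VM pulls the same attributes in through the <<: merge key, so on AArch64 all of them receive the machine, cpu_mode and loader settings alongside the image. A minimal sketch of that expansion, assuming standard PyYAML merge-key semantics and replacing the ${_param:...} reclass interpolation with a plain string for illustration:

import yaml

snippet = """
node:
  mdb01: &salt_control_xenial_image_common_attr
    image: salt_control_xenial_image
    machine: virt
    cpu_mode: host-passthrough
    loader:
      readonly: 'yes'
      type: pflash
      path: /usr/share/AAVMF/AAVMF_CODE.fd
  mdb02:
    <<: *salt_control_xenial_image_common_attr
"""

data = yaml.safe_load(snippet)
# mdb02 inherits every key from the anchored mdb01 mapping
assert data['node']['mdb02'] == data['node']['mdb01']
print(data['node']['mdb02']['cpu_mode'])  # host-passthrough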
diff --git a/mcp/reclass/classes/cluster/mcp-odl-ha/infra/kvm.yml.j2 b/mcp/reclass/classes/cluster/mcp-odl-ha/infra/kvm.yml.j2
index ab0da39b3..09a490669 100644
--- a/mcp/reclass/classes/cluster/mcp-odl-ha/infra/kvm.yml.j2
+++ b/mcp/reclass/classes/cluster/mcp-odl-ha/infra/kvm.yml.j2
@@ -5,6 +5,7 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+{%- import 'net_map.j2' as nm with context %}
---
{%- if conf.MCP_VCP %}
# NOTE(armband): we don't want to pull in salt.control for novcp
@@ -29,4 +30,12 @@ parameters:
provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
image: ${_param:salt_control_xenial_image}
size: opendaylight.server
+{%- if conf.nodes[nm.ctl01.idx].node.arch == 'aarch64' %}
+ machine: virt
+ cpu_mode: host-passthrough
+ loader:
+ readonly: 'yes'
+ type: pflash
+ path: /usr/share/AAVMF/AAVMF_CODE.fd
+{%- endif %}
{%- endif %}
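A rough sketch of how the aarch64-only guard in both kvm.yml.j2 templates behaves when rendered; the real templates look up conf.nodes[nm.ctl01.idx].node.arch via net_map.j2, which is simplified here to a plain arch variable (an illustrative stand-in only):

import jinja2

template = jinja2.Template("""
size: opendaylight.server
{%- if arch == 'aarch64' %}
machine: virt
cpu_mode: host-passthrough
loader:
  readonly: 'yes'
  type: pflash
  path: /usr/share/AAVMF/AAVMF_CODE.fd
{%- endif %}
""")

print(template.render(arch='aarch64'))  # emits the UEFI/pflash keys
print(template.render(arch='x86_64'))   # emits only the size line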
diff --git a/mcp/salt-formulas/salt-formula-salt b/mcp/salt-formulas/salt-formula-salt
new file mode 160000
+Subproject commit 262e8b0ba270baf46a3ad264a5acf3d6056b5cd
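For completeness, the NVRAM-aware undefine that the virtng.py hunk introduces, condensed into a standalone sketch; conn is assumed to be an already-open libvirt connection, and the getattr() guard mirrors the patched code since VIR_DOMAIN_UNDEFINE_NVRAM only exists in libvirt >= 1.2.8:

import libvirt

def undefine_with_nvram(conn, vm_name):
    # UEFI guests (e.g. AArch64/AAVMF) keep a pflash-backed NVRAM file that a
    # plain undefine() refuses to drop; pass the extra flag when available.
    dom = conn.lookupByName(vm_name)
    if getattr(libvirt, 'VIR_DOMAIN_UNDEFINE_NVRAM', False):
        return dom.undefineFlags(libvirt.VIR_DOMAIN_UNDEFINE_NVRAM) == 0
    return dom.undefine() == 0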