From 92fc2a3c43305532b885ef70201f363204b69a3b Mon Sep 17 00:00:00 2001
From: Manuel Buil <mbuil@suse.com>
Date: Mon, 25 Jun 2018 10:57:02 +0100
Subject: xci: roles: create-vm-nodes: Add role for creating XCI VM nodes

Add a new role based on the bifrost one to create nodes for the bifrost
virtual deployments. This role will install and configure libvirt on the
host, download a prebuilt OPNFV VM image and deploy the OPNFV VM using
that image. Moreover, it will create the rest of the nodes for the
virtual deployment, which will be configured by bifrost later on.

Change-Id: I9fbd084261351d3b53ae373060f43df046191c5e
Co-Authored-by: Markos Chandras <mchandras@suse.de>
Signed-off-by: Manuel Buil <mbuil@suse.com>
---
 xci/playbooks/roles/create-vm-nodes/README.md      | 165 ++++++++++++++++++++
 .../roles/create-vm-nodes/defaults/main.yml        |  27 ++++
 .../roles/create-vm-nodes/tasks/create_vm.yml      | 166 +++++++++++++++++++++
 .../create-vm-nodes/tasks/download_opnfvimage.yml  |  32 ++++
 xci/playbooks/roles/create-vm-nodes/tasks/main.yml |  49 ++++++
 .../create-vm-nodes/tasks/prepare_libvirt.yml      | 119 +++++++++++++++
 .../roles/create-vm-nodes/templates/net.xml.j2     |  18 +++
 .../create-vm-nodes/templates/pool_dir.xml.j2      |   7 +
 .../roles/create-vm-nodes/templates/vm.xml.j2      |  76 ++++++++++
 .../roles/create-vm-nodes/vars/debian.yml          |  13 ++
 .../roles/create-vm-nodes/vars/redhat.yml          |  17 +++
 xci/playbooks/roles/create-vm-nodes/vars/suse.yml  |  15 ++
 12 files changed, 704 insertions(+)
 create mode 100644 xci/playbooks/roles/create-vm-nodes/README.md
 create mode 100644 xci/playbooks/roles/create-vm-nodes/defaults/main.yml
 create mode 100644 xci/playbooks/roles/create-vm-nodes/tasks/create_vm.yml
 create mode 100644 xci/playbooks/roles/create-vm-nodes/tasks/download_opnfvimage.yml
 create mode 100644 xci/playbooks/roles/create-vm-nodes/tasks/main.yml
 create mode 100644 xci/playbooks/roles/create-vm-nodes/tasks/prepare_libvirt.yml
 create mode 100644 xci/playbooks/roles/create-vm-nodes/templates/net.xml.j2
 create mode 100644 xci/playbooks/roles/create-vm-nodes/templates/pool_dir.xml.j2
 create mode 100644 xci/playbooks/roles/create-vm-nodes/templates/vm.xml.j2
 create mode 100644 xci/playbooks/roles/create-vm-nodes/vars/debian.yml
 create mode 100644 xci/playbooks/roles/create-vm-nodes/vars/redhat.yml
 create mode 100644 xci/playbooks/roles/create-vm-nodes/vars/suse.yml

diff --git a/xci/playbooks/roles/create-vm-nodes/README.md b/xci/playbooks/roles/create-vm-nodes/README.md
new file mode 100644
index 00000000..d96a2981
--- /dev/null
+++ b/xci/playbooks/roles/create-vm-nodes/README.md
@@ -0,0 +1,165 @@
+create-vm-nodes
+================
+
+This role creates the XCI VMs used to deploy scenarios. It is a fork of the
+bifrost role "bifrost-create-vm-nodes":
+
+https://github.com/openstack/bifrost/tree/master/playbooks/roles/bifrost-create-vm-nodes
+
+It creates the VMs based on the PDF and IDF documents, which describe the
+characteristics of the VMs or physical servers. For more information, check
+the spec:
+
+https://github.com/opnfv/releng-xci/blob/master/docs/specs/infra_manager.rst
+
+
+Flow
+----
+
+The script xci/infra/bifrost/scripts/bifrost-provision.sh calls the playbook
+that starts executing the role:
+
+xci-create-vms.yaml
+
+Note that at this stage the PDF and opnfv_vm.yml are loaded.
+
+Some distro-specific variable handling is done first, and then the
+prepare_libvirt playbook is run. This playbook, as the name says, gets
+everything ready to run libvirt.
+
+After that, the nodes_json_data dictionary is initialized. It collects the
+data for every node and is finally dumped into baremetal_json_file, which is
+read by bifrost in the subsequent role.
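+For illustration only, a trimmed entry of that file could look like the one
+below. The values are hypothetical; the exact fields are filled in by
+create_vm.yml from the PDF and the virtualbmc settings:
+
+    {
+        "node1": {
+            "name": "node1",
+            "driver": "ipmi",
+            "driver_info": {
+                "power": {
+                    "ipmi_address": "192.168.122.1",
+                    "ipmi_port": "624",
+                    "ipmi_username": "admin",
+                    "ipmi_password": "password"
+                }
+            },
+            "nics": [{"mac": "52:54:00:aa:bb:cc"}],
+            "properties": {"cpu_arch": "x86_64", "ram": "8", "cpus": "8"}
+        }
+    }
+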
+The opnfv VM and the rest of the VMs get created using the libvirt XML
+template, which gets filled with the PDF and opnfv_vm.yml variables.
+
+Finally, nodes_json_data is dumped.
+
+Requirements
+------------
+
+The following packages are required and ensured to be present:
+- libvirt-bin
+- qemu-utils
+- qemu-kvm
+- sgabios
+
+
+Warning
+-------
+
+- It is currently assumed that the OS for the VM will be installed in the
+first disk of the node described by the PDF. That is why there is a [0] in:
+
+  - name: create volume for vm
+    command: >
+      virsh --connect {{ vm_libvirt_uri }}
+      vol-create-as {{ node_storage_pool }} {{ vm_name }}.qcow2
+      {{ item.disks[0].disk_capacity }}
+      --format qcow2 {{ prealloc|default("") }}
+
+- It is assumed that the opnfv VM characteristics are not described in the
+PDF but in a similar document called opnfv_vm.yml
+
+- All references to csv files from bifrost-create-vm-nodes were removed
+
+Role Variables
+--------------
+
+baremetal_json_file: Defaults to '/tmp/baremetal.json'. It contains the
+                     required information for bifrost to configure the
+                     VMs appropriately.
+
+vm_disk_cache: Disk cache mode used by the VMs' disks.
+               Defaults to shell variable 'VM_DISK_CACHE', or,
+               if that is not set, to 'unsafe'.
+
+node_names: Space-separated names for nodes to be created.
+            Defaults to shell variable 'NODE_NAMES'.
+            If not set, VM names will be autogenerated.
+            Note that regardless of the number of names in this list,
+            at most 'num_nodes' VMs will be created.
+
+vm_network: Name of the libvirt network to create the nodes on.
+            Defaults to shell variable 'VM_NET_BRIDGE', or,
+            if that is not set, to 'default'.
+
+node_storage_pool: Name of the libvirt storage pool to create disks
+                   for VMs in.
+                   Defaults to shell variable 'LIBVIRT_STORAGE_POOL', or,
+                   if that is not set, to 'default'.
+                   If absent, this pool will be created.
+
+node_storage_pool_path: Path used by the libvirt storage pool
+                        'node_storage_pool' if it has to be created.
+                        Defaults to "/var/lib/libvirt/images".
+
+node_logdir: Directory where VM console logs are stored.
+             Defaults to "/var/log/libvirt/baremetal_logs".
+
+vm_emulator: Path to the emulator executable used to define VMs in libvirt.
+             Defaults to "/usr/bin/qemu-system-x86_64".
+             Generally users should not need to modify this setting,
+             as it is OS-specific and is overwritten by
+             os/distribution-specific defaults in this role when needed.
+
+vm_libvirt_uri: URI to connect to libvirt for networks, storage and VM
+                related actions.
+                Defaults to shell variable 'LIBVIRT_CONNECT_URI', or,
+                if that is not set, to 'qemu:///system'.
+                Note that currently connecting to remote libvirt is
+                not tested and is unsupported.
+
+network_interface: Name of the bridge to create when creating the
+                   'vm_network' libvirt network.
+                   Defaults to "virbr0".
+                   The name and default of this option are chosen to be
+                   the same as in the 'bifrost-ironic-install' role.
+
+opnfv_vm_network_ip: IP for the 'network_interface' bridge.
+                     Defaults to '192.168.122.1'.
+                     This setting is applied only when 'vm_network'
+                     was absent and is created from scratch.
+
+node_network_netmask: Subnet mask for the 'network_interface' bridge.
+                      Defaults to '255.255.255.0'.
+                      This setting is applied only when 'vm_network'
+                      was absent and is created from scratch.
+
+Dependencies
+------------
+
+None at this time.
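+
+Most of the variables above can be overridden through the corresponding
+shell variables before running the playbook, for example (illustrative
+values only):
+
+    export NODE_NAMES="opnfv node1 node2"
+    export VM_NET_BRIDGE="default"
+    export LIBVIRT_STORAGE_POOL="default"
+    export VM_DISK_CACHE="writeback"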
+
+Example Playbook
+----------------
+
+- hosts: localhost
+  connection: local
+  become: yes
+  gather_facts: yes
+  roles:
+    - role: create-vm-nodes
+
+License
+-------
+
+Copyright (c) 2018 SUSE Linux GmbH.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Author Information
+------------------
+
+mbuil@suse.com
diff --git a/xci/playbooks/roles/create-vm-nodes/defaults/main.yml b/xci/playbooks/roles/create-vm-nodes/defaults/main.yml
new file mode 100644
index 00000000..90ee6e5c
--- /dev/null
+++ b/xci/playbooks/roles/create-vm-nodes/defaults/main.yml
@@ -0,0 +1,27 @@
+---
+# defaults file for bifrost-create-vm-nodes
+baremetal_json_file: '/tmp/baremetal.json'
+
+# We collect these parameters from the PDF
+vm_nic: "virtio"
+vm_groups: {}
+vm_default_groups: "{{ lookup('env', 'DEFAULT_HOST_GROUPS').split() | default(['baremetal'], true) }}"
+vm_disk_cache: "{{ lookup('env', 'VM_DISK_CACHE') | default('unsafe', true) }}"
+node_names: "{{ lookup('env', 'NODE_NAMES').split() }}"
+
+# NOTE(pas-ha) name and default are chosen to be the same
+# as in 'bifrost-ironic-install' role
+network_interface: "virbr0"
+# NOTE(pas-ha) these correspond to settings for the libvirt network created by default
+vm_network: "{{ lookup('env', 'VM_NET_BRIDGE') | default('default', true) }}"
+node_network_netmask: "255.255.255.0"
+
+node_storage_pool: "{{ lookup('env', 'LIBVIRT_STORAGE_POOL') | default('default', true) }}"
+node_storage_pool_path: "/var/lib/libvirt/images"
+node_logdir: "/var/log/libvirt/baremetal_logs"
+# NOTE(pas-ha) next two are generic values for most OSes, overridden by distro-specific vars
+vm_emulator: "/usr/bin/qemu-system-x86_64"
+# NOTE(pas-ha) not really tested with non-local qemu connections
+vm_libvirt_uri: "{{ lookup('env', 'LIBVIRT_CONNECT_URI') | default('qemu:///system', true) }}"
+
+opnfv_image_path: "/var/lib/libvirt/images"
diff --git a/xci/playbooks/roles/create-vm-nodes/tasks/create_vm.yml b/xci/playbooks/roles/create-vm-nodes/tasks/create_vm.yml
new file mode 100644
index 00000000..d8169c2f
--- /dev/null
+++ b/xci/playbooks/roles/create-vm-nodes/tasks/create_vm.yml
@@ -0,0 +1,166 @@
+---
+# Create a VM and volume for it, save its MAC address
+- shell: "sudo virsh list --all | grep 'shut off' | wc -l"
+  register: num_vms
+
+- name: "Creating VM"
+  block:
+    # NOTE(pas-ha) item here refers to name of the vm
+    - set_fact:
+        vm_name: "{{ node_names[num_vms.stdout | int] }}"
+
+    - set_fact:
+        vm_log_file: "{{ node_logdir }}/{{ vm_name }}_console.log"
+        vm_host_group: "{{ vm_default_groups }}"
+
+    - set_fact:
+        vm_host_group: "{{ vm_default_groups | union(vm_groups[vm_name]) }}"
+      when: vm_groups[vm_name] is defined
+
+    - name: set prealloc arg for Debian
+      set_fact:
+        prealloc: "--prealloc-metadata"
+      when:
+        - ansible_os_family == 'Debian'
+        - vm_libvirt_uri == 'qemu:///system'
+
+    - name: list info on pools
+      virt_pool:
+        command: facts
+        uri: "{{ vm_libvirt_uri }}"
+
+    - name: list existing vms
+      virt:
+        command: list_vms
+      register: existing_vms
+
+    - block:
+        - name: Check if volume exists
+          stat:
+            path: "{{ opnfv_image_path }}/{{ vm_name }}.qcow2"
+          register: _vm_volume_prepared
+
+        # NOTE(pas-ha) Ansible still lacks modules to operate on libvirt volumes
+        # mbuil: Assuming there is only one disk [0]
+        - name: create volume for vm
+          command: >
+            virsh --connect {{ vm_libvirt_uri }}
+            vol-create-as {{ node_storage_pool }} {{ vm_name }}.qcow2
+            {{ item.disks[0].disk_capacity }}
+            --format qcow2 {{ prealloc|default("") }}
+          when:
+            - not _vm_volume_prepared.stat.exists
+            - (vm_name + '.qcow2') not in ansible_libvirt_pools[node_storage_pool].volumes
+
+        - name: set path to the volume created
+          set_fact:
+            vm_volume_path: "{{ ansible_libvirt_pools[node_storage_pool].path }}/{{ vm_name }}.qcow2"
+
+        - name: pre-touch the vm volume
+          file:
+            state: touch
+            path: "{{ vm_volume_path }}"
+          when: vm_libvirt_uri == 'qemu:///system'
+
+        # NOTE(TheJulia): CentOS default installs with an XFS root, and chattr
+        # fails to set +C on XFS. This could be more elegant, however the use
+        # case is for CI testing.
+        - name: set copy-on-write for volume on non-CentOS systems
+          command: chattr +C {{ vm_volume_path }}
+          ignore_errors: yes
+          when:
+            - ansible_distribution != 'CentOS'
+            - vm_libvirt_uri == 'qemu:///system'
+
+        # Fetches the xml descriptor from the template
+        - name: create_vm
+          virt:
+            command: define
+            name: "{{ vm_name }}"
+            uri: "{{ vm_libvirt_uri }}"
+            xml: "{{ lookup('template', 'vm.xml.j2') }}"
+
+      rescue:
+        - name: "Execute `dmesg` to collect debugging output should VM creation fail."
+          command: dmesg
+        - name: >
+            "Execute `virsh capabilities` to collect debugging output
+            should VM creation fail."
+          command: virsh capabilities
+        - name: "Abort due to failed VM creation"
+          fail: >
+            msg="VM creation step failed, please review dmesg
+            output for additional details"
+      when: vm_name not in existing_vms.list_vms
+
+    # TODO(pas-ha) replace 'command: vbmc ...' tasks
+    # with a custom Ansible module using vbmc Python API
+    - name: get list of nodes from virtualbmc
+      command: vbmc list
+      register: vbmc_list
+
+    # NOTE(NobodyCam): a plain substring match against the `vbmc list` output
+    # is used here, so VM names that are substrings of other names also match.
+    - name: delete vm from virtualbmc if it is there
+      command: vbmc delete {{ vm_name }}
+      when: vbmc_list.stdout.find(vm_name) != -1
+
+    - set_fact:
+        virtual_ipmi_port: "{{ (vm_ipmi_port_start|default(623) | int) + (num_vms.stdout | int) }}"
+
+    - name: plug vm into vbmc
+      command: vbmc add {{ vm_name }} --libvirt-uri {{ vm_libvirt_uri }} --port {{ virtual_ipmi_port }}
+
+    - name: start virtualbmc
+      command: vbmc start {{ vm_name }}
+
+    - name: get XML of the vm
+      virt:
+        name: "{{ vm_name }}"
+        command: get_xml
+      register: vm_xml
+
+    - name: Fetch the ip
+      set_fact:
+        vm_ip: "{%- for interface in item.interfaces %}{%- if 'native' in (interface.vlan | string) %}{{ interface.address }}{%- endif %}{%- endfor %}"
+
+    # Assumes there is only a single NIC per VM
+    - name: get MAC from vm XML
+      set_fact:
+        vm_mac: "{{ (vm_xml.get_xml | regex_findall(\"<mac address='.*'/>\") | first).split('=') | last | regex_replace(\"['/>]\", '') }}"
+
+    # NOTE(pas-ha) using default username and password set by virtualbmc - "admin" and "password" respectively
+    # see vbmc add --help
+    - name: set the json entry for vm
+      set_fact:
+        vm_data:
+          name: "{{ vm_name }}"
+          uuid: "{{ vm_name | to_uuid }}"
+          host_groups: "{{ vm_host_group }}"
+          driver: "{{ vm_node_driver|default('ipmi') }}"
+          driver_info:
+            power:
+              ipmi_address: "192.168.122.1"
+              ipmi_port: "{{ virtual_ipmi_port }}"
+              ipmi_username: "{{ item.remote_management.user }}"
+              ipmi_password: "{{ item.remote_management.pass }}"
+          nics:
+            - mac: "{{ vm_mac }}"
+          ansible_ssh_host: "{{ vm_ip }}"
+          ipv4_address: "{{ vm_ip }}"
+          properties:
+            cpu_arch: "{{ item.node.arch }}"
+            ram: "{{ item.node.memory.rstrip('G') }}"
+            cpus: "{{ item.node.cpus }}"
+            disk_size: "{{ item.disks[0].disk_capacity.rstrip('G') }}"
+
+    - name: add created vm info
+      set_fact:
+        nodes_json_data: "{{ nodes_json_data | combine({vm_name: vm_data}) }}"
+      when: vm_name != 'opnfv'
+
+    - name: Record OPNFV VM ip
+      set_fact:
+        opnfv_vm_ip: "{{ vm_ip }}"
+      when: vm_name == 'opnfv'
+
+  when: (num_nodes | int) > (num_vms.stdout | int)
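+
+# NOTE: for illustration only, not executed by the role. Once a VM has been
+# plugged into virtualbmc and started above, power control can be verified
+# manually from the host, e.g. for a node on the hypothetical port 624
+# (credentials are the virtualbmc defaults used in this file):
+#
+#   vbmc list
+#   ipmitool -I lanplus -H 192.168.122.1 -p 624 -U admin -P password power status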
diff --git a/xci/playbooks/roles/create-vm-nodes/tasks/download_opnfvimage.yml b/xci/playbooks/roles/create-vm-nodes/tasks/download_opnfvimage.yml
new file mode 100644
index 00000000..a227bc4f
--- /dev/null
+++ b/xci/playbooks/roles/create-vm-nodes/tasks/download_opnfvimage.yml
@@ -0,0 +1,32 @@
+---
+- name: Download the {{ xci_distro }} image checksum file
+  get_url:
+    dest: "{{ xci_cache }}/deployment_image.qcow2.sha256.txt"
+    force: no
+    url: http://artifacts.opnfv.org/releng/xci/images/{{ xci_distro }}.qcow2.sha256.txt
+    timeout: 3000
+- name: Extract checksum
+  shell: awk '{print $1}' "{{ xci_cache }}/deployment_image.qcow2.sha256.txt"
+  register: _image_checksum
+- fail:
+    msg: "Failed to get image checksum"
+  when: _image_checksum.stdout == ''
+- set_fact:
+    image_checksum: "{{ _image_checksum.stdout }}"
+- name: Download the {{ xci_distro }} image file
+  get_url:
+    url: http://artifacts.opnfv.org/releng/xci/images/{{ xci_distro }}.qcow2
+    checksum: "sha256:{{ image_checksum }}"
+    timeout: 3000
+    dest: "{{ xci_cache }}/deployment_image.qcow2"
+    force: no
+- name: Set correct mode for deployment_image.qcow2 file
+  file:
+    path: "{{ xci_cache }}/deployment_image.qcow2"
+    mode: '0755'
+    owner: 'root'
+    group: 'root'
+
+- name: Create copy of original deployment image
+  shell: "cp {{ xci_cache }}/deployment_image.qcow2 {{ opnfv_image_path }}/opnfv.qcow2"
+  become: yes
diff --git a/xci/playbooks/roles/create-vm-nodes/tasks/main.yml b/xci/playbooks/roles/create-vm-nodes/tasks/main.yml
new file mode 100644
index 00000000..7e0090e4
--- /dev/null
+++ b/xci/playbooks/roles/create-vm-nodes/tasks/main.yml
@@ -0,0 +1,49 @@
+---
+# baremetal_json_file could be the file coming from pdf/idf
+
+- name: "Load distribution defaults"
+  include_vars: "{{ ansible_os_family | lower }}.yml"
+
+# required_packages comes from the distribution defaults loaded above
+- name: "Install required packages"
+  package:
+    name: "{{ required_packages }}"
+
+- include_tasks: prepare_libvirt.yml
+- include_tasks: download_opnfvimage.yml
+
+- name: create placeholder var for vm entries in JSON format
+  set_fact:
+    nodes_json_data: {}
+
+# First we create the opnfv_vm
+- include_tasks: create_vm.yml
+  with_items: "{{ [opnfv_vm] + nodes }}"
+
+- name: Start the opnfv vm
+  virt:
+    command: start
+    name: opnfv
+
+- name: remove previous baremetal data file
+  file:
+    state: absent
+    path: "{{ baremetal_json_file }}"
+
+# We got nodes_json_data from the create_vm playbook
+- name: write to baremetal json file
+  copy:
+    dest: "{{ baremetal_json_file }}"
+    content: "{{ nodes_json_data | to_nice_json }}"
+
+- debug: var=nodes_json_data
+
+- name: >
+    "Set file permissions such that the baremetal data file
+    can be read by the user executing Ansible"
+  file:
+    path: "{{ baremetal_json_file }}"
+    owner: "{{ ansible_env.SUDO_USER }}"
+  when: >
+    ansible_env.SUDO_USER is defined and
+    baremetal_json_file != ""
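+
+# NOTE: for illustration only. Each item that create_vm.yml iterates over
+# above (opnfv_vm from opnfv_vm.yml, the rest from the PDF) is expected to
+# look roughly like the following; all values are hypothetical:
+#
+#   node:
+#     cpus: 8
+#     memory: 8G
+#     arch: x86_64
+#   disks:
+#     - disk_capacity: 100G
+#   interfaces:
+#     - mac_address: "52:54:00:aa:bb:cc"
+#       address: 192.168.122.2
+#       vlan: native
+#   remote_management:
+#     user: admin
+#     pass: password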
diff --git a/xci/playbooks/roles/create-vm-nodes/tasks/prepare_libvirt.yml b/xci/playbooks/roles/create-vm-nodes/tasks/prepare_libvirt.yml
new file mode 100644
index 00000000..e09e2d6b
--- /dev/null
+++ b/xci/playbooks/roles/create-vm-nodes/tasks/prepare_libvirt.yml
@@ -0,0 +1,119 @@
+---
+- name: "Restart libvirt service"
+  service: name="{{libvirt_service_name}}" state=restarted
+
+# NOTE(Shrews) We need to enable ip forwarding for the libvirt bridge to
+# operate properly with dnsmasq. This should be done before starting dnsmasq.
+- name: "Enable IP forwarding in sysctl"
+  sysctl:
+    name: "net.ipv4.ip_forward"
+    value: 1
+    sysctl_set: yes
+    state: present
+    reload: yes
+
+# NOTE(Shrews) Ubuntu packaging+apparmor issue prevents libvirt from loading
+# the ROM from /usr/share/misc.
+- name: "Look for sgabios in {{ sgabios_dir }}"
+  stat: path={{ sgabios_dir }}/sgabios.bin
+  register: test_sgabios_qemu
+
+- name: "Look for sgabios in /usr/share/misc"
+  stat: path=/usr/share/misc/sgabios.bin
+  register: test_sgabios_misc
+
+- name: "Place sgabios.bin"
+  command: cp /usr/share/misc/sgabios.bin /usr/share/qemu/sgabios.bin
+  when: >
+    not test_sgabios_qemu.stat.exists and
+    test_sgabios_misc.stat.exists
+
+# NOTE(TheJulia): In order to prevent conflicts, stop
+# dnsmasq to prevent conflicts with libvirt restarting.
+# TODO(TheJulia): We shouldn't need to do this, but the
+# libvirt dhcp instance conflicts with our specific config
+# and taking this path allows us to not refactor dhcp at
+# this moment. Our DHCP serving should be refactored
+# so we don't need to do this.
+- name: "Stop default dnsmasq service"
+  service:
+    name: dnsmasq
+    state: stopped
+  ignore_errors: true
+
+# NOTE(TheJulia): Seems if you test in a VM, this might
+# be helpful if your host was originally installed
+# with the default 192.168.122.0/24 network
+- name: destroy libvirt network
+  virt_net:
+    name: "{{ vm_network }}"
+    state: absent
+    uri: "{{ vm_libvirt_uri }}"
+
+- name: ensure libvirt network is present
+  virt_net:
+    name: "{{ vm_network }}"
+    state: present
+    xml: "{{ lookup('template', 'net.xml.j2') }}"
+    uri: "{{ vm_libvirt_uri }}"
+
+- name: find facts on libvirt networks
+  virt_net:
+    command: facts
+    uri: "{{ vm_libvirt_uri }}"
+
+# NOTE(pas-ha) yet another place where non-local libvirt will not work
+- name: "Delete network interface if virtual network is not active"
+  command: ip link del {{ ansible_libvirt_networks[vm_network].bridge }}
+  when:
+    - ansible_libvirt_networks[vm_network].state != 'active'
+    - vm_libvirt_uri == 'qemu:///system'
+  ignore_errors: yes
+
+- name: set libvirt network to autostart
+  virt_net:
+    name: "{{ vm_network }}"
+    autostart: yes
+    uri: "{{ vm_libvirt_uri }}"
+
+- name: ensure libvirt network is running
+  virt_net:
+    name: "{{ vm_network }}"
+    state: active
+    uri: "{{ vm_libvirt_uri }}"
+
+- name: get libvirt network status
+  virt_net:
+    name: "{{ vm_network }}"
+    command: status
+    uri: "{{ vm_libvirt_uri }}"
+  register: test_vm_net_status
+
+- name: fail if libvirt network is not active
+  assert:
+    that: test_vm_net_status.status == 'active'
+
+- name: define a libvirt pool if not set
+  virt_pool:
+    name: "{{ node_storage_pool }}"
+    state: present
+    uri: "{{ vm_libvirt_uri }}"
+    xml: "{{ lookup('template', 'pool_dir.xml.j2') }}"
+
+- name: ensure libvirt pool is running
+  virt_pool:
+    name: "{{ node_storage_pool }}"
+    state: active
+    autostart: yes
+    uri: "{{ vm_libvirt_uri }}"
+
+- name: create dir for bm logs
+  file:
+    state: directory
+    path: "{{ node_logdir }}"
+    recurse: yes
+    mode: "0755"
+
+- name: install virtualbmc
+  pip:
+    name: virtualbmc
diff --git a/xci/playbooks/roles/create-vm-nodes/templates/net.xml.j2 b/xci/playbooks/roles/create-vm-nodes/templates/net.xml.j2
new file mode 100644
index 00000000..3c082170
--- /dev/null
+++ b/xci/playbooks/roles/create-vm-nodes/templates/net.xml.j2
@@ -0,0 +1,18 @@
+<network>
+  <name>{{ vm_network }}</name>
+  <forward mode='nat'>
+    <nat>
+      <port start='1024' end='65535'/>
+    </nat>
+  </forward>
+  <bridge name='{{ network_interface }}' stp='on' delay='0'/>
+  <ip address='{{ opnfv_vm_network_ip }}' netmask='{{ node_network_netmask }}'>
+    <dhcp>
+    {%- for interface in opnfv_vm.interfaces %}
+    {%- if 'native' in (interface.vlan | string) %}
+      <host mac='{{ interface.mac_address }}' ip='{{ interface.address }}'/>
+    {%- endif %}
+    {%- endfor %}
+    </dhcp>
+  </ip>
+</network>
diff --git a/xci/playbooks/roles/create-vm-nodes/templates/pool_dir.xml.j2 b/xci/playbooks/roles/create-vm-nodes/templates/pool_dir.xml.j2
new file mode 100644
index 00000000..e4645deb
--- /dev/null
+++ b/xci/playbooks/roles/create-vm-nodes/templates/pool_dir.xml.j2
@@ -0,0 +1,7 @@
+<pool type='dir'>
+  <name>{{ node_storage_pool }}</name>
+  <target>
+    <path>{{ node_storage_pool_path }}</path>
+  </target>
+</pool>
diff --git a/xci/playbooks/roles/create-vm-nodes/templates/vm.xml.j2 b/xci/playbooks/roles/create-vm-nodes/templates/vm.xml.j2
new file mode 100644
index 00000000..c44fa6aa
--- /dev/null
+++ b/xci/playbooks/roles/create-vm-nodes/templates/vm.xml.j2
@@ -0,0 +1,76 @@
+<domain type='kvm'>
+  <name>{{ vm_name }}</name>
+  <memory unit='GiB'>{{ item.node.memory.rstrip('G') }}</memory>
+  <vcpu>{{ item.node.cpus }}</vcpu>
+  <os>
+    <type arch='{{ item.node.arch }}'>hvm</type>
+    {%- if 'opnfv' in vm_name -%}
+    <boot dev='hd'/>
+    {%- else -%}
+    <boot dev='network'/>
+    {% endif -%}
+    <bootmenu enable='no'/>
+    <bios useserial='yes' rebootTimeout='10000'/>
+  </os>
+  <features>
+    <acpi/>
+    <apic/>
+    <pae/>
+  </features>
+  <cpu mode='host-model'>
+    <model fallback='allow'/>
+  </cpu>
+  <clock offset='utc'/>
+  <on_poweroff>destroy</on_poweroff>
+  <on_reboot>restart</on_reboot>
+  <on_crash>restart</on_crash>
+  <devices>
+    <emulator>{{ vm_emulator }}</emulator>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='{{ vm_disk_cache }}'/>
+      <source file='{{ vm_volume_path }}'/>
+      <target dev='vda' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
+    </disk>
+    <controller type='ide' index='0'>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x1'/>
+    </controller>
+    {% set native_interfaces = [] %}
+    {%- for interface in item.interfaces %}
+    {%- if 'native' in (interface.vlan | string) %}
+    {%- set _ = native_interfaces.append(interface) %}
+    {%- endif %}
+    {%- endfor %}
+    {%- for interface in native_interfaces -%}
+    <interface type='network'>
+      <source network='{{ vm_network }}'/>
+      <model type='{{ vm_nic }}'/>
+      <mac address='{{ interface.mac_address }}'/>
+    </interface>
+    {% endfor -%}
+    <input type='mouse' bus='ps2'/>
+    <graphics type='vnc' port='-1' autoport='yes'/>
+    <video>
+      <model type='cirrus' vram='9216' heads='1'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
+    </video>
+    <serial type='file'>
+      <source path='{{ vm_log_file }}'/>
+      <target port='0'/>
+      <alias name='serial0'/>
+    </serial>
+    <serial type='pty'>
+      <target port='1'/>
+      <alias name='serial1'/>
+    </serial>
+    <console type='file'>
+      <source path='{{ vm_log_file }}'/>
+      <target type='serial' port='0'/>
+      <alias name='serial0'/>
+    </console>
+  </devices>
+</domain>