summaryrefslogtreecommitdiffstats
path: root/xci
diff options
context:
space:
mode:
Diffstat (limited to 'xci')
-rw-r--r--xci/playbooks/roles/create-vm-nodes/README.md165
-rw-r--r--xci/playbooks/roles/create-vm-nodes/defaults/main.yml27
-rw-r--r--xci/playbooks/roles/create-vm-nodes/tasks/create_vm.yml166
-rw-r--r--xci/playbooks/roles/create-vm-nodes/tasks/download_opnfvimage.yml32
-rw-r--r--xci/playbooks/roles/create-vm-nodes/tasks/main.yml49
-rw-r--r--xci/playbooks/roles/create-vm-nodes/tasks/prepare_libvirt.yml119
-rw-r--r--xci/playbooks/roles/create-vm-nodes/templates/net.xml.j218
-rw-r--r--xci/playbooks/roles/create-vm-nodes/templates/pool_dir.xml.j27
-rw-r--r--xci/playbooks/roles/create-vm-nodes/templates/vm.xml.j276
-rw-r--r--xci/playbooks/roles/create-vm-nodes/vars/debian.yml13
-rw-r--r--xci/playbooks/roles/create-vm-nodes/vars/redhat.yml17
-rw-r--r--xci/playbooks/roles/create-vm-nodes/vars/suse.yml15
12 files changed, 704 insertions, 0 deletions
diff --git a/xci/playbooks/roles/create-vm-nodes/README.md b/xci/playbooks/roles/create-vm-nodes/README.md
new file mode 100644
index 00000000..d96a2981
--- /dev/null
+++ b/xci/playbooks/roles/create-vm-nodes/README.md
@@ -0,0 +1,165 @@
+create-vm-nodes
+================
+
+This role creates the XCI VMs used to deploy scenarios. It is a branch from the
+bifrost role "bifrost-create-vm-nodes":
+
+https://github.com/openstack/bifrost/tree/master/playbooks/roles/bifrost-create-vm-nodes
+
+It creates the VMs based on the pdf and idf document which describes the
+characteristics of the VMs or physical servers. For more information check the
+spec:
+
+https://github.com/opnfv/releng-xci/blob/master/docs/specs/infra_manager.rst
+
+
+Flow
+----
+
+The script xci/infra/bifrost/scripts/bifrost-provision.sh will call the
+playbook that starts executing the role:
+
+xci-create-vms.yaml
+
+Note that at this stage the pdf and the opnfv_vm.yml are loaded.
+
+Some distro specific tasks related to variables are done and then the
+prepare_libvirt playbook is run. This playbook, as the name says,
+gets everything ready to run libvirt.
+
+After that, the nodes_json_data dictionary is initialized. This will collect
+the data and finally dump it all into the baremetal_json_file which will be
+read by bifrost in the subsequent role.
+
+The opnfv vm and the rest of vms get created using the xml libvirt template,
+which gets filled with the pdf and opnfv_vm.yml variables.
+
+Finally nodes_json_data is dumped.
+
+Requirements
+------------
+
+The following packages are required and ensured to be present:
+- libvirt-bin
+- qemu-utils
+- qemu-kvm
+- sgabios
+
+
+Warning
+-------
+
+- It is currently assumed that the OS for the VM will be installed in the first
+disk of the node described by the pdf. That's why there is a [0] in:
+
+ - name: create volume for vm
+ command: >
+ virsh --connect {{ vm_libvirt_uri }}
+ vol-create-as {{ node_storage_pool }} {{ vm_name }}.qcow2
+ {{ item.disks[0].disk_capacity }}
+ --format qcow2 {{ prealloc|default("") }}
+
+- It is assumed that the opnfv VM characteristics are not described in the pdf
+but in a similar document called opnfv_vm.yml
+
+- All references to csv from bifrost-create-vm-nodes were removed
+
+Role Variables
+--------------
+
+baremetal_json_file: Defaults to '/tmp/baremetal.json'. It contains the
+ required information for bifrost to configure the
+ VMs appropriately
+
+vm_disk_cache: Disk cache mode to use for the VMs' disks.
+               Defaults to shell variable 'VM_DISK_CACHE', or,
+               if that is not set, to 'unsafe'.
+
+node_names: Space-separated names for nodes to be created.
+ Defaults to shell variable 'NODE_NAMES'.
+ If not set, VM names will be autogenerated.
+                 Note that regardless of the number of names in this list,
+ at most 'test_vm_num_nodes' VMs will be created.
+
+vm_network: Name of the libvirt network to create the nodes on.
+ Defaults to shell variable 'VM_NET_BRIDGE', or,
+ if that is not set, to 'default'.
+
+node_storage_pool: Name of the libvirt storage pool to create disks
+ for VMs in.
+ Defaults to shell variable 'LIBVIRT_STORAGE_POOL', or,
+ if that is not set, to 'default'.
+ If absent, this pool will be created.
+
+node_storage_pool_path: Path used by the libvirt storage pool
+ 'node_storage_pool' if it has to be created.
+ Defaults to "/var/lib/libvirt/images".
+
+node_logdir: Folder where to store VM logs.
+ Defaults to "/var/log/libvirt/baremetal_logs".
+
+vm_emulator: Path to emulator executable used to define VMs in libvirt.
+ Defaults to "/usr/bin/qemu-system-x86_64".
+ Generally users should not need to modify this setting,
+ as it is OS-specific and is overwritten by
+ os/distribution-specific defaults in this role when needed.
+
+vm_libvirt_uri: URI to connect to libvirt for networks, storage and VM
+ related actions.
+ Defaults to shell variable 'LIBVIRT_CONNECT_URI', or,
+ if that is not set, to 'qemu:///system'.
+ Note that currently connecting to remote libvirt is
+ not tested and is unsupported.
+
+network_interface: Name of the bridge to create when creating
+ 'vm_network' libvirt network.
+ Defaults to "virbr0".
+ Name and default of this option are chosen to be the same
+ as in 'bifrost-ironic-install' role.
+
+opnfv_vm_network_ip: IP for the 'network_interface' bridge.
+ Defaults to '192.168.122.1'.
+ This setting is applied only when 'vm_network'
+ was absent and is created from scratch.
+
+node_network_netmask: Subnet mask for 'network_interface' bridge.
+ Defaults to '255.255.255.0'.
+ This setting is applied only when 'vm_network'
+ was absent and is created from scratch.
+
+Dependencies
+------------
+
+None at this time.
+
+Example Playbook
+----------------
+
+- hosts: localhost
+ connection: local
+ become: yes
+ gather_facts: yes
+ roles:
+ - role: create-vm-nodes
+
+License
+-------
+
+Copyright (c) 2018 SUSE Linux GmbH.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Author Information
+------------------
+
+mbuil@suse.com
diff --git a/xci/playbooks/roles/create-vm-nodes/defaults/main.yml b/xci/playbooks/roles/create-vm-nodes/defaults/main.yml
new file mode 100644
index 00000000..90ee6e5c
--- /dev/null
+++ b/xci/playbooks/roles/create-vm-nodes/defaults/main.yml
@@ -0,0 +1,27 @@
+---
+# defaults file for create-vm-nodes (branched from bifrost-create-vm-nodes)
+baremetal_json_file: '/tmp/baremetal.json'
+
+# We collect these parameters from the pdf
+vm_nic: "virtio"
+vm_groups: {}
+vm_default_groups: "{{ lookup('env', 'DEFAULT_HOST_GROUPS').split() | default(['baremetal'], true) }}"
+vm_disk_cache: "{{ lookup('env', 'VM_DISK_CACHE') | default('unsafe', true) }}"
+node_names: "{{ lookup('env', 'NODE_NAMES').split() }}"
+
+# NOTE(pas-ha) name and default are chosen to be the same
+# as in 'bifrost-ironic-install' role
+network_interface: "virbr0"
+# NOTE(pas-ha) these correspond to settings for the libvirt network created by default
+vm_network: "{{ lookup('env', 'VM_NET_BRIDGE') | default('default', true) }}"
+node_network_netmask: "255.255.255.0"
+
+node_storage_pool: "{{ lookup('env', 'LIBVIRT_STORAGE_POOL') | default('default', true) }}"
+node_storage_pool_path: "/var/lib/libvirt/images"
+node_logdir: "/var/log/libvirt/baremetal_logs"
+# NOTE(pas-ha) next two are generic values for most OSes, overridden by distro-specific vars
+vm_emulator: "/usr/bin/qemu-system-x86_64"
+# NOTE(pas-ha) not really tested with non-local qemu connections
+vm_libvirt_uri: "{{ lookup('env', 'LIBVIRT_CONNECT_URI') | default('qemu:///system', true) }}"
+
+opnfv_image_path: "/var/lib/libvirt/images"
diff --git a/xci/playbooks/roles/create-vm-nodes/tasks/create_vm.yml b/xci/playbooks/roles/create-vm-nodes/tasks/create_vm.yml
new file mode 100644
index 00000000..d8169c2f
--- /dev/null
+++ b/xci/playbooks/roles/create-vm-nodes/tasks/create_vm.yml
@@ -0,0 +1,166 @@
+---
+# Create a VM and volume for it, save its MAC address
+- shell: "sudo virsh list --all | grep 'shut off' | wc -l"
+ register: num_vms
+
+- name: "Creating VM"
+ block:
+ # NOTE(pas-ha) item here refers to name of the vm
+ - set_fact:
+ vm_name: "{{ node_names[num_vms.stdout | int] }}"
+
+ - set_fact:
+ vm_log_file: "{{ node_logdir }}/{{ vm_name }}_console.log"
+ vm_host_group: "{{ vm_default_groups }}"
+
+ - set_fact:
+ vm_host_group: "{{ vm_default_groups | union(vm_groups[vm_name]) }}"
+ when: vm_groups[vm_name] is defined
+
+ - name: set prealloc arg for Debian
+ set_fact:
+ prealloc: "--prealloc-metadata"
+ when:
+ - ansible_os_family == 'Debian'
+ - vm_libvirt_uri == 'qemu:///system'
+
+ - name: list info on pools
+ virt_pool:
+ command: facts
+ uri: "{{ vm_libvirt_uri }}"
+
+ - name: list existing vms
+ virt:
+ command: list_vms
+ register: existing_vms
+
+ - block:
+ - name: Check if volume exists
+ stat:
+ path: "{{ opnfv_image_path }}/{{ vm_name }}.qcow2"
+ register: _vm_volume_prepared
+
+ # NOTE(pas-ha) Ansible still lacks modules to operate on libvirt volumes
+ # mbuil: Assuming there is only one disk [0]
+ - name: create volume for vm
+ command: >
+ virsh --connect {{ vm_libvirt_uri }}
+ vol-create-as {{ node_storage_pool }} {{ vm_name }}.qcow2
+ {{ item.disks[0].disk_capacity }}
+ --format qcow2 {{ prealloc|default("") }}
+ when:
+ - not _vm_volume_prepared.stat.exists
+ - (vm_name + '.qcow2') not in ansible_libvirt_pools[node_storage_pool].volumes
+
+ - name: set path to the volume created
+ set_fact:
+ vm_volume_path: "{{ ansible_libvirt_pools[node_storage_pool].path }}/{{ vm_name }}.qcow2"
+
+ - name: pre-touch the vm volume
+ file:
+ state: touch
+ path: "{{ vm_volume_path }}"
+ when: vm_libvirt_uri == 'qemu:///system'
+
+ # NOTE(TheJulia): CentOS default installs with an XFS root, and chattr
+ # fails to set +C on XFS. This could be more elegant, however the use
+ # case is for CI testing.
+ - name: set copy-on-write for volume on non-CentOS systems
+ command: chattr +C {{ vm_volume_path }}
+ ignore_errors: yes
+ when:
+ - ansible_distribution != 'CentOS'
+ - vm_libvirt_uri == 'qemu:///system'
+
+ # Fetches the xml descriptor from the template
+ - name: create_vm
+ virt:
+ command: define
+ name: "{{ vm_name }}"
+ uri: "{{ vm_libvirt_uri }}"
+ xml: "{{ lookup('template', 'vm.xml.j2') }}"
+
+ rescue:
+ - name: "Execute `dmesg` to collect debugging output should VM creation fail."
+ command: dmesg
+ - name: >
+ "Execute `virsh capabilities` to collect debugging output
+ should VM creation fail."
+ command: virsh capabilities
+ - name: "Abort due to failed VM creation"
+ fail: >
+ msg="VM creation step failed, please review dmesg
+ output for additional details"
+ when: vm_name not in existing_vms.list_vms
+
+ # TODO(pas-ha) replace 'command: vbmc ...' tasks
+ # with a custom Ansible module using vbmc Python API
+ - name: get list of nodes from virtualbmc
+ command: vbmc list
+ register: vbmc_list
+
+ # NOTE(NobodyCam): Space at the end of the find clause is required for proper matching.
+ - name: delete vm from virtualbmc if it is there
+ command: vbmc delete {{ vm_name }}
+ when: vbmc_list.stdout.find(vm_name) != -1
+
+ - set_fact:
+ virtual_ipmi_port: "{{ (vm_ipmi_port_start|default(623) | int ) + (num_vms.stdout | int ) }}"
+
+ - name: plug vm into vbmc
+ command: vbmc add {{ vm_name }} --libvirt-uri {{ vm_libvirt_uri }} --port {{ virtual_ipmi_port }}
+
+ - name: start virtualbmc
+ command: vbmc start {{ vm_name }}
+
+ - name: get XML of the vm
+ virt:
+ name: "{{ vm_name }}"
+ command: get_xml
+ register: vm_xml
+
+ - name: Fetch the ip
+ set_fact:
+ vm_ip: "{%- for interface in item.interfaces %}{%- if 'native' in (interface.vlan | string) %}{{ interface.address }}{%- endif %}{%- endfor %}"
+
+ # Assumes there is only a single NIC per VM
+ - name: get MAC from vm XML
+ set_fact:
+ vm_mac: "{{ (vm_xml.get_xml | regex_findall(\"<mac address='.*'/>\") | first).split('=') | last | regex_replace(\"['/>]\", '') }}"
+
+ # NOTE(pas-ha) using default username and password set by virtualbmc - "admin" and "password" respectively
+ # see vbmc add --help
+ - name: set the json entry for vm
+ set_fact:
+ vm_data:
+ name: "{{ vm_name }}"
+ uuid: "{{ vm_name | to_uuid }}"
+ host_groups: "{{ vm_host_group }}"
+ driver: "{{ vm_node_driver|default('ipmi') }}"
+ driver_info:
+ power:
+ ipmi_address: "192.168.122.1"
+ ipmi_port: "{{ virtual_ipmi_port }}"
+ ipmi_username: "{{ item.remote_management.user }}"
+ ipmi_password: "{{ item.remote_management.pass }}"
+ nics:
+ - mac: "{{ vm_mac }}"
+ ansible_ssh_host: "{{ vm_ip }}"
+ ipv4_address: "{{ vm_ip }}"
+ properties:
+ cpu_arch: "{{ item.node.arch }}"
+ ram: "{{ item.node.memory.rstrip('G') }}"
+ cpus: "{{ item.node.cpus }}"
+ disk_size: "{{ item.disks[0].disk_capacity.rstrip('G') }}"
+
+ - name: add created vm info
+ set_fact:
+ nodes_json_data: "{{ nodes_json_data | combine({vm_name: vm_data}) }}"
+ when: vm_name != 'opnfv'
+
+ - name: Record OPNFV VM ip
+ set_fact:
+ opnfv_vm_ip: "{{ vm_ip }}"
+ when: vm_name == 'opnfv'
+
+ when: (num_nodes | int) > (num_vms.stdout | int)
diff --git a/xci/playbooks/roles/create-vm-nodes/tasks/download_opnfvimage.yml b/xci/playbooks/roles/create-vm-nodes/tasks/download_opnfvimage.yml
new file mode 100644
index 00000000..a227bc4f
--- /dev/null
+++ b/xci/playbooks/roles/create-vm-nodes/tasks/download_opnfvimage.yml
@@ -0,0 +1,32 @@
+---
+- name: Download the {{ xci_distro }} image checksum file
+ get_url:
+ dest: "{{ xci_cache }}/deployment_image.qcow2.sha256.txt"
+ force: no
+ url: http://artifacts.opnfv.org/releng/xci/images/{{ xci_distro }}.qcow2.sha256.txt
+ timeout: 3000
+- name: Extract checksum
+ shell: awk '{print $1}' "{{ xci_cache }}/deployment_image.qcow2.sha256.txt"
+ register: _image_checksum
+- fail:
+ msg: "Failed to get image checksum"
+ when: _image_checksum == ''
+- set_fact:
+ image_checksum: "{{ _image_checksum.stdout }}"
+- name: Download the {{ xci_distro }} image file
+ get_url:
+ url: http://artifacts.opnfv.org/releng/xci/images/{{ xci_distro }}.qcow2
+ checksum: "sha256:{{ image_checksum }}"
+ timeout: 3000
+ dest: "{{ xci_cache }}/deployment_image.qcow2"
+ force: no
+- name: Set correct mode for deployment_image.qcow2 file
+ file:
+ path: "{{ xci_cache }}/deployment_image.qcow2"
+ mode: '0755'
+ owner: 'root'
+ group: 'root'
+
+- name: Create copy of original deployment image
+ shell: "cp {{ xci_cache }}/deployment_image.qcow2 {{ opnfv_image_path }}/opnfv.qcow2"
+ become: yes
diff --git a/xci/playbooks/roles/create-vm-nodes/tasks/main.yml b/xci/playbooks/roles/create-vm-nodes/tasks/main.yml
new file mode 100644
index 00000000..7e0090e4
--- /dev/null
+++ b/xci/playbooks/roles/create-vm-nodes/tasks/main.yml
@@ -0,0 +1,49 @@
+---
+# baremetal_json_file could be the file coming from pdf/idf
+
+- name: "Load distribution defaults"
+ include_vars: "{{ ansible_os_family | lower }}.yml"
+
+# From the previous list
+- name: "Install required packages"
+ package:
+ name: "{{ required_packages }}"
+
+- include_tasks: prepare_libvirt.yml
+- include_tasks: download_opnfvimage.yml
+
+- name: create placeholder var for vm entries in JSON format
+ set_fact:
+ nodes_json_data: {}
+
+# First we create the opnfv_vm
+- include_tasks: create_vm.yml
+ with_items: "{{ [opnfv_vm] + nodes }}"
+
+- name: Start the opnfv vm
+ virt:
+ command: start
+ name: opnfv
+
+- name: remove previous baremetal data file
+ file:
+ state: absent
+ path: "{{ baremetal_json_file }}"
+
+# We got nodes_json_data from the create_vm playbook
+- name: write to baremetal json file
+ copy:
+ dest: "{{ baremetal_json_file }}"
+ content: "{{ nodes_json_data | to_nice_json }}"
+
+- debug: var=nodes_json_data
+
+- name: >
+ "Set file permissions such that the baremetal data file
+ can be read by the user executing Ansible"
+ file:
+ path: "{{ baremetal_json_file }}"
+ owner: "{{ ansible_env.SUDO_USER }}"
+ when: >
+ ansible_env.SUDO_USER is defined and
+ baremetal_json_file != ""
diff --git a/xci/playbooks/roles/create-vm-nodes/tasks/prepare_libvirt.yml b/xci/playbooks/roles/create-vm-nodes/tasks/prepare_libvirt.yml
new file mode 100644
index 00000000..e09e2d6b
--- /dev/null
+++ b/xci/playbooks/roles/create-vm-nodes/tasks/prepare_libvirt.yml
@@ -0,0 +1,119 @@
+---
+- name: "Restart libvirt service"
+ service: name="{{libvirt_service_name}}" state=restarted
+
+# NOTE(Shrews) We need to enable ip forwarding for the libvirt bridge to
+# operate properly with dnsmasq. This should be done before starting dnsmasq.
+- name: "Enable IP forwarding in sysctl"
+ sysctl:
+ name: "net.ipv4.ip_forward"
+ value: 1
+ sysctl_set: yes
+ state: present
+ reload: yes
+
+# NOTE(Shrews) Ubuntu packaging+apparmor issue prevents libvirt from loading
+# the ROM from /usr/share/misc.
+- name: "Look for sgabios in {{ sgabios_dir }}"
+ stat: path={{ sgabios_dir }}/sgabios.bin
+ register: test_sgabios_qemu
+
+- name: "Look for sgabios in /usr/share/misc"
+ stat: path=/usr/share/misc/sgabios.bin
+ register: test_sgabios_misc
+
+- name: "Place sgabios.bin"
+ command: cp /usr/share/misc/sgabios.bin /usr/share/qemu/sgabios.bin
+ when: >
+ test_sgabios_qemu == false and
+ test_sgabios_misc == true
+
+# NOTE(TheJulia): In order to prevent conflicts, stop
+# dnsmasq to prevent conflicts with libvirt restarting.
+# TODO(TheJulia): We shouldn't need to do this, but the
+# libvirt dhcp instance conflicts with our specific config
+# and taking this path allows us to not refactor dhcp at
+# this moment. Our DHCP serving should be refactored
+# so we don't need to do this.
+- name: "Stop default dnsmasq service"
+ service:
+ name: dnsmasq
+ state: stopped
+ ignore_errors: true
+
+# NOTE(TheJulia): Seems if you test in a VM, this might
+# be helpful if you installed your host originally
+# with the default 192.168.122.0/24 network
+- name: destroy libvirt network
+ virt_net:
+ name: "{{ vm_network }}"
+ state: absent
+ uri: "{{ vm_libvirt_uri }}"
+
+- name: ensure libvirt network is present
+ virt_net:
+ name: "{{ vm_network }}"
+ state: present
+ xml: "{{ lookup('template', 'net.xml.j2') }}"
+ uri: "{{ vm_libvirt_uri }}"
+
+- name: find facts on libvirt networks
+ virt_net:
+ command: facts
+ uri: "{{ vm_libvirt_uri }}"
+
+# NOTE(pas-ha) yet another place where non-local libvirt will not work
+- name: "Delete network interface if virtual network is not active"
+ command: ip link del {{ ansible_libvirt_networks[vm_network].bridge }}
+ when:
+ - ansible_libvirt_networks[vm_network].state != 'active'
+ - vm_libvirt_uri == 'qemu:///system'
+ ignore_errors: yes
+
+- name: set libvirt network to autostart
+ virt_net:
+ name: "{{ vm_network }}"
+ autostart: yes
+ uri: "{{ vm_libvirt_uri }}"
+
+- name: ensure libvirt network is running
+ virt_net:
+ name: "{{ vm_network }}"
+ state: active
+ uri: "{{ vm_libvirt_uri }}"
+
+- name: get libvirt network status
+ virt_net:
+ name: "{{ vm_network }}"
+ command: status
+ uri: "{{ vm_libvirt_uri }}"
+ register: test_vm_net_status
+
+- name: fail if libvirt network is not active
+ assert:
+ that: test_vm_net_status.status == 'active'
+
+- name: define a libvirt pool if not set
+ virt_pool:
+ name: "{{ node_storage_pool }}"
+ state: present
+ uri: "{{ vm_libvirt_uri }}"
+ xml: "{{ lookup('template', 'pool_dir.xml.j2') }}"
+
+- name: ensure libvirt pool is running
+ virt_pool:
+ name: "{{ node_storage_pool }}"
+ state: active
+ autostart: yes
+ uri: "{{ vm_libvirt_uri }}"
+
+- name: create dir for bm logs
+ file:
+ state: directory
+ path: "{{ node_logdir }}"
+ recurse: yes
+ mode: "0755"
+
+- name: install virtualbmc
+ pip:
+ name: virtualbmc
diff --git a/xci/playbooks/roles/create-vm-nodes/templates/net.xml.j2 b/xci/playbooks/roles/create-vm-nodes/templates/net.xml.j2
new file mode 100644
index 00000000..3c082170
--- /dev/null
+++ b/xci/playbooks/roles/create-vm-nodes/templates/net.xml.j2
@@ -0,0 +1,18 @@
+<network>
+ <name>{{ vm_network }}</name>
+ <forward mode='nat'>
+ <nat>
+ <port start='1024' end='65535'/>
+ </nat>
+ </forward>
+ <bridge name='{{ network_interface }}' stp='on' delay='0'/>
+ <ip address='{{ nodes[0].remote_management.address.split(':')[0] }}' netmask='{{ node_network_netmask }}'>
+ <dhcp>
+ {%- for interface in opnfv_vm.interfaces %}
+ {%- if 'native' in (interface.vlan | string) %}
+ <host mac="{{ interface.mac_address }}" ip="{{ interface.address }}"/>
+ {%- endif %}
+ {%- endfor %}
+ </dhcp>
+ </ip>
+</network>
diff --git a/xci/playbooks/roles/create-vm-nodes/templates/pool_dir.xml.j2 b/xci/playbooks/roles/create-vm-nodes/templates/pool_dir.xml.j2
new file mode 100644
index 00000000..e4645deb
--- /dev/null
+++ b/xci/playbooks/roles/create-vm-nodes/templates/pool_dir.xml.j2
@@ -0,0 +1,7 @@
+<pool type='dir'>
+ <name>{{ node_storage_pool }}</name>
+ <target>
+ <path>{{ node_storage_pool_path }}</path>
+ </target>
+</pool>
+
diff --git a/xci/playbooks/roles/create-vm-nodes/templates/vm.xml.j2 b/xci/playbooks/roles/create-vm-nodes/templates/vm.xml.j2
new file mode 100644
index 00000000..c44fa6aa
--- /dev/null
+++ b/xci/playbooks/roles/create-vm-nodes/templates/vm.xml.j2
@@ -0,0 +1,76 @@
+<domain type='{{ vm_domain_type }}'>
+ <name>{{ vm_name }}</name>
+ <memory unit='GiB'>{{ item.node.memory.rstrip('G') }}</memory>
+ <vcpu>{{ item.node.cpus }}</vcpu>
+ <os>
+ <type arch='{{ item.node.arch }}' machine='{{ item.node.model }}'>hvm</type>
+ {%- if 'opnfv' in vm_name -%}
+ <boot dev='hd'/>
+ {%- else -%}
+ <boot dev='network'/>
+ {% endif -%}
+ <bootmenu enable='no'/>
+ <bios useserial='yes' rebootTimeout='10000'/>
+ </os>
+ <features>
+ <acpi/>
+ <apic/>
+ <pae/>
+ </features>
+ <cpu mode='{{ item.node.cpu_cflags }}'>
+ <model fallback='allow'/>
+ </cpu>
+ <clock offset='utc'/>
+ <on_poweroff>destroy</on_poweroff>
+ <on_reboot>restart</on_reboot>
+ <on_crash>restart</on_crash>
+ <devices>
+ <emulator>{{ vm_emulator }}</emulator>
+ <disk type='file' device='disk'>
+ <driver name='qemu' type='qcow2' cache='{{ vm_disk_cache }}'/>
+ <source file='{{ vm_volume_path }}'/>
+ <target dev='vda' bus='virtio'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
+ </disk>
+ <controller type='ide' index='0'>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x1'/>
+ </controller>
+ {% set native_interfaces = [] %}
+ {%- for interface in item.interfaces %}
+ {%- if 'native' in (interface.vlan | string) %}
+ {%- set _ = native_interfaces.append(interface) %}
+ {%- endif %}
+ {%- endfor %}
+ {%- for interface in native_interfaces -%}
+ <interface type='network'>
+ <source network='{{ vm_network }}'/>
+ <model type='{{ vm_nic }}'/>
+ <mac address='{{ interface.mac_address }}'/>
+ </interface>
+ {% endfor -%}
+ <input type='mouse' bus='ps2'/>
+ <graphics type='vnc' port='-1' autoport='yes'/>
+ <video>
+ <model type='cirrus' vram='9216' heads='1'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
+ </video>
+ <serial type='file'>
+ <source path='{{ vm_log_file }}'/>
+ <target port='0'/>
+ <alias name='serial0'/>
+ </serial>
+ <serial type='pty'>
+ <source path='/dev/pts/49'/>
+ <target port='1'/>
+ <alias name='serial1'/>
+ </serial>
+ <console type='file'>
+ <source path='{{ vm_log_file }}'/>
+ <target type='serial' port='0'/>
+ <alias name='serial0'/>
+ </console>
+ <memballoon model='virtio'>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
+ </memballoon>
+ </devices>
+</domain>
diff --git a/xci/playbooks/roles/create-vm-nodes/vars/debian.yml b/xci/playbooks/roles/create-vm-nodes/vars/debian.yml
new file mode 100644
index 00000000..bcfc47d5
--- /dev/null
+++ b/xci/playbooks/roles/create-vm-nodes/vars/debian.yml
@@ -0,0 +1,13 @@
+---
+sgabios_dir: /usr/share/qemu/
+libvirt_service_name: libvirt-bin
+required_packages:
+ - libvirt-bin
+ - qemu-utils
+ - qemu-kvm
+ - qemu-system-x86
+ - sgabios
+ - pkg-config
+ - libvirt-dev
+ - python-lxml
+ - python-libvirt
diff --git a/xci/playbooks/roles/create-vm-nodes/vars/redhat.yml b/xci/playbooks/roles/create-vm-nodes/vars/redhat.yml
new file mode 100644
index 00000000..2b285110
--- /dev/null
+++ b/xci/playbooks/roles/create-vm-nodes/vars/redhat.yml
@@ -0,0 +1,17 @@
+---
+sgabios_dir: /usr/share/sgabios/
+libvirt_service_name: libvirtd
+required_packages:
+ - qemu-img
+ - qemu-kvm-tools
+ - qemu-kvm
+ - qemu-kvm-common
+ - qemu-system-x86
+ - sgabios-bin
+ - libvirt
+ - libvirt-client
+ - libvirt-daemon
+ - pkgconfig
+ - libvirt-devel
+ - libvirt-python
+ - python-lxml
diff --git a/xci/playbooks/roles/create-vm-nodes/vars/suse.yml b/xci/playbooks/roles/create-vm-nodes/vars/suse.yml
new file mode 100644
index 00000000..7e4c41ef
--- /dev/null
+++ b/xci/playbooks/roles/create-vm-nodes/vars/suse.yml
@@ -0,0 +1,15 @@
+---
+sgabios_dir: /usr/share/sgabios/
+libvirt_service_name: libvirtd
+required_packages:
+ - qemu-tools
+ - qemu-kvm
+ - qemu-x86
+ - qemu-sgabios
+ - libvirt
+ - libvirt-client
+ - libvirt-daemon
+ - pkg-config
+ - libvirt-devel
+ - python-lxml
+ - libvirt-python