-rwxr-xr-x  xci/config/aio-vars  4
-rwxr-xr-x  xci/config/ha-vars  6
-rwxr-xr-x  xci/config/mini-vars  6
-rwxr-xr-x  xci/config/noha-vars  6
-rwxr-xr-x  xci/config/pinned-versions  2
-rwxr-xr-x  xci/files/xci-destroy-env.sh  2
-rw-r--r--  xci/files/xci-lib.sh  7
-rw-r--r--  xci/infra/bifrost/infra-provision.sh  68
-rw-r--r--  xci/infra/bifrost/playbooks/opnfv-virtual.yml (renamed from xci/infra/bifrost/playbooks/opnfv-virtual.yaml)  69
-rw-r--r--  xci/infra/bifrost/playbooks/xci-create-virtual.yml (renamed from xci/infra/bifrost/playbooks/bootstrap-bifrost.yml)  32
-rw-r--r--  xci/infra/bifrost/playbooks/xci-prepare-virtual.yml  93
-rwxr-xr-x  xci/infra/bifrost/scripts/bifrost-env.sh  33
-rwxr-xr-x  xci/infra/bifrost/scripts/bifrost-provision.sh  176
-rw-r--r--  xci/infra/bifrost/vars/debian.yml  19
-rw-r--r--  xci/infra/bifrost/vars/redhat.yml  19
-rw-r--r--  xci/infra/bifrost/vars/suse.yml  19
-rw-r--r--  xci/playbooks/manage-ssh-keys.yml  9
-rw-r--r--  xci/playbooks/roles/create-vm-nodes/README.md  165
-rw-r--r--  xci/playbooks/roles/create-vm-nodes/defaults/main.yml  27
-rw-r--r--  xci/playbooks/roles/create-vm-nodes/tasks/create_vm.yml  166
-rw-r--r--  xci/playbooks/roles/create-vm-nodes/tasks/download_opnfvimage.yml  32
-rw-r--r--  xci/playbooks/roles/create-vm-nodes/tasks/main.yml  49
-rw-r--r--  xci/playbooks/roles/create-vm-nodes/tasks/prepare_libvirt.yml  119
-rw-r--r--  xci/playbooks/roles/create-vm-nodes/templates/net.xml.j2  18
-rw-r--r--  xci/playbooks/roles/create-vm-nodes/templates/pool_dir.xml.j2  7
-rw-r--r--  xci/playbooks/roles/create-vm-nodes/templates/vm.xml.j2  76
-rw-r--r--  xci/playbooks/roles/create-vm-nodes/vars/debian.yml  13
-rw-r--r--  xci/playbooks/roles/create-vm-nodes/vars/redhat.yml  17
-rw-r--r--  xci/playbooks/roles/create-vm-nodes/vars/suse.yml  15
-rw-r--r--  xci/var/opnfv_vm.yml  67
30 files changed, 1125 insertions, 216 deletions
diff --git a/xci/config/aio-vars b/xci/config/aio-vars
index 1d2e4f96..cff181a9 100755
--- a/xci/config/aio-vars
+++ b/xci/config/aio-vars
@@ -9,8 +9,8 @@
#-------------------------------------------------------------------------------
# Configure VM Nodes
#-------------------------------------------------------------------------------
-export TEST_VM_NUM_NODES=1
-export TEST_VM_NODE_NAMES=opnfv
+export NUM_NODES=1
+export NODE_NAMES=opnfv
export VM_DOMAIN_TYPE=${VM_DOMAIN_TYPE:-kvm}
export VM_CPU=${VM_CPU:-8}
export VM_DISK=${VM_DISK:-80}
diff --git a/xci/config/ha-vars b/xci/config/ha-vars
index 131de2a7..3440a855 100755
--- a/xci/config/ha-vars
+++ b/xci/config/ha-vars
@@ -9,9 +9,9 @@
#-------------------------------------------------------------------------------
# Configure VM Nodes
#-------------------------------------------------------------------------------
-export TEST_VM_NUM_NODES=6
-[[ "$INSTALLER_TYPE" == "osa" ]] && export TEST_VM_NODE_NAMES="opnfv controller00 controller01 controller02 compute00 compute01"
-[[ "$INSTALLER_TYPE" == "kubespray" ]] && export TEST_VM_NODE_NAMES="opnfv master1 master2 master3 node1 node2"
+export NUM_NODES=6
+[[ "$INSTALLER_TYPE" == "osa" ]] && export NODE_NAMES="opnfv controller00 controller01 controller02 compute00 compute01"
+[[ "$INSTALLER_TYPE" == "kubespray" ]] && export NODE_NAMES="opnfv master1 master2 master3 node1 node2"
export VM_DOMAIN_TYPE=${VM_DOMAIN_TYPE:-kvm}
export VM_CPU=${VM_CPU:-6}
export VM_DISK=${VM_DISK:-80}
diff --git a/xci/config/mini-vars b/xci/config/mini-vars
index 7d2b227b..9e7e6180 100755
--- a/xci/config/mini-vars
+++ b/xci/config/mini-vars
@@ -9,9 +9,9 @@
#-------------------------------------------------------------------------------
# Configure VM Nodes
#-------------------------------------------------------------------------------
-export TEST_VM_NUM_NODES=3
-[[ "$INSTALLER_TYPE" == "osa" ]] && export TEST_VM_NODE_NAMES="opnfv controller00 compute00"
-[[ "$INSTALLER_TYPE" == "kubespray" ]] && export TEST_VM_NODE_NAMES="opnfv master1 node1"
+export NUM_NODES=3
+[[ "$INSTALLER_TYPE" == "osa" ]] && export NODE_NAMES="opnfv controller00 compute00"
+[[ "$INSTALLER_TYPE" == "kubespray" ]] && export NODE_NAMES="opnfv master1 node1"
export VM_DOMAIN_TYPE=${VM_DOMAIN_TYPE:-kvm}
export VM_CPU=${VM_CPU:-6}
export VM_DISK=${VM_DISK:-80}
diff --git a/xci/config/noha-vars b/xci/config/noha-vars
index 8d30a243..2f3db993 100755
--- a/xci/config/noha-vars
+++ b/xci/config/noha-vars
@@ -9,9 +9,9 @@
#-------------------------------------------------------------------------------
# Configure VM Nodes
#-------------------------------------------------------------------------------
-export TEST_VM_NUM_NODES=4
-[[ "$INSTALLER_TYPE" == "osa" ]] && export TEST_VM_NODE_NAMES="opnfv controller00 compute00 compute01"
-[[ "$INSTALLER_TYPE" == "kubespray" ]] && export TEST_VM_NODE_NAMES="opnfv master1 node1 node2"
+export NUM_NODES=4
+[[ "$INSTALLER_TYPE" == "osa" ]] && export NODE_NAMES="opnfv controller00 compute00 compute01"
+[[ "$INSTALLER_TYPE" == "kubespray" ]] && export NODE_NAMES="opnfv master1 node1 node2"
export VM_DOMAIN_TYPE=${VM_DOMAIN_TYPE:-kvm}
export VM_CPU=${VM_CPU:-6}
export VM_DISK=${VM_DISK:-80}
diff --git a/xci/config/pinned-versions b/xci/config/pinned-versions
index da82c9cd..5ef0c7c2 100755
--- a/xci/config/pinned-versions
+++ b/xci/config/pinned-versions
@@ -28,7 +28,7 @@ export OPNFV_RELENG_VERSION="master"
# use functest-healthcheck image that is known to work and contains the original list of testcases
export OPNFV_FUNCTEST_HEALTHCHECK_DOCKER_IMAGE_DIGEST="sha256:faa1ec5778ac1580cc46f0e4f5abec24026868b95fc6fc3ae6023275dc980c2d"
# HEAD of bifrost "master" as of 13.02.2018
-export OPENSTACK_BIFROST_VERSION=${OPENSTACK_BIFROST_VERSION:-"28b6b8c96f89532bbddeca513285e6c00db89205"}
+export OPENSTACK_BIFROST_VERSION=${OPENSTACK_BIFROST_VERSION:-"81e48e7b488c15516503b2b08f087f4a7ae9a673"}
# HEAD of ironic "master" as of 13.02.2018
export BIFROST_IRONIC_VERSION=${BIFROST_IRONIC_VERSION:-"9b8440aa318e4883a74ef8640ad5409dd22858a9"}
# HEAD of ironic-client "master" as of 13.02.2018
diff --git a/xci/files/xci-destroy-env.sh b/xci/files/xci-destroy-env.sh
index 3de21795..c95ea838 100755
--- a/xci/files/xci-destroy-env.sh
+++ b/xci/files/xci-destroy-env.sh
@@ -40,7 +40,7 @@ fi
# Destroy all XCI VMs on all flavors
for varfile in ${flavors[@]}; do
source ${XCI_PATH}/xci/config/${varfile}-vars
- for vm in ${TEST_VM_NODE_NAMES}; do
+ for vm in ${NODE_NAMES}; do
if which virsh &>/dev/null; then
virsh destroy $vm &>/dev/null || true
virsh undefine $vm &>/dev/null || true
diff --git a/xci/files/xci-lib.sh b/xci/files/xci-lib.sh
index f55701a2..060dc267 100644
--- a/xci/files/xci-lib.sh
+++ b/xci/files/xci-lib.sh
@@ -13,7 +13,6 @@
function bootstrap_xci_env() {
# Declare our virtualenv
export XCI_VENV=${XCI_PATH}/venv/
-
# source user vars
source $XCI_PATH/xci/config/user-vars
# source pinned versions
@@ -48,6 +47,7 @@ function install_ansible() {
net-tools
python-devel
python
+ python-pyyaml
venv
wget
curl
@@ -69,6 +69,7 @@ function install_ansible() {
[pip]=python-pip
[python]=python
[python-devel]=python-devel
+ [python-pyyaml]=python-PyYAML
[venv]=python-virtualenv
[wget]=wget
[curl]=curl
@@ -97,6 +98,7 @@ function install_ansible() {
[pip]=python-pip
[python]=python-minimal
[python-devel]=libpython-dev
+ [python-pyyaml]=python-yaml
[venv]=python-virtualenv
[wget]=wget
[curl]=curl
@@ -120,6 +122,7 @@ function install_ansible() {
[pip]=python2-pip
[python]=python
[python-devel]=python-devel
+ [python-pyyaml]=PyYAML
[venv]=python-virtualenv
[wget]=wget
[curl]=curl
@@ -155,7 +158,7 @@ function install_ansible() {
# We are inside the virtualenv now so we should be good to use pip and python from it.
pip -q install --upgrade pip==9.0.3 # We need a version which supports the '-c' parameter
- pip -q install --upgrade -c $uc -c $osa_uc ara virtualenv pip setuptools ansible==$XCI_ANSIBLE_PIP_VERSION ansible-lint==3.4.21
+ pip -q install --upgrade -c $uc -c $osa_uc ara virtualenv pip setuptools shade ansible==$XCI_ANSIBLE_PIP_VERSION ansible-lint==3.4.21
ara_location=$(python -c "import os,ara; print(os.path.dirname(ara.__file__))")
export ANSIBLE_CALLBACK_PLUGINS="/etc/ansible/roles/plugins/callback:${ara_location}/plugins/callbacks"
diff --git a/xci/infra/bifrost/infra-provision.sh b/xci/infra/bifrost/infra-provision.sh
index 9c3adfc2..17eb4158 100644
--- a/xci/infra/bifrost/infra-provision.sh
+++ b/xci/infra/bifrost/infra-provision.sh
@@ -1,3 +1,11 @@
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 SUSE LINUX GmbH.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
#-------------------------------------------------------------------------------
# Start provisioning VM nodes
#-------------------------------------------------------------------------------
@@ -8,14 +16,64 @@
# - destroys VMs, removes ironic db, leases, logs
# - creates and provisions VMs for the chosen flavor
#-------------------------------------------------------------------------------
+
BIFROST_ROOT_DIR="$(dirname $(realpath ${BASH_SOURCE[0]}))"
+export ANSIBLE_ROLES_PATH="$HOME/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles:${XCI_PATH}/xci/playbooks/roles:${XCI_CACHE}/repos/bifrost/playbooks/roles"
+export ANSIBLE_LIBRARY="$HOME/.ansible/plugins/modules:/usr/share/ansible/plugins/modules:${XCI_CACHE}/repos/bifrost/playbooks/library"
-echo "Info: Starting provisining VM nodes using openstack/bifrost"
+echo "Info: Create XCI VM resources"
echo "-------------------------------------------------------------------------"
-cd $BIFROST_ROOT_DIR/playbooks/
-ansible-playbook ${XCI_ANSIBLE_PARAMS} -i "localhost," bootstrap-bifrost.yml
-cd ${XCI_CACHE}/repos/bifrost
-bash ./scripts/bifrost-provision.sh
+
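+# Create the XCI VMs (the opnfv VM plus the node VMs) and generate the
+# bifrost inventory file (/tmp/baremetal.json)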
+ansible-playbook ${XCI_ANSIBLE_PARAMS} \
+ -i ${XCI_PATH}/xci/playbooks/dynamic_inventory.py \
+ -e num_nodes=${NUM_NODES} \
+ -e vm_domain_type=${VM_DOMAIN_TYPE} \
+ -e baremetal_json_file=/tmp/baremetal.json \
+ -e xci_distro=${XCI_DISTRO} \
+ ${BIFROST_ROOT_DIR}/playbooks/xci-create-virtual.yml
+
+
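+# Prepare the deployment host and the opnfv VM so bifrost can run from it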
+ansible-playbook ${XCI_ANSIBLE_PARAMS} \
+ --private-key=${XCI_PATH}/xci/scripts/vm/id_rsa_for_dib \
+ --user=devuser \
+ -i ${XCI_PATH}/xci/playbooks/dynamic_inventory.py \
+ ${BIFROST_ROOT_DIR}/playbooks/xci-prepare-virtual.yml
+
+source ${XCI_CACHE}/repos/bifrost/scripts/bifrost-env.sh
+
+# This is hardcoded to delegate to localhost but we really need to delegate to opnfv instead.
+sed -i "/delegate_to:/d" ${XCI_CACHE}/repos/bifrost/playbooks/roles/bifrost-deploy-nodes-dynamic/tasks/main.yml
+
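+# Enroll, inspect and provision the nodes by running bifrost against the
+# opnfv VM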
+ansible-playbook ${XCI_ANSIBLE_PARAMS} \
+ --user=devuser \
+ -i ${XCI_PATH}/xci/playbooks/dynamic_inventory.py \
+ -i ${XCI_CACHE}/repos/bifrost/playbooks/inventory/bifrost_inventory.py \
+ -e use_cirros=false \
+ -e testing_user=root \
+ -e test_vm_num_nodes=${NUM_NODES} \
+ -e test_vm_cpu='host-model' \
+ -e inventory_dhcp=false \
+ -e inventory_dhcp_static_ip=false \
+ -e enable_inspector=true \
+ -e inspect_nodes=true \
+ -e download_ipa=true \
+ -e create_ipa_image=false \
+ -e write_interfaces_file=true \
+ -e ipv4_gateway=192.168.122.1 \
+ -e wait_timeout=3600 \
+ -e enable_keystone=false \
+ -e ironicinspector_source_install=true \
+ -e ironicinspector_git_branch=${BIFROST_IRONIC_INSPECTOR_VERSION:-master} \
+ -e ironicinspectorclient_source_install=true \
+ -e ironicinspectorclient_git_branch=${BIFROST_IRONIC_INSPECTOR_CLIENT_VERSION:-master} \
+ -e ironicclient_source_install=true \
+ -e ironicclient_git_branch=${BIFROST_IRONIC_CLIENT_VERSION:-master} \
+ -e ironic_git_branch=${BIFROST_IRONIC_VERSION:-master} \
+ -e use_prebuilt_images=${BIFROST_USE_PREBUILT_IMAGES:-false} \
+ -e xci_distro=${XCI_DISTRO} \
+ -e ironic_url="http://192.168.122.2:6385/" \
+ ${BIFROST_ROOT_DIR}/playbooks/opnfv-virtual.yml
+
echo "-----------------------------------------------------------------------"
echo "Info: VM nodes are provisioned!"
echo "-----------------------------------------------------------------------"
diff --git a/xci/infra/bifrost/playbooks/opnfv-virtual.yaml b/xci/infra/bifrost/playbooks/opnfv-virtual.yml
index bb0daff6..68d76cfc 100644
--- a/xci/infra/bifrost/playbooks/opnfv-virtual.yaml
+++ b/xci/infra/bifrost/playbooks/opnfv-virtual.yml
@@ -7,11 +7,12 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
-- hosts: localhost
- connection: local
+- hosts: opnfv
name: "Host and Ironic bootstrapping"
become: yes
gather_facts: yes
+ vars_files:
+ - "../vars/{{ ansible_os_family | lower }}.yml"
pre_tasks:
- name: Remove pre-existing leases file
file: path=/var/lib/misc/dnsmasq.leases state=absent
@@ -52,6 +53,19 @@
owner: 'root'
group: 'root'
when: use_prebuilt_images | bool == true
+ - name: Ensure /etc/hosts has good defaults
+ lineinfile:
+ create: yes
+ dest: "/etc/hosts"
+ regexp: "{{ item.regexp }}.*({{ ansible_hostname }}|localhost).*"
+ line: "{{ item.contents }}"
+ with_items:
+ - { regexp: '^127\.0\.0\.1', contents: '127.0.0.1 {{ ansible_hostname }} {{ ansible_fqdn }} localhost' }
+ - { regexp: '^::1', contents: '::1 {{ ansible_hostname }} {{ ansible_fqdn }} localhost ipv6-localhost ipv6-loopback' }
+ - name: Install required packages
+ package:
+ name: "{{ bifrost_required_devel_packages }}"
+ state: present
roles:
- role: bifrost-prep-for-install
@@ -59,7 +73,9 @@
- role: bifrost-keystone-install
- role: bifrost-ironic-install
cleaning: false
- testing: true
+ testing: false
+ enabled_hardware_types: ipmi
+ network_interface: "{{ ansible_default_ipv4.interface }}"
# NOTE(TheJulia): While the next step creates a ramdisk, some elements
# do not support ramdisk-image-create as they invoke steps to cleanup
# the ramdisk which causes ramdisk-image-create to believe it failed.
@@ -89,7 +105,6 @@
- transform_boot_image | bool == false
- use_prebuilt_images | bool == false
- role: bifrost-keystone-client-config
- user: "{{ ansible_env.SUDO_USER }}"
clouds:
bifrost:
config_username: "{{ ironic.keystone.default_username }}"
@@ -107,18 +122,36 @@
vars:
multinode_testing: "{{ inventory_dhcp | bool == true }}"
become: no
- connection: local
- gather_facts: yes
- pre_tasks:
+ gather_facts: False
+ tasks:
+ - name: Gathering facts
+ setup:
+ delegate_to: opnfv
+ delegate_facts: False
- name: "Override default bifrost DNS if we are behind a proxy"
set_fact:
ipv4_nameserver: "192.168.122.1"
when: lookup('env','http_proxy') != ''
- roles:
- - role: ironic-enroll-dynamic
- - { role: ironic-inspect-node, when: inspect_nodes | default('false') | bool == true }
- - role: bifrost-configdrives-dynamic
- - role: bifrost-deploy-nodes-dynamic
+ - name: Find network interface in the OPNFV node
+ set_fact:
+ network_interface: "{{ ansible_default_ipv4.interface }}"
+ - import_role:
+ name: ironic-enroll-dynamic
+ private: True
+ delegate_to: opnfv
+ - import_role:
+ name: ironic-inspect-node
+ private: True
+ delegate_to: opnfv
+ when: inspect_nodes | default('false') | bool == true
+ - import_role:
+ name: bifrost-configdrives-dynamic
+ private: True
+ delegate_to: opnfv
+ - import_role:
+ name: bifrost-deploy-nodes-dynamic
+ private: True
+ delegate_to: opnfv
environment:
http_proxy: "{{ lookup('env','http_proxy') }}"
https_proxy: "{{ lookup('env','https_proxy') }}"
@@ -127,7 +160,13 @@
- hosts: baremetal
name: "Deploy machines."
become: no
- connection: local
serial: 1
- roles:
- - role: bifrost-prepare-for-test-dynamic
+ gather_facts: False
+ tasks:
+ #- name: Gathering facts
+ #setup:
+ #delegate_to: opnfv
+ #delegate_facts: False
+ - import_role:
+ name: bifrost-prepare-for-test-dynamic
+ delegate_to: opnfv
diff --git a/xci/infra/bifrost/playbooks/bootstrap-bifrost.yml b/xci/infra/bifrost/playbooks/xci-create-virtual.yml
index 2153b3b3..043907fe 100644
--- a/xci/infra/bifrost/playbooks/bootstrap-bifrost.yml
+++ b/xci/infra/bifrost/playbooks/xci-create-virtual.yml
@@ -1,29 +1,42 @@
---
# SPDX-license-identifier: Apache-2.0
##############################################################################
-# Copyright (c) 2017 Ericsson AB and others.
+# Copyright (c) 2018 SUSE LINUX GmbH.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-- hosts: localhost
- connection: local
- gather_facts: true
+
+- hosts: deployment_host
+ name: "Bootstrap XCI hardware resources and prepare provisioning environment"
+ gather_facts: yes
vars_files:
+ - "{{ xci_path }}/xci/var/pdf.yml"
+ - "{{ xci_path }}/xci/var/opnfv_vm.yml"
- "{{ xci_path }}/xci/var/opnfv.yml"
pre_tasks:
- name: Load distribution variables
include_vars:
file: "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml"
roles:
+ - role: create-vm-nodes
+ become: yes
- role: clone-repository
project: "opnfv/bifrost"
repo: "{{ openstack_bifrost_git_url }}"
dest: "{{ xci_cache }}/repos/bifrost"
version: "{{ openstack_bifrost_version }}"
-
tasks:
+ - name: Wait for host to come back to life
+ local_action:
+ module: wait_for
+ host: "{{ opnfv_vm_ip }}"
+ delay: 15
+ state: started
+ port: 22
+ connect_timeout: 10
+ timeout: 180
- name: Load distribution variables
include_vars:
file: "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml"
@@ -40,3 +53,12 @@
copy:
src: "{{ xci_path}}/xci/infra/bifrost/"
dest: "{{ xci_cache }}/repos/bifrost"
+ - name: "Ensure /etc/hosts has good defaults"
+ lineinfile:
+ dest: "/etc/hosts"
+ regexp: "{{ item.regexp }}.*({{ ansible_hostname }}|localhost).*"
+ line: "{{ item.contents }}"
+ become: yes
+ with_items:
+ - { regexp: '^127\.0\.0\.1', contents: '127.0.0.1 {{ ansible_hostname }} {{ ansible_fqdn }} localhost' }
+ - { regexp: '^::1', contents: '::1 {{ ansible_hostname }} {{ ansible_fqdn }} localhost ipv6-localhost ipv6-loopback' }
diff --git a/xci/infra/bifrost/playbooks/xci-prepare-virtual.yml b/xci/infra/bifrost/playbooks/xci-prepare-virtual.yml
new file mode 100644
index 00000000..b4ad8c0c
--- /dev/null
+++ b/xci/infra/bifrost/playbooks/xci-prepare-virtual.yml
@@ -0,0 +1,93 @@
+- name: Prepare deployment host
+ hosts: deployment_host
+ gather_facts: True
+ tasks:
+ - name: Ensure common private key has correct permissions
+ file:
+ path: "{{ xci_path }}/xci/scripts/vm/id_rsa_for_dib"
+ mode: "0600"
+
+ - name: Remove host from known_hosts file if necessary
+ shell:
+ ssh-keygen -R {{ hostvars['opnfv'].ip }}
+ failed_when: false
+
+- name: Prepare the OPNFV host
+ hosts: opnfv
+ gather_facts: True
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+ tasks:
+ - name: Copy bifrost inventory file
+ copy:
+ src: /tmp/baremetal.json
+ dest: /tmp/baremetal.json
+
+ - name: Configure SSH key for devuser
+ user:
+ name: devuser
+ generate_ssh_key: yes
+ ssh_key_bits: 2048
+ ssh_key_comment: xci
+ ssh_key_type: rsa
+ state: present
+
+ - name: Determine local user
+ become: no
+ local_action: command whoami
+ changed_when: False
+ register: _ansible_user
+
+ - name: Fetch local SSH key
+ delegate_to: localhost
+ become: no
+ slurp:
+ src: "/home/{{ _ansible_user.stdout }}/.ssh/id_rsa.pub"
+ register: _local_ssh_key
+
+ - name: "Configure {{ inventory_hostname }} authorized_keys file (devuser)"
+ authorized_key:
+ exclusive: no
+ user: devuser
+ state: present
+ manage_dir: yes
+ key: "{{ _local_ssh_key['content'] | b64decode }}"
+ comment: "deployer's key"
+
+ - name: "Configure {{ inventory_hostname }} authorized_keys file (root)"
+ authorized_key:
+ exclusive: no
+ user: root
+ state: present
+ manage_dir: yes
+ key: "{{ _local_ssh_key['content'] | b64decode }}"
+ comment: "deployer's key"
+ become: yes
+
+ - name: Ensure /httpboot directory exists
+ file:
+ path: /httpboot
+ state: directory
+ become: yes
+
+ - name: Copy original qcow2 image to OPNFV VM
+ synchronize:
+ src: "{{ xci_cache }}/{{ item }}"
+ dest: /httpboot/
+ recursive: yes
+ delete: yes
+ with_items:
+ - "deployment_image.qcow2"
+ - "deployment_image.qcow2.sha256.txt"
+ become: yes
+
+ - name: Configure DNS on openSUSE
+ block:
+ - stat:
+ path: /etc/resolv.conf.netconfig
+ register: _resolv_conf_netconfig
+ - shell: |
+ mv /etc/resolv.conf.netconfig /etc/resolv.conf
+ become: yes
+ when: _resolv_conf_netconfig.stat.exists
+ when: ansible_pkg_mgr == 'zypper'
diff --git a/xci/infra/bifrost/scripts/bifrost-env.sh b/xci/infra/bifrost/scripts/bifrost-env.sh
new file mode 100755
index 00000000..72d1dafe
--- /dev/null
+++ b/xci/infra/bifrost/scripts/bifrost-env.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2016 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# dib configuration
+case ${XCI_DISTRO,,} in
+ # These should ideally match the CI jobs
+ ubuntu)
+ export DIB_OS_RELEASE="${DIB_OS_RELEASE:-xenial}"
+ export DIB_OS_ELEMENT="${DIB_OS_ELEMENT:-ubuntu-minimal}"
+ export DIB_OS_PACKAGES="${DIB_OS_PACKAGES:-vlan,vim,less,bridge-utils,language-pack-en,iputils-ping,rsyslog,curl,iptables}"
+ ;;
+ centos)
+ export DIB_OS_RELEASE="${DIB_OS_RELEASE:-7}"
+ export DIB_OS_ELEMENT="${DIB_OS_ELEMENT:-centos-minimal}"
+ export DIB_OS_PACKAGES="${DIB_OS_PACKAGES:-vim,less,bridge-utils,iputils,rsyslog,curl,iptables}"
+ ;;
+ opensuse)
+ export DIB_OS_RELEASE="${DIB_OS_RELEASE:-42.3}"
+ export DIB_OS_ELEMENT="${DIB_OS_ELEMENT:-opensuse-minimal}"
+ export DIB_OS_PACKAGES="${DIB_OS_PACKAGES:-vim,less,bridge-utils,iputils,rsyslog,curl,iptables}"
+ ;;
+esac
+
+export BIFROST_INVENTORY_SOURCE=/tmp/baremetal.json
+
+pip install -q --upgrade -r "${XCI_CACHE}/repos/bifrost/requirements.txt"
diff --git a/xci/infra/bifrost/scripts/bifrost-provision.sh b/xci/infra/bifrost/scripts/bifrost-provision.sh
deleted file mode 100755
index 940e9439..00000000
--- a/xci/infra/bifrost/scripts/bifrost-provision.sh
+++ /dev/null
@@ -1,176 +0,0 @@
-#!/bin/bash
-# SPDX-license-identifier: Apache-2.0
-##############################################################################
-# Copyright (c) 2016 Ericsson AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-set -eu
-set -o pipefail
-
-# This is normally passed from the XCI deployment script but
-# we also need it here for the bifrost jobs which run outside of XCI
-export XCI_PATH="${XCI_PATH:-$(git rev-parse --show-toplevel)}"
-# Declare our virtualenv
-export XCI_VENV="${XCI_VENV:-${XCI_PATH}/venv/}"
-export XCI_DISTRO=${XCI_DISTRO:-$(source /etc/os-release &>/dev/null || source /usr/lib/os-release &>/dev/null; echo ${ID,,})}
-
-export PYTHONUNBUFFERED=1
-SCRIPT_HOME="$(cd "$(dirname "$0")" && pwd)"
-BIFROST_HOME=$SCRIPT_HOME/..
-ENABLE_VENV="true"
-export VENV=${XCI_VENV}
-PROVISION_WAIT_TIMEOUT=${PROVISION_WAIT_TIMEOUT:-3600}
-# This is normally exported by XCI env but we should initialize it here
-# in case we run this script on its own for debug purposes
-XCI_ANSIBLE_PARAMS=${XCI_ANSIBLE_PARAMS:-}
-# Ironic SHAs
-BIFROST_IRONIC_INSPECTOR_VERSION=${BIFROST_IRONIC_INSPECTOR_VERSION:-master}
-BIFROST_IRONIC_INSPECTOR_CLIENT_VERSION=${BIFROST_IRONIC_INSPECTOR_CLIENT_VERSION:-master}
-BIFROST_IRONIC_CLIENT_VERSION=${BIFROST_IRONIC_CLIENT_VERSION:-master}
-BIFROST_IRONIC_VERSION=${BIFROST_IRONIC_VERSION:-master}
-
-# set UPPER_CONSTRAINTS_FILE since it is needed in order to limit libvirt-python to 4.0.0
-export UPPER_CONSTRAINTS_FILE=https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt
-
-# Ensure the right inventory files is used based on branch
-CURRENT_BIFROST_BRANCH=$(git rev-parse --abbrev-ref HEAD)
-if [ $CURRENT_BIFROST_BRANCH = "master" ]; then
- BAREMETAL_DATA_FILE=${BAREMETAL_DATA_FILE:-'/tmp/baremetal.json'}
- INVENTORY_FILE_FORMAT="baremetal_json_file"
-else
- BAREMETAL_DATA_FILE=${BAREMETAL_DATA_FILE:-'/tmp/baremetal.csv'}
- INVENTORY_FILE_FORMAT="baremetal_csv_file"
-fi
-export BIFROST_INVENTORY_SOURCE=$BAREMETAL_DATA_FILE
-
-# Default settings for VMs
-export TEST_VM_NUM_NODES=${TEST_VM_NUM_NODES:-3}
-export TEST_VM_NODE_NAMES=${TEST_VM_NODE_NAMES:-"opnfv controller00 compute00"}
-export VM_DOMAIN_TYPE=${VM_DOMAIN_TYPE:-kvm}
-export VM_CPU=${VM_CPU:-4}
-export VM_DISK=${VM_DISK:-100}
-export VM_MEMORY_SIZE=${VM_MEMORY_SIZE:-8192}
-export VM_DISK_CACHE=${VM_DISK_CACHE:-unsafe}
-
-# Settings for bifrost
-TEST_PLAYBOOK="opnfv-virtual.yaml"
-USE_INSPECTOR=true
-USE_CIRROS=false
-TESTING_USER=root
-DOWNLOAD_IPA=true
-CREATE_IPA_IMAGE=false
-INSPECT_NODES=true
-INVENTORY_DHCP=false
-INVENTORY_DHCP_STATIC_IP=false
-WRITE_INTERFACES_FILE=true
-
-# Settings for console access
-export DIB_DEV_USER_PWDLESS_SUDO=yes
-export DIB_DEV_USER_PASSWORD=devuser
-
-# Additional dib elements
-export EXTRA_DIB_ELEMENTS=${EXTRA_DIB_ELEMENTS:-"openssh-server"}
-
-# dib configuration
-case ${XCI_DISTRO,,} in
- # These should ideally match the CI jobs
- ubuntu)
- export DIB_OS_RELEASE="${DIB_OS_RELEASE:-xenial}"
- export DIB_OS_ELEMENT="${DIB_OS_ELEMENT:-ubuntu-minimal}"
- export DIB_OS_PACKAGES="${DIB_OS_PACKAGES:-vlan,vim,less,bridge-utils,language-pack-en,iputils-ping,rsyslog,curl,iptables}"
- ;;
- centos)
- export DIB_OS_RELEASE="${DIB_OS_RELEASE:-7}"
- export DIB_OS_ELEMENT="${DIB_OS_ELEMENT:-centos-minimal}"
- export DIB_OS_PACKAGES="${DIB_OS_PACKAGES:-vim,less,bridge-utils,iputils,rsyslog,curl,iptables}"
- ;;
- opensuse)
- export DIB_OS_RELEASE="${DIB_OS_RELEASE:-42.3}"
- export DIB_OS_ELEMENT="${DIB_OS_ELEMENT:-opensuse-minimal}"
- export DIB_OS_PACKAGES="${DIB_OS_PACKAGES:-vim,less,bridge-utils,iputils,rsyslog,curl,iptables}"
- ;;
-esac
-
-# Copy the OS images if found
-if [[ -e ${XCI_PATH}/deployment_image.qcow2 ]]; then
- sudo mkdir -p /httpboot
- sudo mv ${XCI_PATH}/deployment_image.qcow2* /httpboot/
-fi
-
-# Install missing dependencies. Use sudo since for bifrost jobs
-# the venv is not ready yet.
-if [[ -n ${VIRTUAL_ENV:-} ]]; then
- _sudo=""
-else
- virtualenv --quiet --no-site-packages ${XCI_VENV}
- set +u
- source ${XCI_VENV}/bin/activate
- set -u
- _sudo="sudo -H -E"
-fi
-${_sudo} pip install -q --upgrade -r "$(dirname $0)/../requirements.txt"
-
-# Change working directory
-cd $BIFROST_HOME/playbooks
-
-# NOTE(hwoarang): Disable selinux as we are hitting issues with it from time to
-# time. Remove this when Centos7 is a proper gate on bifrost so we know that
-# selinux works as expected.
-if [[ -e /etc/centos-release ]]; then
- echo "*************************************"
- echo "WARNING: Disabling selinux on CentOS7"
- echo "*************************************"
- sudo setenforce 0
-fi
-
-# Create the VMS
-ansible-playbook ${XCI_ANSIBLE_PARAMS} \
- -i inventory/localhost \
- test-bifrost-create-vm.yaml \
- -e test_vm_num_nodes=${TEST_VM_NUM_NODES} \
- -e test_vm_cpu='host-model' \
- -e test_vm_memory_size=${VM_MEMORY_SIZE} \
- -e enable_venv=${ENABLE_VENV} \
- -e test_vm_domain_type=${VM_DOMAIN_TYPE} \
- -e ${INVENTORY_FILE_FORMAT}=${BAREMETAL_DATA_FILE}
-
-# Execute the installation and VM startup test
-ansible-playbook ${XCI_ANSIBLE_PARAMS} \
- -i inventory/bifrost_inventory.py \
- ${TEST_PLAYBOOK} \
- -e use_cirros=${USE_CIRROS} \
- -e testing_user=${TESTING_USER} \
- -e test_vm_num_nodes=${TEST_VM_NUM_NODES} \
- -e test_vm_cpu='host-model' \
- -e inventory_dhcp=${INVENTORY_DHCP} \
- -e inventory_dhcp_static_ip=${INVENTORY_DHCP_STATIC_IP} \
- -e enable_venv=${ENABLE_VENV} \
- -e enable_inspector=${USE_INSPECTOR} \
- -e inspect_nodes=${INSPECT_NODES} \
- -e download_ipa=${DOWNLOAD_IPA} \
- -e create_ipa_image=${CREATE_IPA_IMAGE} \
- -e write_interfaces_file=${WRITE_INTERFACES_FILE} \
- -e ipv4_gateway=192.168.122.1 \
- -e wait_timeout=${PROVISION_WAIT_TIMEOUT} \
- -e enable_keystone=false \
- -e ironicinspector_source_install=true \
- -e ironicinspector_git_branch=${BIFROST_IRONIC_INSPECTOR_VERSION} \
- -e ironicinspectorclient_source_install=true \
- -e ironicinspectorclient_git_branch=${BIFROST_IRONIC_INSPECTOR_CLIENT_VERSION} \
- -e ironicclient_source_install=true \
- -e ironicclient_git_branch=${BIFROST_IRONIC_CLIENT_VERSION} \
- -e ironic_git_branch=${BIFROST_IRONIC_VERSION} \
- -e use_prebuilt_images=${BIFROST_USE_PREBUILT_IMAGES} \
- -e xci_distro=${XCI_DISTRO}
-EXITCODE=$?
-
-if [ $EXITCODE != 0 ]; then
- echo "************************************"
- echo "Provisioning failed. See logs folder"
- echo "************************************"
-fi
-
-exit $EXITCODE
diff --git a/xci/infra/bifrost/vars/debian.yml b/xci/infra/bifrost/vars/debian.yml
new file mode 100644
index 00000000..95303b38
--- /dev/null
+++ b/xci/infra/bifrost/vars/debian.yml
@@ -0,0 +1,19 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 SUSE Linux GmbH.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+bifrost_required_devel_packages:
+ - gcc
+ - libffi-dev
+ - libssl-dev
+ - lsb-release
+ - make
+ - net-tools
+ - libpython-dev
+ - wget
+ - iptables
diff --git a/xci/infra/bifrost/vars/redhat.yml b/xci/infra/bifrost/vars/redhat.yml
new file mode 100644
index 00000000..056c4d61
--- /dev/null
+++ b/xci/infra/bifrost/vars/redhat.yml
@@ -0,0 +1,19 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 SUSE Linux GmbH.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+bifrost_required_devel_packages:
+ - gcc
+ - libffi-devel
+ - openssl-devel
+ - redhat-lsb
+ - make
+ - net-tools
+ - python-devel
+ - wget
+ - iptables
diff --git a/xci/infra/bifrost/vars/suse.yml b/xci/infra/bifrost/vars/suse.yml
new file mode 100644
index 00000000..8e2e9041
--- /dev/null
+++ b/xci/infra/bifrost/vars/suse.yml
@@ -0,0 +1,19 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 SUSE Linux GmbH.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+bifrost_required_devel_packages:
+ - gcc
+ - libffi-devel
+ - libopenssl-devel
+ - make
+ - net-tools
+ - python-devel
+ - python-xml
+ - wget
+ - iptables
diff --git a/xci/playbooks/manage-ssh-keys.yml b/xci/playbooks/manage-ssh-keys.yml
index ff797aad..999215d8 100644
--- a/xci/playbooks/manage-ssh-keys.yml
+++ b/xci/playbooks/manage-ssh-keys.yml
@@ -6,6 +6,15 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+- name: Configure SSH key for devuser
+ user:
+ name: devuser
+ generate_ssh_key: yes
+ ssh_key_bits: 2048
+ ssh_key_comment: xci
+ ssh_key_type: rsa
+ state: present
+
- name: Configure SSH key for root user
user:
name: root
diff --git a/xci/playbooks/roles/create-vm-nodes/README.md b/xci/playbooks/roles/create-vm-nodes/README.md
new file mode 100644
index 00000000..d96a2981
--- /dev/null
+++ b/xci/playbooks/roles/create-vm-nodes/README.md
@@ -0,0 +1,165 @@
+create-vm-nodes
+================
+
+This role creates the XCI VMs used to deploy scenarios. It is derived from
+the bifrost role "bifrost-create-vm-nodes":
+
+https://github.com/openstack/bifrost/tree/master/playbooks/roles/bifrost-create-vm-nodes
+
+It creates the VMs based on the pdf and idf documents, which describe the
+characteristics of the VMs or physical servers. For more information, check
+the spec:
+
+https://github.com/opnfv/releng-xci/blob/master/docs/specs/infra_manager.rst
+
+
+Flow
+----
+
+The script xci/infra/bifrost/infra-provision.sh calls the playbook that
+starts executing the role:
+
+xci-create-virtual.yml
+
+Note that at this stage the pdf and the opnfv_vm.yml files are loaded.
+
+A few distro-specific variable tasks run first, and then the
+prepare_libvirt tasks are included. These tasks, as the name suggests,
+get everything ready to run libvirt.
+
+After that, the nodes_json_data dictionary is initialized. It collects the
+data for each VM as it is created and is finally dumped into the
+baremetal_json_file, which is read by bifrost in the subsequent role.
+
+The opnfv vm and the rest of the vms are created from the libvirt XML
+template, which is filled in with the pdf and opnfv_vm.yml variables.
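+
+For reference, a single node entry in that file looks roughly like the
+following sketch (all values here are illustrative; the exact fields are
+assembled in tasks/create_vm.yml):
+
+    {
+      "controller00": {
+        "name": "controller00",
+        "uuid": "...",
+        "host_groups": ["baremetal"],
+        "driver": "ipmi",
+        "driver_info": {
+          "power": {
+            "ipmi_address": "192.168.122.1",
+            "ipmi_port": "624",
+            "ipmi_username": "admin",
+            "ipmi_password": "password"
+          }
+        },
+        "nics": [{"mac": "52:54:00:33:82:d1"}],
+        "ansible_ssh_host": "172.29.236.11",
+        "ipv4_address": "172.29.236.11",
+        "properties": {
+          "cpu_arch": "x86_64",
+          "ram": "16",
+          "cpus": "6",
+          "disk_size": "80"
+        }
+      }
+    }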
+
+Requirements
+------------
+
+The following packages are required and ensured to be present:
+- libvirt-bin
+- qemu-utils
+- qemu-kvm
+- sgabios
+
+
+Warning
+-------
+
+- It is currently assumed that the OS for the VM will be installed in the first
+disk of the node described by the pdf. That's why there is a [0] in:
+
+ - name: create volume for vm
+ command: >
+ virsh --connect {{ vm_libvirt_uri }}
+ vol-create-as {{ node_storage_pool }} {{ vm_name }}.qcow2
+ {{ item.disks[0].disk_capacity }}
+ --format qcow2 {{ prealloc|default("") }}
+
+- It is assumed that the opnfv VM characteristics are not described in the pdf
+but in a similar document called opnfv_vm.yml
+
+- All references to csv from bifrost-create-vm-nodes were removed
+
+Role Variables
+--------------
+
+baremetal_json_file: Defaults to '/tmp/baremetal.json'. It contains the
+ required information for bifrost to configure the
+ VMs appropriately
+
+vm_disk_cache: Disk cache mode to use for the VM disks.
+                  Defaults to 'unsafe'.
+
+node_names: Space-separated names for the nodes to be created.
+               Defaults to shell variable 'NODE_NAMES'.
+               If not set, VM names will be autogenerated.
+               Note that regardless of the number of names in this list,
+               at most 'num_nodes' VMs will be created.
+
+vm_network: Name of the libvirt network to create the nodes on.
+ Defaults to shell variable 'VM_NET_BRIDGE', or,
+ if that is not set, to 'default'.
+
+node_storage_pool: Name of the libvirt storage pool to create disks
+ for VMs in.
+ Defaults to shell variable 'LIBVIRT_STORAGE_POOL', or,
+ if that is not set, to 'default'.
+ If absent, this pool will be created.
+
+node_storage_pool_path: Path used by the libvirt storage pool
+ 'node_storage_pool' if it has to be created.
+ Defaults to "/var/lib/libvirt/images".
+
+node_logdir: Folder where to store VM logs.
+ Defaults to "/var/log/libvirt/baremetal_logs".
+
+vm_emulator: Path to emulator executable used to define VMs in libvirt.
+ Defaults to "/usr/bin/qemu-system-x86_64".
+ Generally users should not need to modify this setting,
+ as it is OS-specific and is overwritten by
+ os/distribution-specific defaults in this role when needed.
+
+vm_libvirt_uri: URI to connect to libvirt for networks, storage and VM
+ related actions.
+ Defaults to shell variable 'LIBVIRT_CONNECT_URI', or,
+ if that is not set, to 'qemu:///system'.
+ Note that currently connecting to remote libvirt is
+ not tested and is unsupported.
+
+network_interface: Name of the bridge to create when creating
+ 'vm_network' libvirt network.
+ Defaults to "virbr0".
+ Name and default of this option are chosen to be the same
+ as in 'bifrost-ironic-install' role.
+
+opnfv_vm_network_ip: IP for the 'network_interface' bridge.
+ Defaults to '192.168.122.1'.
+ This setting is applied only when 'vm_network'
+ was absent and is created from scratch.
+
+node_network_netmask: Subnet mask for 'network_interface' bridge.
+ Defaults to '255.255.255.0'.
+ This setting is applied only when 'vm_network'
+ was absent and is created from scratch.
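+
+As a usage sketch, most of these can be tuned through the corresponding
+shell variables before provisioning starts (values below are illustrative):
+
+    export NODE_NAMES="opnfv controller00 compute00"
+    export VM_NET_BRIDGE="default"
+    export LIBVIRT_STORAGE_POOL="default"
+    export LIBVIRT_CONNECT_URI="qemu:///system"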
+
+Dependencies
+------------
+
+None at this time.
+
+Example Playbook
+----------------
+
+- hosts: localhost
+ connection: local
+ become: yes
+ gather_facts: yes
+ roles:
+ - role: create-vm-nodes
+
+License
+-------
+
+Copyright (c) 2018 SUSE Linux GmbH.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Author Information
+------------------
+
+mbuil@suse.com
diff --git a/xci/playbooks/roles/create-vm-nodes/defaults/main.yml b/xci/playbooks/roles/create-vm-nodes/defaults/main.yml
new file mode 100644
index 00000000..6ac266a5
--- /dev/null
+++ b/xci/playbooks/roles/create-vm-nodes/defaults/main.yml
@@ -0,0 +1,27 @@
+---
+# defaults file for create-vm-nodes
+baremetal_json_file: '/tmp/baremetal.json'
+
+# We collect these parameters from the pdf
+vm_nic: "virtio"
+vm_groups: {}
+vm_default_groups: "{{ lookup('env', 'DEFAULT_HOST_GROUPS').split() | default(['baremetal'], true) }}"
+vm_disk_cache: unsafe
+node_names: "{{ lookup('env', 'NODE_NAMES').split() }}"
+
+# NOTE(pas-ha) name and default are chosen to be the same
+# as in 'bifrost-ironic-install' role
+network_interface: "virbr0"
+# NOTE(pas-ha) these correspond to settings for the libvirt network created by default
+vm_network: "{{ lookup('env', 'VM_NET_BRIDGE') | default('default', true) }}"
+node_network_netmask: "255.255.255.0"
+
+node_storage_pool: "{{ lookup('env', 'LIBVIRT_STORAGE_POOL') | default('default', true) }}"
+node_storage_pool_path: "/var/lib/libvirt/images"
+node_logdir: "/var/log/libvirt/baremetal_logs"
+# NOTE(pas-ha) next two are generic values for most OSes, overridden by distro-specific vars
+vm_emulator: "/usr/bin/qemu-system-x86_64"
+# NOTE(pas-ha) not really tested with non-local qemu connections
+vm_libvirt_uri: "{{ lookup('env', 'LIBVIRT_CONNECT_URI') | default('qemu:///system', true) }}"
+
+opnfv_image_path: "/var/lib/libvirt/images"
diff --git a/xci/playbooks/roles/create-vm-nodes/tasks/create_vm.yml b/xci/playbooks/roles/create-vm-nodes/tasks/create_vm.yml
new file mode 100644
index 00000000..d8169c2f
--- /dev/null
+++ b/xci/playbooks/roles/create-vm-nodes/tasks/create_vm.yml
@@ -0,0 +1,166 @@
+---
+# Create a VM and volume for it, save its MAC address
+- shell: "sudo virsh list --all | grep 'shut off' | wc -l"
+ register: num_vms
+
+- name: "Creating VM"
+ block:
+ # NOTE(pas-ha) item here refers to name of the vm
+ - set_fact:
+ vm_name: "{{ node_names[num_vms.stdout | int] }}"
+
+ - set_fact:
+ vm_log_file: "{{ node_logdir }}/{{ vm_name }}_console.log"
+ vm_host_group: "{{ vm_default_groups }}"
+
+ - set_fact:
+ vm_host_group: "{{ vm_default_groups | union(vm_groups[vm_name]) }}"
+ when: vm_groups[vm_name] is defined
+
+ - name: set prealloc arg for Debian
+ set_fact:
+ prealloc: "--prealloc-metadata"
+ when:
+ - ansible_os_family == 'Debian'
+ - vm_libvirt_uri == 'qemu:///system'
+
+ - name: list info on pools
+ virt_pool:
+ command: facts
+ uri: "{{ vm_libvirt_uri }}"
+
+ - name: list existing vms
+ virt:
+ command: list_vms
+ register: existing_vms
+
+ - block:
+ - name: Check if volume exists
+ stat:
+ path: "{{ opnfv_image_path }}/{{ vm_name }}.qcow2"
+ register: _vm_volume_prepared
+
+ # NOTE(pas-ha) Ansible still lacks modules to operate on libvirt volumes
+ # mbuil: Assuming there is only one disk [0]
+ - name: create volume for vm
+ command: >
+ virsh --connect {{ vm_libvirt_uri }}
+ vol-create-as {{ node_storage_pool }} {{ vm_name }}.qcow2
+ {{ item.disks[0].disk_capacity }}
+ --format qcow2 {{ prealloc|default("") }}
+ when:
+ - not _vm_volume_prepared.stat.exists
+ - (vm_name + '.qcow2') not in ansible_libvirt_pools[node_storage_pool].volumes
+
+ - name: set path to the volume created
+ set_fact:
+ vm_volume_path: "{{ ansible_libvirt_pools[node_storage_pool].path }}/{{ vm_name }}.qcow2"
+
+ - name: pre-touch the vm volume
+ file:
+ state: touch
+ path: "{{ vm_volume_path }}"
+ when: vm_libvirt_uri == 'qemu:///system'
+
+ # NOTE(TheJulia): CentOS default installs with an XFS root, and chattr
+ # fails to set +C on XFS. This could be more elegant, however the use
+ # case is for CI testing.
+ - name: set copy-on-write for volume on non-CentOS systems
+ command: chattr +C {{ vm_volume_path }}
+ ignore_errors: yes
+ when:
+ - ansible_distribution != 'CentOS'
+ - vm_libvirt_uri == 'qemu:///system'
+
+ # Fetches the xml descriptor from the template
+ - name: create_vm
+ virt:
+ command: define
+ name: "{{ vm_name }}"
+ uri: "{{ vm_libvirt_uri }}"
+ xml: "{{ lookup('template', 'vm.xml.j2') }}"
+
+ rescue:
+ - name: "Execute `dmesg` to collect debugging output should VM creation fail."
+ command: dmesg
+ - name: >
+ "Execute `virsh capabilities` to collect debugging output
+ should VM creation fail."
+ command: virsh capabilities
+ - name: "Abort due to failed VM creation"
+ fail: >
+ msg="VM creation step failed, please review dmesg
+ output for additional details"
+ when: vm_name not in existing_vms.list_vms
+
+ # TODO(pas-ha) replace 'command: vbmc ...' tasks
+ # with a custom Ansible module using vbmc Python API
+ - name: get list of nodes from virtualbmc
+ command: vbmc list
+ register: vbmc_list
+
+ # NOTE(NobodyCam): Space at the end of the find clause is required for proper matching.
+ - name: delete vm from virtualbmc if it is there
+ command: vbmc delete {{ vm_name }}
+ when: vbmc_list.stdout.find(vm_name) != -1
+
+ - set_fact:
+ virtual_ipmi_port: "{{ (vm_ipmi_port_start|default(623) | int ) + (num_vms.stdout | int ) }}"
+
+ - name: plug vm into vbmc
+ command: vbmc add {{ vm_name }} --libvirt-uri {{ vm_libvirt_uri }} --port {{ virtual_ipmi_port }}
+
+ - name: start virtualbmc
+ command: vbmc start {{ vm_name }}
+
+ - name: get XML of the vm
+ virt:
+ name: "{{ vm_name }}"
+ command: get_xml
+ register: vm_xml
+
+ - name: Fetch the ip
+ set_fact:
+ vm_ip: "{%- for interface in item.interfaces %}{%- if 'native' in (interface.vlan | string) %}{{ interface.address }}{%- endif %}{%- endfor %}"
+
+ # Assumes there is only a single NIC per VM
+ - name: get MAC from vm XML
+ set_fact:
+ vm_mac: "{{ (vm_xml.get_xml | regex_findall(\"<mac address='.*'/>\") | first).split('=') | last | regex_replace(\"['/>]\", '') }}"
+
+ # NOTE(pas-ha) using default username and password set by virtualbmc - "admin" and "password" respectively
+ # see vbmc add --help
+ - name: set the json entry for vm
+ set_fact:
+ vm_data:
+ name: "{{ vm_name }}"
+ uuid: "{{ vm_name | to_uuid }}"
+ host_groups: "{{ vm_host_group }}"
+ driver: "{{ vm_node_driver|default('ipmi') }}"
+ driver_info:
+ power:
+ ipmi_address: "192.168.122.1"
+ ipmi_port: "{{ virtual_ipmi_port }}"
+ ipmi_username: "{{ item.remote_management.user }}"
+ ipmi_password: "{{ item.remote_management.pass }}"
+ nics:
+ - mac: "{{ vm_mac }}"
+ ansible_ssh_host: "{{ vm_ip }}"
+ ipv4_address: "{{ vm_ip }}"
+ properties:
+ cpu_arch: "{{ item.node.arch }}"
+ ram: "{{ item.node.memory.rstrip('G') }}"
+ cpus: "{{ item.node.cpus }}"
+ disk_size: "{{ item.disks[0].disk_capacity.rstrip('G') }}"
+
+ - name: add created vm info
+ set_fact:
+ nodes_json_data: "{{ nodes_json_data | combine({vm_name: vm_data}) }}"
+ when: vm_name != 'opnfv'
+
+ - name: Record OPNFV VM ip
+ set_fact:
+ opnfv_vm_ip: "{{ vm_ip }}"
+ when: vm_name == 'opnfv'
+
+ when: (num_nodes | int) > (num_vms.stdout | int)
diff --git a/xci/playbooks/roles/create-vm-nodes/tasks/download_opnfvimage.yml b/xci/playbooks/roles/create-vm-nodes/tasks/download_opnfvimage.yml
new file mode 100644
index 00000000..a227bc4f
--- /dev/null
+++ b/xci/playbooks/roles/create-vm-nodes/tasks/download_opnfvimage.yml
@@ -0,0 +1,32 @@
+---
+- name: Download the {{ xci_distro }} image checksum file
+ get_url:
+ dest: "{{ xci_cache }}/deployment_image.qcow2.sha256.txt"
+ force: no
+ url: http://artifacts.opnfv.org/releng/xci/images/{{ xci_distro }}.qcow2.sha256.txt
+ timeout: 3000
+- name: Extract checksum
+ shell: awk '{print $1}' "{{ xci_cache }}/deployment_image.qcow2.sha256.txt"
+ register: _image_checksum
+- fail:
+ msg: "Failed to get image checksum"
+  when: _image_checksum.stdout == ''
+- set_fact:
+ image_checksum: "{{ _image_checksum.stdout }}"
+- name: Download the {{ xci_distro }} image file
+ get_url:
+ url: http://artifacts.opnfv.org/releng/xci/images/{{ xci_distro }}.qcow2
+ checksum: "sha256:{{ image_checksum }}"
+ timeout: 3000
+ dest: "{{ xci_cache }}/deployment_image.qcow2"
+ force: no
+- name: Set correct mode for deployment_image.qcow2 file
+ file:
+ path: "{{ xci_cache }}/deployment_image.qcow2"
+ mode: '0755'
+ owner: 'root'
+ group: 'root'
+
+- name: Create copy of original deployment image
+ shell: "cp {{ xci_cache }}/deployment_image.qcow2 {{ opnfv_image_path }}/opnfv.qcow2"
+ become: yes
diff --git a/xci/playbooks/roles/create-vm-nodes/tasks/main.yml b/xci/playbooks/roles/create-vm-nodes/tasks/main.yml
new file mode 100644
index 00000000..7e0090e4
--- /dev/null
+++ b/xci/playbooks/roles/create-vm-nodes/tasks/main.yml
@@ -0,0 +1,49 @@
+---
+# baremetal_json_file could be the file coming from pdf/idf
+
+- name: "Load distribution defaults"
+ include_vars: "{{ ansible_os_family | lower }}.yml"
+
+# Install the packages listed in the distribution defaults loaded above
+- name: "Install required packages"
+ package:
+ name: "{{ required_packages }}"
+
+- include_tasks: prepare_libvirt.yml
+- include_tasks: download_opnfvimage.yml
+
+- name: create placeholder var for vm entries in JSON format
+ set_fact:
+ nodes_json_data: {}
+
+# Create the VMs one by one, starting with the opnfv vm
+- include_tasks: create_vm.yml
+ with_items: "{{ [opnfv_vm] + nodes }}"
+
+- name: Start the opnfv vm
+ virt:
+ command: start
+ name: opnfv
+
+- name: remove previous baremetal data file
+ file:
+ state: absent
+ path: "{{ baremetal_json_file }}"
+
+# nodes_json_data was populated by the create_vm tasks
+- name: write to baremetal json file
+ copy:
+ dest: "{{ baremetal_json_file }}"
+ content: "{{ nodes_json_data | to_nice_json }}"
+
+- debug: var=nodes_json_data
+
+- name: >
+ "Set file permissions such that the baremetal data file
+ can be read by the user executing Ansible"
+ file:
+ path: "{{ baremetal_json_file }}"
+ owner: "{{ ansible_env.SUDO_USER }}"
+ when: >
+ ansible_env.SUDO_USER is defined and
+ baremetal_json_file != ""
diff --git a/xci/playbooks/roles/create-vm-nodes/tasks/prepare_libvirt.yml b/xci/playbooks/roles/create-vm-nodes/tasks/prepare_libvirt.yml
new file mode 100644
index 00000000..e09e2d6b
--- /dev/null
+++ b/xci/playbooks/roles/create-vm-nodes/tasks/prepare_libvirt.yml
@@ -0,0 +1,119 @@
+---
+- name: "Restart libvirt service"
+ service: name="{{libvirt_service_name}}" state=restarted
+
+# NOTE(Shrews) We need to enable ip forwarding for the libvirt bridge to
+# operate properly with dnsmasq. This should be done before starting dnsmasq.
+- name: "Enable IP forwarding in sysctl"
+ sysctl:
+ name: "net.ipv4.ip_forward"
+ value: 1
+ sysctl_set: yes
+ state: present
+ reload: yes
+
+# NOTE(Shrews) Ubuntu packaging+apparmor issue prevents libvirt from loading
+# the ROM from /usr/share/misc.
+- name: "Look for sgabios in {{ sgabios_dir }}"
+ stat: path={{ sgabios_dir }}/sgabios.bin
+ register: test_sgabios_qemu
+
+- name: "Look for sgabios in /usr/share/misc"
+ stat: path=/usr/share/misc/sgabios.bin
+ register: test_sgabios_misc
+
+- name: "Place sgabios.bin"
+ command: cp /usr/share/misc/sgabios.bin /usr/share/qemu/sgabios.bin
+ when: >
+      not test_sgabios_qemu.stat.exists and
+      test_sgabios_misc.stat.exists
+
+# NOTE(TheJulia): In order to prevent conflicts, stop
+# dnsmasq to prevent conflicts with libvirt restarting.
+# TODO(TheJulia): We shouldn't need to do this, but the
+# libvirt dhcp instance conflicts with our specific config
+# and taking this path allows us to not refactor dhcp at
+# this moment. Our DHCP serving should be refactored
+# so we don't need to do this.
+- name: "Stop default dnsmasq service"
+ service:
+ name: dnsmasq
+ state: stopped
+ ignore_errors: true
+
+# NOTE(TheJulia): If you test in a VM, this might be
+# helpful if you originally installed your host with
+# the default 192.168.122.0/24 network
+- name: destroy libvirt network
+ virt_net:
+ name: "{{ vm_network }}"
+ state: absent
+ uri: "{{ vm_libvirt_uri }}"
+
+- name: ensure libvirt network is present
+ virt_net:
+ name: "{{ vm_network }}"
+ state: present
+ xml: "{{ lookup('template', 'net.xml.j2') }}"
+ uri: "{{ vm_libvirt_uri }}"
+
+- name: find facts on libvirt networks
+ virt_net:
+ command: facts
+ uri: "{{ vm_libvirt_uri }}"
+
+# NOTE(pas-ha) yet another place where non-local libvirt will not work
+- name: "Delete network interface if virtual network is not active"
+ command: ip link del {{ ansible_libvirt_networks[vm_network].bridge }}
+ when:
+ - ansible_libvirt_networks[vm_network].state != 'active'
+ - vm_libvirt_uri == 'qemu:///system'
+ ignore_errors: yes
+
+- name: set libvirt network to autostart
+ virt_net:
+ name: "{{ vm_network }}"
+ autostart: yes
+ uri: "{{ vm_libvirt_uri }}"
+
+- name: ensure libvirt network is running
+ virt_net:
+ name: "{{ vm_network }}"
+ state: active
+ uri: "{{ vm_libvirt_uri }}"
+
+- name: get libvirt network status
+ virt_net:
+ name: "{{ vm_network }}"
+ command: status
+ uri: "{{ vm_libvirt_uri }}"
+ register: test_vm_net_status
+
+- name: fail if libvirt network is not active
+ assert:
+ that: test_vm_net_status.status == 'active'
+
+- name: define a libvirt pool if not set
+ virt_pool:
+ name: "{{ node_storage_pool }}"
+ state: present
+ uri: "{{ vm_libvirt_uri }}"
+ xml: "{{ lookup('template', 'pool_dir.xml.j2') }}"
+
+- name: ensure libvirt pool is running
+ virt_pool:
+ name: "{{ node_storage_pool }}"
+ state: active
+ autostart: yes
+ uri: "{{ vm_libvirt_uri }}"
+
+- name: create dir for bm logs
+ file:
+ state: directory
+ path: "{{ node_logdir }}"
+ recurse: yes
+ mode: "0755"
+
+- name: install virtualbmc
+ pip:
+ name: virtualbmc
diff --git a/xci/playbooks/roles/create-vm-nodes/templates/net.xml.j2 b/xci/playbooks/roles/create-vm-nodes/templates/net.xml.j2
new file mode 100644
index 00000000..3c082170
--- /dev/null
+++ b/xci/playbooks/roles/create-vm-nodes/templates/net.xml.j2
@@ -0,0 +1,18 @@
+<network>
+ <name>{{ vm_network }}</name>
+ <forward mode='nat'>
+ <nat>
+ <port start='1024' end='65535'/>
+ </nat>
+ </forward>
+ <bridge name='{{ network_interface }}' stp='on' delay='0'/>
+ <ip address='{{ nodes[0].remote_management.address.split(':')[0] }}' netmask='{{ node_network_netmask }}'>
+ <dhcp>
+ {%- for interface in opnfv_vm.interfaces %}
+ {%- if 'native' in (interface.vlan | string) %}
+ <host mac="{{ interface.mac_address }}" ip="{{ interface.address }}"/>
+ {%- endif %}
+ {%- endfor %}
+ </dhcp>
+ </ip>
+</network>
diff --git a/xci/playbooks/roles/create-vm-nodes/templates/pool_dir.xml.j2 b/xci/playbooks/roles/create-vm-nodes/templates/pool_dir.xml.j2
new file mode 100644
index 00000000..e4645deb
--- /dev/null
+++ b/xci/playbooks/roles/create-vm-nodes/templates/pool_dir.xml.j2
@@ -0,0 +1,7 @@
+<pool type='dir'>
+ <name>{{ node_storage_pool }}</name>
+ <target>
+ <path>{{ node_storage_pool_path }}</path>
+ </target>
+</pool>
+
diff --git a/xci/playbooks/roles/create-vm-nodes/templates/vm.xml.j2 b/xci/playbooks/roles/create-vm-nodes/templates/vm.xml.j2
new file mode 100644
index 00000000..c44fa6aa
--- /dev/null
+++ b/xci/playbooks/roles/create-vm-nodes/templates/vm.xml.j2
@@ -0,0 +1,76 @@
+<domain type='{{ vm_domain_type }}'>
+ <name>{{ vm_name }}</name>
+ <memory unit='GiB'>{{ item.node.memory.rstrip('G') }}</memory>
+ <vcpu>{{ item.node.cpus }}</vcpu>
+ <os>
+ <type arch='{{ item.node.arch }}' machine='{{ item.node.model }}'>hvm</type>
+ {%- if 'opnfv' in vm_name -%}
+ <boot dev='hd'/>
+ {%- else -%}
+ <boot dev='network'/>
+ {% endif -%}
+ <bootmenu enable='no'/>
+ <bios useserial='yes' rebootTimeout='10000'/>
+ </os>
+ <features>
+ <acpi/>
+ <apic/>
+ <pae/>
+ </features>
+ <cpu mode='{{ item.node.cpu_cflags }}'>
+ <model fallback='allow'/>
+ </cpu>
+ <clock offset='utc'/>
+ <on_poweroff>destroy</on_poweroff>
+ <on_reboot>restart</on_reboot>
+ <on_crash>restart</on_crash>
+ <devices>
+ <emulator>{{ vm_emulator }}</emulator>
+ <disk type='file' device='disk'>
+ <driver name='qemu' type='qcow2' cache='{{ vm_disk_cache }}'/>
+ <source file='{{ vm_volume_path }}'/>
+ <target dev='vda' bus='virtio'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
+ </disk>
+ <controller type='ide' index='0'>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x1'/>
+ </controller>
+ {% set native_interfaces = [] %}
+ {%- for interface in item.interfaces %}
+ {%- if 'native' in (interface.vlan | string) %}
+ {%- set _ = native_interfaces.append(interface) %}
+ {%- endif %}
+ {%- endfor %}
+ {%- for interface in native_interfaces -%}
+ <interface type='network'>
+ <source network='{{ vm_network }}'/>
+ <model type='{{ vm_nic }}'/>
+ <mac address='{{ interface.mac_address }}'/>
+ </interface>
+ {% endfor -%}
+ <input type='mouse' bus='ps2'/>
+ <graphics type='vnc' port='-1' autoport='yes'/>
+ <video>
+ <model type='cirrus' vram='9216' heads='1'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
+ </video>
+ <serial type='file'>
+ <source path='{{ vm_log_file }}'/>
+ <target port='0'/>
+ <alias name='serial0'/>
+ </serial>
+ <serial type='pty'>
+ <source path='/dev/pts/49'/>
+ <target port='1'/>
+ <alias name='serial1'/>
+ </serial>
+ <console type='file'>
+ <source path='{{ vm_log_file }}'/>
+ <target type='serial' port='0'/>
+ <alias name='serial0'/>
+ </console>
+ <memballoon model='virtio'>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
+ </memballoon>
+ </devices>
+</domain>
diff --git a/xci/playbooks/roles/create-vm-nodes/vars/debian.yml b/xci/playbooks/roles/create-vm-nodes/vars/debian.yml
new file mode 100644
index 00000000..bcfc47d5
--- /dev/null
+++ b/xci/playbooks/roles/create-vm-nodes/vars/debian.yml
@@ -0,0 +1,13 @@
+---
+sgabios_dir: /usr/share/qemu/
+libvirt_service_name: libvirt-bin
+required_packages:
+ - libvirt-bin
+ - qemu-utils
+ - qemu-kvm
+ - qemu-system-x86
+ - sgabios
+ - pkg-config
+ - libvirt-dev
+ - python-lxml
+ - python-libvirt
diff --git a/xci/playbooks/roles/create-vm-nodes/vars/redhat.yml b/xci/playbooks/roles/create-vm-nodes/vars/redhat.yml
new file mode 100644
index 00000000..2b285110
--- /dev/null
+++ b/xci/playbooks/roles/create-vm-nodes/vars/redhat.yml
@@ -0,0 +1,17 @@
+---
+sgabios_dir: /usr/share/sgabios/
+libvirt_service_name: libvirtd
+required_packages:
+ - qemu-img
+ - qemu-kvm-tools
+ - qemu-kvm
+ - qemu-kvm-common
+ - qemu-system-x86
+ - sgabios-bin
+ - libvirt
+ - libvirt-client
+ - libvirt-daemon
+ - pkgconfig
+ - libvirt-devel
+ - libvirt-python
+ - python-lxml
diff --git a/xci/playbooks/roles/create-vm-nodes/vars/suse.yml b/xci/playbooks/roles/create-vm-nodes/vars/suse.yml
new file mode 100644
index 00000000..7e4c41ef
--- /dev/null
+++ b/xci/playbooks/roles/create-vm-nodes/vars/suse.yml
@@ -0,0 +1,15 @@
+---
+sgabios_dir: /usr/share/sgabios/
+libvirt_service_name: libvirtd
+required_packages:
+ - qemu-tools
+ - qemu-kvm
+ - qemu-x86
+ - qemu-sgabios
+ - libvirt
+ - libvirt-client
+ - libvirt-daemon
+ - pkg-config
+ - libvirt-devel
+ - python-lxml
+ - libvirt-python
diff --git a/xci/var/opnfv_vm.yml b/xci/var/opnfv_vm.yml
new file mode 100644
index 00000000..17f5038c
--- /dev/null
+++ b/xci/var/opnfv_vm.yml
@@ -0,0 +1,67 @@
+---
+##############################################################################
+# Copyright (c) 2017 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+opnfv_vm:
+ name: opnfv
+ node: &nodeparams
+ type: virtual
+ vendor: libvirt
+ model: pc
+ arch: x86_64
+ cpus: 6
+ cpu_cflags: host-model
+ cores: 6
+ memory: 12G
+ disks: &disks
+ - name: disk1
+ disk_capacity: 80G
+ disk_type: hdd
+ disk_interface: sata
+ disk_rotation:
+ remote_params: &remote_params
+ type:
+ - ipmi: [2.0]
+ user: admin
+ pass: password
+ remote_management:
+ <<: *remote_params
+ address: 192.168.122.1:625
+ mac_address: "52:54:00:fe:3b:01"
+ interface_common_nic1: &interface_common_nic1
+ name: nic1
+ speed:
+ features:
+ vlan: 10
+ interface_common_nic2: &interface_common_nic2
+ name: nic2
+ speed:
+ features:
+ vlan: 20
+ interface_common_nic3: &interface_common_nic3
+ name: nic3
+ speed:
+ features:
+ vlan: native
+ interface_common_nic4: &interface_common_nic4
+ name: nic4
+ speed:
+ features:
+ vlan: 30
+ interfaces:
+ - mac_address: "52:54:00:33:82:d0"
+ address: 172.29.236.10
+ <<: *interface_common_nic1
+ - mac_address: "52:54:00:33:82:d0"
+ address: 172.29.244.10
+ <<: *interface_common_nic2
+ - mac_address: "52:54:00:33:82:d0"
+ address: 192.168.122.2
+ <<: *interface_common_nic3
+ - mac_address: "52:54:00:33:82:d0"
+ address: 172.29.240.10
+ <<: *interface_common_nic4