summaryrefslogtreecommitdiffstats
path: root/xci/playbooks
diff options
context:
space:
mode:
Diffstat (limited to 'xci/playbooks')
-rw-r--r--xci/playbooks/configure-localhost.yml116
-rwxr-xr-xxci/playbooks/dynamic_inventory.py240
-rw-r--r--xci/playbooks/get-opnfv-scenario-requirements.yml165
-rw-r--r--xci/playbooks/manage-ssh-keys.yml56
-rw-r--r--xci/playbooks/prepare-tests.yml8
-rw-r--r--xci/playbooks/provision-vm-nodes.yml42
-rw-r--r--xci/playbooks/roles/.gitignore8
-rw-r--r--xci/playbooks/roles/bootstrap-host/defaults/main.yml11
-rwxr-xr-xxci/playbooks/roles/bootstrap-host/files/network-config-suse (renamed from xci/playbooks/roles/configure-network/files/network-config-suse)0
-rw-r--r--xci/playbooks/roles/bootstrap-host/handlers/main.yml12
-rw-r--r--xci/playbooks/roles/bootstrap-host/tasks/main.yml15
-rw-r--r--xci/playbooks/roles/bootstrap-host/tasks/network.yml64
-rw-r--r--xci/playbooks/roles/bootstrap-host/tasks/network_debian.yml98
-rw-r--r--xci/playbooks/roles/bootstrap-host/tasks/network_redhat.yml32
-rw-r--r--xci/playbooks/roles/bootstrap-host/tasks/network_suse.yml93
-rw-r--r--xci/playbooks/roles/bootstrap-host/tasks/time.yml (renamed from xci/playbooks/roles/synchronize-time/tasks/main.yml)9
l---------xci/playbooks/roles/bootstrap-host/templates/kubespray1
-rw-r--r--xci/playbooks/roles/bootstrap-host/templates/osa/debian.interface.j239
-rw-r--r--xci/playbooks/roles/bootstrap-host/templates/osa/redhat.interface.j226
-rw-r--r--xci/playbooks/roles/bootstrap-host/templates/osa/suse.interface.j2 (renamed from xci/playbooks/roles/configure-network/templates/suse/suse.interface.j2)7
-rw-r--r--xci/playbooks/roles/bootstrap-host/templates/osa/suse.routes.j21
l---------xci/playbooks/roles/bootstrap-host/templates/osh1
-rw-r--r--xci/playbooks/roles/bootstrap-host/vars/main.yml70
-rw-r--r--xci/playbooks/roles/clone-repository/tasks/main.yml4
-rw-r--r--xci/playbooks/roles/configure-network/tasks/main.yml103
-rw-r--r--xci/playbooks/roles/configure-network/templates/debian/compute00.interface.j275
l---------xci/playbooks/roles/configure-network/templates/debian/compute01.interface.j21
-rw-r--r--xci/playbooks/roles/configure-network/templates/debian/controller00.interface.j266
l---------xci/playbooks/roles/configure-network/templates/debian/controller01.interface.j21
l---------xci/playbooks/roles/configure-network/templates/debian/controller02.interface.j21
-rw-r--r--xci/playbooks/roles/configure-network/templates/debian/opnfv.interface.j266
-rw-r--r--xci/playbooks/roles/configure-network/templates/redhat/bridge.ifcfg.j29
-rw-r--r--xci/playbooks/roles/configure-network/templates/redhat/interface.ifcfg.j210
-rw-r--r--xci/playbooks/roles/configure-network/templates/suse/suse.routes.j21
-rw-r--r--xci/playbooks/roles/configure-nfs/tasks/main.yml2
-rw-r--r--xci/playbooks/roles/create-nodes/README.md160
-rw-r--r--xci/playbooks/roles/create-nodes/defaults/main.yml31
-rw-r--r--xci/playbooks/roles/create-nodes/files/virtualbmc.conf3
-rw-r--r--xci/playbooks/roles/create-nodes/tasks/baremetalhoststojson.yml91
-rw-r--r--xci/playbooks/roles/create-nodes/tasks/create_vm.yml198
-rw-r--r--xci/playbooks/roles/create-nodes/tasks/download_opnfvimage.yml32
-rw-r--r--xci/playbooks/roles/create-nodes/tasks/main.yml54
-rw-r--r--xci/playbooks/roles/create-nodes/tasks/prepare_libvirt.yml139
-rw-r--r--xci/playbooks/roles/create-nodes/templates/net-admin.xml.j214
-rw-r--r--xci/playbooks/roles/create-nodes/templates/net-mgmt.xml.j211
-rw-r--r--xci/playbooks/roles/create-nodes/templates/net.xml.j214
-rw-r--r--xci/playbooks/roles/create-nodes/templates/pool_dir.xml.j27
-rw-r--r--xci/playbooks/roles/create-nodes/templates/vm.xml.j269
-rw-r--r--xci/playbooks/roles/create-nodes/vars/debian.yml13
-rw-r--r--xci/playbooks/roles/create-nodes/vars/redhat.yml17
-rw-r--r--xci/playbooks/roles/create-nodes/vars/suse.yml15
-rw-r--r--xci/playbooks/roles/prepare-functest/defaults/main.yml14
-rw-r--r--xci/playbooks/roles/prepare-functest/tasks/main.yml32
-rw-r--r--xci/playbooks/roles/prepare-functest/templates/env.j24
-rw-r--r--xci/playbooks/roles/prepare-functest/templates/prepare-functest.sh.j212
-rw-r--r--xci/playbooks/roles/prepare-tests/defaults/main.yml14
-rw-r--r--xci/playbooks/roles/prepare-tests/tasks/main.yml56
-rw-r--r--xci/playbooks/roles/prepare-tests/tasks/process_neutron_conf.yml19
-rw-r--r--xci/playbooks/roles/prepare-tests/templates/env.j215
-rw-r--r--xci/playbooks/roles/prepare-tests/templates/prepare-tests.sh.j246
-rw-r--r--xci/playbooks/roles/prepare-tests/templates/run-functest.sh.j252
-rw-r--r--xci/playbooks/roles/prepare-tests/templates/run-yardstick.sh.j247
-rw-r--r--xci/playbooks/roles/prepare-tests/vars/main.yml17
63 files changed, 2113 insertions, 536 deletions
diff --git a/xci/playbooks/configure-localhost.yml b/xci/playbooks/configure-localhost.yml
new file mode 100644
index 00000000..7aab18f3
--- /dev/null
+++ b/xci/playbooks/configure-localhost.yml
@@ -0,0 +1,116 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2017 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- hosts: localhost
+ connection: local
+
+ pre_tasks:
+ - name: Load distribution variables
+ include_vars:
+ file: "{{ item }}"
+ failed_when: false
+ with_items:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+ - "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml"
+
+ - name: cleanup leftovers of previous deployment
+ file:
+ path: "{{ item }}"
+ state: absent
+ recurse: no
+ with_items:
+ - "{{ log_path }} "
+ - "{{ opnfv_ssh_host_keys_path }}"
+
+ roles:
+ - role: clone-repository
+ project: "openstack/openstack-ansible-openstack_openrc"
+ repo: "{{ openstack_osa_openrc_git_url }}"
+ dest: roles/openstack-ansible-openstack_openrc
+ version: "master"
+ when: installer_type == "osa"
+ - role: clone-repository
+ project: "openstack/openstack-ansible"
+ repo: "{{ openstack_osa_git_url }}"
+ dest: "{{ xci_cache }}/repos/openstack-ansible"
+ version: "{{ openstack_osa_version }}"
+ when: installer_type == "osa"
+ - role: clone-repository
+ project: "kubernetes-incubator/kubespray"
+ repo: "{{ kubespray_git_url }}"
+ dest: "{{ xci_cache }}/repos/kubespray"
+ version: "{{ kubespray_version }}"
+ when: installer_type in ["kubespray", "osh"]
+ - role: clone-repository
+ project: "openstack/openstack-ansible-haproxy_server"
+ repo: "{{ openstack_osa_haproxy_git_url }}"
+ dest: roles/haproxy_server
+ version: "{{ haproxy_version }}"
+ when:
+ - installer_type == "kubespray" or installer_type == "osh"
+ - role: clone-repository
+ project: "ansible-keepalived"
+ repo: "{{ keepalived_git_url }}"
+ dest: roles/keepalived
+ version: "{{ keepalived_version }}"
+ when:
+ - installer_type == "kubespray" or installer_type == "osh"
+
+ tasks:
+ - name: create log directory {{log_path}}
+ file:
+ path: "{{log_path}}"
+ state: directory
+ recurse: no
+
+ - name: Synchronize local development OSA repository to XCI paths
+      # synchronize module is much faster than the copy module
+ synchronize:
+ src: "{{ openstack_osa_dev_path }}"
+ dest: "{{ xci_cache }}/repos/openstack-ansible"
+ recursive: yes
+ delete: yes
+ when:
+ - openstack_osa_dev_path != ""
+ - installer_type == "osa"
+
+ - name: Configure SSH key for local user
+ user:
+ name: "{{ ansible_env.USER }}"
+ createhome: yes
+ home: "/home/{{ ansible_env.USER }}"
+ move_home: yes
+ shell: /bin/bash
+ generate_ssh_key: yes
+ ssh_key_bits: 2048
+ ssh_key_comment: xci
+ ssh_key_type: rsa
+ ssh_key_file: .ssh/id_rsa
+ state: present
+
+ - name: Dump XCI execution environment to a file
+ shell: env > "{{ xci_path }}/.cache/xci.env"
+ args:
+ executable: /bin/bash
+ creates: "{{ xci_path }}/.cache/xci.env"
+
+ #TODO: Create an Ansible variable for
+ # kube_service_addresses(10.233.0.0/18)
+ - name: Update iptables
+ command: "iptables -t nat -I POSTROUTING 3 -s 192.168.122.0/24 -d 10.233.0.0/18 -j RETURN"
+ become: true
+ tags:
+ - skip_ansible_lint
+
+ #Provide access to the external network (for tests)
+ - name: Update iptables
+ command: "iptables -t nat -I POSTROUTING 3 -s 192.168.122.0/24 -d 172.24.4.0/24 -j RETURN"
+ become: true
+ tags:
+ - skip_ansible_lint
diff --git a/xci/playbooks/dynamic_inventory.py b/xci/playbooks/dynamic_inventory.py
new file mode 100755
index 00000000..ed63141c
--- /dev/null
+++ b/xci/playbooks/dynamic_inventory.py
@@ -0,0 +1,240 @@
+#!/usr/bin/python
+# coding: utf-8
+
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 SUSE LINUX GmbH.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+#
+# Based on https://raw.githubusercontent.com/ansible/ansible/devel/contrib/inventory/cobbler.py
+
+import argparse
+import glob
+import os
+import sys
+import yaml
+import json
+
+
+class XCIInventory(object):
+ """
+
+ Generates the ansible inventory based on the idf and pdf files provided
+ when executing the deployment script
+
+ """
+ def __init__(self):
+ super(XCIInventory, self).__init__()
+ self.inventory = {}
+ self.inventory['all'] = {}
+ self.inventory['all']['hosts'] = []
+ self.inventory['all']['vars'] = {}
+ self.inventory['_meta'] = {}
+ self.inventory['_meta']['hostvars'] = {}
+ self.installer = os.environ.get('INSTALLER_TYPE', 'osa')
+ self.flavor = os.environ.get('XCI_FLAVOR', 'mini')
+ self.flavor_files = os.path.dirname(os.path.realpath(__file__)) + "/../installer/" + self.installer + "/files/" + self.flavor
+
+ # Static information for opnfv host for now
+ self.add_host('opnfv')
+ self.add_hostvar('opnfv', 'ansible_host', '192.168.122.2')
+ self.add_hostvar('opnfv', 'ip', '192.168.122.2')
+ self.add_to_group('deployment', 'opnfv')
+ self.add_to_group('opnfv', 'opnfv')
+
+ self.opnfv_networks = {}
+ self.opnfv_networks['opnfv'] = {}
+ self.opnfv_networks['opnfv']['mgmt'] = {}
+ self.opnfv_networks['opnfv']['mgmt']['address'] = '172.29.236.10/22'
+ self.opnfv_networks['opnfv']['public'] = {}
+ self.opnfv_networks['opnfv']['public']['address'] = '192.168.122.2/24'
+ self.opnfv_networks['opnfv']['public']['gateway'] = '192.168.122.1'
+ self.opnfv_networks['opnfv']['public']['dns'] = ['192.168.122.1']
+ self.opnfv_networks['opnfv']['private'] = {}
+ self.opnfv_networks['opnfv']['private']['address'] = '172.29.240.10/22'
+ self.opnfv_networks['opnfv']['storage'] = {}
+ self.opnfv_networks['opnfv']['storage']['address'] = '172.29.244.10/24'
+
+ # Add localhost
+ self.add_host('deployment_host')
+ self.add_hostvar('deployment_host', 'ansible_ssh_host', '127.0.0.1')
+ self.add_hostvar('deployment_host', 'ansible_connection', 'local')
+
+ self.read_pdf_idf()
+
+ self.parse_args()
+
+ if self.args.host:
+ self.dump(self.get_host_info(self.args.host))
+ else:
+ self.dump(self.inventory)
+
+ def parse_args(self):
+ parser = argparse.ArgumentParser(description='Produce an Ansible inventory based on PDF/IDF XCI files')
+ parser.add_argument('--list', action='store_true', default=True, help='List XCI hosts (default: True)')
+ parser.add_argument('--host', action='store', help='Get all the variables about a specific host')
+ self.args = parser.parse_args()
+
+ def read_pdf_idf(self):
+ pdf_file = os.environ['PDF']
+ idf_file = os.environ['IDF']
+ opnfv_file = os.path.dirname(os.path.realpath(__file__)) + "/../var/opnfv_vm_pdf.yml"
+ opnfv_idf_file = os.path.dirname(os.path.realpath(__file__)) + "/../var/opnfv_vm_idf.yml"
+ nodes = []
+ host_networks = {}
+
+ with open(pdf_file) as f:
+ try:
+ pdf = yaml.safe_load(f)
+ except yaml.YAMLError as e:
+ print(e)
+ sys.exit(1)
+
+ with open(idf_file) as f:
+ try:
+ idf = yaml.safe_load(f)
+ except yaml.YAMLError as e:
+ print(e)
+ sys.exit(1)
+
+ with open(opnfv_file) as f:
+ try:
+ opnfv_pdf = yaml.safe_load(f)
+ except yaml.YAMLError as e:
+ print(e)
+ sys.exit(1)
+
+ with open(opnfv_idf_file) as f:
+ try:
+ opnfv_idf = yaml.safe_load(f)
+ except yaml.YAMLError as e:
+ print(e)
+ sys.exit(1)
+
+
+ valid_host = (host for host in idf['xci']['installers'][self.installer]['nodes_roles'] \
+ if host in idf['xci']['flavors'][self.flavor] \
+ and host != 'opnfv')
+
+ for host in valid_host:
+ nodes.append(host)
+ hostname = idf['xci']['installers'][self.installer]['hostnames'][host]
+ self.add_host(hostname)
+ for role in idf['xci']['installers'][self.installer]['nodes_roles'][host]:
+ self.add_to_group(role, hostname)
+
+ pdf_host_info = list(filter(lambda x: x['name'] == host, pdf['nodes']))[0]
+ native_vlan_if = list(filter(lambda x: x['vlan'] == 'native', pdf_host_info['interfaces']))
+ self.add_hostvar(hostname, 'ansible_host', native_vlan_if[0]['address'])
+ self.add_hostvar(hostname, 'ip', native_vlan_if[0]['address'])
+ host_networks[hostname] = {}
+ # And now record the rest of the information
+ for network, ndata in idf['idf']['net_config'].items():
+ network_interface_num = idf['idf']['net_config'][network]['interface']
+ host_networks[hostname][network] = {}
+ host_networks[hostname][network]['address'] = pdf_host_info['interfaces'][int(network_interface_num)]['address'] + "/" + str(ndata['mask'])
+ if 'gateway' in ndata.keys():
+ host_networks[hostname][network]['gateway'] = str(ndata['gateway']) + "/" + str(ndata['mask'])
+ if 'dns' in ndata.keys():
+ host_networks[hostname][network]['dns'] = []
+ for d in ndata['dns']:
+ host_networks[hostname][network]['dns'].append(str(d))
+
+ # Get also vlan and mac_address from pdf
+ host_networks[hostname][network]['mac_address'] = str(pdf_host_info['interfaces'][int(network_interface_num)]['mac_address'])
+ host_networks[hostname][network]['vlan'] = str(pdf_host_info['interfaces'][int(network_interface_num)]['vlan'])
+
+ # Get also vlan and mac_address from opnfv_pdf
+ mgmt_idf_index = int(opnfv_idf['opnfv_vm_idf']['net_config']['mgmt']['interface'])
+ opnfv_mgmt = opnfv_pdf['opnfv_vm_pdf']['interfaces'][mgmt_idf_index]
+ admin_idf_index = int(opnfv_idf['opnfv_vm_idf']['net_config']['admin']['interface'])
+ opnfv_public = opnfv_pdf['opnfv_vm_pdf']['interfaces'][admin_idf_index]
+ self.opnfv_networks['opnfv']['mgmt']['mac_address'] = str(opnfv_mgmt['mac_address'])
+ self.opnfv_networks['opnfv']['mgmt']['vlan'] = str(opnfv_mgmt['vlan'])
+ self.opnfv_networks['opnfv']['public']['mac_address'] = str(opnfv_public['mac_address'])
+ self.opnfv_networks['opnfv']['public']['vlan'] = str(opnfv_public['vlan'])
+
+ # Add the interfaces from idf
+
+
+ host_networks.update(self.opnfv_networks)
+
+ self.add_groupvar('all', 'host_info', host_networks)
+
+ if 'deployment_host_interfaces' in idf['xci']['installers'][self.installer]['network']:
+ mgmt_idf_index = int(opnfv_idf['opnfv_vm_idf']['net_config']['mgmt']['interface'])
+ admin_idf_index = int(opnfv_idf['opnfv_vm_idf']['net_config']['admin']['interface'])
+ self.add_hostvar('deployment_host', 'network_interface_admin', idf['xci']['installers'][self.installer]['network']['deployment_host_interfaces'][admin_idf_index])
+ self.add_hostvar('deployment_host', 'network_interface_mgmt', idf['xci']['installers'][self.installer]['network']['deployment_host_interfaces'][mgmt_idf_index])
+
+ # Now add the additional groups
+ for parent in idf['xci']['installers'][self.installer]['groups'].keys():
+ for host in idf['xci']['installers'][self.installer]['groups'][parent]:
+ self.add_group(host, parent)
+
+ # Read additional group variables
+ self.read_additional_group_vars()
+
+ def read_additional_group_vars(self):
+ if not os.path.exists(self.flavor_files + "/inventory/group_vars"):
+ return
+ group_dir = self.flavor_files + "/inventory/group_vars/*.yml"
+ group_file = glob.glob(group_dir)
+ for g in group_file:
+ with open(g) as f:
+ try:
+ group_vars = yaml.safe_load(f)
+ except yaml.YAMLError as e:
+ print(e)
+ sys.exit(1)
+ for k,v in group_vars.items():
+ self.add_groupvar(os.path.basename(g.replace('.yml', '')), k, v)
+
+ def dump(self, data):
+ print (json.dumps(data, sort_keys=True, indent=2))
+
+ def add_host(self, host):
+ self.inventory['all']['hosts'].append(host)
+
+ def hosts(self):
+ return self.inventory['all']['hosts']
+
+ def add_group(self, group, parent = 'all'):
+ if parent not in self.inventory.keys():
+ self.inventory[parent] = {}
+ if 'children' not in self.inventory[parent]:
+ self.inventory[parent]['children'] = []
+ self.inventory[parent]['children'].append(group)
+
+ def add_to_group(self, group, host):
+ if group not in self.inventory.keys():
+ self.inventory[group] = []
+ self.inventory[group].append(host)
+
+ def add_hostvar(self, host, param, value):
+ if host not in self.hostvars():
+ self.inventory['_meta']['hostvars'][host] = {}
+ self.inventory['_meta']['hostvars'][host].update({param: value})
+
+ def add_groupvar(self, group, param, value):
+ if param not in self.groupvars(group):
+ self.inventory[group]['vars'][param] = {}
+ self.inventory[group]['vars'].update({param: value})
+
+ def hostvars(self):
+ return iter(self.inventory['_meta']['hostvars'].keys())
+
+ def groupvars(self, group):
+ return iter(self.inventory[group]['vars'].keys())
+
+ def get_host_info(self, host):
+ return self.inventory['_meta']['hostvars'][host]
+
+if __name__ == '__main__':
+ XCIInventory()
+
+# vim: set ts=4 sw=4 expandtab:
diff --git a/xci/playbooks/get-opnfv-scenario-requirements.yml b/xci/playbooks/get-opnfv-scenario-requirements.yml
index 7eaa43de..a9165709 100644
--- a/xci/playbooks/get-opnfv-scenario-requirements.yml
+++ b/xci/playbooks/get-opnfv-scenario-requirements.yml
@@ -31,15 +31,88 @@
loop_control:
label: "{{ item[0].scenario }}"
- - name: Create scenario directories
- file:
- path: "{{ role_path_default }}/{{ item.scenario }}"
- state: directory
+ - name: Update scenarios with local overrides
+ set_fact:
+ scenarios: >
+ {%- for z in xci_scenarios_overrides -%}
+ {%- for x in scenarios if x.scenario == z.scenario -%}
+ {%- set _ = x.update(z) -%}
+ {%- endfor -%}
+ {%- endfor -%}
+ {{- scenarios -}}
+ with_items: "{{ xci_scenarios_overrides }}"
+ loop_control:
+ label: "{{ item.scenario }}"
+ when: xci_scenarios_overrides is defined
+
+  - name: Collect list of known scenarios
+ set_fact:
+ known_scenarios: >
+ {%- set scenario_names = [] -%}
+ {%- for x in scenarios -%}
+ {%- set _ = scenario_names.append(x.scenario) -%}
+ {%- endfor -%}
+ {{- scenario_names -}}
with_items: "{{ scenarios }}"
loop_control:
label: "{{ item.scenario }}"
- - name: Clone git repos (with git)
+ - name: Fail if 'DEPLOY_SCENARIO' is not defined
+ fail:
+ msg: "DEPLOY_SCENARIO env variable is not defined so no scenario can be deployed"
+ when: deploy_scenario is not defined
+
+ - name: Ensure {{ deploy_scenario }} is a known XCI scenario
+ fail:
+ msg: "{{ deploy_scenario }} does not exist"
+ when: deploy_scenario not in known_scenarios
+
+ - name: Collect scenario information
+ set_fact:
+ xci_scenario: >
+ {%- set xci_scenario = {} -%}
+ {%- for x in scenarios if x.scenario == deploy_scenario -%}
+ {%- for z in x.installers if z.installer == installer_type -%}
+ {%- set _ = xci_scenario.update({'flavors': z.flavors}) -%}
+ {%- set _ = xci_scenario.update({'distros': z.distros}) -%}
+ {%- endfor -%}
+ {%- set _ = xci_scenario.update({'role': x.role | basename}) -%}
+ {%- endfor -%}
+ {{ xci_scenario }}
+
+ - name: Ensure local facts directory exists
+ file:
+ path: "/etc/ansible/facts.d"
+ state: directory
+ become: true
+
+ - name: Record scenario information
+ ini_file:
+ create: yes
+ section: scenarios
+ state: present
+ option: role
+ value: "{{ xci_scenario.role | basename }}"
+ path: "/etc/ansible/facts.d/xci.fact"
+ become: true
+
+ - name: Fail if {{ deploy_scenario }} is not supported
+ fail:
+ msg:
+ - ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ - ERROR! The {{ deploy_scenario }} scenario can't be deployed. This is because
+ - the {{ installer_type }} XCI installer or the {{ xci_flavor }} flavor or the {{ xci_distro }}
+ - distribution is not supported by this scenario. It may also be possible that
+ - this scenario doesn't exist at all or it's not listed in {{ scenario_file }}.
+ - ''
+ - This is a great chance for you to contribute to XCI ;-)
+ - ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ - ''
+ when:
+ (xci_scenario['flavors'] is defined and xci_flavor not in xci_scenario['flavors']) or
+ (xci_scenario['distros'] is defined and xci_distro not in xci_scenario['distros'])
+
+ - name: Clone git repos
git:
repo: "{{ item.src }}"
dest: "{{ scenario_path_default }}/{{ item.scenario | default(item.src | basename) }}"
@@ -47,8 +120,6 @@
refspec: "{{ item.refspec | default(omit) }}"
update: true
force: true
- when:
- - item.scm == "git" or item.scm is undefined
with_items: "{{ scenarios }}"
register: git_clone
until: git_clone | success
@@ -57,91 +128,19 @@
loop_control:
label: "{{ item.scenario }}"
- - name: Check that scenarios exist
- stat:
- path: "{{ scenario_path_default }}/{{ item.scenario }}/{{ item.role }}"
- register: scenarios_list_exists
- with_items: "{{ scenarios }}"
- loop_control:
- label: "{{ item.scenario }}"
-
- - name: Plug in the scenario to XCI
- synchronize:
- src: "{{ scenario_path_default }}/{{ item.item.scenario }}/{{ item.item.role }}/"
- dest: "{{ role_path_default }}/{{ item.item.scenario }}"
- when: item.stat.exists
- with_items: "{{ scenarios_list_exists.results }}"
- loop_control:
- label: "{{ item.item.scenario }}"
-
- - name: Synchronize local changes to scenarios' master branch
+ - name: Plug in the scenario Ansible roles to XCI
synchronize:
- src: "{{ XCI_PATH }}/{{ item.item.role }}/"
- dest: "{{ role_path_default }}/{{ item.item.scenario }}"
- failed_when: false
- when:
- - item.stat.exists
- - item.item.version == 'master'
- with_items: "{{ scenarios_list_exists.results }}"
- loop_control:
- label: "{{ item.item.scenario }}"
-
- - name: Plug in the scenario to XCI (fallback)
- synchronize:
- src: "{{ XCI_PATH }}/{{ item.item.role }}/"
- dest: "{{ role_path_default }}/{{ item.item.scenario }}"
- when: not item.stat.exists
- with_items: "{{ scenarios_list_exists.results }}"
- loop_control:
- label: "{{ item.item.scenario }}"
-
- - name: Gather information about the selected {{ DEPLOY_SCENARIO }} scenario
- set_fact:
- deploy_scenario: "{{ item }}"
+ src: "{{ scenario_path_default }}/{{ item.scenario }}/{{ item.role }}/"
+ dest: "{{ role_path_default }}/{{ item.role | basename }}"
with_items: "{{ scenarios }}"
loop_control:
label: "{{ item.scenario }}"
- when: DEPLOY_SCENARIO | lower == item.scenario
-
- - name: Determine if the selected {{ DEPLOY_SCENARIO }} scenario can be deployed
- block:
- - set_fact:
- deploy_scenario_installer: "{{ item }}"
- with_items: "{{ deploy_scenario.installers }}"
- loop_control:
- label: "{{ item.installer }}"
- when: item.installer == XCI_INSTALLER
- - set_fact:
- deploy_scenario_flavor: "{{ (XCI_FLAVOR in deploy_scenario_installer.flavors) | bool }}"
- when:
- - deploy_scenario_installer
- - set_fact:
- deploy_scenario_distro: "{{ (XCI_DISTRO in deploy_scenario_installer.distros) | bool }}"
- when:
- - deploy_scenario_installer
- - deploy_scenario_flavor
- when: deploy_scenario is defined
-
- - name: Fail if {{ DEPLOY_SCENARIO }} is not supported
- fail:
- msg:
- - ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
- - ERROR! The {{ DEPLOY_SCENARIO }} scenario can't be deployed. This is because
- - the {{ XCI_INSTALLER }} XCI installer or the {{ XCI_FLAVOR }} flavor or the {{ XCI_DISTRO }}
- - distribution is not supported by this scenario. It may also be possible that
- - this scenario doesn't exist at all or it's not listed in {{ scenario_file }}.
- - ''
- - This is a great chance for you to contribute to XCI ;-)
- - ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
- - ''
- when:
- - deploy_scenario is not defined or not deploy_scenario_distro
vars:
ansible_python_interpreter: "/usr/bin/python"
scenarios: "{{ lookup('file', scenario_file) | from_yaml }}"
scenario_file: '../opnfv-scenario-requirements.yml'
- scenario_path_default: "{{ XCI_SCENARIOS_CACHE }}"
+ scenario_path_default: "{{ xci_scenarios_cache }}"
role_path_default: "{{ playbook_dir }}/roles"
git_clone_retries: 2
git_clone_retry_delay: 5
diff --git a/xci/playbooks/manage-ssh-keys.yml b/xci/playbooks/manage-ssh-keys.yml
new file mode 100644
index 00000000..999215d8
--- /dev/null
+++ b/xci/playbooks/manage-ssh-keys.yml
@@ -0,0 +1,56 @@
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 SUSE Linux GmbH and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- name: Configure SSH key for devuser
+ user:
+ name: devuser
+ generate_ssh_key: yes
+ ssh_key_bits: 2048
+ ssh_key_comment: xci
+ ssh_key_type: rsa
+ state: present
+
+- name: Configure SSH key for root user
+ user:
+ name: root
+ generate_ssh_key: yes
+ ssh_key_bits: 2048
+ ssh_key_comment: xci
+ ssh_key_type: rsa
+ state: present
+
+- name: Determine local user
+ become: no
+ local_action: command whoami
+ changed_when: False
+ register: _ansible_user
+
+- name: Fetch local SSH key
+ delegate_to: localhost
+ become: no
+ slurp:
+ src: "/home/{{ _ansible_user.stdout }}/.ssh/id_rsa.pub"
+ register: _local_ssh_key
+
+- name: Fetch OPNFV SSH key
+ delegate_to: opnfv
+ slurp:
+ src: "{{ ansible_env.HOME }}/.ssh/id_rsa.pub"
+ register: _opnfv_ssh_key
+
+- name: "Configure {{ inventory_hostname }} authorized_keys file"
+ authorized_key:
+ exclusive: "{{ item.exclusive }}"
+ user: root
+ state: present
+ manage_dir: yes
+ key: "{{ item.key }}"
+ comment: "{{ item.comment }}"
+ with_items:
+ - { key: "{{ _local_ssh_key['content'] | b64decode }}", comment: "{{ _ansible_user.stdout }} key", exclusive: yes }
+ - { key: "{{ _opnfv_ssh_key['content'] | b64decode }}", comment: "opnfv host key", exclusive: no }
diff --git a/xci/playbooks/prepare-tests.yml b/xci/playbooks/prepare-tests.yml
index ee30094d..1a1935aa 100644
--- a/xci/playbooks/prepare-tests.yml
+++ b/xci/playbooks/prepare-tests.yml
@@ -13,7 +13,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-- name: Setup functest installing required packages and create the public network
+- name: Prepare the environment for testing
hosts: opnfv
+ user: root
+ vars_files:
+ - ../var/opnfv.yml
+ - ../installer/osa/files/openstack_services.yml
roles:
- - role: "prepare-functest"
+ - role: "prepare-tests"
diff --git a/xci/playbooks/provision-vm-nodes.yml b/xci/playbooks/provision-vm-nodes.yml
deleted file mode 100644
index 8b8bb30d..00000000
--- a/xci/playbooks/provision-vm-nodes.yml
+++ /dev/null
@@ -1,42 +0,0 @@
----
-# SPDX-license-identifier: Apache-2.0
-##############################################################################
-# Copyright (c) 2017 Ericsson AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-- hosts: localhost
- connection: local
- gather_facts: true
- vars_files:
- - ../var/opnfv.yml
- pre_tasks:
- - name: Load distribution variables
- include_vars:
- file: ../var/{{ ansible_os_family }}.yml
- roles:
- - role: clone-repository
- project: "opnfv/bifrost"
- repo: "{{ OPENSTACK_BIFROST_GIT_URL }}"
- dest: "{{ XCI_CACHE }}/repos/bifrost"
- version: "{{ OPENSTACK_BIFROST_VERSION }}"
-
- tasks:
- - name: Load distribution variables
- include_vars:
- file: ../var/{{ ansible_os_family }}.yml
- - name: Synchronize local development bifrost repository to XCI paths
- # command module is much faster than the copy module
- synchronize:
- src: "{{ OPENSTACK_BIFROST_DEV_PATH }}"
- dest: "{{ XCI_CACHE }}/repos/bifrost"
- recursive: yes
- delete: yes
- when:
- - OPENSTACK_BIFROST_DEV_PATH != ""
- - name: combine opnfv/releng-xci and openstack/bifrost scripts/playbooks
- copy:
- src: "{{ XCI_PATH}}/bifrost/"
- dest: "{{ XCI_CACHE }}/repos/bifrost"
diff --git a/xci/playbooks/roles/.gitignore b/xci/playbooks/roles/.gitignore
deleted file mode 100644
index e0b47770..00000000
--- a/xci/playbooks/roles/.gitignore
+++ /dev/null
@@ -1,8 +0,0 @@
-*
-!.gitignore
-!clone-repository/
-!configure-network/
-!configure-nfs/
-!prepare-functest/
-!remote-folders/
-!synchronize-time/
diff --git a/xci/playbooks/roles/bootstrap-host/defaults/main.yml b/xci/playbooks/roles/bootstrap-host/defaults/main.yml
new file mode 100644
index 00000000..8e5a0e34
--- /dev/null
+++ b/xci/playbooks/roles/bootstrap-host/defaults/main.yml
@@ -0,0 +1,11 @@
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 SUSE Linux GmbH and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+configure_network: yes
+configure_time: yes
diff --git a/xci/playbooks/roles/configure-network/files/network-config-suse b/xci/playbooks/roles/bootstrap-host/files/network-config-suse
index 02cdd998..02cdd998 100755
--- a/xci/playbooks/roles/configure-network/files/network-config-suse
+++ b/xci/playbooks/roles/bootstrap-host/files/network-config-suse
diff --git a/xci/playbooks/roles/bootstrap-host/handlers/main.yml b/xci/playbooks/roles/bootstrap-host/handlers/main.yml
new file mode 100644
index 00000000..b9103233
--- /dev/null
+++ b/xci/playbooks/roles/bootstrap-host/handlers/main.yml
@@ -0,0 +1,12 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 SUSE Linux GmbH and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- name: Reload facts
+ setup:
+ filter: ansible_local
diff --git a/xci/playbooks/roles/bootstrap-host/tasks/main.yml b/xci/playbooks/roles/bootstrap-host/tasks/main.yml
new file mode 100644
index 00000000..7d6d259e
--- /dev/null
+++ b/xci/playbooks/roles/bootstrap-host/tasks/main.yml
@@ -0,0 +1,15 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 SUSE Linux GmbH and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+- include: network.yml
+ when: configure_network
+
+- include: time.yml
+ when: configure_time
diff --git a/xci/playbooks/roles/bootstrap-host/tasks/network.yml b/xci/playbooks/roles/bootstrap-host/tasks/network.yml
new file mode 100644
index 00000000..a4f260c4
--- /dev/null
+++ b/xci/playbooks/roles/bootstrap-host/tasks/network.yml
@@ -0,0 +1,64 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2017 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- name: ensure glean rules are removed
+ file:
+ path: "/etc/udev/rules.d/99-glean.rules"
+ state: absent
+
+- name: Determine required packages
+ set_fact:
+ network_packages:
+ - bridge-utils
+ - "{{ (ansible_pkg_mgr in ['zypper', 'apt']) | ternary('iproute2', 'iproute') }}"
+ - "{{ (ansible_pkg_mgr == 'apt') | ternary('vlan', 'bridge-utils') }}"
+ - iptables
+
+- name: Ensure networking packages are present
+ package:
+ name: "{{ network_packages }}"
+ state: present
+
+- name: Ensure local facts directory exists
+ file:
+ path: "/etc/ansible/facts.d"
+ state: directory
+
+# NOTE(hwoarang) We have to check all levels of the local fact before we add it
+# otherwise Ansible will fail.
+- name: Record initial active interface
+ ini_file:
+ create: yes
+ section: network
+ state: present
+ option: xci_interface
+ value: "{{ ansible_default_ipv4.interface }}"
+ path: "/etc/ansible/facts.d/xci.fact"
+ when: ansible_local is not defined
+ or (ansible_local is defined and ansible_local.xci is not defined)
+ or (ansible_local is defined and ansible_local.xci is defined and ansible_local.xci.network is not defined)
+ or (ansible_local is defined and ansible_local.xci is defined and ansible_local.xci.network is defined and ansible_local.xci.network.xci_interface is not defined)
+ notify:
+ - Reload facts
+
+- name: Run handlers
+ meta: flush_handlers
+
+- name: "Configure networking on {{ ansible_os_family }}"
+ include_tasks: "network_{{ ansible_os_family | lower }}.yml"
+
+- name: Wait for host to come back to life
+ local_action:
+ module: wait_for
+ host: "{{ ansible_host }}"
+ delay: 15
+ state: started
+ port: 22
+ connect_timeout: 10
+ timeout: 180
diff --git a/xci/playbooks/roles/bootstrap-host/tasks/network_debian.yml b/xci/playbooks/roles/bootstrap-host/tasks/network_debian.yml
new file mode 100644
index 00000000..176c7eb1
--- /dev/null
+++ b/xci/playbooks/roles/bootstrap-host/tasks/network_debian.yml
@@ -0,0 +1,98 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 SUSE LINUX GmbH.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+- name: configure modules
+ lineinfile:
+ dest: /etc/modules
+ state: present
+ create: yes
+ line: "8021q"
+
+- name: add modules
+ modprobe:
+ name: 8021q
+ state: present
+
+- name: ensure interfaces.d folder is empty
+ file:
+ state: "{{ item }}"
+ path: "/etc/network/interfaces.d"
+ with_items:
+ - absent
+ - directory
+
+- name: Ensure /etc/interfaces can source additional files
+ copy:
+ content: |
+ auto lo
+ iface lo inet loopback
+ source /etc/network/interfaces.d/*.cfg
+ dest: "/etc/network/interfaces"
+
+- name: "Configure networking for {{ inventory_hostname }}"
+ template:
+ src: "{{ installer_type }}/debian.interface.j2"
+ dest: "/etc/network/interfaces.d/{{ item.name }}.cfg"
+ with_items:
+ - { name: "{{ ansible_local.xci.network.xci_interface }}" }
+ - { name: "{{ ansible_local.xci.network.xci_interface }}.10", vlan_id: 10 }
+ - { name: "{{ ansible_local.xci.network.xci_interface }}.30", vlan_id: 30 }
+ - { name: "{{ ansible_local.xci.network.xci_interface }}.20", vlan_id: 20 }
+ - { name: "br-mgmt", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}.10", network: "{{ host_info[inventory_hostname].mgmt }}" }
+ - { name: "br-vxlan", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}.30", network: "{{ host_info[inventory_hostname].private }}" }
+ - { name: "br-vlan", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}", network: "{{ host_info[inventory_hostname].public }}" }
+ - { name: "br-storage", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}.20", network: "{{ host_info[inventory_hostname].storage }}" }
+ loop_control:
+ label: "{{ item.name }}"
+ when: baremetal | bool != true
+
+
+- name: "Configure baremetal networking for blade: {{ inventory_hostname }}"
+ template:
+ src: "{{ installer_type }}/debian.interface.j2"
+ dest: "/etc/network/interfaces.d/{{ item.name }}.cfg"
+ with_items:
+ - { name: "{{ admin_interface }}", network: "{{ host_info[inventory_hostname].admin }}" }
+ - { name: "{{ mgmt_interface }}", vlan_id: "{{ (mgmt_vlan == 'native') | ternary(omit, mgmt_vlan) }}" }
+ - { name: "{{ storage_interface }}", vlan_id: "{{ (storage_vlan == 'native') | ternary(omit, storage_vlan) }}" }
+ - { name: "{{ public_interface }}", vlan_id: "{{ (public_vlan == 'native') | ternary(omit, public_vlan) }}" }
+ - { name: "{{ private_interface }}", vlan_id: "{{ (private_vlan == 'native') | ternary(omit, private_vlan) }}" }
+ - { name: "br-mgmt", bridge_ports: "{{ mgmt_interface }}", network: "{{ host_info[inventory_hostname].mgmt }}" }
+ - { name: "br-vxlan", bridge_ports: "{{ private_interface }}", network: "{{ host_info[inventory_hostname].private }}" }
+ - { name: "br-vlan", bridge_ports: "{{ public_interface }}", network: "{{ host_info[inventory_hostname].public }}" }
+ - { name: "br-storage", bridge_ports: "{{ storage_interface }}", network: "{{ host_info[inventory_hostname].storage }}" }
+ loop_control:
+ label: "{{ item.name }}"
+ when:
+ - baremetal | bool == true
+ - "'opnfv' not in inventory_hostname"
+
+- name: "Configure baremetal networking for VM: {{ inventory_hostname }}"
+ template:
+ src: "{{ installer_type }}/debian.interface.j2"
+ dest: "/etc/network/interfaces.d/{{ item.name }}.cfg"
+ with_items:
+ - { name: "{{ mgmt_interface }}", vlan_id: "{{ (mgmt_vlan == 'native') | ternary(omit, mgmt_vlan) }}" }
+ - { name: "{{ public_interface }}", vlan_id: "{{ (public_vlan == 'native') | ternary(omit, public_vlan) }}" }
+ - { name: "br-mgmt", bridge_ports: "{{ mgmt_interface }}", network: "{{ host_info[inventory_hostname].mgmt }}" }
+ - { name: "br-vlan", bridge_ports: "{{ public_interface }}", network: "{{ host_info[inventory_hostname].public }}" }
+ loop_control:
+ label: "{{ item.name }}"
+ when:
+ - baremetal | bool == true
+ - "'opnfv' in inventory_hostname"
+
+- name: restart network service
+ shell: "/sbin/ip addr flush dev {{ item }}; /sbin/ifdown -a; /sbin/ifup -a"
+ async: 15
+ poll: 0
+ with_items:
+ - "{{ public_interface }}"
+ - "{{ mgmt_interface }}"
diff --git a/xci/playbooks/roles/bootstrap-host/tasks/network_redhat.yml b/xci/playbooks/roles/bootstrap-host/tasks/network_redhat.yml
new file mode 100644
index 00000000..288fdf65
--- /dev/null
+++ b/xci/playbooks/roles/bootstrap-host/tasks/network_redhat.yml
@@ -0,0 +1,32 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 SUSE LINUX GmbH.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+- name: "Configure networking on {{ inventory_hostname }}"
+ template:
+ src: "{{ installer_type }}/{{ ansible_os_family | lower }}.ifcfg.j2"
+ dest: "/etc/sysconfig/network-scripts/ifcfg-{{ item.name }}"
+ with_items:
+ - { name: "{{ ansible_local.xci.network.xci_interface }}" , bridge: "br-vlan" }
+ - { name: "{{ ansible_local.xci.network.xci_interface }}.10", bridge: "br-mgmt" , vlan_id: 10 }
+ - { name: "{{ ansible_local.xci.network.xci_interface }}.20", bridge: "br-storage", vlan_id: 20 }
+ - { name: "{{ ansible_local.xci.network.xci_interface }}.30", bridge: "br-vxlan" , vlan_id: 30 }
+ - { name: "br-vlan" , network: "{{ host_info[inventory_hostname].public }}" }
+ - { name: "br-mgmt" , network: "{{ host_info[inventory_hostname].mgmt }}" }
+ - { name: "br-storage", network: "{{ host_info[inventory_hostname].storage }}" }
+ - { name: "br-vxlan" , network: "{{ host_info[inventory_hostname].private }}" }
+ loop_control:
+ label: "{{ item.name }}"
+
+- name: restart network service
+ service:
+ name: network
+ state: restarted
+ async: 15
+ poll: 0
diff --git a/xci/playbooks/roles/bootstrap-host/tasks/network_suse.yml b/xci/playbooks/roles/bootstrap-host/tasks/network_suse.yml
new file mode 100644
index 00000000..a8f1bf59
--- /dev/null
+++ b/xci/playbooks/roles/bootstrap-host/tasks/network_suse.yml
@@ -0,0 +1,93 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 SUSE LINUX GmbH.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+- name: "Configure networking on {{ inventory_hostname }}"
+ template:
+ src: "{{ installer_type }}/{{ ansible_os_family | lower }}.interface.j2"
+ dest: "/etc/sysconfig/network/ifcfg-{{ item.name }}"
+ with_items:
+ - { name: "{{ ansible_local.xci.network.xci_interface }}" }
+ - { name: "{{ ansible_local.xci.network.xci_interface }}.10", vlan_id: 10 }
+ - { name: "{{ ansible_local.xci.network.xci_interface }}.30", vlan_id: 30 }
+ - { name: "{{ ansible_local.xci.network.xci_interface }}.20", vlan_id: 20 }
+ - { name: "br-mgmt", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}.10", network: "{{ host_info[inventory_hostname].mgmt }}" }
+ - { name: "br-vxlan", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}.30", network: "{{ host_info[inventory_hostname].private }}" }
+ - { name: "br-vlan", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}", network: "{{ host_info[inventory_hostname].public }}" }
+ - { name: "br-storage", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}.20", network: "{{ host_info[inventory_hostname].storage }}" }
+ loop_control:
+ label: "{{ item.name }}"
+ when: baremetal | bool != true
+
+- name: "Configure baremetal networking for blade: {{ inventory_hostname }}"
+ template:
+ src: "{{ installer_type }}/{{ ansible_os_family | lower }}.interface.j2"
+ dest: "/etc/sysconfig/network/ifcfg-{{ item.name }}"
+ with_items:
+ - { name: "{{ admin_interface }}", network: "{{ host_info[inventory_hostname].admin }}" }
+ - { name: "{{ mgmt_interface }}", vlan_id: "{{ (mgmt_vlan == 'native') | ternary(omit, mgmt_vlan) }}" }
+ - { name: "{{ storage_interface }}", vlan_id: "{{ (storage_vlan == 'native') | ternary(omit, storage_vlan) }}" }
+ - { name: "{{ public_interface }}", vlan_id: "{{ (public_vlan == 'native') | ternary(omit, public_vlan) }}" }
+ - { name: "{{ private_interface }}", vlan_id: "{{ (private_vlan == 'native') | ternary(omit, private_vlan) }}" }
+ - { name: "br-mgmt", bridge_ports: "{{ mgmt_interface }}", network: "{{ host_info[inventory_hostname].mgmt }}" }
+ - { name: "br-vxlan", bridge_ports: "{{ private_interface }}", network: "{{ host_info[inventory_hostname].private }}" }
+ - { name: "br-vlan", bridge_ports: "{{ public_interface }}", network: "{{ host_info[inventory_hostname].public }}" }
+ - { name: "br-storage", bridge_ports: "{{ storage_interface }}", network: "{{ host_info[inventory_hostname].storage }}" }
+ loop_control:
+ label: "{{ item.name }}"
+ when:
+ - baremetal | bool == true
+ - "'opnfv' not in inventory_hostname"
+
+- name: "Configure baremetal networking for VM: {{ inventory_hostname }}"
+ template:
+ src: "{{ installer_type }}/{{ ansible_os_family | lower }}.interface.j2"
+ dest: "/etc/sysconfig/network/ifcfg-{{ item.name }}"
+ with_items:
+ - { name: "{{ mgmt_interface }}", vlan_id: "{{ (mgmt_vlan == 'native') | ternary(omit, mgmt_vlan) }}" }
+ - { name: "{{ mgmt_interface }}.30", vlan_id: 30 }
+ - { name: "{{ mgmt_interface }}.20", vlan_id: 20 }
+ - { name: "{{ public_interface }}", vlan_id: "{{ (public_vlan == 'native') | ternary(omit, public_vlan) }}" }
+ - { name: "br-mgmt", bridge_ports: "{{ mgmt_interface }}", network: "{{ host_info[inventory_hostname].mgmt }}" }
+ - { name: "br-vlan", bridge_ports: "{{ public_interface }}", network: "{{ host_info[inventory_hostname].public }}" }
+ - { name: "br-vxlan", bridge_ports: "{{ mgmt_interface }}.30", network: "{{ host_info[inventory_hostname].private }}" }
+ - { name: "br-storage", bridge_ports: "{{ mgmt_interface }}.20", network: "{{ host_info[inventory_hostname].storage }}" }
+ loop_control:
+ label: "{{ item.name }}"
+ when:
+ - baremetal | bool == true
+ - "'opnfv' in inventory_hostname"
+
+- name: Add postup/postdown scripts on SUSE
+ copy:
+ src: "network-config-suse"
+ dest: "/etc/sysconfig/network/scripts/network-config-suse"
+ mode: 0755
+
+- name: Configure static DNS on SUSE
+ lineinfile:
+ regexp: '^NETCONFIG_DNS_STATIC_SERVERS=.*'
+ line: "NETCONFIG_DNS_STATIC_SERVERS=\"{{ host_info[inventory_hostname]['public']['dns'] | join(' ') }}\""
+ path: "/etc/sysconfig/network/config"
+ state: present
+ when: host_info[inventory_hostname]['public']['dns'] is defined
+
+- name: Configure routes on SUSE
+ template:
+ src: "{{ installer_type }}/{{ ansible_os_family | lower }}.routes.j2"
+ dest: "/etc/sysconfig/network/ifroute-{{ item.name }}"
+ with_items:
+ - { name: "br-vlan", gateway: "{{ host_info[inventory_hostname]['public']['gateway'] }}", route: "default" }
+
+- name: restart network service
+ service:
+ name: network
+ state: restarted
+ async: 15
+ poll: 0
diff --git a/xci/playbooks/roles/synchronize-time/tasks/main.yml b/xci/playbooks/roles/bootstrap-host/tasks/time.yml
index 8f94d33f..9eca769d 100644
--- a/xci/playbooks/roles/synchronize-time/tasks/main.yml
+++ b/xci/playbooks/roles/bootstrap-host/tasks/time.yml
@@ -10,14 +10,21 @@
- name: install chrony
package:
name: "chrony"
- state: latest
+ state: present
- name: restart chrony
service:
name: "{{ (ansible_pkg_mgr == 'apt') | ternary('chrony', 'chronyd') }}"
state: restarted
- name: synchronize time
shell: "chronyc -a 'burst 4/4' && chronyc -a makestep"
+ args:
+ executable: /bin/bash
+  changed_when: true
register: chrony_got_time
until: chrony_got_time.rc == 0
retries: 5
delay: 5
+ environment:
+ http_proxy: "{{ lookup('env','http_proxy') }}"
+ https_proxy: "{{ lookup('env','https_proxy') }}"
+ no_proxy: "{{ lookup('env','no_proxy') }}"
diff --git a/xci/playbooks/roles/bootstrap-host/templates/kubespray b/xci/playbooks/roles/bootstrap-host/templates/kubespray
new file mode 120000
index 00000000..f820fd11
--- /dev/null
+++ b/xci/playbooks/roles/bootstrap-host/templates/kubespray
@@ -0,0 +1 @@
+osa
\ No newline at end of file
diff --git a/xci/playbooks/roles/bootstrap-host/templates/osa/debian.interface.j2 b/xci/playbooks/roles/bootstrap-host/templates/osa/debian.interface.j2
new file mode 100644
index 00000000..2f976002
--- /dev/null
+++ b/xci/playbooks/roles/bootstrap-host/templates/osa/debian.interface.j2
@@ -0,0 +1,39 @@
+# {{ ansible_managed }}
+
+# Physical interface
+{% if item.bridge_ports is not defined %}
+auto {{ item.name }}
+iface {{ item.name }} inet manual
+{% if item.vlan_id is defined %}
+ vlan-raw-device {{ item.name|replace('.' ~ item.vlan_id, '') }}
+{% endif %}
+
+{% else %}
+auto {{ item.name }}
+iface {{ item.name }} inet static
+ bridge_stp off
+ bridge_waitport 0
+ bridge_fd 0
+ bridge_ports {{ item.bridge_ports }}
+{% if item.name == 'br-vlan' %}
+ # Create veth pair, don't bomb if already exists
+ pre-up ip link add br-vlan-veth type veth peer name eth12 || true
+ # Set both ends UP
+ pre-up ip link set br-vlan-veth up
+ pre-up ip link set eth12 up
+ # Delete veth pair on DOWN
+ post-down ip link del br-vlan-veth || true
+ bridge_ports br-vlan-veth
+{% endif %}
+{% if item.network is defined %}
+ address {{ item.network.address | ipaddr('address') }}
+ netmask {{ item.network.address | ipaddr('netmask') }}
+{% endif %}
+{% if item.network is defined and item.network.gateway is defined %}
+ gateway {{ item.network.gateway | ipaddr('address') }}
+{% endif %}
+{% if item.network is defined and item.network.dns is defined %}
+ dns-nameservers {{ item.network.dns | join(' ') }}
+{% endif %}
+
+{% endif %}
diff --git a/xci/playbooks/roles/bootstrap-host/templates/osa/redhat.interface.j2 b/xci/playbooks/roles/bootstrap-host/templates/osa/redhat.interface.j2
new file mode 100644
index 00000000..525686d9
--- /dev/null
+++ b/xci/playbooks/roles/bootstrap-host/templates/osa/redhat.interface.j2
@@ -0,0 +1,25 @@
+DEVICE={{ item.name }}
+NM_CONTROLLED=no
+ONBOOT=yes
+BOOTPROTO=none
+{% if item.vlan_id is defined %}
+VLAN=yes
+ETHERDEVICE={{ ansible_local.xci.network.xci_interface }}
+VLAN_ID={{ item.vlan_id }}
+{% endif %}
+{% if item.bridge is defined %}
+BRIDGE={{ item.bridge }}
+{% else %}
+TYPE=Bridge
+DELAY=0
+STP=off
+{% endif %}
+{% if item.network is defined %}
+IPADDR={{ item.network.address }}
+{% endif %}
+{% if item.network is defined and item.network.gateway is defined %}
+GATEWAY="{{ host_info[inventory_hostname]['public']['gateway'] | ipaddr('address') }}"
+{% endif %}
+{% if item.network is defined and item.network.dns is defined %}
+DNS="{{ host_info[inventory_hostname]['public']['dns'] | join(' ') }}"
+{% endif %}
diff --git a/xci/playbooks/roles/configure-network/templates/suse/suse.interface.j2 b/xci/playbooks/roles/bootstrap-host/templates/osa/suse.interface.j2
index ffa418d4..7c2929d6 100644
--- a/xci/playbooks/roles/configure-network/templates/suse/suse.interface.j2
+++ b/xci/playbooks/roles/bootstrap-host/templates/osa/suse.interface.j2
@@ -1,8 +1,7 @@
STARTMODE='auto'
BOOTPROTO='static'
{% if item.vlan_id is defined %}
-ETHERDEVICE={{ interface }}
-VLAN_ID={{ item.vlan_id }}
+ETHERDEVICE={{ item.name.split('.')[0] }}
{% endif %}
{% if item.bridge_ports is defined %}
BRIDGE='yes'
@@ -10,8 +9,8 @@ BRIDGE_FORWARDDELAY='0'
BRIDGE_STP=off
BRIDGE_PORTS={{ item.bridge_ports }}
{% endif %}
-{% if item.ip is defined %}
-IPADDR={{ item.ip }}
+{% if item.network is defined %}
+IPADDR={{ item.network.address }}
{% endif %}
PRE_UP_SCRIPT="compat:suse:network-config-suse"
POST_DOWN_SCRIPT="compat:suse:network-config-suse"
diff --git a/xci/playbooks/roles/bootstrap-host/templates/osa/suse.routes.j2 b/xci/playbooks/roles/bootstrap-host/templates/osa/suse.routes.j2
new file mode 100644
index 00000000..93941fad
--- /dev/null
+++ b/xci/playbooks/roles/bootstrap-host/templates/osa/suse.routes.j2
@@ -0,0 +1 @@
+{{ item.route }} {{ item.gateway | ipaddr('address') }}
diff --git a/xci/playbooks/roles/bootstrap-host/templates/osh b/xci/playbooks/roles/bootstrap-host/templates/osh
new file mode 120000
index 00000000..f820fd11
--- /dev/null
+++ b/xci/playbooks/roles/bootstrap-host/templates/osh
@@ -0,0 +1 @@
+osa
\ No newline at end of file
diff --git a/xci/playbooks/roles/bootstrap-host/vars/main.yml b/xci/playbooks/roles/bootstrap-host/vars/main.yml
new file mode 100644
index 00000000..1730ad57
--- /dev/null
+++ b/xci/playbooks/roles/bootstrap-host/vars/main.yml
@@ -0,0 +1,70 @@
+---
+# admin network information
+admin_mac: "{{ host_info[inventory_hostname].admin.mac_address }}"
+admin_interface: >-
+ {% for x in (ansible_interfaces | map('regex_replace', '-', '_') | map('regex_replace', '^', 'ansible_') | map('extract', hostvars[inventory_hostname]) | selectattr('macaddress','defined')) -%}
+ {%- if x.macaddress == admin_mac -%}
+ {%- if admin_vlan == 'native' -%}
+ {{ x.device }}
+ {%- else -%}
+ {{ x.device }}.{{ admin_vlan }}
+ {%- endif -%}
+ {%- endif -%}
+ {%- endfor -%}
+admin_vlan: "{{ host_info[inventory_hostname].admin.vlan }}"
+
+# mgmt network information
+mgmt_mac: "{{ host_info[inventory_hostname].mgmt.mac_address }}"
+mgmt_interface: >-
+ {% for x in (ansible_interfaces | map('regex_replace', '-', '_') | map('regex_replace', '^', 'ansible_') | map('extract', hostvars[inventory_hostname]) | selectattr('macaddress','defined')) -%}
+ {%- if x.macaddress == mgmt_mac -%}
+ {%- if mgmt_vlan == 'native' -%}
+ {{ x.device }}
+ {%- else -%}
+ {{ x.device }}.{{ mgmt_vlan }}
+ {%- endif -%}
+ {%- endif -%}
+ {%- endfor -%}
+mgmt_vlan: "{{ host_info[inventory_hostname].mgmt.vlan }}"
+
+# storage network information
+storage_mac: "{{ host_info[inventory_hostname].storage.mac_address }}"
+storage_interface: >-
+ {%- for x in (ansible_interfaces | map('regex_replace', '-', '_') | map('regex_replace', '^', 'ansible_') | map('extract', hostvars[inventory_hostname]) | selectattr('macaddress','defined')) -%}
+ {%- if x.macaddress == storage_mac -%}
+ {%- if storage_vlan == 'native' -%}
+ {{ x.device }}
+ {%- else -%}
+ {{ x.device }}.{{ storage_vlan }}
+ {%- endif -%}
+ {%- endif -%}
+ {%- endfor -%}
+storage_vlan: "{{ host_info[inventory_hostname].storage.vlan }}"
+
+# public vlan network information
+public_mac: "{{ host_info[inventory_hostname].public.mac_address }}"
+public_interface: >-
+ {%- for x in (ansible_interfaces | map('regex_replace', '-', '_') | map('regex_replace', '^', 'ansible_') | map('extract', hostvars[inventory_hostname]) | selectattr('macaddress','defined')) -%}
+ {%- if x.macaddress == public_mac -%}
+ {%- if public_vlan == 'native' -%}
+ {{ x.device }}
+ {%- else -%}
+ {{ x.device }}.{{ public_vlan }}
+ {%- endif -%}
+ {%- endif -%}
+ {%- endfor -%}
+public_vlan: "{{ host_info[inventory_hostname].public.vlan }}"
+
+# private vxlan network information
+private_mac: "{{ host_info[inventory_hostname].private.mac_address }}"
+private_interface: >-
+ {%- for x in (ansible_interfaces | map('regex_replace', '-', '_') | map('regex_replace', '^', 'ansible_') | map('extract', hostvars[inventory_hostname]) | selectattr('macaddress','defined')) -%}
+ {%- if x.macaddress == private_mac -%}
+ {%- if private_vlan == 'native' -%}
+ {{ x.device }}
+ {%- else -%}
+      {{ x.device }}.{{ private_vlan }}
+ {%- endif -%}
+ {%- endif -%}
+ {%- endfor -%}
+private_vlan: "{{ host_info[inventory_hostname].private.vlan }}"
diff --git a/xci/playbooks/roles/clone-repository/tasks/main.yml b/xci/playbooks/roles/clone-repository/tasks/main.yml
index a124003d..0ba80c0a 100644
--- a/xci/playbooks/roles/clone-repository/tasks/main.yml
+++ b/xci/playbooks/roles/clone-repository/tasks/main.yml
@@ -13,3 +13,7 @@
dest: "{{ dest }}"
version: "{{ version }}"
force: yes
+ environment:
+ http_proxy: "{{ lookup('env','http_proxy') }}"
+ https_proxy: "{{ lookup('env','https_proxy') }}"
+ no_proxy: "{{ lookup('env','no_proxy') }}"
diff --git a/xci/playbooks/roles/configure-network/tasks/main.yml b/xci/playbooks/roles/configure-network/tasks/main.yml
deleted file mode 100644
index 65abaa40..00000000
--- a/xci/playbooks/roles/configure-network/tasks/main.yml
+++ /dev/null
@@ -1,103 +0,0 @@
----
-# SPDX-license-identifier: Apache-2.0
-##############################################################################
-# Copyright (c) 2017 Ericsson AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-- name: ensure glean rules are removed
- file:
- path: "/etc/udev/rules.d/99-glean.rules"
- state: absent
-
-- block:
- - name: configure modules
- lineinfile:
- dest: /etc/modules
- state: present
- create: yes
- line: "8021q"
- - name: add modules
- modprobe:
- name: 8021q
- state: present
- - name: ensure interfaces.d folder is empty
- shell: "/bin/rm -rf /etc/network/interfaces.d/*"
- - name: ensure interfaces file is updated
- template:
- src: "{{ ansible_os_family | lower }}/{{ ansible_hostname }}.interface.j2"
- dest: "/etc/network/interfaces"
- - name: restart network service
- shell: "/sbin/ifconfig {{ interface }} 0 && /sbin/ifdown -a && /sbin/ifup -a"
- when: ansible_os_family | lower == "debian"
-
-- block:
- - name: Remove existing network configuration
- file:
- path: "/etc/sysconfig/network/{{ item }}"
- state: absent
- with_items:
- - "ifcfg-eth0"
- - "ifroute-eth0"
-
- - name: Configure networking on SUSE
- template:
- src: "{{ ansible_os_family | lower }}/suse.interface.j2"
- dest: "/etc/sysconfig/network/ifcfg-{{ item.name }}"
- with_items:
- - { name: "{{ interface }}" }
- - { name: "{{ interface }}.10", vlan_id: 10 }
- - { name: "{{ interface }}.30", vlan_id: 30 }
- - { name: "{{ interface }}.20", vlan_id: 20 }
- - { name: "br-mgmt", bridge_ports: "{{ interface }}.10", ip: "{{ host_info[inventory_hostname].MGMT_IP }}/22" }
- - { name: "br-vxlan", bridge_ports: "{{ interface }}.30", ip: "{{ host_info[inventory_hostname].VXLAN_IP }}/22" }
- - { name: "br-vlan", bridge_ports: "{{ interface }}", ip: "{{ host_info[inventory_hostname].VLAN_IP }}/24" }
- - { name: "br-storage", bridge_ports: "{{ interface }}.20", ip: "{{ host_info[inventory_hostname].STORAGE_IP }}/22" }
-
- - name: Add postup/postdown scripts on SUSE
- copy:
- src: "network-config-suse"
- dest: "/etc/sysconfig/network/scripts/network-config-suse"
- mode: 0755
-
- - name: Configure routes on SUSE
- template:
- src: "{{ ansible_os_family | lower }}/suse.routes.j2"
- dest: "/etc/sysconfig/network/ifroute-{{ item.name }}"
- with_items:
- - { name: "br-vlan", gateway: "192.168.122.1", route: "default" }
-
- - name: restart network service
- shell: "/usr/sbin/wicked ifreload all"
- when: ansible_os_family | lower == "suse"
-
-- block:
- - name: Configure networking on CentOS for interfaces
- template:
- src: "{{ ansible_os_family | lower }}/interface.ifcfg.j2"
- dest: "/etc/sysconfig/network-scripts/ifcfg-{{ item.name }}"
- with_items:
- - { name: "{{ interface }}" , bridge: "br-vlan" }
- - { name: "{{ interface }}.10", bridge: "br-mgmt" , vlan_id: 10 }
- - { name: "{{ interface }}.20", bridge: "br-storage", vlan_id: 20 }
- - { name: "{{ interface }}.30", bridge: "br-vxlan" , vlan_id: 30 }
- - name: Configure networking on CentOS for bridges
- template:
- src: "{{ ansible_os_family | lower }}/bridge.ifcfg.j2"
- dest: "/etc/sysconfig/network-scripts/ifcfg-{{ item.name }}"
- with_items:
- - { name: "br-vlan" , ip: "{{ host_info[inventory_hostname].VLAN_IP }}", prefix: 24 }
- - { name: "br-mgmt" , ip: "{{ host_info[inventory_hostname].MGMT_IP }}", prefix: 22 }
- - { name: "br-storage", ip: "{{ host_info[inventory_hostname].STORAGE_IP }}", prefix: 22 }
- - { name: "br-vxlan" , ip: "{{ host_info[inventory_hostname].VXLAN_IP }}", prefix: 22 }
- - name: Add default route through br-vlan
- lineinfile:
- path: "/etc/sysconfig/network-scripts/ifcfg-br-vlan"
- line: "GATEWAY=192.168.122.1"
- - name: Restart networking
- command: "systemctl restart network"
- - name: wait for the server to come back
- wait_for_connection:
- when: ansible_os_family | lower == "redhat"
diff --git a/xci/playbooks/roles/configure-network/templates/debian/compute00.interface.j2 b/xci/playbooks/roles/configure-network/templates/debian/compute00.interface.j2
deleted file mode 100644
index 6d6a3835..00000000
--- a/xci/playbooks/roles/configure-network/templates/debian/compute00.interface.j2
+++ /dev/null
@@ -1,75 +0,0 @@
-# {{ ansible_managed }}
-
-# The loopback network interface
-auto lo
-iface lo inet loopback
-
-# Physical interface
-auto {{ interface }}
-iface {{ interface }} inet manual
-
-# Container/Host management VLAN interface
-auto {{ interface }}.10
-iface {{ interface }}.10 inet manual
- vlan-raw-device {{ interface }}
-
-# OpenStack Networking VXLAN (tunnel/overlay) VLAN interface
-auto {{ interface }}.30
-iface {{ interface }}.30 inet manual
- vlan-raw-device {{ interface }}
-
-# Storage network VLAN interface
-auto {{ interface }}.20
-iface {{ interface }}.20 inet manual
- vlan-raw-device {{ interface }}
-
-# Container/Host management bridge
-auto br-mgmt
-iface br-mgmt inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports {{ interface }}.10
- address {{host_info[inventory_hostname].MGMT_IP}}
- netmask 255.255.252.0
-
-# compute1 VXLAN (tunnel/overlay) bridge config
-auto br-vxlan
-iface br-vxlan inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports {{ interface }}.30
- address {{host_info[inventory_hostname].VXLAN_IP}}
- netmask 255.255.252.0
-
-# OpenStack Networking VLAN bridge
-auto br-vlan
-iface br-vlan inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports {{ interface }}
- address {{host_info[inventory_hostname].VLAN_IP}}
- netmask 255.255.255.0
- gateway 192.168.122.1
- dns-nameserver 8.8.8.8 8.8.4.4
- offload-sg off
- # Create veth pair, don't bomb if already exists
- pre-up ip link add br-vlan-veth type veth peer name eth12 || true
- # Set both ends UP
- pre-up ip link set br-vlan-veth up
- pre-up ip link set eth12 up
- # Delete veth pair on DOWN
- post-down ip link del br-vlan-veth || true
- bridge_ports br-vlan-veth
-
-# OpenStack Storage bridge
-auto br-storage
-iface br-storage inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports {{ interface }}.20
- address {{host_info[inventory_hostname].STORAGE_IP}}
- netmask 255.255.252.0
diff --git a/xci/playbooks/roles/configure-network/templates/debian/compute01.interface.j2 b/xci/playbooks/roles/configure-network/templates/debian/compute01.interface.j2
deleted file mode 120000
index a74df1c2..00000000
--- a/xci/playbooks/roles/configure-network/templates/debian/compute01.interface.j2
+++ /dev/null
@@ -1 +0,0 @@
-compute00.interface.j2
\ No newline at end of file
diff --git a/xci/playbooks/roles/configure-network/templates/debian/controller00.interface.j2 b/xci/playbooks/roles/configure-network/templates/debian/controller00.interface.j2
deleted file mode 100644
index 5d42a5d2..00000000
--- a/xci/playbooks/roles/configure-network/templates/debian/controller00.interface.j2
+++ /dev/null
@@ -1,66 +0,0 @@
-# {{ ansible_managed }}
-
-# The loopback network interface
-auto lo
-iface lo inet loopback
-
-# Physical interface
-auto {{ interface }}
-iface {{ interface }} inet manual
-
-# Container/Host management VLAN interface
-auto {{ interface }}.10
-iface {{ interface }}.10 inet manual
- vlan-raw-device {{ interface }}
-
-# OpenStack Networking VXLAN (tunnel/overlay) VLAN interface
-auto {{ interface }}.30
-iface {{ interface }}.30 inet manual
- vlan-raw-device {{ interface }}
-
-# Storage network VLAN interface (optional)
-auto {{ interface }}.20
-iface {{ interface }}.20 inet manual
- vlan-raw-device {{ interface }}
-
-# Container/Host management bridge
-auto br-mgmt
-iface br-mgmt inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports {{ interface }}.10
- address {{host_info[inventory_hostname].MGMT_IP}}
- netmask 255.255.252.0
-
-# OpenStack Networking VXLAN (tunnel/overlay) bridge
-auto br-vxlan
-iface br-vxlan inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports {{ interface }}.30
- address {{host_info[inventory_hostname].VXLAN_IP}}
- netmask 255.255.252.0
-
-# OpenStack Networking VLAN bridge
-auto br-vlan
-iface br-vlan inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports {{ interface }}
- address {{host_info[inventory_hostname].VLAN_IP}}
- netmask 255.255.255.0
- gateway 192.168.122.1
- dns-nameserver 8.8.8.8 8.8.4.4
-
-# OpenStack Storage bridge
-auto br-storage
-iface br-storage inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports {{ interface }}.20
- address {{host_info[inventory_hostname].STORAGE_IP}}
- netmask 255.255.252.0
diff --git a/xci/playbooks/roles/configure-network/templates/debian/controller01.interface.j2 b/xci/playbooks/roles/configure-network/templates/debian/controller01.interface.j2
deleted file mode 120000
index e835d7ca..00000000
--- a/xci/playbooks/roles/configure-network/templates/debian/controller01.interface.j2
+++ /dev/null
@@ -1 +0,0 @@
-controller00.interface.j2 \ No newline at end of file
diff --git a/xci/playbooks/roles/configure-network/templates/debian/controller02.interface.j2 b/xci/playbooks/roles/configure-network/templates/debian/controller02.interface.j2
deleted file mode 120000
index e835d7ca..00000000
--- a/xci/playbooks/roles/configure-network/templates/debian/controller02.interface.j2
+++ /dev/null
@@ -1 +0,0 @@
-controller00.interface.j2 \ No newline at end of file
diff --git a/xci/playbooks/roles/configure-network/templates/debian/opnfv.interface.j2 b/xci/playbooks/roles/configure-network/templates/debian/opnfv.interface.j2
deleted file mode 100644
index 42826414..00000000
--- a/xci/playbooks/roles/configure-network/templates/debian/opnfv.interface.j2
+++ /dev/null
@@ -1,66 +0,0 @@
-# {{ ansible_managed }}
-
-# The loopback network interface
-auto lo
-iface lo inet loopback
-
-# Physical interface
-auto {{ interface }}
-iface {{ interface }} inet manual
-
-# Container/Host management VLAN interface
-auto {{ interface }}.10
-iface {{ interface }}.10 inet manual
- vlan-raw-device {{ interface }}
-
-# OpenStack Networking VXLAN (tunnel/overlay) VLAN interface
-auto {{ interface }}.30
-iface {{ interface }}.30 inet manual
- vlan-raw-device {{ interface }}
-
-# Storage network VLAN interface (optional)
-auto {{ interface }}.20
-iface {{ interface }}.20 inet manual
- vlan-raw-device {{ interface }}
-
-# Container/Host management bridge
-auto br-mgmt
-iface br-mgmt inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports {{ interface }}.10
- address {{host_info[inventory_hostname].MGMT_IP}}
- netmask 255.255.252.0
-
-# OpenStack Networking VXLAN (tunnel/overlay) bridge
-auto br-vxlan
-iface br-vxlan inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports {{ interface }}.30
- address {{ host_info[inventory_hostname].VXLAN_IP }}
- netmask 255.255.252.0
-
-# OpenStack Networking VLAN bridge
-auto br-vlan
-iface br-vlan inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports {{ interface }}
- address {{host_info[inventory_hostname].VLAN_IP}}
- netmask 255.255.255.0
- gateway 192.168.122.1
- dns-nameserver 8.8.8.8 8.8.4.4
-
-# OpenStack Storage bridge
-auto br-storage
-iface br-storage inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports {{ interface }}.20
- address {{host_info[inventory_hostname].STORAGE_IP}}
- netmask 255.255.252.0
diff --git a/xci/playbooks/roles/configure-network/templates/redhat/bridge.ifcfg.j2 b/xci/playbooks/roles/configure-network/templates/redhat/bridge.ifcfg.j2
deleted file mode 100644
index 06b5f177..00000000
--- a/xci/playbooks/roles/configure-network/templates/redhat/bridge.ifcfg.j2
+++ /dev/null
@@ -1,9 +0,0 @@
-DEVICE={{ item.name }}
-NM_CONTROLLED=no
-IPADDR={{ item.ip }}
-PREFIX={{ item.prefix }}
-ONBOOT=yes
-BOOTPROTO=none
-TYPE=Bridge
-DELAY=0
-STP=off
diff --git a/xci/playbooks/roles/configure-network/templates/redhat/interface.ifcfg.j2 b/xci/playbooks/roles/configure-network/templates/redhat/interface.ifcfg.j2
deleted file mode 100644
index b0dea0f5..00000000
--- a/xci/playbooks/roles/configure-network/templates/redhat/interface.ifcfg.j2
+++ /dev/null
@@ -1,10 +0,0 @@
-DEVICE={{ item.name }}
-NM_CONTROLLED=no
-ONBOOT=yes
-BOOTPROTO=none
-{% if item.vlan_id is defined %}
-VLAN=yes
-ETHERDEVICE={{ interface }}
-VLAN_ID={{ item.vlan_id }}
-{% endif %}
-BRIDGE={{ item.bridge }}
diff --git a/xci/playbooks/roles/configure-network/templates/suse/suse.routes.j2 b/xci/playbooks/roles/configure-network/templates/suse/suse.routes.j2
deleted file mode 100644
index 7c868447..00000000
--- a/xci/playbooks/roles/configure-network/templates/suse/suse.routes.j2
+++ /dev/null
@@ -1 +0,0 @@
-{{ item.route }} {{ item.gateway }}
diff --git a/xci/playbooks/roles/configure-nfs/tasks/main.yml b/xci/playbooks/roles/configure-nfs/tasks/main.yml
index 25e81496..3b349ad6 100644
--- a/xci/playbooks/roles/configure-nfs/tasks/main.yml
+++ b/xci/playbooks/roles/configure-nfs/tasks/main.yml
@@ -47,7 +47,7 @@
- name: Install the NFS server package
package:
name: "{{ nfs_server_package }}"
- state: latest
+ state: present
- name: restart NFS service
service:
diff --git a/xci/playbooks/roles/create-nodes/README.md b/xci/playbooks/roles/create-nodes/README.md
new file mode 100644
index 00000000..bf190296
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/README.md
@@ -0,0 +1,160 @@
+create-nodes
+================
+
+This role creates the all nodes required for the XCI deployment. In a baremetal
+deployment, it creates the OPNFV VM and provisions the physical servers. In a
+non-baremetal deployment, it creates the OPNFV VM and the rest of VMs used to
+deploy scenarios. It is based on the bifrost role:
+
+https://github.com/openstack/bifrost/tree/master/playbooks/roles/bifrost-create-vm-nodes
+
+It creates the VMs or provisions the physical servers based on the pdf and idf
+document which describes the characteristics of the VMs or physical servers.
+For more information check the spec:
+
+https://github.com/opnfv/releng-xci/blob/master/docs/specs/infra_manager.rst
+
+
+Flow
+----
+
+The script xci/infra/bifrost/scripts/bifrost-provision.sh will call the
+playbook that starts executing the role:
+
+xci-setup-nodes.yaml
+
+Note that at this stage the pdf and the opnfv_pdf_vm.yml are loaded.
+
+Some distro specific tasks related to variables are done and then the
+prepare_libvirt playbook is run. This playbook, as the name says,
+gets everything ready to run libvirt.
+
+After that, the nodes_json_data dictionary is initialized. This will collect
+the data and finally dump it all into the baremetal_json_file which will be
+read by bifrost in the subsequent role.
+
+The opnfv vm and the rest of vms get created using the xml libvirt template,
+which gets filled with the pdf and opnfv_pdf_vm.yml variables. If there is a
+baremetal deployment, the nodes_json_data gets filled in the
+baremetalhoststojson.yml playbook which basically reads the pdf info.
+
+Finally nodes_json_data is dumped.
+
+Requirements
+------------
+
+The following packages are required and ensured to be present:
+- libvirt-bin
+- qemu-utils
+- qemu-kvm
+- sgabios
+
+
+Warning
+-------
+
+- It is assumed that the opnfv VM characteristics are not described in the pdf
+but in a similar document called opnfv_pdf_vm.yml. There is also an idf
+document opnfv_idf_vm.yml
+
+- All references to csv from bifrost-create-vm-nodes were removed
+
+Role Variables
+--------------
+
+baremetal_json_file: Defaults to '/tmp/baremetal.json'. It contains the
+ required information for bifrost to configure the
+ VMs appropriately
+
+vm_disk_cache: Disk cache mode to use by VMs disk.
+ Defaults to shell variable 'VM_DISK_CACHE', or,
+ if that is not set, to 'writeback'.
+
+node_names: Space-separated names for nodes to be created.
+ It is taken from the hostnames variable in idf.
+ If not set, VM names will be autogenerated.
+ Note that independent on the number of names in this list,
+ at most 'test_vm_num_nodes' VMs will be created.
+
+vm_network: Name of the libvirt network to create the nodes on.
+ Defaults to shell variable 'VM_NET_BRIDGE', or,
+ if that is not set, to 'default'.
+
+node_storage_pool: Name of the libvirt storage pool to create disks
+ for VMs in.
+ Defaults to shell variable 'LIBVIRT_STORAGE_POOL', or,
+ if that is not set, to 'default'.
+ If absent, this pool will be created.
+
+node_storage_pool_path: Path used by the libvirt storage pool
+ 'node_storage_pool' if it has to be created.
+ Defaults to "/var/lib/libvirt/images".
+
+node_logdir: Folder where to store VM logs.
+ Defaults to "/var/log/libvirt/baremetal_logs".
+
+vm_emulator: Path to emulator executable used to define VMs in libvirt.
+ Defaults to "/usr/bin/qemu-system-x86_64".
+ Generally users should not need to modify this setting,
+ as it is OS-specific and is overwritten by
+ os/distribution-specific defaults in this role when needed.
+
+vm_libvirt_uri: URI to connect to libvirt for networks, storage and VM
+ related actions.
+ Defaults to shell variable 'LIBVIRT_CONNECT_URI', or,
+ if that is not set, to 'qemu:///system'.
+ Note that currently connecting to remote libvirt is
+ not tested and is unsupported.
+
+network_interface: Name of the bridge to create when creating
+ 'vm_network' libvirt network.
+ Defaults to "virbr0".
+ Name and default of this option are chosen to be the same
+ as in 'bifrost-ironic-install' role.
+
+opnfv_vm_network_ip: IP for the 'network_interface' bridge.
+ Defaults to '192.168.122.1'.
+ This setting is applied only when 'vm_network'
+ was absent and is created from scratch.
+
+node_network_netmask: Subnet mask for 'network_interface' bridge.
+ Defaults to '255.255.255.0'.
+ This setting is applied only when 'vm_network'
+ was absent and is created from scratch.
+
+Dependencies
+------------
+
+None at this time.
+
+Example Playbook
+----------------
+
+- hosts: localhost
+ connection: local
+ become: yes
+ gather_facts: yes
+ roles:
+ - role: create-vm-nodes
+
+License
+-------
+
+Copyright (c) 2018 SUSE Linux GmbH.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Author Information
+------------------
+
+mbuil@suse.com
diff --git a/xci/playbooks/roles/create-nodes/defaults/main.yml b/xci/playbooks/roles/create-nodes/defaults/main.yml
new file mode 100644
index 00000000..889f9c10
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/defaults/main.yml
@@ -0,0 +1,31 @@
+---
+# defaults file for bifrost-create-vm-nodes
+baremetal_json_file: '/tmp/baremetal.json'
+
+# We collect these parameters from the pdf
+vm_nic: "virtio"
+vm_disk_cache: unsafe
+node_groups: {}
+node_default_groups: "{{ lookup('env', 'DEFAULT_HOST_GROUPS').split() | default(['baremetal'], true) }}"
+
+network_bridge_admin: 'br-admin'
+network_bridge_mgmt: 'br-mgmt'
+
+vm_network_admin: "{{ lookup('env', 'VM_NET_BRIDGE') | default('admin', true) }}"
+vm_network_mgmt: "{{ lookup('env', 'VM_NET_BRIDGE_MGMT') | default('mgmt', true) }}"
+
+node_network_netmask: "255.255.255.0"
+
+node_storage_pool: "{{ lookup('env', 'LIBVIRT_STORAGE_POOL') | default('default', true) }}"
+node_storage_pool_path: "/var/lib/libvirt/images"
+node_logdir: "/var/log/libvirt/baremetal_logs"
+# NOTE(pas-ha) next two are generic values for most OSes, overridden by distro-specifc vars
+vm_emulator: "/usr/bin/qemu-system-x86_64"
+# NOTE(pas-ha) not really tested with non-local qemu connections
+vm_libvirt_uri: "{{ lookup('env', 'LIBVIRT_CONNECT_URI') | default('qemu:///system', true) }}"
+
+opnfv_image_path: "/var/lib/libvirt/images"
+
+vms_to_create: "{{ (baremetal | bool) | ternary([opnfv_vm_pdf], [opnfv_vm_pdf] + nodes) }}"
+baremetal_nodes: "{{ (baremetal | bool) | ternary(nodes, omit) }}"
+libvirt_networks: "{{ (baremetal | bool) | ternary([vm_network_admin,vm_network_mgmt],[vm_network_admin]) }}"
diff --git a/xci/playbooks/roles/create-nodes/files/virtualbmc.conf b/xci/playbooks/roles/create-nodes/files/virtualbmc.conf
new file mode 100644
index 00000000..f8351dc1
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/files/virtualbmc.conf
@@ -0,0 +1,3 @@
+[log]
+logfile: /var/log/vbmc.log
+debug: true
diff --git a/xci/playbooks/roles/create-nodes/tasks/baremetalhoststojson.yml b/xci/playbooks/roles/create-nodes/tasks/baremetalhoststojson.yml
new file mode 100644
index 00000000..ef6ec345
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/tasks/baremetalhoststojson.yml
@@ -0,0 +1,91 @@
+---
+# Copyright 2018, SUSE Linux GmbH
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This playbook builds the json file with information about the baremetal nodes
+# which is read by ironic to start the pxe booting
+
+
+- name: BAREMETAL - Create file for static ip
+ file:
+ path: /tmp/baremetalstaticips
+ state: touch
+ group: root
+ owner: root
+ mode: 0644
+
+- name: "Generating the json describing baremetal nodes"
+ block:
+
+ - set_fact:
+ node_name: "{{ idf.kubespray.hostnames[item.name] }}"
+ when: installer_type == "kubespray"
+
+ - set_fact:
+ node_name: "{{ idf.osa.hostnames[item.name] }}"
+ when: installer_type == "osa"
+
+ - set_fact:
+ node_name: "{{ idf.osh.hostnames[item.name] }}"
+ when: installer_type == "osh"
+
+ - set_fact:
+ host_group: "{{ node_default_groups }}"
+
+ - set_fact:
+ host_group: "{{ node_default_groups | union(node_groups[node_name]) }}"
+ when: node_groups[node_name] is defined
+
+ - name: BAREMETAL - Fetch the ip
+ set_fact:
+ admin_ip: "{{ item.interfaces[idf.net_config.admin.interface].address }}"
+
+ - name: BAREMETAL - Fetch the mac
+ set_fact:
+ admin_mac: "{{ item.interfaces[idf.net_config.admin.interface].mac_address }}"
+
+ - name: BAREMETAL - set the json entry for baremetal nodes
+ set_fact:
+ node_data:
+ name: "{{ node_name }}"
+ uuid: "{{ node_name | to_uuid }}"
+ host_groups: "{{ host_group }}"
+ driver: "ipmi"
+ driver_info:
+ power:
+ ipmi_address: "{{ item.remote_management.address }}"
+ ipmi_port: "{{ virtual_ipmi_port| default('623') }}"
+ ipmi_username: "{{ item.remote_management.user }}"
+ ipmi_password: "{{ item.remote_management.pass }}"
+ nics:
+ - mac: "{{ admin_mac }}"
+ ansible_ssh_host: "{{ admin_ip }}"
+ ipv4_address: "{{ admin_ip }}"
+ properties:
+ cpu_arch: "{{ item.node.arch }}"
+ ram: "{{ item.node.memory.rstrip('G') }}"
+ cpus: "{{ item.node.cpus }}"
+ disk_size: "{{ item.disks[0].disk_capacity.rstrip('G') }}"
+
+ - name: BAREMETAL - Static ip config for dnsmasq
+ lineinfile:
+ path: /tmp/baremetalstaticips
+ state: present
+ line: '{{ admin_mac }},{{ admin_ip }}'
+
+ - name: BAREMETAL - add created node info
+ set_fact:
+ nodes_json_data: "{{ nodes_json_data | combine({node_name: node_data}) }}"
+
+ when: (num_nodes | int) > (nodes_json_data | length | int) + 1
diff --git a/xci/playbooks/roles/create-nodes/tasks/create_vm.yml b/xci/playbooks/roles/create-nodes/tasks/create_vm.yml
new file mode 100644
index 00000000..ac55bf32
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/tasks/create_vm.yml
@@ -0,0 +1,198 @@
+---
+- name: "Creating VM"
+ block:
+ - set_fact:
+ vm_name: "{{ idf.kubespray.hostnames[item.1.name] }}"
+ when: installer_type == "kubespray"
+
+ - set_fact:
+ vm_name: "{{ idf.osa.hostnames[item.1.name] }}"
+ when: installer_type == "osa"
+
+ - set_fact:
+ vm_name: "{{ idf.osh.hostnames[item.1.name] }}"
+ when: installer_type == "osh"
+
+ - set_fact:
+ vm_log_file: "{{ node_logdir }}/{{ vm_name }}_console.log"
+ vm_host_group: "{{ node_default_groups }}"
+
+ - set_fact:
+ vm_host_group: "{{ node_default_groups | union(node_groups[vm_name]) }}"
+ when: node_groups[vm_name] is defined
+
+ - name: set prealloc arg for Debian
+ set_fact:
+ prealloc: "--prealloc-metadata"
+ when:
+ - ansible_os_family == 'Debian'
+ - vm_libvirt_uri == 'qemu:///system'
+
+ - name: list info on pools
+ virt_pool:
+ command: facts
+ uri: "{{ vm_libvirt_uri }}"
+
+ - name: list existing vms
+ virt:
+ command: list_vms
+ register: existing_vms
+
+ - block:
+ - name: Check if volume exists
+ stat:
+ path: "{{ opnfv_image_path }}/{{ vm_name }}.qcow2"
+ register: _vm_volume_prepared
+
+ - name: Resize opnfv VM image to {{ item.1.disks[0].disk_capacity }}
+ command: "qemu-img resize {{ opnfv_image_path }}/opnfv.qcow2 {{ item.1.disks[0].disk_capacity }}"
+ when:
+ - vm_name == 'opnfv'
+ - _vm_volume_prepared.stat.exists
+
+ # NOTE(pas-ha) Ansible still lacks modules to operate on libvirt volumes
+ # mbuil: Assuming there is only one disk [0]
+ - name: create volume for vm
+ command: >
+ virsh --connect {{ vm_libvirt_uri }}
+ vol-create-as {{ node_storage_pool }} {{ vm_name }}.qcow2
+ {{ item.1.disks[0].disk_capacity }}
+ --format qcow2 {{ prealloc|default("") }}
+ when:
+ - not _vm_volume_prepared.stat.exists
+ - (vm_name + '.qcow2') not in ansible_libvirt_pools[node_storage_pool].volumes
+
+ - name: set path to the volume created
+ set_fact:
+ vm_volume_path: "{{ ansible_libvirt_pools[node_storage_pool].path }}/{{ vm_name }}.qcow2"
+
+ - name: pre-touch the vm volume
+ file:
+ state: touch
+ path: "{{ vm_volume_path }}"
+ when: vm_libvirt_uri == 'qemu:///system'
+
+ # NOTE(TheJulia): CentOS default installs with an XFS root, and chattr
+ # fails to set +C on XFS. This could be more elegant, however the use
+ # case is for CI testing.
+ - name: set copy-on-write for volume on non-CentOS systems
+ command: chattr +C {{ vm_volume_path }}
+ ignore_errors: yes
+ when:
+ - ansible_distribution != 'CentOS'
+ - vm_libvirt_uri == 'qemu:///system'
+
+ # Fetches the xml descriptor from the template
+ - name: create_vm
+ virt:
+ command: define
+ name: "{{ vm_name }}"
+ uri: "{{ vm_libvirt_uri }}"
+ xml: "{{ lookup('template', 'vm.xml.j2') }}"
+
+ rescue:
+ - name: "Execute `dmesg` to collect debugging output should VM creation fail."
+ command: dmesg
+ - name: >
+ "Execute `virsh capabilities` to collect debugging output
+ should VM creation fail."
+ command: virsh capabilities
+ - name: "Abort due to failed VM creation"
+ fail: >
+ msg="VM creation step failed, please review dmesg
+ output for additional details"
+ when: vm_name not in existing_vms.list_vms
+
+ # TODO(pas-ha) replace 'command: vbmc ...' tasks
+ # with a custom Ansible module using vbmc Python API
+ - name: get list of nodes from virtualbmc
+ command: vbmc list
+ environment:
+ PATH: "{{ lookup('env', 'XCI_VENV') }}/bin"
+ register: vbmc_list
+
+ - debug: var=vbmc_list
+
+ # NOTE(NobodyCam): Space at the end of the find clause is required for proper matching.
+ - name: delete vm from virtualbmc if it is there
+ command: vbmc delete {{ vm_name }}
+ environment:
+ PATH: "{{ lookup('env', 'XCI_VENV') }}/bin"
+ when: vbmc_list.stdout.find(vm_name) != -1
+
+ - set_fact:
+ virtual_ipmi_port: "{{ (vm_ipmi_port_start|default(623) | int ) + (item.0 | int) }}"
+
+ - name: plug vm into vbmc
+ command: vbmc add {{ vm_name }} --libvirt-uri {{ vm_libvirt_uri }} --port {{ virtual_ipmi_port }}
+ environment:
+ PATH: "{{ lookup('env', 'XCI_VENV') }}/bin"
+
+ - name: start virtualbmc
+ command: vbmc start {{ vm_name }}
+ environment:
+ PATH: "{{ lookup('env', 'XCI_VENV') }}/bin"
+
+ - name: get list of nodes from virtualbmc
+ command: vbmc list
+ environment:
+ PATH: "{{ lookup('env', 'XCI_VENV') }}/bin"
+ register: vbmc_list2
+
+ - debug: var=vbmc_list2
+
+ - name: get XML of the vm
+ virt:
+ name: "{{ vm_name }}"
+ command: get_xml
+ register: vm_xml
+
+ - name: Fetch the index for admin network
+ set_fact:
+ admin_index: "{{ (vm_name == 'opnfv') | ternary(opnfv_vm_idf.net_config.admin.interface, idf.net_config.admin.interface) | int }}"
+
+ - name: Fetch the ip
+ set_fact:
+ vm_ip: "{{ item.1.interfaces[admin_index | int].address }}"
+
+ # Assumes there is only a single NIC per VM
+ - name: get MAC from vm XML
+ set_fact:
+ vm_mac: "{{ (vm_xml.get_xml | regex_findall(\"<mac address='.*'/>\") | first).split('=') | last | regex_replace(\"['/>]\", '') }}"
+
+ # NOTE(pas-ha) using default username and password set by virtualbmc - "admin" and "password" respectively
+ # see vbmc add --help
+ - name: set the json entry for vm
+ set_fact:
+ vm_data:
+ name: "{{ vm_name }}"
+ uuid: "{{ vm_name | to_uuid }}"
+ host_groups: "{{ vm_host_group }}"
+ driver: "ipmi"
+ driver_info:
+ power:
+ ipmi_address: "192.168.122.1"
+ ipmi_port: "{{ virtual_ipmi_port }}"
+ ipmi_username: "{{ item.1.remote_management.user }}"
+ ipmi_password: "{{ item.1.remote_management.pass }}"
+ nics:
+ - mac: "{{ vm_mac }}"
+ ansible_ssh_host: "{{ vm_ip }}"
+ ipv4_address: "{{ vm_ip }}"
+ properties:
+ cpu_arch: "{{ item.1.node.arch }}"
+ ram: "{{ item.1.node.memory.rstrip('G') }}"
+ cpus: "{{ item.1.node.cpus }}"
+ disk_size: "{{ item.1.disks[0].disk_capacity.rstrip('G') }}"
+
+ - name: add created vm info
+ set_fact:
+ nodes_json_data: "{{ nodes_json_data | combine({vm_name: vm_data}) }}"
+ when: vm_name != 'opnfv'
+
+ - name: Record OPNFV VM ip
+ set_fact:
+ opnfv_vm_ip: "{{ vm_ip }}"
+ when: vm_name == 'opnfv'
+
+ when: (num_nodes | int) > (item.0 | int)
diff --git a/xci/playbooks/roles/create-nodes/tasks/download_opnfvimage.yml b/xci/playbooks/roles/create-nodes/tasks/download_opnfvimage.yml
new file mode 100644
index 00000000..a227bc4f
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/tasks/download_opnfvimage.yml
@@ -0,0 +1,32 @@
+---
+- name: Download the {{ xci_distro }} image checksum file
+ get_url:
+ dest: "{{ xci_cache }}/deployment_image.qcow2.sha256.txt"
+ force: no
+ url: http://artifacts.opnfv.org/releng/xci/images/{{ xci_distro }}.qcow2.sha256.txt
+ timeout: 3000
+- name: Extract checksum
+ shell: awk '{print $1}' "{{ xci_cache }}/deployment_image.qcow2.sha256.txt"
+ register: _image_checksum
+- fail:
+ msg: "Failed to get image checksum"
+ when: _image_checksum == ''
+- set_fact:
+ image_checksum: "{{ _image_checksum.stdout }}"
+- name: Download the {{ xci_distro }} image file
+ get_url:
+ url: http://artifacts.opnfv.org/releng/xci/images/{{ xci_distro }}.qcow2
+ checksum: "sha256:{{ image_checksum }}"
+ timeout: 3000
+ dest: "{{ xci_cache }}/deployment_image.qcow2"
+ force: no
+- name: Set correct mode for deployment_image.qcow2 file
+ file:
+ path: "{{ xci_cache }}/deployment_image.qcow2"
+ mode: '0755'
+ owner: 'root'
+ group: 'root'
+
+- name: Create copy of original deployment image
+ shell: "cp {{ xci_cache }}/deployment_image.qcow2 {{ opnfv_image_path }}/opnfv.qcow2"
+ become: yes
diff --git a/xci/playbooks/roles/create-nodes/tasks/main.yml b/xci/playbooks/roles/create-nodes/tasks/main.yml
new file mode 100644
index 00000000..607ac494
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/tasks/main.yml
@@ -0,0 +1,54 @@
+---
+# baremetal_json_file could be the file coming from pdf/idf
+
+- name: "Load distribution defaults"
+ include_vars: "{{ ansible_os_family | lower }}.yml"
+
+# From the previous list
+- name: "Install required packages"
+ package:
+ name: "{{ required_packages }}"
+ update_cache: "{{ (ansible_pkg_mgr in ['apt', 'zypper']) | ternary('yes', omit) }}"
+ state: present
+
+- include_tasks: prepare_libvirt.yml
+ with_items: "{{ libvirt_networks }}"
+
+- include_tasks: download_opnfvimage.yml
+
+- name: create placeholder var for vm entries in JSON format
+ set_fact:
+ nodes_json_data: {}
+
+# First we create the opnfv_vm
+- include_tasks: create_vm.yml
+ with_indexed_items: "{{ vms_to_create }}"
+
+- include_tasks: baremetalhoststojson.yml
+ with_items: "{{ baremetal_nodes }}"
+
+- name: Start the opnfv vm
+ virt:
+ command: start
+ name: opnfv
+
+- name: remove previous baremetal data file
+ file:
+ state: absent
+ path: "{{ baremetal_json_file }}"
+
+# We got nodes_json_data from the create_vm playbook
+- name: write to baremetal json file
+ copy:
+ dest: "{{ baremetal_json_file }}"
+ content: "{{ nodes_json_data | to_nice_json }}"
+
+- name: >
+ "Set file permissions such that the baremetal data file
+ can be read by the user executing Ansible"
+ file:
+ path: "{{ baremetal_json_file }}"
+ owner: "{{ ansible_env.SUDO_USER }}"
+ when: >
+ ansible_env.SUDO_USER is defined and
+ baremetal_json_file != ""
diff --git a/xci/playbooks/roles/create-nodes/tasks/prepare_libvirt.yml b/xci/playbooks/roles/create-nodes/tasks/prepare_libvirt.yml
new file mode 100644
index 00000000..06afaec3
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/tasks/prepare_libvirt.yml
@@ -0,0 +1,139 @@
+---
+- name: "Restart libvirt service"
+ service: name="{{libvirt_service_name}}" state=restarted
+
+# NOTE(Shrews) We need to enable ip forwarding for the libvirt bridge to
+# operate properly with dnsmasq. This should be done before starting dnsmasq.
+- name: "Enable IP forwarding in sysctl"
+ sysctl:
+ name: "net.ipv4.ip_forward"
+ value: 1
+ sysctl_set: yes
+ state: present
+ reload: yes
+
+# NOTE(Shrews) Ubuntu packaging+apparmor issue prevents libvirt from loading
+# the ROM from /usr/share/misc.
+- name: "Look for sgabios in {{ sgabios_dir }}"
+ stat: path={{ sgabios_dir }}/sgabios.bin
+ register: test_sgabios_qemu
+
+- name: "Look for sgabios in /usr/share/misc"
+ stat: path=/usr/share/misc/sgabios.bin
+ register: test_sgabios_misc
+
+- name: "Place sgabios.bin"
+ command: cp /usr/share/misc/sgabios.bin /usr/share/qemu/sgabios.bin
+ when: >
+ test_sgabios_qemu == false and
+ test_sgabios_misc == true
+
+# NOTE(TheJulia): In order to prevent conflicts, stop
+# dnsmasq to prevent conflicts with libvirt restarting.
+# TODO(TheJulia): We shouldn't need to do this, but the
+# libvirt dhcp instance conflicts withour specific config
+# and taking this path allows us to not refactor dhcp at
+# this moment. Our DHCP serving should be refactored
+# so we don't need to do this.
+- name: "Stop default dnsmasq service"
+ service:
+ name: dnsmasq
+ state: stopped
+ ignore_errors: true
+
+# NOTE(TheJulia): Seems if you test in a VM, this might
+# be helpful if your installed your host originally
+# with the default 192.168.122/0/24 network
+- name: destroy libvirt network
+ virt_net:
+ name: "{{ item }}"
+ state: absent
+ uri: "{{ vm_libvirt_uri }}"
+
+# Ubuntu creates a default network when installing libvirt.
+# This network uses the 192.168.122.0/24 range and thus
+# conflicts with our admin network
+- name: destroy libvirt network
+ virt_net:
+ name: "default"
+ state: absent
+ uri: "{{ vm_libvirt_uri }}"
+
+- name: ensure libvirt network is present
+ virt_net:
+ name: "{{ item }}"
+ state: present
+ xml: "{{ lookup('template', 'net-'+item+'.xml.j2') }}"
+ uri: "{{ vm_libvirt_uri }}"
+
+- name: find facts on libvirt networks
+ virt_net:
+ command: facts
+ uri: "{{ vm_libvirt_uri }}"
+
+- name: "Delete network interface if virtual network is not active"
+ command: ip link del {{ ansible_libvirt_networks[item].bridge }}
+ when:
+ - ansible_libvirt_networks[item].state != 'active'
+ - vm_libvirt_uri == 'qemu:///system'
+ ignore_errors: yes
+
+- name: set libvirt network to autostart
+ virt_net:
+ name: "{{ item }}"
+ autostart: yes
+ uri: "{{ vm_libvirt_uri }}"
+
+- name: ensure libvirt network is running
+ virt_net:
+ name: "{{ item }}"
+ state: active
+ uri: "{{ vm_libvirt_uri }}"
+
+- name: get libvirt network status
+ virt_net:
+ name: "{{ item }}"
+ command: status
+ uri: "{{ vm_libvirt_uri }}"
+ register: test_vm_net_status
+
+- name: fail if libvirt network is not active
+ assert:
+ that: test_vm_net_status.status == 'active'
+
+- name: define a libvirt pool if not set
+ virt_pool:
+ name: "{{ node_storage_pool }}"
+ state: present
+ uri: "{{ vm_libvirt_uri }}"
+ xml: "{{ lookup('template', 'pool_dir.xml.j2') }}"
+
+- name: ensure libvirt pool is running
+ virt_pool:
+ name: "{{ node_storage_pool }}"
+ state: active
+ autostart: yes
+ uri: "{{ vm_libvirt_uri }}"
+
+- name: create dir for bm logs
+ file:
+ state: directory
+ path: "{{ node_logdir }}"
+ recurse: yes
+ mode: "0755"
+
+- name: install virtualbmc
+ pip:
+ name: virtualbmc
+ version: 1.5 # >1.3 needs zmq dependency.
+ virtualenv: "{{ lookup('env', 'XCI_VENV') }}"
+
+- name: Create directory for the config of vbmc
+ file:
+ path: /etc/virtualbmc
+ state: directory
+
+- name: Place the config for virtualbmc
+ copy:
+ src: virtualbmc.conf
+ dest: /etc/virtualbmc/virtualbmc.conf
diff --git a/xci/playbooks/roles/create-nodes/templates/net-admin.xml.j2 b/xci/playbooks/roles/create-nodes/templates/net-admin.xml.j2
new file mode 100644
index 00000000..aedbbeb7
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/templates/net-admin.xml.j2
@@ -0,0 +1,14 @@
+<network>
+ <name>{{ item }}</name>
+ <forward mode='nat'>
+ <nat>
+ <port start='1024' end='65535'/>
+ </nat>
+ </forward>
+ <bridge name='br-{{ item }}' stp='on' delay='0'/>
+ <ip address='{{ opnfv_vm_pdf.interfaces[opnfv_vm_idf.net_config.admin.interface].gateway }}' netmask='255.255.255.0'>
+ <dhcp>
+ <host mac="{{ opnfv_vm_pdf.interfaces[opnfv_vm_idf.net_config.admin.interface].mac_address }}" ip="{{ opnfv_vm_pdf.interfaces[opnfv_vm_idf.net_config.admin.interface].address }}"/>
+ </dhcp>
+ </ip>
+</network>
diff --git a/xci/playbooks/roles/create-nodes/templates/net-mgmt.xml.j2 b/xci/playbooks/roles/create-nodes/templates/net-mgmt.xml.j2
new file mode 100644
index 00000000..4a9964c3
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/templates/net-mgmt.xml.j2
@@ -0,0 +1,11 @@
+<network>
+ <name>{{ item }}</name>
+ <forward mode='route'>
+ </forward>
+ <bridge name='br-{{ item }}' stp='on' delay='0'/>
+ <ip address='{{ opnfv_vm_pdf.interfaces[opnfv_vm_idf.net_config.mgmt.interface].gateway }}' netmask='255.255.255.0'>
+ <dhcp>
+ <host mac="{{ opnfv_vm_pdf.interfaces[opnfv_vm_idf.net_config.mgmt.interface].mac_address }}" ip="{{ opnfv_vm_pdf.interfaces[opnfv_vm_idf.net_config.mgmt.interface].address }}"/>
+ </dhcp>
+ </ip>
+</network>
diff --git a/xci/playbooks/roles/create-nodes/templates/net.xml.j2 b/xci/playbooks/roles/create-nodes/templates/net.xml.j2
new file mode 100644
index 00000000..7e372ffe
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/templates/net.xml.j2
@@ -0,0 +1,14 @@
+<network>
+ <name>{{ vm_network }}</name>
+ <forward mode='nat'>
+ <nat>
+ <port start='1024' end='65535'/>
+ </nat>
+ </forward>
+ <bridge name='{{ network_interface }}' stp='on' delay='0'/>
+ <ip address='{{ opnfv_vm_pdf.interfaces[opnfv_vm_idf.net_config.admin.interface].gateway }}' netmask='{{ node_network_netmask }}'>
+ <dhcp>
+ <host mac="{{ opnfv_vm_pdf.interfaces[opnfv_vm_idf.net_config.admin.interface].mac_address }}" ip="{{ opnfv_vm_pdf.interfaces[opnfv_vm_idf.net_config.admin.interface].address }}"/>
+ </dhcp>
+ </ip>
+</network>
diff --git a/xci/playbooks/roles/create-nodes/templates/pool_dir.xml.j2 b/xci/playbooks/roles/create-nodes/templates/pool_dir.xml.j2
new file mode 100644
index 00000000..e4645deb
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/templates/pool_dir.xml.j2
@@ -0,0 +1,7 @@
+<pool type='dir'>
+ <name>{{ node_storage_pool }}</name>
+ <target>
+ <path>{{ node_storage_pool_path }}</path>
+ </target>
+</pool>
+
diff --git a/xci/playbooks/roles/create-nodes/templates/vm.xml.j2 b/xci/playbooks/roles/create-nodes/templates/vm.xml.j2
new file mode 100644
index 00000000..9fad42b8
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/templates/vm.xml.j2
@@ -0,0 +1,69 @@
+<domain type='{{ vm_domain_type }}'>
+ <name>{{ vm_name }}</name>
+ <memory unit='GiB'>{{ item.1.node.memory.rstrip('G') }}</memory>
+ <vcpu>{{ item.1.node.cpus }}</vcpu>
+ <os>
+ <type arch='{{ item.1.node.arch }}' machine='{{ item.1.node.model }}'>hvm</type>
+ {%- if 'opnfv' in vm_name -%}
+ <boot dev='hd'/>
+ {%- else -%}
+ <boot dev='network'/>
+ {% endif -%}
+ <bootmenu enable='no'/>
+ <bios useserial='yes' rebootTimeout='10000'/>
+ </os>
+ <features>
+ <acpi/>
+ <apic/>
+ <pae/>
+ </features>
+ <cpu mode='{{ item.1.node.cpu_cflags }}'>
+ <model fallback='allow'/>
+ </cpu>
+ <clock offset='utc'/>
+ <on_poweroff>destroy</on_poweroff>
+ <on_reboot>restart</on_reboot>
+ <on_crash>restart</on_crash>
+ <devices>
+ <emulator>{{ vm_emulator }}</emulator>
+ <disk type='file' device='disk'>
+ <driver name='qemu' type='qcow2' cache='{{ vm_disk_cache }}'/>
+ <source file='{{ vm_volume_path }}'/>
+ <target dev='vda' bus='virtio'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
+ </disk>
+ <controller type='ide' index='0'>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x1'/>
+ </controller>
+ <interface type='network'>
+ <source network='{{ vm_network_admin }}'/>
+ <model type='{{ vm_nic }}'/>
+ {%- if vm_name == 'opnfv' -%}
+ <mac address='{{ item.1.interfaces[opnfv_vm_idf.net_config.admin.interface].mac_address }}'/>
+ {%- else -%}
+ <mac address='{{ item.1.interfaces[idf.net_config.admin.interface].mac_address }}'/>
+ {%- endif -%}
+ </interface>
+ {%- if baremetal | bool -%}
+ <interface type='network'>
+ <source network='{{ vm_network_mgmt }}'/>
+ <model type='{{ vm_nic }}'/>
+ <mac address='{{ item.1.interfaces[opnfv_vm_idf.net_config.mgmt.interface].mac_address }}'/>
+ </interface>
+ {%- endif -%}
+ <input type='mouse' bus='ps2'/>
+ <graphics type='vnc' port='-1' autoport='yes'/>
+ <video>
+ <model type='cirrus' vram='9216' heads='1'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
+ </video>
+ <serial type='file'>
+ <source path='{{ vm_log_file }}'/>
+ <target port='1'/>
+ <alias name='serial1'/>
+ </serial>
+ <memballoon model='virtio'>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
+ </memballoon>
+ </devices>
+</domain>
diff --git a/xci/playbooks/roles/create-nodes/vars/debian.yml b/xci/playbooks/roles/create-nodes/vars/debian.yml
new file mode 100644
index 00000000..bcfc47d5
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/vars/debian.yml
@@ -0,0 +1,13 @@
+---
+sgabios_dir: /usr/share/qemu/
+libvirt_service_name: libvirt-bin
+required_packages:
+ - libvirt-bin
+ - qemu-utils
+ - qemu-kvm
+ - qemu-system-x86
+ - sgabios
+ - pkg-config
+ - libvirt-dev
+ - python-lxml
+ - python-libvirt
diff --git a/xci/playbooks/roles/create-nodes/vars/redhat.yml b/xci/playbooks/roles/create-nodes/vars/redhat.yml
new file mode 100644
index 00000000..2b285110
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/vars/redhat.yml
@@ -0,0 +1,17 @@
+---
+sgabios_dir: /usr/share/sgabios/
+libvirt_service_name: libvirtd
+required_packages:
+ - qemu-img
+ - qemu-kvm-tools
+ - qemu-kvm
+ - qemu-kvm-common
+ - qemu-system-x86
+ - sgabios-bin
+ - libvirt
+ - libvirt-client
+ - libvirt-daemon
+ - pkgconfig
+ - libvirt-devel
+ - libvirt-python
+ - python-lxml
diff --git a/xci/playbooks/roles/create-nodes/vars/suse.yml b/xci/playbooks/roles/create-nodes/vars/suse.yml
new file mode 100644
index 00000000..7e4c41ef
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/vars/suse.yml
@@ -0,0 +1,15 @@
+---
+sgabios_dir: /usr/share/sgabios/
+libvirt_service_name: libvirtd
+required_packages:
+ - qemu-tools
+ - qemu-kvm
+ - qemu-x86
+ - qemu-sgabios
+ - libvirt
+ - libvirt-client
+ - libvirt-daemon
+ - pkg-config
+ - libvirt-devel
+ - python-lxml
+ - libvirt-python
diff --git a/xci/playbooks/roles/prepare-functest/defaults/main.yml b/xci/playbooks/roles/prepare-functest/defaults/main.yml
deleted file mode 100644
index a3638302..00000000
--- a/xci/playbooks/roles/prepare-functest/defaults/main.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-# Gateway parameters
-gateway_ip: "10.10.10.1"
-gateway_ip_mask: "10.10.10.1/24"
-broadcast_ip: "10.10.10.255"
-gateway_interface: "br-vlan"
-
-# Network parameters
-external_network: "ext-net"
-
-# Subnet parameters
-subnet_name: "ext-subnet"
-allocation_pool: "start=10.10.10.5,end=10.10.10.254"
-subnet_cidr: "10.10.10.0/24"
diff --git a/xci/playbooks/roles/prepare-functest/tasks/main.yml b/xci/playbooks/roles/prepare-functest/tasks/main.yml
deleted file mode 100644
index 9a380cd1..00000000
--- a/xci/playbooks/roles/prepare-functest/tasks/main.yml
+++ /dev/null
@@ -1,32 +0,0 @@
----
-# SPDX-license-identifier: Apache-2.0
-##############################################################################
-# Copyright (c) 2017 SUSE Linux GmbH
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-- name: check if the gateway was already set
- shell: "ip a | grep {{ gateway_ip }}"
- register: gateway_ip_result
- ignore_errors: True
-
-- name: add public network gateway
- command: "ip addr add {{ gateway_ip_mask }} brd {{ broadcast_ip }} dev {{ gateway_interface }}"
- when: gateway_ip_result|failed
-
-- name: prepare script to create networks for functest
- template:
- src: prepare-functest.sh.j2
- dest: /root/prepare-functest.sh
- mode: 0755
-
-- name: Create networks
- shell: "/root/prepare-functest.sh"
-
-- name: prepare environment file for functest
- template:
- src: env.j2
- dest: /root/env
- mode: 0755
diff --git a/xci/playbooks/roles/prepare-functest/templates/env.j2 b/xci/playbooks/roles/prepare-functest/templates/env.j2
deleted file mode 100644
index 87093325..00000000
--- a/xci/playbooks/roles/prepare-functest/templates/env.j2
+++ /dev/null
@@ -1,4 +0,0 @@
-INSTALLER_TYPE=osa
-INSTALLER_IP=192.168.122.2
-EXTERNAL_NETWORK={{ external_network }}
-DEPLOY_SCENARIO="os-nosdn-nofeature-noha"
diff --git a/xci/playbooks/roles/prepare-functest/templates/prepare-functest.sh.j2 b/xci/playbooks/roles/prepare-functest/templates/prepare-functest.sh.j2
deleted file mode 100644
index febe8369..00000000
--- a/xci/playbooks/roles/prepare-functest/templates/prepare-functest.sh.j2
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-
-source /root/openrc
-
-openstack --insecure network create --external \
- --provider-physical-network flat \
- --provider-network-type flat {{ external_network }}
-
-openstack --insecure subnet create --network {{ external_network }} \
- --allocation-pool {{ allocation_pool }} \
- --subnet-range {{ subnet_cidr }} --gateway {{ gateway_ip }} \
- --no-dhcp {{ subnet_name }}
diff --git a/xci/playbooks/roles/prepare-tests/defaults/main.yml b/xci/playbooks/roles/prepare-tests/defaults/main.yml
new file mode 100644
index 00000000..7002586c
--- /dev/null
+++ b/xci/playbooks/roles/prepare-tests/defaults/main.yml
@@ -0,0 +1,14 @@
+---
+# Gateway parameters
+gateway_ip: "192.168.122.1"
+gateway_ip_mask: "192.168.122.1/24"
+broadcast_ip: "192.168.122.255"
+gateway_interface: "br-vlan"
+
+# Network parameters
+external_network: "ext-net"
+
+# Subnet parameters
+subnet_name: "ext-subnet"
+allocation_pool: "start=192.168.122.100,end=192.168.122.254"
+subnet_cidr: "192.168.122.0/24"
diff --git a/xci/playbooks/roles/prepare-tests/tasks/main.yml b/xci/playbooks/roles/prepare-tests/tasks/main.yml
new file mode 100644
index 00000000..a543ac1f
--- /dev/null
+++ b/xci/playbooks/roles/prepare-tests/tasks/main.yml
@@ -0,0 +1,56 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2017 SUSE Linux GmbH
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+- name: install required packages
+ package:
+ name: "{{ required_packages[ansible_pkg_mgr] }}"
+ update_cache: "{{ (ansible_pkg_mgr in ['apt', 'zypper']) | ternary('yes', omit) }}"
+ state: present
+
+# Docker is needed for test frameworks
+- name: Ensure Docker service is started and enabled
+ service:
+ name: docker
+ state: started
+ enabled: yes
+
+- name: install required pip packages
+ pip:
+ name: "{{ required_pip }}"
+ state: present
+ extra_args: '-c https://raw.githubusercontent.com/openstack/requirements/{{ requirements_git_install_branch }}/upper-constraints.txt'
+
+# odl scenarios require to add odl variables to env
+- include_tasks: process_neutron_conf.yml
+ when: "'-odl-' in deploy_scenario"
+
+- name: prepare environment file for tests
+ template:
+ src: env.j2
+ dest: /root/env
+ mode: 0755
+
+- name: create the script to prepare for testing
+ template:
+ src: prepare-tests.sh.j2
+ dest: /root/prepare-tests.sh
+ mode: 0755
+
+- name: create the script to run functest
+ template:
+ src: run-functest.sh.j2
+ dest: /root/run-functest.sh
+ mode: 0755
+
+- name: create the script to run yardstick
+ template:
+ src: run-yardstick.sh.j2
+ dest: /root/run-yardstick.sh
+ mode: 0755
diff --git a/xci/playbooks/roles/prepare-tests/tasks/process_neutron_conf.yml b/xci/playbooks/roles/prepare-tests/tasks/process_neutron_conf.yml
new file mode 100644
index 00000000..45608df3
--- /dev/null
+++ b/xci/playbooks/roles/prepare-tests/tasks/process_neutron_conf.yml
@@ -0,0 +1,19 @@
+---
+- name: Collecting ODL variables
+ block:
+ - name: Fetch odl_password variable
+ shell: "cat /tmp/ml2_conf.ini | grep password | cut -d ' ' -f3"
+ register: odl_password
+
+ - name: Fetch odl_username variable
+ shell: "cat /tmp/ml2_conf.ini | grep username | cut -d ' ' -f3"
+ register: odl_username
+
+ - name: Fetch odl_port variable
+ shell: "cat /tmp/ml2_conf.ini | grep url | cut -d ':' -f3 | cut -d '/' -f1"
+ register: odl_port
+
+ - name: Fetch odl_ip variable
+ shell: "cat /tmp/ml2_conf.ini | grep url | cut -d ':' -f2 | cut -d '/' -f3"
+ register: odl_ip
+ when: "'-odl-' in deploy_scenario"
diff --git a/xci/playbooks/roles/prepare-tests/templates/env.j2 b/xci/playbooks/roles/prepare-tests/templates/env.j2
new file mode 100644
index 00000000..d4f8f86c
--- /dev/null
+++ b/xci/playbooks/roles/prepare-tests/templates/env.j2
@@ -0,0 +1,15 @@
+INSTALLER_IP=192.168.122.2
+TEST_DB_URL=http://testresults.opnfv.org/test/api/v1/results
+ENERGY_RECORDER_API_URL=http://energy.opnfv.fr/resources
+{# external network is only valid for OpenStack based scenarios #}
+{% if 'os-' in deploy_scenario %}
+EXTERNAL_NETWORK={{ external_network }}
+{% endif %}
+{% if '-odl-' in deploy_scenario %}
+SDN_CONTROLLER_IP={{ odl_ip.stdout }}
+SDN_CONTROLLER_USER={{ odl_username.stdout }}
+SDN_CONTROLLER_PASSWORD={{ odl_password.stdout }}
+SDN_CONTROLLER_RESTCONFPORT={{ odl_port.stdout }}
+SDN_CONTROLLER_WEBPORT={{ odl_port.stdout }}
+{% endif %}
+
diff --git a/xci/playbooks/roles/prepare-tests/templates/prepare-tests.sh.j2 b/xci/playbooks/roles/prepare-tests/templates/prepare-tests.sh.j2
new file mode 100644
index 00000000..1b779cb9
--- /dev/null
+++ b/xci/playbooks/roles/prepare-tests/templates/prepare-tests.sh.j2
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+# Variables that we need to pass from XCI to testing
+XCI_ENV=(INSTALLER_TYPE XCI_FLAVOR OPENSTACK_OSA_VERSION CI_LOOP BUILD_TAG NODE_NAME FUNCTEST_MODE FUNCTEST_SUITE_NAME FUNCTEST_VERSION)
+
+# Extract variables from xci.env file
+if [[ -e /root/xci.env ]]; then
+ for x in ${XCI_ENV[@]}; do
+ grep "^${x}=" /root/xci.env >> /root/env
+ done
+ # Parse the XCI's DEPLOY_SCENARIO and XCI_FLAVOR variables and
+ # set the functest container's DEPLOY_SCENARIO variable in the
+ # following format <scenario>-<flavor>. But the XCI's mini flavor
+ # is converted into noha.
+ DEPLOY_SCENARIO=`grep -Po '(?<=DEPLOY_SCENARIO=).*' /root/xci.env`
+ XCI_FLAVOR=`grep -Po '(?<=XCI_FLAVOR=).*' /root/xci.env`
+ XCI_FLAVOR=${XCI_FLAVOR/mini/noha}
+ echo "DEPLOY_SCENARIO=$DEPLOY_SCENARIO-$XCI_FLAVOR" >> /root/env
+fi
+
+# we need to ensure the necessary environment variables are sourced
+source /root/env
+
+{% if 'os-' in deploy_scenario %}
+{# stuff needed for OpenStack based scenarios #}
+source /root/openrc
+
+openstack --insecure network create --external \
+ --provider-physical-network flat \
+ --provider-network-type flat {{ external_network }}
+
+openstack --insecure subnet create --network {{ external_network }} \
+ --allocation-pool {{ allocation_pool }} \
+ --subnet-range {{ subnet_cidr }} --gateway {{ gateway_ip }} \
+ --no-dhcp {{ subnet_name }}
+{% else %}
+{# stuff needed for Kubernetes based scenarios #}
+# Create k8s.creds file for testing
+KUBE_MASTER_URL=$(grep -r server ~/.kube/config | awk '{print $2}')
+KUBE_MASTER_IP=$(echo $KUBE_MASTER_URL | awk -F "[:/]" '{print $4}')
+cat << EOF > ~/k8s.creds
+KUBERNETES_PROVIDER=local
+KUBE_MASTER_URL=$KUBE_MASTER_URL
+KUBE_MASTER_IP=$KUBE_MASTER_IP
+EOF
+{% endif %}
diff --git a/xci/playbooks/roles/prepare-tests/templates/run-functest.sh.j2 b/xci/playbooks/roles/prepare-tests/templates/run-functest.sh.j2
new file mode 100644
index 00000000..b4cf46d7
--- /dev/null
+++ b/xci/playbooks/roles/prepare-tests/templates/run-functest.sh.j2
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+# Create directory to store functest logs
+mkdir -p /root/functest-results/
+
+# Dump the env file
+echo "------------------------------------------------------"
+echo "------------- functest environment file --------------"
+cat /root/env
+echo "------------------------------------------------------"
+
+# we need to ensure the necessary environment variables are sourced
+source /root/env
+
+{% if 'os-' in deploy_scenario %}
+{# stuff needed for OpenStack based scenarios #}
+# the needed images differ between the suites so avoid downloading unnecessary images
+echo "Downloading the images needed for functest-$FUNCTEST_SUITE_NAME"
+mkdir ~/images && cd ~/images
+if [[ "$FUNCTEST_SUITE_NAME" =~ "healthcheck" ]]; then
+ wget -q http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img
+elif [[ "$FUNCTEST_SUITE_NAME" =~ "smoke" ]]; then
+ wget -q http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img \
+ http://testresults.opnfv.org/functest/shaker-image.qcow2 \
+ https://cloud-images.ubuntu.com/releases/14.04/release/ubuntu-14.04-server-cloudimg-amd64-disk1.img
+else
+ echo "Unsupported test suite for functest"
+ exit 1
+fi
+echo "------------------------------------------------------"
+ls -al . && cd ~
+echo "------------------------------------------------------"
+
+# docker image to use will be different for healthcheck and smoke test
+DOCKER_IMAGE_NAME="opnfv/functest-${FUNCTEST_SUITE_NAME}:${FUNCTEST_VERSION}"
+
+sudo docker run --env-file env \
+ -v $(pwd)/openrc:/home/opnfv/functest/conf/env_file \
+ -v $(pwd)/images:/home/opnfv/functest/images \
+ -v $(pwd)/functest-results:/home/opnfv/functest/results \
+ ${DOCKER_IMAGE_NAME}
+{% else %}
+{# stuff needed for Kubernetes based scenarios #}
+# docker image to use will be different for healthcheck and smoke test
+DOCKER_IMAGE_NAME="opnfv/functest-kubernetes-${FUNCTEST_SUITE_NAME}"
+
+sudo docker run --env-file env \
+ -v $(pwd)/k8s.creds:/home/opnfv/functest/conf/env_file \
+ -v $(pwd)/.kube/config:/root/.kube/config \
+ -v $(pwd)/functest-results:/home/opnfv/functest/results \
+ $DOCKER_IMAGE_NAME
+{% endif %}
diff --git a/xci/playbooks/roles/prepare-tests/templates/run-yardstick.sh.j2 b/xci/playbooks/roles/prepare-tests/templates/run-yardstick.sh.j2
new file mode 100644
index 00000000..6a7fd8be
--- /dev/null
+++ b/xci/playbooks/roles/prepare-tests/templates/run-yardstick.sh.j2
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+# Create directory to store yardstick logs
+mkdir -p /root/yardstick-results/
+
+# Dump the env file
+echo "------------------------------------------------------"
+echo "------------- yardstick environment file --------------"
+cat /root/env
+echo "------------------------------------------------------"
+
+# we need to ensure the necessary environment variables are sourced
+source /root/env
+
+{% if 'os-' in deploy_scenario %}
+{# stuff needed for OpenStack based scenarios #}
+rc_file_vol="-v /root/openrc:/etc/yardstick/openstack.creds"
+{% else %}
+{# k8 scenario name is hardcoded for the time being until we clarify #}
+{# which suite name we should use for the scenarios without yardstick suites #}
+DEPLOY_SCENARIO="k8-nosdn-nofeature-noha"
+rc_file_vol="-v /root/admin.conf:/etc/yardstick/admin.conf"
+{% endif %}
+
+OS_CACERT="/etc/ssl/certs/haproxy.cert"
+DOCKER_IMAGE_NAME="opnfv/yardstick"
+YARDSTICK_SCENARIO_SUITE_NAME="opnfv_${DEPLOY_SCENARIO}_daily.yaml"
+
+# add OS_CACERT to openrc
+echo "export OS_CACERT=/etc/yardstick/os_cacert" >> ~/openrc
+
+opts="--privileged=true --rm"
+envs="-e INSTALLER_TYPE=$INSTALLER_TYPE -e INSTALLER_IP=$INSTALLER_IP \
+ -e NODE_NAME=$NODE_NAME -e EXTERNAL_NETWORK=$EXTERNAL_NETWORK \
+ -e YARDSTICK_BRANCH=master -e BRANCH=master \
+ -e DEPLOY_SCENARIO=$DEPLOY_SCENARIO -e CI_DEBUG=true"
+cacert_file_vol="-v $OS_CACERT:/etc/yardstick/os_cacert"
+map_log_dir="-v /root/yardstick-results:/tmp/yardstick"
+sshkey="-v /root/.ssh/id_rsa:/root/.ssh/id_rsa"
+cmd="sudo docker run ${opts} ${envs} ${rc_file_vol} ${cacert_file_vol} \
+ ${map_log_dir} ${sshkey} ${DOCKER_IMAGE_NAME} \
+ exec_tests.sh ${YARDSTICK_SCENARIO_SUITE_NAME}"
+echo "Running yardstick with the command"
+echo "------------------------------------------------------"
+echo $cmd
+echo "------------------------------------------------------"
+$cmd
diff --git a/xci/playbooks/roles/prepare-tests/vars/main.yml b/xci/playbooks/roles/prepare-tests/vars/main.yml
new file mode 100644
index 00000000..83638466
--- /dev/null
+++ b/xci/playbooks/roles/prepare-tests/vars/main.yml
@@ -0,0 +1,17 @@
+---
+required_packages:
+ apt:
+ - docker.io
+ - wget
+ - xz-utils
+ zypper:
+ - docker
+ - wget
+ - xz
+ yum:
+ - docker
+ - wget
+ - xz
+
+required_pip:
+ - docker-py