-rw-r--r--  .gitignore | 6
-rw-r--r--  INFO.yaml | 27
-rw-r--r--  docs/conf.py | 1
-rw-r--r--  docs/conf.yaml | 3
-rw-r--r--  docs/requirements.txt | 3
-rw-r--r--  docs/specs/infra_manager.rst | 130
-rw-r--r--  docs/specs/k8-calico-onap.rst | 141
-rw-r--r--  docs/specs/k8-odl-coe.rst | 105
-rw-r--r--  docs/xci-overview.rst | 2
-rw-r--r--  docs/xci-user-guide.rst | 21
-rw-r--r--  tox.ini | 25
-rw-r--r--  xci/README.rst | 31
-rwxr-xr-x  xci/config/aio-vars | 4
-rwxr-xr-x  xci/config/env-vars | 23
-rwxr-xr-x  xci/config/ha-vars | 7
-rwxr-xr-x  xci/config/mini-vars | 7
-rwxr-xr-x  xci/config/noha-vars | 7
-rwxr-xr-x  xci/config/pinned-versions | 37
-rwxr-xr-x  xci/config/user-vars | 14
-rw-r--r--  xci/files/requirements.yml | 2
-rwxr-xr-x  xci/files/xci-destroy-env.sh | 24
-rw-r--r--  xci/files/xci-lib.sh (renamed from xci/files/install-lib.sh) | 138
-rw-r--r--  xci/infra/bifrost/infra-provision.sh | 75
-rw-r--r--  xci/infra/bifrost/playbooks/opnfv-virtual.yml (renamed from xci/infra/bifrost/playbooks/opnfv-virtual.yaml) | 86
-rw-r--r--  xci/infra/bifrost/playbooks/roles/common/venv_python_path.yml | 34
-rw-r--r--  xci/infra/bifrost/playbooks/wait-for-baremetal.yml | 17
-rw-r--r--  xci/infra/bifrost/playbooks/xci-prepare-env.yml | 118
-rw-r--r--  xci/infra/bifrost/playbooks/xci-setup-nodes.yml (renamed from xci/infra/bifrost/playbooks/bootstrap-bifrost.yml) | 44
-rwxr-xr-x  xci/infra/bifrost/scripts/bifrost-env.sh | 43
-rwxr-xr-x  xci/infra/bifrost/scripts/bifrost-provision.sh | 176
-rw-r--r--  xci/infra/bifrost/vars/debian.yml (renamed from xci/scenarios/k8-flannel-nofeature/role/k8-flannel-nofeature/tasks/main.yml) | 21
-rw-r--r--  xci/infra/bifrost/vars/redhat.yml (renamed from xci/scenarios/k8-canal-nofeature/role/k8-canal-nofeature/tasks/main.yml) | 21
-rw-r--r--  xci/infra/bifrost/vars/suse.yml (renamed from xci/scenarios/k8-calico-nofeature/role/k8-calico-nofeature/tasks/main.yml) | 21
-rwxr-xr-x  xci/installer/kubespray/deploy.sh | 90
-rw-r--r--  xci/installer/kubespray/files/aio/inventory/inventory.cfg | 20
-rw-r--r--  xci/installer/kubespray/files/ha/inventory/inventory.cfg | 32
-rw-r--r--  xci/installer/kubespray/files/mini/inventory/inventory.cfg | 22
-rw-r--r--  xci/installer/kubespray/files/noha/inventory/inventory.cfg | 24
-rw-r--r--  xci/installer/kubespray/playbooks/configure-installer.yml | 50
-rw-r--r--  xci/installer/kubespray/playbooks/configure-kubenet.yml | 5
-rw-r--r--  xci/installer/kubespray/playbooks/configure-opnfvhost.yml | 76
-rw-r--r--  xci/installer/kubespray/playbooks/configure-targethosts.yml | 17
-rw-r--r--  xci/installer/kubespray/playbooks/post-deployment.yml | 42
-rwxr-xr-x  xci/installer/osa/deploy.sh | 29
-rw-r--r--  xci/installer/osa/files/aio/flavor-vars.yml | 3
-rw-r--r--  xci/installer/osa/files/aio/inventory | 2
-rw-r--r--  xci/installer/osa/files/ansible-role-requirements.yml | 208
-rw-r--r--  xci/installer/osa/files/global-requirement-pins.txt | 13
-rw-r--r--  xci/installer/osa/files/ha/flavor-vars.yml | 39
-rw-r--r--  xci/installer/osa/files/ha/inventory | 15
-rw-r--r--  xci/installer/osa/files/ha/openstack_user_config.yml | 60
-rw-r--r--  xci/installer/osa/files/ha/user_variables.yml | 3
-rw-r--r--  xci/installer/osa/files/mini/flavor-vars.yml | 21
-rw-r--r--  xci/installer/osa/files/mini/inventory | 12
-rw-r--r--  xci/installer/osa/files/mini/user_variables.yml | 3
-rw-r--r--  xci/installer/osa/files/noha/flavor-vars.yml | 27
-rw-r--r--  xci/installer/osa/files/noha/inventory | 13
-rw-r--r--  xci/installer/osa/files/noha/user_variables.yml | 3
-rw-r--r--  xci/installer/osa/files/openstack_services.yml | 226
-rw-r--r--  xci/installer/osa/files/setup-openstack.yml | 2
-rw-r--r--  xci/installer/osa/files/user_variables_xci.yml (renamed from xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/ha/user_variables_os-odl-nofeature-ha.yml) | 11
-rw-r--r--  xci/installer/osa/playbooks/configure-opnfvhost.yml | 46
-rw-r--r--  xci/installer/osa/playbooks/configure-targethosts.yml | 3
-rw-r--r--  xci/installer/osa/playbooks/post-deployment.yml | 66
-rw-r--r--  xci/installer/osh/README | 50
-rwxr-xr-x  xci/installer/osh/deploy.sh | 170
-rw-r--r--  xci/installer/osh/files/ha/inventory/group_vars/all.yml | 8
-rw-r--r--  xci/installer/osh/playbooks/configure-installer.yml | 51
-rw-r--r--  xci/installer/osh/playbooks/configure-kubenet.yml | 51
-rw-r--r--  xci/installer/osh/playbooks/configure-opnfvhost.yml | 101
-rw-r--r--  xci/installer/osh/playbooks/configure-targethosts.yml | 40
-rw-r--r--  xci/installer/osh/playbooks/group_vars/all.yml | 55
-rw-r--r--  xci/installer/osh/playbooks/install-openstack-helm.yml | 24
-rw-r--r--  xci/installer/osh/playbooks/post-deployment.yml | 42
-rw-r--r--  xci/installer/osh/playbooks/roles/install-osh-mini/tasks/main.yml | 109
-rw-r--r--  xci/installer/osh/playbooks/roles/install-osh-mini/vars/main.yml | 18
-rw-r--r--  xci/installer/osh/playbooks/roles/install-osh-noha/tasks/main.yml | 130
-rw-r--r--  xci/installer/osh/playbooks/roles/prepare-kube-nodes-osh/tasks/main.yml | 12
-rw-r--r--  xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/files/helm-serve.service | 11
-rw-r--r--  xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/tasks/main.yml | 130
-rw-r--r--  xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/vars/main.yml | 31
-rw-r--r--  xci/installer/osh/playbooks/roles/prepare-osh/tasks/main.yml | 33
-rw-r--r--  xci/installer/osh/playbooks/roles/prepare-osh/templates/resolv.conf.j2 | 4
-rw-r--r--  xci/installer/osh/playbooks/roles/prepare-osh/vars/main.yml | 7
-rw-r--r--  xci/opnfv-scenario-requirements.yml | 126
-rw-r--r--  xci/playbooks/bootstrap-scenarios.yml | 43
-rw-r--r--  xci/playbooks/configure-localhost.yml | 22
-rwxr-xr-x  xci/playbooks/dynamic_inventory.py | 240
-rw-r--r--  xci/playbooks/get-opnfv-scenario-requirements.yml | 177
-rw-r--r--  xci/playbooks/manage-ssh-keys.yml | 9
-rw-r--r--  xci/playbooks/manage-ssl-certs.yml | 32
-rw-r--r--  xci/playbooks/prepare-tests.yml (renamed from xci/playbooks/prepare-functest.yml) | 4
-rw-r--r--  xci/playbooks/roles/bootstrap-host/tasks/network.yml | 95
-rw-r--r--  xci/playbooks/roles/bootstrap-host/tasks/network_debian.yml | 98
-rw-r--r--  xci/playbooks/roles/bootstrap-host/tasks/network_redhat.yml | 32
-rw-r--r--  xci/playbooks/roles/bootstrap-host/tasks/network_suse.yml | 93
-rw-r--r--  xci/playbooks/roles/bootstrap-host/templates/debian/compute00.interface.j2 | 75
l---------  xci/playbooks/roles/bootstrap-host/templates/debian/compute01.interface.j2 | 1
-rw-r--r--  xci/playbooks/roles/bootstrap-host/templates/debian/controller00.interface.j2 | 74
l---------  xci/playbooks/roles/bootstrap-host/templates/debian/controller01.interface.j2 | 1
l---------  xci/playbooks/roles/bootstrap-host/templates/debian/controller02.interface.j2 | 1
-rw-r--r--  xci/playbooks/roles/bootstrap-host/templates/debian/opnfv.interface.j2 | 66
l---------  xci/playbooks/roles/bootstrap-host/templates/kubespray | 1
-rw-r--r--  xci/playbooks/roles/bootstrap-host/templates/osa/debian.interface.j2 | 39
-rw-r--r--  xci/playbooks/roles/bootstrap-host/templates/osa/redhat.interface.j2 | 26
-rw-r--r--  xci/playbooks/roles/bootstrap-host/templates/osa/suse.interface.j2 (renamed from xci/playbooks/roles/bootstrap-host/templates/suse/suse.interface.j2) | 7
-rw-r--r--  xci/playbooks/roles/bootstrap-host/templates/osa/suse.routes.j2 | 1
l---------  xci/playbooks/roles/bootstrap-host/templates/osh | 1
-rw-r--r--  xci/playbooks/roles/bootstrap-host/templates/redhat/bridge.ifcfg.j2 | 9
-rw-r--r--  xci/playbooks/roles/bootstrap-host/templates/redhat/interface.ifcfg.j2 | 10
-rw-r--r--  xci/playbooks/roles/bootstrap-host/templates/suse/suse.routes.j2 | 1
-rw-r--r--  xci/playbooks/roles/bootstrap-host/vars/main.yml | 70
-rw-r--r--  xci/playbooks/roles/create-nodes/README.md | 160
-rw-r--r--  xci/playbooks/roles/create-nodes/defaults/main.yml | 31
-rw-r--r--  xci/playbooks/roles/create-nodes/files/virtualbmc.conf | 3
-rw-r--r--  xci/playbooks/roles/create-nodes/tasks/baremetalhoststojson.yml | 91
-rw-r--r--  xci/playbooks/roles/create-nodes/tasks/create_vm.yml | 198
-rw-r--r--  xci/playbooks/roles/create-nodes/tasks/download_opnfvimage.yml | 32
-rw-r--r--  xci/playbooks/roles/create-nodes/tasks/main.yml | 54
-rw-r--r--  xci/playbooks/roles/create-nodes/tasks/prepare_libvirt.yml | 139
-rw-r--r--  xci/playbooks/roles/create-nodes/templates/net-admin.xml.j2 | 14
-rw-r--r--  xci/playbooks/roles/create-nodes/templates/net-mgmt.xml.j2 | 11
-rw-r--r--  xci/playbooks/roles/create-nodes/templates/net.xml.j2 | 14
-rw-r--r--  xci/playbooks/roles/create-nodes/templates/pool_dir.xml.j2 | 7
-rw-r--r--  xci/playbooks/roles/create-nodes/templates/vm.xml.j2 | 69
-rw-r--r--  xci/playbooks/roles/create-nodes/vars/debian.yml | 13
-rw-r--r--  xci/playbooks/roles/create-nodes/vars/redhat.yml | 17
-rw-r--r--  xci/playbooks/roles/create-nodes/vars/suse.yml | 15
-rw-r--r--  xci/playbooks/roles/prepare-functest/defaults/main.yml | 14
-rw-r--r--  xci/playbooks/roles/prepare-functest/templates/run-functest.sh.j2 | 84
-rw-r--r--  xci/playbooks/roles/prepare-tests/defaults/main.yml | 14
-rw-r--r--  xci/playbooks/roles/prepare-tests/tasks/main.yml (renamed from xci/playbooks/roles/prepare-functest/tasks/main.yml) | 45
-rw-r--r--  xci/playbooks/roles/prepare-tests/tasks/process_neutron_conf.yml | 19
-rw-r--r--  xci/playbooks/roles/prepare-tests/templates/env.j2 (renamed from xci/playbooks/roles/prepare-functest/templates/env.j2) | 8
-rw-r--r--  xci/playbooks/roles/prepare-tests/templates/prepare-tests.sh.j2 | 46
-rw-r--r--  xci/playbooks/roles/prepare-tests/templates/run-functest.sh.j2 | 52
-rw-r--r--  xci/playbooks/roles/prepare-tests/templates/run-yardstick.sh.j2 | 47
-rw-r--r--  xci/playbooks/roles/prepare-tests/vars/main.yml (renamed from xci/playbooks/roles/prepare-functest/vars/main.yml) | 7
-rw-r--r--  xci/scenarios/README.rst | 1
-rw-r--r--  xci/scenarios/k8-calico-nofeature/role/k8-calico-nofeature/files/k8s-cluster.yml | 292
-rw-r--r--  xci/scenarios/k8-canal-nofeature/role/k8-canal-nofeature/files/k8s-cluster.yml | 292
-rw-r--r--  xci/scenarios/k8-flannel-nofeature/role/k8-flannel-nofeature/files/k8-cluster.yml | 292
-rw-r--r--  xci/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature/files/k8s-cluster.yml | 292
-rw-r--r--  xci/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature/tasks/.gitkeep | 0
-rw-r--r--  xci/scenarios/os-nosdn-nofeature/README.rst | 2
-rw-r--r--  xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/ha/openstack_user_config.yml | 255
-rw-r--r--  xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/mini/openstack_user_config.yml | 170
-rw-r--r--  xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/noha/openstack_user_config.yml | 172
-rw-r--r--  xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/user_variables_os-nosdn-nofeature.yml | 35
-rw-r--r--  xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/tasks/main.yml | 18
-rw-r--r--  xci/scenarios/os-odl-nofeature/.gitkeep | 0
-rw-r--r--  xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/ha/openstack_user_config.yml | 256
-rw-r--r--  xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/mini/openstack_user_config.yml | 171
-rw-r--r--  xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/noha/openstack_user_config.yml | 173
-rw-r--r--  xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/tasks/main.yml | 26
-rw-r--r--  xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/templates/user_variables_os-odl-nofeature.yml.j2 | 45
-rw-r--r--  xci/scenarios/os-odl-nofeature/vars/main.yml | 2
-rw-r--r--  xci/scenarios/os-odl-nofeature/xci_overrides | 7
-rwxr-xr-x  xci/scripts/update-osa-version-files.sh | 12
-rwxr-xr-x  xci/scripts/vm/start-new-vm.sh | 18
-rw-r--r--  xci/var/ericsson-pod2-idf.yml | 187
-rw-r--r--  xci/var/ericsson-pod2-pdf.yml | 269
-rw-r--r--  xci/var/idf.yml | 143
-rw-r--r--  xci/var/lf-pod4-idf.yml | 222
-rw-r--r--  xci/var/lf-pod4-pdf.yml | 198
-rw-r--r--  xci/var/opnfv.yml | 14
-rw-r--r--  xci/var/opnfv_vm_idf.yml (renamed from xci/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature/tasks/main.yml) | 21
-rw-r--r--  xci/var/opnfv_vm_pdf.yml | 53
-rwxr-xr-x  xci/xci-deploy.sh | 82
169 files changed, 5799 insertions(+), 4153 deletions(-)
diff --git a/.gitignore b/.gitignore
index af9d0080..925736c1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,6 @@
*,~
.*.sw?
-/docs_build/
+docs_build/*
/docs_output/
/releng/
.idea
@@ -33,7 +33,7 @@ coverage.xml
nosetests.xml
testapi_venv/
.cache
-.tox
+.tox/
*.retry
job_output/
# Clear VM files
@@ -42,7 +42,7 @@ job_output/
build.log
*.d/
_static/
-conf.py
*.html
html/
xci/logs/
+docs/_build/*
diff --git a/INFO.yaml b/INFO.yaml
index f7f388f1..43c73870 100644
--- a/INFO.yaml
+++ b/INFO.yaml
@@ -30,33 +30,18 @@ meetings:
time: '14:00 UTC'
committers:
- <<: *opnfv_releng_ptl
- - name: 'Yolanda Robla Mota'
- company: 'Red Hat'
- email: 'yroblamo@redhat.com'
- id: 'yrobla'
- timezone: 'Europe/Barcelona'
- name: 'Markos Chandras'
company: 'SUSE'
email: 'mchandras@suse.de'
id: 'mchandras'
timezone: 'Europe/London'
- - name: 'Tianwei Wu'
- company: 'Huawei'
- email: 'wutianwei1@huawei.com'
- id: 'hw_wutianwei'
- timezone: 'Asia/Shanghai'
- name: 'Manuel Buil'
company: 'SUSE'
email: 'mbuil@suse.com'
- id: 'mbuild'
+ id: 'mbuil'
timezone: 'Europe/Madrid'
- - name: 'Periyasamy Palanisamy'
- company: 'Ericsson'
- email: 'periyasamy.palanisamy@ericsson.com'
- id: 'epalper'
- timezone: 'Europe/Aachen'
- - name: 'David Blaisonneau'
- company: 'Orange'
- email: 'david.blaisonneau@orange.com'
- id: 'David_Orange'
- timezone: 'Europe/Paris'
+ - name: 'Panagiotis Karalis'
+ company: 'Intracom Telecom'
+ email: 'panos.pkaralis@gmail.com'
+ id: 'pkaralis'
+ timezone: 'Europe/Athens'
diff --git a/docs/conf.py b/docs/conf.py
new file mode 100644
index 00000000..86ab8c57
--- /dev/null
+++ b/docs/conf.py
@@ -0,0 +1 @@
+from docs_conf.conf import * # flake8: noqa
diff --git a/docs/conf.yaml b/docs/conf.yaml
new file mode 100644
index 00000000..305b679e
--- /dev/null
+++ b/docs/conf.yaml
@@ -0,0 +1,3 @@
+---
+project_cfg: opnfv
+project: releng-xci
diff --git a/docs/requirements.txt b/docs/requirements.txt
new file mode 100644
index 00000000..f26b0414
--- /dev/null
+++ b/docs/requirements.txt
@@ -0,0 +1,3 @@
+lfdocs-conf
+sphinxcontrib-httpdomain
+sphinx-opnfv-theme
diff --git a/docs/specs/infra_manager.rst b/docs/specs/infra_manager.rst
new file mode 100644
index 00000000..a8ecb548
--- /dev/null
+++ b/docs/specs/infra_manager.rst
@@ -0,0 +1,130 @@
+PDF and IDF support in XCI
+###########################
+:date: 2018-04-30
+
+This spec introduces the work required to adapt XCI to use PDF and IDF, which
+will be used for both virtual and baremetal deployments.
+
+Definition of Terms
+===================
+* Baremetal deployment: Deployment on physical servers, as opposed to
+  deploying software on virtual machines or containers running on the same
+  physical server.
+
+* Virtual deployment: Deployment on virtual machines, i.e. the servers where
+  nodes will be deployed are virtualized. For example, in OpenStack, computes
+  and controllers will be virtual machines. This deployment is normally done
+  on just one physical server.
+
+* PDF: It stands for POD Descriptor File, which is a document that lists the
+  hardware characteristics of a set of physical or virtual machines which
+  form the infrastructure. Example:
+
+  https://git.opnfv.org/pharos/tree/config/pdf/pod1.yaml
+
+* IDF: It stands for Installer Descriptor File, which is a document that
+  includes useful information for the installers to accomplish the baremetal
+  deployment. Example:
+
+  https://git.opnfv.org/fuel/tree/mcp/config/labs/local/idf-pod1.yaml
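+
+As a concrete illustration of how these files are consumed, the sandbox can
+distinguish a virtual deployment from a baremetal one simply by looking at
+the node vendor declared in the PDF (this mirrors the check introduced in
+xci/files/xci-lib.sh)::
+
+  # BAREMETAL=false when the PDF declares libvirt as the node vendor
+  grep -o 'vendor.*' "${PDF}" | grep -q libvirt \
+      && export BAREMETAL=false || export BAREMETAL=true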
+
+Problem description
+===================
+
+Currently, XCI only supports virtualized deployments running on one server.
+This is good when the user has limited resources; however, baremetal is the
+preferred way to deploy NFV platforms in lab or production environments.
+Besides, this limits the scope of the testing greatly, because we cannot
+test hardware-specific NFV features such as SR-IOV.
+
+Proposed change
+===============
+
+Introduce the infra_manager tool, which will prepare the infrastructure for
+XCI to drive the deployment on a set of virtual or baremetal nodes. This
+tool will execute two tasks:
+
+1. Creation of virtual nodes, or initialization of the preparations for
+   baremetal nodes
+2. OS provisioning on the nodes, both virtual and baremetal
+
+Once those steps are ready, XCI will continue with the deployment of the
+scenario on the provisioned nodes.
+
+The infra_manager tool will consume the PDF and IDF files describing the
+infrastructure as input. It will then use a <yet-to-be-created-tool> to do
+step 1 and bifrost to boot the Operating System on the nodes.
+
+Among other services, Bifrost uses:
+
+- Disk image builder (dib) to generate the OS images
+- dnsmasq as the DHCP server which will provide the PXE boot mechanism
+- ipmitool to manage the servers
+
+Bifrost will be deployed inside a VM in the jumphost.
+
+For the time being, we will create the infrastructure based on the defined
+XCI flavors; however, the implementation should not hinder the possibility
+of having one PDF and IDF per scenario, defining the characteristics and the
+number of nodes to be deployed.
+
+Code impact
+-----------
+
+The new code will be introduced in a new directory called infra_manager under
+releng-xci/xci/prototypes
+
+Tentative User guide
+--------------------
+
+Assuming the user cloned releng-xci on the jumphost, the following should be
+done:
+
+1. Move the IDF and PDF files which describe the infrastructure to
+   releng-xci/xci/prototypes/infra_manager/var. There is an example under
+   xci/var
+
+2. Export the XCI_FLAVOR variable (e.g. export XCI_FLAVOR=noha)
+
+3. Run the <yet-to-be-created-tool> to create the virtual nodes based on the
+   provided PDF information (cpu, ram, disk...) or initialize the
+   preparations for baremetal nodes
+
+4. Start the bifrost process to boot the nodes
+
+5. Run the VIM deployer script:
+   releng-xci/xci/installer/$inst/deploy.sh
+
+   where $inst = {osa, kubespray, kolla}
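+
+Put together, and using the script names proposed in the work items below
+(servers-prepare.sh and nodes-deploy.sh do not exist yet, so the paths are
+only indicative), the flow would roughly be::
+
+  export XCI_FLAVOR=noha
+  # steps 1-3: prepare the virtual or baremetal nodes (proposed tool)
+  releng-xci/xci/prototypes/infra_manager/servers-prepare.sh
+  # step 4: boot the nodes with bifrost (proposed tool)
+  releng-xci/xci/prototypes/infra_manager/nodes-deploy.sh
+  # step 5: deploy the VIM, with $inst one of osa, kubespray or kolla
+  releng-xci/xci/installer/$inst/deploy.sh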
+
+In case of problems, the best way to debug is to access the bifrost VM and:
+
+* use bifrost-utils
+* use ipmitool
+* check the DHCP messages in /var/log/syslog
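+
+For instance, to follow the PXE/DHCP exchange while a node boots, something
+like the following can be used inside the bifrost VM::
+
+  grep -Ei 'dhcp|pxe' /var/log/syslog | tail -n 50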
+
+
+Implementation
+==============
+
+Assignee(s)
+-----------
+
+Primary assignee:
+ Manuel Buil (mbuil)
+ Jack Morgan (jmorgan1)
+ Somebody_else_please (niceperson)
+
+Work items
+----------
+
+1. Provide support for a dynamically generated inventory based on PDF and
+   IDF. This mechanism could be used for both baremetal and virtual
+   deployments.
+
+2. Contribute the servers-prepare.sh script
+
+3. Contribute the nodes-deploy.sh script
+
+4. Integrate the three previous components correctly
+
+5. Provide support for the XCI supported operating systems (openSUSE,
+   Ubuntu, CentOS)
+
+6. Allow PDF and IDF per scenario
diff --git a/docs/specs/k8-calico-onap.rst b/docs/specs/k8-calico-onap.rst
new file mode 100644
index 00000000..445e5c71
--- /dev/null
+++ b/docs/specs/k8-calico-onap.rst
@@ -0,0 +1,141 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. SPDX-License-Identifier: CC-BY-4.0
+.. Copyright 2018 Intel Corporation
+
+.. Links
+.. _Open Networking Automation Platform: https://www.onap.org/
+.. _ONAP metric analysis: https://onap.biterg.io/
+.. _ONAP on Kubernetes: http://onap.readthedocs.io/en/latest/submodules/oom.git/docs/oom_quickstart_guide.html
+.. _Helm: https://docs.helm.sh/
+.. _ONAP on OpenStack: https://wiki.onap.org/display/DW/ONAP+Installation+in+Vanilla+OpenStack
+.. _OOM Minimum Hardware Configuration: http://onap.readthedocs.io/en/latest/submodules/oom.git/docs/oom_cloud_setup_guide.html#minimum-hardware-configuration
+.. _OOM Software Requirements: http://onap.readthedocs.io/en/latest/submodules/oom.git/docs/oom_cloud_setup_guide.html#software-requirements
+.. _seed code: https://gitlab.com/Orange-OpenSource/onap_oom_automatic_installation
+.. _Orange ONAP OOM Deployment Resource Requirements: https://gitlab.com/Orange-OpenSource/kubespray_automatic_installation/blob/521fa87b20fdf4643f30fc28e5d70bdf9f1c98f3/vars/pdf.yaml
+
+This spec introduces the work required to include an XCI scenario
+for the `Open Networking Automation Platform`_ (ONAP) through the ONAP
+Operations Manager (OOM) tool. This tool provides the ability to manage
+the entire life-cycle of an ONAP installation on top of a Kubernetes
+deployment.
+
+Problem description
+===================
+According to the `ONAP metric analysis`_, more than 26K commit
+changes have been submitted since its announcement. Every patchset
+that is merged raises a Jenkins job for the creation and deployment
+of a Docker container image for the corresponding service. Those new
+images are consumed by deployment methods like `ONAP on Kubernetes`_
+and `ONAP on OpenStack`_ during the installation of ONAP services.
+
+Given that ONAP is constantly changing, detecting issues early is
+crucial for ensuring the proper operation of the OOM tool.
+
+Minimum Hardware Requirements
+=============================
+
+Initially, the No HA flavor will be the only supported flavor in order
+to bring up a reference implementation of the scenario. Support for
+other flavors will be introduced based on this implementation.
+
+According to the `OOM Minimum Hardware Configuration`_, ONAP requires
+a large amount of resources, especially on the Kubernetes worker nodes.
+
+Given that the No HA flavor has multiple worker nodes, the containers
+can be distributed between the nodes, resulting in a smaller footprint
+of resources per node.
+
+The No HA scenario consists of 1 Kubernetes master node and 2 Kubernetes
+worker nodes. Total resource requirements should be calculated based on
+the number of nodes.
+
+This recommendation is work in progress and is based on the Orange
+implementation, which can be seen in
+`Orange ONAP OOM Deployment Resource Requirements`_.
+The resource requirements are subject to change and the scenario will
+be updated as necessary.
+
+Hardware for Kubernetes Master Node(s)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+* RAM: 8GB
+* HD: 150GB
+* vCores: 8
+
+Hardware for Kubernetes Worker Node(s)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+* RAM: 64GB
+* HD: 80GB
+* vCores: 16
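+
+Based on these per-node figures, a No HA deployment (1 master and 2 worker
+nodes) adds up to roughly 8 + 2x64 = 136GB of RAM, 150 + 2x80 = 310GB of
+disk and 8 + 2x16 = 40 vCores in total.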
+
+Proposed change
+===============
+
+In order to guarantee the proper installation and validation of ONAP
+services, this spec proposes two phases that complement each other:
+
+1. Creation of the k8-calico-onap scenario for the installation of ONAP
+   services. This new scenario will be designed to validate the
+   installation process provided by the OOM tool.
+2. Adding integration tests to ensure that ONAP is operating
+   properly. This process should cover the Design and Runtime phases.
+
+Code impact
+-----------
+New code will be created based on the existing k8-calico-nofeature
+scenario and will be placed in the scenarios/k8-calico-onap directory
+of the releng-xci-scenario repository. The ONAP installation should
+proceed once the VIM has been installed and before the OPNFV tests run.
+
+
+The default configuration for the virtual resources (4 vCores, 8GB RAM,
+and 100GB HD) offered by XCI does not satisfy ONAP's needs. The
+scenario override mechanism will be used to bring up nodes with
+the necessary amount of resources. This will be replaced by PDF and
+IDF once they become available. The PDF and IDF implementation is a
+separate work item and is not expected to be a dependency for the
+implementation of this scenario.
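+
+As an illustration, and assuming the sandbox keeps its current override
+variables (VM_CPU and VM_DISK exist in xci/config today; the exact variable
+name for memory may differ), such an override could look like::
+
+  export VM_CPU=16
+  export VM_DISK=100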
+
+Software Requirements
+---------------------
+
+OOM has gone through significant changes during the Beijing release
+cycle, which resulted in a changed way of installing ONAP.
+
+In its current release, new software is necessary to install ONAP,
+as listed below and in the `OOM Software Requirements`_:
+
+* Helm: 2.8.x
+* kubectl: 1.8.10
+
+OOM also provides a Makefile that collects instructions for the
+creation of ONAP packages into the Tiller repository. Determining
+which ONAP services are going to be enabled is done through the OOM
+configuration. The new role will be placed in the
+scenarios/k8-calico-onap/role/k8-calico-onap/tasks folder in the
+releng-xci-scenario repository.
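+
+A rough sketch of the steps such a role would automate (indicative only and
+based on the OOM quickstart; the exact make targets and chart names depend
+on the OOM release)::
+
+  cd oom/kubernetes
+  # package the ONAP charts into the local Tiller/Helm repository
+  make all
+  # deploy the charts that were enabled in the OOM configuration
+  helm install local/onap --name dev --namespace onap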
+
+Tentative User guide
+--------------------
+TBD
+
+Implementation
+==============
+The Orange team has been working on this scenario for a while; the new
+role can use and adapt their `seed code`_ during the implementation.
+
+Assignee(s)
+-----------
+
+Primary assignee:
+ Victor Morales (electrocucaracha)
+ Fatih Degirmenci (fdegir)
+ Jack Morgan (jmorgan1)
+
+Work items
+----------
+TBD
+
+Glossary
+--------
diff --git a/docs/specs/k8-odl-coe.rst b/docs/specs/k8-odl-coe.rst
new file mode 100644
index 00000000..cd29456c
--- /dev/null
+++ b/docs/specs/k8-odl-coe.rst
@@ -0,0 +1,105 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. SPDX-License-Identifier: CC-BY-4.0
+.. Copyright 2018 Ericsson AB and Others
+
+.. Links
+.. _OpenDaylight COE: https://wiki.opendaylight.org/view/COE:Main
+.. _setting-up-coe-dev-environment: https://github.com/opendaylight/coe/blob/master/docs/setting-up-coe-dev-environment.rst
+.. _ansible-opendaylight: https://git.opendaylight.org/gerrit/gitweb?p=integration/packaging/ansible-opendaylight.git;a=tree
+
+This spec proposes adding a k8-odl-coe XCI scenario with OpenDaylight as the
+networking provider for Kubernetes, using the OpenDaylight COE (Container
+Orchestration Engine) and NetVirt projects.
+
+Problem Description
+===================
+
+Currently OpenDaylight's advanced networking capabilities are not leveraged
+with Kubernetes in any scenarios. This spec proposes a reference platform for
+deployments that want to use OpenDaylight as a networking backend for
+Kubernetes.
+
+Minimum Hardware Requirements
+=============================
+
+Hardware for Kubernetes Master Node(s)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+* RAM: 16 GB (20 GB for the HA flavor, i.e. for OpenDaylight clustering)
+* HD: 80 GB
+* vCores: 6
+
+Hardware for Kubernetes Worker Node(s)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+* RAM: 12 GB
+* HD: 80 GB
+* vCores: 6
+
+Supported XCI Sandbox Flavors
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+This scenario will support deployments on the Mini, No HA and HA XCI sandbox flavors.
+
+Proposed Change
+===============
+
+1. Provide Pod Descriptor Files (PDF) and IDF (Installer Descriptor Files)
+ specific to this scenario to install Kubernetes with OpenDaylight COE.
+2. Introduce a new scenario k8-odl-coe in releng-xci-scenarios repository.
+3. Reuse the role from the k8-nosdn-nofeature scenario to install Kubernetes.
+   It sets the kube_network_plugin option to 'cloud' in k8s-cluster.yml so
+   that Kubespray doesn't configure networking between pods. This enables
+   OpenDaylight to be chosen as the networking backend in the steps below.
+4. Enhance upstream `ansible-opendaylight`_ role to deploy OpenDaylight with
+ COE Watcher on k8s master node(s) and CNI plugin on the k8s master and
+ worker node(s).
+5. Add the required Ansible tasks in k8-odl-coe role to direct XCI and
+ ansible-opendaylight role to configure k8s with OpenDaylight as the
+ networking backend for pod connectivity.
+6. Run the Health Check by testing the pods' connectivity.
+
+The COE Watcher binary and COE CNI plugin are built from the OpenDaylight
+COE source code. The user will have the flexibility to choose their SHA from
+XCI's ansible-role-requirements.yml file.
+
+Code Impact
+-----------
+
+Code specific to the k8-odl-coe scenario will be added to the xci/scenarios
+directory of the releng-xci-scenarios repository.
+
+User Guide
+----------
+
+No user guide will be provided.
+
+Implementation
+==============
+
+See the Proposed Change section.
+
+Assignee(s)
+-----------
+
+Primary assignees:
+
+* Prem Sankar G (premsa)
+* Periyasamy Palanisamy (epalper)
+* Fatih Degirmenci (fdegir)
+
+Work Items
+----------
+
+1. Enhance the akka.conf.j2 in the upstream ansible-opendaylight role to
+   work with k8s deployments (i.e. run the ODL cluster on k8s master nodes).
+   Currently this works only for deployments based on OpenStack-Ansible.
+2. Enhance the upstream ansible-opendaylight role to install the
+   odl-netvirt-coe and odl-restconf Karaf features, and to build the COE
+   Watcher and CNI plugin binaries from source.
+3. Implement configure-kubenet.yml to choose OpenDaylight COE as the
+ networking backend.
+4. Implement Health Check tests.
+
+Glossary
+--------
diff --git a/docs/xci-overview.rst b/docs/xci-overview.rst
index 575eb37c..9b225ec1 100644
--- a/docs/xci-overview.rst
+++ b/docs/xci-overview.rst
@@ -138,7 +138,7 @@ Multi-distro Support
--------------------
Giving choice and not imposing things on developers and users are two
-of the important aspects of XCI. This means that if they want to have all in one
+of the important aspects of XCI. This means that if they want to have smaller
deployments, they should be able to do that by using
:ref:`different flavors <sandbox-flavors>` provided by XCI.
diff --git a/docs/xci-user-guide.rst b/docs/xci-user-guide.rst
index 8f506fc4..5e76ca16 100644
--- a/docs/xci-user-guide.rst
+++ b/docs/xci-user-guide.rst
@@ -97,11 +97,6 @@ Available flavors are listed on the table below.
+------------------+------------------------+---------------------+--------------------------+--------------------------+
| Flavor | Number of VM Nodes | VM Specs Per Node | Time Estimates Openstack | Time Estimates Kubernetes|
+==================+========================+=====================+==========================+==========================+
-| All in One (aio) | | 1 VM Node | | vCPUs: 8 | | Provisioning: 10 mins | | Provisioning: 10 mins |
-| | | controller & compute | | RAM: 12GB | | Deployment: 90 mins | | Deployment: 30 mins |
-| | | on single/same node | | Disk: 80GB | | Total: 100 mins | | Total: 40 mins |
-| | | 1 compute node | | NICs: 1 | | | | |
-+------------------+------------------------+---------------------+--------------------------+--------------------------+
| Mini | | 3 VM Nodes | | vCPUs: 6 | | Provisioning: 12 mins | | Provisioning: 12 mins |
| | | 1 deployment node | | RAM: 12GB | | Deployment: 65 mins | | Deployment: 35 mins |
| | | 1 controller node | | Disk: 80GB | | Total: 77 mins | | Total: 47 mins |
@@ -150,14 +145,6 @@ ongoing.
The differences between the flavors are documented below.
-**All in One**
-
-As shown on the table in the previous section, this flavor consists of a single
-node. All the OpenStack services, including compute run on the same node.
-
-The flavor All in One (aio) is deployed based on the process described in the
-upstream documentation. Please check `OpenStack Ansible Developer Quick Start <https://docs.openstack.org/openstack-ansible/pike/contributor/quickstart-aio.html>`_ for details.
-
**Mini/No HA/HA**
These flavors consist of multiple nodes.
@@ -184,12 +171,6 @@ are supported currently
The differences between the flavors are documented below.
-**All in One**
-
-As shown on the table in the previous section, this flavor consists of a single
-node. All the kubernetes services run on the same node, which acts as master
-and worker at the same time.
-
**Mini/No HA/HA**
These flavors consist of multiple nodes.
@@ -257,7 +238,7 @@ How to Use
| ``./xci-deploy.sh``
Issuing above command will start the sandbox deployment using the default
-flavor ``aio`` and the verified versions of upstream components.
+flavor ``mini`` and the verified versions of upstream components.
(`pinned-versions <https://git.opnfv.org/releng-xci/tree/xci/config/pinned-versions>`_).
The sandbox should be ready between 1.5 and 2 hours depending on the host
machine.
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 00000000..6aa16066
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,25 @@
+# Tox (http://tox.testrun.org/) is a tool for running tests
+# in multiple virtualenvs. This configuration file will run the
+# test suite on all supported python versions. To use it, "pip install tox"
+# and then run "tox" from this directory.
+
+[tox]
+envlist = docs,docs-linkcheck
+skipsdist = True
+
+[testenv]
+usedevelop = False
+setenv=
+ HOME = {envtmpdir}
+ PYTHONPATH = {toxinidir}
+
+[testenv:docs]
+deps = -r{toxinidir}/docs/requirements.txt
+commands =
+ sphinx-build -b html -n -d {envtmpdir}/doctrees ./docs {toxinidir}/docs/_build/html
+ echo "Generated docs available in {toxinidir}/docs/_build/html"
+whitelist_externals = echo
+
+[testenv:docs-linkcheck]
+deps = -r{toxinidir}/docs/requirements.txt
+commands = sphinx-build -b linkcheck -d {envtmpdir}/doctrees ./docs {toxinidir}/docs/_build/linkcheck
diff --git a/xci/README.rst b/xci/README.rst
index d7555d46..a18d92ee 100644
--- a/xci/README.rst
+++ b/xci/README.rst
@@ -160,6 +160,37 @@ execute sandbox script
./xci-deploy.sh
+Baremetal Usage
+---------------
+
+The previous deployments are based on VMs, i.e. controllers and computes are
+VMs. It is also possible to deploy on baremetal; for that, a PDF and an IDF
+file which describe the hardware need to be provided to the sandbox script:
+
+clone OPNFV releng-xci repository
+
+ git clone https://gerrit.opnfv.org/gerrit/releng-xci.git
+
+change into directory where the sandbox script is located
+
+ cd releng-xci/xci
+
+set the sandbox flavor
+
+ export XCI_FLAVOR=noha
+
+set the version to use for openstack-ansible
+
+ export OPENSTACK_OSA_VERSION=master
+
+set where the logs should be stored
+
+ export LOG_PATH=/home/jenkins/xcilogs
+
+execute sandbox script
+
+ ./xci-deploy.sh -i var/ericsson-pod2-idf.yml -p var/ericsson-pod2-pdf.yml
+
==============
User Variables
==============
diff --git a/xci/config/aio-vars b/xci/config/aio-vars
index 1d2e4f96..cff181a9 100755
--- a/xci/config/aio-vars
+++ b/xci/config/aio-vars
@@ -9,8 +9,8 @@
#-------------------------------------------------------------------------------
# Configure VM Nodes
#-------------------------------------------------------------------------------
-export TEST_VM_NUM_NODES=1
-export TEST_VM_NODE_NAMES=opnfv
+export NUM_NODES=1
+export NODE_NAMES=opnfv
export VM_DOMAIN_TYPE=${VM_DOMAIN_TYPE:-kvm}
export VM_CPU=${VM_CPU:-8}
export VM_DISK=${VM_DISK:-80}
diff --git a/xci/config/env-vars b/xci/config/env-vars
index bf333bdf..a90e8533 100755
--- a/xci/config/env-vars
+++ b/xci/config/env-vars
@@ -8,10 +8,16 @@ export OPNFV_RELENG_GIT_URL=${OPNFV_RELENG_GIT_URL:-https://gerrit.opnfv.org/ger
export OPENSTACK_BIFROST_GIT_URL=${OPENSTACK_BIFROST_GIT_URL:-https://git.openstack.org/openstack/bifrost}
export OPENSTACK_OSA_GIT_URL=${OPENSTACK_OSA_GIT_URL:-https://git.openstack.org/openstack/openstack-ansible}
export OPENSTACK_OSA_OPENRC_GIT_URL=${OPENSTACK_OSA_OPENRC_GIT_URL:-https://git.openstack.org/openstack/openstack-ansible-openstack_openrc}
-export KUBESPRAY_GIT_URL=${KUBESPRAY_GIT_URL:-https://github.com/kubernetes-incubator/kubespray.git}
+export KUBESPRAY_GIT_URL=${KUBESPRAY_GIT_URL:-https://github.com/kubernetes-sigs/kubespray.git}
+export OSH_GIT_URL=${OSH_GIT_URL:-https://github.com/openstack/openstack-helm.git}
+export OSH_INFRA_GIT_URL=${OSH_INFRA_GIT_URL:-https://github.com/openstack/openstack-helm-infra.git}
export OPENSTACK_OSA_HAPROXY_GIT_URL=${OPENSTACK_OSA_HAPROXY_GIT_URL:-https://git.openstack.org/openstack/openstack-ansible-haproxy_server}
export KEEPALIVED_GIT_URL=${KEEPALIVED_GIT_URL:-https://github.com/evrardjp/ansible-keepalived}
+export OSH_HELM_BINARY_URL=${OSH_HELM_BINARY_URL:-https://storage.googleapis.com/kubernetes-helm}
+export OSH_HELM_BINARY_VERSION=${OSH_HELM_BINARY_VERSION:-v2.13.1}
+
+
# Configuration
export OPENSTACK_OSA_ETC_PATH=/etc/openstack_deploy
export OPNFV_HOST_IP=192.168.122.2
@@ -28,6 +34,8 @@ export XCI_PLAYBOOKS=${XCI_PATH}/xci/playbooks
# Functest parameters
export FUNCTEST_MODE=${FUNCTEST_MODE:-"tier"}
export FUNCTEST_SUITE_NAME=${FUNCTEST_SUITE_NAME:-"healthcheck"}
+# TODO: Investigate and fix why the env var FUNCTEST_VERSION set by Jenkins job doesn't take effect
+export FUNCTEST_VERSION=${FUNCTEST_VERSION:-"hunter"}
# CI parameters
export CI_LOOP=${CI_LOOP:-"daily"}
@@ -45,12 +53,15 @@ export LOG_PATH=${LOG_PATH:-${XCI_PATH}/xci/logs}
# This currently matches to OSA Ansible version but it doesn't really
# matter since bifrost and OSA will use the Ansible version they need.
# Overall, it's better to use what OSA supports so we can use new features.
-export XCI_ANSIBLE_PIP_VERSION=${XCI_ANSIBLE_PIP_VERSION:-$(curl -s https://raw.githubusercontent.com/openstack/openstack-ansible/${OPENSTACK_OSA_VERSION}/scripts/bootstrap-ansible.sh | grep ansible== | sed -n "s/.*ansible==\([0-9.]*\).*/\1/p")}
+# OSA currently has 2.5.5 which breaks due to missing
+# https://github.com/ansible/ansible/commit/67859c3476501d5d9839fd904aec55468d09593a
+# This was fixed in 2.5.6 so remove the pin when OSA updates to newer version.
+#export XCI_ANSIBLE_PIP_VERSION=${XCI_ANSIBLE_PIP_VERSION:-$(curl -s https://raw.githubusercontent.com/openstack/openstack-ansible/${OPENSTACK_OSA_VERSION}/scripts/bootstrap-ansible.sh | grep ansible== | sed -n "s/.*ansible==\([0-9.]*\).*/\1/p")}
+export XCI_ANSIBLE_PIP_VERSION="2.7.8"
+
export ANSIBLE_HOST_KEY_CHECKING=False
-# subject of the certificate
-export XCI_SSL_SUBJECT=${XCI_SSL_SUBJECT:-"/C=US/ST=California/L=San Francisco/O=IT/CN=xci.releng.opnfv.org"}
export DEPLOY_SCENARIO=${DEPLOY_SCENARIO:-"os-nosdn-nofeature"}
-# Kubespray requires that ansible version is 2.4.0.0
-export XCI_KUBE_ANSIBLE_PIP_VERSION=2.4.0.0
+# attempt to sync Ansible version used by Kubespray with the rest
+export XCI_KUBE_ANSIBLE_PIP_VERSION=$XCI_ANSIBLE_PIP_VERSION
# OpenStack global requirements version
export OPENSTACK_REQUIREMENTS_VERSION=${OPENSTACK_REQUIREMENTS_VERSION:-$(awk '/requirements_git_install_branch:/ {print $2}' ${XCI_PATH}/xci/installer/osa/files/openstack_services.yml)}
diff --git a/xci/config/ha-vars b/xci/config/ha-vars
index 131de2a7..4c40fb33 100755
--- a/xci/config/ha-vars
+++ b/xci/config/ha-vars
@@ -9,9 +9,10 @@
#-------------------------------------------------------------------------------
# Configure VM Nodes
#-------------------------------------------------------------------------------
-export TEST_VM_NUM_NODES=6
-[[ "$INSTALLER_TYPE" == "osa" ]] && export TEST_VM_NODE_NAMES="opnfv controller00 controller01 controller02 compute00 compute01"
-[[ "$INSTALLER_TYPE" == "kubespray" ]] && export TEST_VM_NODE_NAMES="opnfv master1 master2 master3 node1 node2"
+export NUM_NODES=6
+[[ "$INSTALLER_TYPE" == "osa" ]] && export NODE_NAMES="opnfv controller00 controller01 controller02 compute00 compute01"
+[[ "$INSTALLER_TYPE" == "kubespray" ]] && export NODE_NAMES="opnfv master1 master2 master3 node1 node2"
+[[ "$INSTALLER_TYPE" == "osh" ]] && export NODE_NAMES="opnfv master1 master2 master3 node1 node2"
export VM_DOMAIN_TYPE=${VM_DOMAIN_TYPE:-kvm}
export VM_CPU=${VM_CPU:-6}
export VM_DISK=${VM_DISK:-80}
diff --git a/xci/config/mini-vars b/xci/config/mini-vars
index 7d2b227b..aaa4cb88 100755
--- a/xci/config/mini-vars
+++ b/xci/config/mini-vars
@@ -9,9 +9,10 @@
#-------------------------------------------------------------------------------
# Configure VM Nodes
#-------------------------------------------------------------------------------
-export TEST_VM_NUM_NODES=3
-[[ "$INSTALLER_TYPE" == "osa" ]] && export TEST_VM_NODE_NAMES="opnfv controller00 compute00"
-[[ "$INSTALLER_TYPE" == "kubespray" ]] && export TEST_VM_NODE_NAMES="opnfv master1 node1"
+export NUM_NODES=3
+[[ "$INSTALLER_TYPE" == "osa" ]] && export NODE_NAMES="opnfv controller00 compute00"
+[[ "$INSTALLER_TYPE" == "kubespray" ]] && export NODE_NAMES="opnfv master1 node1"
+[[ "$INSTALLER_TYPE" == "osh" ]] && export NODE_NAMES="opnfv master1 node1"
export VM_DOMAIN_TYPE=${VM_DOMAIN_TYPE:-kvm}
export VM_CPU=${VM_CPU:-6}
export VM_DISK=${VM_DISK:-80}
diff --git a/xci/config/noha-vars b/xci/config/noha-vars
index 8d30a243..e887ddb8 100755
--- a/xci/config/noha-vars
+++ b/xci/config/noha-vars
@@ -9,9 +9,10 @@
#-------------------------------------------------------------------------------
# Configure VM Nodes
#-------------------------------------------------------------------------------
-export TEST_VM_NUM_NODES=4
-[[ "$INSTALLER_TYPE" == "osa" ]] && export TEST_VM_NODE_NAMES="opnfv controller00 compute00 compute01"
-[[ "$INSTALLER_TYPE" == "kubespray" ]] && export TEST_VM_NODE_NAMES="opnfv master1 node1 node2"
+export NUM_NODES=4
+[[ "$INSTALLER_TYPE" == "osa" ]] && export NODE_NAMES="opnfv controller00 compute00 compute01"
+[[ "$INSTALLER_TYPE" == "kubespray" ]] && export NODE_NAMES="opnfv master1 node1 node2"
+[[ "$INSTALLER_TYPE" == "osh" ]] && export NODE_NAMES="opnfv master1 node1 node2"
export VM_DOMAIN_TYPE=${VM_DOMAIN_TYPE:-kvm}
export VM_CPU=${VM_CPU:-6}
export VM_DISK=${VM_DISK:-80}
diff --git a/xci/config/pinned-versions b/xci/config/pinned-versions
index 72a0ff61..440972ae 100755
--- a/xci/config/pinned-versions
+++ b/xci/config/pinned-versions
@@ -25,24 +25,31 @@
#-------------------------------------------------------------------------------
# use releng-xci from master until the development work with the sandbox is complete
export OPNFV_RELENG_VERSION="master"
-# HEAD of bifrost "master" as of 13.02.2018
-export OPENSTACK_BIFROST_VERSION=${OPENSTACK_BIFROST_VERSION:-"28b6b8c96f89532bbddeca513285e6c00db89205"}
-# HEAD of ironic "master" as of 13.02.2018
-export BIFROST_IRONIC_VERSION=${BIFROST_IRONIC_VERSION:-"9b8440aa318e4883a74ef8640ad5409dd22858a9"}
-# HEAD of ironic-client "master" as of 13.02.2018
-export BIFROST_IRONIC_CLIENT_VERSION=${BIFROST_IRONIC_CLIENT_VERSION:-"1da269b0e99601f8f6395b2ce3f436f5600e8140"}
-# HEAD of ironic-inspector "master" as of 13.02.2018
-export BIFROST_IRONIC_INSPECTOR_VERSION=${BIFROST_IRONIC_INSPECTOR_VERSION:-"84da941fafb905c2debdd9a9ba68ba743af3ce8a"}
-# HEAD of ironic-inspector-client "master" as of 13.02.2018
-export BIFROST_IRONIC_INSPECTOR_CLIENT_VERSION=${BIFROST_IRONIC_INSPECTOR_CLIENT_VERSION:-"b73403fdad3165cfcccbf4b0330d426ae5925e01"}
-# HEAD of osa "stable/queens" as of 03.04.2018
-export OPENSTACK_OSA_VERSION=${OPENSTACK_OSA_VERSION:-"90d0679d209cb494b9a71817c56e2c26c7fc5ca1"}
+# HEAD of bifrost "master" as of 02.07.2019
+export OPENSTACK_BIFROST_VERSION=${OPENSTACK_BIFROST_VERSION:-"cd559480c95867d272b8a32240e50c390646665b"}
+# HEAD of ironic "master" as of 02.07.2019
+export BIFROST_IRONIC_VERSION=${BIFROST_IRONIC_VERSION:-"1beb8068f95f90a570c72b82f6e518110312b696"}
+# HEAD of ironic-client "master" as of 02.07.2019
+export BIFROST_IRONIC_CLIENT_VERSION=${BIFROST_IRONIC_CLIENT_VERSION:-"eae60397bfcbed322b2121f77c35ac74d0c6b74c"}
+# HEAD of ironic-inspector "master" as of 02.07.2019
+export BIFROST_IRONIC_INSPECTOR_VERSION=${BIFROST_IRONIC_INSPECTOR_VERSION:-"0b38536d1c9ab92952e6ecd069ea13facf012830"}
+# HEAD of ironic-inspector-client "master" as of 02.07.2019
+export BIFROST_IRONIC_INSPECTOR_CLIENT_VERSION=${BIFROST_IRONIC_INSPECTOR_CLIENT_VERSION:-"81ae133bd570ea7359b4797ee5699d2d4233b445"}
+# HEAD of osa "stable/rocky" as of 04.01.2019
+export OPENSTACK_OSA_VERSION=${OPENSTACK_OSA_VERSION:-"2087cd98f28b35f655ca398d25d2a6c71e38328e"}
+export OPENSTACK_OSH_VERSION="rocky"
+# HEAD of osh "master" as of 17.07.2019
+export OSH_VERSION=${OSH_VERSION:-"dadf9946e076df2b09556f4a18107dc487788cdd"}
+# HEAD of osh-infra "master" as of 16.07.2019
+export OSH_INFRA_VERSION=${OSH_INFRA_VERSION:-"e96bdd9fb6235573acf5d4d1d019dca1e1446b7d"}
export KEEPALIVED_VERSION=$(grep -E '.*name: keepalived' -A 3 \
${XCI_PATH}/xci/installer/osa/files/ansible-role-requirements.yml \
| tail -n1 | sed -n 's/\(^.*: \)\([0-9a-z].*$\)/\2/p')
export HAPROXY_VERSION=$(grep -E '.*name: haproxy_server' -A 3 \
${XCI_PATH}/xci/installer/osa/files/ansible-role-requirements.yml \
| tail -n1 | sed -n 's/\(^.*: \)\([0-9a-z].*$\)/\2/p')
-# HEAD of kubspray "master" as of 27.02.2018
-# kubespray's bug Reference: https://github.com/kubernetes-incubator/kubespray/issues/2400
-export KUBESPRAY_VERSION=${KUBESPRAY_VERSION:-"5d9bb300d716880610c34dd680c167d2d728984d"}
+# Kubespray release v2.11.0 dated 31.08.2019
+export KUBESPRAY_VERSION=${KUBESPRAY_VERSION:-"v2.11.0"}
+# Kubernetes version supported by the pinned kubespray version
+# this is needed for pulling in kubectl
+export KUBERNETES_VERSION=${KUBERNETES_VERSION:-"v1.15.3"}
diff --git a/xci/config/user-vars b/xci/config/user-vars
index 1554777d..d3d7b2f1 100755
--- a/xci/config/user-vars
+++ b/xci/config/user-vars
@@ -20,8 +20,8 @@
# or
# export XCI_FLAVOR="ha"
#-------------------------------------------------------------------------------
-export XCI_FLAVOR=${XCI_FLAVOR:-aio}
-export XCI_DISTRO=${XCI_DISTRO:-$(source /etc/os-release &>/dev/null || source /usr/lib/os-release &>/dev/null; echo ${ID,,})}
+export XCI_FLAVOR=${XCI_FLAVOR:-mini}
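+# ${ID%%-*} drops any dash suffix from the distro ID (e.g. opensuse-leap
+# becomes opensuse) before it is lower-cased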
+export XCI_DISTRO=${XCI_DISTRO:-$(source /etc/os-release &>/dev/null || source /usr/lib/os-release &>/dev/null; ID=${ID%%-*}; echo ${ID,,})}
export XCI_CEPH_ENABLED=${XCI_CEPH_ENABLED:-false}
#-------------------------------------------------------------------------------
@@ -34,6 +34,14 @@ export XCI_CEPH_ENABLED=${XCI_CEPH_ENABLED:-false}
# export INSTALLER_TYPE="kubespray"
export INSTALLER_TYPE=${INSTALLER_TYPE:-osa}
+# Wait for upstream openstack-helm (OSH) to support opensuse
+if [ "$XCI_DISTRO" == "opensuse" ] && [ "$INSTALLER_TYPE" == "osh" ]; then
+ export XCI_DISTRO=ubuntu-bionic
+ export OSH_DISTRO=opensuse
+elif [ "$XCI_DISTRO" == "ubuntu" ] && [ "$INSTALLER_TYPE" == "osh" ]; then
+ export OSH_DISTRO=ubuntu
+fi
+
#-------------------------------------------------------------------------------
# Set DEPLOYMENT
#-------------------------------------------------------------------------------
@@ -53,6 +61,6 @@ export INFRA_DEPLOYMENT=${INFRA_DEPLOYMENT:-bifrost}
export XCI_ANSIBLE_PARAMS=${XCI_ANSIBLE_PARAMS:-""}
export RUN_TEMPEST=${RUN_TEMPEST:-false}
export CORE_OPENSTACK_INSTALL=${CORE_OPENSTACK_INSTALL:-false}
-export BIFROST_USE_PREBUILT_IMAGES=${BIFROST_USE_PREBUILT_IMAGES:-false}
+export BIFROST_CREATE_IMAGE_VIA_DIB=${BIFROST_CREATE_IMAGE_VIA_DIB:-true}
# Set this to true to force XCI to re-create the target OS images
export CLEAN_DIB_IMAGES=${CLEAN_DIB_IMAGES:-false}
diff --git a/xci/files/requirements.yml b/xci/files/requirements.yml
index a1b7feb3..1e097b09 100644
--- a/xci/files/requirements.yml
+++ b/xci/files/requirements.yml
@@ -7,4 +7,4 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-- src: peru.proxy_settings
+- src: ruzickap.proxy_settings
diff --git a/xci/files/xci-destroy-env.sh b/xci/files/xci-destroy-env.sh
index 97b76c7c..058d6569 100755
--- a/xci/files/xci-destroy-env.sh
+++ b/xci/files/xci-destroy-env.sh
@@ -21,27 +21,29 @@ rm -rf /opt/stack
# HOME is normally set by sudo -H
rm -rf ${HOME}/.config/openstack
rm -rf ${HOME}/.ansible
+# keepalived role fails ansible lint when cached
+rm -rf ${HOME}/releng-xci/xci/playbooks/roles/keepalived
+# Wipe repos
+rm -rf ${XCI_CACHE}/repos
-# bifrost installs everything on venv so we need to look there if virtualbmc is not installed on the host.
-if which vbmc &>/dev/null || { [[ -e ${XCI_VENV}/bifrost/bin/activate ]] && source ${XCI_VENV}/bifrost/bin/activate; }; then
+if which ${XCI_VENV}/bin/vbmc &>/dev/null; then
# Delete all libvirt VMs and hosts from vbmc (look for a port number)
- for vm in $(vbmc list | awk '/[0-9]/{{ print $2 }}'); do
+ for vm in $(${XCI_VENV}/bin/vbmc list | awk '/[0-9]/{{ print $2 }}'); do
if which virsh &>/dev/null; then
- virsh destroy $vm &>/dev/null || true
- virsh undefine $vm &>/dev/null || true
+ virsh destroy $vm || true
+ virsh undefine $vm || true
fi
- vbmc delete $vm
+ ${XCI_VENV}/bin/vbmc delete $vm
done
- which vbmc &>/dev/null || { [[ -e /opt/stack/bifrost/bin/activate ]] && deactivate; }
fi
# Destroy all XCI VMs on all flavors
for varfile in ${flavors[@]}; do
source ${XCI_PATH}/xci/config/${varfile}-vars
- for vm in ${TEST_VM_NODE_NAMES}; do
+ for vm in ${NODE_NAMES}; do
if which virsh &>/dev/null; then
- virsh destroy $vm &>/dev/null || true
- virsh undefine $vm &>/dev/null || true
+ virsh destroy $vm &> /dev/null || true
+ virsh undefine $vm &> /dev/null || true
fi
done
done
@@ -82,5 +84,7 @@ service ironic-conductor start || true
service ironic-inspector restart || true
rm -rf ${XCI_VENV}
+# We also need to clear up previous vbmc config dirs
+rm -rf ${HOME}/.vbmc
# vim: set ts=4 sw=4 expandtab:
diff --git a/xci/files/install-lib.sh b/xci/files/xci-lib.sh
index 14acab1b..860153b9 100644
--- a/xci/files/install-lib.sh
+++ b/xci/files/xci-lib.sh
@@ -7,15 +7,61 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# NOTE(hwoarang): Most parts of this this file were taken from the
-# bifrost repository (scripts/install-deps.sh). This script contains all
-# the necessary distro specific code to install ansible and it's dependencies.
+# Avoid double sourcing the file
+[[ -n ${XCI_LIB_SOURCED:-} ]] && return 0 || export XCI_LIB_SOURCED=1
+
+function usage() {
+ echo "
+Usage: $(basename ${0}) [-i <idf>] [-p <pdf>]
+
+ -h: This message
+ -i: Installer Descriptor File (IDF). (Default ${XCI_PATH}/xci/var/idf.yml)
+ -p: Pod Descriptor File (PDF). (Default ${XCI_PATH}/xci/var/pdf.yml)
+ "
+ exit 0
+}
+
+function parse_cmdline_opts() {
+ IDF=${XCI_PATH}/xci/var/idf.yml
+ PDF=${XCI_PATH}/xci/var/pdf.yml
+
+ while getopts ":hi:p:" o; do
+ case "${o}" in
+ i) IDF="${OPTARG}" ;;
+ p) PDF="${OPTARG}" ;;
+ h) usage ;;
+ *) echo "ERROR: Invalid option '-${OPTARG}'"; usage ;;
+ esac
+ done
+
+ # Do all the exports
+ export PDF=$(realpath ${PDF})
+ export IDF=$(realpath ${IDF})
+}
+
+function bootstrap_xci_env() {
+ # Declare our virtualenv
+ export XCI_VENV=${XCI_PATH}/venv/
+ # source user vars
+ source $XCI_PATH/xci/config/user-vars
+ # source pinned versions
+ source $XCI_PATH/xci/config/pinned-versions
+ # source flavor configuration
+ source "$XCI_PATH/xci/config/${XCI_FLAVOR}-vars"
+ # source installer configuration
+ source "$XCI_PATH/xci/installer/${INSTALLER_TYPE}/env" &>/dev/null || true
+ # source xci configuration
+ source $XCI_PATH/xci/config/env-vars
+ # set the BAREMETAL variable to true if the vendor declared in the PDF is not libvirt
+ grep -o vendor.* ${PDF} | grep -q libvirt && export BAREMETAL=false || export BAREMETAL=true
+}
function install_ansible() {
set -eu
# Use the upper-constraints file from the pinned requirements repository.
local uc="https://raw.githubusercontent.com/openstack/requirements/${OPENSTACK_REQUIREMENTS_VERSION}/upper-constraints.txt"
+ local osa_uc="https://raw.githubusercontent.com/openstack/openstack-ansible/${OPENSTACK_OSA_VERSION}/global-requirement-pins.txt"
local install_map
declare -A PKG_MAP
@@ -32,13 +78,15 @@ function install_ansible() {
net-tools
python-devel
python
+ python-pyyaml
venv
wget
+ curl
)
source /etc/os-release || source /usr/lib/os-release
case ${ID,,} in
- *suse)
+ *suse*)
OS_FAMILY="Suse"
INSTALLER_CMD="sudo -H -E zypper -q install -y --no-recommends"
CHECK_CMD="zypper search --match-exact --installed"
@@ -52,8 +100,10 @@ function install_ansible() {
[pip]=python-pip
[python]=python
[python-devel]=python-devel
+ [python-pyyaml]=python-PyYAML
[venv]=python-virtualenv
[wget]=wget
+ [curl]=curl
)
EXTRA_PKG_DEPS=( python-xml )
sudo zypper -n ref
@@ -79,11 +129,13 @@ function install_ansible() {
[pip]=python-pip
[python]=python-minimal
[python-devel]=libpython-dev
+ [python-pyyaml]=python-yaml
[venv]=python-virtualenv
[wget]=wget
+ [curl]=curl
)
EXTRA_PKG_DEPS=( apt-utils )
- sudo apt-get update
+ sudo apt-get update -qq > /dev/null
;;
rhel|fedora|centos)
@@ -101,10 +153,12 @@ function install_ansible() {
[pip]=python2-pip
[python]=python
[python-devel]=python-devel
+ [python-pyyaml]=PyYAML
[venv]=python-virtualenv
[wget]=wget
+ [curl]=curl
)
- sudo $PKG_MANAGER updateinfo
+ sudo $PKG_MANAGER updateinfo > /dev/null
EXTRA_PKG_DEPS=( deltarpm )
;;
@@ -118,14 +172,7 @@ function install_ansible() {
install_map+=(${EXTRA_PKG_DEPS[@]} )
- ${INSTALLER_CMD} ${install_map[@]}
-
- # Note(cinerama): If pip is linked to pip3, the rest of the install
- # won't work. Remove the alternatives. This is due to ansible's
- # python 2.x requirement.
- if [[ $(readlink -f /etc/alternatives/pip) =~ "pip3" ]]; then
- sudo -H update-alternatives --remove pip $(readlink -f /etc/alternatives/pip)
- fi
+ ${INSTALLER_CMD} ${install_map[@]} > /dev/null
# We need to prepare our virtualenv now
virtualenv --quiet --no-site-packages ${XCI_VENV}
@@ -134,7 +181,8 @@ function install_ansible() {
set -u
# We are inside the virtualenv now so we should be good to use pip and python from it.
- pip -q install --upgrade -c $uc ara virtualenv pip setuptools ansible==$XCI_ANSIBLE_PIP_VERSION ansible-lint==3.4.21
+ pip -q install --upgrade pip==9.0.3 # We need a version which supports the '-c' parameter
+ pip -q install --upgrade -c $uc -c $osa_uc ara==0.16.4 virtualenv pip setuptools shade ansible==$XCI_ANSIBLE_PIP_VERSION ansible-lint==3.4.21
ara_location=$(python -c "import os,ara; print(os.path.dirname(ara.__file__))")
export ANSIBLE_CALLBACK_PLUGINS="/etc/ansible/roles/plugins/callback:${ara_location}/plugins/callbacks"
@@ -142,11 +190,9 @@ function install_ansible() {
ansible_lint() {
set -eu
- # Use the upper-constraints file from the pinned requirements repository.
- local uc="https://raw.githubusercontent.com/openstack/requirements/${OPENSTACK_REQUIREMENTS_VERSION}/upper-constraints.txt"
- local playbooks_dir=(xci/playbooks xci/installer/osa/playbooks xci/installer/kubespray/playbooks)
+ local playbooks_dir=(xci/playbooks xci/installer/osa/playbooks xci/installer/kubespray/playbooks xci/installer/osh/playbooks)
# Extract role from scenario information
- local testing_role=$(sed -n "/^- scenario: ${DEPLOY_SCENARIO}/,/^$/p" ${XCI_PATH}/xci/opnfv-scenario-requirements.yml | grep role | rev | cut -d '/' -f -1 | rev)
+ local testing_role=$(sed -n "/^- scenario: ${DEPLOY_SCENARIO}$/,/^$/p" ${XCI_PATH}/xci/opnfv-scenario-requirements.yml | grep role | rev | cut -d '/' -f -1 | rev)
# clear XCI_CACHE
rm -rf ${XCI_CACHE}/repos/openstack-ansible-tests
@@ -195,4 +241,58 @@ collect_xci_logs() {
sudo -H -E bash -c 'chown ${SUDO_UID}:${SUDO_GID} -R ${LOG_PATH}/'
}
+submit_bug_report() {
+ cd ${XCI_PATH}
+ echo ""
+ echo "-------------------------------------------------------------------------"
+ echo "Oh nooooo! The XCI deployment failed miserably :-("
+ echo ""
+ echo "If you need help, please choose one of the following options"
+ echo "* #opnfv-pharos @ freenode network"
+ echo "* opnfv-tech-discuss mailing list (https://lists.opnfv.org/mailman/listinfo/opnfv-tech-discuss)"
+ echo " - Please prefix the subject with [XCI]"
+ echo "* https://jira.opnfv.org (Release Engineering project)"
+ echo ""
+ echo "Do not forget to submit the following information on your bug report:"
+ echo ""
+ git diff --quiet && echo "releng-xci tree status: clean" || echo "releng-xci tree status: local modifications"
+ echo "opnfv/releng-xci version: $(git rev-parse HEAD)"
+ echo "openstack/bifrost version: $OPENSTACK_BIFROST_VERSION"
+ echo "openstack/openstack-ansible version: $OPENSTACK_OSA_VERSION"
+ echo "xci flavor: $XCI_FLAVOR"
+ echo "xci installer: $INSTALLER_TYPE"
+ echo "xci scenario: $DEPLOY_SCENARIO"
+ echo "Environment variables:"
+ env | grep --color=never '\(OPNFV\|XCI\|INSTALLER_TYPE\|OPENSTACK\|SCENARIO\|ANSIBLE\|BIFROST\|DIB\)'
+ echo "-------------------------------------------------------------------------"
+}
+
+log_xci_information() {
+ local scenario_version scenario_sha
+
+ cd ${XCI_SCENARIOS_CACHE}/${DEPLOY_SCENARIO}
+ scenario_sha=$(git rev-parse HEAD)
+ scenario_version=$(git describe --exact 2>/dev/null || echo "master")
+ cd -
+ echo "Info: Starting XCI Deployment"
+ echo "Info: Deployment parameters"
+ echo "-------------------------------------------------------------------------"
+ echo "OPNFV scenario: $DEPLOY_SCENARIO"
+ echo "Scenario version: ${scenario_version} (sha: ${scenario_sha})"
+ echo "xci flavor: $XCI_FLAVOR"
+ echo "xci installer: $INSTALLER_TYPE"
+ echo "infra deployment: $INFRA_DEPLOYMENT"
+ echo "opnfv/releng-xci version: $(git rev-parse HEAD)"
+ [[ "$INFRA_DEPLOYMENT" == "bifrost" ]] && echo "openstack/bifrost version: $OPENSTACK_BIFROST_VERSION"
+ [[ "$INSTALLER_TYPE" == "osa" ]] && echo "openstack/openstack-ansible version: $OPENSTACK_OSA_VERSION"
+ [[ "$INSTALLER_TYPE" == "kubespray" ]] && echo "kubespray version: $KUBESPRAY_VERSION"
+ [[ "$INSTALLER_TYPE" == "osh" ]] && echo "kubespray version: $KUBESPRAY_VERSION"
+ echo "-------------------------------------------------------------------------"
+}
+
+exit_trap() {
+ submit_bug_report
+ collect_xci_logs
+}
+
# vim: set ts=4 sw=4 expandtab:
diff --git a/xci/infra/bifrost/infra-provision.sh b/xci/infra/bifrost/infra-provision.sh
index 9c3adfc2..b0617733 100644
--- a/xci/infra/bifrost/infra-provision.sh
+++ b/xci/infra/bifrost/infra-provision.sh
@@ -1,3 +1,11 @@
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 SUSE LINUX GmbH.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
#-------------------------------------------------------------------------------
# Start provisioning VM nodes
#-------------------------------------------------------------------------------
@@ -8,14 +16,71 @@
# - destroys VMs, removes ironic db, leases, logs
# - creates and provisions VMs for the chosen flavor
#-------------------------------------------------------------------------------
+
BIFROST_ROOT_DIR="$(dirname $(realpath ${BASH_SOURCE[0]}))"
+export ANSIBLE_ROLES_PATH="$HOME/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles:${XCI_PATH}/xci/playbooks/roles:${XCI_CACHE}/repos/bifrost/playbooks/roles"
+export ANSIBLE_LIBRARY="$HOME/.ansible/plugins/modules:/usr/share/ansible/plugins/modules:${XCI_CACHE}/repos/bifrost/playbooks/library"
-echo "Info: Starting provisining VM nodes using openstack/bifrost"
+echo "Info: Create XCI VM resources"
echo "-------------------------------------------------------------------------"
-cd $BIFROST_ROOT_DIR/playbooks/
-ansible-playbook ${XCI_ANSIBLE_PARAMS} -i "localhost," bootstrap-bifrost.yml
-cd ${XCI_CACHE}/repos/bifrost
-bash ./scripts/bifrost-provision.sh
+
+ansible-playbook ${XCI_ANSIBLE_PARAMS} \
+ -i ${XCI_PATH}/xci/playbooks/dynamic_inventory.py \
+ -e num_nodes=${NUM_NODES} \
+ -e vm_domain_type=${VM_DOMAIN_TYPE} \
+ -e baremetal_json_file=/tmp/baremetal.json \
+ -e xci_distro=${XCI_DISTRO} \
+ -e pdf_file=${PDF} \
+ -e idf_file=${IDF} \
+ ${BIFROST_ROOT_DIR}/playbooks/xci-setup-nodes.yml
+
+
+ansible-playbook ${XCI_ANSIBLE_PARAMS} \
+ --private-key=${XCI_PATH}/xci/scripts/vm/id_rsa_for_dib \
+ --user=devuser \
+ -i ${XCI_PATH}/xci/playbooks/dynamic_inventory.py \
+ ${BIFROST_ROOT_DIR}/playbooks/xci-prepare-env.yml
+
+source ${XCI_CACHE}/repos/bifrost/scripts/bifrost-env.sh
+
+# The upstream role hardcodes delegate_to: localhost, but we need the tasks to run against the opnfv host instead, so strip the directive here.
+sed -i "/delegate_to:/d" ${XCI_CACHE}/repos/bifrost/playbooks/roles/bifrost-deploy-nodes-dynamic/tasks/main.yml
+
+ansible-playbook ${XCI_ANSIBLE_PARAMS} \
+ --user=devuser \
+ -i ${XCI_PATH}/xci/playbooks/dynamic_inventory.py \
+ -i ${XCI_CACHE}/repos/bifrost/playbooks/inventory/bifrost_inventory.py \
+ -e use_cirros=false \
+ -e testing_user=root \
+ -e test_vm_num_nodes=${NUM_NODES} \
+ -e test_vm_cpu='host-model' \
+ -e inventory_dhcp=${BIFROST_INVENTORY_DHCP} \
+ -e inventory_dhcp_static_ip=false \
+ -e enable_inspector=true \
+ -e inspect_nodes=true \
+ -e download_ipa=${BIFROST_DOWNLOAD_IPA} \
+ -e create_ipa_image=${BIFROST_CREATE_IPA} \
+ -e write_interfaces_file=true \
+ -e ipv4_gateway=192.168.122.1 \
+ -e wait_timeout=3600 \
+ -e enable_keystone=false \
+ -e ironicinspector_git_branch=${BIFROST_IRONIC_INSPECTOR_VERSION:-master} \
+ -e ironicinspectorclient_git_branch=${BIFROST_IRONIC_INSPECTOR_CLIENT_VERSION:-master} \
+ -e ironicclient_git_branch=${BIFROST_IRONIC_CLIENT_VERSION:-master} \
+ -e ironic_git_branch=${BIFROST_IRONIC_VERSION:-master} \
+ -e create_image_via_dib=${BIFROST_CREATE_IMAGE_VIA_DIB:-true} \
+ -e xci_distro=${XCI_DISTRO} \
+ -e ironic_url="http://192.168.122.2:6385/" \
+ ${BIFROST_ROOT_DIR}/playbooks/opnfv-virtual.yml
+
+
+if [ "${BAREMETAL}" = true ]; then
+ ansible-playbook ${XCI_ANSIBLE_PARAMS} \
+ --user=devuser -i ${XCI_PATH}/xci/playbooks/dynamic_inventory.py \
+ -i ${XCI_CACHE}/repos/bifrost/playbooks/inventory/bifrost_inventory.py \
+ ${BIFROST_ROOT_DIR}/playbooks/wait-for-baremetal.yml
+fi
+
echo "-----------------------------------------------------------------------"
echo "Info: VM nodes are provisioned!"
echo "-----------------------------------------------------------------------"
diff --git a/xci/infra/bifrost/playbooks/opnfv-virtual.yaml b/xci/infra/bifrost/playbooks/opnfv-virtual.yml
index bb0daff6..f97eae4b 100644
--- a/xci/infra/bifrost/playbooks/opnfv-virtual.yaml
+++ b/xci/infra/bifrost/playbooks/opnfv-virtual.yml
@@ -7,11 +7,12 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
-- hosts: localhost
- connection: local
+- hosts: opnfv
name: "Host and Ironic bootstrapping"
become: yes
gather_facts: yes
+ vars_files:
+ - "../vars/{{ ansible_os_family | lower }}.yml"
pre_tasks:
- name: Remove pre-existing leases file
file: path=/var/lib/misc/dnsmasq.leases state=absent
@@ -51,7 +52,20 @@
mode: '0755'
owner: 'root'
group: 'root'
- when: use_prebuilt_images | bool == true
+ when: create_image_via_dib | bool == false
+ - name: Ensure /etc/hosts has good defaults
+ lineinfile:
+ create: yes
+ dest: "/etc/hosts"
+ regexp: "{{ item.regexp }}.*({{ ansible_hostname }}|localhost).*"
+ line: "{{ item.contents }}"
+ with_items:
+ - { regexp: '^127\.0\.0\.1', contents: '127.0.0.1 {{ ansible_hostname }} {{ ansible_fqdn }} localhost' }
+ - { regexp: '^::1', contents: '::1 {{ ansible_hostname }} {{ ansible_fqdn }} localhost ipv6-localhost ipv6-loopback' }
+ - name: Install required packages
+ package:
+ name: "{{ bifrost_required_devel_packages }}"
+ state: present
roles:
- role: bifrost-prep-for-install
@@ -59,20 +73,20 @@
- role: bifrost-keystone-install
- role: bifrost-ironic-install
cleaning: false
- testing: true
- # NOTE(TheJulia): While the next step creates a ramdisk, some elements
- # do not support ramdisk-image-create as they invoke steps to cleanup
- # the ramdisk which causes ramdisk-image-create to believe it failed.
+ testing: false
+ enabled_hardware_types: ipmi
+ network_interface: "{{ ansible_default_ipv4.interface }}"
+ # Create the IPA image used by ironic to boot the nodes and write the final distro to the hard drive
+ # Fedora is used because it is the only distro that works with ericsson-pod2 (it supports newer hardware)
- role: bifrost-create-dib-image
dib_imagename: "{{ http_boot_folder }}/ipa"
build_ramdisk: false
- dib_os_element: "{{ ipa_dib_os_element|default('debian') }}"
- dib_os_release: "jessie"
+ dib_os_element: "{{ ipa_dib_os_element|default('fedora') }}"
dib_elements: "ironic-agent {{ ipa_extra_dib_elements | default('') }}"
dib_notmpfs: true
when:
- create_ipa_image | bool == true
- - not use_prebuilt_images | bool == false
+ # Create the final distro image
- role: bifrost-create-dib-image
dib_imagetype: "qcow2"
dib_imagename: "{{deploy_image}}"
@@ -87,9 +101,7 @@
when:
- create_image_via_dib | bool == true
- transform_boot_image | bool == false
- - use_prebuilt_images | bool == false
- role: bifrost-keystone-client-config
- user: "{{ ansible_env.SUDO_USER }}"
clouds:
bifrost:
config_username: "{{ ironic.keystone.default_username }}"
@@ -107,18 +119,34 @@
vars:
multinode_testing: "{{ inventory_dhcp | bool == true }}"
become: no
- connection: local
- gather_facts: yes
- pre_tasks:
- - name: "Override default bifrost DNS if we are behind a proxy"
+ gather_facts: False
+ tasks:
+ - name: Gathering facts
+ setup:
+ delegate_to: opnfv
+ delegate_facts: False
+ - name: Find the network interface on the OPNFV node
set_fact:
- ipv4_nameserver: "192.168.122.1"
- when: lookup('env','http_proxy') != ''
- roles:
- - role: ironic-enroll-dynamic
- - { role: ironic-inspect-node, when: inspect_nodes | default('false') | bool == true }
- - role: bifrost-configdrives-dynamic
- - role: bifrost-deploy-nodes-dynamic
+ network_interface: "{{ ansible_default_ipv4.interface }}"
+ - import_role:
+ name: ironic-enroll-dynamic
+ private: True
+ delegate_to: opnfv
+ - import_role:
+ name: ironic-inspect-node
+ private: True
+ delegate_to: opnfv
+ when: inspect_nodes | default('false') | bool == true
+ - import_role:
+ name: bifrost-configdrives-dynamic
+ private: True
+ vars:
+ ipv4_nameserver: "{{ host_info[inventory_hostname]['public']['dns'] | list }}"
+ delegate_to: opnfv
+ - import_role:
+ name: bifrost-deploy-nodes-dynamic
+ private: True
+ delegate_to: opnfv
environment:
http_proxy: "{{ lookup('env','http_proxy') }}"
https_proxy: "{{ lookup('env','https_proxy') }}"
@@ -127,7 +155,13 @@
- hosts: baremetal
name: "Deploy machines."
become: no
- connection: local
serial: 1
- roles:
- - role: bifrost-prepare-for-test-dynamic
+ gather_facts: False
+ tasks:
+ #- name: Gathering facts
+ #setup:
+ #delegate_to: opnfv
+ #delegate_facts: False
+ - import_role:
+ name: bifrost-prepare-for-test-dynamic
+ delegate_to: opnfv
diff --git a/xci/infra/bifrost/playbooks/roles/common/venv_python_path.yml b/xci/infra/bifrost/playbooks/roles/common/venv_python_path.yml
new file mode 100644
index 00000000..7f7ad670
--- /dev/null
+++ b/xci/infra/bifrost/playbooks/roles/common/venv_python_path.yml
@@ -0,0 +1,34 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: "If VENV is set in the environment, enable installation into venv"
+ set_fact:
+ enable_venv: true
+ when: lookup('env', 'VENV') | length > 0
+
+- name: "Retrieve venv python path"
+ shell: "/bin/echo -e \"import sys\\nprint(':'.join(sys.path))\" | {{ ansible_python.get('executable', '/usr/bin/python').split('/')[-1] }}"
+ environment: "{{ bifrost_venv_env | default({}) }}"
+ register: venv_pythonpath_result
+ when: enable_venv
+
+- name: "Compute venv python path"
+ set_fact:
+ venv_pythonpath:
+ PYTHONPATH: "{{ venv_pythonpath_result.get('stdout', '') }}"
+ when: enable_venv
+
+- name: "Compute proper complete venv including proper Python path"
+ set_fact:
+ venv: "{{ venv | default({}) | combine(bifrost_venv_env | default({})) | combine(venv_pythonpath | default({})) }}"
+
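The shell pipeline used by the "Retrieve venv python path" task can be exercised on its own; a sketch assuming a virtualenv at ./venv (a placeholder path):

    # Print the interpreter's sys.path as a colon-joined string, which is
    # exactly what the task stores as PYTHONPATH.
    source ./venv/bin/activate
    /bin/echo -e "import sys\nprint(':'.join(sys.path))" | python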
diff --git a/xci/infra/bifrost/playbooks/wait-for-baremetal.yml b/xci/infra/bifrost/playbooks/wait-for-baremetal.yml
new file mode 100644
index 00000000..96aab29c
--- /dev/null
+++ b/xci/infra/bifrost/playbooks/wait-for-baremetal.yml
@@ -0,0 +1,17 @@
+# Ironic needs to boot the server again to install the OS on the hard drive.
+# We currently modify the opnfv VM networking config while ironic is doing
+# that, and it sometimes fails because of networking glitches, so we should
+# wait until the OS is installed before doing the opnfv config.
+
+- hosts: baremetal
+ name: "Wait for baremetal blades to be ready"
+ become: no
+ gather_facts: False
+ tasks:
+ - name: "Wait for nodes to reboot."
+ wait_for: state=stopped port=22 host={{ ipv4_address }} timeout=5000
+ delegate_to: opnfv
+ - name: "Wait for nodes to become available."
+ wait_for: state=started port=22 host={{ ipv4_address }} timeout=5000
+ delegate_to: opnfv
+
diff --git a/xci/infra/bifrost/playbooks/xci-prepare-env.yml b/xci/infra/bifrost/playbooks/xci-prepare-env.yml
new file mode 100644
index 00000000..d576324d
--- /dev/null
+++ b/xci/infra/bifrost/playbooks/xci-prepare-env.yml
@@ -0,0 +1,118 @@
+- name: Prepare deployment host
+ hosts: deployment_host
+ gather_facts: True
+ tasks:
+ - name: Ensure common private key has correct permissions
+ file:
+ path: "{{ xci_path }}/xci/scripts/vm/id_rsa_for_dib"
+ mode: "0600"
+
+ - name: Remove host from known_hosts file if necessary
+ shell:
+ ssh-keygen -R {{ hostvars['opnfv'].ip }}
+ failed_when: false
+
+- name: Prepare the OPNFV host
+ hosts: opnfv
+ gather_facts: True
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+ tasks:
+
+ - name: Configure SSH key for devuser
+ user:
+ name: devuser
+ generate_ssh_key: yes
+ ssh_key_bits: 2048
+ ssh_key_comment: xci
+ ssh_key_type: rsa
+ state: present
+
+ - name: Determine local user
+ become: no
+ local_action: command whoami
+ changed_when: False
+ register: _ansible_user
+
+ - name: Fetch local SSH key
+ delegate_to: localhost
+ become: no
+ slurp:
+ src: "/home/{{ _ansible_user.stdout }}/.ssh/id_rsa.pub"
+ register: _local_ssh_key
+
+ - name: "Configure {{ inventory_hostname }} authorized_keys file (devuser)"
+ authorized_key:
+ exclusive: no
+ user: devuser
+ state: present
+ manage_dir: yes
+ key: "{{ _local_ssh_key['content'] | b64decode }}"
+ comment: "deployer's key"
+
+ - name: "Configure {{ inventory_hostname }} authorized_keys file (root)"
+ authorized_key:
+ exclusive: no
+ user: root
+ state: present
+ manage_dir: yes
+ key: "{{ _local_ssh_key['content'] | b64decode }}"
+ comment: "deployer's key"
+ become: yes
+
+ - name: Ensure /httpboot directory exists
+ file:
+ path: /httpboot
+ state: directory
+ become: yes
+
+ # Directory must exist before passing the static config
+ - name: "Setup Inventory DHCP Hosts Directory"
+ file:
+ path: "/etc/dnsmasq.d/bifrost.dhcp-hosts.d"
+ state: directory
+ owner: "root"
+ group: "root"
+ mode: 0755
+ become: yes
+
+ - name: Copy bifrost files
+ copy:
+ src: "{{ item.src }}"
+ dest: "{{ item.dst }}"
+ with_items:
+ - { src: '/tmp/baremetal.json', dst: '/tmp/baremetal.json' }
+ - { src: '/tmp/baremetalstaticips', dst: '/etc/dnsmasq.d/bifrost.dhcp-hosts.d/baremetalstaticips' }
+ become: yes
+
+ - name: Copy original qcow2 image to OPNFV VM
+ synchronize:
+ src: "{{ xci_cache }}/{{ item }}"
+ dest: /httpboot/
+ recursive: yes
+ delete: yes
+ with_items:
+ - "deployment_image.qcow2"
+ - "deployment_image.qcow2.sha256.txt"
+ become: yes
+
+ - name: Configure DNS on openSUSE
+ block:
+ - stat:
+ path: /etc/resolv.conf.netconfig
+ register: _resolv_conf_netconfig
+ - shell: |
+ mv /etc/resolv.conf.netconfig /etc/resolv.conf
+ become: yes
+ when: _resolv_conf_netconfig.stat.exists
+ when: ansible_pkg_mgr == 'zypper'
+
+ #TODO: Find a way to do this with Ansible
+ - name: Remove the existing default gateway
+ shell: "ip route del default"
+ become: yes
+
+ #TODO: Find a way to do this with Ansible
+ - name: Add the correct default gateway
+ shell: "ip route add default via {{ host_info[inventory_hostname].public.gateway }}"
+ become: yes
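Assuming standard iproute2 semantics, the del/add pair above could also be expressed as a single atomic command; a sketch (not what this patch does):

    # 'ip route replace' swaps the default route in one step, avoiding the
    # brief window with no default gateway between 'del' and 'add'.
    # GATEWAY stands in for host_info[inventory_hostname].public.gateway.
    ip route replace default via "${GATEWAY}"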
diff --git a/xci/infra/bifrost/playbooks/bootstrap-bifrost.yml b/xci/infra/bifrost/playbooks/xci-setup-nodes.yml
index 2153b3b3..a0f92159 100644
--- a/xci/infra/bifrost/playbooks/bootstrap-bifrost.yml
+++ b/xci/infra/bifrost/playbooks/xci-setup-nodes.yml
@@ -1,29 +1,54 @@
---
# SPDX-license-identifier: Apache-2.0
##############################################################################
-# Copyright (c) 2017 Ericsson AB and others.
+# Copyright (c) 2018 SUSE LINUX GmbH.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-- hosts: localhost
- connection: local
- gather_facts: true
+
+- hosts: deployment_host
+ name: "Bootstrap XCI hardware resources and prepare provisioning environment"
+ gather_facts: yes
vars_files:
+ - "{{ pdf_file }}"
+ - "{{ idf_file }}"
+ - "{{ xci_path }}/xci/var/opnfv_vm_pdf.yml"
+ - "{{ xci_path }}/xci/var/opnfv_vm_idf.yml"
- "{{ xci_path }}/xci/var/opnfv.yml"
pre_tasks:
- name: Load distribution variables
include_vars:
file: "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml"
roles:
+ - role: create-nodes
+ become: yes
- role: clone-repository
project: "opnfv/bifrost"
repo: "{{ openstack_bifrost_git_url }}"
dest: "{{ xci_cache }}/repos/bifrost"
version: "{{ openstack_bifrost_version }}"
-
tasks:
+ - name: Wait for host to come back to life
+ local_action:
+ module: wait_for
+ host: "{{ opnfv_vm_ip }}"
+ delay: 15
+ state: started
+ port: 22
+ connect_timeout: 10
+ timeout: 10180
+
+ # No ansible module for brctl found
+ - name: Add the admin and mgmt interfaces to their bridges
+ shell: "brctl addif {{ item.bridge }} {{ item.interface }}"
+ become: true
+ when: baremetal | bool == true
+ with_items:
+ - { bridge: "{{ network_bridge_admin }}", interface: "{{ network_interface_admin }}" }
+ - { bridge: "{{ network_bridge_mgmt }}", interface: "{{ network_interface_mgmt }}" }
+
- name: Load distribution variables
include_vars:
file: "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml"
@@ -40,3 +65,12 @@
copy:
src: "{{ xci_path}}/xci/infra/bifrost/"
dest: "{{ xci_cache }}/repos/bifrost"
+ - name: "Ensure /etc/hosts has good defaults"
+ lineinfile:
+ dest: "/etc/hosts"
+ regexp: "{{ item.regexp }}.*({{ ansible_hostname }}|localhost).*"
+ line: "{{ item.contents }}"
+ become: yes
+ with_items:
+ - { regexp: '^127\.0\.0\.1', contents: '127.0.0.1 {{ ansible_hostname }} {{ ansible_fqdn }} localhost' }
+ - { regexp: '^::1', contents: '::1 {{ ansible_hostname }} {{ ansible_fqdn }} localhost ipv6-localhost ipv6-loopback' }
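The play above shells out to brctl because no Ansible module covers it; on hosts with iproute2, the same attachment can be done without bridge-utils. A sketch, with IFACE and BRIDGE as placeholders:

    # Attach an interface to an existing kernel bridge via iproute2.
    ip link set dev "${IFACE}" master "${BRIDGE}"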
diff --git a/xci/infra/bifrost/scripts/bifrost-env.sh b/xci/infra/bifrost/scripts/bifrost-env.sh
new file mode 100755
index 00000000..7d882125
--- /dev/null
+++ b/xci/infra/bifrost/scripts/bifrost-env.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2016 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# dib configuration
+case ${XCI_DISTRO,,} in
+ # These should ideally match the CI jobs
+ ubuntu)
+ export DIB_OS_RELEASE="${DIB_OS_RELEASE:-xenial}"
+ export DIB_OS_ELEMENT="${DIB_OS_ELEMENT:-ubuntu-minimal}"
+ export DIB_OS_PACKAGES="${DIB_OS_PACKAGES:-vlan,vim,less,bridge-utils,language-pack-en,iputils-ping,rsyslog,curl,iptables}"
+ ;;
+ centos)
+ export DIB_OS_RELEASE="${DIB_OS_RELEASE:-7}"
+ export DIB_OS_ELEMENT="${DIB_OS_ELEMENT:-centos-minimal}"
+ export DIB_OS_PACKAGES="${DIB_OS_PACKAGES:-vim,less,bridge-utils,iputils,rsyslog,curl,iptables}"
+ ;;
+ opensuse)
+ export DIB_OS_RELEASE="${DIB_OS_RELEASE:-42.3}"
+ export DIB_OS_ELEMENT="${DIB_OS_ELEMENT:-opensuse-minimal}"
+ export DIB_OS_PACKAGES="${DIB_OS_PACKAGES:-vim,less,bridge-utils,iputils,rsyslog,curl,iptables}"
+ ;;
+esac
+
+export BIFROST_INVENTORY_SOURCE=/tmp/baremetal.json
+
+if [ "${BAREMETAL}" = true ]; then
+ export BIFROST_INVENTORY_DHCP=true
+ export BIFROST_DOWNLOAD_IPA=false
+ export BIFROST_CREATE_IPA=true
+else
+ export BIFROST_INVENTORY_DHCP=false
+ export BIFROST_DOWNLOAD_IPA=true
+ export BIFROST_CREATE_IPA=false
+fi
+
+pip install -q --upgrade -r "${XCI_CACHE}/repos/bifrost/requirements.txt"
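bifrost-env.sh is meant to be sourced rather than executed so that its exports land in the caller's shell; note it also installs bifrost's Python requirements as a side effect. A usage sketch, with illustrative values:

    # DIB_* and BIFROST_* settings are exported into the current shell.
    export XCI_DISTRO=ubuntu BAREMETAL=false
    export XCI_CACHE="${HOME}/.cache/xci"   # placeholder cache location
    source xci/infra/bifrost/scripts/bifrost-env.sh
    echo "${BIFROST_INVENTORY_SOURCE} ${BIFROST_DOWNLOAD_IPA} ${BIFROST_CREATE_IPA}"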
diff --git a/xci/infra/bifrost/scripts/bifrost-provision.sh b/xci/infra/bifrost/scripts/bifrost-provision.sh
deleted file mode 100755
index 940e9439..00000000
--- a/xci/infra/bifrost/scripts/bifrost-provision.sh
+++ /dev/null
@@ -1,176 +0,0 @@
-#!/bin/bash
-# SPDX-license-identifier: Apache-2.0
-##############################################################################
-# Copyright (c) 2016 Ericsson AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-set -eu
-set -o pipefail
-
-# This is normally passed from the XCI deployment script but
-# we also need it here for the bifrost jobs which run outside of XCI
-export XCI_PATH="${XCI_PATH:-$(git rev-parse --show-toplevel)}"
-# Declare our virtualenv
-export XCI_VENV="${XCI_VENV:-${XCI_PATH}/venv/}"
-export XCI_DISTRO=${XCI_DISTRO:-$(source /etc/os-release &>/dev/null || source /usr/lib/os-release &>/dev/null; echo ${ID,,})}
-
-export PYTHONUNBUFFERED=1
-SCRIPT_HOME="$(cd "$(dirname "$0")" && pwd)"
-BIFROST_HOME=$SCRIPT_HOME/..
-ENABLE_VENV="true"
-export VENV=${XCI_VENV}
-PROVISION_WAIT_TIMEOUT=${PROVISION_WAIT_TIMEOUT:-3600}
-# This is normally exported by XCI env but we should initialize it here
-# in case we run this script on its own for debug purposes
-XCI_ANSIBLE_PARAMS=${XCI_ANSIBLE_PARAMS:-}
-# Ironic SHAs
-BIFROST_IRONIC_INSPECTOR_VERSION=${BIFROST_IRONIC_INSPECTOR_VERSION:-master}
-BIFROST_IRONIC_INSPECTOR_CLIENT_VERSION=${BIFROST_IRONIC_INSPECTOR_CLIENT_VERSION:-master}
-BIFROST_IRONIC_CLIENT_VERSION=${BIFROST_IRONIC_CLIENT_VERSION:-master}
-BIFROST_IRONIC_VERSION=${BIFROST_IRONIC_VERSION:-master}
-
-# set UPPER_CONSTRAINTS_FILE since it is needed in order to limit libvirt-python to 4.0.0
-export UPPER_CONSTRAINTS_FILE=https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt
-
-# Ensure the right inventory files is used based on branch
-CURRENT_BIFROST_BRANCH=$(git rev-parse --abbrev-ref HEAD)
-if [ $CURRENT_BIFROST_BRANCH = "master" ]; then
- BAREMETAL_DATA_FILE=${BAREMETAL_DATA_FILE:-'/tmp/baremetal.json'}
- INVENTORY_FILE_FORMAT="baremetal_json_file"
-else
- BAREMETAL_DATA_FILE=${BAREMETAL_DATA_FILE:-'/tmp/baremetal.csv'}
- INVENTORY_FILE_FORMAT="baremetal_csv_file"
-fi
-export BIFROST_INVENTORY_SOURCE=$BAREMETAL_DATA_FILE
-
-# Default settings for VMs
-export TEST_VM_NUM_NODES=${TEST_VM_NUM_NODES:-3}
-export TEST_VM_NODE_NAMES=${TEST_VM_NODE_NAMES:-"opnfv controller00 compute00"}
-export VM_DOMAIN_TYPE=${VM_DOMAIN_TYPE:-kvm}
-export VM_CPU=${VM_CPU:-4}
-export VM_DISK=${VM_DISK:-100}
-export VM_MEMORY_SIZE=${VM_MEMORY_SIZE:-8192}
-export VM_DISK_CACHE=${VM_DISK_CACHE:-unsafe}
-
-# Settings for bifrost
-TEST_PLAYBOOK="opnfv-virtual.yaml"
-USE_INSPECTOR=true
-USE_CIRROS=false
-TESTING_USER=root
-DOWNLOAD_IPA=true
-CREATE_IPA_IMAGE=false
-INSPECT_NODES=true
-INVENTORY_DHCP=false
-INVENTORY_DHCP_STATIC_IP=false
-WRITE_INTERFACES_FILE=true
-
-# Settings for console access
-export DIB_DEV_USER_PWDLESS_SUDO=yes
-export DIB_DEV_USER_PASSWORD=devuser
-
-# Additional dib elements
-export EXTRA_DIB_ELEMENTS=${EXTRA_DIB_ELEMENTS:-"openssh-server"}
-
-# dib configuration
-case ${XCI_DISTRO,,} in
- # These should ideally match the CI jobs
- ubuntu)
- export DIB_OS_RELEASE="${DIB_OS_RELEASE:-xenial}"
- export DIB_OS_ELEMENT="${DIB_OS_ELEMENT:-ubuntu-minimal}"
- export DIB_OS_PACKAGES="${DIB_OS_PACKAGES:-vlan,vim,less,bridge-utils,language-pack-en,iputils-ping,rsyslog,curl,iptables}"
- ;;
- centos)
- export DIB_OS_RELEASE="${DIB_OS_RELEASE:-7}"
- export DIB_OS_ELEMENT="${DIB_OS_ELEMENT:-centos-minimal}"
- export DIB_OS_PACKAGES="${DIB_OS_PACKAGES:-vim,less,bridge-utils,iputils,rsyslog,curl,iptables}"
- ;;
- opensuse)
- export DIB_OS_RELEASE="${DIB_OS_RELEASE:-42.3}"
- export DIB_OS_ELEMENT="${DIB_OS_ELEMENT:-opensuse-minimal}"
- export DIB_OS_PACKAGES="${DIB_OS_PACKAGES:-vim,less,bridge-utils,iputils,rsyslog,curl,iptables}"
- ;;
-esac
-
-# Copy the OS images if found
-if [[ -e ${XCI_PATH}/deployment_image.qcow2 ]]; then
- sudo mkdir -p /httpboot
- sudo mv ${XCI_PATH}/deployment_image.qcow2* /httpboot/
-fi
-
-# Install missing dependencies. Use sudo since for bifrost jobs
-# the venv is not ready yet.
-if [[ -n ${VIRTUAL_ENV:-} ]]; then
- _sudo=""
-else
- virtualenv --quiet --no-site-packages ${XCI_VENV}
- set +u
- source ${XCI_VENV}/bin/activate
- set -u
- _sudo="sudo -H -E"
-fi
-${_sudo} pip install -q --upgrade -r "$(dirname $0)/../requirements.txt"
-
-# Change working directory
-cd $BIFROST_HOME/playbooks
-
-# NOTE(hwoarang): Disable selinux as we are hitting issues with it from time to
-# time. Remove this when Centos7 is a proper gate on bifrost so we know that
-# selinux works as expected.
-if [[ -e /etc/centos-release ]]; then
- echo "*************************************"
- echo "WARNING: Disabling selinux on CentOS7"
- echo "*************************************"
- sudo setenforce 0
-fi
-
-# Create the VMS
-ansible-playbook ${XCI_ANSIBLE_PARAMS} \
- -i inventory/localhost \
- test-bifrost-create-vm.yaml \
- -e test_vm_num_nodes=${TEST_VM_NUM_NODES} \
- -e test_vm_cpu='host-model' \
- -e test_vm_memory_size=${VM_MEMORY_SIZE} \
- -e enable_venv=${ENABLE_VENV} \
- -e test_vm_domain_type=${VM_DOMAIN_TYPE} \
- -e ${INVENTORY_FILE_FORMAT}=${BAREMETAL_DATA_FILE}
-
-# Execute the installation and VM startup test
-ansible-playbook ${XCI_ANSIBLE_PARAMS} \
- -i inventory/bifrost_inventory.py \
- ${TEST_PLAYBOOK} \
- -e use_cirros=${USE_CIRROS} \
- -e testing_user=${TESTING_USER} \
- -e test_vm_num_nodes=${TEST_VM_NUM_NODES} \
- -e test_vm_cpu='host-model' \
- -e inventory_dhcp=${INVENTORY_DHCP} \
- -e inventory_dhcp_static_ip=${INVENTORY_DHCP_STATIC_IP} \
- -e enable_venv=${ENABLE_VENV} \
- -e enable_inspector=${USE_INSPECTOR} \
- -e inspect_nodes=${INSPECT_NODES} \
- -e download_ipa=${DOWNLOAD_IPA} \
- -e create_ipa_image=${CREATE_IPA_IMAGE} \
- -e write_interfaces_file=${WRITE_INTERFACES_FILE} \
- -e ipv4_gateway=192.168.122.1 \
- -e wait_timeout=${PROVISION_WAIT_TIMEOUT} \
- -e enable_keystone=false \
- -e ironicinspector_source_install=true \
- -e ironicinspector_git_branch=${BIFROST_IRONIC_INSPECTOR_VERSION} \
- -e ironicinspectorclient_source_install=true \
- -e ironicinspectorclient_git_branch=${BIFROST_IRONIC_INSPECTOR_CLIENT_VERSION} \
- -e ironicclient_source_install=true \
- -e ironicclient_git_branch=${BIFROST_IRONIC_CLIENT_VERSION} \
- -e ironic_git_branch=${BIFROST_IRONIC_VERSION} \
- -e use_prebuilt_images=${BIFROST_USE_PREBUILT_IMAGES} \
- -e xci_distro=${XCI_DISTRO}
-EXITCODE=$?
-
-if [ $EXITCODE != 0 ]; then
- echo "************************************"
- echo "Provisioning failed. See logs folder"
- echo "************************************"
-fi
-
-exit $EXITCODE
diff --git a/xci/scenarios/k8-flannel-nofeature/role/k8-flannel-nofeature/tasks/main.yml b/xci/infra/bifrost/vars/debian.yml
index 5efd7c83..95303b38 100644
--- a/xci/scenarios/k8-flannel-nofeature/role/k8-flannel-nofeature/tasks/main.yml
+++ b/xci/infra/bifrost/vars/debian.yml
@@ -1,14 +1,19 @@
+---
+# SPDX-license-identifier: Apache-2.0
##############################################################################
-# Copyright (c) 2018 taseer94@gmail.com & others.
-#
+# Copyright (c) 2018 SUSE Linux GmbH.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
----
-
-- name: copy the k8-cluster config file
- copy:
- src: k8-cluster.yml
- dest: "{{ remote_xci_path }}/.cache/repos/kubespray/opnfv_inventory/group_vars/k8s-cluster.yml"
+bifrost_required_devel_packages:
+ - gcc
+ - libffi-dev
+ - libssl-dev
+ - lsb-release
+ - make
+ - net-tools
+ - libpython-dev
+ - wget
+ - iptables
diff --git a/xci/scenarios/k8-canal-nofeature/role/k8-canal-nofeature/tasks/main.yml b/xci/infra/bifrost/vars/redhat.yml
index 5b2939f1..056c4d61 100644
--- a/xci/scenarios/k8-canal-nofeature/role/k8-canal-nofeature/tasks/main.yml
+++ b/xci/infra/bifrost/vars/redhat.yml
@@ -1,14 +1,19 @@
+---
+# SPDX-license-identifier: Apache-2.0
##############################################################################
-# Copyright (c) 2018 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
+# Copyright (c) 2018 SUSE Linux GmbH.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
----
-
-- name: copy k8s-cluster.yml
- copy:
- src: "k8s-cluster.yml"
- dest: "{{ remote_xci_path }}/.cache/repos/kubespray/opnfv_inventory/group_vars/k8s-cluster.yml"
+bifrost_required_devel_packages:
+ - gcc
+ - libffi-devel
+ - openssl-devel
+ - redhat-lsb
+ - make
+ - net-tools
+ - python-devel
+ - wget
+ - iptables
diff --git a/xci/scenarios/k8-calico-nofeature/role/k8-calico-nofeature/tasks/main.yml b/xci/infra/bifrost/vars/suse.yml
index 5b2939f1..8e2e9041 100644
--- a/xci/scenarios/k8-calico-nofeature/role/k8-calico-nofeature/tasks/main.yml
+++ b/xci/infra/bifrost/vars/suse.yml
@@ -1,14 +1,19 @@
+---
+# SPDX-license-identifier: Apache-2.0
##############################################################################
-# Copyright (c) 2018 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
+# Copyright (c) 2018 SUSE Linux GmbH.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
----
-
-- name: copy k8s-cluster.yml
- copy:
- src: "k8s-cluster.yml"
- dest: "{{ remote_xci_path }}/.cache/repos/kubespray/opnfv_inventory/group_vars/k8s-cluster.yml"
+bifrost_required_devel_packages:
+ - gcc
+ - libffi-devel
+ - libopenssl-devel
+ - make
+ - net-tools
+ - python-devel
+ - python-xml
+ - wget
+ - iptables
diff --git a/xci/installer/kubespray/deploy.sh b/xci/installer/kubespray/deploy.sh
index 1a0b34bc..af80b38f 100755
--- a/xci/installer/kubespray/deploy.sh
+++ b/xci/installer/kubespray/deploy.sh
@@ -28,12 +28,25 @@ echo "Info: Configuring localhost for kubespray"
echo "-----------------------------------------------------------------------"
cd $XCI_PLAYBOOKS
ansible-playbook ${XCI_ANSIBLE_PARAMS} -e XCI_PATH="${XCI_PATH}" \
- -i ${XCI_FLAVOR_ANSIBLE_FILE_PATH}/inventory/inventory.cfg \
- configure-localhost.yml
+ -i dynamic_inventory.py configure-localhost.yml
echo "-----------------------------------------------------------------------"
echo "Info: Configured localhost for kubespray"
#-------------------------------------------------------------------------------
+# Configure installer
+#-------------------------------------------------------------------------------
+# This playbook
+# - recreates kubespray/inventory/opnfv from the upstream sample inventory
+# - updates kubespray's k8s-cluster.yml defaults and the dashboard template for XCI
+#-------------------------------------------------------------------------------
+
+echo "Info: Configuring kubespray installer"
+echo "-----------------------------------------------------------------------"
+cd $K8_XCI_PLAYBOOKS
+ansible-playbook ${XCI_ANSIBLE_PARAMS} \
+ -i ${XCI_PLAYBOOKS}/dynamic_inventory.py configure-installer.yml
+echo "-----------------------------------------------------------------------"
+echo "Info: Configured kubespray installer"
+
+#-------------------------------------------------------------------------------
# Configure deployment host, opnfv
#-------------------------------------------------------------------------------
# This playbook
@@ -46,9 +59,8 @@ echo "Info: Configured localhost for kubespray"
echo "Info: Configuring opnfv deployment host for kubespray"
echo "-----------------------------------------------------------------------"
cd $K8_XCI_PLAYBOOKS
-ansible-playbook ${XCI_ANSIBLE_PARAMS} -e XCI_PATH="${XCI_PATH}" \
- -i ${XCI_FLAVOR_ANSIBLE_FILE_PATH}/inventory/inventory.cfg \
- configure-opnfvhost.yml
+ansible-playbook ${XCI_ANSIBLE_PARAMS} \
+ -i ${XCI_PLAYBOOKS}/dynamic_inventory.py configure-opnfvhost.yml
echo "-----------------------------------------------------------------------"
echo "Info: Configured opnfv deployment host for kubespray"
@@ -65,56 +77,78 @@ if [ $XCI_FLAVOR != "aio" ]; then
echo "Info: Configuring target hosts for kubespray"
echo "-----------------------------------------------------------------------"
cd $K8_XCI_PLAYBOOKS
- ansible-playbook ${XCI_ANSIBLE_PARAMS} -e XCI_PATH="${XCI_PATH}" \
- -i ${XCI_FLAVOR_ANSIBLE_FILE_PATH}/inventory/inventory.cfg \
- configure-targethosts.yml
+ ansible-playbook ${XCI_ANSIBLE_PARAMS} \
+ -i ${XCI_PLAYBOOKS}/dynamic_inventory.py configure-targethosts.yml
echo "-----------------------------------------------------------------------"
echo "Info: Configured target hosts for kubespray"
fi
+
echo "Info: Using kubespray to deploy the kubernetes cluster"
echo "-----------------------------------------------------------------------"
-ssh root@$OPNFV_HOST_IP "set -o pipefail; cd releng-xci/.cache/repos/kubespray;\
- ansible-playbook ${XCI_ANSIBLE_PARAMS} \
- -i opnfv_inventory/inventory.cfg cluster.yml -b | tee setup-kubernetes.log"
+ssh root@$OPNFV_HOST_IP "set -o pipefail; export XCI_FLAVOR=$XCI_FLAVOR; export INSTALLER_TYPE=$INSTALLER_TYPE; \
+ export IDF=/root/releng-xci/xci/var/idf.yml; export PDF=/root/releng-xci/xci/var/pdf.yml; \
+ cd releng-xci/.cache/repos/kubespray/; ansible-playbook \
+ -i inventory/opnfv/dynamic_inventory.py cluster.yml -b | tee setup-kubernetes.log"
scp root@$OPNFV_HOST_IP:~/releng-xci/.cache/repos/kubespray/setup-kubernetes.log \
- $LOG_PATH/setup-kubernetes.log
+ $LOG_PATH/setup-kubernetes.log
+
cd $K8_XCI_PLAYBOOKS
-ansible-playbook ${XCI_ANSIBLE_PARAMS} -e XCI_PATH="${XCI_PATH}" \
- -i ${XCI_FLAVOR_ANSIBLE_FILE_PATH}/inventory/inventory.cfg \
- configure-kubenet.yml
+ansible-playbook ${XCI_ANSIBLE_PARAMS} \
+ -i ${XCI_PLAYBOOKS}/dynamic_inventory.py configure-kubenet.yml
echo
echo "-----------------------------------------------------------------------"
echo "Info: Kubernetes installation is successfully completed!"
echo "-----------------------------------------------------------------------"
-# Configure the kubernetes authentication in opnfv host. In future releases
-# kubectl is no longer an artifact so we should not fail if it's not available.
-# This needs to be removed in the future
-ssh root@$OPNFV_HOST_IP "mkdir -p ~/.kube/;\
- cp -f ~/admin.conf ~/.kube/config; \
- cp -f ~/kubectl /usr/local/bin || true"
-
+#-------------------------------------------------------------------------------
+# Execute post-installation tasks
+#-------------------------------------------------------------------------------
+# Playbook post-deployment.yml is used to execute any post-deployment tasks
+# that are required for the scenario under test.
+#-------------------------------------------------------------------------------
+# copy admin.conf
+ssh root@$OPNFV_HOST_IP "mkdir -p ~/.kube/; \
+ cp -f ~/admin.conf ~/.kube/config"
+echo "-----------------------------------------------------------------------"
+echo "Info: Running post-deployment scenario role"
+echo "-----------------------------------------------------------------------"
+cd $K8_XCI_PLAYBOOKS
+ansible-playbook ${XCI_ANSIBLE_PARAMS} -i ${XCI_PLAYBOOKS}/dynamic_inventory.py \
+ post-deployment.yml
+echo "-----------------------------------------------------------------------"
+echo "Info: Post-deployment scenario role execution done"
+echo "-----------------------------------------------------------------------"
+echo
echo "Login opnfv host ssh root@$OPNFV_HOST_IP
according to the user-guide to create a service
https://kubernetes.io/docs/user-guide/walkthrough/k8s201/"
-
echo
echo "-----------------------------------------------------------------------"
echo "Info: Kubernetes login details"
echo "-----------------------------------------------------------------------"
-
-# Get the dashborad URL
-DASHBOARD_SERVICE=$(ssh root@$OPNFV_HOST_IP "kubectl get service -n kube-system |grep kubernetes-dashboard")
+echo
+# Get the dashboard URL
+if ssh-keygen -f "/home/opnfv/.ssh/known_hosts" -F $OPNFV_HOST_IP;
+then
+ssh-keygen -f "/home/opnfv/.ssh/known_hosts" -R $OPNFV_HOST_IP;
+echo "known_hosts entry from opnfv host from previous deployment found and deleted"
+fi
+DASHBOARD_SERVICE=$(ssh -q -o StrictHostKeyChecking=no root@$OPNFV_HOST_IP "kubectl get service -n kube-system |grep kubernetes-dashboard")
DASHBOARD_PORT=$(echo ${DASHBOARD_SERVICE} | awk '{print $5}' |awk -F "[:/]" '{print $2}')
KUBER_SERVER_URL=$(ssh root@$OPNFV_HOST_IP "grep -r server ~/.kube/config")
echo "Info: Kubernetes Dashboard URL:"
echo $KUBER_SERVER_URL | awk '{print $2}'| sed -n "s#:[0-9]*\$#:$DASHBOARD_PORT#p"
-# Get the dashborad user and password
+# Get the dashboard user and password
MASTER_IP=$(echo ${KUBER_SERVER_URL} | awk '{print $2}' |awk -F "[:/]" '{print $4}')
-USER_CSV=$(ssh root@$MASTER_IP " cat /etc/kubernetes/users/known_users.csv")
+if ssh-keygen -f "/home/opnfv/.ssh/known_hosts" -F $MASTER_IP;
+then
+ssh-keygen -f "/home/opnfv/.ssh/known_hosts" -R $MASTER_IP;
+echo "Info: known_hosts entry for master host from previous deployment found and deleted"
+fi
+USER_CSV=$(ssh -q -o StrictHostKeyChecking=no root@$MASTER_IP " cat /etc/kubernetes/users/known_users.csv")
USERNAME=$(echo $USER_CSV |awk -F ',' '{print $2}')
PASSWORD=$(echo $USER_CSV |awk -F ',' '{print $1}')
echo "Info: Dashboard username: ${USERNAME}"
diff --git a/xci/installer/kubespray/files/aio/inventory/inventory.cfg b/xci/installer/kubespray/files/aio/inventory/inventory.cfg
deleted file mode 100644
index a72d0fec..00000000
--- a/xci/installer/kubespray/files/aio/inventory/inventory.cfg
+++ /dev/null
@@ -1,20 +0,0 @@
-[all]
-opnfv ansible_host=192.168.122.2 ip=192.168.122.2
-
-[kube-master]
-opnfv
-
-[kube-node]
-opnfv
-
-[etcd]
-opnfv
-
-[k8s-cluster:children]
-kube-node
-kube-master
-
-[calico-rr]
-
-[vault]
-opnfv
diff --git a/xci/installer/kubespray/files/ha/inventory/inventory.cfg b/xci/installer/kubespray/files/ha/inventory/inventory.cfg
deleted file mode 100644
index aae36329..00000000
--- a/xci/installer/kubespray/files/ha/inventory/inventory.cfg
+++ /dev/null
@@ -1,32 +0,0 @@
-[all]
-opnfv ansible_host=192.168.122.2 ip=192.168.122.2
-master1 ansible_host=192.168.122.3 ip=192.168.122.3
-master2 ansible_host=192.168.122.4 ip=192.168.122.4
-master3 ansible_host=192.168.122.5 ip=192.168.122.5
-node1 ansible_host=192.168.122.6 ip=192.168.122.6
-node2 ansible_host=192.168.122.7 ip=192.168.122.7
-
-[kube-master]
-master1
-master2
-master3
-
-[kube-node]
-node1
-node2
-
-[etcd]
-master1
-master2
-master3
-
-[k8s-cluster:children]
-kube-node
-kube-master
-
-[calico-rr]
-
-[vault]
-master1
-master2
-master3
diff --git a/xci/installer/kubespray/files/mini/inventory/inventory.cfg b/xci/installer/kubespray/files/mini/inventory/inventory.cfg
deleted file mode 100644
index bf8bf19b..00000000
--- a/xci/installer/kubespray/files/mini/inventory/inventory.cfg
+++ /dev/null
@@ -1,22 +0,0 @@
-[all]
-opnfv ansible_host=192.168.122.2 ip=192.168.122.2
-master1 ansible_host=192.168.122.3 ip=192.168.122.3
-node1 ansible_host=192.168.122.4 ip=192.168.122.4
-
-[kube-master]
-master1
-
-[kube-node]
-node1
-
-[etcd]
-master1
-
-[k8s-cluster:children]
-kube-node
-kube-master
-
-[calico-rr]
-
-[vault]
-master1
diff --git a/xci/installer/kubespray/files/noha/inventory/inventory.cfg b/xci/installer/kubespray/files/noha/inventory/inventory.cfg
deleted file mode 100644
index 73c1e0a1..00000000
--- a/xci/installer/kubespray/files/noha/inventory/inventory.cfg
+++ /dev/null
@@ -1,24 +0,0 @@
-[all]
-opnfv ansible_host=192.168.122.2 ip=192.168.122.2
-master1 ansible_host=192.168.122.3 ip=192.168.122.3
-node1 ansible_host=192.168.122.4 ip=192.168.122.4
-node2 ansible_host=192.168.122.5 ip=192.168.122.5
-
-[kube-master]
-master1
-
-[kube-node]
-node1
-node2
-
-[etcd]
-master1
-
-[k8s-cluster:children]
-kube-node
-kube-master
-
-[calico-rr]
-
-[vault]
-master1
diff --git a/xci/installer/kubespray/playbooks/configure-installer.yml b/xci/installer/kubespray/playbooks/configure-installer.yml
new file mode 100644
index 00000000..d88ee55c
--- /dev/null
+++ b/xci/installer/kubespray/playbooks/configure-installer.yml
@@ -0,0 +1,50 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- hosts: localhost
+ connection: local
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+
+ tasks:
+ - name: delete existing kubespray/inventory/opnfv directory
+ file:
+ path: "{{ xci_path }}/.cache/repos/kubespray/inventory/opnfv"
+ state: absent
+
+ - name: copy kubespray/inventory/sample as kubespray/inventory/opnfv
+ copy:
+ src: "{{ xci_path }}/.cache/repos/kubespray/inventory/sample/"
+ dest: "{{ xci_path }}/.cache/repos/kubespray/inventory/opnfv"
+
+ - name: update kubespray k8s-cluster.yml for xci
+ lineinfile:
+ path: "{{ xci_path }}/.cache/repos/kubespray/inventory/opnfv/group_vars/k8s-cluster/k8s-cluster.yml"
+ regexp: "{{ item.regexp }}"
+ line: "{{ item.line }}"
+ with_items:
+ - { regexp: "kube_version:.*", line: "kube_version: {{ kubernetes_version }}" }
+ - { regexp: "kubeconfig_localhost:.*", line: "kubeconfig_localhost: true" }
+ - { regexp: "kube_basic_auth:.*", line: "kube_basic_auth: true" }
+ - { regexp: "dashboard_enabled:.*", line: "dashboard_enabled: true" }
+
+# NOTE(fdegir): The reason this task is separate from the task using the lineinfile
+# module above is that escaping curly braces does not work with with_items. What
+# happens is that ansible tries to resolve {{ ansible_env.HOME }}, which we don't
+# want since it should point to the home folder of the user executing this task at runtime.
+ - name: update kubespray artifacts_dir
+ lineinfile:
+ path: "{{ xci_path }}/.cache/repos/kubespray/inventory/opnfv/group_vars/k8s-cluster/k8s-cluster.yml"
+ regexp: "artifacts_dir:.*"
+ line: "artifacts_dir: '{{ '{{' }} ansible_env.HOME {{ '}}' }}'"
+
+ - name: change dashboard server type to NodePort
+ lineinfile:
+ path: "{{ xci_path }}/.cache/repos/kubespray/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2"
+ insertafter: 'targetPort'
+ line: " type: NodePort"
diff --git a/xci/installer/kubespray/playbooks/configure-kubenet.yml b/xci/installer/kubespray/playbooks/configure-kubenet.yml
index 1c3740b2..18a126c1 100644
--- a/xci/installer/kubespray/playbooks/configure-kubenet.yml
+++ b/xci/installer/kubespray/playbooks/configure-kubenet.yml
@@ -13,6 +13,7 @@
# so cbr0 interfaces can talk to each other.
- name: Prepare networking for kubenet
hosts: k8s-cluster
+ remote_user: root
gather_facts: True
become: yes
vars_files:
@@ -37,14 +38,14 @@
with_items: "{{ kubenet_xci_static_routes }}"
loop_control:
label: "{{ item.network }}"
- when: deploy_scenario == 'k8-nosdn-nofeature'
+ when: deploy_scenario.find('k8-nosdn-') != -1
- name: Ensure rp_filter is disabled on localhost
sysctl:
name: net.ipv4.conf.all.rp_filter
sysctl_set: yes
state: present
- value: "{{ deploy_scenario == 'k8-nosdn-nofeature' | ternary(0, 1) }}"
+ value: "{{ (kubenet_xci_static_routes is defined) | ternary(0, 1) }}"
reload: yes
delegate_to: localhost
run_once: True
diff --git a/xci/installer/kubespray/playbooks/configure-opnfvhost.yml b/xci/installer/kubespray/playbooks/configure-opnfvhost.yml
index ac8988da..52e42b06 100644
--- a/xci/installer/kubespray/playbooks/configure-opnfvhost.yml
+++ b/xci/installer/kubespray/playbooks/configure-opnfvhost.yml
@@ -11,7 +11,7 @@
vars_files:
- "{{ xci_path }}/xci/var/opnfv.yml"
- tasks:
+ pre_tasks:
- name: Load distribution variables
include_vars:
file: "{{ item }}"
@@ -23,40 +23,66 @@
remote_xci_flavor_files: "{{ ansible_env.HOME }}/releng-xci/xci/installer/{{ installer_type }}/files/{{ xci_flavor }}"
remote_xci_playbooks: "{{ ansible_env.HOME }}/releng-xci/xci/playbooks"
+ roles:
+ - role: bootstrap-host
+ configure_network: xci_flavor != 'aio'
+
+ tasks:
+ - name: Create list of files to copy
+ shell: |
+ git ls-tree -r --name-only HEAD > {{ xci_cache }}/releng-xci.files
+ echo ".git/" >> {{ xci_cache }}/releng-xci.files
+ echo ".cache/repos/" >> {{ xci_cache }}/releng-xci.files
+ echo ".cache/xci.env" >> {{ xci_cache }}/releng-xci.files
+ args:
+ executable: /bin/bash
+ chdir: "{{ xci_path }}"
+ changed_when: False
+ delegate_to: 127.0.0.1
+ tags:
+ - skip_ansible_lint
+
- name: Copy releng-xci to remote host
synchronize:
+ archive: yes
src: "{{ xci_path }}/"
dest: "{{ remote_xci_path }}"
- recursive: yes
delete: yes
+ rsync_opts:
+ - "--recursive"
+ - "--files-from={{ xci_cache }}/releng-xci.files"
- - name: delete the opnfv_inventory directory
- file:
- path: "{{ remote_xci_path }}/.cache/repos/kubespray/opnfv_inventory"
- state: absent
- - name: copy kubespray inventory directory
- command: "cp -rf {{ remote_xci_flavor_files }}/inventory \
- {{ remote_xci_path }}/.cache/repos/kubespray/opnfv_inventory"
- args:
- creates: "{{ remote_xci_path }}/.cache/repos/kubespray/opnfv_inventory"
- - name: make sure kubespray/opnfv_inventory/group_vars/ exist
+ - name: link xci dynamic inventory to kubespray/inventory/opnfv directory
file:
- path: "{{ remote_xci_path }}/.cache/repos/kubespray/opnfv_inventory/group_vars"
- state: directory
- - include: "{{ xci_path }}/xci/playbooks/bootstrap-scenarios.yml"
+ src: "{{ remote_xci_playbooks }}/dynamic_inventory.py"
+ path: "{{ remote_xci_path }}/.cache/repos/kubespray/inventory/opnfv/dynamic_inventory.py"
+ state: link
+
+ - name: Download kubectl and place it to /usr/local/bin
+ get_url:
+ url: "https://storage.googleapis.com/kubernetes-release/release/{{ kubernetes_version }}/bin/linux/amd64/kubectl"
+ dest: /usr/local/bin/kubectl
+ owner: root
+ group: root
+ mode: 0755
+
+ - name: Reload XCI deployment host facts
+ setup:
+ filter: ansible_local
+ gather_subset: "!all"
+ delegate_to: 127.0.0.1
+
+ - name: Prepare everything to run the {{ deploy_scenario }} role
+ include_role:
+ name: "{{ hostvars['opnfv'].ansible_local.xci.scenarios.role }}"
+
- name: Install required packages
package:
- name: "{{ kube_require_packages[ansible_pkg_mgr] }}"
+ name: "{{ (ansible_pkg_mgr == 'zypper') | ternary('dbus-1', 'dbus') }}"
state: present
- update_cache: "{{ (ansible_pkg_mgr == 'apt') | ternary('yes', omit) }}"
+ update_cache: "{{ (ansible_pkg_mgr in ['apt', 'zypper']) | ternary('yes', omit) }}"
when: xci_flavor == 'aio'
- - name: change dashboard server type to NodePort
- lineinfile:
- path: "{{ remote_xci_path }}/.cache/repos/kubespray/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2"
- insertafter: 'targetPort'
- line: " type: NodePort"
-
- name: pip install required packages
pip:
name: "{{ item.name }}"
@@ -64,9 +90,7 @@
with_items:
- { name: 'ansible', version: "{{ xci_kube_ansible_pip_version }}" }
- { name: 'netaddr' }
-
- - name: Configure SSL certificates
- include_tasks: "{{ xci_path }}/xci/playbooks/manage-ssl-certs.yml"
+ - { name: 'ansible-modules-hashivault' }
- name: fetch xci environment
copy:
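configure-opnfvhost.yml links xci's dynamic_inventory.py into kubespray's inventory/opnfv directory; assuming the script follows the standard Ansible dynamic-inventory contract, it can also be inspected by hand:

    # Dump the generated inventory as JSON; --list is the entry point
    # Ansible itself invokes on dynamic inventory scripts.
    python ${XCI_PATH}/xci/playbooks/dynamic_inventory.py --list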
diff --git a/xci/installer/kubespray/playbooks/configure-targethosts.yml b/xci/installer/kubespray/playbooks/configure-targethosts.yml
index c744eae6..2fde9877 100644
--- a/xci/installer/kubespray/playbooks/configure-targethosts.yml
+++ b/xci/installer/kubespray/playbooks/configure-targethosts.yml
@@ -1,6 +1,19 @@
---
- hosts: k8s-cluster
remote_user: root
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+
+ pre_tasks:
+ - name: Load distribution variables
+ include_vars:
+ file: "{{ item }}"
+ with_items:
+ - "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml"
+
+ roles:
+ - role: bootstrap-host
+
tasks:
- name: Manage SSH keys
include_tasks: "{{ xci_path }}/xci/playbooks/manage-ssh-keys.yml"
@@ -9,7 +22,7 @@
package:
name: "{{ (ansible_pkg_mgr == 'zypper') | ternary('dbus-1', 'dbus') }}"
state: present
- update_cache: "{{ (ansible_pkg_mgr == 'apt') | ternary('yes', omit) }}"
+ update_cache: "{{ (ansible_pkg_mgr in ['apt', 'zypper']) | ternary('yes', omit) }}"
- hosts: kube-master
remote_user: root
@@ -24,6 +37,4 @@
when: xci_flavor == 'ha'
- role: "haproxy_server"
haproxy_service_configs: "{{ haproxy_default_services}}"
- haproxy_user_ssl_cert: "/etc/ssl/certs/xci.crt"
- haproxy_user_ssl_key: "/etc/ssl/private/xci.key"
when: xci_flavor == 'ha'
diff --git a/xci/installer/kubespray/playbooks/post-deployment.yml b/xci/installer/kubespray/playbooks/post-deployment.yml
new file mode 100644
index 00000000..5c2f7f36
--- /dev/null
+++ b/xci/installer/kubespray/playbooks/post-deployment.yml
@@ -0,0 +1,42 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- hosts: opnfv
+ remote_user: root
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+
+ pre_tasks:
+ - name: Load distribution variables
+ include_vars:
+ file: "{{ item }}"
+ with_items:
+ - "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml"
+ - name: Set facts for remote deployment
+ set_fact:
+ remote_xci_scenario_path: "{{ ansible_env.HOME }}/releng-xci/.cache/repos/scenarios/{{ deploy_scenario }}/scenarios/{{ deploy_scenario }}"
+
+ tasks:
+ - name: Reload XCI deployment host facts
+ setup:
+ filter: ansible_local
+ gather_subset: "!all"
+ delegate_to: 127.0.0.1
+
+ - name: Check if any post-deployment tasks are defined for {{ deploy_scenario }} role
+ stat:
+ path: "{{ remote_xci_scenario_path }}/role/{{ deploy_scenario }}/tasks/post-deployment.yml"
+ register: post_deployment_yml
+
+ - name: Execute post-deployment tasks of {{ deploy_scenario }} role
+ include_role:
+ name: "{{ hostvars['opnfv'].ansible_local.xci.scenarios.role }}"
+ tasks_from: post-deployment
+ when:
+ - post_deployment_yml.stat.exists
diff --git a/xci/installer/osa/deploy.sh b/xci/installer/osa/deploy.sh
index 6dada3f5..8b3a67d0 100755
--- a/xci/installer/osa/deploy.sh
+++ b/xci/installer/osa/deploy.sh
@@ -58,7 +58,7 @@ echo "Info: Configuring opnfv deployment host for openstack-ansible"
echo "-----------------------------------------------------------------------"
cd $OSA_XCI_PLAYBOOKS
ansible-galaxy install -r ${XCI_PATH}/xci/files/requirements.yml -p $HOME/.ansible/roles
-ansible-playbook ${XCI_ANSIBLE_PARAMS} -i ${XCI_FLAVOR_ANSIBLE_FILE_PATH}/inventory \
+ansible-playbook ${XCI_ANSIBLE_PARAMS} -i ${XCI_PLAYBOOKS}/dynamic_inventory.py \
configure-opnfvhost.yml
echo "-----------------------------------------------------------------------"
echo "Info: Configured opnfv deployment host for openstack-ansible"
@@ -68,7 +68,6 @@ echo "Info: Configured opnfv deployment host for openstack-ansible"
#-------------------------------------------------------------------------------
# This playbook is only run for the all flavors except aio since aio is configured
# by an upstream script.
-
# This playbook
# - adds public keys to target hosts
# - configures network
@@ -78,7 +77,7 @@ if [[ $XCI_FLAVOR != "aio" ]]; then
echo "Info: Configuring target hosts for openstack-ansible"
echo "-----------------------------------------------------------------------"
cd $OSA_XCI_PLAYBOOKS
- ansible-playbook ${XCI_ANSIBLE_PARAMS} -i ${XCI_FLAVOR_ANSIBLE_FILE_PATH}/inventory \
+ ansible-playbook ${XCI_ANSIBLE_PARAMS} -i ${XCI_PLAYBOOKS}/dynamic_inventory.py \
configure-targethosts.yml
echo "-----------------------------------------------------------------------"
echo "Info: Configured target hosts"
@@ -91,7 +90,7 @@ fi
#-------------------------------------------------------------------------------
echo "Info: Setting up target hosts for openstack-ansible"
echo "-----------------------------------------------------------------------"
-ssh root@$OPNFV_HOST_IP "set -o pipefail; openstack-ansible ${XCI_ANSIBLE_PARAMS} \
+ssh root@$OPNFV_HOST_IP "set -o pipefail; openstack-ansible \
releng-xci/.cache/repos/openstack-ansible/playbooks/setup-hosts.yml | tee setup-hosts.log "
scp root@$OPNFV_HOST_IP:~/setup-hosts.log $LOG_PATH/setup-hosts.log
echo "-----------------------------------------------------------------------"
@@ -113,7 +112,7 @@ echo "Info: Set up target hosts for openstack-ansible successfuly"
echo "Info: Gathering facts"
echo "-----------------------------------------------------------------------"
ssh root@$OPNFV_HOST_IP "set -o pipefail; cd releng-xci/.cache/repos/openstack-ansible/playbooks; \
- ansible ${XCI_ANSIBLE_PARAMS} -m setup -a gather_subset=network,hardware,virtual all"
+ ansible -m setup -a gather_subset=network,hardware,virtual all"
echo "-----------------------------------------------------------------------"
#-------------------------------------------------------------------------------
@@ -124,7 +123,7 @@ echo "-----------------------------------------------------------------------"
echo "Info: Setting up infrastructure"
echo "-----------------------------------------------------------------------"
echo "xci: running ansible playbook setup-infrastructure.yml"
-ssh root@$OPNFV_HOST_IP "set -o pipefail; openstack-ansible ${XCI_ANSIBLE_PARAMS} \
+ssh root@$OPNFV_HOST_IP "set -o pipefail; openstack-ansible \
releng-xci/.cache/repos/openstack-ansible/playbooks/setup-infrastructure.yml | tee setup-infrastructure.log"
scp root@$OPNFV_HOST_IP:~/setup-infrastructure.log $LOG_PATH/setup-infrastructure.log
echo "-----------------------------------------------------------------------"
@@ -153,7 +152,7 @@ echo "Info: Database cluster verification successful!"
#-------------------------------------------------------------------------------
echo "Info: Installing OpenStack on target hosts"
echo "-----------------------------------------------------------------------"
-ssh root@$OPNFV_HOST_IP "set -o pipefail; openstack-ansible ${XCI_ANSIBLE_PARAMS} \
+ssh root@$OPNFV_HOST_IP "set -o pipefail; openstack-ansible \
releng-xci/.cache/repos/openstack-ansible/playbooks/setup-openstack.yml | tee opnfv-setup-openstack.log"
scp root@$OPNFV_HOST_IP:~/opnfv-setup-openstack.log $LOG_PATH/opnfv-setup-openstack.log
echo "-----------------------------------------------------------------------"
@@ -161,6 +160,22 @@ echo
echo "Info: OpenStack installation is successfully completed!"
#-------------------------------------------------------------------------------
+# Execute post-installation tasks
+#-------------------------------------------------------------------------------
+# Playbook post-deployment.yml is used to execute any post-deployment tasks
+# that are required for the scenario under test.
+#-------------------------------------------------------------------------------
+echo "-----------------------------------------------------------------------"
+echo "Info: Running post-deployment scenario role"
+echo "-----------------------------------------------------------------------"
+cd $OSA_XCI_PLAYBOOKS
+ansible-playbook ${XCI_ANSIBLE_PARAMS} -i ${XCI_PLAYBOOKS}/dynamic_inventory.py \
+ post-deployment.yml
+echo "-----------------------------------------------------------------------"
+echo
+echo "Info: Post-deployment scenario role execution done"
+
+#-------------------------------------------------------------------------------
# - Getting OpenStack login information
#-------------------------------------------------------------------------------
echo "Info: Openstack login details"
diff --git a/xci/installer/osa/files/aio/flavor-vars.yml b/xci/installer/osa/files/aio/flavor-vars.yml
deleted file mode 100644
index 6ac1e0fe..00000000
--- a/xci/installer/osa/files/aio/flavor-vars.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-# this file is added intentionally in order to simplify putting files in place
-# in future, it might contain vars specific to this flavor
diff --git a/xci/installer/osa/files/aio/inventory b/xci/installer/osa/files/aio/inventory
deleted file mode 100644
index fa2a1009..00000000
--- a/xci/installer/osa/files/aio/inventory
+++ /dev/null
@@ -1,2 +0,0 @@
-[deployment]
-opnfv ansible_ssh_host=192.168.122.2
diff --git a/xci/installer/osa/files/ansible-role-requirements.yml b/xci/installer/osa/files/ansible-role-requirements.yml
index 5ecbf155..e787aff5 100644
--- a/xci/installer/osa/files/ansible-role-requirements.yml
+++ b/xci/installer/osa/files/ansible-role-requirements.yml
@@ -7,180 +7,180 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# these versions are based on the osa commit 85714acedb50ea65d7e7684c127984c8dc56afe4 on 2018-04-03
-# http://github.com/cgit/openstack/openstack-ansible/commit/?id=90d0679d209cb494b9a71817c56e2c26c7fc5ca1
+# these versions are based on the osa commit e41b0c40501ea8906fcbdcc7d37ff6ef0cd5cf02 on 2018-12-11
+# https://git.openstack.org/cgit/openstack/openstack-ansible/commit/?h=refs/heads/stable/rocky&id=e41b0c40501ea8906fcbdcc7d37ff6ef0cd5cf02
- name: ansible-hardening
scm: git
- src: https://github.com/openstack/ansible-hardening
- version: 3f870c24f9bcd88ec1f1d7815c30cf2abfac39e5
+ src: https://git.openstack.org/openstack/ansible-hardening
+ version: 14e6bb6a411b6b03bf258144be66845a5831705c
- name: apt_package_pinning
scm: git
- src: https://github.com/openstack/openstack-ansible-apt_package_pinning
- version: b488ec5ee3092ba5b6765b5888c9ad2e44922ec5
+ src: https://git.openstack.org/openstack/openstack-ansible-apt_package_pinning
+ version: 4b2584d699c79ac65acfeb2157a97327df6f0fd6
- name: pip_install
scm: git
- src: https://github.com/openstack/openstack-ansible-pip_install
- version: 78e615c712771e33c1a7436e05bc91644318ece1
+ src: https://git.openstack.org/openstack/openstack-ansible-pip_install
+ version: 671e7129ad3dcf20bdda942842f9f76203bf5a5e
- name: galera_client
scm: git
- src: https://github.com/openstack/openstack-ansible-galera_client
- version: 4bc75a7b141fa0ff0ff1f35d26c09163df482b34
+ src: https://git.openstack.org/openstack/openstack-ansible-galera_client
+ version: 6dbac51e5b74ffdee429375f6c22739e7a5ef017
- name: galera_server
scm: git
- src: https://github.com/openstack/openstack-ansible-galera_server
- version: 9b2c2e8098f0f02e206c0498fa466a6798f7c89d
+ src: https://git.openstack.org/openstack/openstack-ansible-galera_server
+ version: 7a7036f6d15ce3117a925217b66cba806034bb96
- name: ceph_client
scm: git
- src: https://github.com/openstack/openstack-ansible-ceph_client
- version: 50ea8b644c0713d007f6f172cd7bbc850f44a55a
+ src: https://git.openstack.org/openstack/openstack-ansible-ceph_client
+ version: 278aaca502533b33b9714393e47b536654055c58
- name: haproxy_server
scm: git
- src: https://github.com/openstack/openstack-ansible-haproxy_server
- version: 0c0c9453e8760fcbb0a126e6c97de83f004ae06b
+ src: https://git.openstack.org/openstack/openstack-ansible-haproxy_server
+ version: 6bc259471283162b3cb8ec0c4bc736f81254d050
- name: keepalived
scm: git
src: https://github.com/evrardjp/ansible-keepalived
- version: 2b4a1f36c29b06b832bc4e6d112ca5559a98fd4a
+ version: 64764d25ab868417f1138a7b9605f2eb94cbfd02
- name: lxc_container_create
scm: git
- src: https://github.com/openstack/openstack-ansible-lxc_container_create
- version: 3d1e70d1be8d10a54da35ad97c3e750384f8a73b
+ src: https://git.openstack.org/openstack/openstack-ansible-lxc_container_create
+ version: 14a74f2fb60fa7865cf34f75e3196e802847b9d1
- name: lxc_hosts
scm: git
- src: https://github.com/openstack/openstack-ansible-lxc_hosts
- version: 400f0c80b9c531a792dc01ff12cf1f3b3bd69a2d
+ src: https://git.openstack.org/openstack/openstack-ansible-lxc_hosts
+ version: 83e20af591b00fc796eba0e0e1c7650faaa20cd7
- name: memcached_server
scm: git
- src: https://github.com/openstack/openstack-ansible-memcached_server
- version: 67ff6cd34d8158dde56a7a59b8ccbdd079effde5
+ src: https://git.openstack.org/openstack/openstack-ansible-memcached_server
+ version: e058c81a44859c7bcd3eeaac49a8f25b423e38a4
- name: openstack_hosts
scm: git
- src: https://github.com/openstack/openstack-ansible-openstack_hosts
- version: 372be6bfc1169131c6607c4f0f5758563dc1265f
+ src: https://git.openstack.org/openstack/openstack-ansible-openstack_hosts
+ version: 0028cedcccc4913bd1c604404c84be16164d1fe5
- name: os_keystone
scm: git
- src: https://github.com/openstack/openstack-ansible-os_keystone
- version: 48019740f86570f8bcb14068a0e253b05ffb4336
+ src: https://git.openstack.org/openstack/openstack-ansible-os_keystone
+ version: 5a54cc6ba50875c4068e4cdfe3cb23ae1603e257
- name: openstack_openrc
scm: git
- src: https://github.com/openstack/openstack-ansible-openstack_openrc
- version: e86c73ef9af547b30a4aab0d39aca96359bf5ce4
+ src: https://git.openstack.org/openstack/openstack-ansible-openstack_openrc
+ version: 805ef5349db7d8af0132b546ff56a36ec80ea7db
- name: os_aodh
scm: git
- src: https://github.com/openstack/openstack-ansible-os_aodh
- version: 75c8a1f07c0b0f8e8baa68198be789efd453183e
+ src: https://git.openstack.org/openstack/openstack-ansible-os_aodh
+ version: 9b8d7483d69e60f4ae71ceb6a3336ff81f355c38
- name: os_barbican
scm: git
- src: https://github.com/openstack/openstack-ansible-os_barbican
- version: bd8b72cb68c2629f3d1c032f315eb9c25931920e
+ src: https://git.openstack.org/openstack/openstack-ansible-os_barbican
+ version: f9ce44edb809c92735fa093334fa1d79cc538126
- name: os_ceilometer
scm: git
- src: https://github.com/openstack/openstack-ansible-os_ceilometer
- version: c9b2115cf7c38a5861a8126d45eddef9ea03d1ad
+ src: https://git.openstack.org/openstack/openstack-ansible-os_ceilometer
+ version: 221dcccfef3efa1a187678f71c59d81d7e930a92
- name: os_cinder
scm: git
- src: https://github.com/openstack/openstack-ansible-os_cinder
- version: 948305aa6bfeeb2abdda7351aa0a8ff292810e56
+ src: https://git.openstack.org/openstack/openstack-ansible-os_cinder
+ version: a824d8d4dc6de6563f186449838e94c69a869e02
+- name: os_congress
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-os_congress
+ version: 0e6ccb63dba466bb1b7a11e94db7a420c716c06d
- name: os_designate
scm: git
- src: https://github.com/openstack/openstack-ansible-os_designate
- version: b1a08cc7e897e5b600415a69280a64f8f61dd66c
+ src: https://git.openstack.org/openstack/openstack-ansible-os_designate
+ version: 74c33e9788607f772d8402c4f5cfc79eb379278b
- name: os_glance
scm: git
- src: https://github.com/openstack/openstack-ansible-os_glance
- version: 9f2aa6478dadab3a4ec0cee6d23ffc86fa76a99b
+ src: https://git.openstack.org/openstack/openstack-ansible-os_glance
+ version: 7ec6a11b98715530e3cd5adbf682c2834e3122a8
- name: os_gnocchi
scm: git
- src: https://github.com/openstack/openstack-ansible-os_gnocchi
- version: 5beb9ca451812959f09c9f9235eee529c42b3805
+ src: https://git.openstack.org/openstack/openstack-ansible-os_gnocchi
+ version: db881f143223723b38f5d197e8e4b6dd4e057c6f
- name: os_heat
scm: git
- src: https://github.com/openstack/openstack-ansible-os_heat
- version: 7eff32af7fae96096694d582589389c66d10a8a3
+ src: https://git.openstack.org/openstack/openstack-ansible-os_heat
+ version: 14b8927123aa9b0cf47f365c1ab9f82147ce4bdc
- name: os_horizon
scm: git
- src: https://github.com/openstack/openstack-ansible-os_horizon
- version: ea9a27931e6d7f22df23ea02e1c0938ba576fada
+ src: https://git.openstack.org/openstack/openstack-ansible-os_horizon
+ version: b088034eeaa73ac781fe271588ba03871c88118e
- name: os_ironic
scm: git
- src: https://github.com/openstack/openstack-ansible-os_ironic
- version: 8c33498070489e2ff645cc1286df535e2b16726b
+ src: https://git.openstack.org/openstack/openstack-ansible-os_ironic
+ version: 6ecf38f1296080a33366528ad40d513539138925
- name: os_magnum
scm: git
- src: https://github.com/openstack/openstack-ansible-os_magnum
- version: 06087d8d193f4985ed8c33e996b02fa717628c27
-- name: os_molteniron
- scm: git
- src: https://github.com/openstack/openstack-ansible-os_molteniron
- version: 5102381790218c390438011f64e763016d335c61
+ src: https://git.openstack.org/openstack/openstack-ansible-os_magnum
+ version: 316f22626d242e33ce56fad367ef3570e0d8ab8b
- name: os_neutron
scm: git
- src: https://github.com/openstack/openstack-ansible-os_neutron
- version: 4bbb681632f2d577585905982d81fa019332f993
+ src: https://git.openstack.org/openstack/openstack-ansible-os_neutron
+ version: 3032836715b4055041554583fa2ed685ab076c25
- name: os_nova
scm: git
- src: https://github.com/openstack/openstack-ansible-os_nova
- version: 8c6a1b572e334bf185bf23759b90089a88a88b4b
+ src: https://git.openstack.org/openstack/openstack-ansible-os_nova
+ version: 9db5bf5ab6f82c1947d05a1ec7cd6e3ef304760f
- name: os_octavia
scm: git
- src: https://github.com/openstack/openstack-ansible-os_octavia
- version: c4cdbc5f36c43591cf729a5ce0f2a1e605c30be0
+ src: https://git.openstack.org/openstack/openstack-ansible-os_octavia
+ version: 508ea6d834153d0eb6da5bd32d10472f483c6dfa
- name: os_rally
scm: git
- src: https://github.com/openstack/openstack-ansible-os_rally
- version: 083bbb8c1290506797d49c51ee91a344a481d25c
+ src: https://git.openstack.org/openstack/openstack-ansible-os_rally
+ version: 8e98112b858ecffbb92c6ae342237af87416b7fa
- name: os_sahara
scm: git
- src: https://github.com/openstack/openstack-ansible-os_sahara
- version: 9b5111884ebd64ec8088bbdfb3b9a58cdabf1edb
+ src: https://git.openstack.org/openstack/openstack-ansible-os_sahara
+ version: ed7aa2d64a2ea3508c7d88a9e869524fdf0e9353
- name: os_swift
scm: git
- src: https://github.com/openstack/openstack-ansible-os_swift
- version: 5e88210fdd42d40960a14767fc662b3bd8a73c8a
+ src: https://git.openstack.org/openstack/openstack-ansible-os_swift
+ version: a88edf84964819870ef990d25b3bfa514186249a
- name: os_tacker
scm: git
- src: https://github.com/openstack/openstack-ansible-os_tacker
- version: d4acca1ce9ec3ce0c599a3424fa3c92ee318d270
+ src: https://git.openstack.org/openstack/openstack-ansible-os_tacker
+ version: bbce8657c13d2545bf632eb81bb78329a5479798
- name: os_tempest
scm: git
- src: https://github.com/openstack/openstack-ansible-os_tempest
- version: f34582d887e8e26e99710b29ac35306938ca857c
+ src: https://git.openstack.org/openstack/openstack-ansible-os_tempest
+ version: 08341f4a19b2ed2231b790496c9f7cf2b4eda2e6
- name: os_trove
scm: git
- src: https://github.com/openstack/openstack-ansible-os_trove
- version: 6cd21b625d9f3da5c537e98064f67001173c9174
+ src: https://git.openstack.org/openstack/openstack-ansible-os_trove
+ version: eaca0137de0d3d7bd57a68eecfecf52e3171f591
- name: plugins
scm: git
- src: https://github.com/openstack/openstack-ansible-plugins
- version: 2472c81eb3b065a7ed7dc4fd6bc4fef3f171089c
+ src: https://git.openstack.org/openstack/openstack-ansible-plugins
+ version: a84ae0d744047fe41a0c028213de8daa52f72aee
- name: rabbitmq_server
scm: git
- src: https://github.com/openstack/openstack-ansible-rabbitmq_server
- version: cffd1ebd45e20331ee505568cd34c277d3225138
+ src: https://git.openstack.org/openstack/openstack-ansible-rabbitmq_server
+ version: deccf93bdda1aa873b956418168368284509c99b
- name: repo_build
scm: git
- src: https://github.com/openstack/openstack-ansible-repo_build
- version: 0e50a282b09f62670494ada2f7d42509c148067f
+ src: https://git.openstack.org/openstack/openstack-ansible-repo_build
+ version: 630a6dfdcb46ba719ddb7fd7a4875259c5602b15
- name: repo_server
scm: git
- src: https://github.com/openstack/openstack-ansible-repo_server
- version: 5979a638eade8523f113714f9fd5c0fb59353277
+ src: https://git.openstack.org/openstack/openstack-ansible-repo_server
+ version: dd143b381b2fb94a3ba435f951e8b9338353a48d
- name: rsyslog_client
scm: git
- src: https://github.com/openstack/openstack-ansible-rsyslog_client
- version: ed8e178c38a28cab87b8d9bd4396caccf8c0e790
+ src: https://git.openstack.org/openstack/openstack-ansible-rsyslog_client
+ version: ed5e61c8bc2aabb905918bb2751ae985b1cfe229
- name: rsyslog_server
scm: git
- src: https://github.com/openstack/openstack-ansible-rsyslog_server
- version: d401a62d2f8ff7c8e6924b6fae0086e47ab37fa6
+ src: https://git.openstack.org/openstack/openstack-ansible-rsyslog_server
+ version: 9318bafbe60fed5f026c1e216d693bce745b9f99
- name: sshd
scm: git
src: https://github.com/willshersystems/ansible-sshd
- version: 537b9b2bc2fd7f23301222098344727f8161993c
+ version: d2ba81107ade1cf53c8b93590465c21ad2bc4530
- name: bird
scm: git
src: https://github.com/logan2211/ansible-bird
- version: 21d7d8de5af9e73c0853d3434a4b3d3f8dd39a70
+ version: 0fdb4848b5aca949ffade9be5a2ae254979e673e
- name: etcd
scm: git
src: https://github.com/logan2211/ansible-etcd
@@ -188,20 +188,44 @@
- name: unbound
scm: git
src: https://github.com/logan2211/ansible-unbound
- version: 7be67d6b60718896f0c17a7d4a14b912f72a59ae
+ version: 3bb7414f46b757e943507b65ca4c9f1080a008b0
- name: resolvconf
scm: git
src: https://github.com/logan2211/ansible-resolvconf
- version: d48dd3eea22094b6ecc6aa6ea07279c8e68e28b5
+ version: '1.4'
- name: ceph-ansible
scm: git
src: https://github.com/ceph/ceph-ansible
- version: 0be60456ce98d11ca6acf73d7f7a76c4f9dc5309
+ version: a5aca6ebbc341feb34b9ec0d73e16aeeedae63ac
- name: opendaylight
scm: git
src: https://github.com/opendaylight/integration-packaging-ansible-opendaylight
- version: 1f0f943499dcdd28a1b6971992c46bb4513ce8fb
+ version: 0aebbc250b34ac5ac14b37bdf9b1a2e1cfaa5a76
- name: haproxy_endpoints
scm: git
src: https://github.com/logan2211/ansible-haproxy-endpoints
- version: 49901861b16b8afaa9bccdbc649ac956610ff22b
+ version: 8e3a24a35beb16d717072dc83895c5a1f92689fb
+- name: nspawn_container_create
+ src: https://git.openstack.org/openstack/openstack-ansible-nspawn_container_create
+ scm: git
+ version: 2bcf03f1cca550731789d5b53c7d0806ef5f5ff7
+- name: nspawn_hosts
+ src: https://git.openstack.org/openstack/openstack-ansible-nspawn_hosts
+ scm: git
+ version: f69e101b9191682986272b766747f107b8a7a136
+- name: systemd_service
+ src: https://git.openstack.org/openstack/ansible-role-systemd_service
+ scm: git
+ version: a085a50c338b2eeaa87ed50eaaa22564d7c12968
+- name: systemd_mount
+ src: https://git.openstack.org/openstack/ansible-role-systemd_mount
+ scm: git
+ version: ee6263b3ce6502712ff4d6fb56474066df1773e4
+- name: systemd_networkd
+ src: https://git.openstack.org/openstack/ansible-role-systemd_networkd
+ scm: git
+ version: b024d0a3d97caf06b962a1f19450511b108dc5eb
+- name: python_venv_build
+ src: https://git.openstack.org/openstack/ansible-role-python_venv_build
+ scm: git
+ version: 5fdd8e00633f28606fc531a449d741e8c772a9fc
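Each entry in this file follows the Ansible Galaxy role-requirements schema, which OpenStack-Ansible consumes when fetching roles during bootstrap: a role name plus the scm/src/version triple, where version may be a commit SHA, a tag (as with the resolvconf entry above), or a branch. A sketch of an entry with a hypothetical role name:

    ---
    # Illustrative entry, not part of the pinned set.
    - name: example_role            # hypothetical role name
      scm: git
      src: https://git.openstack.org/openstack/openstack-ansible-example_role
      version: stable/rocky         # a branch, tag, or commit SHA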
diff --git a/xci/installer/osa/files/global-requirement-pins.txt b/xci/installer/osa/files/global-requirement-pins.txt
index fd401854..ec198a79 100644
--- a/xci/installer/osa/files/global-requirement-pins.txt
+++ b/xci/installer/osa/files/global-requirement-pins.txt
@@ -5,10 +5,17 @@
#
# Use this file with caution!
#
+
+### Pinned for gnocchi's dependency pycradox
+# https://github.com/sileht/pycradox/commit/2209f89fd65ecf31bea8eac6405acce2543e7b84
+Cython<0.28
+
###
### These are pinned to ensure exactly the same behaviour forever! ###
### These pins are updated through the sources-branch-updater script ###
###
-pip==9.0.1
-setuptools==38.5.1
-wheel==0.30.0
+# Bumping pip to version 10 caused tempest failures when pip was invoked
+# to install an empty list of packages.
+pip==18.0
+setuptools==40.0.0
+wheel==0.31.1
diff --git a/xci/installer/osa/files/ha/flavor-vars.yml b/xci/installer/osa/files/ha/flavor-vars.yml
deleted file mode 100644
index 167502c9..00000000
--- a/xci/installer/osa/files/ha/flavor-vars.yml
+++ /dev/null
@@ -1,39 +0,0 @@
----
-host_info: {
- 'opnfv': {
- 'VLAN_IP': '192.168.122.2',
- 'MGMT_IP': '172.29.236.10',
- 'VXLAN_IP': '172.29.240.10',
- 'STORAGE_IP': '172.29.244.10'
- },
- 'controller00': {
- 'VLAN_IP': '192.168.122.3',
- 'MGMT_IP': '172.29.236.11',
- 'VXLAN_IP': '172.29.240.11',
- 'STORAGE_IP': '172.29.244.11'
- },
- 'controller01': {
- 'VLAN_IP': '192.168.122.4',
- 'MGMT_IP': '172.29.236.12',
- 'VXLAN_IP': '172.29.240.12',
- 'STORAGE_IP': '172.29.244.12'
- },
- 'controller02': {
- 'VLAN_IP': '192.168.122.5',
- 'MGMT_IP': '172.29.236.13',
- 'VXLAN_IP': '172.29.240.13',
- 'STORAGE_IP': '172.29.244.13'
- },
- 'compute00': {
- 'VLAN_IP': '192.168.122.6',
- 'MGMT_IP': '172.29.236.14',
- 'VXLAN_IP': '172.29.240.14',
- 'STORAGE_IP': '172.29.244.14'
- },
- 'compute01': {
- 'VLAN_IP': '192.168.122.7',
- 'MGMT_IP': '172.29.236.15',
- 'VXLAN_IP': '172.29.240.15',
- 'STORAGE_IP': '172.29.244.15'
- }
-}
diff --git a/xci/installer/osa/files/ha/inventory b/xci/installer/osa/files/ha/inventory
deleted file mode 100644
index f5d882ef..00000000
--- a/xci/installer/osa/files/ha/inventory
+++ /dev/null
@@ -1,15 +0,0 @@
-[deployment]
-opnfv ansible_ssh_host=192.168.122.2
-
-[controller]
-controller00 ansible_ssh_host=192.168.122.3
-controller01 ansible_ssh_host=192.168.122.4
-controller02 ansible_ssh_host=192.168.122.5
-
-[compute]
-compute00 ansible_ssh_host=192.168.122.6
-compute01 ansible_ssh_host=192.168.122.7
-
-[openstack:children]
-controller
-compute
diff --git a/xci/installer/osa/files/ha/openstack_user_config.yml b/xci/installer/osa/files/ha/openstack_user_config.yml
index 360aa5cb..dc2ec183 100644
--- a/xci/installer/osa/files/ha/openstack_user_config.yml
+++ b/xci/installer/osa/files/ha/openstack_user_config.yml
@@ -77,18 +77,18 @@ shared-infra_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# repository (apt cache, python packages, etc)
repo-infra_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# load balancer
# Ideally the load balancer should not use the Infrastructure hosts.
@@ -97,9 +97,9 @@ haproxy_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# rsyslog server
# log_hosts:
@@ -115,18 +115,18 @@ identity_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# cinder api services
storage-infra_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# glance
# The settings here are repeated for each infra host.
@@ -139,27 +139,27 @@ image_hosts:
container_vars:
limit_container_types: glance
glance_nfs_client:
- - server: "172.29.244.14"
+ - server: "172.29.244.12"
remote_path: "/images"
local_path: "/var/lib/glance/images"
type: "nfs"
options: "_netdev,auto"
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
container_vars:
limit_container_types: glance
glance_nfs_client:
- - server: "172.29.244.14"
+ - server: "172.29.244.12"
remote_path: "/images"
local_path: "/var/lib/glance/images"
type: "nfs"
options: "_netdev,auto"
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
container_vars:
limit_container_types: glance
glance_nfs_client:
- - server: "172.29.244.14"
+ - server: "172.29.244.12"
remote_path: "/images"
local_path: "/var/lib/glance/images"
type: "nfs"
@@ -170,43 +170,43 @@ compute-infra_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# heat
orchestration_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# horizon
dashboard_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# neutron server, agents (L3, etc)
network_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# nova hypervisors
compute_hosts:
compute00:
- ip: 172.29.236.14
+ ip: 172.29.236.12
compute01:
- ip: 172.29.236.15
+ ip: 172.29.236.13
# cinder volume hosts (NFS-backed)
# The settings here are repeated for each infra host.
@@ -225,10 +225,10 @@ storage_hosts:
nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
nfs_shares_config: /etc/cinder/nfs_shares
shares:
- - ip: "172.29.244.14"
+ - ip: "172.29.244.12"
share: "/volumes"
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
container_vars:
cinder_backends:
limit_container_types: cinder_volume
@@ -238,10 +238,10 @@ storage_hosts:
nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
nfs_shares_config: /etc/cinder/nfs_shares
shares:
- - ip: "172.29.244.14"
+ - ip: "172.29.244.12"
share: "/volumes"
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
container_vars:
cinder_backends:
limit_container_types: cinder_volume
@@ -251,5 +251,5 @@ storage_hosts:
nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
nfs_shares_config: /etc/cinder/nfs_shares
shares:
- - ip: "172.29.244.14"
+ - ip: "172.29.244.12"
share: "/volumes"
diff --git a/xci/installer/osa/files/ha/user_variables.yml b/xci/installer/osa/files/ha/user_variables.yml
index c6f1b065..8c2e9f0c 100644
--- a/xci/installer/osa/files/ha/user_variables.yml
+++ b/xci/installer/osa/files/ha/user_variables.yml
@@ -163,7 +163,6 @@ openrc_nova_endpoint_type: "publicURL"
openrc_os_endpoint_type: "publicURL"
openrc_clouds_yml_interface: "public"
openrc_region_name: RegionOne
-haproxy_user_ssl_cert: "/etc/ssl/certs/xci.crt"
-haproxy_user_ssl_key: "/etc/ssl/private/xci.key"
+openrc_insecure: true
keystone_service_adminuri_insecure: true
keystone_service_internaluri_insecure: true
diff --git a/xci/installer/osa/files/mini/flavor-vars.yml b/xci/installer/osa/files/mini/flavor-vars.yml
deleted file mode 100644
index 0d446ba2..00000000
--- a/xci/installer/osa/files/mini/flavor-vars.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-host_info: {
- 'opnfv': {
- 'VLAN_IP': '192.168.122.2',
- 'MGMT_IP': '172.29.236.10',
- 'VXLAN_IP': '172.29.240.10',
- 'STORAGE_IP': '172.29.244.10'
- },
- 'controller00': {
- 'VLAN_IP': '192.168.122.3',
- 'MGMT_IP': '172.29.236.11',
- 'VXLAN_IP': '172.29.240.11',
- 'STORAGE_IP': '172.29.244.11'
- },
- 'compute00': {
- 'VLAN_IP': '192.168.122.4',
- 'MGMT_IP': '172.29.236.12',
- 'VXLAN_IP': '172.29.240.12',
- 'STORAGE_IP': '172.29.244.12'
- },
-}
diff --git a/xci/installer/osa/files/mini/inventory b/xci/installer/osa/files/mini/inventory
deleted file mode 100644
index 4224131f..00000000
--- a/xci/installer/osa/files/mini/inventory
+++ /dev/null
@@ -1,12 +0,0 @@
-[deployment]
-opnfv ansible_ssh_host=192.168.122.2
-
-[controller]
-controller00 ansible_ssh_host=192.168.122.3
-
-[compute]
-compute00 ansible_ssh_host=192.168.122.4
-
-[openstack:children]
-controller
-compute
diff --git a/xci/installer/osa/files/mini/user_variables.yml b/xci/installer/osa/files/mini/user_variables.yml
index 9fb2001e..b4d847bc 100644
--- a/xci/installer/osa/files/mini/user_variables.yml
+++ b/xci/installer/osa/files/mini/user_variables.yml
@@ -163,7 +163,6 @@ openrc_nova_endpoint_type: "publicURL"
openrc_os_endpoint_type: "publicURL"
openrc_clouds_yml_interface: "public"
openrc_region_name: RegionOne
-haproxy_user_ssl_cert: "/etc/ssl/certs/xci.crt"
-haproxy_user_ssl_key: "/etc/ssl/private/xci.key"
+openrc_insecure: true
keystone_service_adminuri_insecure: true
keystone_service_internaluri_insecure: true
diff --git a/xci/installer/osa/files/noha/flavor-vars.yml b/xci/installer/osa/files/noha/flavor-vars.yml
deleted file mode 100644
index 3c69a34b..00000000
--- a/xci/installer/osa/files/noha/flavor-vars.yml
+++ /dev/null
@@ -1,27 +0,0 @@
----
-host_info: {
- 'opnfv': {
- 'VLAN_IP': '192.168.122.2',
- 'MGMT_IP': '172.29.236.10',
- 'VXLAN_IP': '172.29.240.10',
- 'STORAGE_IP': '172.29.244.10'
- },
- 'controller00': {
- 'VLAN_IP': '192.168.122.3',
- 'MGMT_IP': '172.29.236.11',
- 'VXLAN_IP': '172.29.240.11',
- 'STORAGE_IP': '172.29.244.11'
- },
- 'compute00': {
- 'VLAN_IP': '192.168.122.4',
- 'MGMT_IP': '172.29.236.12',
- 'VXLAN_IP': '172.29.240.12',
- 'STORAGE_IP': '172.29.244.12'
- },
- 'compute01': {
- 'VLAN_IP': '192.168.122.5',
- 'MGMT_IP': '172.29.236.13',
- 'VXLAN_IP': '172.29.240.13',
- 'STORAGE_IP': '172.29.244.13'
- }
-}
diff --git a/xci/installer/osa/files/noha/inventory b/xci/installer/osa/files/noha/inventory
deleted file mode 100644
index 0e3b8d84..00000000
--- a/xci/installer/osa/files/noha/inventory
+++ /dev/null
@@ -1,13 +0,0 @@
-[deployment]
-opnfv ansible_ssh_host=192.168.122.2
-
-[controller]
-controller00 ansible_ssh_host=192.168.122.3
-
-[compute]
-compute00 ansible_ssh_host=192.168.122.4
-compute01 ansible_ssh_host=192.168.122.5
-
-[openstack:children]
-controller
-compute
diff --git a/xci/installer/osa/files/noha/user_variables.yml b/xci/installer/osa/files/noha/user_variables.yml
index 95450937..5e7ed83c 100644
--- a/xci/installer/osa/files/noha/user_variables.yml
+++ b/xci/installer/osa/files/noha/user_variables.yml
@@ -163,7 +163,6 @@ openrc_nova_endpoint_type: "publicURL"
openrc_os_endpoint_type: "publicURL"
openrc_clouds_yml_interface: "public"
openrc_region_name: RegionOne
-haproxy_user_ssl_cert: "/etc/ssl/certs/xci.crt"
-haproxy_user_ssl_key: "/etc/ssl/private/xci.key"
+openrc_insecure: true
keystone_service_adminuri_insecure: true
keystone_service_internaluri_insecure: true
diff --git a/xci/installer/osa/files/openstack_services.yml b/xci/installer/osa/files/openstack_services.yml
index 95abec5c..64718e33 100644
--- a/xci/installer/osa/files/openstack_services.yml
+++ b/xci/installer/osa/files/openstack_services.yml
@@ -30,211 +30,271 @@
## Global Requirements
-requirements_git_repo: https://github.com/openstack/requirements
-requirements_git_install_branch: 207ac2e166f0874b7ff891535bdb78ecf36cabc6 # HEAD of "stable/queens" as of 01.03.2018
+requirements_git_repo: https://git.openstack.org/openstack/requirements
+requirements_git_install_branch: 32f8fa388d3b8367320a3308a350f28254a82d65 # HEAD of "stable/rocky" as of 11.12.2018
+requirements_git_track_branch: stable/rocky
## Aodh service
-aodh_git_repo: https://github.com/openstack/aodh
-aodh_git_install_branch: f549faea0ea19dad5bb3f1871b7d66ae5d9d80f2 # HEAD of "stable/queens" as of 01.03.2018
+aodh_git_repo: https://git.openstack.org/openstack/aodh
+aodh_git_install_branch: ae5e710cd5ade867ebd0e6666bad95f82d130210 # HEAD of "stable/rocky" as of 11.12.2018
aodh_git_project_group: aodh_all
+aodh_git_track_branch: stable/rocky
## Barbican service
-barbican_git_repo: https://github.com/openstack/barbican
-barbican_git_install_branch: 5b525f6b0a7cf5342a9ffa3ca3618028d6d53649 # HEAD of "stable/queens" as of 01.03.2018
+barbican_git_repo: https://git.openstack.org/openstack/barbican
+barbican_git_install_branch: 0a1a9917e791d0c6fc8534a052700af5f5cbe9d0 # HEAD of "stable/rocky" as of 11.12.2018
barbican_git_project_group: barbican_all
+barbican_git_track_branch: stable/rocky
## Ceilometer service
-ceilometer_git_repo: https://github.com/openstack/ceilometer
-ceilometer_git_install_branch: 24caac82528be7678165bf12fb5b997852727ecd # HEAD of "stable/queens" as of 01.03.2018
-ceilometer_git_project_group: ceilometer_all
+ceilometer_git_repo: https://git.openstack.org/openstack/ceilometer
+ceilometer_git_install_branch: 018ff32fe0200a041297c386eb8b381f1bec0e71 # HEAD of "stable/rocky" as of 11.12.2018
+ceilometer_git_project_group: all
+ceilometer_git_track_branch: stable/rocky
## Cinder service
-cinder_git_repo: https://github.com/openstack/cinder
-cinder_git_install_branch: b61a02de56c1b9cc6d5003b5304ce66ee930f37b # HEAD of "stable/queens" as of 01.03.2018
+cinder_git_repo: https://git.openstack.org/openstack/cinder
+cinder_git_install_branch: 8dbf5d7882a6271514a3075a02cd080e44b709d5 # HEAD of "stable/rocky" as of 11.12.2018
cinder_git_project_group: cinder_all
+cinder_git_track_branch: stable/rocky
## Designate service
-designate_git_repo: https://github.com/openstack/designate
-designate_git_install_branch: 6ca9446bdcf04ba80787348892937cf19eefbf5a # HEAD of "stable/queens" as of 01.03.2018
+designate_git_repo: https://git.openstack.org/openstack/designate
+designate_git_install_branch: af1bb8a36a704bb1a226fe5154f828e152ef23e1 # HEAD of "stable/rocky" as of 11.12.2018
designate_git_project_group: designate_all
+designate_git_track_branch: stable/rocky
## Horizon Designate dashboard plugin
-designate_dashboard_git_repo: https://github.com/openstack/designate-dashboard
-designate_dashboard_git_install_branch: 5570a2dd51ccd3750012bfde9991f0689a02323b # HEAD of "stable/queens" as of 01.03.2018
+designate_dashboard_git_repo: https://git.openstack.org/openstack/designate-dashboard
+designate_dashboard_git_install_branch: faa67c87ad3cd5563da722f13b3adaee5bfe350f # HEAD of "stable/rocky" as of 11.12.2018
designate_dashboard_git_project_group: horizon_all
+designate_dashboard_git_track_branch: stable/rocky
## Dragonflow service
-dragonflow_git_repo: https://github.com/openstack/dragonflow
-dragonflow_git_install_branch: a2f50a8e8222ae1de04e44a6fd6f7e00d5864fc0 # HEAD of "master" as of 01.03.2018
+# please update the branch SHA (and this comment) when stable/rocky is branched for this repo.
+dragonflow_git_repo: https://git.openstack.org/openstack/dragonflow
+dragonflow_git_install_branch: 945b1e368c651ffa3655f42df724d9f13a7b6b96 # FROZEN HEAD of "master" as of 17.08.2018
dragonflow_git_project_group: neutron_all
+dragonflow_git_track_branch: None
## Glance service
-glance_git_repo: https://github.com/openstack/glance
-glance_git_install_branch: 968f4ae9ce244d9372cb3e8f45acea9d557f317d # HEAD of "stable/queens" as of 01.03.2018
+glance_git_repo: https://git.openstack.org/openstack/glance
+glance_git_install_branch: 4982c24f0aeb64f9d20159e543a90e31fc325dce # HEAD of "stable/rocky" as of 11.12.2018
glance_git_project_group: glance_all
+glance_git_track_branch: stable/rocky
## Heat service
-heat_git_repo: https://github.com/openstack/heat
-heat_git_install_branch: 43f122be13736f15fbc38cb6e6ce29545f784c86 # HEAD of "stable/queens" as of 01.03.2018
+heat_git_repo: https://git.openstack.org/openstack/heat
+heat_git_install_branch: 98eea44d5d91b74e1ab28c052e4fbc4b533d5f83 # HEAD of "stable/rocky" as of 11.12.2018
heat_git_project_group: heat_all
+heat_git_track_branch: stable/rocky
+## Horizon Heat dashboard plugin
+# please update the branch SHA (and this comment) when stable/rocky is branched for this repo.
+heat_dashboard_git_repo: https://git.openstack.org/openstack/heat-dashboard
+heat_dashboard_git_install_branch: bc7f5068bbb6f7974eaffa2d865a859ff0fd0069 # FROZEN HEAD of "master" as of 17.08.2018
+heat_dashboard_git_project_group: horizon_all
+heat_dashboard_git_track_branch: None
## Horizon service
-horizon_git_repo: https://github.com/openstack/horizon
-horizon_git_install_branch: d017fde2a0fdc48e4687f0f5ae0362ba6c5ad66a # HEAD of "stable/queens" as of 01.03.2018
+horizon_git_repo: https://git.openstack.org/openstack/horizon
+horizon_git_install_branch: 0ccfce882749998f3a6a7f9bfc6fa74ea346ca53 # HEAD of "stable/rocky" as of 11.12.2018
horizon_git_project_group: horizon_all
+horizon_git_track_branch: stable/rocky
## Horizon Ironic dashboard plugin
-ironic_dashboard_git_repo: https://github.com/openstack/ironic-ui
-ironic_dashboard_git_install_branch: 1c4cbd2b90270f65d04b91ddc5f86efa35bbc622 # HEAD of "stable/queens" as of 01.03.2018
+ironic_dashboard_git_repo: https://git.openstack.org/openstack/ironic-ui
+ironic_dashboard_git_install_branch: c700f3a613f3d78875caf7588e7bdf42a5db83cb # HEAD of "stable/rocky" as of 11.12.2018
ironic_dashboard_git_project_group: horizon_all
+ironic_dashboard_git_track_branch: stable/rocky
## Horizon Magnum dashboard plugin
-magnum_dashboard_git_repo: https://github.com/openstack/magnum-ui
-magnum_dashboard_git_install_branch: 051408e5b86615f74e5fa4cd2e4284b6d1e6a3f2 # HEAD of "stable/queens" as of 01.03.2018
+magnum_dashboard_git_repo: https://git.openstack.org/openstack/magnum-ui
+magnum_dashboard_git_install_branch: 2e9cb253eaee45a57f07369e432369dbff8fc173 # HEAD of "stable/rocky" as of 11.12.2018
magnum_dashboard_git_project_group: horizon_all
+magnum_dashboard_git_track_branch: stable/rocky
## Horizon LBaaS dashboard plugin
-neutron_lbaas_dashboard_git_repo: https://github.com/openstack/neutron-lbaas-dashboard
-neutron_lbaas_dashboard_git_install_branch: a42434a21bf95566472dc6c8ce078ca84432423d # HEAD of "stable/queens" as of 01.03.2018
+neutron_lbaas_dashboard_git_repo: https://git.openstack.org/openstack/neutron-lbaas-dashboard
+neutron_lbaas_dashboard_git_install_branch: 84fd20a474e8165ddbf5cf4bd14b7eb7da63ed41 # HEAD of "stable/rocky" as of 11.12.2018
neutron_lbaas_dashboard_git_project_group: horizon_all
+neutron_lbaas_dashboard_git_track_branch: stable/rocky
## Horizon FWaaS dashboard plugin
-neutron_fwaas_dashboard_git_repo: https://github.com//openstack/neutron-fwaas-dashboard
-neutron_fwaas_dashboard_git_install_branch: a710e7c4f48afe0261ef25efc44088346124de1c # HEAD of "stable/queens" as of 01.03.2018
+neutron_fwaas_dashboard_git_repo: https://git.openstack.org//openstack/neutron-fwaas-dashboard
+neutron_fwaas_dashboard_git_install_branch: 4adf5599211ef90696da94b2fee3aac730f3b7bc # HEAD of "stable/rocky" as of 11.12.2018
neutron_fwaas_dashboard_git_project_group: horizon_all
+neutron_fwaas_dashboard_git_track_branch: stable/rocky
## Horizon Sahara dashboard plugin
-sahara_dashboard_git_repo: https://github.com/openstack/sahara-dashboard
-sahara_dashboard_git_install_branch: 707059ff4e372ae66b21b82050a9e16295176782 # HEAD of "stable/queens" as of 01.03.2018
+sahara_dashboard_git_repo: https://git.openstack.org/openstack/sahara-dashboard
+sahara_dashboard_git_install_branch: 6e3f7538ce7779612d8e82b069597c06c2225a77 # HEAD of "stable/rocky" as of 11.12.2018
sahara_dashboard_git_project_group: horizon_all
+sahara_dashboard_git_track_branch: stable/rocky
## Keystone service
-keystone_git_repo: https://github.com/openstack/keystone
-keystone_git_install_branch: c06d74fcf4cf5338db6572265c609036f6817466 # HEAD of "stable/queens" as of 01.03.2018
+keystone_git_repo: https://git.openstack.org/openstack/keystone
+keystone_git_install_branch: 295ccda8190b39a505c397d2f4d9e4896dc538cf # HEAD of "stable/rocky" as of 11.12.2018
keystone_git_project_group: keystone_all
+keystone_git_track_branch: stable/rocky
## Neutron service
-neutron_git_repo: https://github.com/openstack/neutron
-neutron_git_install_branch: abb60c6175af435964028ce7c97bb4803aeab004 # HEAD of "stable/queens" as of 01.03.2018
+neutron_git_repo: https://git.openstack.org/openstack/neutron
+neutron_git_install_branch: ae2ef681403d1f103170ea70df1010f006244752 # HEAD of "stable/rocky" as of 11.12.2018
neutron_git_project_group: neutron_all
+neutron_git_track_branch: stable/rocky
-neutron_lbaas_git_repo: https://github.com/openstack/neutron-lbaas
-neutron_lbaas_git_install_branch: f6b8b5b0ad2c19ddf6a7c102c706cbfdb0b2bf05 # HEAD of "stable/queens" as of 01.03.2018
+neutron_lbaas_git_repo: https://git.openstack.org/openstack/neutron-lbaas
+neutron_lbaas_git_install_branch: 1353bad713fd97418a9984016da49df8cfa8825b # HEAD of "stable/rocky" as of 11.12.2018
neutron_lbaas_git_project_group: neutron_all
+neutron_lbaas_git_track_branch: stable/rocky
-neutron_vpnaas_git_repo: https://github.com/openstack/neutron-vpnaas
-neutron_vpnaas_git_install_branch: 8b01dcabb456d2d0bdf905b23f0bdb3ff2530f4d # HEAD of "stable/queens" as of 01.03.2018
+neutron_vpnaas_git_repo: https://git.openstack.org/openstack/neutron-vpnaas
+neutron_vpnaas_git_install_branch: 0876f4dfe7e2f57305110e035efa753bfb711a3f # HEAD of "stable/rocky" as of 11.12.2018
neutron_vpnaas_git_project_group: neutron_all
+neutron_vpnaas_git_track_branch: stable/rocky
-neutron_fwaas_git_repo: https://github.com/openstack/neutron-fwaas
-neutron_fwaas_git_install_branch: 43f56b794b19bb0f362e1d0a1449ee24bb16156e # HEAD of "stable/queens" as of 01.03.2018
+neutron_fwaas_git_repo: https://git.openstack.org/openstack/neutron-fwaas
+neutron_fwaas_git_install_branch: 5ece265b65247ee81a9335d5a685fa9f0a68b0fc # HEAD of "stable/rocky" as of 11.12.2018
neutron_fwaas_git_project_group: neutron_all
+neutron_fwaas_git_track_branch: stable/rocky
-neutron_dynamic_routing_git_repo: https://github.com/openstack/neutron-dynamic-routing
-neutron_dynamic_routing_git_install_branch: 386b5e4c33ab765eb7a72e9a9d4ffc1524d7d0c8 # HEAD of "stable/queens" as of 01.03.2018
+neutron_dynamic_routing_git_repo: https://git.openstack.org/openstack/neutron-dynamic-routing
+neutron_dynamic_routing_git_install_branch: ae3a01ca1fd6270fc27b3c6bae11afc0f17563d5 # HEAD of "stable/rocky" as of 11.12.2018
neutron_dynamic_routing_git_project_group: neutron_all
+neutron_dynamic_routing_git_track_branch: stable/rocky
-networking_calico_git_repo: https://github.com/openstack/networking-calico
-networking_calico_git_install_branch: 10626324b597585cc781197133d4b12f890b8081 # HEAD of "master" as of 01.03.2018
+# Networking Calico is following master
+networking_calico_git_repo: https://git.openstack.org/openstack/networking-calico
+networking_calico_git_install_branch: 79c7e00360ddb5fd3c38e60e5bbb3399928d9172 # HEAD of "master" as of 11.12.2018
networking_calico_git_project_group: neutron_all
+networking_calico_git_track_branch: stable/rocky
-networking_odl_git_repo: https://github.com/openstack/networking-odl
-networking_odl_git_install_branch: 8733cf68cbc827a4dd458e3328b5fd2c23a07bcf # HEAD of "stable/queens" as of 01.03.2018
+networking_odl_git_repo: https://git.openstack.org/openstack/networking-odl
+networking_odl_git_install_branch: 1cef1f0939a405eea4cb87e712794e8fa26b5166 # HEAD of "stable/rocky" as of 11.12.2018
networking_odl_git_project_group: neutron_all
+networking_odl_git_track_branch: stable/rocky
-networking_bgpvpn_git_repo: https://github.com/openstack/networking-bgpvpn
-networking_bgpvpn_git_install_branch: a15c091d8a616c1fd1d3741f32c5d135b5db594f # HEAD of "stable/queens" as of 01.03.2018
+networking_ovn_git_repo: https://git.openstack.org/openstack/networking-ovn
+networking_ovn_git_install_branch: e077aa93b1dc244b59864236d7c673f852e4e3ba # HEAD of "stable/rocky" as of 11.12.2018
+networking_ovn_git_project_group: neutron_all
+
+# BGPVPN is frozen until further notice due to
+# https://github.com/openstack/networking-bgpvpn/commit/e9a0ea199b47f76f69545e04bdb4db44869c388b#diff-b4ef698db8ca845e5845c4618278f29a
+networking_bgpvpn_git_repo: https://git.openstack.org/openstack/networking-bgpvpn
+networking_bgpvpn_git_install_branch: 3b93ddacd390d92fb144e5660324d4da064ad9a4 # FROZEN HEAD of "stable/rocky" as of 31.03.2018
networking_bgpvpn_git_project_group: neutron_all
+networking_bgpvpn_git_track_branch: None
-networking_sfc_git_repo: https://github.com/openstack/networking-sfc
-networking_sfc_git_install_branch: cbb68837a38428766ed4d22c5adfe3b2bc6c5f99 # HEAD of "stable/queens" as of 01.03.2018
+networking_sfc_git_repo: https://git.openstack.org/openstack/networking-sfc
+networking_sfc_git_install_branch: f0eddef3d53bbad417038f9d32b196ace2ebd0b2 # HEAD of "stable/rocky" as of 11.12.2018
networking_sfc_git_project_group: neutron_all
+networking_sfc_git_track_branch: stable/rocky
## Nova service
-nova_git_repo: https://github.com/openstack/nova
-nova_git_install_branch: 5039511840bd64151f3111d9c8d7d8a01344193b # HEAD of "stable/queens" as of 01.03.2018
+nova_git_repo: https://git.openstack.org/openstack/nova
+nova_git_install_branch: 8066142a1e381536291232250b3237e5c01ed1f4 # HEAD of "stable/rocky" as of 11.12.2018
nova_git_project_group: nova_all
+nova_git_track_branch: stable/rocky
## PowerVM Virt Driver
-nova_powervm_git_repo: https://github.com/openstack/nova-powervm
-nova_powervm_git_install_branch: 2999bff2d0e651cc091757d0501f82af2691daf6 # HEAD of "stable/queens" as of 01.03.2018
+nova_powervm_git_repo: https://git.openstack.org/openstack/nova-powervm
+nova_powervm_git_install_branch: 984b122668161703eee33918d570c61ae9c5b1ca # HEAD of "stable/rocky" as of 11.12.2018
nova_powervm_git_project_group: nova_all
+nova_powervm_git_track_branch: stable/rocky
## LXD Virt Driver
-nova_lxd_git_repo: https://github.com/openstack/nova-lxd
-nova_lxd_git_install_branch: 01b6a8e07558678505e3fa2b6f9ea2d10f821642 # HEAD of "stable/queens" as of 01.03.2018
+# please update the branch SHA (and this comment) when stable/rocky is branched for this repo.
+nova_lxd_git_repo: https://git.openstack.org/openstack/nova-lxd
+nova_lxd_git_install_branch: bc8d540c95b3209321658000fd74b0e5065a7ee2 # FROZEN HEAD of "master" as of 17.08.2018
nova_lxd_git_project_group: nova_all
+nova_lxd_git_track_branch: None
## Sahara service
-sahara_git_repo: https://github.com/openstack/sahara
-sahara_git_install_branch: abcc07a70f2da288548aa96abb16c8380e46dcf9 # HEAD of "stable/queens" as of 01.03.2018
+sahara_git_repo: https://git.openstack.org/openstack/sahara
+sahara_git_install_branch: ddb518fd81b82308bdd01e58ebf6ed7a48c544ae # HEAD of "stable/rocky" as of 11.12.2018
sahara_git_project_group: sahara_all
+sahara_git_track_branch: stable/rocky
## Swift service
-swift_git_repo: https://github.com/openstack/swift
-swift_git_install_branch: bd4b3c5dc9256fc0d6cca8f925705740c2395efd # HEAD of "stable/queens" as of 01.03.2018
+swift_git_repo: https://git.openstack.org/openstack/swift
+swift_git_install_branch: 7fdf66ab70da705774a4ae9c328a3e762bb2f3b4 # HEAD of "stable/rocky" as of 11.12.2018
swift_git_project_group: swift_all
+swift_git_track_branch: stable/rocky
## Swift3 middleware
-swift_swift3_git_repo: https://github.com/openstack/swift3
-swift_swift3_git_install_branch: 1c117c96dda8113c3398c16e68b61efef397de74 # HEAD of "master" as of 01.03.2018
+# please remove this when swift role is configured without this middleware (and uses swift code only)
+swift_swift3_git_repo: https://git.openstack.org/openstack/swift3
+swift_swift3_git_install_branch: 90db5d1510b2a770387961e7bf0fbeae8101ba45 # FROZEN HEAD of "master" as of 17.08.2018
swift_swift3_git_project_group: swift_all
+swift_swift3_git_track_branch: None
## Ironic service
-ironic_git_repo: https://github.com/openstack/ironic
-ironic_git_install_branch: 4c3a611ac3803a17dd584eb319f0bb40d6ee5ba3 # HEAD of "stable/queens" as of 01.03.2018
+ironic_git_repo: https://git.openstack.org/openstack/ironic
+ironic_git_install_branch: 6a6c0d882fe8ac299d18df75d2bbd111b170ad48 # HEAD of "stable/rocky" as of 11.12.2018
ironic_git_project_group: ironic_all
+ironic_git_track_branch: stable/rocky
## Magnum service
-magnum_git_repo: https://github.com/openstack/magnum
-magnum_git_install_branch: 0b3133280fd7dbde65c8581b7be03cd1e3686bc4 # HEAD of "stable/queens" as of 01.03.2018
+magnum_git_repo: https://git.openstack.org/openstack/magnum
+magnum_git_install_branch: 765e207a5d3a45b8523cb2c34e5d74541da481e6 # HEAD of "stable/rocky" as of 11.12.2018
magnum_git_project_group: magnum_all
+magnum_git_track_branch: stable/rocky
## Trove service
-trove_git_repo: https://github.com/openstack/trove
-trove_git_install_branch: 43d2b96f86a5365d69c885738ea1c3642f4e5aa1 # HEAD of "stable/queens" as of 01.03.2018
+trove_git_repo: https://git.openstack.org/openstack/trove
+trove_git_install_branch: 2953676e81fc22099e72ea7d0f27002a59aa779f # HEAD of "stable/rocky" as of 11.12.2018
trove_git_project_group: trove_all
+trove_git_track_branch: stable/rocky
## Horizon Trove dashboard plugin
-trove_dashboard_git_repo: https://github.com/openstack/trove-dashboard
-trove_dashboard_git_install_branch: f7cf9d5bbe8b04fc9ea95e79b9bec21842d324f9 # HEAD of "stable/queens" as of 01.03.2018
+trove_dashboard_git_repo: https://git.openstack.org/openstack/trove-dashboard
+trove_dashboard_git_install_branch: c6482d8f7ebeb980a99cc89593245be381675984 # HEAD of "stable/rocky" as of 11.12.2018
trove_dashboard_git_project_group: horizon_all
+trove_dashboard_git_track_branch: stable/rocky
## Octavia service
-octavia_git_repo: https://github.com/openstack/octavia
-octavia_git_install_branch: 9f379aef7c0665d4183ac549ed7a0dbc0e5d3aca # HEAD of "stable/queens" as of 01.03.2018
+octavia_git_repo: https://git.openstack.org/openstack/octavia
+octavia_git_install_branch: ec4c88e23ebeb786491158682f9a7dd42928f97a # HEAD of "stable/rocky" as of 14.12.2018
octavia_git_project_group: octavia_all
-
-
-## Molteniron service
-molteniron_git_repo: https://github.com/openstack/molteniron
-molteniron_git_install_branch: 094276cda77d814d07ad885e7d63de8d1243750a # HEAD of "master" as of 01.03.2018
-molteniron_git_project_group: molteniron_all
+octavia_git_track_branch: stable/rocky
## Tacker service
-tacker_git_repo: https://github.com/openstack/tacker
-tacker_git_install_branch: 6932f5642598d53d93f94514eaed55cc93ea19d7 # HEAD of "stable/queens" as of 01.03.2018
+tacker_git_repo: https://git.openstack.org/openstack/tacker
+tacker_git_install_branch: 279b1a2840b9f28377476e0d11ca83ce2e88a0b2 # HEAD of "stable/rocky" as of 11.12.2018
tacker_git_project_group: tacker_all
+tacker_git_track_branch: stable/rocky
+
+## Congress service
+congress_git_repo: https://git.openstack.org/openstack/congress
+congress_git_install_branch: 6862ac9f356a5403e1e37050e12f032f661bae96 # HEAD of "stable/rocky" as of 11.12.2018
+congress_git_project_group: congress_all
+congress_git_track_branch: stable/rocky
+
+## Horizon Octavia dashboard plugin
+octavia_dashboard_git_repo: https://git.openstack.org/openstack/octavia-dashboard
+octavia_dashboard_git_install_branch: 80766f9390492c24de38911d7240c5490c7ef562 # HEAD of "stable/rocky" as of 11.12.2018
+octavia_dashboard_git_project_group: horizon_all
+octavia_dashboard_git_track_branch: stable/rocky
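After this change every service consistently carries four variables; the new *_git_track_branch records which branch the install SHA is bumped from, with the literal None marking pins that are deliberately frozen (Dragonflow, heat-dashboard, BGPVPN, nova-lxd, swift3). The recurring pattern, with illustrative values:

    ---
    # Per-service variable pattern used throughout this file (illustrative):
    example_git_repo: https://git.openstack.org/openstack/example
    example_git_install_branch: <commit SHA>   # exact revision to install
    example_git_project_group: example_all     # OSA host group for the project
    example_git_track_branch: stable/rocky     # branch the SHA is bumped from,
                                               # or None when the pin is frozen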
diff --git a/xci/installer/osa/files/setup-openstack.yml b/xci/installer/osa/files/setup-openstack.yml
index 544a9999..904215b7 100644
--- a/xci/installer/osa/files/setup-openstack.yml
+++ b/xci/installer/osa/files/setup-openstack.yml
@@ -19,11 +19,13 @@
- include: os-nova-install.yml
- include: os-neutron-install.yml
- include: os-heat-install.yml
+- include: os-ceilometer-install.yml
- include: os-horizon-install.yml
when: not core_openstack | default(False)
- include: os-swift-install.yml
- include: os-ironic-install.yml
when: not core_openstack | default(False)
+- include: os-barbican-install.yml
- include: os-tacker-install.yml
- include: os-tempest-install.yml
when: (tempest_install | default(False)) | bool or (tempest_run | default(False)) | bool
diff --git a/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/ha/user_variables_os-odl-nofeature-ha.yml b/xci/installer/osa/files/user_variables_xci.yml
index 25cd6839..1d69f532 100644
--- a/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/ha/user_variables_os-odl-nofeature-ha.yml
+++ b/xci/installer/osa/files/user_variables_xci.yml
@@ -1,5 +1,5 @@
---
-# Copyright (c) 2017 Ericsson AB and others.
+# Copyright 2018, SUSE LINUX GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,10 +13,5 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# ##
-# ## This file contains commonly used overrides for convenience. Please inspect
-# ## the defaults for each role to find additional override options.
-# ##
-
-# Enable clustering for opendaylight
-cluster: true
\ No newline at end of file
+debug: False
+install_method: source
diff --git a/xci/installer/osa/playbooks/configure-opnfvhost.yml b/xci/installer/osa/playbooks/configure-opnfvhost.yml
index 96bd9e5e..07ad683b 100644
--- a/xci/installer/osa/playbooks/configure-opnfvhost.yml
+++ b/xci/installer/osa/playbooks/configure-opnfvhost.yml
@@ -26,7 +26,6 @@
file: "{{ item }}"
with_items:
- "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml"
- - "{{ xci_flavor_ansible_file_path }}/flavor-vars.yml"
- name: Set facts for remote deployment
set_fact:
remote_xci_path: "{{ ansible_env.HOME }}/releng-xci"
@@ -36,19 +35,36 @@
roles:
- role: bootstrap-host
configure_network: xci_flavor != 'aio'
- - role: peru.proxy_settings
+ - role: ruzickap.proxy_settings
proxy_settings_http_proxy: "{{ lookup('env','http_proxy') }}"
proxy_settings_https_proxy: "{{ lookup('env','https_proxy') }}"
proxy_settings_ftp_proxy: "{{ lookup('env','ftp_proxy') }}"
proxy_settings_no_proxy: "{{ lookup('env','no_proxy') }}"
tasks:
+ - name: Create list of files to copy
+ shell: |
+ git ls-tree -r --name-only HEAD > {{ xci_cache }}/releng-xci.files
+ echo ".git/" >> {{ xci_cache }}/releng-xci.files
+ echo ".cache/repos/" >> {{ xci_cache }}/releng-xci.files
+ echo ".cache/xci.env" >> {{ xci_cache }}/releng-xci.files
+ args:
+ executable: /bin/bash
+ chdir: "{{ xci_path }}"
+ changed_when: False
+ delegate_to: 127.0.0.1
+ tags:
+ - skip_ansible_lint
+
- name: Copy releng-xci to remote host
synchronize:
+ archive: yes
src: "{{ xci_path }}/"
dest: "{{ remote_xci_path }}"
- recursive: yes
delete: yes
+ rsync_opts:
+ - "--recursive"
+ - "--files-from={{ xci_cache }}/releng-xci.files"
- name: Re-create OpenStack-Ansible /etc directory
file:
@@ -74,13 +90,13 @@
- { src: "{{ openstack_osa_path }}/etc/openstack_deploy/env.d", dest: "{{ openstack_osa_etc_path }}" }
- { src: "{{ openstack_osa_path }}/etc/openstack_deploy/conf.d", dest: "{{ openstack_osa_etc_path }}" }
- { src: "{{ openstack_osa_path }}/etc/openstack_deploy/user_secrets.yml", dest: "{{ openstack_osa_etc_path }}" }
- - { src: "{{ remote_xci_flavor_files }}/inventory", dest: "{{ remote_xci_playbooks }}" }
- { src: "{{ remote_xci_flavor_files }}/openstack_user_config.yml", dest: "{{ openstack_osa_etc_path }}" }
- { src: "{{ remote_xci_flavor_files }}/user_variables.yml", dest: "{{ openstack_osa_etc_path }}" }
- { src: "{{ remote_xci_flavor_files }}/ceph.yml", dest: "{{ openstack_osa_etc_path }}/conf.d/", cond: xci_ceph_enabled }
- { src: "{{ remote_xci_flavor_files }}/user_ceph.yml", dest: "{{ openstack_osa_etc_path }}/user_ceph.yml", cond: xci_ceph_enabled }
- { src: "{{ remote_xci_flavor_files }}/user_variables_ceph.yml", dest: "{{ openstack_osa_etc_path }}/user_variables_ceph.yml", cond: xci_ceph_enabled }
- { src: "{{ remote_xci_path }}/xci/installer/osa/files/cinder.yml", dest: "{{ openstack_osa_etc_path }}/env.d" }
+ - { src: "{{ remote_xci_path }}/xci/installer/osa/files/user_variables_xci.yml", dest: "{{ openstack_osa_etc_path }}/user_variables_xci.yml" }
- { src: "{{ remote_xci_path }}/xci/installer/osa/files/user_variables_proxy.yml", dest: "{{ openstack_osa_etc_path }}/user_variables_proxy.yml", cond: "{{ lookup('env', 'http_proxy') != '' }}" }
- { src: "{{ remote_xci_path }}/xci/installer/osa/files/setup-openstack.yml", dest: "{{ openstack_osa_path }}/playbooks" }
- { src: "{{ remote_xci_path }}/xci/installer/osa/files/ansible-role-requirements.yml", dest: "{{openstack_osa_path}}/ansible-role-requirements.yml", cond: "{{ openstack_osa_version != 'master' }}" }
@@ -108,7 +124,15 @@
when:
- lookup('env','http_proxy') != ""
- - include: "{{ xci_path }}/xci/playbooks/bootstrap-scenarios.yml"
+ - name: Reload XCI deployment host facts
+ setup:
+ filter: ansible_local
+ gather_subset: "!all"
+ delegate_to: 127.0.0.1
+
+ - name: Prepare everything to run the {{ deploy_scenario }} role
+ include_role:
+ name: "{{ hostvars['opnfv'].ansible_local.xci.scenarios.role }}"
- name: bootstrap ansible on opnfv host
command: "/bin/bash ./scripts/bootstrap-ansible.sh"
@@ -128,6 +152,7 @@
- name: Install ARA callback plugin in OSA virtualenv
pip:
name: ara
+ version: 0.16.4
state: present
extra_args: '-c https://raw.githubusercontent.com/openstack/requirements/{{ requirements_git_install_branch }}/upper-constraints.txt'
executable: '/opt/ansible-runtime/bin/pip'
@@ -151,11 +176,6 @@
chdir: "{{openstack_osa_path}}/scripts"
changed_when: True
- - name: Configure SSL certificates
- include_tasks: "{{ xci_path }}/xci/playbooks/manage-ssl-certs.yml"
- vars:
- extra_args: "-c https://raw.githubusercontent.com/openstack/requirements/{{ requirements_git_install_branch }}/upper-constraints.txt"
-
- name: fetch xci environment
copy:
src: "{{ xci_path }}/.cache/xci.env"
@@ -169,12 +189,6 @@
include_role:
name: "openstack-ansible-openstack_openrc"
- - name: add extra insecure flag to generated openrc
- blockinfile:
- dest: "{{ ansible_env.HOME }}/openrc"
- block: |
- export OS_INSECURE=true
-
- name: fetch generated openrc
fetch:
src: "{{ ansible_env.HOME }}/openrc"
diff --git a/xci/installer/osa/playbooks/configure-targethosts.yml b/xci/installer/osa/playbooks/configure-targethosts.yml
index cb817cfc..dfa17696 100644
--- a/xci/installer/osa/playbooks/configure-targethosts.yml
+++ b/xci/installer/osa/playbooks/configure-targethosts.yml
@@ -17,9 +17,8 @@
file: "{{ item }}"
with_items:
- "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml"
- - "{{ xci_flavor_ansible_file_path }}/flavor-vars.yml"
roles:
- - role: peru.proxy_settings
+ - role: ruzickap.proxy_settings
proxy_settings_http_proxy: "{{ lookup('env','http_proxy') }}"
proxy_settings_https_proxy: "{{ lookup('env','https_proxy') }}"
proxy_settings_ftp_proxy: "{{ lookup('env','ftp_proxy') }}"
diff --git a/xci/installer/osa/playbooks/post-deployment.yml b/xci/installer/osa/playbooks/post-deployment.yml
new file mode 100644
index 00000000..36c052c9
--- /dev/null
+++ b/xci/installer/osa/playbooks/post-deployment.yml
@@ -0,0 +1,66 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- hosts: opnfv
+ remote_user: root
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+ - "{{ xci_path }}/xci/installer/osa/files/openstack_services.yml"
+ - "{{ xci_path }}/xci/installer/osa/files/{{ xci_flavor }}/user_variables.yml"
+
+ environment:
+ http_proxy: "{{ lookup('env','http_proxy') }}"
+ https_proxy: "{{ lookup('env','https_proxy') }}"
+ no_proxy: "{{ lookup('env','no_proxy') }}"
+ HTTP_PROXY: "{{ lookup('env','http_proxy') }}"
+ HTTPS_PROXY: "{{ lookup('env','https_proxy') }}"
+ NO_PROXY: "{{ lookup('env','no_proxy') }}"
+ pre_tasks:
+ - name: Load distribution variables
+ include_vars:
+ file: "{{ item }}"
+ with_items:
+ - "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml"
+ - name: Set facts for remote deployment
+ set_fact:
+ remote_xci_scenario_path: "{{ ansible_env.HOME }}/releng-xci/.cache/repos/scenarios/{{ deploy_scenario }}/scenarios/{{ deploy_scenario }}"
+
+ roles:
+ - role: ruzickap.proxy_settings
+ proxy_settings_http_proxy: "{{ lookup('env','http_proxy') }}"
+ proxy_settings_https_proxy: "{{ lookup('env','https_proxy') }}"
+ proxy_settings_ftp_proxy: "{{ lookup('env','ftp_proxy') }}"
+ proxy_settings_no_proxy: "{{ lookup('env','no_proxy') }}"
+
+ tasks:
+ - name: "Configure http_proxy_env_url"
+ lineinfile:
+ path: "{{openstack_osa_etc_path}}/user_variables_proxy.yml"
+ regexp: "^http_proxy_env_url:.*"
+ line: "{{ 'http_proxy_env_url: ' + lookup('env','http_proxy') }}"
+ when:
+ - lookup('env','http_proxy') != ""
+
+ - name: Reload XCI deployment host facts
+ setup:
+ filter: ansible_local
+ gather_subset: "!all"
+ delegate_to: 127.0.0.1
+
+  - name: Check if any post-deployment tasks are defined for the {{ deploy_scenario }} role
+ stat:
+ path: "{{ remote_xci_scenario_path }}/role/{{ deploy_scenario }}/tasks/post-deployment.yml"
+ register: post_deployment_yml
+
+ - name: Execute post-deployment tasks of {{ deploy_scenario }} role
+ include_role:
+ name: "{{ hostvars['opnfv'].ansible_local.xci.scenarios.role }}"
+ tasks_from: post-deployment
+ when:
+ - post_deployment_yml.stat.exists
diff --git a/xci/installer/osh/README b/xci/installer/osh/README
new file mode 100644
index 00000000..902ac10e
--- /dev/null
+++ b/xci/installer/osh/README
@@ -0,0 +1,50 @@
+Requirements:
+ 1. Performance of hosts
+ There are no official performance requirements; the following settings are recommended:
+ - VM_CPU=6
+ - VM_DISK=80GB
+ - VM_MEMORY_SIZE=16GB
+
+ 2. Distributions
+ - Ubuntu 16.04
+
+ 3. Packages:
+ - Ansible v2.4 (or newer) and python-netaddr are installed on the machine that will run Ansible commands
+ - Jinja 2.9 (or newer) is required to run the Ansible Playbooks
+
+ 4. Others:
+ - The target servers must have access to the Internet in order to pull Docker images.
+ - The target servers are configured to allow IPv4 forwarding.
+ - Your SSH key must be copied to all of the servers that are part of your inventory.
+ - Firewalls are not managed by the deployment; you will need to implement your own rules. To avoid issues during the deployment, you should disable your firewall.
+
+Flavors:
+ 1. mini: one deployment host, one master host and one node host.
+ 2. noha: one deployment host, one master host and two node hosts.
+
+Components Installed:
+ 1. etcd
+ 2. network plugins:
+ - calico
+ 3. kubernetes
+ 4. docker
+
+How to use:
+
+Clone the OPNFV Releng repository
+
+ git clone https://gerrit.opnfv.org/gerrit/releng-xci.git
+
+Change into the directory where the sandbox script is located
+
+ cd releng-xci/xci
+
+Set the variables required to run openstack-helm
+
+ export INSTALLER_TYPE=osh
+ export DEPLOY_SCENARIO=k8-calico-nofeature
+ export XCI_FLAVOR=mini
+
+Execute sandbox script
+
+ ./xci-deploy.sh
diff --git a/xci/installer/osh/deploy.sh b/xci/installer/osh/deploy.sh
new file mode 100755
index 00000000..e56845b8
--- /dev/null
+++ b/xci/installer/osh/deploy.sh
@@ -0,0 +1,170 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2017 Huawei
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+set -o errexit
+set -o nounset
+set -o pipefail
+
+OSH_XCI_PLAYBOOKS="$(dirname $(realpath ${BASH_SOURCE[0]}))/playbooks"
+export ANSIBLE_ROLES_PATH=$HOME/.ansible/roles:/etc/ansible/roles:${XCI_PATH}/xci/playbooks/roles
+
+#-------------------------------------------------------------------------------
+# Configure localhost
+#-------------------------------------------------------------------------------
+# This playbook
+# - removes directories that were created by the previous xci run
+# - clones opnfv/releng-xci repository
+# - clones kubernetes-incubator/kubespray repository
+# - creates log directory
+#-------------------------------------------------------------------------------
+
+echo "Info: Configuring localhost for kubespray"
+echo "-----------------------------------------------------------------------"
+cd $XCI_PLAYBOOKS
+ansible-playbook ${XCI_ANSIBLE_PARAMS} -e XCI_PATH="${XCI_PATH}" \
+ -i dynamic_inventory.py configure-localhost.yml
+echo "-----------------------------------------------------------------------"
+echo "Info: Configured localhost for kubespray"
+
+#-------------------------------------------------------------------------------
+# Configure installer
+#-------------------------------------------------------------------------------
+# TODO: summarize what this playbook does
+#-------------------------------------------------------------------------------
+
+echo "Info: Configuring kubespray installer"
+echo "-----------------------------------------------------------------------"
+cd $OSH_XCI_PLAYBOOKS
+ansible-playbook ${XCI_ANSIBLE_PARAMS} \
+ -i ${XCI_PLAYBOOKS}/dynamic_inventory.py configure-installer.yml
+echo "-----------------------------------------------------------------------"
+echo "Info: Configured kubespray installer"
+
+#-------------------------------------------------------------------------------
+# Configure deployment host, opnfv
+#-------------------------------------------------------------------------------
+# This playbook
+# - removes directories that were created by the previous xci run
+# - synchronizes opnfv/releng-xci and kubernetes-incubator/kubespray repositories
+# - generates/prepares ssh keys
+# - copies flavor files to be used by kubespray
+# - installs packages required by kubespray
+#-------------------------------------------------------------------------------
+echo "Info: Configuring opnfv deployment host for kubespray"
+echo "-----------------------------------------------------------------------"
+cd $OSH_XCI_PLAYBOOKS
+ansible-playbook ${XCI_ANSIBLE_PARAMS} \
+ -i ${XCI_PLAYBOOKS}/dynamic_inventory.py configure-opnfvhost.yml
+echo "-----------------------------------------------------------------------"
+echo "Info: Configured opnfv deployment host for kubespray"
+
+#-------------------------------------------------------------------------------
+# Configure target hosts for kubespray
+#-------------------------------------------------------------------------------
+# This playbook is run for all flavors except aio, since aio is configured by configure-opnfvhost.yml.
+# This playbook
+# - adds public keys to target hosts
+# - installs packages required by kubespray
+# - configures the haproxy service
+#-------------------------------------------------------------------------------
+if [ $XCI_FLAVOR != "aio" ]; then
+ echo "Info: Configuring target hosts for kubespray"
+ echo "-----------------------------------------------------------------------"
+ cd $OSH_XCI_PLAYBOOKS
+ ansible-playbook ${XCI_ANSIBLE_PARAMS} \
+ -i ${XCI_PLAYBOOKS}/dynamic_inventory.py configure-targethosts.yml
+ echo "-----------------------------------------------------------------------"
+ echo "Info: Configured target hosts for kubespray"
+fi
+
+
+echo "Info: Using kubespray to deploy the kubernetes cluster"
+echo "-----------------------------------------------------------------------"
+ssh root@$OPNFV_HOST_IP "set -o pipefail; export XCI_FLAVOR=$XCI_FLAVOR; export INSTALLER_TYPE=$INSTALLER_TYPE; \
+ export IDF=/root/releng-xci/xci/var/idf.yml; export PDF=/root/releng-xci/xci/var/pdf.yml; \
+ cd releng-xci/.cache/repos/kubespray/; ansible-playbook \
+ -i inventory/opnfv/dynamic_inventory.py cluster.yml -b | tee setup-kubernetes.log"
+scp root@$OPNFV_HOST_IP:~/releng-xci/.cache/repos/kubespray/setup-kubernetes.log \
+ $LOG_PATH/setup-kubernetes.log
+
+
+cd $OSH_XCI_PLAYBOOKS
+ansible-playbook ${XCI_ANSIBLE_PARAMS} \
+ -i ${XCI_PLAYBOOKS}/dynamic_inventory.py configure-kubenet.yml
+echo
+echo "-----------------------------------------------------------------------"
+echo "Info: Kubernetes installation is successfully completed!"
+echo "-----------------------------------------------------------------------"
+
+#-------------------------------------------------------------------------------
+# Execute post-installation tasks
+#-------------------------------------------------------------------------------
+# Playbook post.yml is used in order to execute any post-deployment tasks that
+# are required for the scenario under test.
+#-------------------------------------------------------------------------------
+# copy admin.conf
+ssh root@$OPNFV_HOST_IP "mkdir -p ~/.kube/;\
+ cp -f ~/admin.conf ~/.kube/config;"
+
+echo "-----------------------------------------------------------------------"
+echo "Info: Running post-deployment scenario role"
+echo "-----------------------------------------------------------------------"
+cd $OSH_XCI_PLAYBOOKS
+ansible-playbook ${XCI_ANSIBLE_PARAMS} -i ${XCI_PLAYBOOKS}/dynamic_inventory.py \
+ post-deployment.yml
+echo "-----------------------------------------------------------------------"
+echo "Info: Post-deployment scenario role execution done"
+echo "-----------------------------------------------------------------------"
+echo
+echo "Login opnfv host ssh root@$OPNFV_HOST_IP
+according to the user-guide to create a service
+https://kubernetes.io/docs/user-guide/walkthrough/k8s201/"
+echo
+echo "-----------------------------------------------------------------------"
+echo "Info: Kubernetes login details"
+echo "-----------------------------------------------------------------------"
+echo
+# Get the dashboard URL
+if ssh-keygen -f "/home/opnfv/.ssh/known_hosts" -F $OPNFV_HOST_IP; then
+    ssh-keygen -f "/home/opnfv/.ssh/known_hosts" -R $OPNFV_HOST_IP
+    echo "Info: known_hosts entry for opnfv host from previous deployment found and deleted"
+fi
+DASHBOARD_SERVICE=$(ssh -q -o StrictHostKeyChecking=no root@$OPNFV_HOST_IP "kubectl get service -n kube-system |grep kubernetes-dashboard")
+DASHBOARD_PORT=$(echo ${DASHBOARD_SERVICE} | awk '{print $5}' |awk -F "[:/]" '{print $2}')
+KUBER_SERVER_URL=$(ssh root@$OPNFV_HOST_IP "grep -r server ~/.kube/config")
+echo "Info: Kubernetes Dashboard URL:"
+echo $KUBER_SERVER_URL | awk '{print $2}'| sed -n "s#:[0-9]*\$#:$DASHBOARD_PORT#p"
+
+# Get the dashboard user and password
+MASTER_IP=$(echo ${KUBER_SERVER_URL} | awk '{print $2}' |awk -F "[:/]" '{print $4}')
+if ssh-keygen -f "/home/opnfv/.ssh/known_hosts" -F $MASTER_IP; then
+    ssh-keygen -f "/home/opnfv/.ssh/known_hosts" -R $MASTER_IP
+    echo "Info: known_hosts entry for master host from previous deployment found and deleted"
+fi
+USER_CSV=$(ssh -q -o StrictHostKeyChecking=no root@$MASTER_IP " cat /etc/kubernetes/users/known_users.csv")
+USERNAME=$(echo $USER_CSV |awk -F ',' '{print $2}')
+PASSWORD=$(echo $USER_CSV |awk -F ',' '{print $1}')
+echo "Info: Dashboard username: ${USERNAME}"
+echo "Info: Dashboard password: ${PASSWORD}"
+
+echo "-----------------------------------------------------------------------"
+echo "Info: Continue with running the openstack-helm installation"
+echo "-----------------------------------------------------------------------"
+cd $OSH_XCI_PLAYBOOKS
+ansible-playbook ${XCI_ANSIBLE_PARAMS} -v -i ${XCI_PLAYBOOKS}/dynamic_inventory.py \
+ install-openstack-helm.yml
+echo "-----------------------------------------------------------------------"
+echo "Info: Openstack-helm installation execution done"
+echo "-----------------------------------------------------------------------"
+echo
+
+
+# vim: set ts=4 sw=4 expandtab:
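For reference, the dashboard URL printed at the end of the script above is derived by swapping the apiserver port in the kubeconfig server line for the dashboard NodePort. A minimal sketch with illustrative values:

    KUBER_SERVER_URL="server: https://192.168.122.3:6443"   # illustrative kubeconfig line
    DASHBOARD_PORT=31443                                    # illustrative NodePort
    echo $KUBER_SERVER_URL | awk '{print $2}' | sed -n "s#:[0-9]*\$#:$DASHBOARD_PORT#p"
    # prints: https://192.168.122.3:31443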
diff --git a/xci/installer/osh/files/ha/inventory/group_vars/all.yml b/xci/installer/osh/files/ha/inventory/group_vars/all.yml
new file mode 100644
index 00000000..d1b946a7
--- /dev/null
+++ b/xci/installer/osh/files/ha/inventory/group_vars/all.yml
@@ -0,0 +1,8 @@
+---
+loadbalancer_apiserver:
+ address: 192.168.122.222
+ port: 8383
+
+apiserver_loadbalancer_domain_name: 192.168.122.222
+supplementary_addresses_in_ssl_keys:
+ - 192.168.122.222
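These values put a keepalived-managed VIP in front of the kube-master hosts: HAProxy listens on 8383 and balances to the apiservers on 6443 (see haproxy_default_services later in this change). A hedged sanity check from any host on the 192.168.122.0/24 network:

    # expect a TLS endpoint behind the VIP; -k because the cluster CA is not in the trust store
    # (the /version endpoint is typically unauthenticated; hardened clusters may reject it)
    curl -k https://192.168.122.222:8383/version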
diff --git a/xci/installer/osh/playbooks/configure-installer.yml b/xci/installer/osh/playbooks/configure-installer.yml
new file mode 100644
index 00000000..383f55fc
--- /dev/null
+++ b/xci/installer/osh/playbooks/configure-installer.yml
@@ -0,0 +1,51 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2019 Ericsson Software Technology and Others
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- hosts: localhost
+ connection: local
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+
+ tasks:
+ - name: delete existing kubespray/inventory/opnfv directory
+ file:
+ path: "{{ xci_path }}/.cache/repos/kubespray/inventory/opnfv"
+ state: absent
+
+ - name: copy kubespray/inventory/sample as kubespray/inventory/opnfv
+ copy:
+ src: "{{ xci_path }}/.cache/repos/kubespray/inventory/sample/"
+ dest: "{{ xci_path }}/.cache/repos/kubespray/inventory/opnfv"
+
+ - name: update kubespray k8s-cluster.yml for xci
+ lineinfile:
+ path: "{{ xci_path }}/.cache/repos/kubespray/inventory/opnfv/group_vars/k8s-cluster/k8s-cluster.yml"
+ regexp: "{{ item.regexp }}"
+ line: "{{ item.line }}"
+ with_items:
+ - { regexp: "kube_version:.*", line: "kube_version: {{ kubernetes_version }}" }
+ - { regexp: "kubeconfig_localhost:.*", line: "kubeconfig_localhost: true" }
+ - { regexp: "kube_basic_auth:.*", line: "kube_basic_auth: true" }
+ - { regexp: "dashboard_enabled:.*", line: "dashboard_enabled: true" }
+
+# NOTE(fdegir): The reason this task is separate from the lineinfile task above is that
+# escaping curly braces does not work with with_items: Ansible would try to resolve
+# {{ ansible_env.HOME }} at parse time, which we don't want since it should point to the
+# home folder of the user executing this task at runtime.
+ - name: update kubespray artifacts_dir
+ lineinfile:
+ path: "{{ xci_path }}/.cache/repos/kubespray/inventory/opnfv/group_vars/k8s-cluster/k8s-cluster.yml"
+ regexp: "artifacts_dir:.*"
+ line: "artifacts_dir: '{{ '{{' }} ansible_env.HOME {{ '}}' }}'"
+
+ - name: change dashboard server type to NodePort
+ lineinfile:
+ path: "{{ xci_path }}/.cache/repos/kubespray/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2"
+ insertafter: 'targetPort'
+ line: " type: NodePort"
diff --git a/xci/installer/osh/playbooks/configure-kubenet.yml b/xci/installer/osh/playbooks/configure-kubenet.yml
new file mode 100644
index 00000000..18a126c1
--- /dev/null
+++ b/xci/installer/osh/playbooks/configure-kubenet.yml
@@ -0,0 +1,51 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 SUSE LINUX GmbH and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# NOTE(hwoarang) Kubenet expects networking to be prepared by the administrator so it's necessary
+# to do that as part of the node configuration. All we need is to add static routes on every node
+# so cbr0 interfaces can talk to each other.
+- name: Prepare networking for kubenet
+ hosts: k8s-cluster
+ remote_user: root
+ gather_facts: True
+ become: yes
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+ tasks:
+ - name: Configure static routes
+ block:
+ - name: Collect cbr0 information from the nodes
+ set_fact:
+ kubenet_xci_static_routes: |-
+ {% set static_routes = [] %}
+ {% for host in groups['k8s-cluster']|select("ne", inventory_hostname) %}
+ {%- set _ = static_routes.append(
+ {'network': (hostvars[host]['ansible_cbr0']['ipv4']['network']+'/'+
+ hostvars[host]['ansible_cbr0']['ipv4']['netmask'])|ipaddr('net'),
+ 'gateway': hostvars[host]['ansible_default_ipv4']['address']}) -%}
+ {% endfor %}
+ {{ static_routes }}
+
+ - name: Add static routes on each node
+ shell: "ip route show | grep -q {{ item.network }} || ip route add {{ item.network }} via {{ item.gateway }}"
+ with_items: "{{ kubenet_xci_static_routes }}"
+ loop_control:
+ label: "{{ item.network }}"
+ when: deploy_scenario.find('k8-nosdn-') != -1
+
+ - name: Ensure rp_filter is disabled on localhost
+ sysctl:
+ name: net.ipv4.conf.all.rp_filter
+ sysctl_set: yes
+ state: present
+ value: "{{ (kubenet_xci_static_routes is defined) | ternary(0, 1) }}"
+ reload: yes
+ delegate_to: localhost
+ run_once: True
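The net effect on each node is one idempotent route per peer, pointing the peer's cbr0 subnet at the peer's default IPv4 address. With illustrative values for one peer:

    # on node1, route node2's cbr0 network via node2's primary address (values illustrative)
    ip route show | grep -q 10.233.64.0/24 || ip route add 10.233.64.0/24 via 192.168.122.4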
diff --git a/xci/installer/osh/playbooks/configure-opnfvhost.yml b/xci/installer/osh/playbooks/configure-opnfvhost.yml
new file mode 100644
index 00000000..52e42b06
--- /dev/null
+++ b/xci/installer/osh/playbooks/configure-opnfvhost.yml
@@ -0,0 +1,101 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- hosts: opnfv
+ remote_user: root
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+
+ pre_tasks:
+ - name: Load distribution variables
+ include_vars:
+ file: "{{ item }}"
+ with_items:
+ - "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml"
+ - name: Set facts for remote deployment
+ set_fact:
+ remote_xci_path: "{{ ansible_env.HOME }}/releng-xci"
+ remote_xci_flavor_files: "{{ ansible_env.HOME }}/releng-xci/xci/installer/{{ installer_type }}/files/{{ xci_flavor }}"
+ remote_xci_playbooks: "{{ ansible_env.HOME }}/releng-xci/xci/playbooks"
+
+ roles:
+ - role: bootstrap-host
+ configure_network: xci_flavor != 'aio'
+
+ tasks:
+ - name: Create list of files to copy
+ shell: |
+ git ls-tree -r --name-only HEAD > {{ xci_cache }}/releng-xci.files
+ echo ".git/" >> {{ xci_cache }}/releng-xci.files
+ echo ".cache/repos/" >> {{ xci_cache }}/releng-xci.files
+ echo ".cache/xci.env" >> {{ xci_cache }}/releng-xci.files
+ args:
+ executable: /bin/bash
+ chdir: "{{ xci_path }}"
+ changed_when: False
+ delegate_to: 127.0.0.1
+ tags:
+ - skip_ansible_lint
+
+ - name: Copy releng-xci to remote host
+ synchronize:
+ archive: yes
+ src: "{{ xci_path }}/"
+ dest: "{{ remote_xci_path }}"
+ delete: yes
+ rsync_opts:
+ - "--recursive"
+ - "--files-from={{ xci_cache }}/releng-xci.files"
+
+ - name: link xci dynamic inventory to kubespray/inventory/opnfv directory
+ file:
+ src: "{{ remote_xci_playbooks }}/dynamic_inventory.py"
+ path: "{{ remote_xci_path }}/.cache/repos/kubespray/inventory/opnfv/dynamic_inventory.py"
+ state: link
+
+ - name: Download kubectl and place it in /usr/local/bin
+ get_url:
+ url: "https://storage.googleapis.com/kubernetes-release/release/{{ kubernetes_version }}/bin/linux/amd64/kubectl"
+ dest: /usr/local/bin/kubectl
+ owner: root
+ group: root
+ mode: 0755
+
+ - name: Reload XCI deployment host facts
+ setup:
+ filter: ansible_local
+ gather_subset: "!all"
+ delegate_to: 127.0.0.1
+
+ - name: Prepare everything to run the {{ deploy_scenario }} role
+ include_role:
+ name: "{{ hostvars['opnfv'].ansible_local.xci.scenarios.role }}"
+
+ - name: Install required packages
+ package:
+ name: "{{ (ansible_pkg_mgr == 'zypper') | ternary('dbus-1', 'dbus') }}"
+ state: present
+ update_cache: "{{ (ansible_pkg_mgr in ['apt', 'zypper']) | ternary('yes', omit) }}"
+ when: xci_flavor == 'aio'
+
+ - name: pip install required packages
+ pip:
+ name: "{{ item.name }}"
+ version: "{{ item.version | default(omit) }}"
+ with_items:
+ - { name: 'ansible', version: "{{ xci_kube_ansible_pip_version }}" }
+ - { name: 'netaddr' }
+ - { name: 'ansible-modules-hashivault' }
+
+ - name: fetch xci environment
+ copy:
+ src: "{{ xci_path }}/.cache/xci.env"
+ dest: /root/xci.env
+
+ - name: Manage SSH keys
+ include_tasks: "{{ xci_path }}/xci/playbooks/manage-ssh-keys.yml"
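The synchronize task above is roughly equivalent to the rsync call below; --recursive is passed explicitly because --files-from disables the recursion otherwise implied by -a. A sketch, assuming xci_cache resolves to $XCI_PATH/.cache and the default opnfv host IP:

    rsync -a --delete --recursive \
        --files-from="$XCI_PATH/.cache/releng-xci.files" \
        "$XCI_PATH/" root@192.168.122.2:releng-xci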
diff --git a/xci/installer/osh/playbooks/configure-targethosts.yml b/xci/installer/osh/playbooks/configure-targethosts.yml
new file mode 100644
index 00000000..2fde9877
--- /dev/null
+++ b/xci/installer/osh/playbooks/configure-targethosts.yml
@@ -0,0 +1,40 @@
+---
+- hosts: k8s-cluster
+ remote_user: root
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+
+ pre_tasks:
+ - name: Load distribution variables
+ include_vars:
+ file: "{{ item }}"
+ with_items:
+ - "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml"
+
+ roles:
+ - role: bootstrap-host
+
+ tasks:
+ - name: Manage SSH keys
+ include_tasks: "{{ xci_path }}/xci/playbooks/manage-ssh-keys.yml"
+
+ - name: Install dbus
+ package:
+ name: "{{ (ansible_pkg_mgr == 'zypper') | ternary('dbus-1', 'dbus') }}"
+ state: present
+ update_cache: "{{ (ansible_pkg_mgr in ['apt', 'zypper']) | ternary('yes', omit) }}"
+
+- hosts: kube-master
+ remote_user: root
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+ pre_tasks:
+ - name: Load distribution variables
+ include_vars:
+ file: "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml"
+ roles:
+ - role: "keepalived"
+ when: xci_flavor == 'ha'
+ - role: "haproxy_server"
+ haproxy_service_configs: "{{ haproxy_default_services}}"
+ when: xci_flavor == 'ha'
diff --git a/xci/installer/osh/playbooks/group_vars/all.yml b/xci/installer/osh/playbooks/group_vars/all.yml
new file mode 100644
index 00000000..7453bdab
--- /dev/null
+++ b/xci/installer/osh/playbooks/group_vars/all.yml
@@ -0,0 +1,55 @@
+---
+keepalived_ubuntu_src: "uca"
+keepalived_uca_apt_repo_url: "{{ uca_apt_repo_url | default('http://ubuntu-cloud.archive.canonical.com/ubuntu') }}"
+
+keepalived_sync_groups:
+ haproxy:
+ instances:
+ - external
+
+haproxy_keepalived_external_interface: "{{ ansible_default_ipv4.interface }}"
+haproxy_keepalived_authentication_password: 'keepalived'
+keepalived_instances:
+ external:
+ interface: "{{ haproxy_keepalived_external_interface }}"
+ state: "BACKUP"
+ virtual_router_id: "{{ haproxy_keepalived_external_virtual_router_id | default ('10') }}"
+ priority: "{{ ((ansible_play_hosts|length-ansible_play_hosts.index(inventory_hostname))*100)-((ansible_play_hosts|length-ansible_play_hosts.index(inventory_hostname))*50) }}"
+ authentication_password: "{{ haproxy_keepalived_authentication_password }}"
+ vips:
+ - "{{ haproxy_keepalived_external_vip_cidr | default('192.168.122.222/32') }} dev {{ haproxy_keepalived_external_interface }}"
+
+haproxy_default_services:
+ - service:
+ haproxy_service_name: proxy-apiserver
+ haproxy_backend_nodes: "{{ groups['kube-master'] | default([]) }}"
+ haproxy_port: 8383
+ haproxy_backend_port: 6443
+ haproxy_balance_type: tcp
+
+haproxy_bind_on_non_local: "True"
+haproxy_use_keepalived: "True"
+keepalived_selinux_compile_rules:
+ - keepalived_ping
+ - keepalived_haproxy_pid_file
+
+# Ensure that the package state matches the global setting
+haproxy_package_state: "latest"
+
+haproxy_whitelist_networks:
+ - 192.168.0.0/16
+ - 172.16.0.0/12
+ - 10.0.0.0/8
+
+haproxy_galera_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_glance_registry_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_keystone_admin_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_nova_metadata_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_rabbitmq_management_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_repo_git_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_repo_cache_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_octavia_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_ssl: false
+
+internal_lb_vip_address: "192.168.122.222"
+external_lb_vip_address: "{{ internal_lb_vip_address }}"
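The keepalived priority expression above simplifies to 50 * (play_size - host_index), so hosts earlier in the play win the VIP election. A quick sketch of the resulting values for a three-host play:

    n=3
    for i in 0 1 2; do
        echo "host index $i -> priority $(( (n - i) * 100 - (n - i) * 50 ))"
    done
    # host index 0 -> priority 150, 1 -> 100, 2 -> 50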
diff --git a/xci/installer/osh/playbooks/install-openstack-helm.yml b/xci/installer/osh/playbooks/install-openstack-helm.yml
new file mode 100644
index 00000000..a16572a5
--- /dev/null
+++ b/xci/installer/osh/playbooks/install-openstack-helm.yml
@@ -0,0 +1,24 @@
+---
+- hosts: kube-node
+ remote_user: root
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+
+ roles:
+ - role: prepare-kube-nodes-osh
+
+- hosts: opnfv
+ remote_user: root
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+ roles:
+ - role: prepare-opnfvhost-osh
+ - role: prepare-osh
+ - role: install-osh-mini
+ when: xci_flavor == 'mini'
+ environment:
+ - CONTAINER_DISTRO_NAME: "{{ container_distro_name }}"
+ - CONTAINER_DISTRO_VERSION: "{{ container_distro_version }}"
+ - OPENSTACK_RELEASE: "{{ openstack_osh_version }}"
+ - role: install-osh-noha
+ when: xci_flavor == 'noha'
diff --git a/xci/installer/osh/playbooks/post-deployment.yml b/xci/installer/osh/playbooks/post-deployment.yml
new file mode 100644
index 00000000..5c2f7f36
--- /dev/null
+++ b/xci/installer/osh/playbooks/post-deployment.yml
@@ -0,0 +1,42 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- hosts: opnfv
+ remote_user: root
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+
+ pre_tasks:
+ - name: Load distribution variables
+ include_vars:
+ file: "{{ item }}"
+ with_items:
+ - "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml"
+ - name: Set facts for remote deployment
+ set_fact:
+ remote_xci_scenario_path: "{{ ansible_env.HOME }}/releng-xci/.cache/repos/scenarios/{{ deploy_scenario }}/scenarios/{{ deploy_scenario }}"
+
+ tasks:
+ - name: Reload XCI deployment host facts
+ setup:
+ filter: ansible_local
+ gather_subset: "!all"
+ delegate_to: 127.0.0.1
+
+ - name: Check if any post-deployment task is defined for {{ deploy_scenario }} role
+ stat:
+ path: "{{ remote_xci_scenario_path }}/role/{{ deploy_scenario }}/tasks/post-deployment.yml"
+ register: post_deployment_yml
+
+ - name: Execute post-deployment tasks of {{ deploy_scenario }} role
+ include_role:
+ name: "{{ hostvars['opnfv'].ansible_local.xci.scenarios.role }}"
+ tasks_from: post-deployment
+ when:
+ - post_deployment_yml.stat.exists
diff --git a/xci/installer/osh/playbooks/roles/install-osh-mini/tasks/main.yml b/xci/installer/osh/playbooks/roles/install-osh-mini/tasks/main.yml
new file mode 100644
index 00000000..e5df54fa
--- /dev/null
+++ b/xci/installer/osh/playbooks/roles/install-osh-mini/tasks/main.yml
@@ -0,0 +1,109 @@
+---
+
+- name: Setup Clients
+ command: ./tools/deployment/common/setup-client.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy the ingress controller
+ command: ./tools/deployment/component/common/ingress.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy MariaDB
+ command: ./tools/deployment/component/common/mariadb.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy memcached
+ command: ./tools/deployment/component/common/memcached.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy RabbitMQ
+ command: ./tools/deployment/component/common/rabbitmq.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Update nfs-provisioner helm-chart
+ shell: helm dependency update nfs-provisioner
+ args:
+ chdir: /root/repos/openstack-helm-infra
+ executable: /bin/bash
+ tags:
+ - skip_ansible_lint
+
+- name: Deploy nfs-provisioner
+ command: ./tools/deployment/component/nfs-provisioner/nfs-provisioner.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Keystone
+ command: ./tools/deployment/component/keystone/keystone.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Heat
+ command: ./tools/deployment/component/heat/heat.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Glance
+ command: ./tools/deployment/component/glance/glance.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy OpenvSwitch
+ command: ./tools/deployment/component/compute-kit/openvswitch.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Libvirt
+ command: ./tools/deployment/component/compute-kit/libvirt.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Add br-vxlan as the tunnel interface
+ lineinfile:
+ path: /root/repos/openstack-helm/tools/deployment/component/compute-kit/compute-kit.sh
+ regexp: 'tunnel: docker0'
+ line: ' tunnel: br-vxlan'
+
+- name: Deploy Compute Kit (Nova and Neutron)
+ command: ./tools/deployment/component/compute-kit/compute-kit.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Copy script to the worker node
+ command: "scp -o \"StrictHostKeyChecking no\" tools/deployment/developer/ceph/170-setup-gateway.sh root@{{ hostvars.node1.ip }}:170-setup-gateway.sh"
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Set up the gateway to the public network on the worker node
+ command: /root/170-setup-gateway.sh
+ changed_when: false
+ delegate_to: node1
+
+- name: Add a route from opnfv to worker node for the public network
+ command: ip route add 172.24.4.0/24 via 192.168.122.4
+ changed_when: false
+
+# Deployment validation
+- name: Exercise the cloud
+ command: ./tools/deployment/developer/common/900-use-it.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
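Each openstack-helm script above blocks until its charts settle, so a failing step is easiest to diagnose by inspecting the openstack namespace in between. A hedged check, run on the opnfv host:

    kubectl get pods --namespace=openstack -o wide   # pods should reach Running or Completed
    helm list                                        # roughly one release per component deployed above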
diff --git a/xci/installer/osh/playbooks/roles/install-osh-mini/vars/main.yml b/xci/installer/osh/playbooks/roles/install-osh-mini/vars/main.yml
new file mode 100644
index 00000000..03c02a83
--- /dev/null
+++ b/xci/installer/osh/playbooks/roles/install-osh-mini/vars/main.yml
@@ -0,0 +1,18 @@
+---
+# Copyright 2019, SUSE Linux GmbH
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+cacheable: yes
+container_distro_name: "{{ (osh_distro=='opensuse') | ternary('opensuse', 'ubuntu') }}"
+container_distro_version: "{{ (osh_distro=='opensuse') | ternary('15', 'xenial') }}"
diff --git a/xci/installer/osh/playbooks/roles/install-osh-noha/tasks/main.yml b/xci/installer/osh/playbooks/roles/install-osh-noha/tasks/main.yml
new file mode 100644
index 00000000..befdcfce
--- /dev/null
+++ b/xci/installer/osh/playbooks/roles/install-osh-noha/tasks/main.yml
@@ -0,0 +1,130 @@
+---
+- name: Setup Clients
+ command: ./tools/deployment/multinode/010-setup-client.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy the ingress controller
+ command: ./tools/deployment/multinode/020-ingress.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Ceph
+ command: ./tools/deployment/multinode/030-ceph.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Activate the openstack namespace to be able to use Ceph
+ command: ./tools/deployment/multinode/040-ceph-ns-activate.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy MariaDB
+ command: ./tools/deployment/multinode/050-mariadb.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy RabbitMQ
+ command: ./tools/deployment/multinode/060-rabbitmq.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy memcached
+ command: ./tools/deployment/multinode/070-memcached.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Keystone
+ command: ./tools/deployment/multinode/080-keystone.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Horizon
+ command: ./tools/deployment/multinode/085-horizon.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Rados Gateway for object store
+ command: ./tools/deployment/multinode/090-ceph-radosgateway.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Glance
+ command: ./tools/deployment/multinode/100-glance.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Cinder
+ command: ./tools/deployment/multinode/110-cinder.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy OpenvSwitch
+ command: ./tools/deployment/multinode/120-openvswitch.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Libvirt
+ command: ./tools/deployment/multinode/130-libvirt.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Add br-vxlan as the tunnel interface
+ lineinfile:
+ path: /root/repos/openstack-helm/tools/deployment/multinode/140-compute-kit.sh
+ regexp: 'NETWORK_TUNNEL_DEV="$(network_tunnel_dev)"'
+ line: 'NETWORK_TUNNEL_DEV=br-vxlan'
+
+- name: Deploy Compute Kit (Nova and Neutron)
+ command: ./tools/deployment/multinode/140-compute-kit.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Heat
+ command: ./tools/deployment/multinode/150-heat.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Barbican
+ command: ./tools/deployment/multinode/160-barbican.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Copy script to the worker node
+ command: "scp -o \"StrictHostKeyChecking no\" tools/deployment/developer/ceph/170-setup-gateway.sh root@{{ hostvars.node1.ip }}:170-setup-gateway.sh"
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Set up the gateway to the public network on the worker node
+ command: /root/170-setup-gateway.sh
+ changed_when: false
+ delegate_to: node1
+
+- name: Add a route from opnfv to worker node for the public network
+ command: ip route add 172.24.4.0/24 via 192.168.122.4
+ changed_when: false
+
+# Deployment validation
+- name: Exercise the cloud
+ command: ./tools/deployment/developer/common/900-use-it.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
diff --git a/xci/installer/osh/playbooks/roles/prepare-kube-nodes-osh/tasks/main.yml b/xci/installer/osh/playbooks/roles/prepare-kube-nodes-osh/tasks/main.yml
new file mode 100644
index 00000000..ff0aff60
--- /dev/null
+++ b/xci/installer/osh/playbooks/roles/prepare-kube-nodes-osh/tasks/main.yml
@@ -0,0 +1,12 @@
+---
+- name: Install packages in kubernetes nodes
+ package:
+ name: "{{ packages }}"
+ state: present
+ changed_when: false
+ vars:
+ packages:
+ - ceph-common
+ - rbd-nbd
+ - apparmor
+ - nfs-common
diff --git a/xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/files/helm-serve.service b/xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/files/helm-serve.service
new file mode 100644
index 00000000..c3988d6f
--- /dev/null
+++ b/xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/files/helm-serve.service
@@ -0,0 +1,11 @@
+[Unit]
+Description=Helm Server
+After=network.target
+
+[Service]
+User=root
+Restart=always
+ExecStart=/usr/bin/helm serve
+
+[Install]
+WantedBy=multi-user.target
diff --git a/xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/tasks/main.yml b/xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/tasks/main.yml
new file mode 100644
index 00000000..72ae821f
--- /dev/null
+++ b/xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/tasks/main.yml
@@ -0,0 +1,130 @@
+---
+- name: Set kubernetes service account permissions
+ command: "kubectl create clusterrolebinding add-on-cluster-admin --clusterrole=cluster-admin --serviceaccount=kube-system:default"
+ changed_when: false
+
+- name: Set kubernetes node labels
+ command: "kubectl label nodes {{ item }} {{ node_labels[item]|join(' ') }}"
+ changed_when: false
+ with_items: "{{ groups['kube-node'] }}"
+
+- name: Create directories
+ file:
+ path: /root/{{ item }}
+ state: directory
+ with_items:
+ ['repos','tmp', '.helm/repository/local']
+
+- name: Rename bifrost clouds file so it does not take precedence
+ command: "mv .config/openstack/clouds.yaml .config/openstack/clouds.yaml.bifrost"
+ changed_when: false
+
+- name: Clone openstack-helm
+ git:
+ repo: "{{ osh_git_url }}"
+ dest: /root/repos/openstack-helm
+ version: "{{ osh_version }}"
+ update: true
+ force: true
+ register: git_clone
+ until: git_clone is success
+ retries: 2
+ delay: 5
+
+- name: Fix dns nameserver for openstack installation (mini flavor)
+ lineinfile:
+ path: /root/repos/openstack-helm/tools/gate/files/heat-public-net-deployment.yaml
+ regexp: '10\.96\.0\.10'
+ line: " - 10.233.0.3"
+
+- name: Fix dns nameserver for openstack installation (noha flavor)
+ lineinfile:
+ path: /root/repos/openstack-helm/tempest/values.yaml
+ regexp: 'dns_servers'
+ line: " dns_servers: 10.233.0.3"
+
+- name: Clone openstack-helm-infra
+ git:
+ repo: "{{ osh_infra_git_url }}"
+ dest: /root/repos/openstack-helm-infra
+ version: "{{ osh_infra_version }}"
+ update: true
+ force: true
+ register: git_clone
+ until: git_clone is success
+ retries: 2
+ delay: 5
+
+- name: Get helm
+ get_url:
+ url: "{{ osh_helm_binary_url }}/helm-{{ osh_helm_binary_version }}-linux-amd64.tar.gz"
+ dest: tmp
+
+- name: Uncompress helm package
+ command: "tar zxvf tmp/helm-{{ osh_helm_binary_version }}-linux-amd64.tar.gz --strip-components=1 -C tmp/"
+ changed_when: false
+ tags:
+ - skip_ansible_lint
+
+- name: Put helm in system binaries
+ copy:
+ src: tmp/helm
+ dest: /usr/bin/helm
+ remote_src: yes
+ mode: 0755
+
+- name: Create helm-serve service file
+ copy:
+ src: helm-serve.service
+ dest: "/etc/systemd/system/helm-serve.service"
+ mode: 0640
+
+- name: Start helm-serve service
+ service:
+ name: helm-serve
+ state: started
+ enabled: yes
+
+- name: Wait for helm-serve service to start
+ wait_for:
+ port: 8879
+ host: 127.0.0.1
+
+- name: Install pyhelm
+ pip:
+ name: pyhelm
+
+- name: Init helm
+ command: "helm init"
+ changed_when: false
+
+- name: Remove the stable (external) repository from helm
+ command: "helm repo remove stable"
+ changed_when: false
+
+- name: Add the local repository to helm
+ command: "helm repo add local http://localhost:8879/charts"
+ changed_when: false
+
+- name: Make charts from infra
+ make:
+ chdir: /root/repos/openstack-helm-infra
+ target: "{{ item }}"
+ with_items:
+ - helm-toolkit
+ - ingress
+ - mariadb
+ - rabbitmq
+ - memcached
+ - ceph-mon
+ - ceph-osd
+ - ceph-client
+ - ceph-provisioners
+ - ceph-rgw
+ - openvswitch
+ - libvirt
+
+- name: Install packages
+ package:
+ name: "{{ required_packages }}"
+ state: present
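When this role completes, the chart repository served on port 8879 should be the only repository helm knows about, and the charts built by make should be searchable. A sketch using the Helm v2 client installed above:

    helm repo list              # expect a single entry: local  http://localhost:8879/charts
    helm search local/ | head   # helm-toolkit, ingress, mariadb, ... as built above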
diff --git a/xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/vars/main.yml b/xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/vars/main.yml
new file mode 100644
index 00000000..979c3329
--- /dev/null
+++ b/xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/vars/main.yml
@@ -0,0 +1,31 @@
+---
+required_packages:
+- patch
+- ipcalc
+- jq
+- nmap
+- bc
+
+node_labels:
+ node1:
+ - openstack-control-plane=enabled
+ - openstack-compute-node={{ (xci_flavor == 'mini') | ternary('enabled', 'disable') }}
+ - openstack-helm-node-class=primary
+ - openvswitch=enabled
+ - linuxbridge=enabled
+ - ceph-mon=enabled
+ - ceph-osd=enabled
+ - ceph-mds=enabled
+ - ceph-mgr=enabled
+ - ceph-rgw=enabled
+ node2:
+ - openstack-control-plane={{ (xci_flavor == 'noha') | ternary('disable', 'enabled') }}
+ - openstack-compute-node=enabled
+ - openstack-helm-node-class=secondary
+ - openvswitch=enabled
+ - linuxbridge=enabled
+ - ceph-mon=enabled
+ - ceph-osd=enabled
+ - ceph-mds=enabled
+ - ceph-mgr=enabled
+ - ceph-rgw=enabled
diff --git a/xci/installer/osh/playbooks/roles/prepare-osh/tasks/main.yml b/xci/installer/osh/playbooks/roles/prepare-osh/tasks/main.yml
new file mode 100644
index 00000000..453a815c
--- /dev/null
+++ b/xci/installer/osh/playbooks/roles/prepare-osh/tasks/main.yml
@@ -0,0 +1,33 @@
+---
+- name: Write new resolv.conf file
+ template:
+ src: resolv.conf.j2
+ dest: /etc/resolv.conf
+
+- name: Make resolv.conf immutable
+ shell: "chattr +i /etc/resolv.conf"
+ changed_when: false
+ args:
+ executable: /bin/bash
+ tags:
+ - skip_ansible_lint
+
+# TODO: Fetch the value from a file generated by the k8s deployer
+- name: Get kube service addresses
+ shell: "grep -r 'kube_service_addresses:' /root/releng-xci/.cache/repos/kubespray/inventory/opnfv/group_vars/k8s-cluster/k8s-cluster.yml | awk '{print $2}'"
+ changed_when: false
+ args:
+ executable: /bin/bash
+ register: kube_service_addresses
+ tags:
+ - skip_ansible_lint
+
+# This rule allows the openstack client in the OPNFV VM to reach OpenStack
+- name: Update routing table with kube service addresses
+ shell: "ip route add {{ kube_service_addresses.stdout }} via 192.168.122.3 dev br-vlan onlink"
+ changed_when: false
+ args:
+ executable: /bin/bash
+ tags:
+ - skip_ansible_lint
+
diff --git a/xci/installer/osh/playbooks/roles/prepare-osh/templates/resolv.conf.j2 b/xci/installer/osh/playbooks/roles/prepare-osh/templates/resolv.conf.j2
new file mode 100644
index 00000000..ae706e02
--- /dev/null
+++ b/xci/installer/osh/playbooks/roles/prepare-osh/templates/resolv.conf.j2
@@ -0,0 +1,4 @@
+{{ dns_var }}
+{% for nameserver in external_dns_nameservers %}
+nameserver {{ nameserver }}
+{% endfor %}
diff --git a/xci/installer/osh/playbooks/roles/prepare-osh/vars/main.yml b/xci/installer/osh/playbooks/roles/prepare-osh/vars/main.yml
new file mode 100644
index 00000000..4d6f9cbb
--- /dev/null
+++ b/xci/installer/osh/playbooks/roles/prepare-osh/vars/main.yml
@@ -0,0 +1,7 @@
+---
+kube_dns_ip: "10.233.0.3"
+external_dns_nameservers:
+- '{{kube_dns_ip}}'
+- '192.168.122.1'
+dns_var: "search svc.cluster.local cluster.local"
+
diff --git a/xci/opnfv-scenario-requirements.yml b/xci/opnfv-scenario-requirements.yml
index 5f82539d..98abf528 100644
--- a/xci/opnfv-scenario-requirements.yml
+++ b/xci/opnfv-scenario-requirements.yml
@@ -11,11 +11,13 @@
# OPNFV scenarios participating in XCI must create their own entry in this file so
# XCI can make use of them.
#
-- scenario: os-odl-sfc
+
+# OpenStack based scenarios
+- scenario: os-nosdn-nofeature
scm: git
- src: https://gerrit.opnfv.org/gerrit/sfc
+ src: https://gerrit.opnfv.org/gerrit/releng-xci-scenarios
version: master
- role: scenarios/os-odl-sfc/role/os-odl-sfc
+ role: scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature
installers:
- installer: osa
flavors:
@@ -25,12 +27,33 @@
distros:
- opensuse
- ubuntu
+ - centos
+ - installer: osh
+ flavors:
+ - mini
+ - noha
+ distros:
+ - ubuntu
+ - opensuse
+ - ubuntu-bionic
-- scenario: os-nosdn-nofeature
+- scenario: os-nosdn-osm
scm: git
- src: https://gerrit.opnfv.org/gerrit/releng-xci
+ src: https://gerrit.opnfv.org/gerrit/releng-xci-scenarios
version: master
- role: xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature
+ role: scenarios/os-nosdn-osm/role/os-nosdn-osm
+ installers:
+ - installer: osa
+ flavors:
+ - mini
+ distros:
+ - ubuntu
+
+- scenario: os-odl-nofeature
+ scm: git
+ src: https://gerrit.opnfv.org/gerrit/releng-xci-scenarios
+ version: master
+ role: scenarios/os-odl-nofeature/role/os-odl-nofeature
installers:
- installer: osa
flavors:
@@ -40,13 +63,12 @@
distros:
- opensuse
- ubuntu
- - centos
-- scenario: os-odl-nofeature
+- scenario: os-odl-sfc
scm: git
- src: https://gerrit.opnfv.org/gerrit/releng-xci
+ src: https://gerrit.opnfv.org/gerrit/sfc
version: master
- role: xci/scenarios/os-odl-nofeature/role/os-odl-nofeature
+ role: scenarios/os-odl-sfc/role/os-odl-sfc
installers:
- installer: osa
flavors:
@@ -57,22 +79,19 @@
- opensuse
- ubuntu
-- scenario: k8-nosdn-nofeature
+- scenario: os-odl-sfc_osm
scm: git
- src: https://gerrit.opnfv.org/gerrit/releng-xci
+ src: https://gerrit.opnfv.org/gerrit/sfc
version: master
- role: xci/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature
+ role: scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm
installers:
- - installer: kubespray
+ - installer: osa
flavors:
- - aio
- ha
- mini
- noha
distros:
- ubuntu
- - centos
- - opensuse
- scenario: os-odl-bgpvpn
scm: git
@@ -89,15 +108,32 @@
- ubuntu
- centos
+
+# Kubernetes based scenarios
+- scenario: k8-nosdn-nofeature
+ scm: git
+ src: https://gerrit.opnfv.org/gerrit/releng-xci-scenarios
+ version: master
+ role: scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature
+ installers:
+ - installer: kubespray
+ flavors:
+ - ha
+ - mini
+ - noha
+ distros:
+ - ubuntu
+ - centos
+ - opensuse
+
- scenario: k8-canal-nofeature
scm: git
- src: https://gerrit.opnfv.org/gerrit/releng-xci
+ src: https://gerrit.opnfv.org/gerrit/releng-xci-scenarios
version: master
- role: xci/scenarios/k8-canal-nofeature/role/k8-canal-nofeature
+ role: scenarios/k8-canal-nofeature/role/k8-canal-nofeature
installers:
- installer: kubespray
flavors:
- - aio
- ha
- mini
- noha
@@ -108,13 +144,12 @@
- scenario: k8-calico-nofeature
scm: git
- src: https://gerrit.opnfv.org/gerrit/releng-xci
+ src: https://gerrit.opnfv.org/gerrit/releng-xci-scenarios
version: master
- role: xci/scenarios/k8-calico-nofeature/role/k8-calico-nofeature
+ role: scenarios/k8-calico-nofeature/role/k8-calico-nofeature
installers:
- installer: kubespray
flavors:
- - aio
- ha
- mini
- noha
@@ -122,16 +157,39 @@
- ubuntu
- centos
- opensuse
+ - installer: osh
+ flavors:
+ - mini
+ - noha
+ distros:
+ - ubuntu
+ - opensuse
+ - ubuntu-bionic
- scenario: k8-flannel-nofeature
scm: git
- src: https://gerrit.opnfv.org/gerrit/releng-xci
+ src: https://gerrit.opnfv.org/gerrit/releng-xci-scenarios
+ version: master
+ role: scenarios/k8-flannel-nofeature/role/k8-flannel-nofeature
+ installers:
+ - installer: kubespray
+ flavors:
+ - ha
+ - noha
+ - mini
+ distros:
+ - ubuntu
+ - centos
+ - opensuse
+
+- scenario: k8-contiv-nofeature
+ scm: git
+ src: https://gerrit.opnfv.org/gerrit/releng-xci-scenarios
version: master
- role: xci/scenarios/k8-flannel-nofeature/role/k8-flannel-nofeature
+ role: scenarios/k8-contiv-nofeature/role/k8-contiv-nofeature
installers:
- installer: kubespray
flavors:
- - aio
- ha
- noha
- mini
@@ -139,3 +197,19 @@
- ubuntu
- centos
- opensuse
+
+- scenario: k8-nosdn-istio
+ scm: git
+ src: https://gerrit.opnfv.org/gerrit/releng-xci-scenarios
+ version: master
+ role: scenarios/k8-nosdn-istio/role/k8-nosdn-istio
+ installers:
+ - installer: kubespray
+ flavors:
+ - ha
+ - mini
+ - noha
+ distros:
+ - ubuntu
+ - centos
+ - opensuse
diff --git a/xci/playbooks/bootstrap-scenarios.yml b/xci/playbooks/bootstrap-scenarios.yml
deleted file mode 100644
index d1331252..00000000
--- a/xci/playbooks/bootstrap-scenarios.yml
+++ /dev/null
@@ -1,43 +0,0 @@
----
-#
-# This file is aimed to be used by scenarios to plug into the XCI.
-# Ideally, all they need to do at this point is to include their
-# role using a statement like the following one
-#
-# - name: Include foobar role
-# include_role:
-# name: "foobar"
-# when: deploy_scenario == "foobar"
-
-- name: Prepare everything to run the os-nosdn-nofeature scenario
- include_role:
- name: "os-nosdn-nofeature"
- when: deploy_scenario == 'os-nosdn-nofeature'
-- name: Prepare everything to run the os-odl-nofeature scenario
- include_role:
- name: "os-odl-nofeature"
- when: deploy_scenario == 'os-odl-nofeature'
-- name: Prepare everything to run the os-odl-sfc scenario
- include_role:
- name: "os-odl-sfc"
- when: deploy_scenario == 'os-odl-sfc'
-- name: Prepare everything to run the os-odl-bgpvpn scenario
- include_role:
- name: "os-odl-bgpvpn"
- when: deploy_scenario == 'os-odl-bgpvpn'
-- name: Prepare everything to run the k8-canal-nofeature scenario
- include_role:
- name: "k8-canal-nofeature"
- when: deploy_scenario == 'k8-canal-nofeature'
-- name: Prepare everything to run the k8-canal-nofeature scenario
- include_role:
- name: "k8-calico-nofeature"
- when: deploy_scenario == 'k8-calico-nofeature'
-- name: Prepare everything to run the k8-flannel-nofeature scenario
- include_role:
- name: "k8-flannel-nofeature"
- when: deploy_scenario == 'k8-flannel-nofeature'
-- name: Prepare everything to run the k8-nosdn-nofeature scenario
- include_role:
- name: "k8-nosdn-nofeature"
- when: deploy_scenario == 'k8-nosdn-nofeature'
diff --git a/xci/playbooks/configure-localhost.yml b/xci/playbooks/configure-localhost.yml
index 5f091c92..7aab18f3 100644
--- a/xci/playbooks/configure-localhost.yml
+++ b/xci/playbooks/configure-localhost.yml
@@ -25,7 +25,6 @@
state: absent
recurse: no
with_items:
- - "{{ xci_cache }}/repos"
- "{{ log_path }} "
- "{{ opnfv_ssh_host_keys_path }}"
@@ -47,21 +46,21 @@
repo: "{{ kubespray_git_url }}"
dest: "{{ xci_cache }}/repos/kubespray"
version: "{{ kubespray_version }}"
- when: installer_type == "kubespray"
+ when: installer_type in ["kubespray", "osh"]
- role: clone-repository
project: "openstack/openstack-ansible-haproxy_server"
repo: "{{ openstack_osa_haproxy_git_url }}"
dest: roles/haproxy_server
version: "{{ haproxy_version }}"
when:
- - installer_type == "kubespray"
+ - installer_type == "kubespray" or installer_type == "osh"
- role: clone-repository
project: "ansible-keepalived"
repo: "{{ keepalived_git_url }}"
dest: roles/keepalived
version: "{{ keepalived_version }}"
when:
- - installer_type == "kubespray"
+ - installer_type == "kubespray" or installer_type == "osh"
tasks:
- name: create log directory {{log_path}}
@@ -100,3 +99,18 @@
args:
executable: /bin/bash
creates: "{{ xci_path }}/.cache/xci.env"
+
+ #TODO: Create an Ansible variable for
+ # kube_service_addresses(10.233.0.0/18)
+ - name: Update iptables
+ command: "iptables -t nat -I POSTROUTING 3 -s 192.168.122.0/24 -d 10.233.0.0/18 -j RETURN"
+ become: true
+ tags:
+ - skip_ansible_lint
+
+ #Provide access to the external network (for tests)
+ - name: Update iptables
+ command: "iptables -t nat -I POSTROUTING 3 -s 192.168.122.0/24 -d 172.24.4.0/24 -j RETURN"
+ become: true
+ tags:
+ - skip_ansible_lint
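The two RETURN rules inserted above exempt traffic from the libvirt network towards the kube service range and the test external network from source NAT. Their position relative to the MASQUERADE rules can be checked with:

    sudo iptables -t nat -L POSTROUTING -n --line-numbers | head
    # the RETURN rules for 10.233.0.0/18 and 172.24.4.0/24 should precede any MASQUERADE rule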
diff --git a/xci/playbooks/dynamic_inventory.py b/xci/playbooks/dynamic_inventory.py
new file mode 100755
index 00000000..ed63141c
--- /dev/null
+++ b/xci/playbooks/dynamic_inventory.py
@@ -0,0 +1,240 @@
+#!/usr/bin/python
+# coding: utf-8
+
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 SUSE LINUX GmbH.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+#
+# Based on https://raw.githubusercontent.com/ansible/ansible/devel/contrib/inventory/cobbler.py
+
+import argparse
+import glob
+import os
+import sys
+import yaml
+import json
+
+
+class XCIInventory(object):
+ """
+
+ Generates the ansible inventory based on the idf and pdf files provided
+ when executing the deployment script
+
+ """
+ def __init__(self):
+ super(XCIInventory, self).__init__()
+ self.inventory = {}
+ self.inventory['all'] = {}
+ self.inventory['all']['hosts'] = []
+ self.inventory['all']['vars'] = {}
+ self.inventory['_meta'] = {}
+ self.inventory['_meta']['hostvars'] = {}
+ self.installer = os.environ.get('INSTALLER_TYPE', 'osa')
+ self.flavor = os.environ.get('XCI_FLAVOR', 'mini')
+ self.flavor_files = os.path.dirname(os.path.realpath(__file__)) + "/../installer/" + self.installer + "/files/" + self.flavor
+
+ # Static information for opnfv host for now
+ self.add_host('opnfv')
+ self.add_hostvar('opnfv', 'ansible_host', '192.168.122.2')
+ self.add_hostvar('opnfv', 'ip', '192.168.122.2')
+ self.add_to_group('deployment', 'opnfv')
+ self.add_to_group('opnfv', 'opnfv')
+
+ self.opnfv_networks = {}
+ self.opnfv_networks['opnfv'] = {}
+ self.opnfv_networks['opnfv']['mgmt'] = {}
+ self.opnfv_networks['opnfv']['mgmt']['address'] = '172.29.236.10/22'
+ self.opnfv_networks['opnfv']['public'] = {}
+ self.opnfv_networks['opnfv']['public']['address'] = '192.168.122.2/24'
+ self.opnfv_networks['opnfv']['public']['gateway'] = '192.168.122.1'
+ self.opnfv_networks['opnfv']['public']['dns'] = ['192.168.122.1']
+ self.opnfv_networks['opnfv']['private'] = {}
+ self.opnfv_networks['opnfv']['private']['address'] = '172.29.240.10/22'
+ self.opnfv_networks['opnfv']['storage'] = {}
+ self.opnfv_networks['opnfv']['storage']['address'] = '172.29.244.10/24'
+
+ # Add localhost
+ self.add_host('deployment_host')
+ self.add_hostvar('deployment_host', 'ansible_ssh_host', '127.0.0.1')
+ self.add_hostvar('deployment_host', 'ansible_connection', 'local')
+
+ self.read_pdf_idf()
+
+ self.parse_args()
+
+ if self.args.host:
+ self.dump(self.get_host_info(self.args.host))
+ else:
+ self.dump(self.inventory)
+
+ def parse_args(self):
+ parser = argparse.ArgumentParser(description='Produce an Ansible inventory based on PDF/IDF XCI files')
+ parser.add_argument('--list', action='store_true', default=True, help='List XCI hosts (default: True)')
+ parser.add_argument('--host', action='store', help='Get all the variables about a specific host')
+ self.args = parser.parse_args()
+
+ def read_pdf_idf(self):
+ pdf_file = os.environ['PDF']
+ idf_file = os.environ['IDF']
+ opnfv_file = os.path.dirname(os.path.realpath(__file__)) + "/../var/opnfv_vm_pdf.yml"
+ opnfv_idf_file = os.path.dirname(os.path.realpath(__file__)) + "/../var/opnfv_vm_idf.yml"
+ nodes = []
+ host_networks = {}
+
+ with open(pdf_file) as f:
+ try:
+ pdf = yaml.safe_load(f)
+ except yaml.YAMLError as e:
+ print(e)
+ sys.exit(1)
+
+ with open(idf_file) as f:
+ try:
+ idf = yaml.safe_load(f)
+ except yaml.YAMLError as e:
+ print(e)
+ sys.exit(1)
+
+ with open(opnfv_file) as f:
+ try:
+ opnfv_pdf = yaml.safe_load(f)
+ except yaml.YAMLError as e:
+ print(e)
+ sys.exit(1)
+
+ with open(opnfv_idf_file) as f:
+ try:
+ opnfv_idf = yaml.safe_load(f)
+ except yaml.YAMLError as e:
+ print(e)
+ sys.exit(1)
+
+
+ valid_host = (host for host in idf['xci']['installers'][self.installer]['nodes_roles'] \
+ if host in idf['xci']['flavors'][self.flavor] \
+ and host != 'opnfv')
+
+ for host in valid_host:
+ nodes.append(host)
+ hostname = idf['xci']['installers'][self.installer]['hostnames'][host]
+ self.add_host(hostname)
+ for role in idf['xci']['installers'][self.installer]['nodes_roles'][host]:
+ self.add_to_group(role, hostname)
+
+ pdf_host_info = list(filter(lambda x: x['name'] == host, pdf['nodes']))[0]
+ native_vlan_if = list(filter(lambda x: x['vlan'] == 'native', pdf_host_info['interfaces']))
+ self.add_hostvar(hostname, 'ansible_host', native_vlan_if[0]['address'])
+ self.add_hostvar(hostname, 'ip', native_vlan_if[0]['address'])
+ host_networks[hostname] = {}
+ # And now record the rest of the information
+ for network, ndata in idf['idf']['net_config'].items():
+ network_interface_num = idf['idf']['net_config'][network]['interface']
+ host_networks[hostname][network] = {}
+ host_networks[hostname][network]['address'] = pdf_host_info['interfaces'][int(network_interface_num)]['address'] + "/" + str(ndata['mask'])
+ if 'gateway' in ndata.keys():
+ host_networks[hostname][network]['gateway'] = str(ndata['gateway']) + "/" + str(ndata['mask'])
+ if 'dns' in ndata.keys():
+ host_networks[hostname][network]['dns'] = []
+ for d in ndata['dns']:
+ host_networks[hostname][network]['dns'].append(str(d))
+
+ # Get also vlan and mac_address from pdf
+ host_networks[hostname][network]['mac_address'] = str(pdf_host_info['interfaces'][int(network_interface_num)]['mac_address'])
+ host_networks[hostname][network]['vlan'] = str(pdf_host_info['interfaces'][int(network_interface_num)]['vlan'])
+
+ # Get also vlan and mac_address from opnfv_pdf
+ mgmt_idf_index = int(opnfv_idf['opnfv_vm_idf']['net_config']['mgmt']['interface'])
+ opnfv_mgmt = opnfv_pdf['opnfv_vm_pdf']['interfaces'][mgmt_idf_index]
+ admin_idf_index = int(opnfv_idf['opnfv_vm_idf']['net_config']['admin']['interface'])
+ opnfv_public = opnfv_pdf['opnfv_vm_pdf']['interfaces'][admin_idf_index]
+ self.opnfv_networks['opnfv']['mgmt']['mac_address'] = str(opnfv_mgmt['mac_address'])
+ self.opnfv_networks['opnfv']['mgmt']['vlan'] = str(opnfv_mgmt['vlan'])
+ self.opnfv_networks['opnfv']['public']['mac_address'] = str(opnfv_public['mac_address'])
+ self.opnfv_networks['opnfv']['public']['vlan'] = str(opnfv_public['vlan'])
+
+ # Add the interfaces from idf
+
+
+ host_networks.update(self.opnfv_networks)
+
+ self.add_groupvar('all', 'host_info', host_networks)
+
+ if 'deployment_host_interfaces' in idf['xci']['installers'][self.installer]['network']:
+ mgmt_idf_index = int(opnfv_idf['opnfv_vm_idf']['net_config']['mgmt']['interface'])
+ admin_idf_index = int(opnfv_idf['opnfv_vm_idf']['net_config']['admin']['interface'])
+ self.add_hostvar('deployment_host', 'network_interface_admin', idf['xci']['installers'][self.installer]['network']['deployment_host_interfaces'][admin_idf_index])
+ self.add_hostvar('deployment_host', 'network_interface_mgmt', idf['xci']['installers'][self.installer]['network']['deployment_host_interfaces'][mgmt_idf_index])
+
+ # Now add the additional groups
+ for parent in idf['xci']['installers'][self.installer]['groups'].keys():
+ for host in idf['xci']['installers'][self.installer]['groups'][parent]:
+ self.add_group(host, parent)
+
+ # Read additional group variables
+ self.read_additional_group_vars()
+
+ def read_additional_group_vars(self):
+ if not os.path.exists(self.flavor_files + "/inventory/group_vars"):
+ return
+ group_dir = self.flavor_files + "/inventory/group_vars/*.yml"
+ group_file = glob.glob(group_dir)
+ for g in group_file:
+ with open(g) as f:
+ try:
+ group_vars = yaml.safe_load(f)
+ except yaml.YAMLError as e:
+ print(e)
+ sys.exit(1)
+ for k,v in group_vars.items():
+ self.add_groupvar(os.path.basename(g.replace('.yml', '')), k, v)
+
+ def dump(self, data):
+ print(json.dumps(data, sort_keys=True, indent=2))
+
+ def add_host(self, host):
+ self.inventory['all']['hosts'].append(host)
+
+ def hosts(self):
+ return self.inventory['all']['hosts']
+
+ def add_group(self, group, parent = 'all'):
+ if parent not in self.inventory.keys():
+ self.inventory[parent] = {}
+ if 'children' not in self.inventory[parent]:
+ self.inventory[parent]['children'] = []
+ self.inventory[parent]['children'].append(group)
+
+ def add_to_group(self, group, host):
+ if group not in self.inventory.keys():
+ self.inventory[group] = []
+ self.inventory[group].append(host)
+
+ def add_hostvar(self, host, param, value):
+ if host not in self.hostvars():
+ self.inventory['_meta']['hostvars'][host] = {}
+ self.inventory['_meta']['hostvars'][host].update({param: value})
+
+ def add_groupvar(self, group, param, value):
+ if param not in self.groupvars(group):
+ self.inventory[group]['vars'][param] = {}
+ self.inventory[group]['vars'].update({param: value})
+
+ def hostvars(self):
+ return iter(self.inventory['_meta']['hostvars'].keys())
+
+ def groupvars(self, group):
+ return iter(self.inventory[group]['vars'].keys())
+
+ def get_host_info(self, host):
+ return self.inventory['_meta']['hostvars'][host]
+
+if __name__ == '__main__':
+ XCIInventory()
+
+# vim: set ts=4 sw=4 expandtab:
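The inventory script can also be exercised standalone, which helps when debugging PDF/IDF changes. A sketch with illustrative paths:

    export INSTALLER_TYPE=osh XCI_FLAVOR=mini
    export PDF=xci/var/pdf.yml IDF=xci/var/idf.yml    # illustrative; point at your real PDF/IDF
    ./xci/playbooks/dynamic_inventory.py --list       # full inventory as JSON
    ./xci/playbooks/dynamic_inventory.py --host opnfv # hostvars for a single host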
diff --git a/xci/playbooks/get-opnfv-scenario-requirements.yml b/xci/playbooks/get-opnfv-scenario-requirements.yml
index af97ceb2..a9165709 100644
--- a/xci/playbooks/get-opnfv-scenario-requirements.yml
+++ b/xci/playbooks/get-opnfv-scenario-requirements.yml
@@ -31,114 +31,76 @@
loop_control:
label: "{{ item[0].scenario }}"
- - name: Create scenario directories
- file:
- path: "{{ role_path_default }}/{{ item.scenario }}"
- state: directory
- with_items: "{{ scenarios }}"
- loop_control:
- label: "{{ item.scenario }}"
-
- - name: Clone git repos (with git)
- git:
- repo: "{{ item.src }}"
- dest: "{{ scenario_path_default }}/{{ item.scenario | default(item.src | basename) }}"
- version: "{{ item.version | default('master') }}"
- refspec: "{{ item.refspec | default(omit) }}"
- update: true
- force: true
- when:
- - item.scm == "git" or item.scm is undefined
- with_items: "{{ scenarios }}"
- register: git_clone
- until: git_clone | success
- retries: "{{ git_clone_retries }}"
- delay: "{{ git_clone_retry_delay }}"
+ - name: Update scenarios with local overrides
+ set_fact:
+ scenarios: >
+ {%- for z in xci_scenarios_overrides -%}
+ {%- for x in scenarios if x.scenario == z.scenario -%}
+ {%- set _ = x.update(z) -%}
+ {%- endfor -%}
+ {%- endfor -%}
+ {{- scenarios -}}
+ with_items: "{{ xci_scenarios_overrides }}"
loop_control:
label: "{{ item.scenario }}"
+ when: xci_scenarios_overrides is defined
- - name: Check that scenarios exist
- stat:
- path: "{{ scenario_path_default }}/{{ item.scenario }}/{{ item.role }}"
- register: scenarios_list_exists
+ - name: Collect list of known scenarios
+ set_fact:
+ known_scenarios: >
+ {%- set scenario_names = [] -%}
+ {%- for x in scenarios -%}
+ {%- set _ = scenario_names.append(x.scenario) -%}
+ {%- endfor -%}
+ {{- scenario_names -}}
with_items: "{{ scenarios }}"
loop_control:
label: "{{ item.scenario }}"
- - name: Plug in the scenario to XCI
- synchronize:
- src: "{{ scenario_path_default }}/{{ item.item.scenario }}/{{ item.item.role }}/"
- dest: "{{ role_path_default }}/{{ item.item.role | basename }}"
- when: item.stat.exists
- with_items: "{{ scenarios_list_exists.results }}"
- loop_control:
- label: "{{ item.item.scenario }}"
-
- - name: Synchronize local changes to scenarios' master branch
- synchronize:
- src: "{{ xci_path }}/xci/scenarios/{{ item.item.scenario }}/{{ item.item.role | replace('xci/scenarios/' ~ item.item.scenario ~ '/', '') }}/"
- dest: "{{ role_path_default }}/{{ item.item.role | basename }}"
- archive: no
- times: no
- recursive: yes
- checksum: yes
- owner: yes
- group: yes
- perms: yes
- links: yes
- failed_when: false
- when:
- - item.stat.exists
- - item.item.version == 'master'
- with_items: "{{ scenarios_list_exists.results }}"
- loop_control:
- label: "{{ item.item.scenario }}"
+ - name: Fail if 'DEPLOY_SCENARIO' is not defined
+ fail:
+ msg: "DEPLOY_SCENARIO env variable is not defined so no scenario can be deployed"
+ when: deploy_scenario is not defined
- - name: Plug in the scenario to XCI (fallback)
- synchronize:
- src: "{{ xci_path }}/{{ item.item.role }}/"
- dest: "{{ role_path_default }}/{{ item.item.role | basename }}"
- when: not item.stat.exists
- with_items: "{{ scenarios_list_exists.results }}"
- loop_control:
- label: "{{ item.item.scenario }}"
+ - name: Ensure {{ deploy_scenario }} is a known XCI scenario
+ fail:
+ msg: "{{ deploy_scenario }} does not exist"
+ when: deploy_scenario not in known_scenarios
- - name: Gather information about the selected {{ deploy_scenario }} scenario
+ - name: Collect scenario information
set_fact:
- deploy_scenario: "{{ item }}"
- with_items: "{{ scenarios }}"
- loop_control:
- label: "{{ item.scenario }}"
- when: deploy_scenario | lower == item.scenario
+ xci_scenario: >
+ {%- set xci_scenario = {} -%}
+ {%- for x in scenarios if x.scenario == deploy_scenario -%}
+ {%- for z in x.installers if z.installer == installer_type -%}
+ {%- set _ = xci_scenario.update({'flavors': z.flavors}) -%}
+ {%- set _ = xci_scenario.update({'distros': z.distros}) -%}
+ {%- endfor -%}
+ {%- set _ = xci_scenario.update({'role': x.role | basename}) -%}
+ {%- endfor -%}
+ {{ xci_scenario }}
+
+ - name: Ensure local facts directory exists
+ file:
+ path: "/etc/ansible/facts.d"
+ state: directory
+ become: true
- - name: Determine if the selected {{ deploy_scenario }} scenario can be deployed
- block:
- - name: Set scenario installer fact
- set_fact:
- deploy_scenario_installer: "{{ item }}"
- with_items: "{{ deploy_scenario.installers }}"
- loop_control:
- label: "{{ item.installer }}"
- when: item.installer == installer_type
- - name: Set scenario flavor fact
- set_fact:
- deploy_scenario_flavor: "{{ (xci_flavor in deploy_scenario_installer.flavors) | bool }}"
- when:
- - deploy_scenario_installer is defined
- - deploy_scenario_installer
- - name: Set scenario distro flavor fact
- set_fact:
- deploy_scenario_distro: "{{ (xci_distro in deploy_scenario_installer.distros) | bool }}"
- when:
- - deploy_scenario_flavor is defined
- - deploy_scenario_flavor
- when: deploy_scenario is defined
+ - name: Record scenario information
+ ini_file:
+ create: yes
+ section: scenarios
+ state: present
+ option: role
+ value: "{{ xci_scenario.role | basename }}"
+ path: "/etc/ansible/facts.d/xci.fact"
+ become: true
- - name: Fail if {{ deploy_scenario.scenario }} is not supported
+ - name: Fail if {{ deploy_scenario }} is not supported
fail:
msg:
- ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
- - ERROR! The {{ deploy_scenario.scenario }} scenario can't be deployed. This is because
+ - ERROR! The {{ deploy_scenario }} scenario can't be deployed. This is because
- the {{ installer_type }} XCI installer or the {{ xci_flavor }} flavor or the {{ xci_distro }}
- distribution is not supported by this scenario. It may also be possible that
- this scenario doesn't exist at all or it's not listed in {{ scenario_file }}.
@@ -147,9 +109,32 @@
- ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
- ''
when:
- - deploy_scenario is not defined or
- deploy_scenario_distro is not defined or
- (deploy_scenario_distro is defined and not deploy_scenario_distro)
+ (xci_scenario['flavors'] is defined and xci_flavor not in xci_scenario['flavors']) or
+ (xci_scenario['distros'] is defined and xci_distro not in xci_scenario['distros'])
+
+ - name: Clone git repos
+ git:
+ repo: "{{ item.src }}"
+ dest: "{{ scenario_path_default }}/{{ item.scenario | default(item.src | basename) }}"
+ version: "{{ item.version | default('master') }}"
+ refspec: "{{ item.refspec | default(omit) }}"
+ update: true
+ force: true
+ with_items: "{{ scenarios }}"
+ register: git_clone
+ until: git_clone | success
+ retries: "{{ git_clone_retries }}"
+ delay: "{{ git_clone_retry_delay }}"
+ loop_control:
+ label: "{{ item.scenario }}"
+
+ - name: Plug in the scenario Ansible roles to XCI
+ synchronize:
+ src: "{{ scenario_path_default }}/{{ item.scenario }}/{{ item.role }}/"
+ dest: "{{ role_path_default }}/{{ item.role | basename }}"
+ with_items: "{{ scenarios }}"
+ loop_control:
+ label: "{{ item.scenario }}"
vars:
ansible_python_interpreter: "/usr/bin/python"
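The override handling in the "Update scenarios with local overrides" task above is easier to read in plain Python. A rough sketch of the same merge-by-name logic (the scenario data below is illustrative):

# Each entry in xci_scenarios_overrides replaces the matching keys of
# the scenario with the same name, leaving other scenarios untouched.
scenarios = [
    {"scenario": "os-nosdn-nofeature", "version": "master"},
    {"scenario": "k8-calico-nofeature", "version": "master"},
]
xci_scenarios_overrides = [
    {"scenario": "os-nosdn-nofeature", "version": "stable/queens"},
]

for override in xci_scenarios_overrides:
    for scenario in scenarios:
        if scenario["scenario"] == override["scenario"]:
            scenario.update(override)

assert scenarios[0]["version"] == "stable/queens"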
diff --git a/xci/playbooks/manage-ssh-keys.yml b/xci/playbooks/manage-ssh-keys.yml
index ff797aad..999215d8 100644
--- a/xci/playbooks/manage-ssh-keys.yml
+++ b/xci/playbooks/manage-ssh-keys.yml
@@ -6,6 +6,15 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+- name: Configure SSH key for devuser
+ user:
+ name: devuser
+ generate_ssh_key: yes
+ ssh_key_bits: 2048
+ ssh_key_comment: xci
+ ssh_key_type: rsa
+ state: present
+
- name: Configure SSH key for root user
user:
name: root
diff --git a/xci/playbooks/manage-ssl-certs.yml b/xci/playbooks/manage-ssl-certs.yml
deleted file mode 100644
index d0c5c518..00000000
--- a/xci/playbooks/manage-ssl-certs.yml
+++ /dev/null
@@ -1,32 +0,0 @@
-# SPDX-license-identifier: Apache-2.0
-##############################################################################
-# Copyright (c) 2018 SUSE Linux GmbH and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-- name: Install required pip packages for SSL
- pip:
- name: pyOpenSSL
- state: present
- extra_args: "{{ extra_args | default(omit) }}"
-
-- name: Generate XCI private key
- openssl_privatekey:
- path: /etc/ssl/private/xci.key
- size: 2048
-
-- name: Generate XCI certificate request
- openssl_csr:
- privatekey_path: /etc/ssl/private/xci.key
- path: /etc/ssl/private/xci.csr
- common_name: "{{ xci_ssl_subject }}"
-
-- name: Generate XCI self signed certificate
- openssl_certificate:
- path: /etc/ssl/certs/xci.crt
- privatekey_path: /etc/ssl/private/xci.key
- csr_path: /etc/ssl/private/xci.csr
- provider: selfsigned
- selfsigned_not_after: 20800101000000Z
diff --git a/xci/playbooks/prepare-functest.yml b/xci/playbooks/prepare-tests.yml
index a4cb664b..1a1935aa 100644
--- a/xci/playbooks/prepare-functest.yml
+++ b/xci/playbooks/prepare-tests.yml
@@ -13,11 +13,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-- name: Prepare the environment for functest
+- name: Prepare the environment for testing
hosts: opnfv
user: root
vars_files:
- ../var/opnfv.yml
- ../installer/osa/files/openstack_services.yml
roles:
- - role: "prepare-functest"
+ - role: "prepare-tests"
diff --git a/xci/playbooks/roles/bootstrap-host/tasks/network.yml b/xci/playbooks/roles/bootstrap-host/tasks/network.yml
index 92e9195e..a4f260c4 100644
--- a/xci/playbooks/roles/bootstrap-host/tasks/network.yml
+++ b/xci/playbooks/roles/bootstrap-host/tasks/network.yml
@@ -50,99 +50,8 @@
- name: Run handlers
meta: flush_handlers
-- block:
- - name: configure modules
- lineinfile:
- dest: /etc/modules
- state: present
- create: yes
- line: "8021q"
- - name: add modules
- modprobe:
- name: 8021q
- state: present
- - name: ensure interfaces.d folder is empty
- file:
- state: "{{ item }}"
- path: "/etc/network/interfaces.d"
- with_items:
- - absent
- - directory
- - name: ensure interfaces file is updated
- template:
- src: "{{ ansible_os_family | lower }}/{{ ansible_hostname }}.interface.j2"
- dest: "/etc/network/interfaces"
- - name: restart network service
- shell: "/sbin/ifconfig {{ ansible_local.xci.network.xci_interface }} 0 && /sbin/ifdown -a && /sbin/ifup -a"
- async: 15
- poll: 0
- when: ansible_os_family | lower == "debian"
-
-- block:
- - name: Configure networking on SUSE
- template:
- src: "{{ ansible_os_family | lower }}/suse.interface.j2"
- dest: "/etc/sysconfig/network/ifcfg-{{ item.name }}"
- with_items:
- - { name: "{{ ansible_local.xci.network.xci_interface }}" }
- - { name: "{{ ansible_local.xci.network.xci_interface }}.10", vlan_id: 10 }
- - { name: "{{ ansible_local.xci.network.xci_interface }}.30", vlan_id: 30 }
- - { name: "{{ ansible_local.xci.network.xci_interface }}.20", vlan_id: 20 }
- - { name: "br-mgmt", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}.10", ip: "{{ host_info[inventory_hostname].MGMT_IP }}/22" }
- - { name: "br-vxlan", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}.30", ip: "{{ host_info[inventory_hostname].VXLAN_IP }}/22" }
- - { name: "br-vlan", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}", ip: "{{ host_info[inventory_hostname].VLAN_IP }}/24" }
- - { name: "br-storage", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}.20", ip: "{{ host_info[inventory_hostname].STORAGE_IP }}/22" }
-
- - name: Add postup/postdown scripts on SUSE
- copy:
- src: "network-config-suse"
- dest: "/etc/sysconfig/network/scripts/network-config-suse"
- mode: 0755
-
- - name: Configure routes on SUSE
- template:
- src: "{{ ansible_os_family | lower }}/suse.routes.j2"
- dest: "/etc/sysconfig/network/ifroute-{{ item.name }}"
- with_items:
- - { name: "br-vlan", gateway: "192.168.122.1", route: "default" }
- - name: restart network service
- service:
- name: network
- state: restarted
- async: 15
- poll: 0
- when: ansible_os_family | lower == "suse"
-
-- block:
- - name: Configure networking on CentOS for interfaces
- template:
- src: "{{ ansible_os_family | lower }}/interface.ifcfg.j2"
- dest: "/etc/sysconfig/network-scripts/ifcfg-{{ item.name }}"
- with_items:
- - { name: "{{ ansible_local.xci.network.xci_interface }}" , bridge: "br-vlan" }
- - { name: "{{ ansible_local.xci.network.xci_interface }}.10", bridge: "br-mgmt" , vlan_id: 10 }
- - { name: "{{ ansible_local.xci.network.xci_interface }}.20", bridge: "br-storage", vlan_id: 20 }
- - { name: "{{ ansible_local.xci.network.xci_interface }}.30", bridge: "br-vxlan" , vlan_id: 30 }
- - name: Configure networking on CentOS for bridges
- template:
- src: "{{ ansible_os_family | lower }}/bridge.ifcfg.j2"
- dest: "/etc/sysconfig/network-scripts/ifcfg-{{ item.name }}"
- with_items:
- - { name: "br-vlan" , ip: "{{ host_info[inventory_hostname].VLAN_IP }}", prefix: 24 }
- - { name: "br-mgmt" , ip: "{{ host_info[inventory_hostname].MGMT_IP }}", prefix: 22 }
- - { name: "br-storage", ip: "{{ host_info[inventory_hostname].STORAGE_IP }}", prefix: 22 }
- - { name: "br-vxlan" , ip: "{{ host_info[inventory_hostname].VXLAN_IP }}", prefix: 22 }
- - name: Add default route through br-vlan
- lineinfile:
- path: "/etc/sysconfig/network-scripts/ifcfg-br-vlan"
- line: "GATEWAY=192.168.122.1"
- - name: restart network service
- service:
- name: network
- state: restarted
- async: 15
- poll: 0
- when: ansible_os_family | lower == "redhat"
+- name: "Configure networking on {{ ansible_os_family }}"
+ include_tasks: "network_{{ ansible_os_family | lower }}.yml"
- name: Wait for host to come back to life
local_action:
diff --git a/xci/playbooks/roles/bootstrap-host/tasks/network_debian.yml b/xci/playbooks/roles/bootstrap-host/tasks/network_debian.yml
new file mode 100644
index 00000000..176c7eb1
--- /dev/null
+++ b/xci/playbooks/roles/bootstrap-host/tasks/network_debian.yml
@@ -0,0 +1,98 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 SUSE LINUX GmbH.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+- name: configure modules
+ lineinfile:
+ dest: /etc/modules
+ state: present
+ create: yes
+ line: "8021q"
+
+- name: add modules
+ modprobe:
+ name: 8021q
+ state: present
+
+- name: ensure interfaces.d folder is empty
+ file:
+ state: "{{ item }}"
+ path: "/etc/network/interfaces.d"
+ with_items:
+ - absent
+ - directory
+
+- name: Ensure /etc/network/interfaces can source additional files
+ copy:
+ content: |
+ auto lo
+ iface lo inet loopback
+ source /etc/network/interfaces.d/*.cfg
+ dest: "/etc/network/interfaces"
+
+- name: "Configure networking for {{ inventory_hostname }}"
+ template:
+ src: "{{ installer_type }}/debian.interface.j2"
+ dest: "/etc/network/interfaces.d/{{ item.name }}.cfg"
+ with_items:
+ - { name: "{{ ansible_local.xci.network.xci_interface }}" }
+ - { name: "{{ ansible_local.xci.network.xci_interface }}.10", vlan_id: 10 }
+ - { name: "{{ ansible_local.xci.network.xci_interface }}.30", vlan_id: 30 }
+ - { name: "{{ ansible_local.xci.network.xci_interface }}.20", vlan_id: 20 }
+ - { name: "br-mgmt", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}.10", network: "{{ host_info[inventory_hostname].mgmt }}" }
+ - { name: "br-vxlan", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}.30", network: "{{ host_info[inventory_hostname].private }}" }
+ - { name: "br-vlan", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}", network: "{{ host_info[inventory_hostname].public }}" }
+ - { name: "br-storage", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}.20", network: "{{ host_info[inventory_hostname].storage }}" }
+ loop_control:
+ label: "{{ item.name }}"
+ when: baremetal | bool != true
+
+- name: "Configure baremetal networking for blade: {{ inventory_hostname }}"
+ template:
+ src: "{{ installer_type }}/debian.interface.j2"
+ dest: "/etc/network/interfaces.d/{{ item.name }}.cfg"
+ with_items:
+ - { name: "{{ admin_interface }}", network: "{{ host_info[inventory_hostname].admin }}" }
+ - { name: "{{ mgmt_interface }}", vlan_id: "{{ (mgmt_vlan == 'native') | ternary(omit, mgmt_vlan) }}" }
+ - { name: "{{ storage_interface }}", vlan_id: "{{ (storage_vlan == 'native') | ternary(omit, storage_vlan) }}" }
+ - { name: "{{ public_interface }}", vlan_id: "{{ (public_vlan == 'native') | ternary(omit, public_vlan) }}" }
+ - { name: "{{ private_interface }}", vlan_id: "{{ (private_vlan == 'native') | ternary(omit, private_vlan) }}" }
+ - { name: "br-mgmt", bridge_ports: "{{ mgmt_interface }}", network: "{{ host_info[inventory_hostname].mgmt }}" }
+ - { name: "br-vxlan", bridge_ports: "{{ private_interface }}", network: "{{ host_info[inventory_hostname].private }}" }
+ - { name: "br-vlan", bridge_ports: "{{ public_interface }}", network: "{{ host_info[inventory_hostname].public }}" }
+ - { name: "br-storage", bridge_ports: "{{ storage_interface }}", network: "{{ host_info[inventory_hostname].storage }}" }
+ loop_control:
+ label: "{{ item.name }}"
+ when:
+ - baremetal | bool == true
+ - "'opnfv' not in inventory_hostname"
+
+- name: "Configure baremetal networking for VM: {{ inventory_hostname }}"
+ template:
+ src: "{{ installer_type }}/debian.interface.j2"
+ dest: "/etc/network/interfaces.d/{{ item.name }}.cfg"
+ with_items:
+ - { name: "{{ mgmt_interface }}", vlan_id: "{{ (mgmt_vlan == 'native') | ternary(omit, mgmt_vlan) }}" }
+ - { name: "{{ public_interface }}", vlan_id: "{{ (public_vlan == 'native') | ternary(omit, public_vlan) }}" }
+ - { name: "br-mgmt", bridge_ports: "{{ mgmt_interface }}", network: "{{ host_info[inventory_hostname].mgmt }}" }
+ - { name: "br-vlan", bridge_ports: "{{ public_interface }}", network: "{{ host_info[inventory_hostname].public }}" }
+ loop_control:
+ label: "{{ item.name }}"
+ when:
+ - baremetal | bool == true
+ - "'opnfv' in inventory_hostname"
+
+- name: restart network service
+ shell: "/sbin/ip addr flush dev {{ item }}; /sbin/ifdown -a; /sbin/ifup -a"
+ async: 15
+ poll: 0
+ with_items:
+ - "{{ public_interface }}"
+ - "{{ mgmt_interface }}"
diff --git a/xci/playbooks/roles/bootstrap-host/tasks/network_redhat.yml b/xci/playbooks/roles/bootstrap-host/tasks/network_redhat.yml
new file mode 100644
index 00000000..288fdf65
--- /dev/null
+++ b/xci/playbooks/roles/bootstrap-host/tasks/network_redhat.yml
@@ -0,0 +1,32 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 SUSE LINUX GmbH.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+- name: "Configure networking on {{ inventory_hostname }}"
+ template:
+ src: "{{ installer_type }}/{{ ansible_os_family | lower }}.ifcfg.j2"
+ dest: "/etc/sysconfig/network-scripts/ifcfg-{{ item.name }}"
+ with_items:
+ - { name: "{{ ansible_local.xci.network.xci_interface }}" , bridge: "br-vlan" }
+ - { name: "{{ ansible_local.xci.network.xci_interface }}.10", bridge: "br-mgmt" , vlan_id: 10 }
+ - { name: "{{ ansible_local.xci.network.xci_interface }}.20", bridge: "br-storage", vlan_id: 20 }
+ - { name: "{{ ansible_local.xci.network.xci_interface }}.30", bridge: "br-vxlan" , vlan_id: 30 }
+ - { name: "br-vlan" , network: "{{ host_info[inventory_hostname].public }}" }
+ - { name: "br-mgmt" , network: "{{ host_info[inventory_hostname].mgmt }}" }
+ - { name: "br-storage", network: "{{ host_info[inventory_hostname].storage }}" }
+ - { name: "br-vxlan" , network: "{{ host_info[inventory_hostname].private }}" }
+ loop_control:
+ label: "{{ item.name }}"
+
+- name: restart network service
+ service:
+ name: network
+ state: restarted
+ async: 15
+ poll: 0
diff --git a/xci/playbooks/roles/bootstrap-host/tasks/network_suse.yml b/xci/playbooks/roles/bootstrap-host/tasks/network_suse.yml
new file mode 100644
index 00000000..a8f1bf59
--- /dev/null
+++ b/xci/playbooks/roles/bootstrap-host/tasks/network_suse.yml
@@ -0,0 +1,93 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 SUSE LINUX GmbH.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+- name: "Configure networking on {{ inventory_hostname }}"
+ template:
+ src: "{{ installer_type }}/{{ ansible_os_family | lower }}.interface.j2"
+ dest: "/etc/sysconfig/network/ifcfg-{{ item.name }}"
+ with_items:
+ - { name: "{{ ansible_local.xci.network.xci_interface }}" }
+ - { name: "{{ ansible_local.xci.network.xci_interface }}.10", vlan_id: 10 }
+ - { name: "{{ ansible_local.xci.network.xci_interface }}.30", vlan_id: 30 }
+ - { name: "{{ ansible_local.xci.network.xci_interface }}.20", vlan_id: 20 }
+ - { name: "br-mgmt", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}.10", network: "{{ host_info[inventory_hostname].mgmt }}" }
+ - { name: "br-vxlan", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}.30", network: "{{ host_info[inventory_hostname].private }}" }
+ - { name: "br-vlan", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}", network: "{{ host_info[inventory_hostname].public }}" }
+ - { name: "br-storage", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}.20", network: "{{ host_info[inventory_hostname].storage }}" }
+ loop_control:
+ label: "{{ item.name }}"
+ when: baremetal | bool != true
+
+- name: "Configure baremetal networking for blade: {{ inventory_hostname }}"
+ template:
+ src: "{{ installer_type }}/{{ ansible_os_family | lower }}.interface.j2"
+ dest: "/etc/sysconfig/network/ifcfg-{{ item.name }}"
+ with_items:
+ - { name: "{{ admin_interface }}", network: "{{ host_info[inventory_hostname].admin }}" }
+ - { name: "{{ mgmt_interface }}", vlan_id: "{{ (mgmt_vlan == 'native') | ternary(omit, mgmt_vlan) }}" }
+ - { name: "{{ storage_interface }}", vlan_id: "{{ (storage_vlan == 'native') | ternary(omit, storage_vlan) }}" }
+ - { name: "{{ public_interface }}", vlan_id: "{{ (public_vlan == 'native') | ternary(omit, public_vlan) }}" }
+ - { name: "{{ private_interface }}", vlan_id: "{{ (private_vlan == 'native') | ternary(omit, private_vlan) }}" }
+ - { name: "br-mgmt", bridge_ports: "{{ mgmt_interface }}", network: "{{ host_info[inventory_hostname].mgmt }}" }
+ - { name: "br-vxlan", bridge_ports: "{{ private_interface }}", network: "{{ host_info[inventory_hostname].private }}" }
+ - { name: "br-vlan", bridge_ports: "{{ public_interface }}", network: "{{ host_info[inventory_hostname].public }}" }
+ - { name: "br-storage", bridge_ports: "{{ storage_interface }}", network: "{{ host_info[inventory_hostname].storage }}" }
+ loop_control:
+ label: "{{ item.name }}"
+ when:
+ - baremetal | bool == true
+ - "'opnfv' not in inventory_hostname"
+
+- name: "Configure baremetal networking for VM: {{ inventory_hostname }}"
+ template:
+ src: "{{ installer_type }}/{{ ansible_os_family | lower }}.interface.j2"
+ dest: "/etc/sysconfig/network/ifcfg-{{ item.name }}"
+ with_items:
+ - { name: "{{ mgmt_interface }}", vlan_id: "{{ (mgmt_vlan == 'native') | ternary(omit, mgmt_vlan) }}" }
+ - { name: "{{ mgmt_interface }}.30", vlan_id: 30 }
+ - { name: "{{ mgmt_interface }}.20", vlan_id: 20 }
+ - { name: "{{ public_interface }}", vlan_id: "{{ (public_vlan == 'native') | ternary(omit, public_vlan) }}" }
+ - { name: "br-mgmt", bridge_ports: "{{ mgmt_interface }}", network: "{{ host_info[inventory_hostname].mgmt }}" }
+ - { name: "br-vlan", bridge_ports: "{{ public_interface }}", network: "{{ host_info[inventory_hostname].public }}" }
+ - { name: "br-vxlan", bridge_ports: "{{ mgmt_interface }}.30", network: "{{ host_info[inventory_hostname].private }}" }
+ - { name: "br-storage", bridge_ports: "{{ mgmt_interface }}.20", network: "{{ host_info[inventory_hostname].storage }}" }
+ loop_control:
+ label: "{{ item.name }}"
+ when:
+ - baremetal | bool == true
+ - "'opnfv' in inventory_hostname"
+
+- name: Add postup/postdown scripts on SUSE
+ copy:
+ src: "network-config-suse"
+ dest: "/etc/sysconfig/network/scripts/network-config-suse"
+ mode: 0755
+
+- name: Configure static DNS on SUSE
+ lineinfile:
+ regexp: '^NETCONFIG_DNS_STATIC_SERVERS=.*'
+ line: "NETCONFIG_DNS_STATIC_SERVERS=\"{{ host_info[inventory_hostname]['public']['dns'] | join(' ') }}\""
+ path: "/etc/sysconfig/network/config"
+ state: present
+ when: host_info[inventory_hostname]['public']['dns'] is defined
+
+- name: Configure routes on SUSE
+ template:
+ src: "{{ installer_type }}/{{ ansible_os_family | lower }}.routes.j2"
+ dest: "/etc/sysconfig/network/ifroute-{{ item.name }}"
+ with_items:
+ - { name: "br-vlan", gateway: "{{ host_info[inventory_hostname]['public']['gateway'] }}", route: "default" }
+
+- name: restart network service
+ service:
+ name: network
+ state: restarted
+ async: 15
+ poll: 0
diff --git a/xci/playbooks/roles/bootstrap-host/templates/debian/compute00.interface.j2 b/xci/playbooks/roles/bootstrap-host/templates/debian/compute00.interface.j2
deleted file mode 100644
index 2da12f20..00000000
--- a/xci/playbooks/roles/bootstrap-host/templates/debian/compute00.interface.j2
+++ /dev/null
@@ -1,75 +0,0 @@
-# {{ ansible_managed }}
-
-# The loopback network interface
-auto lo
-iface lo inet loopback
-
-# Physical interface
-auto {{ ansible_default_ipv4.interface }}
-iface {{ ansible_default_ipv4.interface }} inet manual
-
-# Container/Host management VLAN interface
-auto {{ ansible_default_ipv4.interface }}.10
-iface {{ ansible_default_ipv4.interface }}.10 inet manual
- vlan-raw-device {{ ansible_default_ipv4.interface }}
-
-# OpenStack Networking VXLAN (tunnel/overlay) VLAN interface
-auto {{ ansible_default_ipv4.interface }}.30
-iface {{ ansible_default_ipv4.interface }}.30 inet manual
- vlan-raw-device {{ ansible_default_ipv4.interface }}
-
-# Storage network VLAN interface
-auto {{ ansible_default_ipv4.interface }}.20
-iface {{ ansible_default_ipv4.interface }}.20 inet manual
- vlan-raw-device {{ ansible_default_ipv4.interface }}
-
-# Container/Host management bridge
-auto br-mgmt
-iface br-mgmt inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports {{ ansible_default_ipv4.interface }}.10
- address {{host_info[inventory_hostname].MGMT_IP}}
- netmask 255.255.252.0
-
-# compute1 VXLAN (tunnel/overlay) bridge config
-auto br-vxlan
-iface br-vxlan inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports {{ ansible_default_ipv4.interface }}.30
- address {{host_info[inventory_hostname].VXLAN_IP}}
- netmask 255.255.252.0
-
-# OpenStack Networking VLAN bridge
-auto br-vlan
-iface br-vlan inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports {{ ansible_default_ipv4.interface }}
- address {{host_info[inventory_hostname].VLAN_IP}}
- netmask 255.255.255.0
- gateway 192.168.122.1
- dns-nameserver 8.8.8.8 8.8.4.4
- offload-sg off
- # Create veth pair, don't bomb if already exists
- pre-up ip link add br-vlan-veth type veth peer name eth12 || true
- # Set both ends UP
- pre-up ip link set br-vlan-veth up
- pre-up ip link set eth12 up
- # Delete veth pair on DOWN
- post-down ip link del br-vlan-veth || true
- bridge_ports br-vlan-veth
-
-# OpenStack Storage bridge
-auto br-storage
-iface br-storage inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports {{ ansible_default_ipv4.interface }}.20
- address {{host_info[inventory_hostname].STORAGE_IP}}
- netmask 255.255.252.0
diff --git a/xci/playbooks/roles/bootstrap-host/templates/debian/compute01.interface.j2 b/xci/playbooks/roles/bootstrap-host/templates/debian/compute01.interface.j2
deleted file mode 120000
index a74df1c2..00000000
--- a/xci/playbooks/roles/bootstrap-host/templates/debian/compute01.interface.j2
+++ /dev/null
@@ -1 +0,0 @@
-compute00.interface.j2 \ No newline at end of file
diff --git a/xci/playbooks/roles/bootstrap-host/templates/debian/controller00.interface.j2 b/xci/playbooks/roles/bootstrap-host/templates/debian/controller00.interface.j2
deleted file mode 100644
index c540f66e..00000000
--- a/xci/playbooks/roles/bootstrap-host/templates/debian/controller00.interface.j2
+++ /dev/null
@@ -1,74 +0,0 @@
-# {{ ansible_managed }}
-
-# The loopback network interface
-auto lo
-iface lo inet loopback
-
-# Physical interface
-auto {{ ansible_default_ipv4.interface }}
-iface {{ ansible_default_ipv4.interface }} inet manual
-
-# Container/Host management VLAN interface
-auto {{ ansible_default_ipv4.interface }}.10
-iface {{ ansible_default_ipv4.interface }}.10 inet manual
- vlan-raw-device {{ ansible_default_ipv4.interface }}
-
-# OpenStack Networking VXLAN (tunnel/overlay) VLAN interface
-auto {{ ansible_default_ipv4.interface }}.30
-iface {{ ansible_default_ipv4.interface }}.30 inet manual
- vlan-raw-device {{ ansible_default_ipv4.interface }}
-
-# Storage network VLAN interface (optional)
-auto {{ ansible_default_ipv4.interface }}.20
-iface {{ ansible_default_ipv4.interface }}.20 inet manual
- vlan-raw-device {{ ansible_default_ipv4.interface }}
-
-# Container/Host management bridge
-auto br-mgmt
-iface br-mgmt inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports {{ ansible_default_ipv4.interface }}.10
- address {{host_info[inventory_hostname].MGMT_IP}}
- netmask 255.255.252.0
-
-# OpenStack Networking VXLAN (tunnel/overlay) bridge
-auto br-vxlan
-iface br-vxlan inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports {{ ansible_default_ipv4.interface }}.30
- address {{host_info[inventory_hostname].VXLAN_IP}}
- netmask 255.255.252.0
-
-# OpenStack Networking VLAN bridge
-auto br-vlan
-iface br-vlan inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports {{ ansible_default_ipv4.interface }}
- address {{host_info[inventory_hostname].VLAN_IP}}
- netmask 255.255.255.0
- gateway 192.168.122.1
- dns-nameserver 8.8.8.8 8.8.4.4
- # Create veth pair, don't bomb if already exists
- pre-up ip link add br-vlan-veth type veth peer name eth12 || true
- # Set both ends UP
- pre-up ip link set br-vlan-veth up
- pre-up ip link set eth12 up
- # Delete veth pair on DOWN
- post-down ip link del br-vlan-veth || true
- bridge_ports br-vlan-veth
-
-# OpenStack Storage bridge
-auto br-storage
-iface br-storage inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports {{ ansible_default_ipv4.interface }}.20
- address {{host_info[inventory_hostname].STORAGE_IP}}
- netmask 255.255.252.0
diff --git a/xci/playbooks/roles/bootstrap-host/templates/debian/controller01.interface.j2 b/xci/playbooks/roles/bootstrap-host/templates/debian/controller01.interface.j2
deleted file mode 120000
index e835d7ca..00000000
--- a/xci/playbooks/roles/bootstrap-host/templates/debian/controller01.interface.j2
+++ /dev/null
@@ -1 +0,0 @@
-controller00.interface.j2 \ No newline at end of file
diff --git a/xci/playbooks/roles/bootstrap-host/templates/debian/controller02.interface.j2 b/xci/playbooks/roles/bootstrap-host/templates/debian/controller02.interface.j2
deleted file mode 120000
index e835d7ca..00000000
--- a/xci/playbooks/roles/bootstrap-host/templates/debian/controller02.interface.j2
+++ /dev/null
@@ -1 +0,0 @@
-controller00.interface.j2 \ No newline at end of file
diff --git a/xci/playbooks/roles/bootstrap-host/templates/debian/opnfv.interface.j2 b/xci/playbooks/roles/bootstrap-host/templates/debian/opnfv.interface.j2
deleted file mode 100644
index 03f81dbb..00000000
--- a/xci/playbooks/roles/bootstrap-host/templates/debian/opnfv.interface.j2
+++ /dev/null
@@ -1,66 +0,0 @@
-# {{ ansible_managed }}
-
-# The loopback network interface
-auto lo
-iface lo inet loopback
-
-# Physical interface
-auto {{ ansible_default_ipv4.interface }}
-iface {{ ansible_default_ipv4.interface }} inet manual
-
-# Container/Host management VLAN interface
-auto {{ ansible_default_ipv4.interface }}.10
-iface {{ ansible_default_ipv4.interface }}.10 inet manual
- vlan-raw-device {{ ansible_default_ipv4.interface }}
-
-# OpenStack Networking VXLAN (tunnel/overlay) VLAN interface
-auto {{ ansible_default_ipv4.interface }}.30
-iface {{ ansible_default_ipv4.interface }}.30 inet manual
- vlan-raw-device {{ ansible_default_ipv4.interface }}
-
-# Storage network VLAN interface (optional)
-auto {{ ansible_default_ipv4.interface }}.20
-iface {{ ansible_default_ipv4.interface }}.20 inet manual
- vlan-raw-device {{ ansible_default_ipv4.interface }}
-
-# Container/Host management bridge
-auto br-mgmt
-iface br-mgmt inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports {{ ansible_default_ipv4.interface }}.10
- address {{host_info[inventory_hostname].MGMT_IP}}
- netmask 255.255.252.0
-
-# OpenStack Networking VXLAN (tunnel/overlay) bridge
-auto br-vxlan
-iface br-vxlan inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports {{ ansible_default_ipv4.interface }}.30
- address {{ host_info[inventory_hostname].VXLAN_IP }}
- netmask 255.255.252.0
-
-# OpenStack Networking VLAN bridge
-auto br-vlan
-iface br-vlan inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports {{ ansible_default_ipv4.interface }}
- address {{host_info[inventory_hostname].VLAN_IP}}
- netmask 255.255.255.0
- gateway 192.168.122.1
- dns-nameserver 8.8.8.8 8.8.4.4
-
-# OpenStack Storage bridge
-auto br-storage
-iface br-storage inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports {{ ansible_default_ipv4.interface }}.20
- address {{host_info[inventory_hostname].STORAGE_IP}}
- netmask 255.255.252.0
diff --git a/xci/playbooks/roles/bootstrap-host/templates/kubespray b/xci/playbooks/roles/bootstrap-host/templates/kubespray
new file mode 120000
index 00000000..f820fd11
--- /dev/null
+++ b/xci/playbooks/roles/bootstrap-host/templates/kubespray
@@ -0,0 +1 @@
+osa \ No newline at end of file
diff --git a/xci/playbooks/roles/bootstrap-host/templates/osa/debian.interface.j2 b/xci/playbooks/roles/bootstrap-host/templates/osa/debian.interface.j2
new file mode 100644
index 00000000..2f976002
--- /dev/null
+++ b/xci/playbooks/roles/bootstrap-host/templates/osa/debian.interface.j2
@@ -0,0 +1,39 @@
+# {{ ansible_managed }}
+
+# Physical interface
+{% if item.bridge_ports is not defined %}
+auto {{ item.name }}
+iface {{ item.name }} inet manual
+{% if item.vlan_id is defined %}
+ vlan-raw-device {{ item.name|replace('.' ~ item.vlan_id, '') }}
+{% endif %}
+
+{% else %}
+auto {{ item.name }}
+iface {{ item.name }} inet static
+ bridge_stp off
+ bridge_waitport 0
+ bridge_fd 0
+ bridge_ports {{ item.bridge_ports }}
+{% if item.name == 'br-vlan' %}
+ # Create veth pair, don't bomb if already exists
+ pre-up ip link add br-vlan-veth type veth peer name eth12 || true
+ # Set both ends UP
+ pre-up ip link set br-vlan-veth up
+ pre-up ip link set eth12 up
+ # Delete veth pair on DOWN
+ post-down ip link del br-vlan-veth || true
+ bridge_ports br-vlan-veth
+{% endif %}
+{% if item.network is defined %}
+ address {{ item.network.address | ipaddr('address') }}
+ netmask {{ item.network.address | ipaddr('netmask') }}
+{% endif %}
+{% if item.network is defined and item.network.gateway is defined %}
+ gateway {{ item.network.gateway | ipaddr('address') }}
+{% endif %}
+{% if item.network is defined and item.network.dns is defined %}
+ dns-nameservers {{ item.network.dns | join(' ') }}
+{% endif %}
+
+{% endif %}
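To illustrate, the bridge branch of this template renders roughly as in the sketch below (assuming the jinja2 package is installed). The template here is trimmed down, and since 'ipaddr' is an Ansible filter, address and netmask are pre-split for brevity:

from jinja2 import Template

# Trimmed-down version of the bridge branch above; values are illustrative.
template = Template(
    "auto {{ name }}\n"
    "iface {{ name }} inet static\n"
    "    bridge_ports {{ bridge_ports }}\n"
    "    address {{ address }}\n"
    "    netmask {{ netmask }}\n"
)

print(template.render(name="br-mgmt", bridge_ports="eth0.10",
                      address="172.29.236.11", netmask="255.255.252.0"))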
diff --git a/xci/playbooks/roles/bootstrap-host/templates/osa/redhat.interface.j2 b/xci/playbooks/roles/bootstrap-host/templates/osa/redhat.interface.j2
new file mode 100644
index 00000000..525686d9
--- /dev/null
+++ b/xci/playbooks/roles/bootstrap-host/templates/osa/redhat.interface.j2
@@ -0,0 +1,26 @@
+DEVICE={{ item.name }}
+NM_CONTROLLED=no
+ONBOOT=yes
+BOOTPROTO=none
+{% if item.vlan_id is defined %}
+VLAN=yes
+ETHERDEVICE={{ ansible_local.xci.network.xci_interface }}
+VLAN_ID={{ item.vlan_id }}
+{% endif %}
+{% if item.bridge is defined %}
+BRIDGE={{ item.bridge }}
+{% else %}
+TYPE=Bridge
+DELAY=0
+STP=off
+{% endif %}
+{% if item.network is defined %}
+IPADDR={{ item.network.address }}
+{% endif %}
+{% if item.network is defined and item.network.gateway is defined %}
+GATEWAY="{{ host_info[inventory_hostname]['public']['gateway'] | ipaddr('address') }}"
+{% endif %}
+{% if item.network is defined and item.network.dns is defined %}
+DNS="{{ host_info[inventory_hostname]['public']['dns'] | join(' ') }}"
+{% endif %}
diff --git a/xci/playbooks/roles/bootstrap-host/templates/suse/suse.interface.j2 b/xci/playbooks/roles/bootstrap-host/templates/osa/suse.interface.j2
index 27b01eb4..7c2929d6 100644
--- a/xci/playbooks/roles/bootstrap-host/templates/suse/suse.interface.j2
+++ b/xci/playbooks/roles/bootstrap-host/templates/osa/suse.interface.j2
@@ -1,8 +1,7 @@
STARTMODE='auto'
BOOTPROTO='static'
{% if item.vlan_id is defined %}
-ETHERDEVICE={{ ansible_default_ipv4.interface }}
-VLAN_ID={{ item.vlan_id }}
+ETHERDEVICE={{ item.name.split('.')[0] }}
{% endif %}
{% if item.bridge_ports is defined %}
BRIDGE='yes'
@@ -10,8 +9,8 @@ BRIDGE_FORWARDDELAY='0'
BRIDGE_STP=off
BRIDGE_PORTS={{ item.bridge_ports }}
{% endif %}
-{% if item.ip is defined %}
-IPADDR={{ item.ip }}
+{% if item.network is defined %}
+IPADDR={{ item.network.address }}
{% endif %}
PRE_UP_SCRIPT="compat:suse:network-config-suse"
POST_DOWN_SCRIPT="compat:suse:network-config-suse"
diff --git a/xci/playbooks/roles/bootstrap-host/templates/osa/suse.routes.j2 b/xci/playbooks/roles/bootstrap-host/templates/osa/suse.routes.j2
new file mode 100644
index 00000000..93941fad
--- /dev/null
+++ b/xci/playbooks/roles/bootstrap-host/templates/osa/suse.routes.j2
@@ -0,0 +1 @@
+{{ item.route }} {{ item.gateway | ipaddr('address') }}
diff --git a/xci/playbooks/roles/bootstrap-host/templates/osh b/xci/playbooks/roles/bootstrap-host/templates/osh
new file mode 120000
index 00000000..f820fd11
--- /dev/null
+++ b/xci/playbooks/roles/bootstrap-host/templates/osh
@@ -0,0 +1 @@
+osa \ No newline at end of file
diff --git a/xci/playbooks/roles/bootstrap-host/templates/redhat/bridge.ifcfg.j2 b/xci/playbooks/roles/bootstrap-host/templates/redhat/bridge.ifcfg.j2
deleted file mode 100644
index 06b5f177..00000000
--- a/xci/playbooks/roles/bootstrap-host/templates/redhat/bridge.ifcfg.j2
+++ /dev/null
@@ -1,9 +0,0 @@
-DEVICE={{ item.name }}
-NM_CONTROLLED=no
-IPADDR={{ item.ip }}
-PREFIX={{ item.prefix }}
-ONBOOT=yes
-BOOTPROTO=none
-TYPE=Bridge
-DELAY=0
-STP=off
diff --git a/xci/playbooks/roles/bootstrap-host/templates/redhat/interface.ifcfg.j2 b/xci/playbooks/roles/bootstrap-host/templates/redhat/interface.ifcfg.j2
deleted file mode 100644
index a97ad0cf..00000000
--- a/xci/playbooks/roles/bootstrap-host/templates/redhat/interface.ifcfg.j2
+++ /dev/null
@@ -1,10 +0,0 @@
-DEVICE={{ item.name }}
-NM_CONTROLLED=no
-ONBOOT=yes
-BOOTPROTO=none
-{% if item.vlan_id is defined %}
-VLAN=yes
-ETHERDEVICE={{ ansible_default_ipv4.interface }}
-VLAN_ID={{ item.vlan_id }}
-{% endif %}
-BRIDGE={{ item.bridge }}
diff --git a/xci/playbooks/roles/bootstrap-host/templates/suse/suse.routes.j2 b/xci/playbooks/roles/bootstrap-host/templates/suse/suse.routes.j2
deleted file mode 100644
index 7c868447..00000000
--- a/xci/playbooks/roles/bootstrap-host/templates/suse/suse.routes.j2
+++ /dev/null
@@ -1 +0,0 @@
-{{ item.route }} {{ item.gateway }}
diff --git a/xci/playbooks/roles/bootstrap-host/vars/main.yml b/xci/playbooks/roles/bootstrap-host/vars/main.yml
new file mode 100644
index 00000000..1730ad57
--- /dev/null
+++ b/xci/playbooks/roles/bootstrap-host/vars/main.yml
@@ -0,0 +1,70 @@
+---
+# admin network information
+admin_mac: "{{ host_info[inventory_hostname].admin.mac_address }}"
+admin_interface: >-
+ {%- for x in (ansible_interfaces | map('regex_replace', '-', '_') | map('regex_replace', '^', 'ansible_') | map('extract', hostvars[inventory_hostname]) | selectattr('macaddress','defined')) -%}
+ {%- if x.macaddress == admin_mac -%}
+ {%- if admin_vlan == 'native' -%}
+ {{ x.device }}
+ {%- else -%}
+ {{ x.device }}.{{ admin_vlan }}
+ {%- endif -%}
+ {%- endif -%}
+ {%- endfor -%}
+admin_vlan: "{{ host_info[inventory_hostname].admin.vlan }}"
+
+# mgmt network information
+mgmt_mac: "{{ host_info[inventory_hostname].mgmt.mac_address }}"
+mgmt_interface: >-
+ {%- for x in (ansible_interfaces | map('regex_replace', '-', '_') | map('regex_replace', '^', 'ansible_') | map('extract', hostvars[inventory_hostname]) | selectattr('macaddress','defined')) -%}
+ {%- if x.macaddress == mgmt_mac -%}
+ {%- if mgmt_vlan == 'native' -%}
+ {{ x.device }}
+ {%- else -%}
+ {{ x.device }}.{{ mgmt_vlan }}
+ {%- endif -%}
+ {%- endif -%}
+ {%- endfor -%}
+mgmt_vlan: "{{ host_info[inventory_hostname].mgmt.vlan }}"
+
+# storage network information
+storage_mac: "{{ host_info[inventory_hostname].storage.mac_address }}"
+storage_interface: >-
+ {%- for x in (ansible_interfaces | map('regex_replace', '-', '_') | map('regex_replace', '^', 'ansible_') | map('extract', hostvars[inventory_hostname]) | selectattr('macaddress','defined')) -%}
+ {%- if x.macaddress == storage_mac -%}
+ {%- if storage_vlan == 'native' -%}
+ {{ x.device }}
+ {%- else -%}
+ {{ x.device }}.{{ storage_vlan }}
+ {%- endif -%}
+ {%- endif -%}
+ {%- endfor -%}
+storage_vlan: "{{ host_info[inventory_hostname].storage.vlan }}"
+
+# public vlan network information
+public_mac: "{{ host_info[inventory_hostname].public.mac_address }}"
+public_interface: >-
+ {%- for x in (ansible_interfaces | map('regex_replace', '-', '_') | map('regex_replace', '^', 'ansible_') | map('extract', hostvars[inventory_hostname]) | selectattr('macaddress','defined')) -%}
+ {%- if x.macaddress == public_mac -%}
+ {%- if public_vlan == 'native' -%}
+ {{ x.device }}
+ {%- else -%}
+ {{ x.device }}.{{ public_vlan }}
+ {%- endif -%}
+ {%- endif -%}
+ {%- endfor -%}
+public_vlan: "{{ host_info[inventory_hostname].public.vlan }}"
+
+# private vxlan network information
+private_mac: "{{ host_info[inventory_hostname].private.mac_address }}"
+private_interface: >-
+ {%- for x in (ansible_interfaces | map('regex_replace', '-', '_') | map('regex_replace', '^', 'ansible_') | map('extract', hostvars[inventory_hostname]) | selectattr('macaddress','defined')) -%}
+ {%- if x.macaddress == private_mac -%}
+ {%- if private_vlan == 'native' -%}
+ {{ x.device }}
+ {%- else -%}
+ {{ x.device }}.{{ private_vlan }}
+ {%- endif -%}
+ {%- endif -%}
+ {%- endfor -%}
+private_vlan: "{{ host_info[inventory_hostname].private.vlan }}"
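The Jinja expressions above all follow the same pattern: walk the interface facts, match on the MAC address recorded in the pdf, and append the VLAN id unless the network rides the native VLAN. A rough Python equivalent (the interface facts and MACs below are illustrative):

# Resolve a host interface name from its MAC address, adding a VLAN
# suffix for tagged networks, mirroring the *_interface vars above.
interfaces = [
    {"device": "eth0", "macaddress": "52:54:00:aa:bb:cc"},
    {"device": "eth1", "macaddress": "52:54:00:dd:ee:ff"},
]

def resolve_interface(mac, vlan):
    for iface in interfaces:
        if iface["macaddress"] == mac:
            if vlan == "native":
                return iface["device"]
            return "%s.%s" % (iface["device"], vlan)
    return None

print(resolve_interface("52:54:00:dd:ee:ff", 10))        # -> eth1.10
print(resolve_interface("52:54:00:aa:bb:cc", "native"))  # -> eth0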
diff --git a/xci/playbooks/roles/create-nodes/README.md b/xci/playbooks/roles/create-nodes/README.md
new file mode 100644
index 00000000..bf190296
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/README.md
@@ -0,0 +1,160 @@
+create-nodes
+================
+
+This role creates all the nodes required for the XCI deployment. In a baremetal
+deployment, it creates the OPNFV VM and provisions the physical servers. In a
+non-baremetal deployment, it creates the OPNFV VM and the rest of the VMs used
+to deploy scenarios. It is based on the bifrost role:
+
+https://github.com/openstack/bifrost/tree/master/playbooks/roles/bifrost-create-vm-nodes
+
+It creates the VMs or provisions the physical servers based on the pdf and idf
+documents, which describe the characteristics of the VMs or physical servers.
+For more information check the spec:
+
+https://github.com/opnfv/releng-xci/blob/master/docs/specs/infra_manager.rst
+
+
+Flow
+----
+
+The script xci/infra/bifrost/scripts/bifrost-provision.sh will call the
+playbook that starts executing the role:
+
+xci-setup-nodes.yml
+
+Note that at this stage the pdf and the opnfv_pdf_vm.yml are loaded.
+
+Some distro-specific tasks related to variables are done and then the
+prepare_libvirt playbook is run. This playbook, as the name suggests,
+gets everything ready to run libvirt.
+
+After that, the nodes_json_data dictionary is initialized. It collects the
+node data, which is finally dumped into the baremetal_json_file to be read
+by bifrost in the subsequent role.
+
+The OPNFV VM and the rest of the VMs are created from the libvirt XML
+template, which is filled in with the pdf and opnfv_pdf_vm.yml variables. In
+a baremetal deployment, nodes_json_data is populated by the
+baremetalhoststojson.yml playbook, which reads the information from the pdf.
+
+Finally, nodes_json_data is dumped into the baremetal_json_file.
+
+Requirements
+------------
+
+The following packages are required and ensured to be present:
+- libvirt-bin
+- qemu-utils
+- qemu-kvm
+- sgabios
+
+
+Warning
+-------
+
+- It is assumed that the OPNFV VM characteristics are not described in the pdf
+but in a similar document called opnfv_pdf_vm.yml. There is also a matching idf
+document, opnfv_idf_vm.yml.
+
+- All references to csv from bifrost-create-vm-nodes were removed.
+
+Role Variables
+--------------
+
+baremetal_json_file: Defaults to '/tmp/baremetal.json'. It contains the
+ required information for bifrost to configure the
+ VMs appropriately
+
+vm_disk_cache: Disk cache mode to use for VM disks.
+ Defaults to shell variable 'VM_DISK_CACHE', or,
+ if that is not set, to 'writeback'.
+
+node_names: Space-separated names for nodes to be created.
+ It is taken from the hostnames variable in idf.
+ If not set, VM names will be autogenerated.
+ Note that independent on the number of names in this list,
+ at most 'test_vm_num_nodes' VMs will be created.
+
+vm_network: Name of the libvirt network to create the nodes on.
+ Defaults to shell variable 'VM_NET_BRIDGE', or,
+ if that is not set, to 'default'.
+
+node_storage_pool: Name of the libvirt storage pool to create disks
+ for VMs in.
+ Defaults to shell variable 'LIBVIRT_STORAGE_POOL', or,
+ if that is not set, to 'default'.
+ If absent, this pool will be created.
+
+node_storage_pool_path: Path used by the libvirt storage pool
+ 'node_storage_pool' if it has to be created.
+ Defaults to "/var/lib/libvirt/images".
+
+node_logdir: Folder where to store VM logs.
+ Defaults to "/var/log/libvirt/baremetal_logs".
+
+vm_emulator: Path to emulator executable used to define VMs in libvirt.
+ Defaults to "/usr/bin/qemu-system-x86_64".
+ Generally users should not need to modify this setting,
+ as it is OS-specific and is overwritten by
+ os/distribution-specific defaults in this role when needed.
+
+vm_libvirt_uri: URI to connect to libvirt for networks, storage and VM
+ related actions.
+ Defaults to shell variable 'LIBVIRT_CONNECT_URI', or,
+ if that is not set, to 'qemu:///system'.
+ Note that currently connecting to remote libvirt is
+ not tested and is unsupported.
+
+network_interface: Name of the bridge to create when creating
+ 'vm_network' libvirt network.
+ Defaults to "virbr0".
+ Name and default of this option are chosen to be the same
+ as in 'bifrost-ironic-install' role.
+
+opnfv_vm_network_ip: IP for the 'network_interface' bridge.
+ Defaults to '192.168.122.1'.
+ This setting is applied only when 'vm_network'
+ was absent and is created from scratch.
+
+node_network_netmask: Subnet mask for 'network_interface' bridge.
+ Defaults to '255.255.255.0'.
+ This setting is applied only when 'vm_network'
+ was absent and is created from scratch.
+
+Dependencies
+------------
+
+None at this time.
+
+Example Playbook
+----------------
+
+- hosts: localhost
+ connection: local
+ become: yes
+ gather_facts: yes
+ roles:
+ - role: create-nodes
+
+License
+-------
+
+Copyright (c) 2018 SUSE Linux GmbH.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Author Information
+------------------
+
+mbuil@suse.com
diff --git a/xci/playbooks/roles/create-nodes/defaults/main.yml b/xci/playbooks/roles/create-nodes/defaults/main.yml
new file mode 100644
index 00000000..889f9c10
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/defaults/main.yml
@@ -0,0 +1,31 @@
+---
+# defaults file for the create-nodes role (derived from bifrost-create-vm-nodes)
+baremetal_json_file: '/tmp/baremetal.json'
+
+# We collect these parameters from the pdf
+vm_nic: "virtio"
+vm_disk_cache: unsafe
+node_groups: {}
+node_default_groups: "{{ lookup('env', 'DEFAULT_HOST_GROUPS').split() | default(['baremetal'], true) }}"
+
+network_bridge_admin: 'br-admin'
+network_bridge_mgmt: 'br-mgmt'
+
+vm_network_admin: "{{ lookup('env', 'VM_NET_BRIDGE') | default('admin', true) }}"
+vm_network_mgmt: "{{ lookup('env', 'VM_NET_BRIDGE_MGMT') | default('mgmt', true) }}"
+
+node_network_netmask: "255.255.255.0"
+
+node_storage_pool: "{{ lookup('env', 'LIBVIRT_STORAGE_POOL') | default('default', true) }}"
+node_storage_pool_path: "/var/lib/libvirt/images"
+node_logdir: "/var/log/libvirt/baremetal_logs"
+# NOTE(pas-ha) next two are generic values for most OSes, overridden by distro-specific vars
+vm_emulator: "/usr/bin/qemu-system-x86_64"
+# NOTE(pas-ha) not really tested with non-local qemu connections
+vm_libvirt_uri: "{{ lookup('env', 'LIBVIRT_CONNECT_URI') | default('qemu:///system', true) }}"
+
+opnfv_image_path: "/var/lib/libvirt/images"
+
+vms_to_create: "{{ (baremetal | bool) | ternary([opnfv_vm_pdf], [opnfv_vm_pdf] + nodes) }}"
+baremetal_nodes: "{{ (baremetal | bool) | ternary(nodes, omit) }}"
+libvirt_networks: "{{ (baremetal | bool) | ternary([vm_network_admin,vm_network_mgmt],[vm_network_admin]) }}"
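A quick sketch of the ternary logic used for vms_to_create above: in a baremetal deployment only the OPNFV VM is created as a VM, otherwise the scenario nodes become VMs as well (the node data below is illustrative):

# Mirrors: (baremetal | bool) | ternary([opnfv_vm_pdf], [opnfv_vm_pdf] + nodes)
opnfv_vm_pdf = {"name": "opnfv"}
nodes = [{"name": "node1"}, {"name": "node2"}]

def vms_to_create(baremetal):
    return [opnfv_vm_pdf] if baremetal else [opnfv_vm_pdf] + nodes

print([vm["name"] for vm in vms_to_create(True)])   # -> ['opnfv']
print([vm["name"] for vm in vms_to_create(False)])  # -> ['opnfv', 'node1', 'node2']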
diff --git a/xci/playbooks/roles/create-nodes/files/virtualbmc.conf b/xci/playbooks/roles/create-nodes/files/virtualbmc.conf
new file mode 100644
index 00000000..f8351dc1
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/files/virtualbmc.conf
@@ -0,0 +1,3 @@
+[log]
+logfile: /var/log/vbmc.log
+debug: true
diff --git a/xci/playbooks/roles/create-nodes/tasks/baremetalhoststojson.yml b/xci/playbooks/roles/create-nodes/tasks/baremetalhoststojson.yml
new file mode 100644
index 00000000..ef6ec345
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/tasks/baremetalhoststojson.yml
@@ -0,0 +1,91 @@
+---
+# Copyright 2018, SUSE Linux GmbH
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This playbook builds the json file with information about the baremetal nodes
+# which is read by ironic to start the pxe booting
+
+
+- name: BAREMETAL - Create file for static ip
+ file:
+ path: /tmp/baremetalstaticips
+ state: touch
+ group: root
+ owner: root
+ mode: 0644
+
+- name: "Generating the json describing baremetal nodes"
+ block:
+
+ - set_fact:
+ node_name: "{{ idf.kubespray.hostnames[item.name] }}"
+ when: installer_type == "kubespray"
+
+ - set_fact:
+ node_name: "{{ idf.osa.hostnames[item.name] }}"
+ when: installer_type == "osa"
+
+ - set_fact:
+ node_name: "{{ idf.osh.hostnames[item.name] }}"
+ when: installer_type == "osh"
+
+ - set_fact:
+ host_group: "{{ node_default_groups }}"
+
+ - set_fact:
+ host_group: "{{ node_default_groups | union(node_groups[node_name]) }}"
+ when: node_groups[node_name] is defined
+
+ - name: BAREMETAL - Fetch the ip
+ set_fact:
+ admin_ip: "{{ item.interfaces[idf.net_config.admin.interface].address }}"
+
+ - name: BAREMETAL - Fetch the mac
+ set_fact:
+ admin_mac: "{{ item.interfaces[idf.net_config.admin.interface].mac_address }}"
+
+ - name: BAREMETAL - set the json entry for baremetal nodes
+ set_fact:
+ node_data:
+ name: "{{ node_name }}"
+ uuid: "{{ node_name | to_uuid }}"
+ host_groups: "{{ host_group }}"
+ driver: "ipmi"
+ driver_info:
+ power:
+ ipmi_address: "{{ item.remote_management.address }}"
+ ipmi_port: "{{ virtual_ipmi_port | default('623') }}"
+ ipmi_username: "{{ item.remote_management.user }}"
+ ipmi_password: "{{ item.remote_management.pass }}"
+ nics:
+ - mac: "{{ admin_mac }}"
+ ansible_ssh_host: "{{ admin_ip }}"
+ ipv4_address: "{{ admin_ip }}"
+ properties:
+ cpu_arch: "{{ item.node.arch }}"
+ ram: "{{ item.node.memory.rstrip('G') }}"
+ cpus: "{{ item.node.cpus }}"
+ disk_size: "{{ item.disks[0].disk_capacity.rstrip('G') }}"
+
+ - name: BAREMETAL - Static ip config for dnsmasq
+ lineinfile:
+ path: /tmp/baremetalstaticips
+ state: present
+ line: '{{ admin_mac }},{{ admin_ip }}'
+
+ - name: BAREMETAL - add created node info
+ set_fact:
+ nodes_json_data: "{{ nodes_json_data | combine({node_name: node_data}) }}"
+
+ when: (num_nodes | int) > (nodes_json_data | length | int) + 1
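Once all nodes are processed, nodes_json_data is dumped into baremetal_json_file for bifrost to consume. A sketch of what a single entry could look like, with every value made up and the keys mirroring the node_data set_fact above (uuid.uuid5 is only a stand-in for Ansible's to_uuid filter):

import json
import uuid

node_name = "controller00"
node_data = {
    "name": node_name,
    "uuid": str(uuid.uuid5(uuid.NAMESPACE_DNS, node_name)),  # stand-in for to_uuid
    "host_groups": ["baremetal"],
    "driver": "ipmi",
    "driver_info": {
        "power": {
            "ipmi_address": "172.30.8.75",
            "ipmi_port": "623",
            "ipmi_username": "admin",
            "ipmi_password": "password",
        },
    },
    "nics": [{"mac": "52:54:00:aa:bb:cc"}],
    "ansible_ssh_host": "172.30.8.101",
    "ipv4_address": "172.30.8.101",
    "properties": {"cpu_arch": "x86_64", "ram": "16", "cpus": "8", "disk_size": "100"},
}

print(json.dumps({node_name: node_data}, indent=2))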
diff --git a/xci/playbooks/roles/create-nodes/tasks/create_vm.yml b/xci/playbooks/roles/create-nodes/tasks/create_vm.yml
new file mode 100644
index 00000000..ac55bf32
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/tasks/create_vm.yml
@@ -0,0 +1,198 @@
+---
+- name: "Creating VM"
+ block:
+ - set_fact:
+ vm_name: "{{ idf.kubespray.hostnames[item.1.name] }}"
+ when: installer_type == "kubespray"
+
+ - set_fact:
+ vm_name: "{{ idf.osa.hostnames[item.1.name] }}"
+ when: installer_type == "osa"
+
+ - set_fact:
+ vm_name: "{{ idf.osh.hostnames[item.1.name] }}"
+ when: installer_type == "osh"
+
+ - set_fact:
+ vm_log_file: "{{ node_logdir }}/{{ vm_name }}_console.log"
+ vm_host_group: "{{ node_default_groups }}"
+
+ - set_fact:
+ vm_host_group: "{{ node_default_groups | union(node_groups[vm_name]) }}"
+ when: node_groups[vm_name] is defined
+
+ - name: set prealloc arg for Debian
+ set_fact:
+ prealloc: "--prealloc-metadata"
+ when:
+ - ansible_os_family == 'Debian'
+ - vm_libvirt_uri == 'qemu:///system'
+
+ - name: list info on pools
+ virt_pool:
+ command: facts
+ uri: "{{ vm_libvirt_uri }}"
+
+ - name: list existing vms
+ virt:
+ command: list_vms
+ register: existing_vms
+
+ - block:
+ - name: Check if volume exists
+ stat:
+ path: "{{ opnfv_image_path }}/{{ vm_name }}.qcow2"
+ register: _vm_volume_prepared
+
+ - name: Resize opnfv VM image to {{ item.1.disks[0].disk_capacity }}
+ command: "qemu-img resize {{ opnfv_image_path }}/opnfv.qcow2 {{ item.1.disks[0].disk_capacity }}"
+ when:
+ - vm_name == 'opnfv'
+ - _vm_volume_prepared.stat.exists
+
+ # NOTE(pas-ha) Ansible still lacks modules to operate on libvirt volumes
+ # mbuil: Assuming there is only one disk [0]
+ - name: create volume for vm
+ command: >
+ virsh --connect {{ vm_libvirt_uri }}
+ vol-create-as {{ node_storage_pool }} {{ vm_name }}.qcow2
+ {{ item.1.disks[0].disk_capacity }}
+ --format qcow2 {{ prealloc|default("") }}
+ when:
+ - not _vm_volume_prepared.stat.exists
+ - (vm_name + '.qcow2') not in ansible_libvirt_pools[node_storage_pool].volumes
+
+ - name: set path to the volume created
+ set_fact:
+ vm_volume_path: "{{ ansible_libvirt_pools[node_storage_pool].path }}/{{ vm_name }}.qcow2"
+
+ - name: pre-touch the vm volume
+ file:
+ state: touch
+ path: "{{ vm_volume_path }}"
+ when: vm_libvirt_uri == 'qemu:///system'
+
+ # NOTE(TheJulia): CentOS default installs with an XFS root, and chattr
+ # fails to set +C on XFS. This could be more elegant, however the use
+ # case is for CI testing.
+ - name: set copy-on-write for volume on non-CentOS systems
+ command: chattr +C {{ vm_volume_path }}
+ ignore_errors: yes
+ when:
+ - ansible_distribution != 'CentOS'
+ - vm_libvirt_uri == 'qemu:///system'
+
+ # Fetches the xml descriptor from the template
+ - name: create_vm
+ virt:
+ command: define
+ name: "{{ vm_name }}"
+ uri: "{{ vm_libvirt_uri }}"
+ xml: "{{ lookup('template', 'vm.xml.j2') }}"
+
+ rescue:
+ - name: "Execute `dmesg` to collect debugging output should VM creation fail."
+ command: dmesg
+ - name: >
+ "Execute `virsh capabilities` to collect debugging output
+ should VM creation fail."
+ command: virsh capabilities
+ - name: "Abort due to failed VM creation"
+ fail: >
+ msg="VM creation step failed, please review dmesg
+ output for additional details"
+ when: vm_name not in existing_vms.list_vms
+
+ # TODO(pas-ha) replace 'command: vbmc ...' tasks
+ # with a custom Ansible module using vbmc Python API
+ - name: get list of nodes from virtualbmc
+ command: vbmc list
+ environment:
+ PATH: "{{ lookup('env', 'XCI_VENV') }}/bin"
+ register: vbmc_list
+
+ - debug: var=vbmc_list
+
+ # NOTE(NobodyCam): Space at the end of the find clause is required for proper matching.
+ - name: delete vm from virtualbmc if it is there
+ command: vbmc delete {{ vm_name }}
+ environment:
+ PATH: "{{ lookup('env', 'XCI_VENV') }}/bin"
+ when: vbmc_list.stdout.find(vm_name + ' ') != -1
+
+ - set_fact:
+ virtual_ipmi_port: "{{ (vm_ipmi_port_start|default(623) | int ) + (item.0 | int) }}"
+
+ - name: plug vm into vbmc
+ command: vbmc add {{ vm_name }} --libvirt-uri {{ vm_libvirt_uri }} --port {{ virtual_ipmi_port }}
+ environment:
+ PATH: "{{ lookup('env', 'XCI_VENV') }}/bin"
+
+ - name: start virtualbmc
+ command: vbmc start {{ vm_name }}
+ environment:
+ PATH: "{{ lookup('env', 'XCI_VENV') }}/bin"
+
+ - name: get list of nodes from virtualbmc
+ command: vbmc list
+ environment:
+ PATH: "{{ lookup('env', 'XCI_VENV') }}/bin"
+ register: vbmc_list2
+
+ - debug: var=vbmc_list2
+
+ - name: get XML of the vm
+ virt:
+ name: "{{ vm_name }}"
+ command: get_xml
+ register: vm_xml
+
+ - name: Fetch the index for admin network
+ set_fact:
+ admin_index: "{{ (vm_name == 'opnfv') | ternary(opnfv_vm_idf.net_config.admin.interface, idf.net_config.admin.interface) | int }}"
+
+ - name: Fetch the ip
+ set_fact:
+ vm_ip: "{{ item.1.interfaces[admin_index | int].address }}"
+
+ # Assumes there is only a single NIC per VM
+ - name: get MAC from vm XML
+ set_fact:
+ vm_mac: "{{ (vm_xml.get_xml | regex_findall(\"<mac address='.*'/>\") | first).split('=') | last | regex_replace(\"['/>]\", '') }}"
+
+ # NOTE(pas-ha) using default username and password set by virtualbmc - "admin" and "password" respectively
+ # see vbmc add --help
+ - name: set the json entry for vm
+ set_fact:
+ vm_data:
+ name: "{{ vm_name }}"
+ uuid: "{{ vm_name | to_uuid }}"
+ host_groups: "{{ vm_host_group }}"
+ driver: "ipmi"
+ driver_info:
+ power:
+ ipmi_address: "192.168.122.1"
+ ipmi_port: "{{ virtual_ipmi_port }}"
+ ipmi_username: "{{ item.1.remote_management.user }}"
+ ipmi_password: "{{ item.1.remote_management.pass }}"
+ nics:
+ - mac: "{{ vm_mac }}"
+ ansible_ssh_host: "{{ vm_ip }}"
+ ipv4_address: "{{ vm_ip }}"
+ properties:
+ cpu_arch: "{{ item.1.node.arch }}"
+ ram: "{{ item.1.node.memory.rstrip('G') }}"
+ cpus: "{{ item.1.node.cpus }}"
+ disk_size: "{{ item.1.disks[0].disk_capacity.rstrip('G') }}"
+
+ - name: add created vm info
+ set_fact:
+ nodes_json_data: "{{ nodes_json_data | combine({vm_name: vm_data}) }}"
+ when: vm_name != 'opnfv'
+
+ - name: Record OPNFV VM ip
+ set_fact:
+ opnfv_vm_ip: "{{ vm_ip }}"
+ when: vm_name == 'opnfv'
+
+ when: (num_nodes | int) > (item.0 | int)
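For reference, one entry that the tasks above add to nodes_json_data would render roughly as the following sketch; every value is illustrative, and "admin"/"password" are simply the virtualbmc defaults mentioned in the NOTE above:

    node1:
      name: node1
      uuid: 2d4d86a3-...                 # vm_name | to_uuid (truncated here)
      host_groups: ["kube-node"]         # node_default_groups plus any node_groups entry
      driver: ipmi
      driver_info:
        power:
          ipmi_address: 192.168.122.1
          ipmi_port: 624                 # vm_ipmi_port_start + node index
          ipmi_username: admin           # virtualbmc defaults
          ipmi_password: password
      nics:
        - mac: "52:54:00:00:00:01"
      ansible_ssh_host: 192.168.122.11
      ipv4_address: 192.168.122.11
      properties:
        cpu_arch: x86_64
        ram: "16"                        # memory with the trailing 'G' stripped
        cpus: "8"
        disk_size: "80"                  # disk_capacity with the trailing 'G' stripped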
diff --git a/xci/playbooks/roles/create-nodes/tasks/download_opnfvimage.yml b/xci/playbooks/roles/create-nodes/tasks/download_opnfvimage.yml
new file mode 100644
index 00000000..a227bc4f
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/tasks/download_opnfvimage.yml
@@ -0,0 +1,32 @@
+---
+- name: Download the {{ xci_distro }} image checksum file
+ get_url:
+ dest: "{{ xci_cache }}/deployment_image.qcow2.sha256.txt"
+ force: no
+ url: http://artifacts.opnfv.org/releng/xci/images/{{ xci_distro }}.qcow2.sha256.txt
+ timeout: 3000
+- name: Extract checksum
+ shell: awk '{print $1}' "{{ xci_cache }}/deployment_image.qcow2.sha256.txt"
+ register: _image_checksum
+- fail:
+ msg: "Failed to get image checksum"
+ when: _image_checksum.stdout == ''
+- set_fact:
+ image_checksum: "{{ _image_checksum.stdout }}"
+- name: Download the {{ xci_distro }} image file
+ get_url:
+ url: http://artifacts.opnfv.org/releng/xci/images/{{ xci_distro }}.qcow2
+ checksum: "sha256:{{ image_checksum }}"
+ timeout: 3000
+ dest: "{{ xci_cache }}/deployment_image.qcow2"
+ force: no
+- name: Set correct mode for deployment_image.qcow2 file
+ file:
+ path: "{{ xci_cache }}/deployment_image.qcow2"
+ mode: '0755'
+ owner: 'root'
+ group: 'root'
+
+- name: Create copy of original deployment image
+ shell: "cp {{ xci_cache }}/deployment_image.qcow2 {{ opnfv_image_path }}/opnfv.qcow2"
+ become: yes
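The checksum is already enforced by get_url above, so a re-download only happens on mismatch. If an explicit post-download verification of the cached copy is wanted, a sketch along these lines could be appended (the task names and the _cached_image register are hypothetical, not part of this role):

    - name: Compute sha256 of the cached deployment image
      stat:
        path: "{{ xci_cache }}/deployment_image.qcow2"
        checksum_algorithm: sha256
      register: _cached_image

    - name: Assert the cached image matches the published checksum
      assert:
        that: _cached_image.stat.checksum == image_checksum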
diff --git a/xci/playbooks/roles/create-nodes/tasks/main.yml b/xci/playbooks/roles/create-nodes/tasks/main.yml
new file mode 100644
index 00000000..607ac494
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/tasks/main.yml
@@ -0,0 +1,54 @@
+---
+# baremetal_json_file may be generated from the pdf/idf definitions
+
+- name: "Load distribution defaults"
+ include_vars: "{{ ansible_os_family | lower }}.yml"
+
+# Install the distribution-specific packages loaded above
+- name: "Install required packages"
+ package:
+ name: "{{ required_packages }}"
+ update_cache: "{{ (ansible_pkg_mgr in ['apt', 'zypper']) | ternary('yes', omit) }}"
+ state: present
+
+- include_tasks: prepare_libvirt.yml
+ with_items: "{{ libvirt_networks }}"
+
+- include_tasks: download_opnfvimage.yml
+
+- name: create placeholder var for vm entries in JSON format
+ set_fact:
+ nodes_json_data: {}
+
+# Create the VMs; the opnfv VM is the first in the list
+- include_tasks: create_vm.yml
+ with_indexed_items: "{{ vms_to_create }}"
+
+- include_tasks: baremetalhoststojson.yml
+ with_items: "{{ baremetal_nodes }}"
+
+- name: Start the opnfv vm
+ virt:
+ command: start
+ name: opnfv
+
+- name: remove previous baremetal data file
+ file:
+ state: absent
+ path: "{{ baremetal_json_file }}"
+
+# nodes_json_data was populated by the create_vm tasks above
+- name: write to baremetal json file
+ copy:
+ dest: "{{ baremetal_json_file }}"
+ content: "{{ nodes_json_data | to_nice_json }}"
+
+- name: >-
+ Set file permissions such that the baremetal data file
+ can be read by the user executing Ansible
+ file:
+ path: "{{ baremetal_json_file }}"
+ owner: "{{ ansible_env.SUDO_USER }}"
+ when: >
+ ansible_env.SUDO_USER is defined and
+ baremetal_json_file != ""
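Taken together, a minimal play driving this role could look like the sketch below; the play is illustrative only, and assumes vms_to_create, baremetal_nodes and libvirt_networks are supplied by the surrounding XCI variable files as usual:

    - hosts: localhost
      connection: local
      become: yes
      vars:
        baremetal_json_file: /tmp/baremetal.json   # illustrative path
      roles:
        - role: create-nodes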
diff --git a/xci/playbooks/roles/create-nodes/tasks/prepare_libvirt.yml b/xci/playbooks/roles/create-nodes/tasks/prepare_libvirt.yml
new file mode 100644
index 00000000..06afaec3
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/tasks/prepare_libvirt.yml
@@ -0,0 +1,139 @@
+---
+- name: "Restart libvirt service"
+ service: name="{{libvirt_service_name}}" state=restarted
+
+# NOTE(Shrews) We need to enable ip forwarding for the libvirt bridge to
+# operate properly with dnsmasq. This should be done before starting dnsmasq.
+- name: "Enable IP forwarding in sysctl"
+ sysctl:
+ name: "net.ipv4.ip_forward"
+ value: 1
+ sysctl_set: yes
+ state: present
+ reload: yes
+
+# NOTE(Shrews) Ubuntu packaging+apparmor issue prevents libvirt from loading
+# the ROM from /usr/share/misc.
+- name: "Look for sgabios in {{ sgabios_dir }}"
+ stat: path={{ sgabios_dir }}/sgabios.bin
+ register: test_sgabios_qemu
+
+- name: "Look for sgabios in /usr/share/misc"
+ stat: path=/usr/share/misc/sgabios.bin
+ register: test_sgabios_misc
+
+- name: "Place sgabios.bin"
+ command: cp /usr/share/misc/sgabios.bin /usr/share/qemu/sgabios.bin
+ when:
+ - not test_sgabios_qemu.stat.exists
+ - test_sgabios_misc.stat.exists
+
+# NOTE(TheJulia): In order to prevent conflicts, stop
+# dnsmasq before libvirt restarts.
+# TODO(TheJulia): We shouldn't need to do this, but the
+# libvirt dhcp instance conflicts with our specific config,
+# and taking this path allows us to not refactor dhcp at
+# this moment. Our DHCP serving should be refactored
+# so we don't need to do this.
+- name: "Stop default dnsmasq service"
+ service:
+ name: dnsmasq
+ state: stopped
+ ignore_errors: true
+
+# NOTE(TheJulia): If you test in a VM, this might be
+# helpful when your host was originally installed
+# with the default 192.168.122.0/24 network.
+- name: destroy libvirt network
+ virt_net:
+ name: "{{ item }}"
+ state: absent
+ uri: "{{ vm_libvirt_uri }}"
+
+# Ubuntu creates a default network when installing libvirt.
+# This network uses the 192.168.122.0/24 range and thus
+# conflicts with our admin network
+- name: destroy the default libvirt network
+ virt_net:
+ name: "default"
+ state: absent
+ uri: "{{ vm_libvirt_uri }}"
+
+- name: ensure libvirt network is present
+ virt_net:
+ name: "{{ item }}"
+ state: present
+ xml: "{{ lookup('template', 'net-'+item+'.xml.j2') }}"
+ uri: "{{ vm_libvirt_uri }}"
+
+- name: find facts on libvirt networks
+ virt_net:
+ command: facts
+ uri: "{{ vm_libvirt_uri }}"
+
+- name: "Delete network interface if virtual network is not active"
+ command: ip link del {{ ansible_libvirt_networks[item].bridge }}
+ when:
+ - ansible_libvirt_networks[item].state != 'active'
+ - vm_libvirt_uri == 'qemu:///system'
+ ignore_errors: yes
+
+- name: set libvirt network to autostart
+ virt_net:
+ name: "{{ item }}"
+ autostart: yes
+ uri: "{{ vm_libvirt_uri }}"
+
+- name: ensure libvirt network is running
+ virt_net:
+ name: "{{ item }}"
+ state: active
+ uri: "{{ vm_libvirt_uri }}"
+
+- name: get libvirt network status
+ virt_net:
+ name: "{{ item }}"
+ command: status
+ uri: "{{ vm_libvirt_uri }}"
+ register: test_vm_net_status
+
+- name: fail if libvirt network is not active
+ assert:
+ that: test_vm_net_status.status == 'active'
+
+- name: define a libvirt pool if not set
+ virt_pool:
+ name: "{{ node_storage_pool }}"
+ state: present
+ uri: "{{ vm_libvirt_uri }}"
+ xml: "{{ lookup('template', 'pool_dir.xml.j2') }}"
+
+- name: ensure libvirt pool is running
+ virt_pool:
+ name: "{{ node_storage_pool }}"
+ state: active
+ autostart: yes
+ uri: "{{ vm_libvirt_uri }}"
+
+- name: create dir for bm logs
+ file:
+ state: directory
+ path: "{{ node_logdir }}"
+ recurse: yes
+ mode: "0755"
+
+- name: install virtualbmc
+ pip:
+ name: virtualbmc
+ version: 1.5 # versions newer than 1.3 need the zmq dependency.
+ virtualenv: "{{ lookup('env', 'XCI_VENV') }}"
+
+- name: Create directory for the config of vbmc
+ file:
+ path: /etc/virtualbmc
+ state: directory
+
+- name: Place the config for virtualbmc
+ copy:
+ src: virtualbmc.conf
+ dest: /etc/virtualbmc/virtualbmc.conf
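Since virtualbmc is installed into the XCI virtualenv rather than system-wide, a quick sanity check such as the following sketch could confirm the vbmc CLI is usable before create_vm.yml starts driving it (this task is illustrative and not part of the role):

    - name: Verify vbmc is usable from the XCI virtualenv
      command: "{{ lookup('env', 'XCI_VENV') }}/bin/vbmc --help"
      changed_when: false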
diff --git a/xci/playbooks/roles/create-nodes/templates/net-admin.xml.j2 b/xci/playbooks/roles/create-nodes/templates/net-admin.xml.j2
new file mode 100644
index 00000000..aedbbeb7
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/templates/net-admin.xml.j2
@@ -0,0 +1,14 @@
+<network>
+ <name>{{ item }}</name>
+ <forward mode='nat'>
+ <nat>
+ <port start='1024' end='65535'/>
+ </nat>
+ </forward>
+ <bridge name='br-{{ item }}' stp='on' delay='0'/>
+ <ip address='{{ opnfv_vm_pdf.interfaces[opnfv_vm_idf.net_config.admin.interface].gateway }}' netmask='255.255.255.0'>
+ <dhcp>
+ <host mac="{{ opnfv_vm_pdf.interfaces[opnfv_vm_idf.net_config.admin.interface].mac_address }}" ip="{{ opnfv_vm_pdf.interfaces[opnfv_vm_idf.net_config.admin.interface].address }}"/>
+ </dhcp>
+ </ip>
+</network>
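The template above resolves its addresses from the opnfv VM's PDF/IDF data; a minimal variable layout that would satisfy it could look like this sketch, with values chosen to match the 192.168.122.0/24 defaults used elsewhere in this change:

    opnfv_vm_idf:
      net_config:
        admin:
          interface: 0                     # index into the interfaces list below
    opnfv_vm_pdf:
      interfaces:
        - gateway: 192.168.122.1           # becomes the bridge IP and DHCP server
          mac_address: "52:54:00:aa:bb:cc" # static DHCP reservation for the opnfv VM
          address: 192.168.122.2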
diff --git a/xci/playbooks/roles/create-nodes/templates/net-mgmt.xml.j2 b/xci/playbooks/roles/create-nodes/templates/net-mgmt.xml.j2
new file mode 100644
index 00000000..4a9964c3
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/templates/net-mgmt.xml.j2
@@ -0,0 +1,11 @@
+<network>
+ <name>{{ item }}</name>
+ <forward mode='route'>
+ </forward>
+ <bridge name='br-{{ item }}' stp='on' delay='0'/>
+ <ip address='{{ opnfv_vm_pdf.interfaces[opnfv_vm_idf.net_config.mgmt.interface].gateway }}' netmask='255.255.255.0'>
+ <dhcp>
+ <host mac="{{ opnfv_vm_pdf.interfaces[opnfv_vm_idf.net_config.mgmt.interface].mac_address }}" ip="{{ opnfv_vm_pdf.interfaces[opnfv_vm_idf.net_config.mgmt.interface].address }}"/>
+ </dhcp>
+ </ip>
+</network>
diff --git a/xci/playbooks/roles/create-nodes/templates/net.xml.j2 b/xci/playbooks/roles/create-nodes/templates/net.xml.j2
new file mode 100644
index 00000000..7e372ffe
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/templates/net.xml.j2
@@ -0,0 +1,14 @@
+<network>
+ <name>{{ vm_network }}</name>
+ <forward mode='nat'>
+ <nat>
+ <port start='1024' end='65535'/>
+ </nat>
+ </forward>
+ <bridge name='{{ network_interface }}' stp='on' delay='0'/>
+ <ip address='{{ opnfv_vm_pdf.interfaces[opnfv_vm_idf.net_config.admin.interface].gateway }}' netmask='{{ node_network_netmask }}'>
+ <dhcp>
+ <host mac="{{ opnfv_vm_pdf.interfaces[opnfv_vm_idf.net_config.admin.interface].mac_address }}" ip="{{ opnfv_vm_pdf.interfaces[opnfv_vm_idf.net_config.admin.interface].address }}"/>
+ </dhcp>
+ </ip>
+</network>
diff --git a/xci/playbooks/roles/create-nodes/templates/pool_dir.xml.j2 b/xci/playbooks/roles/create-nodes/templates/pool_dir.xml.j2
new file mode 100644
index 00000000..e4645deb
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/templates/pool_dir.xml.j2
@@ -0,0 +1,7 @@
+<pool type='dir'>
+ <name>{{ node_storage_pool }}</name>
+ <target>
+ <path>{{ node_storage_pool_path }}</path>
+ </target>
+</pool>
+
diff --git a/xci/playbooks/roles/create-nodes/templates/vm.xml.j2 b/xci/playbooks/roles/create-nodes/templates/vm.xml.j2
new file mode 100644
index 00000000..9fad42b8
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/templates/vm.xml.j2
@@ -0,0 +1,69 @@
+<domain type='{{ vm_domain_type }}'>
+ <name>{{ vm_name }}</name>
+ <memory unit='GiB'>{{ item.1.node.memory.rstrip('G') }}</memory>
+ <vcpu>{{ item.1.node.cpus }}</vcpu>
+ <os>
+ <type arch='{{ item.1.node.arch }}' machine='{{ item.1.node.model }}'>hvm</type>
+ {%- if 'opnfv' in vm_name -%}
+ <boot dev='hd'/>
+ {%- else -%}
+ <boot dev='network'/>
+ {% endif -%}
+ <bootmenu enable='no'/>
+ <bios useserial='yes' rebootTimeout='10000'/>
+ </os>
+ <features>
+ <acpi/>
+ <apic/>
+ <pae/>
+ </features>
+ <cpu mode='{{ item.1.node.cpu_cflags }}'>
+ <model fallback='allow'/>
+ </cpu>
+ <clock offset='utc'/>
+ <on_poweroff>destroy</on_poweroff>
+ <on_reboot>restart</on_reboot>
+ <on_crash>restart</on_crash>
+ <devices>
+ <emulator>{{ vm_emulator }}</emulator>
+ <disk type='file' device='disk'>
+ <driver name='qemu' type='qcow2' cache='{{ vm_disk_cache }}'/>
+ <source file='{{ vm_volume_path }}'/>
+ <target dev='vda' bus='virtio'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
+ </disk>
+ <controller type='ide' index='0'>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x1'/>
+ </controller>
+ <interface type='network'>
+ <source network='{{ vm_network_admin }}'/>
+ <model type='{{ vm_nic }}'/>
+ {%- if vm_name == 'opnfv' -%}
+ <mac address='{{ item.1.interfaces[opnfv_vm_idf.net_config.admin.interface].mac_address }}'/>
+ {%- else -%}
+ <mac address='{{ item.1.interfaces[idf.net_config.admin.interface].mac_address }}'/>
+ {%- endif -%}
+ </interface>
+ {%- if baremetal | bool -%}
+ <interface type='network'>
+ <source network='{{ vm_network_mgmt }}'/>
+ <model type='{{ vm_nic }}'/>
+ <mac address='{{ item.1.interfaces[opnfv_vm_idf.net_config.mgmt.interface].mac_address }}'/>
+ </interface>
+ {%- endif -%}
+ <input type='mouse' bus='ps2'/>
+ <graphics type='vnc' port='-1' autoport='yes'/>
+ <video>
+ <model type='cirrus' vram='9216' heads='1'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
+ </video>
+ <serial type='file'>
+ <source path='{{ vm_log_file }}'/>
+ <target port='1'/>
+ <alias name='serial1'/>
+ </serial>
+ <memballoon model='virtio'>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
+ </memballoon>
+ </devices>
+</domain>
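For context, one node entry as consumed by this template and by create_vm.yml could be shaped as follows; all values are illustrative, and "admin"/"password" mirror the virtualbmc defaults:

    - name: node1
      node:
        arch: x86_64
        model: pc                  # libvirt machine type
        memory: 16G                # trailing 'G' is stripped where a bare number is needed
        cpus: 8
        cpu_cflags: host-model     # used as the <cpu mode='...'>
      disks:
        - disk_capacity: 80G
      interfaces:
        - mac_address: "52:54:00:00:00:01"
          address: 192.168.122.11
      remote_management:
        user: admin
        pass: password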
diff --git a/xci/playbooks/roles/create-nodes/vars/debian.yml b/xci/playbooks/roles/create-nodes/vars/debian.yml
new file mode 100644
index 00000000..bcfc47d5
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/vars/debian.yml
@@ -0,0 +1,13 @@
+---
+sgabios_dir: /usr/share/qemu/
+libvirt_service_name: libvirt-bin
+required_packages:
+ - libvirt-bin
+ - qemu-utils
+ - qemu-kvm
+ - qemu-system-x86
+ - sgabios
+ - pkg-config
+ - libvirt-dev
+ - python-lxml
+ - python-libvirt
diff --git a/xci/playbooks/roles/create-nodes/vars/redhat.yml b/xci/playbooks/roles/create-nodes/vars/redhat.yml
new file mode 100644
index 00000000..2b285110
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/vars/redhat.yml
@@ -0,0 +1,17 @@
+---
+sgabios_dir: /usr/share/sgabios/
+libvirt_service_name: libvirtd
+required_packages:
+ - qemu-img
+ - qemu-kvm-tools
+ - qemu-kvm
+ - qemu-kvm-common
+ - qemu-system-x86
+ - sgabios-bin
+ - libvirt
+ - libvirt-client
+ - libvirt-daemon
+ - pkgconfig
+ - libvirt-devel
+ - libvirt-python
+ - python-lxml
diff --git a/xci/playbooks/roles/create-nodes/vars/suse.yml b/xci/playbooks/roles/create-nodes/vars/suse.yml
new file mode 100644
index 00000000..7e4c41ef
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/vars/suse.yml
@@ -0,0 +1,15 @@
+---
+sgabios_dir: /usr/share/sgabios/
+libvirt_service_name: libvirtd
+required_packages:
+ - qemu-tools
+ - qemu-kvm
+ - qemu-x86
+ - qemu-sgabios
+ - libvirt
+ - libvirt-client
+ - libvirt-daemon
+ - pkg-config
+ - libvirt-devel
+ - python-lxml
+ - libvirt-python
diff --git a/xci/playbooks/roles/prepare-functest/defaults/main.yml b/xci/playbooks/roles/prepare-functest/defaults/main.yml
deleted file mode 100644
index a3638302..00000000
--- a/xci/playbooks/roles/prepare-functest/defaults/main.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-# Gateway parameters
-gateway_ip: "10.10.10.1"
-gateway_ip_mask: "10.10.10.1/24"
-broadcast_ip: "10.10.10.255"
-gateway_interface: "br-vlan"
-
-# Network parameters
-external_network: "ext-net"
-
-# Subnet parameters
-subnet_name: "ext-subnet"
-allocation_pool: "start=10.10.10.5,end=10.10.10.254"
-subnet_cidr: "10.10.10.0/24"
diff --git a/xci/playbooks/roles/prepare-functest/templates/run-functest.sh.j2 b/xci/playbooks/roles/prepare-functest/templates/run-functest.sh.j2
deleted file mode 100644
index 7856cb0e..00000000
--- a/xci/playbooks/roles/prepare-functest/templates/run-functest.sh.j2
+++ /dev/null
@@ -1,84 +0,0 @@
-#!/bin/bash
-
-# Variables that we need to pass from XCI to functest
-XCI_ENV=(INSTALLER_TYPE XCI_FLAVOR OPENSTACK_OSA_VERSION CI_LOOP BUILD_TAG NODE_NAME FUNCTEST_MODE FUNCTEST_SUITE_NAME)
-
-# Create directory to store functest logs
-mkdir -p ~/results/
-
-# Extract variables from xci.env file
-if [[ -e /root/xci.env ]]; then
- for x in ${XCI_ENV[@]}; do
- grep "^${x}=" /root/xci.env >> /root/env
- done
- # Parse the XCI's DEPLOY_SCENARIO and XCI_FLAVOR variables and
- # set the functest container's DEPLOY_SCENARIO variable in the
- # following format <scenario>-<flavor>. But the XCI's mini flavor
- # is converted into noha.
- DEPLOY_SCENARIO=`grep -Po '(?<=DEPLOY_SCENARIO=).*' /root/xci.env`
- XCI_FLAVOR=`grep -Po '(?<=XCI_FLAVOR=).*' /root/xci.env`
- XCI_FLAVOR=${XCI_FLAVOR/mini/noha}
- echo "DEPLOY_SCENARIO=$DEPLOY_SCENARIO-$XCI_FLAVOR" >> /root/env
-fi
-
-# Dump the env file
-echo "------------------------------------------------------"
-echo "------------- functest environment file --------------"
-cat /root/env
-echo "------------------------------------------------------"
-
-# we need to ensure the necessary environment variables are sourced
-source /root/env
-
-{% if 'os-' in deploy_scenario %}
-{# stuff needed for OpenStack based scenarios #}
-source /root/openrc
-
-openstack --insecure network create --external \
- --provider-physical-network flat \
- --provider-network-type flat {{ external_network }}
-
-openstack --insecure subnet create --network {{ external_network }} \
- --allocation-pool {{ allocation_pool }} \
- --subnet-range {{ subnet_cidr }} --gateway {{ gateway_ip }} \
- --no-dhcp {{ subnet_name }}
-
-# the needed images differ between the suites so avoid downloading unnecessary images
-if [[ "$FUNCTEST_SUITE_NAME" =~ "healthcheck" ]]; then
- mkdir ~/images && cd ~/images && wget -q http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img && cd ~
-elif [[ "$FUNCTEST_SUITE_NAME" =~ "smoke" ]]; then
- mkdir -p images && wget -q -O- https://git.opnfv.org/functest/plain/functest/ci/download_images.sh | bash -s -- images && ls -1 images/*
-else
- echo "Unsupported test suite for functest"
- exit 1
-fi
-
-# docker image to use will be different for healthcheck and smoke test
-DOCKER_IMAGE_NAME="opnfv/functest-${FUNCTEST_SUITE_NAME}"
-
-sudo docker run --env-file env \
- -v $(pwd)/openrc:/home/opnfv/functest/conf/env_file \
- -v $(pwd)/images:/home/opnfv/functest/images \
- -v $(pwd)/results:/home/opnfv/functest/results \
- $DOCKER_IMAGE_NAME
-{% else %}
-{# stuff needed for Kubernetes based scenarios #}
-# Create k8s.creds file for functest
-KUBE_MASTER_URL=$(grep -r server ~/.kube/config | awk '{print $2}')
-KUBE_MASTER_IP=$(echo $KUBE_MASTER_URL | awk -F "[:/]" '{print $4}')
-cat << EOF > ~/k8s.creds
-KUBERNETES_PROVIDER=local
-KUBE_MASTER_URL=$KUBE_MASTER_URL
-KUBE_MASTER_IP=$KUBE_MASTER_IP
-EOF
-
-# docker image to use will be different for healthcheck and smoke test
-DOCKER_IMAGE_NAME="opnfv/functest-kubernetes-${FUNCTEST_SUITE_NAME}"
-
-sudo docker run --env-file env \
- -v $(pwd)/k8s.creds:/home/opnfv/functest/conf/env_file \
- -v $(pwd)/.kube/config:/root/.kube/config \
- -v $(pwd)/results:/home/opnfv/functest/results \
- $DOCKER_IMAGE_NAME
-{% endif %}
-
diff --git a/xci/playbooks/roles/prepare-tests/defaults/main.yml b/xci/playbooks/roles/prepare-tests/defaults/main.yml
new file mode 100644
index 00000000..7002586c
--- /dev/null
+++ b/xci/playbooks/roles/prepare-tests/defaults/main.yml
@@ -0,0 +1,14 @@
+---
+# Gateway parameters
+gateway_ip: "192.168.122.1"
+gateway_ip_mask: "192.168.122.1/24"
+broadcast_ip: "192.168.122.255"
+gateway_interface: "br-vlan"
+
+# Network parameters
+external_network: "ext-net"
+
+# Subnet parameters
+subnet_name: "ext-subnet"
+allocation_pool: "start=192.168.122.100,end=192.168.122.254"
+subnet_cidr: "192.168.122.0/24"
diff --git a/xci/playbooks/roles/prepare-functest/tasks/main.yml b/xci/playbooks/roles/prepare-tests/tasks/main.yml
index c29baca9..a543ac1f 100644
--- a/xci/playbooks/roles/prepare-functest/tasks/main.yml
+++ b/xci/playbooks/roles/prepare-tests/tasks/main.yml
@@ -8,46 +8,49 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-- name: install functest required packages
+- name: install required packages
package:
- name: "{{ functest_required_packages[ansible_pkg_mgr] }}"
+ name: "{{ required_packages[ansible_pkg_mgr] }}"
+ update_cache: "{{ (ansible_pkg_mgr in ['apt', 'zypper']) | ternary('yes', omit) }}"
state: present
-# Docker is needed for functest
+# Docker is needed for test frameworks
- name: Ensure Docker service is started and enabled
service:
name: docker
state: started
enabled: yes
-- name: install functest required pip packages
+- name: install required pip packages
pip:
- name: "{{ functest_required_pip }}"
+ name: "{{ required_pip }}"
state: present
extra_args: '-c https://raw.githubusercontent.com/openstack/requirements/{{ requirements_git_install_branch }}/upper-constraints.txt'
-- name: create public network gateway for functest
- block:
- - name: check if the gateway was already set
- shell: "ip a | grep {{ gateway_ip }}"
- register: gateway_ip_result
- ignore_errors: True
- changed_when: False
-
- - name: add public network gateway
- command: "ip addr add {{ gateway_ip_mask }} brd {{ broadcast_ip }} dev {{ gateway_interface }}"
- changed_when: False
- when: gateway_ip_result|failed
- when: deploy_scenario is match("os-.*")
-
-- name: prepare environment file for functest
+# odl scenarios require adding the odl variables to the env file
+- include_tasks: process_neutron_conf.yml
+ when: "'-odl-' in deploy_scenario"
+
+- name: prepare environment file for tests
template:
src: env.j2
dest: /root/env
mode: 0755
-- name: prepare the script to create networks and run functest
+- name: create the script to prepare for testing
+ template:
+ src: prepare-tests.sh.j2
+ dest: /root/prepare-tests.sh
+ mode: 0755
+
+- name: create the script to run functest
template:
src: run-functest.sh.j2
dest: /root/run-functest.sh
mode: 0755
+
+- name: create the script to run yardstick
+ template:
+ src: run-yardstick.sh.j2
+ dest: /root/run-yardstick.sh
+ mode: 0755
diff --git a/xci/playbooks/roles/prepare-tests/tasks/process_neutron_conf.yml b/xci/playbooks/roles/prepare-tests/tasks/process_neutron_conf.yml
new file mode 100644
index 00000000..45608df3
--- /dev/null
+++ b/xci/playbooks/roles/prepare-tests/tasks/process_neutron_conf.yml
@@ -0,0 +1,19 @@
+---
+- name: Collecting ODL variables
+ block:
+ - name: Fetch odl_password variable
+ shell: "cat /tmp/ml2_conf.ini | grep password | cut -d ' ' -f3"
+ register: odl_password
+
+ - name: Fetch odl_username variable
+ shell: "cat /tmp/ml2_conf.ini | grep username | cut -d ' ' -f3"
+ register: odl_username
+
+ - name: Fetch odl_port variable
+ shell: "cat /tmp/ml2_conf.ini | grep url | cut -d ':' -f3 | cut -d '/' -f1"
+ register: odl_port
+
+ - name: Fetch odl_ip variable
+ shell: "cat /tmp/ml2_conf.ini | grep url | cut -d ':' -f2 | cut -d '/' -f3"
+ register: odl_ip
+ when: "'-odl-' in deploy_scenario"
diff --git a/xci/playbooks/roles/prepare-functest/templates/env.j2 b/xci/playbooks/roles/prepare-tests/templates/env.j2
index d9a3bf32..d4f8f86c 100644
--- a/xci/playbooks/roles/prepare-functest/templates/env.j2
+++ b/xci/playbooks/roles/prepare-tests/templates/env.j2
@@ -5,3 +5,11 @@ ENERGY_RECORDER_API_URL=http://energy.opnfv.fr/resources
{% if 'os-' in deploy_scenario %}
EXTERNAL_NETWORK={{ external_network }}
{% endif %}
+{% if '-odl-' in deploy_scenario %}
+SDN_CONTROLLER_IP={{ odl_ip.stdout }}
+SDN_CONTROLLER_USER={{ odl_username.stdout }}
+SDN_CONTROLLER_PASSWORD={{ odl_password.stdout }}
+SDN_CONTROLLER_RESTCONFPORT={{ odl_port.stdout }}
+SDN_CONTROLLER_WEBPORT={{ odl_port.stdout }}
+{% endif %}
+
diff --git a/xci/playbooks/roles/prepare-tests/templates/prepare-tests.sh.j2 b/xci/playbooks/roles/prepare-tests/templates/prepare-tests.sh.j2
new file mode 100644
index 00000000..1b779cb9
--- /dev/null
+++ b/xci/playbooks/roles/prepare-tests/templates/prepare-tests.sh.j2
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+# Variables that we need to pass from XCI to testing
+XCI_ENV=(INSTALLER_TYPE XCI_FLAVOR OPENSTACK_OSA_VERSION CI_LOOP BUILD_TAG NODE_NAME FUNCTEST_MODE FUNCTEST_SUITE_NAME FUNCTEST_VERSION)
+
+# Extract variables from xci.env file
+if [[ -e /root/xci.env ]]; then
+ for x in ${XCI_ENV[@]}; do
+ grep "^${x}=" /root/xci.env >> /root/env
+ done
+ # Parse the XCI's DEPLOY_SCENARIO and XCI_FLAVOR variables and
+ # set the functest container's DEPLOY_SCENARIO variable in the
+ # following format <scenario>-<flavor>. But the XCI's mini flavor
+ # is converted into noha.
+ DEPLOY_SCENARIO=`grep -Po '(?<=DEPLOY_SCENARIO=).*' /root/xci.env`
+ XCI_FLAVOR=`grep -Po '(?<=XCI_FLAVOR=).*' /root/xci.env`
+ XCI_FLAVOR=${XCI_FLAVOR/mini/noha}
+ echo "DEPLOY_SCENARIO=$DEPLOY_SCENARIO-$XCI_FLAVOR" >> /root/env
+fi
+
+# we need to ensure the necessary environment variables are sourced
+source /root/env
+
+{% if 'os-' in deploy_scenario %}
+{# stuff needed for OpenStack based scenarios #}
+source /root/openrc
+
+openstack --insecure network create --external \
+ --provider-physical-network flat \
+ --provider-network-type flat {{ external_network }}
+
+openstack --insecure subnet create --network {{ external_network }} \
+ --allocation-pool {{ allocation_pool }} \
+ --subnet-range {{ subnet_cidr }} --gateway {{ gateway_ip }} \
+ --no-dhcp {{ subnet_name }}
+{% else %}
+{# stuff needed for Kubernetes based scenarios #}
+# Create k8s.creds file for testing
+KUBE_MASTER_URL=$(grep -r server ~/.kube/config | awk '{print $2}')
+KUBE_MASTER_IP=$(echo $KUBE_MASTER_URL | awk -F "[:/]" '{print $4}')
+cat << EOF > ~/k8s.creds
+KUBERNETES_PROVIDER=local
+KUBE_MASTER_URL=$KUBE_MASTER_URL
+KUBE_MASTER_IP=$KUBE_MASTER_IP
+EOF
+{% endif %}
diff --git a/xci/playbooks/roles/prepare-tests/templates/run-functest.sh.j2 b/xci/playbooks/roles/prepare-tests/templates/run-functest.sh.j2
new file mode 100644
index 00000000..b4cf46d7
--- /dev/null
+++ b/xci/playbooks/roles/prepare-tests/templates/run-functest.sh.j2
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+# Create directory to store functest logs
+mkdir -p /root/functest-results/
+
+# Dump the env file
+echo "------------------------------------------------------"
+echo "------------- functest environment file --------------"
+cat /root/env
+echo "------------------------------------------------------"
+
+# we need to ensure the necessary environment variables are sourced
+source /root/env
+
+{% if 'os-' in deploy_scenario %}
+{# stuff needed for OpenStack based scenarios #}
+# the needed images differ between the suites so avoid downloading unnecessary images
+echo "Downloading the images needed for functest-$FUNCTEST_SUITE_NAME"
+mkdir ~/images && cd ~/images
+if [[ "$FUNCTEST_SUITE_NAME" =~ "healthcheck" ]]; then
+ wget -q http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img
+elif [[ "$FUNCTEST_SUITE_NAME" =~ "smoke" ]]; then
+ wget -q http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img \
+ http://testresults.opnfv.org/functest/shaker-image.qcow2 \
+ https://cloud-images.ubuntu.com/releases/14.04/release/ubuntu-14.04-server-cloudimg-amd64-disk1.img
+else
+ echo "Unsupported test suite for functest"
+ exit 1
+fi
+echo "------------------------------------------------------"
+ls -al . && cd ~
+echo "------------------------------------------------------"
+
+# docker image to use will be different for healthcheck and smoke test
+DOCKER_IMAGE_NAME="opnfv/functest-${FUNCTEST_SUITE_NAME}:${FUNCTEST_VERSION}"
+
+sudo docker run --env-file env \
+ -v $(pwd)/openrc:/home/opnfv/functest/conf/env_file \
+ -v $(pwd)/images:/home/opnfv/functest/images \
+ -v $(pwd)/functest-results:/home/opnfv/functest/results \
+ ${DOCKER_IMAGE_NAME}
+{% else %}
+{# stuff needed for Kubernetes based scenarios #}
+# docker image to use will be different for healthcheck and smoke test
+DOCKER_IMAGE_NAME="opnfv/functest-kubernetes-${FUNCTEST_SUITE_NAME}"
+
+sudo docker run --env-file env \
+ -v $(pwd)/k8s.creds:/home/opnfv/functest/conf/env_file \
+ -v $(pwd)/.kube/config:/root/.kube/config \
+ -v $(pwd)/functest-results:/home/opnfv/functest/results \
+ $DOCKER_IMAGE_NAME
+{% endif %}
diff --git a/xci/playbooks/roles/prepare-tests/templates/run-yardstick.sh.j2 b/xci/playbooks/roles/prepare-tests/templates/run-yardstick.sh.j2
new file mode 100644
index 00000000..6a7fd8be
--- /dev/null
+++ b/xci/playbooks/roles/prepare-tests/templates/run-yardstick.sh.j2
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+# Create directory to store yardstick logs
+mkdir -p /root/yardstick-results/
+
+# Dump the env file
+echo "------------------------------------------------------"
+echo "------------- yardstick environment file --------------"
+cat /root/env
+echo "------------------------------------------------------"
+
+# we need to ensure the necessary environment variables are sourced
+source /root/env
+
+{% if 'os-' in deploy_scenario %}
+{# stuff needed for OpenStack based scenarios #}
+rc_file_vol="-v /root/openrc:/etc/yardstick/openstack.creds"
+{% else %}
+{# k8 scenario name is hardcoded for the time being until we clarify #}
+{# which suite name we should use for the scenarios without yardstick suites #}
+DEPLOY_SCENARIO="k8-nosdn-nofeature-noha"
+rc_file_vol="-v /root/admin.conf:/etc/yardstick/admin.conf"
+{% endif %}
+
+OS_CACERT="/etc/ssl/certs/haproxy.cert"
+DOCKER_IMAGE_NAME="opnfv/yardstick"
+YARDSTICK_SCENARIO_SUITE_NAME="opnfv_${DEPLOY_SCENARIO}_daily.yaml"
+
+# add OS_CACERT to openrc
+echo "export OS_CACERT=/etc/yardstick/os_cacert" >> ~/openrc
+
+opts="--privileged=true --rm"
+envs="-e INSTALLER_TYPE=$INSTALLER_TYPE -e INSTALLER_IP=$INSTALLER_IP \
+ -e NODE_NAME=$NODE_NAME -e EXTERNAL_NETWORK=$EXTERNAL_NETWORK \
+ -e YARDSTICK_BRANCH=master -e BRANCH=master \
+ -e DEPLOY_SCENARIO=$DEPLOY_SCENARIO -e CI_DEBUG=true"
+cacert_file_vol="-v $OS_CACERT:/etc/yardstick/os_cacert"
+map_log_dir="-v /root/yardstick-results:/tmp/yardstick"
+sshkey="-v /root/.ssh/id_rsa:/root/.ssh/id_rsa"
+cmd="sudo docker run ${opts} ${envs} ${rc_file_vol} ${cacert_file_vol} \
+ ${map_log_dir} ${sshkey} ${DOCKER_IMAGE_NAME} \
+ exec_tests.sh ${YARDSTICK_SCENARIO_SUITE_NAME}"
+echo "Running yardstick with the command"
+echo "------------------------------------------------------"
+echo $cmd
+echo "------------------------------------------------------"
+$cmd
diff --git a/xci/playbooks/roles/prepare-functest/vars/main.yml b/xci/playbooks/roles/prepare-tests/vars/main.yml
index 3a6c8a4d..83638466 100644
--- a/xci/playbooks/roles/prepare-functest/vars/main.yml
+++ b/xci/playbooks/roles/prepare-tests/vars/main.yml
@@ -1,14 +1,17 @@
---
-functest_required_packages:
+required_packages:
apt:
- docker.io
- wget
+ - xz-utils
zypper:
- docker
- wget
+ - xz
yum:
- docker
- wget
+ - xz
-functest_required_pip:
+required_pip:
- docker-py
diff --git a/xci/scenarios/README.rst b/xci/scenarios/README.rst
deleted file mode 100644
index 5d9bdf06..00000000
--- a/xci/scenarios/README.rst
+++ /dev/null
@@ -1 +0,0 @@
-This folder keeps the roles for the generic scenarios.
diff --git a/xci/scenarios/k8-calico-nofeature/role/k8-calico-nofeature/files/k8s-cluster.yml b/xci/scenarios/k8-calico-nofeature/role/k8-calico-nofeature/files/k8s-cluster.yml
deleted file mode 100644
index 20d3091d..00000000
--- a/xci/scenarios/k8-calico-nofeature/role/k8-calico-nofeature/files/k8s-cluster.yml
+++ /dev/null
@@ -1,292 +0,0 @@
-# Valid bootstrap options (required): ubuntu, coreos, centos, none
-bootstrap_os: none
-
-#Directory where etcd data stored
-etcd_data_dir: /var/lib/etcd
-
-# Directory where the binaries will be installed
-bin_dir: /usr/local/bin
-
-## The access_ip variable is used to define how other nodes should access
-## the node. This is used in flannel to allow other flannel nodes to see
-## this node for example. The access_ip is really useful AWS and Google
-## environments where the nodes are accessed remotely by the "public" ip,
-## but don't know about that address themselves.
-#access_ip: 1.1.1.1
-
-### LOADBALANCING AND ACCESS MODES
-## Enable multiaccess to configure etcd clients to access all of the etcd members directly
-## as the "http://hostX:port, http://hostY:port, ..." and ignore the proxy loadbalancers.
-## This may be the case if clients support and loadbalance multiple etcd servers natively.
-#etcd_multiaccess: true
-
-## Internal loadbalancers for apiservers
-#loadbalancer_apiserver_localhost: true
-
-## Local loadbalancer should use this port instead, if defined.
-## Defaults to kube_apiserver_port (6443)
-#nginx_kube_apiserver_port: 8443
-
-### OTHER OPTIONAL VARIABLES
-## For some things, kubelet needs to load kernel modules. For example, dynamic kernel services are needed
-## for mounting persistent volumes into containers. These may not be loaded by preinstall kubernetes
-## processes. For example, ceph and rbd backed volumes. Set to true to allow kubelet to load kernel
-## modules.
-# kubelet_load_modules: false
-
-## Internal network total size. This is the prefix of the
-## entire network. Must be unused in your environment.
-#kube_network_prefix: 18
-
-## With calico it is possible to distributed routes with border routers of the datacenter.
-## Warning : enabling router peering will disable calico's default behavior ('node mesh').
-## The subnets of each nodes will be distributed by the datacenter router
-#peer_with_router: false
-
-## Upstream dns servers used by dnsmasq
-#upstream_dns_servers:
-# - 8.8.8.8
-# - 8.8.4.4
-
-## There are some changes specific to the cloud providers
-## for instance we need to encapsulate packets with some network plugins
-## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', or 'external'
-## When openstack is used make sure to source in the openstack credentials
-## like you would do when using nova-client before starting the playbook.
-#cloud_provider:
-
-## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (https://github.com/kubernetes/kubernetes/issues/50461)
-#openstack_blockstorage_version: "v1/v2/auto (default)"
-## When OpenStack is used, if LBaaSv2 is available you can enable it with the following variables.
-#openstack_lbaas_enabled: True
-#openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
-#openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
-#openstack_lbaas_create_monitor: "yes"
-#openstack_lbaas_monitor_delay: "1m"
-#openstack_lbaas_monitor_timeout: "30s"
-#openstack_lbaas_monitor_max_retries: "3"
-
-## Uncomment to enable experimental kubeadm deployment mode
-#kubeadm_enabled: false
-#kubeadm_token_first: "{{ lookup('password', 'credentials/kubeadm_token_first length=6 chars=ascii_lowercase,digits') }}"
-#kubeadm_token_second: "{{ lookup('password', 'credentials/kubeadm_token_second length=16 chars=ascii_lowercase,digits') }}"
-#kubeadm_token: "{{ kubeadm_token_first }}.{{ kubeadm_token_second }}"
-#
-## Set these proxy values in order to update package manager and docker daemon to use proxies
-#http_proxy: ""
-#https_proxy: ""
-## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
-#no_proxy: ""
-
-## Uncomment this if you want to force overlay/overlay2 as docker storage driver
-## Please note that overlay2 is only supported on newer kernels
-#docker_storage_options: -s overlay2
-
-# Uncomment this if you have more than 3 nameservers, then we'll only use the first 3.
-#docker_dns_servers_strict: false
-
-## Default packages to install within the cluster, f.e:
-#kpm_packages:
-# - name: kube-system/grafana
-
-## Certificate Management
-## This setting determines whether certs are generated via scripts or whether a
-## cluster of Hashicorp's Vault is started to issue certificates (using etcd
-## as a backend). Options are "script" or "vault"
-#cert_management: script
-
-# Set to true to allow pre-checks to fail and continue deployment
-#ignore_assert_errors: false
-
-## Etcd auto compaction retention for mvcc key value store in hour
-#etcd_compaction_retention: 0
-
-## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics.
-#etcd_metrics: basic
-
-
-# Kubernetes configuration dirs and system namespace.
-# Those are where all the additional config stuff goes
-# kubernetes normally puts in /srv/kubernetes.
-# This puts them in a sane location and namespace.
-# Editing those values will almost surely break something.
-kube_config_dir: /etc/kubernetes
-kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
-kube_manifest_dir: "{{ kube_config_dir }}/manifests"
-system_namespace: kube-system
-
-# Logging directory (sysvinit systems)
-kube_log_dir: "/var/log/kubernetes"
-
-# This is where all the cert scripts and certs will be located
-kube_cert_dir: "{{ kube_config_dir }}/ssl"
-
-# This is where all of the bearer tokens will be stored
-kube_token_dir: "{{ kube_config_dir }}/tokens"
-
-# This is where to save basic auth file
-kube_users_dir: "{{ kube_config_dir }}/users"
-
-kube_api_anonymous_auth: false
-
-## Change this to use another Kubernetes version, e.g. a current beta release
-#kube_version: v1.9.0
-
-# Where the binaries will be downloaded.
-# Note: ensure that you've enough disk space (about 1G)
-local_release_dir: "/tmp/releases"
-# Random shifts for retrying failed ops like pushing/downloading
-retry_stagger: 5
-
-# This is the group that the cert creation scripts chgrp the
-# cert files to. Not really changable...
-kube_cert_group: kube-cert
-
-# Cluster Loglevel configuration
-kube_log_level: 2
-
-# Users to create for basic auth in Kubernetes API via HTTP
-# Optionally add groups for user
-kube_api_pwd: "{{ lookup('password', 'credentials/kube_user length=15 chars=ascii_letters,digits') }}"
-kube_users:
- kube:
- pass: "{{kube_api_pwd}}"
- role: admin
- groups:
- - system:masters
-
-## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth)
-#kube_oidc_auth: false
-kube_basic_auth: true
-#kube_token_auth: false
-
-
-## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/
-## To use OpenID you have to deploy additional an OpenID Provider (e.g Dex, Keycloak, ...)
-
-# kube_oidc_url: https:// ...
-# kube_oidc_client_id: kubernetes
-## Optional settings for OIDC
-# kube_oidc_ca_file: {{ kube_cert_dir }}/ca.pem
-# kube_oidc_username_claim: sub
-# kube_oidc_groups_claim: groups
-
-
-# Choose network plugin (calico, contiv, weave or flannel)
-# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
-kube_network_plugin: calico
-
-# weave's network password for encryption
-# if null then no network encryption
-# you can use --extra-vars to pass the password in command line
-weave_password: EnterPasswordHere
-
-# Weave uses consensus mode by default
-# Enabling seed mode allow to dynamically add or remove hosts
-# https://www.weave.works/docs/net/latest/ipam/
-weave_mode_seed: false
-
-# This two variable are automatically changed by the weave's role, do not manually change these values
-# To reset values :
-# weave_seed: uninitialized
-# weave_peers: uninitialized
-weave_seed: uninitialized
-weave_peers: uninitialized
-
-# Enable kubernetes network policies
-enable_network_policy: false
-
-# Kubernetes internal network for services, unused block of space.
-kube_service_addresses: 10.233.0.0/18
-
-# internal network. When used, it will assign IP
-# addresses from this range to individual pods.
-# This network must be unused in your network infrastructure!
-kube_pods_subnet: 10.233.64.0/18
-
-# internal network node size allocation (optional). This is the size allocated
-# to each node on your network. With these defaults you should have
-# room for 4096 nodes with 254 pods per node.
-kube_network_node_prefix: 24
-
-# The port the API Server will be listening on.
-kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
-kube_apiserver_port: 6443 # (https)
-kube_apiserver_insecure_port: 8080 # (http)
-
-# DNS configuration.
-# Kubernetes cluster name, also will be used as DNS domain
-cluster_name: cluster.local
-# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
-ndots: 2
-# Can be dnsmasq_kubedns, kubedns or none
-dns_mode: kubedns
-# Can be docker_dns, host_resolvconf or none
-resolvconf_mode: docker_dns
-# Deploy netchecker app to verify DNS resolve as an HTTP service
-deploy_netchecker: false
-# Ip address of the kubernetes skydns service
-skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
-dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
-dns_domain: "{{ cluster_name }}"
-
-# Path used to store Docker data
-docker_daemon_graph: "/var/lib/docker"
-
-## A string of extra options to pass to the docker daemon.
-## This string should be exactly as you wish it to appear.
-## An obvious use case is allowing insecure-registry access
-## to self hosted registries like so:
-
-docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} {{ docker_log_opts }}"
-docker_bin_dir: "/usr/bin"
-
-# Settings for containerized control plane (etcd/kubelet/secrets)
-etcd_deployment_type: docker
-kubelet_deployment_type: host
-vault_deployment_type: docker
-helm_deployment_type: host
-
-# K8s image pull policy (imagePullPolicy)
-k8s_image_pull_policy: IfNotPresent
-
-# Kubernetes dashboard
-# RBAC required. see docs/getting-started.md for access details.
-dashboard_enabled: true
-
-# Monitoring apps for k8s
-efk_enabled: false
-
-# Helm deployment
-helm_enabled: false
-
-# Istio deployment
-istio_enabled: false
-
-# Local volume provisioner deployment
-local_volumes_enabled: false
-
-# Add Persistent Volumes Storage Class for corresponding cloud provider ( OpenStack is only supported now )
-persistent_volumes_enabled: false
-
-# Make a copy of kubeconfig on the host that runs Ansible in GITDIR/artifacts
-kubeconfig_localhost: true
-# Download kubectl onto the host that runs Ansible in GITDIR/artifacts
-kubectl_localhost: true
-artifacts_dir: "{{ ansible_env.HOME }}"
-
-# dnsmasq
-# dnsmasq_upstream_dns_servers:
-# - /resolvethiszone.with/10.0.4.250
-# - 8.8.8.8
-
-# Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created. (default true)
-# kubelet_cgroups_per_qos: true
-
-# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet.
-# Acceptible options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
-# kubelet_enforce_node_allocatable: pods
-
-## Supplementary addresses that can be added in kubernetes ssl keys.
-## That can be usefull for example to setup a keepalived virtual IP
-# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3]
diff --git a/xci/scenarios/k8-canal-nofeature/role/k8-canal-nofeature/files/k8s-cluster.yml b/xci/scenarios/k8-canal-nofeature/role/k8-canal-nofeature/files/k8s-cluster.yml
deleted file mode 100644
index 7646aefa..00000000
--- a/xci/scenarios/k8-canal-nofeature/role/k8-canal-nofeature/files/k8s-cluster.yml
+++ /dev/null
@@ -1,292 +0,0 @@
-# Valid bootstrap options (required): ubuntu, coreos, centos, none
-bootstrap_os: none
-
-#Directory where etcd data stored
-etcd_data_dir: /var/lib/etcd
-
-# Directory where the binaries will be installed
-bin_dir: /usr/local/bin
-
-## The access_ip variable is used to define how other nodes should access
-## the node. This is used in flannel to allow other flannel nodes to see
-## this node for example. The access_ip is really useful AWS and Google
-## environments where the nodes are accessed remotely by the "public" ip,
-## but don't know about that address themselves.
-#access_ip: 1.1.1.1
-
-### LOADBALANCING AND ACCESS MODES
-## Enable multiaccess to configure etcd clients to access all of the etcd members directly
-## as the "http://hostX:port, http://hostY:port, ..." and ignore the proxy loadbalancers.
-## This may be the case if clients support and loadbalance multiple etcd servers natively.
-#etcd_multiaccess: true
-
-## Internal loadbalancers for apiservers
-#loadbalancer_apiserver_localhost: true
-
-## Local loadbalancer should use this port instead, if defined.
-## Defaults to kube_apiserver_port (6443)
-#nginx_kube_apiserver_port: 8443
-
-### OTHER OPTIONAL VARIABLES
-## For some things, kubelet needs to load kernel modules. For example, dynamic kernel services are needed
-## for mounting persistent volumes into containers. These may not be loaded by preinstall kubernetes
-## processes. For example, ceph and rbd backed volumes. Set to true to allow kubelet to load kernel
-## modules.
-# kubelet_load_modules: false
-
-## Internal network total size. This is the prefix of the
-## entire network. Must be unused in your environment.
-#kube_network_prefix: 18
-
-## With calico it is possible to distributed routes with border routers of the datacenter.
-## Warning : enabling router peering will disable calico's default behavior ('node mesh').
-## The subnets of each nodes will be distributed by the datacenter router
-#peer_with_router: false
-
-## Upstream dns servers used by dnsmasq
-#upstream_dns_servers:
-# - 8.8.8.8
-# - 8.8.4.4
-
-## There are some changes specific to the cloud providers
-## for instance we need to encapsulate packets with some network plugins
-## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', or 'external'
-## When openstack is used make sure to source in the openstack credentials
-## like you would do when using nova-client before starting the playbook.
-#cloud_provider:
-
-## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (https://github.com/kubernetes/kubernetes/issues/50461)
-#openstack_blockstorage_version: "v1/v2/auto (default)"
-## When OpenStack is used, if LBaaSv2 is available you can enable it with the following variables.
-#openstack_lbaas_enabled: True
-#openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
-#openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
-#openstack_lbaas_create_monitor: "yes"
-#openstack_lbaas_monitor_delay: "1m"
-#openstack_lbaas_monitor_timeout: "30s"
-#openstack_lbaas_monitor_max_retries: "3"
-
-## Uncomment to enable experimental kubeadm deployment mode
-#kubeadm_enabled: false
-#kubeadm_token_first: "{{ lookup('password', 'credentials/kubeadm_token_first length=6 chars=ascii_lowercase,digits') }}"
-#kubeadm_token_second: "{{ lookup('password', 'credentials/kubeadm_token_second length=16 chars=ascii_lowercase,digits') }}"
-#kubeadm_token: "{{ kubeadm_token_first }}.{{ kubeadm_token_second }}"
-#
-## Set these proxy values in order to update package manager and docker daemon to use proxies
-#http_proxy: ""
-#https_proxy: ""
-## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
-#no_proxy: ""
-
-## Uncomment this if you want to force overlay/overlay2 as docker storage driver
-## Please note that overlay2 is only supported on newer kernels
-#docker_storage_options: -s overlay2
-
-# Uncomment this if you have more than 3 nameservers, then we'll only use the first 3.
-#docker_dns_servers_strict: false
-
-## Default packages to install within the cluster, f.e:
-#kpm_packages:
-# - name: kube-system/grafana
-
-## Certificate Management
-## This setting determines whether certs are generated via scripts or whether a
-## cluster of Hashicorp's Vault is started to issue certificates (using etcd
-## as a backend). Options are "script" or "vault"
-#cert_management: script
-
-# Set to true to allow pre-checks to fail and continue deployment
-#ignore_assert_errors: false
-
-## Etcd auto compaction retention for mvcc key value store in hour
-#etcd_compaction_retention: 0
-
-## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics.
-#etcd_metrics: basic
-
-
-# Kubernetes configuration dirs and system namespace.
-# Those are where all the additional config stuff goes
-# kubernetes normally puts in /srv/kubernetes.
-# This puts them in a sane location and namespace.
-# Editing those values will almost surely break something.
-kube_config_dir: /etc/kubernetes
-kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
-kube_manifest_dir: "{{ kube_config_dir }}/manifests"
-system_namespace: kube-system
-
-# Logging directory (sysvinit systems)
-kube_log_dir: "/var/log/kubernetes"
-
-# This is where all the cert scripts and certs will be located
-kube_cert_dir: "{{ kube_config_dir }}/ssl"
-
-# This is where all of the bearer tokens will be stored
-kube_token_dir: "{{ kube_config_dir }}/tokens"
-
-# This is where to save basic auth file
-kube_users_dir: "{{ kube_config_dir }}/users"
-
-kube_api_anonymous_auth: false
-
-## Change this to use another Kubernetes version, e.g. a current beta release
-#kube_version: v1.9.0
-
-# Where the binaries will be downloaded.
-# Note: ensure that you've enough disk space (about 1G)
-local_release_dir: "/tmp/releases"
-# Random shifts for retrying failed ops like pushing/downloading
-retry_stagger: 5
-
-# This is the group that the cert creation scripts chgrp the
-# cert files to. Not really changable...
-kube_cert_group: kube-cert
-
-# Cluster Loglevel configuration
-kube_log_level: 2
-
-# Users to create for basic auth in Kubernetes API via HTTP
-# Optionally add groups for user
-kube_api_pwd: "{{ lookup('password', 'credentials/kube_user length=15 chars=ascii_letters,digits') }}"
-kube_users:
- kube:
- pass: "{{kube_api_pwd}}"
- role: admin
- groups:
- - system:masters
-
-## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth)
-#kube_oidc_auth: false
-kube_basic_auth: true
-#kube_token_auth: false
-
-
-## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/
-## To use OpenID you have to deploy additional an OpenID Provider (e.g Dex, Keycloak, ...)
-
-# kube_oidc_url: https:// ...
-# kube_oidc_client_id: kubernetes
-## Optional settings for OIDC
-# kube_oidc_ca_file: {{ kube_cert_dir }}/ca.pem
-# kube_oidc_username_claim: sub
-# kube_oidc_groups_claim: groups
-
-
-# Choose network plugin (calico, contiv, weave or flannel)
-# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
-kube_network_plugin: canal
-
-# weave's network password for encryption
-# if null then no network encryption
-# you can use --extra-vars to pass the password in command line
-weave_password: EnterPasswordHere
-
-# Weave uses consensus mode by default
-# Enabling seed mode allow to dynamically add or remove hosts
-# https://www.weave.works/docs/net/latest/ipam/
-weave_mode_seed: false
-
-# This two variable are automatically changed by the weave's role, do not manually change these values
-# To reset values :
-# weave_seed: uninitialized
-# weave_peers: uninitialized
-weave_seed: uninitialized
-weave_peers: uninitialized
-
-# Enable kubernetes network policies
-enable_network_policy: false
-
-# Kubernetes internal network for services, unused block of space.
-kube_service_addresses: 10.233.0.0/18
-
-# internal network. When used, it will assign IP
-# addresses from this range to individual pods.
-# This network must be unused in your network infrastructure!
-kube_pods_subnet: 10.233.64.0/18
-
-# internal network node size allocation (optional). This is the size allocated
-# to each node on your network. With these defaults you should have
-# room for 4096 nodes with 254 pods per node.
-kube_network_node_prefix: 24
-
-# The port the API Server will be listening on.
-kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
-kube_apiserver_port: 6443 # (https)
-kube_apiserver_insecure_port: 8080 # (http)
-
-# DNS configuration.
-# Kubernetes cluster name, also will be used as DNS domain
-cluster_name: cluster.local
-# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
-ndots: 2
-# Can be dnsmasq_kubedns, kubedns or none
-dns_mode: kubedns
-# Can be docker_dns, host_resolvconf or none
-resolvconf_mode: docker_dns
-# Deploy netchecker app to verify DNS resolve as an HTTP service
-deploy_netchecker: false
-# Ip address of the kubernetes skydns service
-skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
-dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
-dns_domain: "{{ cluster_name }}"
-
-# Path used to store Docker data
-docker_daemon_graph: "/var/lib/docker"
-
-## A string of extra options to pass to the docker daemon.
-## This string should be exactly as you wish it to appear.
-## An obvious use case is allowing insecure-registry access
-## to self hosted registries like so:
-
-docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} {{ docker_log_opts }}"
-docker_bin_dir: "/usr/bin"
-
-# Settings for containerized control plane (etcd/kubelet/secrets)
-etcd_deployment_type: docker
-kubelet_deployment_type: host
-vault_deployment_type: docker
-helm_deployment_type: host
-
-# K8s image pull policy (imagePullPolicy)
-k8s_image_pull_policy: IfNotPresent
-
-# Kubernetes dashboard
-# RBAC required. see docs/getting-started.md for access details.
-dashboard_enabled: true
-
-# Monitoring apps for k8s
-efk_enabled: false
-
-# Helm deployment
-helm_enabled: false
-
-# Istio deployment
-istio_enabled: false
-
-# Local volume provisioner deployment
-local_volumes_enabled: false
-
-# Add Persistent Volumes Storage Class for corresponding cloud provider ( OpenStack is only supported now )
-persistent_volumes_enabled: false
-
-# Make a copy of kubeconfig on the host that runs Ansible in GITDIR/artifacts
-kubeconfig_localhost: true
-# Download kubectl onto the host that runs Ansible in GITDIR/artifacts
-kubectl_localhost: true
-artifacts_dir: "{{ ansible_env.HOME }}"
-
-# dnsmasq
-# dnsmasq_upstream_dns_servers:
-# - /resolvethiszone.with/10.0.4.250
-# - 8.8.8.8
-
-# Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created. (default true)
-# kubelet_cgroups_per_qos: true
-
-# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet.
-# Acceptible options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
-# kubelet_enforce_node_allocatable: pods
-
-## Supplementary addresses that can be added in kubernetes ssl keys.
-## That can be usefull for example to setup a keepalived virtual IP
-# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3]
diff --git a/xci/scenarios/k8-flannel-nofeature/role/k8-flannel-nofeature/files/k8-cluster.yml b/xci/scenarios/k8-flannel-nofeature/role/k8-flannel-nofeature/files/k8-cluster.yml
deleted file mode 100644
index 3c3dc5d9..00000000
--- a/xci/scenarios/k8-flannel-nofeature/role/k8-flannel-nofeature/files/k8-cluster.yml
+++ /dev/null
@@ -1,292 +0,0 @@
-# Valid bootstrap options (required): ubuntu, coreos, centos, none
-bootstrap_os: none
-
-# Directory where etcd data is stored
-etcd_data_dir: /var/lib/etcd
-
-# Directory where the binaries will be installed
-bin_dir: /usr/local/bin
-
-## The access_ip variable is used to define how other nodes should access
-## the node. This is used in flannel to allow other flannel nodes to see
-## this node for example. The access_ip is really useful in AWS and Google
-## environments where the nodes are accessed remotely by the "public" ip,
-## but don't know about that address themselves.
-#access_ip: 1.1.1.1
-
-### LOADBALANCING AND ACCESS MODES
-## Enable multiaccess to configure etcd clients to access all of the etcd members directly
-## as the "http://hostX:port, http://hostY:port, ..." and ignore the proxy loadbalancers.
-## This may be the case if clients support and loadbalance multiple etcd servers natively.
-#etcd_multiaccess: true
-
-## Internal loadbalancers for apiservers
-#loadbalancer_apiserver_localhost: true
-
-## Local loadbalancer should use this port instead, if defined.
-## Defaults to kube_apiserver_port (6443)
-#nginx_kube_apiserver_port: 8443
-
-### OTHER OPTIONAL VARIABLES
-## For some things, kubelet needs to load kernel modules. For example, dynamic kernel services are needed
-## for mounting persistent volumes into containers. These may not be loaded by preinstall kubernetes
-## processes. For example, ceph and rbd backed volumes. Set to true to allow kubelet to load kernel
-## modules.
-# kubelet_load_modules: false
-
-## Internal network total size. This is the prefix of the
-## entire network. Must be unused in your environment.
-#kube_network_prefix: 18
-
-## With calico it is possible to distribute routes with the border routers of the datacenter.
-## Warning: enabling router peering will disable calico's default behavior ('node mesh').
-## The subnet of each node will be distributed by the datacenter router
-#peer_with_router: false
-
-## Upstream dns servers used by dnsmasq
-#upstream_dns_servers:
-# - 8.8.8.8
-# - 8.8.4.4
-
-## There are some changes specific to the cloud providers;
-## for instance, we need to encapsulate packets with some network plugins.
-## If set, the possible values are 'gce', 'aws', 'azure', 'openstack', 'vsphere', or 'external'.
-## When openstack is used, make sure to source the openstack credentials
-## like you would when using nova-client before starting the playbook.
-#cloud_provider:
-
-## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (https://github.com/kubernetes/kubernetes/issues/50461)
-#openstack_blockstorage_version: "v1/v2/auto (default)"
-## When OpenStack is used, if LBaaSv2 is available you can enable it with the following variables.
-#openstack_lbaas_enabled: True
-#openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
-#openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
-#openstack_lbaas_create_monitor: "yes"
-#openstack_lbaas_monitor_delay: "1m"
-#openstack_lbaas_monitor_timeout: "30s"
-#openstack_lbaas_monitor_max_retries: "3"
-
-## Uncomment to enable experimental kubeadm deployment mode
-#kubeadm_enabled: false
-#kubeadm_token_first: "{{ lookup('password', 'credentials/kubeadm_token_first length=6 chars=ascii_lowercase,digits') }}"
-#kubeadm_token_second: "{{ lookup('password', 'credentials/kubeadm_token_second length=16 chars=ascii_lowercase,digits') }}"
-#kubeadm_token: "{{ kubeadm_token_first }}.{{ kubeadm_token_second }}"
-#
-## Set these proxy values in order to update package manager and docker daemon to use proxies
-#http_proxy: ""
-#https_proxy: ""
-## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
-#no_proxy: ""
-
-## Uncomment this if you want to force overlay/overlay2 as docker storage driver
-## Please note that overlay2 is only supported on newer kernels
-#docker_storage_options: -s overlay2
-
-# Uncomment this if you have more than 3 nameservers; only the first 3 will be used.
-#docker_dns_servers_strict: false
-
-## Default packages to install within the cluster, e.g.:
-#kpm_packages:
-# - name: kube-system/grafana
-
-## Certificate Management
-## This setting determines whether certs are generated via scripts or whether a
-## cluster of Hashicorp's Vault is started to issue certificates (using etcd
-## as a backend). Options are "script" or "vault"
-#cert_management: script
-
-# Set to true to allow pre-checks to fail and continue deployment
-#ignore_assert_errors: false
-
-## Etcd auto compaction retention for the mvcc key value store, in hours
-#etcd_compaction_retention: 0
-
-## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics.
-#etcd_metrics: basic
-
-
-# Kubernetes configuration dirs and system namespace.
-# These hold the additional config files that
-# kubernetes normally puts in /srv/kubernetes.
-# This puts them in a sane location and namespace.
-# Editing these values will almost surely break something.
-kube_config_dir: /etc/kubernetes
-kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
-kube_manifest_dir: "{{ kube_config_dir }}/manifests"
-system_namespace: kube-system
-
-# Logging directory (sysvinit systems)
-kube_log_dir: "/var/log/kubernetes"
-
-# This is where all the cert scripts and certs will be located
-kube_cert_dir: "{{ kube_config_dir }}/ssl"
-
-# This is where all of the bearer tokens will be stored
-kube_token_dir: "{{ kube_config_dir }}/tokens"
-
-# This is where to save basic auth file
-kube_users_dir: "{{ kube_config_dir }}/users"
-
-kube_api_anonymous_auth: false
-
-## Change this to use another Kubernetes version, e.g. a current beta release
-#kube_version: v1.9.0
-
-# Where the binaries will be downloaded.
-# Note: ensure that you have enough disk space (about 1G)
-local_release_dir: "/tmp/releases"
-# Random shifts for retrying failed ops like pushing/downloading
-retry_stagger: 5
-
-# This is the group that the cert creation scripts chgrp the
-# cert files to. Not really changeable...
-kube_cert_group: kube-cert
-
-# Cluster Loglevel configuration
-kube_log_level: 2
-
-# Users to create for basic auth in Kubernetes API via HTTP
-# Optionally add groups for user
-kube_api_pwd: "{{ lookup('password', 'credentials/kube_user length=15 chars=ascii_letters,digits') }}"
-kube_users:
- kube:
- pass: "{{kube_api_pwd}}"
- role: admin
- groups:
- - system:masters
-
-## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth)
-#kube_oidc_auth: false
-kube_basic_auth: true
-#kube_token_auth: false
-
-
-## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/
-## To use OpenID you additionally have to deploy an OpenID Provider (e.g. Dex, Keycloak, ...)
-
-# kube_oidc_url: https:// ...
-# kube_oidc_client_id: kubernetes
-## Optional settings for OIDC
-# kube_oidc_ca_file: {{ kube_cert_dir }}/ca.pem
-# kube_oidc_username_claim: sub
-# kube_oidc_groups_claim: groups
-
-
-# Choose network plugin (calico, contiv, weave or flannel)
-# Can also be set to 'cloud', which lets the cloud provider set up appropriate routing
-kube_network_plugin: flannel
-
-# weave's network password for encryption
-# if null then no network encryption
-# you can use --extra-vars to pass the password on the command line
-weave_password: EnterPasswordHere
-
-# Weave uses consensus mode by default
-# Enabling seed mode allows hosts to be dynamically added or removed
-# https://www.weave.works/docs/net/latest/ipam/
-weave_mode_seed: false
-
-# These two variables are automatically changed by the weave role; do not change them manually
-# To reset values :
-# weave_seed: uninitialized
-# weave_peers: uninitialized
-weave_seed: uninitialized
-weave_peers: uninitialized
-
-# Enable kubernetes network policies
-enable_network_policy: false
-
-# Kubernetes internal network for services, unused block of space.
-kube_service_addresses: 10.233.0.0/18
-
-# Kubernetes internal network for pods. When used, it will assign IP
-# addresses from this range to individual pods.
-# This network must be unused in your network infrastructure!
-kube_pods_subnet: 10.233.64.0/18
-
-# internal network node size allocation (optional). This is the size allocated
-# to each node on your network. With these defaults you should have
-# room for 4096 nodes with 254 pods per node.
-kube_network_node_prefix: 24
-
-# The address and port the API Server will be listening on.
-kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
-kube_apiserver_port: 6443 # (https)
-kube_apiserver_insecure_port: 8080 # (http)
-
-# DNS configuration.
-# Kubernetes cluster name; it will also be used as the DNS domain
-cluster_name: cluster.local
-# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
-ndots: 2
-# Can be dnsmasq_kubedns, kubedns or none
-dns_mode: kubedns
-# Can be docker_dns, host_resolvconf or none
-resolvconf_mode: docker_dns
-# Deploy the netchecker app, exposed as an HTTP service, to verify DNS resolution
-deploy_netchecker: false
-# IP address of the kubernetes skydns service
-skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
-dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
-dns_domain: "{{ cluster_name }}"
-
-# Path used to store Docker data
-docker_daemon_graph: "/var/lib/docker"
-
-## A string of extra options to pass to the docker daemon.
-## This string should be exactly as you wish it to appear.
-## An obvious use case is allowing insecure-registry access
-## to self-hosted registries like so:
-
-docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} {{ docker_log_opts }}"
-docker_bin_dir: "/usr/bin"
-
-# Settings for containerized control plane (etcd/kubelet/secrets)
-etcd_deployment_type: docker
-kubelet_deployment_type: host
-vault_deployment_type: docker
-helm_deployment_type: host
-
-# K8s image pull policy (imagePullPolicy)
-k8s_image_pull_policy: IfNotPresent
-
-# Kubernetes dashboard
-# RBAC required. See docs/getting-started.md for access details.
-dashboard_enabled: true
-
-# Monitoring apps for k8s
-efk_enabled: false
-
-# Helm deployment
-helm_enabled: false
-
-# Istio deployment
-istio_enabled: false
-
-# Local volume provisioner deployment
-local_volumes_enabled: false
-
-# Add a Persistent Volumes Storage Class for the corresponding cloud provider (only OpenStack is supported for now)
-persistent_volumes_enabled: false
-
-# Make a copy of kubeconfig on the host that runs Ansible in GITDIR/artifacts
-kubeconfig_localhost: true
-# Download kubectl onto the host that runs Ansible in GITDIR/artifacts
-kubectl_localhost: true
-artifacts_dir: "{{ ansible_env.HOME }}"
-
-# dnsmasq
-# dnsmasq_upstream_dns_servers:
-# - /resolvethiszone.with/10.0.4.250
-# - 8.8.8.8
-
-# Enable creation of the QoS cgroup hierarchy; if true, top-level QoS and pod cgroups are created. (default true)
-# kubelet_cgroups_per_qos: true
-
-# A comma-separated list of levels of node allocatable enforcement to be enforced by kubelet.
-# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
-# kubelet_enforce_node_allocatable: pods
-
-## Supplementary addresses that can be added in kubernetes ssl keys.
-## That can be useful, for example, to set up a keepalived virtual IP
-# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3]
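
Note that the "room for 4096 nodes" comment inherited from upstream kubespray assumes a larger aggregate network; with the /18 kube_pods_subnet actually shown in this file, a /24 per node yields 64 node subnets of 254 pods each. A quick check of that arithmetic, assuming the defaults above:

    import ipaddress

    pods_net = ipaddress.ip_network("10.233.64.0/18")   # kube_pods_subnet
    node_prefix = 24                                    # kube_network_node_prefix

    node_subnets = list(pods_net.subnets(new_prefix=node_prefix))
    pods_per_node = node_subnets[0].num_addresses - 2   # minus network/broadcast

    print(len(node_subnets), pods_per_node)             # 64 254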
diff --git a/xci/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature/files/k8s-cluster.yml b/xci/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature/files/k8s-cluster.yml
deleted file mode 100644
index 614d784e..00000000
--- a/xci/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature/files/k8s-cluster.yml
+++ /dev/null
@@ -1,292 +0,0 @@
-# Valid bootstrap options (required): ubuntu, coreos, centos, none
-bootstrap_os: none
-
-# Directory where etcd data is stored
-etcd_data_dir: /var/lib/etcd
-
-# Directory where the binaries will be installed
-bin_dir: /usr/local/bin
-
-## The access_ip variable is used to define how other nodes should access
-## the node. This is used in flannel to allow other flannel nodes to see
-## this node for example. The access_ip is really useful in AWS and Google
-## environments where the nodes are accessed remotely by the "public" ip,
-## but don't know about that address themselves.
-#access_ip: 1.1.1.1
-
-### LOADBALANCING AND ACCESS MODES
-## Enable multiaccess to configure etcd clients to access all of the etcd members directly
-## as the "http://hostX:port, http://hostY:port, ..." and ignore the proxy loadbalancers.
-## This may be the case if clients support and loadbalance multiple etcd servers natively.
-#etcd_multiaccess: true
-
-## Internal loadbalancers for apiservers
-#loadbalancer_apiserver_localhost: true
-
-## Local loadbalancer should use this port instead, if defined.
-## Defaults to kube_apiserver_port (6443)
-#nginx_kube_apiserver_port: 8443
-
-### OTHER OPTIONAL VARIABLES
-## For some things, kubelet needs to load kernel modules. For example, dynamic kernel services are needed
-## for mounting persistent volumes into containers. These may not be loaded by preinstall kubernetes
-## processes. For example, ceph and rbd backed volumes. Set to true to allow kubelet to load kernel
-## modules.
-# kubelet_load_modules: false
-
-## Internal network total size. This is the prefix of the
-## entire network. Must be unused in your environment.
-#kube_network_prefix: 18
-
-## With calico it is possible to distribute routes with the border routers of the datacenter.
-## Warning: enabling router peering will disable calico's default behavior ('node mesh').
-## The subnet of each node will be distributed by the datacenter router
-#peer_with_router: false
-
-## Upstream dns servers used by dnsmasq
-#upstream_dns_servers:
-# - 8.8.8.8
-# - 8.8.4.4
-
-## There are some changes specific to the cloud providers;
-## for instance, we need to encapsulate packets with some network plugins.
-## If set, the possible values are 'gce', 'aws', 'azure', 'openstack', 'vsphere', or 'external'.
-## When openstack is used, make sure to source the openstack credentials
-## like you would when using nova-client before starting the playbook.
-#cloud_provider:
-
-## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (https://github.com/kubernetes/kubernetes/issues/50461)
-#openstack_blockstorage_version: "v1/v2/auto (default)"
-## When OpenStack is used, if LBaaSv2 is available you can enable it with the following variables.
-#openstack_lbaas_enabled: True
-#openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
-#openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
-#openstack_lbaas_create_monitor: "yes"
-#openstack_lbaas_monitor_delay: "1m"
-#openstack_lbaas_monitor_timeout: "30s"
-#openstack_lbaas_monitor_max_retries: "3"
-
-## Uncomment to enable experimental kubeadm deployment mode
-#kubeadm_enabled: false
-#kubeadm_token_first: "{{ lookup('password', 'credentials/kubeadm_token_first length=6 chars=ascii_lowercase,digits') }}"
-#kubeadm_token_second: "{{ lookup('password', 'credentials/kubeadm_token_second length=16 chars=ascii_lowercase,digits') }}"
-#kubeadm_token: "{{ kubeadm_token_first }}.{{ kubeadm_token_second }}"
-#
-## Set these proxy values in order to update package manager and docker daemon to use proxies
-#http_proxy: ""
-#https_proxy: ""
-## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
-#no_proxy: ""
-
-## Uncomment this if you want to force overlay/overlay2 as docker storage driver
-## Please note that overlay2 is only supported on newer kernels
-#docker_storage_options: -s overlay2
-
-# Uncomment this if you have more than 3 nameservers; only the first 3 will be used.
-#docker_dns_servers_strict: false
-
-## Default packages to install within the cluster, e.g.:
-#kpm_packages:
-# - name: kube-system/grafana
-
-## Certificate Management
-## This setting determines whether certs are generated via scripts or whether a
-## cluster of Hashicorp's Vault is started to issue certificates (using etcd
-## as a backend). Options are "script" or "vault"
-#cert_management: script
-
-# Set to true to allow pre-checks to fail and continue deployment
-#ignore_assert_errors: false
-
-## Etcd auto compaction retention for the mvcc key value store, in hours
-#etcd_compaction_retention: 0
-
-## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics.
-#etcd_metrics: basic
-
-
-# Kubernetes configuration dirs and system namespace.
-# These hold the additional config files that
-# kubernetes normally puts in /srv/kubernetes.
-# This puts them in a sane location and namespace.
-# Editing these values will almost surely break something.
-kube_config_dir: /etc/kubernetes
-kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
-kube_manifest_dir: "{{ kube_config_dir }}/manifests"
-system_namespace: kube-system
-
-# Logging directory (sysvinit systems)
-kube_log_dir: "/var/log/kubernetes"
-
-# This is where all the cert scripts and certs will be located
-kube_cert_dir: "{{ kube_config_dir }}/ssl"
-
-# This is where all of the bearer tokens will be stored
-kube_token_dir: "{{ kube_config_dir }}/tokens"
-
-# This is where to save basic auth file
-kube_users_dir: "{{ kube_config_dir }}/users"
-
-kube_api_anonymous_auth: false
-
-## Change this to use another Kubernetes version, e.g. a current beta release
-#kube_version: v1.9.0
-
-# Where the binaries will be downloaded.
-# Note: ensure that you have enough disk space (about 1G)
-local_release_dir: "/tmp/releases"
-# Random shifts for retrying failed ops like pushing/downloading
-retry_stagger: 5
-
-# This is the group that the cert creation scripts chgrp the
-# cert files to. Not really changeable...
-kube_cert_group: kube-cert
-
-# Cluster Loglevel configuration
-kube_log_level: 2
-
-# Users to create for basic auth in Kubernetes API via HTTP
-# Optionally add groups for user
-kube_api_pwd: "{{ lookup('password', 'credentials/kube_user length=15 chars=ascii_letters,digits') }}"
-kube_users:
- kube:
- pass: "{{kube_api_pwd}}"
- role: admin
- groups:
- - system:masters
-
-## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth)
-#kube_oidc_auth: false
-kube_basic_auth: true
-#kube_token_auth: false
-
-
-## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/
-## To use OpenID you additionally have to deploy an OpenID Provider (e.g. Dex, Keycloak, ...)
-
-# kube_oidc_url: https:// ...
-# kube_oidc_client_id: kubernetes
-## Optional settings for OIDC
-# kube_oidc_ca_file: {{ kube_cert_dir }}/ca.pem
-# kube_oidc_username_claim: sub
-# kube_oidc_groups_claim: groups
-
-
-# Choose network plugin (calico, contiv, weave or flannel)
-# Can also be set to 'cloud', which lets the cloud provider set up appropriate routing
-kube_network_plugin: cloud
-
-# weave's network password for encryption
-# if null then no network encryption
-# you can use --extra-vars to pass the password on the command line
-weave_password: EnterPasswordHere
-
-# Weave uses consensus mode by default
-# Enabling seed mode allows hosts to be dynamically added or removed
-# https://www.weave.works/docs/net/latest/ipam/
-weave_mode_seed: false
-
-# These two variables are automatically changed by the weave role; do not change them manually
-# To reset values :
-# weave_seed: uninitialized
-# weave_peers: uninitialized
-weave_seed: uninitialized
-weave_peers: uninitialized
-
-# Enable kubernetes network policies
-enable_network_policy: false
-
-# Kubernetes internal network for services, unused block of space.
-kube_service_addresses: 10.233.0.0/18
-
-# Kubernetes internal network for pods. When used, it will assign IP
-# addresses from this range to individual pods.
-# This network must be unused in your network infrastructure!
-kube_pods_subnet: 10.233.64.0/18
-
-# internal network node size allocation (optional). This is the size allocated
-# to each node on your network. With these defaults you should have
-# room for 4096 nodes with 254 pods per node.
-kube_network_node_prefix: 24
-
-# The address and port the API Server will be listening on.
-kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
-kube_apiserver_port: 6443 # (https)
-kube_apiserver_insecure_port: 8080 # (http)
-
-# DNS configuration.
-# Kubernetes cluster name; it will also be used as the DNS domain
-cluster_name: cluster.local
-# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
-ndots: 2
-# Can be dnsmasq_kubedns, kubedns or none
-dns_mode: kubedns
-# Can be docker_dns, host_resolvconf or none
-resolvconf_mode: docker_dns
-# Deploy the netchecker app, exposed as an HTTP service, to verify DNS resolution
-deploy_netchecker: false
-# IP address of the kubernetes skydns service
-skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
-dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
-dns_domain: "{{ cluster_name }}"
-
-# Path used to store Docker data
-docker_daemon_graph: "/var/lib/docker"
-
-## A string of extra options to pass to the docker daemon.
-## This string should be exactly as you wish it to appear.
-## An obvious use case is allowing insecure-registry access
-## to self-hosted registries like so:
-
-docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} {{ docker_log_opts }}"
-docker_bin_dir: "/usr/bin"
-
-# Settings for containerized control plane (etcd/kubelet/secrets)
-etcd_deployment_type: docker
-kubelet_deployment_type: host
-vault_deployment_type: docker
-helm_deployment_type: host
-
-# K8s image pull policy (imagePullPolicy)
-k8s_image_pull_policy: IfNotPresent
-
-# Kubernetes dashboard
-# RBAC required. See docs/getting-started.md for access details.
-dashboard_enabled: true
-
-# Monitoring apps for k8s
-efk_enabled: false
-
-# Helm deployment
-helm_enabled: false
-
-# Istio deployment
-istio_enabled: false
-
-# Local volume provisioner deployment
-local_volumes_enabled: false
-
-# Add a Persistent Volumes Storage Class for the corresponding cloud provider (only OpenStack is supported for now)
-persistent_volumes_enabled: false
-
-# Make a copy of kubeconfig on the host that runs Ansible in GITDIR/artifacts
-kubeconfig_localhost: true
-# Download kubectl onto the host that runs Ansible in GITDIR/artifacts
-kubectl_localhost: true
-artifacts_dir: "{{ ansible_env.HOME }}"
-
-# dnsmasq
-# dnsmasq_upstream_dns_servers:
-# - /resolvethiszone.with/10.0.4.250
-# - 8.8.8.8
-
-# Enable creation of the QoS cgroup hierarchy; if true, top-level QoS and pod cgroups are created. (default true)
-# kubelet_cgroups_per_qos: true
-
-# A comma-separated list of levels of node allocatable enforcement to be enforced by kubelet.
-# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
-# kubelet_enforce_node_allocatable: pods
-
-## Supplementary addresses that can be added in kubernetes ssl keys.
-## That can be useful, for example, to set up a keepalived virtual IP
-# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3]
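
The kube_api_pwd value in these files relies on Ansible's password lookup, which generates the secret once and then reuses the stored copy on every subsequent run. A rough Python sketch of that behaviour, mirroring the lookup arguments in the file (password_lookup is a hypothetical helper, not an Ansible API):

    import secrets
    import string
    from pathlib import Path

    def password_lookup(path: str, length: int = 15) -> str:
        # Reuse the stored secret if it already exists, like the lookup does.
        p = Path(path)
        if p.exists():
            return p.read_text().strip()
        alphabet = string.ascii_letters + string.digits   # chars=ascii_letters,digits
        pwd = "".join(secrets.choice(alphabet) for _ in range(length))
        p.parent.mkdir(parents=True, exist_ok=True)
        p.write_text(pwd)
        return pwd

    # Equivalent to lookup('password', 'credentials/kube_user length=15 ...')
    kube_api_pwd = password_lookup("credentials/kube_user")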
diff --git a/xci/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature/tasks/.gitkeep b/xci/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature/tasks/.gitkeep
deleted file mode 100644
index e69de29b..00000000
--- a/xci/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature/tasks/.gitkeep
+++ /dev/null
diff --git a/xci/scenarios/os-nosdn-nofeature/README.rst b/xci/scenarios/os-nosdn-nofeature/README.rst
deleted file mode 100644
index dcdc83fc..00000000
--- a/xci/scenarios/os-nosdn-nofeature/README.rst
+++ /dev/null
@@ -1,2 +0,0 @@
-This scenario is currently incomplete. In order for it to be
-complete, changes for CEPH must be moved here, combining OVS + CEPH.
diff --git a/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/ha/openstack_user_config.yml b/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/ha/openstack_user_config.yml
deleted file mode 100644
index 1aaf84d8..00000000
--- a/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/ha/openstack_user_config.yml
+++ /dev/null
@@ -1,255 +0,0 @@
----
-cidr_networks:
- container: 172.29.236.0/22
- tunnel: 172.29.240.0/22
- storage: 172.29.244.0/22
-
-used_ips:
- - "172.29.236.1,172.29.236.50"
- - "172.29.240.1,172.29.240.50"
- - "172.29.244.1,172.29.244.50"
- - "172.29.248.1,172.29.248.50"
- - "172.29.236.222"
-
-global_overrides:
- internal_lb_vip_address: 172.29.236.222
- external_lb_vip_address: 192.168.122.220
- tunnel_bridge: "br-vxlan"
- management_bridge: "br-mgmt"
- provider_networks:
- - network:
- container_bridge: "br-mgmt"
- container_type: "veth"
- container_interface: "eth1"
- ip_from_q: "container"
- type: "raw"
- group_binds:
- - all_containers
- - hosts
- is_container_address: true
- is_ssh_address: true
- - network:
- container_bridge: "br-vxlan"
- container_type: "veth"
- container_interface: "eth10"
- ip_from_q: "tunnel"
- type: "vxlan"
- range: "1:1000"
- net_name: "vxlan"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth12"
- host_bind_override: "eth12"
- type: "flat"
- net_name: "flat"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth11"
- type: "vlan"
- range: "1:1"
- net_name: "vlan"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-storage"
- container_type: "veth"
- container_interface: "eth2"
- ip_from_q: "storage"
- type: "raw"
- group_binds:
- - glance_api
- - cinder_api
- - cinder_volume
- - nova_compute
-
-# ##
-# ## Infrastructure
-# ##
-
-# galera, memcache, rabbitmq, utility
-shared-infra_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# repository (apt cache, python packages, etc)
-repo-infra_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# load balancer
-# Ideally the load balancer should not use the Infrastructure hosts.
-# Dedicated hardware is best for improved performance and security.
-haproxy_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# rsyslog server
-# log_hosts:
-# log1:
-# ip: 172.29.236.14
-
-# ##
-# ## OpenStack
-# ##
-
-# keystone
-identity_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# cinder api services
-storage-infra_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# glance
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-image_hosts:
- controller00:
- ip: 172.29.236.11
- container_vars:
- limit_container_types: glance
- glance_nfs_client:
- - server: "172.29.244.14"
- remote_path: "/images"
- local_path: "/var/lib/glance/images"
- type: "nfs"
- options: "_netdev,auto"
- controller01:
- ip: 172.29.236.12
- container_vars:
- limit_container_types: glance
- glance_nfs_client:
- - server: "172.29.244.14"
- remote_path: "/images"
- local_path: "/var/lib/glance/images"
- type: "nfs"
- options: "_netdev,auto"
- controller02:
- ip: 172.29.236.13
- container_vars:
- limit_container_types: glance
- glance_nfs_client:
- - server: "172.29.244.14"
- remote_path: "/images"
- local_path: "/var/lib/glance/images"
- type: "nfs"
- options: "_netdev,auto"
-
-# nova api, conductor, etc services
-compute-infra_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# heat
-orchestration_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# horizon
-dashboard_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# neutron server, agents (L3, etc)
-network_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# nova hypervisors
-compute_hosts:
- compute00:
- ip: 172.29.236.14
- compute01:
- ip: 172.29.236.15
-
-# cinder volume hosts (NFS-backed)
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-storage_hosts:
- controller00:
- ip: 172.29.236.11
- container_vars:
- cinder_backends:
- limit_container_types: cinder_volume
- nfs_volume:
- volume_backend_name: NFS_VOLUME1
- volume_driver: cinder.volume.drivers.nfs.NfsDriver
- nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
- nfs_shares_config: /etc/cinder/nfs_shares
- shares:
- - ip: "172.29.244.14"
- share: "/volumes"
- controller01:
- ip: 172.29.236.12
- container_vars:
- cinder_backends:
- limit_container_types: cinder_volume
- nfs_volume:
- volume_backend_name: NFS_VOLUME1
- volume_driver: cinder.volume.drivers.nfs.NfsDriver
- nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
- nfs_shares_config: /etc/cinder/nfs_shares
- shares:
- - ip: "172.29.244.14"
- share: "/volumes"
- controller02:
- ip: 172.29.236.13
- container_vars:
- cinder_backends:
- limit_container_types: cinder_volume
- nfs_volume:
- volume_backend_name: NFS_VOLUME1
- volume_driver: cinder.volume.drivers.nfs.NfsDriver
- nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
- nfs_shares_config: /etc/cinder/nfs_shares
- shares:
- - ip: "172.29.244.14"
- share: "/volumes"
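
In the openstack_user_config.yml files above, used_ips tells the OSA dynamic inventory which addresses it must never hand out to containers, which is why the controller/compute host IPs and the internal VIP fall inside the listed ranges. A sketch of the resulting container address pool for the br-mgmt network, assuming standard OSA inventory behaviour (only the container-network ranges are shown; the file reserves analogous ranges in the tunnel and storage networks):

    import ipaddress

    container_net = ipaddress.ip_network("172.29.236.0/22")
    used = [("172.29.236.1", "172.29.236.50"),
            ("172.29.236.222", "172.29.236.222")]   # internal_lb_vip_address

    def reserved(ip):
        return any(ipaddress.ip_address(lo) <= ip <= ipaddress.ip_address(hi)
                   for lo, hi in used)

    # Addresses left over for LXC containers on br-mgmt
    pool = [ip for ip in container_net.hosts() if not reserved(ip)]
    print(len(pool), pool[0], pool[-1])   # 971 172.29.236.51 172.29.239.254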
diff --git a/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/mini/openstack_user_config.yml b/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/mini/openstack_user_config.yml
deleted file mode 100644
index 86b87c15..00000000
--- a/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/mini/openstack_user_config.yml
+++ /dev/null
@@ -1,170 +0,0 @@
----
-cidr_networks:
- container: 172.29.236.0/22
- tunnel: 172.29.240.0/22
- storage: 172.29.244.0/22
-
-used_ips:
- - "172.29.236.1,172.29.236.50"
- - "172.29.240.1,172.29.240.50"
- - "172.29.244.1,172.29.244.50"
- - "172.29.248.1,172.29.248.50"
-
-global_overrides:
- internal_lb_vip_address: 172.29.236.11
- external_lb_vip_address: 192.168.122.3
- tunnel_bridge: "br-vxlan"
- management_bridge: "br-mgmt"
- provider_networks:
- - network:
- container_bridge: "br-mgmt"
- container_type: "veth"
- container_interface: "eth1"
- ip_from_q: "container"
- type: "raw"
- group_binds:
- - all_containers
- - hosts
- is_container_address: true
- is_ssh_address: true
- - network:
- container_bridge: "br-vxlan"
- container_type: "veth"
- container_interface: "eth10"
- ip_from_q: "tunnel"
- type: "vxlan"
- range: "1:1000"
- net_name: "vxlan"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth12"
- host_bind_override: "eth12"
- type: "flat"
- net_name: "flat"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth11"
- type: "vlan"
- range: "1:1"
- net_name: "vlan"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-storage"
- container_type: "veth"
- container_interface: "eth2"
- ip_from_q: "storage"
- type: "raw"
- group_binds:
- - glance_api
- - cinder_api
- - cinder_volume
- - nova_compute
-
-# ##
-# ## Infrastructure
-# ##
-
-# galera, memcache, rabbitmq, utility
-shared-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# repository (apt cache, python packages, etc)
-repo-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# load balancer
-# Ideally the load balancer should not use the Infrastructure hosts.
-# Dedicated hardware is best for improved performance and security.
-haproxy_hosts:
- controller00:
- ip: 172.29.236.11
-
-# rsyslog server
-# log_hosts:
-# log1:
-# ip: 172.29.236.14
-
-# ##
-# ## OpenStack
-# ##
-
-# keystone
-identity_hosts:
- controller00:
- ip: 172.29.236.11
-
-# cinder api services
-storage-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# glance
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-image_hosts:
- controller00:
- ip: 172.29.236.11
- container_vars:
- limit_container_types: glance
- glance_nfs_client:
- - server: "172.29.244.12"
- remote_path: "/images"
- local_path: "/var/lib/glance/images"
- type: "nfs"
- options: "_netdev,auto"
-
-# nova api, conductor, etc services
-compute-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# heat
-orchestration_hosts:
- controller00:
- ip: 172.29.236.11
-
-# horizon
-dashboard_hosts:
- controller00:
- ip: 172.29.236.11
-
-# neutron server, agents (L3, etc)
-network_hosts:
- controller00:
- ip: 172.29.236.11
-
-# nova hypervisors
-compute_hosts:
- compute00:
- ip: 172.29.236.12
-
-# cinder volume hosts (NFS-backed)
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-storage_hosts:
- controller00:
- ip: 172.29.236.11
- container_vars:
- cinder_backends:
- limit_container_types: cinder_volume
- nfs_volume:
- volume_backend_name: NFS_VOLUME1
- volume_driver: cinder.volume.drivers.nfs.NfsDriver
- nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
- nfs_shares_config: /etc/cinder/nfs_shares
- shares:
- - ip: "172.29.244.12"
- share: "/volumes"
diff --git a/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/noha/openstack_user_config.yml b/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/noha/openstack_user_config.yml
deleted file mode 100644
index 99b768c4..00000000
--- a/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/noha/openstack_user_config.yml
+++ /dev/null
@@ -1,172 +0,0 @@
----
-cidr_networks:
- container: 172.29.236.0/22
- tunnel: 172.29.240.0/22
- storage: 172.29.244.0/22
-
-used_ips:
- - "172.29.236.1,172.29.236.50"
- - "172.29.240.1,172.29.240.50"
- - "172.29.244.1,172.29.244.50"
- - "172.29.248.1,172.29.248.50"
-
-global_overrides:
- internal_lb_vip_address: 172.29.236.11
- external_lb_vip_address: 192.168.122.3
- tunnel_bridge: "br-vxlan"
- management_bridge: "br-mgmt"
- provider_networks:
- - network:
- container_bridge: "br-mgmt"
- container_type: "veth"
- container_interface: "eth1"
- ip_from_q: "container"
- type: "raw"
- group_binds:
- - all_containers
- - hosts
- is_container_address: true
- is_ssh_address: true
- - network:
- container_bridge: "br-vxlan"
- container_type: "veth"
- container_interface: "eth10"
- ip_from_q: "tunnel"
- type: "vxlan"
- range: "1:1000"
- net_name: "vxlan"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth12"
- host_bind_override: "eth12"
- type: "flat"
- net_name: "flat"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth11"
- type: "vlan"
- range: "1:1"
- net_name: "vlan"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-storage"
- container_type: "veth"
- container_interface: "eth2"
- ip_from_q: "storage"
- type: "raw"
- group_binds:
- - glance_api
- - cinder_api
- - cinder_volume
- - nova_compute
-
-# ##
-# ## Infrastructure
-# ##
-
-# galera, memcache, rabbitmq, utility
-shared-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# repository (apt cache, python packages, etc)
-repo-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# load balancer
-# Ideally the load balancer should not use the Infrastructure hosts.
-# Dedicated hardware is best for improved performance and security.
-haproxy_hosts:
- controller00:
- ip: 172.29.236.11
-
-# rsyslog server
-# log_hosts:
-# log1:
-# ip: 172.29.236.14
-
-# ##
-# ## OpenStack
-# ##
-
-# keystone
-identity_hosts:
- controller00:
- ip: 172.29.236.11
-
-# cinder api services
-storage-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# glance
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-image_hosts:
- controller00:
- ip: 172.29.236.11
- container_vars:
- limit_container_types: glance
- glance_nfs_client:
- - server: "172.29.244.12"
- remote_path: "/images"
- local_path: "/var/lib/glance/images"
- type: "nfs"
- options: "_netdev,auto"
-
-# nova api, conductor, etc services
-compute-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# heat
-orchestration_hosts:
- controller00:
- ip: 172.29.236.11
-
-# horizon
-dashboard_hosts:
- controller00:
- ip: 172.29.236.11
-
-# neutron server, agents (L3, etc)
-network_hosts:
- controller00:
- ip: 172.29.236.11
-
-# nova hypervisors
-compute_hosts:
- compute00:
- ip: 172.29.236.12
- compute01:
- ip: 172.29.236.13
-
-# cinder volume hosts (NFS-backed)
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-storage_hosts:
- controller00:
- ip: 172.29.236.11
- container_vars:
- cinder_backends:
- limit_container_types: cinder_volume
- nfs_volume:
- volume_backend_name: NFS_VOLUME1
- volume_driver: cinder.volume.drivers.nfs.NfsDriver
- nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
- nfs_shares_config: /etc/cinder/nfs_shares
- shares:
- - ip: "172.29.244.12"
- share: "/volumes"
diff --git a/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/user_variables_os-nosdn-nofeature.yml b/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/user_variables_os-nosdn-nofeature.yml
deleted file mode 100644
index 2f678544..00000000
--- a/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/user_variables_os-nosdn-nofeature.yml
+++ /dev/null
@@ -1,35 +0,0 @@
----
-# Copyright (c) 2017 Ericsson AB and others.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# ##
-# ## This file contains commonly used overrides for convenience. Please inspect
-# ## the defaults for each role to find additional override options.
-# ##
-
-# Ensure the openvswitch kernel module is loaded
-openstack_host_specific_kernel_modules:
- - name: "openvswitch"
- pattern: "CONFIG_OPENVSWITCH"
- group: "network_hosts"
-
-# neutron specific config
-neutron_plugin_type: ml2.ovs
-
-neutron_ml2_drivers_type: "flat,vlan,vxlan"
-
-neutron_provider_networks:
- network_flat_networks: "*"
- network_types: "vxlan"
- network_vxlan_ranges: "1:1000" \ No newline at end of file
diff --git a/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/tasks/main.yml b/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/tasks/main.yml
deleted file mode 100644
index 79aa3aa1..00000000
--- a/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/tasks/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-# SPDX-license-identifier: Apache-2.0
-##############################################################################
-# Copyright (c) 2017 Ericsson AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-- name: copy user_variables_os-nosdn-nofeature.yml
- copy:
- src: "user_variables_os-nosdn-nofeature.yml"
- dest: "{{openstack_osa_etc_path}}/user_variables_os-nosdn-nofeature.yml"
-- name: copy os-nosdn-nofeature scenario specific openstack_user_config.yml
- copy:
- src: "{{xci_flavor}}/openstack_user_config.yml"
- dest: "{{openstack_osa_etc_path}}/openstack_user_config.yml"
diff --git a/xci/scenarios/os-odl-nofeature/.gitkeep b/xci/scenarios/os-odl-nofeature/.gitkeep
deleted file mode 100644
index e69de29b..00000000
--- a/xci/scenarios/os-odl-nofeature/.gitkeep
+++ /dev/null
diff --git a/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/ha/openstack_user_config.yml b/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/ha/openstack_user_config.yml
deleted file mode 100644
index 2ca5a987..00000000
--- a/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/ha/openstack_user_config.yml
+++ /dev/null
@@ -1,256 +0,0 @@
----
-cidr_networks:
- container: 172.29.236.0/22
- tunnel: 172.29.240.0/22
- storage: 172.29.244.0/22
-
-used_ips:
- - "172.29.236.1,172.29.236.50"
- - "172.29.240.1,172.29.240.50"
- - "172.29.244.1,172.29.244.50"
- - "172.29.248.1,172.29.248.50"
- - "172.29.236.222"
-
-global_overrides:
- internal_lb_vip_address: 172.29.236.222
- external_lb_vip_address: 192.168.122.220
- tunnel_bridge: "br-vxlan"
- management_bridge: "br-mgmt"
- provider_networks:
- - network:
- container_bridge: "br-mgmt"
- container_type: "veth"
- container_interface: "eth1"
- ip_from_q: "container"
- type: "raw"
- group_binds:
- - all_containers
- - hosts
- is_container_address: true
- is_ssh_address: true
- - network:
- container_bridge: "br-vxlan"
- container_type: "veth"
- container_interface: "eth10"
- ip_from_q: "tunnel"
- type: "vxlan"
- range: "1:1000"
- net_name: "vxlan"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth12"
- host_bind_override: "eth12"
- type: "flat"
- net_name: "flat"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth11"
- host_bind_override: "eth12"
- type: "vlan"
- range: "102:199"
- net_name: "physnet1"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-storage"
- container_type: "veth"
- container_interface: "eth2"
- ip_from_q: "storage"
- type: "raw"
- group_binds:
- - glance_api
- - cinder_api
- - cinder_volume
- - nova_compute
-
-# ##
-# ## Infrastructure
-# ##
-
-# galera, memcache, rabbitmq, utility
-shared-infra_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# repository (apt cache, python packages, etc)
-repo-infra_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# load balancer
-# Ideally the load balancer should not use the Infrastructure hosts.
-# Dedicated hardware is best for improved performance and security.
-haproxy_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# rsyslog server
-# log_hosts:
-# log1:
-# ip: 172.29.236.14
-
-# ##
-# ## OpenStack
-# ##
-
-# keystone
-identity_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# cinder api services
-storage-infra_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# glance
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-image_hosts:
- controller00:
- ip: 172.29.236.11
- container_vars:
- limit_container_types: glance
- glance_nfs_client:
- - server: "172.29.244.14"
- remote_path: "/images"
- local_path: "/var/lib/glance/images"
- type: "nfs"
- options: "_netdev,auto"
- controller01:
- ip: 172.29.236.12
- container_vars:
- limit_container_types: glance
- glance_nfs_client:
- - server: "172.29.244.14"
- remote_path: "/images"
- local_path: "/var/lib/glance/images"
- type: "nfs"
- options: "_netdev,auto"
- controller02:
- ip: 172.29.236.13
- container_vars:
- limit_container_types: glance
- glance_nfs_client:
- - server: "172.29.244.14"
- remote_path: "/images"
- local_path: "/var/lib/glance/images"
- type: "nfs"
- options: "_netdev,auto"
-
-# nova api, conductor, etc services
-compute-infra_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# heat
-orchestration_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# horizon
-dashboard_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# neutron server, agents (L3, etc)
-network_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# nova hypervisors
-compute_hosts:
- compute00:
- ip: 172.29.236.14
- compute01:
- ip: 172.29.236.15
-
-# cinder volume hosts (NFS-backed)
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-storage_hosts:
- controller00:
- ip: 172.29.236.11
- container_vars:
- cinder_backends:
- limit_container_types: cinder_volume
- nfs_volume:
- volume_backend_name: NFS_VOLUME1
- volume_driver: cinder.volume.drivers.nfs.NfsDriver
- nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
- nfs_shares_config: /etc/cinder/nfs_shares
- shares:
- - ip: "172.29.244.14"
- share: "/volumes"
- controller01:
- ip: 172.29.236.12
- container_vars:
- cinder_backends:
- limit_container_types: cinder_volume
- nfs_volume:
- volume_backend_name: NFS_VOLUME1
- volume_driver: cinder.volume.drivers.nfs.NfsDriver
- nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
- nfs_shares_config: /etc/cinder/nfs_shares
- shares:
- - ip: "172.29.244.14"
- share: "/volumes"
- controller02:
- ip: 172.29.236.13
- container_vars:
- cinder_backends:
- limit_container_types: cinder_volume
- nfs_volume:
- volume_backend_name: NFS_VOLUME1
- volume_driver: cinder.volume.drivers.nfs.NfsDriver
- nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
- nfs_shares_config: /etc/cinder/nfs_shares
- shares:
- - ip: "172.29.244.14"
- share: "/volumes"
diff --git a/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/mini/openstack_user_config.yml b/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/mini/openstack_user_config.yml
deleted file mode 100644
index 0f8ccd18..00000000
--- a/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/mini/openstack_user_config.yml
+++ /dev/null
@@ -1,171 +0,0 @@
----
-cidr_networks:
- container: 172.29.236.0/22
- tunnel: 172.29.240.0/22
- storage: 172.29.244.0/22
-
-used_ips:
- - "172.29.236.1,172.29.236.50"
- - "172.29.240.1,172.29.240.50"
- - "172.29.244.1,172.29.244.50"
- - "172.29.248.1,172.29.248.50"
-
-global_overrides:
- internal_lb_vip_address: 172.29.236.11
- external_lb_vip_address: 192.168.122.3
- tunnel_bridge: "br-vxlan"
- management_bridge: "br-mgmt"
- provider_networks:
- - network:
- container_bridge: "br-mgmt"
- container_type: "veth"
- container_interface: "eth1"
- ip_from_q: "container"
- type: "raw"
- group_binds:
- - all_containers
- - hosts
- is_container_address: true
- is_ssh_address: true
- - network:
- container_bridge: "br-vxlan"
- container_type: "veth"
- container_interface: "eth10"
- ip_from_q: "tunnel"
- type: "vxlan"
- range: "1:1000"
- net_name: "vxlan"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth12"
- host_bind_override: "eth12"
- type: "flat"
- net_name: "flat"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth11"
- host_bind_override: "eth12"
- type: "vlan"
- range: "102:199"
- net_name: "physnet1"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-storage"
- container_type: "veth"
- container_interface: "eth2"
- ip_from_q: "storage"
- type: "raw"
- group_binds:
- - glance_api
- - cinder_api
- - cinder_volume
- - nova_compute
-
-# ##
-# ## Infrastructure
-# ##
-
-# galera, memcache, rabbitmq, utility
-shared-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# repository (apt cache, python packages, etc)
-repo-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# load balancer
-# Ideally the load balancer should not use the Infrastructure hosts.
-# Dedicated hardware is best for improved performance and security.
-haproxy_hosts:
- controller00:
- ip: 172.29.236.11
-
-# rsyslog server
-# log_hosts:
-# log1:
-# ip: 172.29.236.14
-
-# ##
-# ## OpenStack
-# ##
-
-# keystone
-identity_hosts:
- controller00:
- ip: 172.29.236.11
-
-# cinder api services
-storage-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# glance
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-image_hosts:
- controller00:
- ip: 172.29.236.11
- container_vars:
- limit_container_types: glance
- glance_nfs_client:
- - server: "172.29.244.12"
- remote_path: "/images"
- local_path: "/var/lib/glance/images"
- type: "nfs"
- options: "_netdev,auto"
-
-# nova api, conductor, etc services
-compute-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# heat
-orchestration_hosts:
- controller00:
- ip: 172.29.236.11
-
-# horizon
-dashboard_hosts:
- controller00:
- ip: 172.29.236.11
-
-# neutron server, agents (L3, etc)
-network_hosts:
- controller00:
- ip: 172.29.236.11
-
-# nova hypervisors
-compute_hosts:
- compute00:
- ip: 172.29.236.12
-
-# cinder volume hosts (NFS-backed)
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-storage_hosts:
- controller00:
- ip: 172.29.236.11
- container_vars:
- cinder_backends:
- limit_container_types: cinder_volume
- nfs_volume:
- volume_backend_name: NFS_VOLUME1
- volume_driver: cinder.volume.drivers.nfs.NfsDriver
- nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
- nfs_shares_config: /etc/cinder/nfs_shares
- shares:
- - ip: "172.29.244.12"
- share: "/volumes"
diff --git a/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/noha/openstack_user_config.yml b/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/noha/openstack_user_config.yml
deleted file mode 100644
index 7ed9cd32..00000000
--- a/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/noha/openstack_user_config.yml
+++ /dev/null
@@ -1,173 +0,0 @@
----
-cidr_networks:
- container: 172.29.236.0/22
- tunnel: 172.29.240.0/22
- storage: 172.29.244.0/22
-
-used_ips:
- - "172.29.236.1,172.29.236.50"
- - "172.29.240.1,172.29.240.50"
- - "172.29.244.1,172.29.244.50"
- - "172.29.248.1,172.29.248.50"
-
-global_overrides:
- internal_lb_vip_address: 172.29.236.11
- external_lb_vip_address: 192.168.122.3
- tunnel_bridge: "br-vxlan"
- management_bridge: "br-mgmt"
- provider_networks:
- - network:
- container_bridge: "br-mgmt"
- container_type: "veth"
- container_interface: "eth1"
- ip_from_q: "container"
- type: "raw"
- group_binds:
- - all_containers
- - hosts
- is_container_address: true
- is_ssh_address: true
- - network:
- container_bridge: "br-vxlan"
- container_type: "veth"
- container_interface: "eth10"
- ip_from_q: "tunnel"
- type: "vxlan"
- range: "1:1000"
- net_name: "vxlan"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth12"
- host_bind_override: "eth12"
- type: "flat"
- net_name: "flat"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth11"
- host_bind_override: "eth12"
- type: "vlan"
- range: "102:199"
- net_name: "physnet1"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-storage"
- container_type: "veth"
- container_interface: "eth2"
- ip_from_q: "storage"
- type: "raw"
- group_binds:
- - glance_api
- - cinder_api
- - cinder_volume
- - nova_compute
-
-# ##
-# ## Infrastructure
-# ##
-
-# galera, memcache, rabbitmq, utility
-shared-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# repository (apt cache, python packages, etc)
-repo-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# load balancer
-# Ideally the load balancer should not use the Infrastructure hosts.
-# Dedicated hardware is best for improved performance and security.
-haproxy_hosts:
- controller00:
- ip: 172.29.236.11
-
-# rsyslog server
-# log_hosts:
-# log1:
-# ip: 172.29.236.14
-
-# ##
-# ## OpenStack
-# ##
-
-# keystone
-identity_hosts:
- controller00:
- ip: 172.29.236.11
-
-# cinder api services
-storage-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# glance
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-image_hosts:
- controller00:
- ip: 172.29.236.11
- container_vars:
- limit_container_types: glance
- glance_nfs_client:
- - server: "172.29.244.12"
- remote_path: "/images"
- local_path: "/var/lib/glance/images"
- type: "nfs"
- options: "_netdev,auto"
-
-# nova api, conductor, etc services
-compute-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# heat
-orchestration_hosts:
- controller00:
- ip: 172.29.236.11
-
-# horizon
-dashboard_hosts:
- controller00:
- ip: 172.29.236.11
-
-# neutron server, agents (L3, etc)
-network_hosts:
- controller00:
- ip: 172.29.236.11
-
-# nova hypervisors
-compute_hosts:
- compute00:
- ip: 172.29.236.12
- compute01:
- ip: 172.29.236.13
-
-# cinder volume hosts (NFS-backed)
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-storage_hosts:
- controller00:
- ip: 172.29.236.11
- container_vars:
- cinder_backends:
- limit_container_types: cinder_volume
- nfs_volume:
- volume_backend_name: NFS_VOLUME1
- volume_driver: cinder.volume.drivers.nfs.NfsDriver
- nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
- nfs_shares_config: /etc/cinder/nfs_shares
- shares:
- - ip: "172.29.244.12"
- share: "/volumes"
diff --git a/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/tasks/main.yml b/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/tasks/main.yml
deleted file mode 100644
index 7e872787..00000000
--- a/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/tasks/main.yml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-# SPDX-license-identifier: Apache-2.0
-##############################################################################
-# Copyright (c) 2017 Ericsson AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-- name: copy user_variables_os-odl-nofeature.yml
- template:
- src: "user_variables_os-odl-nofeature.yml.j2"
- dest: "{{openstack_osa_etc_path}}/user_variables_os-odl-nofeature.yml"
-
-- name: copy user_variables_os-odl-nofeature-ha.yml
- copy:
- src: "{{xci_flavor}}/user_variables_os-odl-nofeature-ha.yml"
- dest: "{{openstack_osa_etc_path}}/user_variables_os-odl-nofeature-ha.yml"
- when:
- - xci_flavor == "ha"
-
-- name: copy os-odl-nofeature scenario specific openstack_user_config.yml
- copy:
- src: "{{xci_flavor}}/openstack_user_config.yml"
- dest: "{{openstack_osa_etc_path}}/openstack_user_config.yml"
diff --git a/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/templates/user_variables_os-odl-nofeature.yml.j2 b/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/templates/user_variables_os-odl-nofeature.yml.j2
deleted file mode 100644
index eb08adc0..00000000
--- a/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/templates/user_variables_os-odl-nofeature.yml.j2
+++ /dev/null
@@ -1,45 +0,0 @@
----
-# Copyright (c) 2017 Ericsson AB and others.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# ##
-# ## This file contains commonly used overrides for convenience. Please inspect
-# ## the defaults for each role to find additional override options.
-# ##
-
-{% raw %}
-# Ensure the openvswitch kernel module is loaded
-openstack_host_specific_kernel_modules:
- - name: "openvswitch"
- pattern: "CONFIG_OPENVSWITCH"
- group: "network_hosts"
-
-# Use OpenDaylight SDN Controller
-neutron_plugin_type: "ml2.opendaylight"
-neutron_opendaylight_conf_ini_overrides:
- ml2_odl:
- username: "admin"
- password: "admin"
- port_binding_controller: "pseudo-agentdb-binding"
- url: "http://{{ internal_lb_vip_address }}:8180/controller/nb/v2/neutron"
-
-neutron_ml2_drivers_type: "flat,vlan,vxlan"
-
-neutron_plugin_base:
- - odl-router_v2
-{% endraw %}
-
-{% if odl_repo_version is defined %}
-odl_version: "{{ odl_repo_version }}"
-{% endif %}
diff --git a/xci/scenarios/os-odl-nofeature/vars/main.yml b/xci/scenarios/os-odl-nofeature/vars/main.yml
deleted file mode 100644
index 629b50c7..00000000
--- a/xci/scenarios/os-odl-nofeature/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-odl_repo_version: "{{ lookup('env','ODL_VERSION') }}"
diff --git a/xci/scenarios/os-odl-nofeature/xci_overrides b/xci/scenarios/os-odl-nofeature/xci_overrides
deleted file mode 100644
index 2c65df0d..00000000
--- a/xci/scenarios/os-odl-nofeature/xci_overrides
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/bash
-
-if [[ $DEPLOY_SCENARIO == "os-odl-nofeature" ]] && [[ $XCI_FLAVOR == "ha" ]]; then
- export VM_MEMORY_SIZE=20480
-elif [[ $DEPLOY_SCENARIO == "os-odl-nofeature" ]]; then
- export VM_MEMORY_SIZE=16384
-fi
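Editor's note: before its removal, this hook was sourced by xci-deploy.sh, which looks up any xci_overrides file for the active scenario (see the sourcing change near the end of this patch). A usage sketch of the effect while the file still existed:

    # illustrative invocation -- the HA branch above raises the VM memory
    export DEPLOY_SCENARIO=os-odl-nofeature
    export XCI_FLAVOR=ha
    ./xci-deploy.sh    # xci_overrides is sourced; VM_MEMORY_SIZE=20480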
diff --git a/xci/scripts/update-osa-version-files.sh b/xci/scripts/update-osa-version-files.sh
index bb15fcba..bb0d82ab 100755
--- a/xci/scripts/update-osa-version-files.sh
+++ b/xci/scripts/update-osa-version-files.sh
@@ -76,30 +76,26 @@ cat $tempdir/openstack-ansible/ansible-role-requirements.yml >> $releng_xci_base
# Update the pinned OSA version
sed -i -e "/^export OPENSTACK_OSA_VERSION/s@:-\"[a-z0-9]*@:-\"${1}@" \
- -e "s/\(^# HEAD of osa.*of \).*/\1$(date +%d\.%m\.%Y)/" $releng_xci_base/config/pinned-versions
+ -e "s@\(^# HEAD of osa \).*@\1\"${OPENSTACK_OSA_VERSION:-master}\" as of $(date +%d\.%m\.%Y)@" $releng_xci_base/config/pinned-versions
# Update the pinned bifrost version
if [[ -n ${2:-} ]]; then
echo "Updating bifrost..."
sed -i -e "/^export OPENSTACK_BIFROST_VERSION/s@:-\"[a-z0-9]*@:-\"${2}@" \
- -e "s/\(^# HEAD of bifrost.*of \).*/\1$(date +%d\.%m\.%Y)/" $releng_xci_base/config/pinned-versions
+ -e "s/\(^# HEAD of bifrost \).*/\1\"${OPENSTACK_OSA_VERSION:-master}\" as of $(date +%d\.%m\.%Y)/" $releng_xci_base/config/pinned-versions
# Get ironic shas
for ironic in ironic_git_url ironic_client_git_url ironic_inspector_git_url ironic_inspector_client_git_url; do
- ironic_sha=$(git ls-remote ${!ironic} | grep master | awk '{print $1}')
+ ironic_sha=$(git ls-remote ${!ironic} | grep "${OPENSTACK_OSA_VERSION:-master}" | awk '{print $1}')
ironic=${ironic/_git*/}
echo "... updating ${ironic}"
sed -i -e "/^export BIFROST_${ironic^^}_VERSION/s@:-\"[a-z0-9]*@:-\"${ironic_sha}@" \
- -e "s/\(^# HEAD of ${ironic/_/-}.*of \).*/\1$(date +%d\.%m\.%Y)/" $releng_xci_base/config/pinned-versions
+ -e "s/\(^# HEAD of ${ironic/_/-} \).*/\1\"${OPENSTACK_OSA_VERSION:-master}\" as of $(date +%d\.%m\.%Y)/" $releng_xci_base/config/pinned-versions
done
fi
cp $tempdir/openstack-ansible/playbooks/defaults/repo_packages/openstack_services.yml ${releng_xci_base}/installer/osa/files/.
cp $tempdir/openstack-ansible/global-requirement-pins.txt ${releng_xci_base}/installer/osa/files/.
-# Switch sources from git.openstack.org to github.com
-sed -i "s@^src:.*git\.openstack\.org/\(.*\)@src: https://github\.com/\1@g" ${releng_xci_base}/installer/osa/files/ansible-role-requirements.yml
-sed -i "s@\(^.*git_repo:\).*git\.openstack\.org/\(.*\)@\1 https://github\.com/\2@g" ${releng_xci_base}/installer/osa/files/openstack_services.yml
-
popd &> /dev/null
printme ""
diff --git a/xci/scripts/vm/start-new-vm.sh b/xci/scripts/vm/start-new-vm.sh
index f266d64f..965cfe4c 100755
--- a/xci/scripts/vm/start-new-vm.sh
+++ b/xci/scripts/vm/start-new-vm.sh
@@ -118,18 +118,15 @@ fi
COMMON_DISTRO_PKGS=(vim strace gdb htop dnsmasq docker iptables ebtables virt-manager qemu-kvm)
case ${ID,,} in
- *suse)
- pkg_mgr_cmd="sudo zypper -q -n ref"
- pkg_mgr_cmd+=" && sudo zypper -q -n install ${COMMON_DISTRO_PKGS[@]} qemu-tools libvirt-daemon libvirt-client libvirt-daemon-driver-qemu"
+ *suse*)
+ pkg_mgr_cmd="sudo zypper -q -n install ${COMMON_DISTRO_PKGS[@]} qemu-tools libvirt-daemon libvirt-client libvirt-daemon-driver-qemu > /dev/null"
;;
centos)
- pkg_mgr_cmd="yum updateinfo"
- pkg_mgr_cmd+=" && sudo yum install -q -y epel-release"
- pkg_mgr_cmd+=" && sudo yum install -q -y in ${COMMON_DISTRO_PKGS[@]} qemu-kvm-tools qemu-img libvirt-daemon-kvm"
+ pkg_mgr_cmd="sudo yum install -C -q -y epel-release > /dev/null"
+ pkg_mgr_cmd+=" && sudo yum install -C -q -y ${COMMON_DISTRO_PKGS[@]} qemu-kvm-tools qemu-img libvirt-daemon-kvm > /dev/null"
;;
ubuntu)
- pkg_mgr_cmd="sudo apt-get update"
- pkg_mgr_cmd+=" && sudo apt-get install -y -q=3 ${COMMON_DISTRO_PKGS[@]} libvirt-bin qemu-utils docker.io"
+ pkg_mgr_cmd="sudo apt-get install --no-upgrade -y -q=3 ${COMMON_DISTRO_PKGS[@]} libvirt-bin qemu-utils docker.io > /dev/null"
;;
esac
@@ -353,6 +350,9 @@ $vm_ssh ${VM_NAME} "sudo mv /home/devuser/releng-xci/vm_hosts.txt /etc/hosts"
# Disable 3-level nested virtualization since it makes things terribly slow
$vm_ssh ${VM_NAME} "sudo bash -c 'echo \"options kvm_intel nested=0\" > /etc/modprobe.d/qemu-system-x86.conf'"
$vm_ssh ${VM_NAME} "sudo modprobe -r kvm_intel && sudo modprobe -a kvm_intel"
+$vm_ssh ${VM_NAME} "sudo bash -c 'mkdir -p /root/.ssh && cat /home/devuser/.ssh/id_rsa.pub > /root/.ssh/authorized_keys'"
+$vm_ssh ${VM_NAME} "sudo bash -c 'mkdir -p /var/lib/libvirt/images'"
+rsync -a -e "$vm_ssh" --include "${BASE_PATH}/${XCI_DEPLOYMENT_IMAGE}*" --exclude '*' root@${VM_NAME}:/var/lib/libvirt/images/
set +e
@@ -367,7 +367,7 @@ if [[ $? != 0 ]]; then
#!/bin/bash
set -o pipefail
export XCI_FLAVOR=mini
-export BIFROST_USE_PREBUILT_IMAGES=true
+export BIFROST_CREATE_IMAGE_VIA_DIB=false
cd ~/releng-xci/xci
./xci-deploy.sh | ts
EOF
diff --git a/xci/var/ericsson-pod2-idf.yml b/xci/var/ericsson-pod2-idf.yml
new file mode 100644
index 00000000..2839b120
--- /dev/null
+++ b/xci/var/ericsson-pod2-idf.yml
@@ -0,0 +1,187 @@
+##############################################################################
+# Copyright (c) 2018 Ericsson AB, Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+### ERICSSON POD 2 installer descriptor file ###
+idf:
+ version: 0.1
+ installer: ['apex', 'compass4nfv', 'daisy', 'osa', 'osh']
+ net_config: &net_config
+ admin:
+ interface: 2
+ network: 192.168.122.0 # Untagged, 'PXE/Admin' on wiki, different IP
+ mask: 24
+ mgmt:
+ interface: 1
+ network: 172.29.236.0
+ mask: 22
+ storage:
+ interface: 3
+ network: 172.29.240.0 # Tagged, not the same as 'storage' on wiki
+ mask: 22
+ private:
+ interface: 4
+ network: 172.29.242.0 # Tagged, not the same as 'private' on wiki
+ mask: 22
+ public:
+ interface: 2
+ network: 192.168.122.0 # Untagged, 'public' on wiki
+ mask: 24
+ gateway: 192.168.122.1
+ dns:
+ - 8.8.8.8
+ - 8.8.4.4
+ osa: &idf_osa
+ nodes_roles:
+ opnfv: [deployment]
+ node1: [controller]
+ node2: [compute, storage]
+ node3: [compute, storage]
+ node4: [controller]
+ node5: [controller]
+ groups:
+ openstack:
+ - controller
+ - compute
+ - storage
+ hostnames:
+ opnfv: opnfv
+ node1: controller00
+ node2: compute00
+ node3: compute01
+ node4: controller01
+ node5: controller02
+ network:
+ # network mapping
+ network_mapping:
+ # Management network used by installer components to communicate
+ net-mgmt: admin
+ # Storage Network
+ net-storage: storage
+ # Internal network for communication between VNFs
+ net-internal: private
+ # Public network for VNF remote access (ext-net in OpenStack)
+ net-vnf: public
+ deployment_host_interfaces:
+ # Ordered-list, index should be in sync with interface index in PDF
+ - 'ens1f1' # should be eno49, but it is currently broken
+ - 'ens1f0'
+ - 'ens1f1'
+ - 'ens2f0'
+ - 'ens2f1'
+ osh: &idf_osh
+ nodes_roles:
+ opnfv: [opnfv]
+ node1: [kube-master, etcd, vault]
+ node2: [kube-node]
+ node3: [kube-node]
+ node4: [kube-master, etcd, vault]
+ node5: [kube-master, etcd, vault]
+ groups:
+ k8s-cluster:
+ - kube-node
+ - kube-master
+ hostnames:
+ opnfv: opnfv
+ node1: master1
+ node2: node1
+ node3: node2
+ node4: master2
+ node5: master3
+ network:
+ # network mapping
+ network_mapping:
+ # Management network used by installer components to communicate
+ net-mgmt: admin
+ # Storage Network
+ net-storage: storage
+ # Internal network for communication between VNFs
+ net-internal: private
+ # Public network for VNF remote access (ext-net in OpenStack)
+ net-vnf: public
+ deployment_host_interfaces:
+ # Ordered-list, index should be in sync with interface index in PDF
+ - 'ens1f1' # should be eno49, but it is currently broken
+ - 'ens1f0'
+ - 'ens1f1'
+ - 'ens2f0'
+ - 'ens2f1'
+ kubespray: &idf_kubespray
+ nodes_roles:
+ opnfv: [opnfv]
+ node1: [kube-master, etcd, vault]
+ node2: [kube-node]
+ node3: [kube-node]
+ node4: [kube-master, etcd, vault]
+ node5: [kube-master, etcd, vault]
+ groups:
+ k8s-cluster:
+ - kube-node
+ - kube-master
+ hostnames:
+ opnfv: opnfv
+ node1: master1
+ node2: node1
+ node3: node2
+ node4: master2
+ node5: master3
+ network:
+ # network mapping
+ network_mapping:
+ # Management network used by installer components to communicate
+ net-mgmt: admin
+ # Storage Network
+ net-storage: storage
+ # Internal network for communication between VNFs
+ net-internal: private
+ # Public network for VNF remote access (ext-net in OpenStack)
+ net-vnf: public
+ deployment_host_interfaces:
+ # Ordered-list, index should be in sync with interface index in PDF
+ - 'ens1f1' # should be eno49, but it is currently broken
+ - 'ens1f0'
+ - 'ens1f1'
+ - 'ens2f0'
+ - 'ens2f1'
+
+xci:
+ pod_name: pod1
+ net_config: *net_config
+ flavors:
+ mini:
+ - opnfv
+ - node1
+ - node2
+ noha:
+ - opnfv
+ - node1
+ - node2
+ - node3
+ ha:
+ - opnfv
+ - node1
+ - node2
+ - node3
+ - node4
+ - node5
+
+ # net_config network to be used by the PXE
+ pxe_network: public
+
+ # As the MAC addresses of generated bridges are unpredictable, we use a list
+ # of local bridges to create libvirt networks
+ jumphost_interfaces_bridges:
+ - name: virbr0
+ ip: 192.168.122.1
+
+ extra_addresses:
+ opnfv: 192.168.122.2
+
+ installers:
+ osa: *idf_osa
+ kubespray: *idf_kubespray
+ osh: *idf_osh
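Editor's note: the file defines each installer block once with a YAML anchor (&idf_osa, &idf_osh, &idf_kubespray) and wires them into xci.installers with aliases. A standalone sketch of the mechanism, with made-up values, showing that an alias is a reference rather than a copy:

    section: &section          # anchor: define the mapping once
      key: value
    consumers:
      first: *section          # alias: both entries resolve to {key: value}
      second: *section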
diff --git a/xci/var/ericsson-pod2-pdf.yml b/xci/var/ericsson-pod2-pdf.yml
new file mode 100644
index 00000000..4c7271ec
--- /dev/null
+++ b/xci/var/ericsson-pod2-pdf.yml
@@ -0,0 +1,269 @@
+---
+### POD descriptor file ###
+
+version: 1.0
+details:
+ pod_owner: Jose Lausuch
+ contact: jose.lausuch@ericsson.com
+ lab: Ericsson
+ location: Rosersberg, Sweden
+ type: production
+ link: https://wiki.opnfv.org/display/pharos/CI-ERICSSON-POD2
+##############################################################################
+jumphost:
+ name: CI-POD2-HOST
+ node: &nodeparams
+ type: baremetal
+ vendor: HP
+ model: ProLiant BL460c Gen9
+ arch: x86_64
+ cpus: 2
+ cpu_cflags: haswell
+ cores: 12
+ memory: 128G
+ disks: &disks
+ - name: 'disk1'
+ disk_capacity: 1200G
+ disk_type: hdd
+ disk_interface: scsi
+ disk_rotation: 15000
+ os: ubuntu-16.04
+ remote_params: &remoteparams
+ type: ipmi
+ versions:
+ - 1.0
+ - 2.0
+ user: opnfv
+ pass: Winter2017
+ remote_management:
+ <<: *remoteparams
+ address: 172.16.2.11
+ mac_address: "58:20:B1:01:8A:F2"
+ interfaces:
+ - name: 'nic0'
+ speed: 1gb
+ features: 'dpdk|sriov'
+ address: 172.16.2.11
+ mac_address: "ec:b1:d7:a1:a1:10"
+ vlan: native
+ - name: 'nic1'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ address: 172.29.236.10
+ mac_address: "5c:b9:01:8b:9f:e8"
+ vlan: native
+ - name: 'nic2'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ address: 192.168.122.2
+ mac_address: "5c:b9:01:8b:9f:e9"
+ vlan: native
+ - name: 'nic3'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ address: 172.29.240.10
+ mac_address: "5c:b9:01:8b:9f:ec"
+ vlan: 3010
+ - name: 'nic4'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ address: 172.29.242.10
+ mac_address: "5c:b9:01:8b:9f:ed"
+ vlan: 3010
+##############################################################################
+nodes:
+ - name: node1
+ node: *nodeparams
+ disks: *disks
+ remote_management:
+ <<: *remoteparams
+ address: 172.16.2.12
+ mac_address: "58:20:B1:01:8B:F0"
+ interfaces:
+ - name: 'nic0'
+ speed: 1gb
+ features: 'dpdk|sriov'
+ mac_address: "ec:b1:d7:a2:44:a0"
+ address: "192.168.122.3"
+ vlan: native
+ - name: 'nic1'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:a6:94"
+ address: "172.29.236.11"
+ vlan: native
+ - name: 'nic2'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:a6:95"
+ address: "192.168.122.3"
+ vlan: native
+ - name: 'nic3'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:a6:80"
+ address: "172.29.240.11"
+ vlan: 3010
+ - name: 'nic4'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:a6:81"
+ address: "172.29.242.11"
+ vlan: 3010
+ ############################################################################
+ - name: node2
+ node: *nodeparams
+ disks: *disks
+ remote_management:
+ <<: *remoteparams
+ address: 172.16.2.13
+ mac_address: "58:20:B1:01:8E:FC"
+ interfaces:
+ - name: 'nic0'
+ speed: 1gb
+ features: 'dpdk|sriov'
+ mac_address: "ec:b1:d7:a2:44:80"
+ address: "192.168.122.4"
+ vlan: native
+ - name: 'nic1'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:a6:30"
+ address: "172.29.236.12"
+ vlan: native
+ - name: 'nic2'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:a6:31"
+ address: "192.168.122.4"
+ vlan: native
+ - name: 'nic3'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:99:64"
+ address: "172.29.240.12"
+ vlan: 3010
+ - name: 'nic4'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:99:65"
+ address: "172.29.242.12"
+ vlan: 3010
+ ############################################################################
+ - name: node3
+ node: *nodeparams
+ disks: *disks
+ remote_management:
+ <<: *remoteparams
+ address: 172.16.2.14
+ mac_address: "58:20:B1:01:8D:32"
+ interfaces:
+ - name: 'nic0'
+ speed: 1gb
+ features: 'dpdk|sriov'
+ mac_address: "ec:b1:d7:a2:43:c0"
+ address: "192.168.122.5"
+ vlan: native
+ - name: 'nic1'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:9d:4c"
+ address: "172.29.236.13"
+ vlan: native
+ - name: 'nic2'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:9d:4d"
+ address: "192.168.122.5"
+ vlan: native
+ - name: 'nic3'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:9d:6c"
+ address: "172.29.240.13"
+ vlan: 3010
+ - name: 'nic4'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:9d:6d"
+ address: "172.29.242.13"
+ vlan: 3010
+ ############################################################################
+ - name: node4
+ node: *nodeparams
+ disks: *disks
+ remote_management:
+ <<: *remoteparams
+ address: 172.16.2.15
+ mac_address: "58:20:B1:01:8B:FC"
+ interfaces:
+ - name: 'nic0'
+ speed: 1gb
+ features: 'dpdk|sriov'
+ mac_address: "ec:b1:d7:a1:8b:d0"
+ address: "192.168.122.6"
+ vlan: native
+ - name: 'nic1'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:a5:fc"
+ address: "172.29.236.14"
+ vlan: native
+ - name: 'nic2'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:a5:fd"
+ address: "192.168.122.6"
+ vlan: native
+ - name: 'nic3'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:a6:08"
+ address: "172.29.240.14"
+ vlan: 3010
+ - name: 'nic4'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:a6:09"
+ address: "172.29.242.14"
+ vlan: 3010
+ ############################################################################
+ - name: node5
+ node: *nodeparams
+ disks: *disks
+ remote_management:
+ <<: *remoteparams
+ address: 172.16.2.16
+ mac_address: "58:20:B1:01:8F:EA"
+ interfaces:
+ - name: 'nic0'
+ speed: 1gb
+ features: 'dpdk|sriov'
+ mac_address: "ec:b1:d7:a1:bd:60"
+ address: "192.168.122.7"
+ vlan: native
+ - name: 'nic1'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:a6:e8"
+ address: "172.29.236.15"
+ vlan: native
+ - name: 'nic2'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:a6:e9"
+ address: "192.168.122.7"
+ vlan: native
+ - name: 'nic3'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:97:14"
+ address: "172.29.240.15"
+ vlan: 3010
+ - name: 'nic4'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:97:15"
+ address: "172.29.242.15"
+ vlan: 3010
+
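Editor's note: each node in this PDF merges the shared *nodeparams, *disks and *remoteparams definitions and overrides only what differs. A standalone sketch of the YAML merge key (<<:) semantics used here, with made-up values:

    base: &base
      type: ipmi
      user: opnfv
    node_a:
      <<: *base              # pulls in type and user
      address: 172.16.2.99   # keys set locally win over merged ones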
diff --git a/xci/var/idf.yml b/xci/var/idf.yml
index 8d9352b6..8ed55f6f 100644
--- a/xci/var/idf.yml
+++ b/xci/var/idf.yml
@@ -11,11 +11,12 @@
idf:
version: 0.1
- osa:
- kolla:
- k8s:
net_config: &net_config
admin:
+ interface: 2
+ network: 192.168.122.0
+ mask: 22
+ mgmt:
interface: 0
network: 172.29.236.0
mask: 22
@@ -28,23 +29,123 @@ idf:
network: 192.168.122.0
mask: 24
gateway: 192.168.122.1
- dns: 8.8.8.8
+ dns:
+ - 192.168.122.1
private:
interface: 3
network: 172.29.244.0
mask: 22
-
+ osa: &idf_osa
+ nodes_roles:
+ opnfv: [deployment]
+ node1: [controller]
+ node2: [compute, storage]
+ node3: [compute, storage]
+ node4: [controller]
+ node5: [controller]
+ groups:
+ openstack:
+ - controller
+ - compute
+ - storage
+ hostnames:
+ opnfv: opnfv
+ node1: controller00
+ node2: compute00
+ node3: compute01
+ node4: controller01
+ node5: controller02
+ network:
+ # network mapping
+ network_mapping:
+ # Management network used by installer components to communicate
+ net-mgmt: mgmt
+ # Storage Network
+ net-storage: storage
+ # Internal network for communication between VNFs
+ net-internal: private
+ # Public network for VNF remote access (ext-net in OpenStack)
+ net-vnf: public
+ kubespray: &idf_kubespray
+ nodes_roles:
+ opnfv: [opnfv]
+ node1: [kube-master, etcd, vault]
+ node2: [kube-node]
+ node3: [kube-node]
+ node4: [kube-master, etcd, vault]
+ node5: [kube-master, etcd, vault]
+ groups:
+ k8s-cluster:
+ - kube-node
+ - kube-master
+ hostnames:
+ opnfv: opnfv
+ node1: master1
+ node2: node1
+ node3: node2
+ node4: master2
+ node5: master3
+ network:
+ # network mapping
+ network_mapping:
+ # Management network used by installer components to communicate
+ net-mgmt: mgmt
+ # Storage Network
+ net-storage: storage
+ # Internal network for communication between VNFs
+ net-internal: private
+ # Public network for VNF remote access (ext-net in OpenStack)
+ net-vnf: public
+ osh: &idf_osh
+ nodes_roles:
+ opnfv: [opnfv]
+ node1: [kube-master, etcd, vault]
+ node2: [kube-node]
+ node3: [kube-node]
+ node4: [kube-master, etcd, vault]
+ node5: [kube-master, etcd, vault]
+ groups:
+ k8s-cluster:
+ - kube-node
+ - kube-master
+ hostnames:
+ opnfv: opnfv
+ node1: master1
+ node2: node1
+ node3: node2
+ node4: master2
+ node5: master3
+ network:
+ # network mapping
+ network_mapping:
+ # Management network used by installer components to communicate
+ net-mgmt: mgmt
+ # Storage Network
+ net-storage: storage
+ # Internal network for communication between VNFs
+ net-internal: private
+ # Public network for VNF remote access (ext-net in OpenStack)
+ net-vnf: public
xci:
pod_name: vpod1
net_config: *net_config
- nodes_roles:
- opnfv_host: [opnfv_host]
- node1: [compute, storage]
- node2: [compute, storage]
- node3: [controller]
- node4: [controller]
- node5: [controller]
-
+ flavors:
+ mini:
+ - opnfv
+ - node1
+ - node2
+ noha:
+ - opnfv
+ - node1
+ - node2
+ - node3
+ ha:
+ - opnfv
+ - node1
+ - node2
+ - node3
+ - node4
+ - node5
# net_config network to be used by the PXE
pxe_network: public
@@ -55,15 +156,9 @@ xci:
ip: 192.168.122.1
extra_addresses:
- opnfv_host: 192.168.122.2
+ opnfv: 192.168.122.2
- # network mapping
- network_mapping:
- # Management network used by installer components to communicate
- net-mgmt: admin
- # Storage Network
- net-storage: storage
- # Internal network for communication between VNF
- net-internal: private
- # Public network for VNF remote acces (ext-net in Openstack)
- net-vnf: public
+ installers:
+ osa: *idf_osa
+ kubespray: *idf_kubespray
+ osh: *idf_osh
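Editor's note: the flat nodes_roles: list is replaced by per-flavor node sets plus per-installer role maps. A sketch of the effective result for XCI_FLAVOR=mini with INSTALLER_TYPE=osa (how the consuming playbooks combine the two maps is an assumption; the values themselves come from this file):

    # mini selects [opnfv, node1, node2]; via installers.osa.nodes_roles:
    opnfv: [deployment]
    node1: [controller]
    node2: [compute, storage]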
diff --git a/xci/var/lf-pod4-idf.yml b/xci/var/lf-pod4-idf.yml
new file mode 100644
index 00000000..55ca6b63
--- /dev/null
+++ b/xci/var/lf-pod4-idf.yml
@@ -0,0 +1,222 @@
+##############################################################################
+# Copyright (c) 2018 Linux Foundation, Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+### LF POD 4 installer descriptor file ###
+
+idf:
+ version: 0.1
+ installer: ['apex', 'compass4nfv', 'daisy', 'fuel', 'osa', 'osh']
+ net_config: &net_config
+ oob:
+ interface: 0
+ ip-range: 172.30.8.83-172.30.8.88
+ vlan: 410
+ mask: 24
+ admin:
+ interface: 0
+ vlan: native
+ network: 192.168.122.0
+ gateway: 192.168.122.1
+ dns: 8.8.8.8
+ mask: 24
+ mgmt:
+ interface: 1
+ network: 172.29.236.0
+ mask: 22
+ storage:
+ interface: 3
+ network: 172.29.240.0
+ mask: 24
+ private:
+ interface: 2
+ network: 172.29.242.0
+ mask: 24
+ public:
+ interface: 4
+ network: 192.168.122.0
+ mask: 24
+ gateway: 192.168.122.1
+ dns:
+ - 8.8.8.8
+ - 8.8.4.4
+ osa: &idf_osa
+ nodes_roles:
+ opnfv: [deployment]
+ pod4-node1: [controller]
+ pod4-node2: [compute, storage]
+ pod4-node3: [compute, storage]
+ pod4-node4: [controller]
+ pod4-node5: [controller]
+ groups:
+ openstack:
+ - controller
+ - compute
+ - storage
+ hostnames:
+ opnfv: opnfv
+ pod4-node1: controller00
+ pod4-node2: compute00
+ pod4-node3: compute01
+ pod4-node4: controller01
+ pod4-node5: controller02
+ network:
+ # network mapping
+ network_mapping:
+ # Management network used by installer components to communicate
+ net-mgmt: admin
+ # Storage Network
+ net-storage: storage
+ # Internal network for communication between VNFs
+ net-internal: private
+ # Public network for VNF remote access (ext-net in OpenStack)
+ net-vnf: public
+ deployment_host_interfaces:
+ # Ordered-list, index should be in sync with interface index in PDF
+ - 'eno1'
+ - 'eno3.450'
+ osh: &idf_osh
+ nodes_roles:
+ opnfv: [opnfv]
+ pod4-node1: [kube-master, etcd, vault]
+ pod4-node2: [kube-node]
+ pod4-node3: [kube-node]
+ pod4-node4: [kube-master, etcd, vault]
+ pod4-node5: [kube-master, etcd, vault]
+ groups:
+ k8s-cluster:
+ - kube-node
+ - kube-master
+ hostnames:
+ opnfv: opnfv
+ pod4-node1: master1
+ pod4-node2: node1
+ pod4-node3: node2
+ pod4-node4: master2
+ pod4-node5: master3
+ network:
+ # network mapping
+ network_mapping:
+ # Management network used by installer components to communicate
+ net-mgmt: admin
+ # Storage Network
+ net-storage: storage
+ # Internal network for communication between VNFs
+ net-internal: private
+ # Public network for VNF remote access (ext-net in OpenStack)
+ net-vnf: public
+ deployment_host_interfaces:
+ # Ordered-list, index should be in sync with interface index in PDF
+ - 'eno1'
+ - 'eno3.450'
+ kubespray: &idf_kubespray
+ nodes_roles:
+ opnfv: [opnfv]
+ pod4-node1: [kube-master, etcd, vault]
+ pod4-node2: [kube-node]
+ pod4-node3: [kube-node]
+ pod4-node4: [kube-master, etcd, vault]
+ pod4-node5: [kube-master, etcd, vault]
+ groups:
+ k8s-cluster:
+ - kube-node
+ - kube-master
+ hostnames:
+ opnfv: opnfv
+ pod4-node1: master1
+ pod4-node2: node1
+ pod4-node3: node2
+ pod4-node4: master2
+ pod4-node5: master3
+ network:
+ # network mapping
+ network_mapping:
+ # Management network used by installer components to communicate
+ net-mgmt: admin
+ # Storage Network
+ net-storage: storage
+ # Internal network for communication between VNFs
+ net-internal: private
+ # Public network for VNF remote access (ext-net in OpenStack)
+ net-vnf: public
+ deployment_host_interfaces:
+ # Ordered-list, index should be in sync with interface index in PDF
+ - 'eno1'
+ - 'eno3.450'
+ fuel:
+ jumphost:
+ bridges:
+ admin: 'pxebr'
+ mgmt: 'br-ctl'
+ private: ~
+ public: ~
+ network:
+ node:
+ # Ordered-list, index should be in sync with node index in PDF
+ - interfaces: &interfaces
+ # Ordered-list, index should be in sync with interface index in PDF
+ - 'eno1'
+ - 'eno3'
+ - 'eno4'
+ busaddr: &busaddr
+ # Bus-info reported by `ethtool -i ethX`
+ - '0000:04:00.0'
+ - '0000:02:00.0'
+ - '0000:02:00.1'
+ - interfaces: *interfaces
+ busaddr: *busaddr
+ - interfaces: *interfaces
+ busaddr: *busaddr
+ - interfaces: *interfaces
+ busaddr: *busaddr
+ - interfaces: *interfaces
+ busaddr: *busaddr
+xci:
+ pod_name: lf-pod4
+ net_config: *net_config
+ nodes_roles:
+ opnfv_host: [opnfv_host]
+ pod4-node1: [compute, storage]
+ pod4-node2: [compute, storage]
+ pod4-node3: [controller, storage]
+ pod4-node4: [controller, storage]
+ pod4-node5: [controller, storage]
+
+ # net_config network to be used by the PXE
+ pxe_network: admin
+
+ # As the MAC addresses of generated bridges are unpredictable, we use a list
+ # of local bridges to create libvirt networks
+ jumphost_interfaces_bridges:
+ - name: br_admin
+ ip:
+
+ extra_addresses:
+ opnfv_host: 192.168.12.2
+
+ flavors:
+ mini:
+ - opnfv
+ - pod4-node1
+ - pod4-node2
+ noha:
+ - opnfv
+ - pod4-node1
+ - pod4-node2
+ - pod4-node3
+ ha:
+ - opnfv
+ - pod4-node1
+ - pod4-node2
+ - pod4-node3
+ - pod4-node4
+ - pod4-node5
+
+ installers:
+ osa: *idf_osa
+ kubespray: *idf_kubespray
+ osh: *idf_osh
diff --git a/xci/var/lf-pod4-pdf.yml b/xci/var/lf-pod4-pdf.yml
new file mode 100644
index 00000000..9607e4db
--- /dev/null
+++ b/xci/var/lf-pod4-pdf.yml
@@ -0,0 +1,198 @@
+##############################################################################
+# Copyright (c) 2018 Linux Foundation, Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+### LF POD 4 descriptor file ###
+
+version: 1.0
+details:
+ pod_owner: Trevor Bramwell
+ contact: tbramwell@linuxfoundation.org
+ lab: Linux Foundation
+ location: Portland, Oregon, USA
+ type: development
+ link: https://wiki.opnfv.org/display/pharos/LF+POD+4
+jumphost:
+ name: pod4-jump
+ node: &nodeparams
+ type: baremetal
+ vendor: Intel Corporation
+ model: S2600WT2R
+ arch: x86_64
+ cpus: 88
+ cpu_cflags: haswell
+ cores: 22
+ memory: 62G
+ disks: &disks
+ - name: 'disk1'
+ disk_capacity: 480G
+ disk_type: ssd
+ disk_interface: sata
+ disk_rotation: 0
+ os: centos-7
+ remote_params: &remote_params
+ type: ipmi
+ versions:
+ - 2.0
+ user: admin
+ pass: octopus
+ remote_management:
+ <<: *remote_params
+ address: 172.30.8.83
+ mac_address: "a4:bf:01:01:b0:bb"
+ interfaces:
+ - name: nic1
+ speed: 1gb
+ features: 'dpdk|sriov'
+ vlan: native
+ mac_address: "a4:bf:01:01:b0:b9"
+ address: 192.168.12.1
+ - name: nic2
+ speed: 10gb
+ features: 'dpdk|sriov'
+ vlan: 450
+ mac_address: "00:1e:67:fd:9a:04"
+ address: 192.168.0.2
+ - name: nic3
+ speed: 10gb
+ features: 'dpdk|sriov'
+ vlan: 452
+ mac_address: "00:1e:67:fd:9a:04"
+ address: 192.168.2.2
+ - name: nic4
+ speed: 10gb
+ features: 'dpdk|sriov'
+ vlan: 451
+ mac_address: "00:1e:67:fd:9a:05"
+ address: 192.168.1.2
+ - name: nic5
+ speed: 10gb
+ features: 'dpdk|sriov'
+ vlan: 414
+ mac_address: "00:1e:67:fd:9a:05"
+ address: 172.30.12.83
+##############################################################################
+nodes:
+ - name: pod4-node1
+ node: *nodeparams
+ disks: *disks
+ remote_management:
+ <<: *remote_params
+ address: 172.30.8.84
+ mac_address: "a4:bf:01:01:ab:b6"
+ interfaces:
+ - mac_address: "a4:bf:01:01:ab:b4"
+ address: 192.168.122.3
+ vlan: native
+ - mac_address: "00:1e:67:fd:9b:32"
+ address: 172.29.236.11
+ vlan: 450
+ - mac_address: "00:1e:67:fd:9b:32"
+ address: 192.168.122.3
+ vlan: 452
+ - mac_address: "00:1e:67:fd:9b:33"
+ address: 172.29.240.11
+ vlan: 451
+ - mac_address: "00:1e:67:fd:9b:33"
+ address: 172.29.242.11
+ vlan: 414
+ ############################################################################
+ - name: pod4-node2
+ node: *nodeparams
+ disks: *disks
+ remote_management:
+ <<: *remote_params
+ address: 172.30.8.85
+ mac_address: "a4:bf:01:01:b6:97"
+ interfaces:
+ - mac_address: "a4:bf:01:01:b6:95"
+ address: 192.168.122.4
+ vlan: native
+ - mac_address: "00:1e:67:fd:98:e2"
+ address: 172.29.236.12
+ vlan: 450
+ - mac_address: "00:1e:67:fd:98:e2"
+ address: 192.168.122.4
+ vlan: 452
+ - mac_address: "00:1e:67:fd:98:e3"
+ address: 172.29.240.12
+ vlan: 451
+ - mac_address: "00:1e:67:fd:98:e3"
+ address: 172.29.242.12
+ vlan: 414
+ ############################################################################
+ - name: pod4-node3
+ node: *nodeparams
+ disks: *disks
+ remote_management:
+ <<: *remote_params
+ address: 172.30.8.86
+ mac_address: "a4:bf:01:01:66:fe"
+ interfaces:
+ - mac_address: "a4:bf:01:01:66:fc"
+ address: 192.168.122.5
+ vlan: native
+ - mac_address: "00:1e:67:fd:9c:c8"
+ address: 172.29.236.13
+ vlan: 450
+ - mac_address: "00:1e:67:fd:9c:c8"
+ address: 192.168.122.5
+ vlan: 452
+ - mac_address: "00:1e:67:fd:9c:c9"
+ address: 172.29.240.13
+ vlan: 451
+ - mac_address: "00:1e:67:fd:9c:c9"
+ address: 172.29.242.13
+ vlan: 414
+ ############################################################################
+ - name: pod4-node4
+ node: *nodeparams
+ disks: *disks
+ remote_management:
+ <<: *remote_params
+ address: 172.30.8.87
+ mac_address: "a4:bf:01:01:b2:f5"
+ interfaces:
+ - mac_address: "a4:bf:01:01:b2:f3"
+ address: 192.168.122.6
+ vlan: native
+ - mac_address: "00:1e:67:fd:9b:38"
+ address: 172.29.236.14
+ vlan: 450
+ - mac_address: "00:1e:67:fd:9b:38"
+ address: 192.168.122.6
+ vlan: 452
+ - mac_address: "00:1e:67:fd:9b:39"
+ address: 172.29.240.14
+ vlan: 451
+ - mac_address: "00:1e:67:fd:9b:39"
+ address: 172.29.242.14
+ vlan: 414
+ ############################################################################
+ - name: pod4-node5
+ node: *nodeparams
+ disks: *disks
+ remote_management:
+ <<: *remote_params
+ address: 172.30.8.88
+ mac_address: "a4:bf:01:01:b5:11"
+ interfaces:
+ - mac_address: "a4:bf:01:01:b5:0f"
+ address: 192.168.122.7
+ vlan: native
+ - mac_address: "00:1e:67:fd:99:40"
+ address: 172.29.236.15
+ vlan: 450
+ - mac_address: "00:1e:67:fd:99:40"
+ address: 192.168.122.7
+ vlan: 452
+ - mac_address: "00:1e:67:fd:99:41"
+ address: 172.29.240.15
+ vlan: 451
+ - mac_address: "00:1e:67:fd:99:41"
+ address: 172.29.242.15
+ vlan: 414
diff --git a/xci/var/opnfv.yml b/xci/var/opnfv.yml
index e7e3b76c..91b9ee38 100644
--- a/xci/var/opnfv.yml
+++ b/xci/var/opnfv.yml
@@ -28,8 +28,18 @@ openstack_osa_haproxy_git_url: "{{ lookup('env','OPENSTACK_OSA_HAPROXY_GIT_URL')
# kubespray variables
kubespray_git_url: "{{ lookup('env','KUBESPRAY_GIT_URL') }}"
kubespray_version: "{{ lookup('env','KUBESPRAY_VERSION') }}"
+kubernetes_version: "{{ lookup('env','KUBERNETES_VERSION') }}"
xci_kube_ansible_pip_version: "{{ lookup('env','XCI_KUBE_ANSIBLE_PIP_VERSION') }}"
+# openstack-helm variables
+osh_git_url: "{{ lookup('env','OSH_GIT_URL') }}"
+osh_version: "{{ lookup('env','OSH_VERSION') }}"
+osh_infra_git_url: "{{ lookup('env','OSH_INFRA_GIT_URL') }}"
+osh_infra_version: "{{ lookup('env','OSH_INFRA_VERSION') }}"
+osh_helm_binary_url: "{{ lookup('env','OSH_HELM_BINARY_URL') }}"
+osh_helm_binary_version: "{{ lookup('env','OSH_HELM_BINARY_VERSION') }}"
+openstack_osh_version: "{{ lookup('env','OPENSTACK_OSH_VERSION') }}"
+
# variables for other components
keepalived_git_url: "{{ lookup('env','KEEPALIVED_GIT_URL') }}"
haproxy_version: "{{ lookup('env','HAPROXY_VERSION') }}"
@@ -49,3 +59,7 @@ run_tempest: "{{ lookup('env', 'RUN_TEMPEST') }}"
core_openstack_install: "{{ lookup('env', 'CORE_OPENSTACK_INSTALL') }}"
deploy_scenario: "{{ lookup('env','DEPLOY_SCENARIO') }}"
installer_type: "{{ lookup('env','INSTALLER_TYPE') }}"
+osh_distro: "{{ lookup('env', 'OSH_DISTRO') }}"
+
+# baremetal variables
+baremetal: "{{ lookup('env','BAREMETAL') }}"
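Editor's note: all of these variables resolve at run time through Ansible's env lookup, so the new openstack-helm and baremetal knobs are plain environment variables. A usage sketch (the exported values are illustrative, not defaults from this patch):

    # illustrative only -- export before running xci-deploy.sh
    export OSH_VERSION=master
    export OSH_DISTRO=opensuse      # read via lookup('env', 'OSH_DISTRO')
    export BAREMETAL=true           # read via lookup('env', 'BAREMETAL')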
diff --git a/xci/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature/tasks/main.yml b/xci/var/opnfv_vm_idf.yml
index 5b2939f1..fa647287 100644
--- a/xci/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature/tasks/main.yml
+++ b/xci/var/opnfv_vm_idf.yml
@@ -1,14 +1,19 @@
+---
##############################################################################
-# Copyright (c) 2018 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
+# Copyright (c) 2017 Ericsson AB and others.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
----
-
-- name: copy k8s-cluster.yml
- copy:
- src: "k8s-cluster.yml"
- dest: "{{ remote_xci_path }}/.cache/repos/kubespray/opnfv_inventory/group_vars/k8s-cluster.yml"
+opnfv_vm_idf:
+ version: 0.1
+ net_config: &net_config
+ admin:
+ interface: 0
+ network: 192.168.122.0
+ mask: 24
+ mgmt:
+ interface: 1
+ network: 172.29.236.0
+ mask: 22
diff --git a/xci/var/opnfv_vm_pdf.yml b/xci/var/opnfv_vm_pdf.yml
new file mode 100644
index 00000000..51371388
--- /dev/null
+++ b/xci/var/opnfv_vm_pdf.yml
@@ -0,0 +1,53 @@
+---
+##############################################################################
+# Copyright (c) 2017 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+opnfv_vm_pdf:
+ name: opnfv
+ node: &nodeparams
+ type: virtual
+ vendor: libvirt
+ model: pc
+ arch: x86_64
+ cpus: 6
+ cpu_cflags: host-model
+ cores: 6
+ memory: 12G
+ disks: &disks
+ - name: disk1
+ disk_capacity: 80G
+ disk_type: hdd
+ disk_interface: sata
+ disk_rotation:
+ remote_params: &remote_params
+ type:
+ - ipmi: [2.0]
+ user: admin
+ pass: password
+ remote_management:
+ <<: *remote_params
+ address: 192.168.122.1:625
+ mac_address: "52:54:00:fe:3b:01"
+ interface_common_nic1: &interface_common_nic1
+ name: nic1
+ speed:
+ features:
+ vlan: native
+ interface_common_nic2: &interface_common_nic2
+ name: nic2
+ speed:
+ features:
+ vlan: native
+ interfaces:
+ - mac_address: "52:54:00:33:82:d0"
+ address: 192.168.122.2
+ gateway: 192.168.122.1
+ <<: *interface_common_nic1
+ - mac_address: "52:54:00:33:82:d1"
+ address: 172.29.236.10
+ gateway: 172.29.236.1
+ <<: *interface_common_nic2
diff --git a/xci/xci-deploy.sh b/xci/xci-deploy.sh
index a8b7adbd..d9c41968 100755
--- a/xci/xci-deploy.sh
+++ b/xci/xci-deploy.sh
@@ -3,37 +3,6 @@ set -o errexit
set -o nounset
set -o pipefail
-submit_bug_report() {
- cd ${XCI_PATH}
- echo ""
- echo "-------------------------------------------------------------------------"
- echo "Oh nooooo! The XCI deployment failed miserably :-("
- echo ""
- echo "If you need help, please choose one of the following options"
- echo "* #opnfv-pharos @ freenode network"
- echo "* opnfv-tech-discuss mailing list (https://lists.opnfv.org/mailman/listinfo/opnfv-tech-discuss)"
- echo " - Please prefix the subject with [XCI]"
- echo "* https://jira.opnfv.org (Release Engineering project)"
- echo ""
- echo "Do not forget to submit the following information on your bug report:"
- echo ""
- git diff --quiet && echo "releng-xci tree status: clean" || echo "releng-xci tree status: local modifications"
- echo "opnfv/releng-xci version: $(git rev-parse HEAD)"
- echo "openstack/bifrost version: $OPENSTACK_BIFROST_VERSION"
- echo "openstack/openstack-ansible version: $OPENSTACK_OSA_VERSION"
- echo "xci flavor: $XCI_FLAVOR"
- echo "xci installer: $INSTALLER_TYPE"
- echo "xci scenario: $DEPLOY_SCENARIO"
- echo "Environment variables:"
- env | grep --color=never '\(OPNFV\|XCI\|INSTALLER_TYPE\|OPENSTACK\|SCENARIO\|ANSIBLE\)'
- echo "-------------------------------------------------------------------------"
-}
-
-exit_trap() {
- submit_bug_report
- collect_xci_logs
-}
-
#-------------------------------------------------------------------------------
# This script should not be run as root
#-------------------------------------------------------------------------------
@@ -59,20 +28,8 @@ fi
#-------------------------------------------------------------------------------
# find where are we
export XCI_PATH="$(git rev-parse --show-toplevel)"
-# Declare our virtualenv
-export XCI_VENV=${XCI_PATH}/venv/
-# source user vars
-source $XCI_PATH/xci/config/user-vars
-# source pinned versions
-source $XCI_PATH/xci/config/pinned-versions
-# source flavor configuration
-source "$XCI_PATH/xci/config/${XCI_FLAVOR}-vars"
-# source installer configuration
-source "$XCI_PATH/xci/installer/${INSTALLER_TYPE}/env" &>/dev/null || true
-# source xci configuration
-source $XCI_PATH/xci/config/env-vars
# source helpers library
-source ${XCI_PATH}/xci/files/install-lib.sh
+source ${XCI_PATH}/xci/files/xci-lib.sh
# Make sure we pass XCI_PATH everywhere
export XCI_ANSIBLE_PARAMS+=" -e xci_path=${XCI_PATH}"
@@ -92,6 +49,18 @@ for local_user_var in ${user_local_dev_vars[@]}; do
done
unset user_local_dev_vars local_user_var
+#
+# Parse command line options
+#
+parse_cmdline_opts "$@"
+
+#
+# Bootstrap environment for XCI Deployment
+#
+echo "Info: Preparing host environment for the XCI deployment"
+echo "-------------------------------------------------------------------------"
+bootstrap_xci_env
+
# register our handler
trap exit_trap ERR
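Editor's note: the inline environment setup deleted above now lives behind bootstrap_xci_env in xci/files/xci-lib.sh. A hedged sketch of what that function presumably wraps, reconstructed from the removed lines (the function name comes from this patch; the body below is an assumption):

    # presumed shape of bootstrap_xci_env in xci/files/xci-lib.sh
    function bootstrap_xci_env() {
        export XCI_VENV=${XCI_PATH}/venv/
        source $XCI_PATH/xci/config/user-vars
        source $XCI_PATH/xci/config/pinned-versions
        source "$XCI_PATH/xci/config/${XCI_FLAVOR}-vars"
        source "$XCI_PATH/xci/installer/${INSTALLER_TYPE}/env" &>/dev/null || true
        source $XCI_PATH/xci/config/env-vars
    }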
@@ -99,22 +68,6 @@ trap exit_trap ERR
sudo sed -i "s/^Defaults.*env_reset/#&/" /etc/sudoers
#-------------------------------------------------------------------------------
-# Log info to console
-#-------------------------------------------------------------------------------
-echo "Info: Starting XCI Deployment"
-echo "Info: Deployment parameters"
-echo "-------------------------------------------------------------------------"
-echo "OPNFV scenario: $DEPLOY_SCENARIO"
-echo "xci flavor: $XCI_FLAVOR"
-echo "xci installer: $INSTALLER_TYPE"
-echo "infra deployment: $INFRA_DEPLOYMENT"
-echo "opnfv/releng-xci version: $(git rev-parse HEAD)"
-[[ "$INFRA_DEPLOYMENT" == "bifrost" ]] && echo "openstack/bifrost version: $OPENSTACK_BIFROST_VERSION"
-[[ "$INSTALLER_TYPE" == "osa" ]] && echo "openstack/openstack-ansible version: $OPENSTACK_OSA_VERSION"
-[[ "$INSTALLER_TYPE" == "kubespray" ]] && echo "kubespray version: $KUBESPRAY_VERSION"
-echo "-------------------------------------------------------------------------"
-
-#-------------------------------------------------------------------------------
# Clean up environment
#-------------------------------------------------------------------------------
echo "Info: Cleaning up previous XCI artifacts"
@@ -150,10 +103,15 @@ echo "-------------------------------------------------------------------------"
ansible_lint
echo "-------------------------------------------------------------------------"
-#-------------------------------------------------------------------------------
# Get scenario variables overrides
#-------------------------------------------------------------------------------
-source $(find $XCI_SCENARIOS_CACHE/${DEPLOY_SCENARIO} -name xci_overrides) &>/dev/null || :
+source $(find $XCI_SCENARIOS_CACHE/${DEPLOY_SCENARIO} -name xci_overrides) &>/dev/null &&
+ echo "Sourced ${DEPLOY_SCENARIO} overrides files successfully!" || :
+
+#-------------------------------------------------------------------------------
+# Log info to console
+#-------------------------------------------------------------------------------
+log_xci_information
# Deploy infrastructure based on the selected deployment method
echo "Info: Deploying hardware using '${INFRA_DEPLOYMENT}'"