-rw-r--r--  .gitignore | 6
-rw-r--r--  INFO.yaml | 22
-rw-r--r--  docs/conf.py | 1
-rw-r--r--  docs/conf.yaml | 3
-rw-r--r--  docs/requirements.txt | 3
-rw-r--r--  docs/xci-overview.rst | 2
-rw-r--r--  docs/xci-user-guide.rst | 21
-rw-r--r--  tox.ini | 25
-rw-r--r--  xci/README.rst | 31
-rwxr-xr-x  xci/config/env-vars | 23
-rwxr-xr-x  xci/config/ha-vars | 1
-rwxr-xr-x  xci/config/mini-vars | 1
-rwxr-xr-x  xci/config/noha-vars | 1
-rwxr-xr-x  xci/config/pinned-versions | 38
-rwxr-xr-x  xci/config/user-vars | 12
-rw-r--r--  xci/files/requirements.yml | 2
-rwxr-xr-x  xci/files/xci-destroy-env.sh | 20
-rw-r--r--  xci/files/xci-lib.sh | 55
-rw-r--r--  xci/infra/bifrost/infra-provision.sh | 25
-rw-r--r--  xci/infra/bifrost/playbooks/opnfv-virtual.yml | 19
-rw-r--r--  xci/infra/bifrost/playbooks/roles/common/venv_python_path.yml | 34
-rw-r--r--  xci/infra/bifrost/playbooks/wait-for-baremetal.yml | 17
-rw-r--r--  xci/infra/bifrost/playbooks/xci-prepare-env.yml (renamed from xci/infra/bifrost/playbooks/xci-prepare-virtual.yml) | 33
-rw-r--r--  xci/infra/bifrost/playbooks/xci-setup-nodes.yml (renamed from xci/infra/bifrost/playbooks/xci-create-virtual.yml) | 20
-rwxr-xr-x  xci/infra/bifrost/scripts/bifrost-env.sh | 10
-rwxr-xr-x  xci/installer/kubespray/deploy.sh | 66
-rw-r--r--  xci/installer/kubespray/playbooks/configure-installer.yml | 50
-rw-r--r--  xci/installer/kubespray/playbooks/configure-kubenet.yml | 4
-rw-r--r--  xci/installer/kubespray/playbooks/configure-opnfvhost.yml | 52
-rw-r--r--  xci/installer/kubespray/playbooks/configure-targethosts.yml | 4
-rw-r--r--  xci/installer/kubespray/playbooks/post-deployment.yml | 42
-rwxr-xr-x  xci/installer/osa/deploy.sh | 17
-rw-r--r--  xci/installer/osa/files/ansible-role-requirements.yml | 131
-rw-r--r--  xci/installer/osa/files/global-requirement-pins.txt | 13
-rw-r--r--  xci/installer/osa/files/ha/openstack_user_config.yml | 60
-rw-r--r--  xci/installer/osa/files/ha/user_variables.yml | 7
-rw-r--r--  xci/installer/osa/files/mini/user_variables.yml | 7
-rw-r--r--  xci/installer/osa/files/noha/user_variables.yml | 7
-rw-r--r--  xci/installer/osa/files/openstack_services.yml | 152
-rw-r--r--  xci/installer/osa/files/setup-openstack.yml | 2
-rw-r--r--  xci/installer/osa/files/user_variables_xci.yml | 4
-rw-r--r--  xci/installer/osa/playbooks/configure-opnfvhost.yml | 33
-rw-r--r--  xci/installer/osa/playbooks/configure-targethosts.yml | 2
-rw-r--r--  xci/installer/osa/playbooks/post-deployment.yml | 66
-rw-r--r--  xci/installer/osh/README | 50
-rwxr-xr-x  xci/installer/osh/deploy.sh | 170
-rw-r--r--  xci/installer/osh/files/ha/inventory/group_vars/all.yml | 8
-rw-r--r--  xci/installer/osh/playbooks/configure-installer.yml | 51
-rw-r--r--  xci/installer/osh/playbooks/configure-kubenet.yml | 51
-rw-r--r--  xci/installer/osh/playbooks/configure-opnfvhost.yml | 101
-rw-r--r--  xci/installer/osh/playbooks/configure-targethosts.yml | 40
-rw-r--r--  xci/installer/osh/playbooks/group_vars/all.yml | 55
-rw-r--r--  xci/installer/osh/playbooks/install-openstack-helm.yml | 24
-rw-r--r--  xci/installer/osh/playbooks/post-deployment.yml | 42
-rw-r--r--  xci/installer/osh/playbooks/roles/install-osh-mini/tasks/main.yml | 109
-rw-r--r--  xci/installer/osh/playbooks/roles/install-osh-mini/vars/main.yml | 18
-rw-r--r--  xci/installer/osh/playbooks/roles/install-osh-noha/tasks/main.yml | 130
-rw-r--r--  xci/installer/osh/playbooks/roles/prepare-kube-nodes-osh/tasks/main.yml | 12
-rw-r--r--  xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/files/helm-serve.service | 11
-rw-r--r--  xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/tasks/main.yml | 130
-rw-r--r--  xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/vars/main.yml | 31
-rw-r--r--  xci/installer/osh/playbooks/roles/prepare-osh/tasks/main.yml | 33
-rw-r--r--  xci/installer/osh/playbooks/roles/prepare-osh/templates/resolv.conf.j2 | 4
-rw-r--r--  xci/installer/osh/playbooks/roles/prepare-osh/vars/main.yml | 7
-rw-r--r--  xci/opnfv-scenario-requirements.yml | 58
-rw-r--r--  xci/playbooks/configure-localhost.yml | 21
-rwxr-xr-x  xci/playbooks/dynamic_inventory.py | 75
-rw-r--r--  xci/playbooks/manage-ssl-certs.yml | 32
-rw-r--r--  xci/playbooks/prepare-tests.yml (renamed from xci/playbooks/prepare-functest.yml) | 4
-rw-r--r--  xci/playbooks/roles/bootstrap-host/tasks/network_debian.yml | 44
-rw-r--r--  xci/playbooks/roles/bootstrap-host/tasks/network_redhat.yml | 2
-rw-r--r--  xci/playbooks/roles/bootstrap-host/tasks/network_suse.yml | 44
-rw-r--r--  xci/playbooks/roles/bootstrap-host/templates/osa/debian.interface.j2 | 2
-rw-r--r--  xci/playbooks/roles/bootstrap-host/templates/osa/redhat.interface.j2 | 2
-rw-r--r--  xci/playbooks/roles/bootstrap-host/templates/osa/suse.interface.j2 | 3
l---------  xci/playbooks/roles/bootstrap-host/templates/osh | 1
-rw-r--r--  xci/playbooks/roles/bootstrap-host/vars/main.yml | 70
-rw-r--r--  xci/playbooks/roles/create-nodes/README.md (renamed from xci/playbooks/roles/create-vm-nodes/README.md) | 37
-rw-r--r--  xci/playbooks/roles/create-nodes/defaults/main.yml (renamed from xci/playbooks/roles/create-vm-nodes/defaults/main.yml) | 20
-rw-r--r--  xci/playbooks/roles/create-nodes/files/virtualbmc.conf | 3
-rw-r--r--  xci/playbooks/roles/create-nodes/tasks/baremetalhoststojson.yml | 91
-rw-r--r--  xci/playbooks/roles/create-nodes/tasks/create_vm.yml (renamed from xci/playbooks/roles/create-vm-nodes/tasks/create_vm.yml) | 72
-rw-r--r--  xci/playbooks/roles/create-nodes/tasks/download_opnfvimage.yml (renamed from xci/playbooks/roles/create-vm-nodes/tasks/download_opnfvimage.yml) | 0
-rw-r--r--  xci/playbooks/roles/create-nodes/tasks/main.yml (renamed from xci/playbooks/roles/create-vm-nodes/tasks/main.yml) | 13
-rw-r--r--  xci/playbooks/roles/create-nodes/tasks/prepare_libvirt.yml (renamed from xci/playbooks/roles/create-vm-nodes/tasks/prepare_libvirt.yml) | 38
-rw-r--r--  xci/playbooks/roles/create-nodes/templates/net-admin.xml.j2 | 14
-rw-r--r--  xci/playbooks/roles/create-nodes/templates/net-mgmt.xml.j2 | 11
-rw-r--r--  xci/playbooks/roles/create-nodes/templates/net.xml.j2 | 14
-rw-r--r--  xci/playbooks/roles/create-nodes/templates/pool_dir.xml.j2 (renamed from xci/playbooks/roles/create-vm-nodes/templates/pool_dir.xml.j2) | 0
-rw-r--r--  xci/playbooks/roles/create-nodes/templates/vm.xml.j2 (renamed from xci/playbooks/roles/create-vm-nodes/templates/vm.xml.j2) | 41
-rw-r--r--  xci/playbooks/roles/create-nodes/vars/debian.yml (renamed from xci/playbooks/roles/create-vm-nodes/vars/debian.yml) | 0
-rw-r--r--  xci/playbooks/roles/create-nodes/vars/redhat.yml (renamed from xci/playbooks/roles/create-vm-nodes/vars/redhat.yml) | 0
-rw-r--r--  xci/playbooks/roles/create-nodes/vars/suse.yml (renamed from xci/playbooks/roles/create-vm-nodes/vars/suse.yml) | 0
-rw-r--r--  xci/playbooks/roles/create-vm-nodes/templates/net.xml.j2 | 18
-rw-r--r--  xci/playbooks/roles/prepare-functest/defaults/main.yml | 14
-rw-r--r--  xci/playbooks/roles/prepare-functest/templates/run-functest.sh.j2 | 84
-rw-r--r--  xci/playbooks/roles/prepare-tests/defaults/main.yml | 14
-rw-r--r--  xci/playbooks/roles/prepare-tests/tasks/main.yml (renamed from xci/playbooks/roles/prepare-functest/tasks/main.yml) | 45
-rw-r--r--  xci/playbooks/roles/prepare-tests/tasks/process_neutron_conf.yml | 19
-rw-r--r--  xci/playbooks/roles/prepare-tests/templates/env.j2 (renamed from xci/playbooks/roles/prepare-functest/templates/env.j2) | 8
-rw-r--r--  xci/playbooks/roles/prepare-tests/templates/prepare-tests.sh.j2 | 46
-rw-r--r--  xci/playbooks/roles/prepare-tests/templates/run-functest.sh.j2 | 52
-rw-r--r--  xci/playbooks/roles/prepare-tests/templates/run-yardstick.sh.j2 | 47
-rw-r--r--  xci/playbooks/roles/prepare-tests/vars/main.yml (renamed from xci/playbooks/roles/prepare-functest/vars/main.yml) | 7
-rwxr-xr-x  xci/scripts/update-osa-version-files.sh | 8
-rwxr-xr-x  xci/scripts/vm/start-new-vm.sh | 13
-rw-r--r--  xci/var/ericsson-pod2-idf.yml | 187
-rw-r--r--  xci/var/ericsson-pod2-pdf.yml | 269
-rw-r--r--  xci/var/idf.yml | 122
-rw-r--r--  xci/var/lf-pod4-idf.yml | 222
-rw-r--r--  xci/var/lf-pod4-pdf.yml | 198
-rw-r--r--  xci/var/opnfv.yml | 14
-rw-r--r--  xci/var/opnfv_vm_idf.yml | 19
-rw-r--r--  xci/var/opnfv_vm_pdf.yml (renamed from xci/var/opnfv_vm.yml) | 34
-rwxr-xr-x  xci/xci-deploy.sh | 5
115 files changed, 3696 insertions(+), 682 deletions(-)
diff --git a/.gitignore b/.gitignore
index af9d0080..925736c1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,6 @@
*,~
.*.sw?
-/docs_build/
+docs_build/*
/docs_output/
/releng/
.idea
@@ -33,7 +33,7 @@ coverage.xml
nosetests.xml
testapi_venv/
.cache
-.tox
+.tox/
*.retry
job_output/
# Clear VM files
@@ -42,7 +42,7 @@ job_output/
build.log
*.d/
_static/
-conf.py
*.html
html/
xci/logs/
+docs/_build/*
diff --git a/INFO.yaml b/INFO.yaml
index e3fae0a4..43c73870 100644
--- a/INFO.yaml
+++ b/INFO.yaml
@@ -30,28 +30,18 @@ meetings:
time: '14:00 UTC'
committers:
- <<: *opnfv_releng_ptl
- - name: 'Yolanda Robla Mota'
- company: 'Red Hat'
- email: 'yroblamo@redhat.com'
- id: 'yrobla'
- timezone: 'Europe/Barcelona'
- name: 'Markos Chandras'
company: 'SUSE'
email: 'mchandras@suse.de'
id: 'mchandras'
timezone: 'Europe/London'
- - name: 'Tianwei Wu'
- company: 'Huawei'
- email: 'wutianwei1@huawei.com'
- id: 'hw_wutianwei'
- timezone: 'Asia/Shanghai'
- name: 'Manuel Buil'
company: 'SUSE'
email: 'mbuil@suse.com'
- id: 'mbuild'
+ id: 'mbuil'
timezone: 'Europe/Madrid'
- - name: 'Periyasamy Palanisamy'
- company: 'Ericsson'
- email: 'periyasamy.palanisamy@ericsson.com'
- id: 'epalper'
- timezone: 'Europe/Aachen'
+ - name: 'Panagiotis Karalis'
+ company: 'Intracom Telecom'
+ email: 'panos.pkaralis@gmail.com'
+ id: 'pkaralis'
+ timezone: 'Europe/Athens'
diff --git a/docs/conf.py b/docs/conf.py
new file mode 100644
index 00000000..86ab8c57
--- /dev/null
+++ b/docs/conf.py
@@ -0,0 +1 @@
+from docs_conf.conf import * # flake8: noqa
diff --git a/docs/conf.yaml b/docs/conf.yaml
new file mode 100644
index 00000000..305b679e
--- /dev/null
+++ b/docs/conf.yaml
@@ -0,0 +1,3 @@
+---
+project_cfg: opnfv
+project: releng-xci
diff --git a/docs/requirements.txt b/docs/requirements.txt
new file mode 100644
index 00000000..f26b0414
--- /dev/null
+++ b/docs/requirements.txt
@@ -0,0 +1,3 @@
+lfdocs-conf
+sphinxcontrib-httpdomain
+sphinx-opnfv-theme
diff --git a/docs/xci-overview.rst b/docs/xci-overview.rst
index 575eb37c..9b225ec1 100644
--- a/docs/xci-overview.rst
+++ b/docs/xci-overview.rst
@@ -138,7 +138,7 @@ Multi-distro Support
--------------------
Giving choice and not imposing things on developers and users are two
-of the important aspects of XCI. This means that if they want to have all in one
+of the important aspects of XCI. This means that if they want to have smaller
deployments, they should be able to do that by using
:ref:`different flavors <sandbox-flavors>` provided by XCI.
diff --git a/docs/xci-user-guide.rst b/docs/xci-user-guide.rst
index 8f506fc4..5e76ca16 100644
--- a/docs/xci-user-guide.rst
+++ b/docs/xci-user-guide.rst
@@ -97,11 +97,6 @@ Available flavors are listed on the table below.
+------------------+------------------------+---------------------+--------------------------+--------------------------+
| Flavor | Number of VM Nodes | VM Specs Per Node | Time Estimates Openstack | Time Estimates Kubernetes|
+==================+========================+=====================+==========================+==========================+
-| All in One (aio) | | 1 VM Node | | vCPUs: 8 | | Provisioning: 10 mins | | Provisioning: 10 mins |
-| | | controller & compute | | RAM: 12GB | | Deployment: 90 mins | | Deployment: 30 mins |
-| | | on single/same node | | Disk: 80GB | | Total: 100 mins | | Total: 40 mins |
-| | | 1 compute node | | NICs: 1 | | | | |
-+------------------+------------------------+---------------------+--------------------------+--------------------------+
| Mini | | 3 VM Nodes | | vCPUs: 6 | | Provisioning: 12 mins | | Provisioning: 12 mins |
| | | 1 deployment node | | RAM: 12GB | | Deployment: 65 mins | | Deployment: 35 mins |
| | | 1 controller node | | Disk: 80GB | | Total: 77 mins | | Total: 47 mins |
@@ -150,14 +145,6 @@ ongoing.
The differences between the flavors are documented below.
-**All in One**
-
-As shown on the table in the previous section, this flavor consists of a single
-node. All the OpenStack services, including compute run on the same node.
-
-The flavor All in One (aio) is deployed based on the process described in the
-upstream documentation. Please check `OpenStack Ansible Developer Quick Start <https://docs.openstack.org/openstack-ansible/pike/contributor/quickstart-aio.html>`_ for details.
-
**Mini/No HA/HA**
These flavors consist of multiple nodes.
@@ -184,12 +171,6 @@ are supported currently
The differences between the flavors are documented below.
-**All in One**
-
-As shown on the table in the previous section, this flavor consists of a single
-node. All the kubernetes services run on the same node, which acts as master
-and worker at the same time.
-
**Mini/No HA/HA**
These flavors consist of multiple nodes.
@@ -257,7 +238,7 @@ How to Use
| ``./xci-deploy.sh``
Issuing above command will start the sandbox deployment using the default
-flavor ``aio`` and the verified versions of upstream components.
+flavor ``mini`` and the verified versions of upstream components.
(`pinned-versions <https://git.opnfv.org/releng-xci/tree/xci/config/pinned-versions>`_).
The sandbox should be ready between 1.5 and 2 hours depending on the host
machine.
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 00000000..6aa16066
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,25 @@
+# Tox (http://tox.testrun.org/) is a tool for running tests
+# in multiple virtualenvs. This configuration file will run the
+# test suite on all supported python versions. To use it, "pip install tox"
+# and then run "tox" from this directory.
+
+[tox]
+envlist = docs,docs-linkcheck
+skipsdist = True
+
+[testenv]
+usedevelop = False
+setenv=
+ HOME = {envtmpdir}
+ PYTHONPATH = {toxinidir}
+
+[testenv:docs]
+deps = -r{toxinidir}/docs/requirements.txt
+commands =
+ sphinx-build -b html -n -d {envtmpdir}/doctrees ./docs {toxinidir}/docs/_build/html
+ echo "Generated docs available in {toxinidir}/docs/_build/html"
+whitelist_externals = echo
+
+[testenv:docs-linkcheck]
+deps = -r{toxinidir}/docs/requirements.txt
+commands = sphinx-build -b linkcheck -d {envtmpdir}/doctrees ./docs {toxinidir}/docs/_build/linkcheck
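For reference, a minimal local run of the two new tox environments might look
like this (assuming tox is installed via pip):

    pip install tox
    # build the HTML docs into docs/_build/html
    tox -e docs
    # verify external links referenced by the docs
    tox -e docs-linkcheck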
diff --git a/xci/README.rst b/xci/README.rst
index d7555d46..a18d92ee 100644
--- a/xci/README.rst
+++ b/xci/README.rst
@@ -160,6 +160,37 @@ execute sandbox script
./xci-deploy.sh
+Baremetal Usage
+---------------
+
+The previous deployments are VM-based, i.e. the controllers and computes are
+VMs. It is also possible to deploy on baremetal; for that, a PDF and an IDF
+file describing the hardware must be provided to the sandbox script:
+
+clone OPNFV releng-xci repository
+
+ git clone https://gerrit.opnfv.org/gerrit/releng-xci.git
+
+change into directory where the sandbox script is located
+
+ cd releng-xci/xci
+
+set the sandbox flavor
+
+ export XCI_FLAVOR=noha
+
+set the version to use for openstack-ansible
+
+ export OPENSTACK_OSA_VERSION=master
+
+set where the logs should be stored
+
+ export LOG_PATH=/home/jenkins/xcilogs
+
+execute sandbox script
+
+ ./xci-deploy.sh -i var/ericsson-pod2-idf.yml -p var/ericsson-pod2-pdf.yml
+
==============
User Variables
==============
diff --git a/xci/config/env-vars b/xci/config/env-vars
index fe75cb80..a90e8533 100755
--- a/xci/config/env-vars
+++ b/xci/config/env-vars
@@ -8,10 +8,16 @@ export OPNFV_RELENG_GIT_URL=${OPNFV_RELENG_GIT_URL:-https://gerrit.opnfv.org/ger
export OPENSTACK_BIFROST_GIT_URL=${OPENSTACK_BIFROST_GIT_URL:-https://git.openstack.org/openstack/bifrost}
export OPENSTACK_OSA_GIT_URL=${OPENSTACK_OSA_GIT_URL:-https://git.openstack.org/openstack/openstack-ansible}
export OPENSTACK_OSA_OPENRC_GIT_URL=${OPENSTACK_OSA_OPENRC_GIT_URL:-https://git.openstack.org/openstack/openstack-ansible-openstack_openrc}
-export KUBESPRAY_GIT_URL=${KUBESPRAY_GIT_URL:-https://github.com/kubernetes-incubator/kubespray.git}
+export KUBESPRAY_GIT_URL=${KUBESPRAY_GIT_URL:-https://github.com/kubernetes-sigs/kubespray.git}
+export OSH_GIT_URL=${OSH_GIT_URL:-https://github.com/openstack/openstack-helm.git}
+export OSH_INFRA_GIT_URL=${OSH_INFRA_GIT_URL:-https://github.com/openstack/openstack-helm-infra.git}
export OPENSTACK_OSA_HAPROXY_GIT_URL=${OPENSTACK_OSA_HAPROXY_GIT_URL:-https://git.openstack.org/openstack/openstack-ansible-haproxy_server}
export KEEPALIVED_GIT_URL=${KEEPALIVED_GIT_URL:-https://github.com/evrardjp/ansible-keepalived}
+export OSH_HELM_BINARY_URL=${OSH_HELM_BINARY_URL:-https://storage.googleapis.com/kubernetes-helm}
+export OSH_HELM_BINARY_VERSION=${OSH_HELM_BINARY_VERSION:-v2.13.1}
+
+
# Configuration
export OPENSTACK_OSA_ETC_PATH=/etc/openstack_deploy
export OPNFV_HOST_IP=192.168.122.2
@@ -28,6 +34,8 @@ export XCI_PLAYBOOKS=${XCI_PATH}/xci/playbooks
# Functest parameters
export FUNCTEST_MODE=${FUNCTEST_MODE:-"tier"}
export FUNCTEST_SUITE_NAME=${FUNCTEST_SUITE_NAME:-"healthcheck"}
+# TODO: Investigate and fix why the env var FUNCTEST_VERSION set by the Jenkins job doesn't take effect
+export FUNCTEST_VERSION=${FUNCTEST_VERSION:-"hunter"}
# CI paremeters
export CI_LOOP=${CI_LOOP:-"daily"}
@@ -45,12 +53,15 @@ export LOG_PATH=${LOG_PATH:-${XCI_PATH}/xci/logs}
# This currently matches the OSA Ansible version but it doesn't really
# matter since bifrost and OSA will use the Ansible version they need.
# Overall, it's better to use what OSA supports so we can use new features.
-export XCI_ANSIBLE_PIP_VERSION=${XCI_ANSIBLE_PIP_VERSION:-$(curl -s https://raw.githubusercontent.com/openstack/openstack-ansible/${OPENSTACK_OSA_VERSION}/scripts/bootstrap-ansible.sh | grep ansible== | sed -n "s/.*ansible==\([0-9.]*\).*/\1/p")}
+# OSA currently has 2.5.5 which breaks due to missing
+# https://github.com/ansible/ansible/commit/67859c3476501d5d9839fd904aec55468d09593a
+# This was fixed in 2.5.6 so remove the pin when OSA updates to newer version.
+#export XCI_ANSIBLE_PIP_VERSION=${XCI_ANSIBLE_PIP_VERSION:-$(curl -s https://raw.githubusercontent.com/openstack/openstack-ansible/${OPENSTACK_OSA_VERSION}/scripts/bootstrap-ansible.sh | grep ansible== | sed -n "s/.*ansible==\([0-9.]*\).*/\1/p")}
+export XCI_ANSIBLE_PIP_VERSION="2.7.8"
+
export ANSIBLE_HOST_KEY_CHECKING=False
-# subject of the certificate
-export XCI_SSL_SUBJECT=${XCI_SSL_SUBJECT:-"/C=US/ST=California/L=San Francisco/O=IT/CN=xci.releng.opnfv.org"}
export DEPLOY_SCENARIO=${DEPLOY_SCENARIO:-"os-nosdn-nofeature"}
-# Kubespray requires that ansible version is 2.4.4
-export XCI_KUBE_ANSIBLE_PIP_VERSION=2.4.4
+# attempt to sync Ansible version used by Kubespray with the rest
+export XCI_KUBE_ANSIBLE_PIP_VERSION=$XCI_ANSIBLE_PIP_VERSION
# OpenStack global requirements version
export OPENSTACK_REQUIREMENTS_VERSION=${OPENSTACK_REQUIREMENTS_VERSION:-$(awk '/requirements_git_install_branch:/ {print $2}' ${XCI_PATH}/xci/installer/osa/files/openstack_services.yml)}
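As a side note, the OPENSTACK_REQUIREMENTS_VERSION default above is a plain
awk extraction; the same pipeline run standalone (assuming
openstack_services.yml carries a requirements_git_install_branch entry):

    # prints the branch/SHA recorded for openstack/requirements
    awk '/requirements_git_install_branch:/ {print $2}' \
        xci/installer/osa/files/openstack_services.yml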
diff --git a/xci/config/ha-vars b/xci/config/ha-vars
index 3440a855..4c40fb33 100755
--- a/xci/config/ha-vars
+++ b/xci/config/ha-vars
@@ -12,6 +12,7 @@
export NUM_NODES=6
[[ "$INSTALLER_TYPE" == "osa" ]] && export NODE_NAMES="opnfv controller00 controller01 controller02 compute00 compute01"
[[ "$INSTALLER_TYPE" == "kubespray" ]] && export NODE_NAMES="opnfv master1 master2 master3 node1 node2"
+[[ "$INSTALLER_TYPE" == "osh" ]] && export NODE_NAMES="opnfv master1 master2 master3 node1 node2"
export VM_DOMAIN_TYPE=${VM_DOMAIN_TYPE:-kvm}
export VM_CPU=${VM_CPU:-6}
export VM_DISK=${VM_DISK:-80}
diff --git a/xci/config/mini-vars b/xci/config/mini-vars
index 9e7e6180..aaa4cb88 100755
--- a/xci/config/mini-vars
+++ b/xci/config/mini-vars
@@ -12,6 +12,7 @@
export NUM_NODES=3
[[ "$INSTALLER_TYPE" == "osa" ]] && export NODE_NAMES="opnfv controller00 compute00"
[[ "$INSTALLER_TYPE" == "kubespray" ]] && export NODE_NAMES="opnfv master1 node1"
+[[ "$INSTALLER_TYPE" == "osh" ]] && export NODE_NAMES="opnfv master1 node1"
export VM_DOMAIN_TYPE=${VM_DOMAIN_TYPE:-kvm}
export VM_CPU=${VM_CPU:-6}
export VM_DISK=${VM_DISK:-80}
diff --git a/xci/config/noha-vars b/xci/config/noha-vars
index 2f3db993..e887ddb8 100755
--- a/xci/config/noha-vars
+++ b/xci/config/noha-vars
@@ -12,6 +12,7 @@
export NUM_NODES=4
[[ "$INSTALLER_TYPE" == "osa" ]] && export NODE_NAMES="opnfv controller00 compute00 compute01"
[[ "$INSTALLER_TYPE" == "kubespray" ]] && export NODE_NAMES="opnfv master1 node1 node2"
+[[ "$INSTALLER_TYPE" == "osh" ]] && export NODE_NAMES="opnfv master1 node1 node2"
export VM_DOMAIN_TYPE=${VM_DOMAIN_TYPE:-kvm}
export VM_CPU=${VM_CPU:-6}
export VM_DISK=${VM_DISK:-80}
diff --git a/xci/config/pinned-versions b/xci/config/pinned-versions
index 5ef0c7c2..440972ae 100755
--- a/xci/config/pinned-versions
+++ b/xci/config/pinned-versions
@@ -25,25 +25,31 @@
#-------------------------------------------------------------------------------
# use releng-xci from master until the development work with the sandbox is complete
export OPNFV_RELENG_VERSION="master"
-# use functest-healthcheck image that is known to work and contains the original list of testcases
-export OPNFV_FUNCTEST_HEALTHCHECK_DOCKER_IMAGE_DIGEST="sha256:faa1ec5778ac1580cc46f0e4f5abec24026868b95fc6fc3ae6023275dc980c2d"
-# HEAD of bifrost "master" as of 13.02.2018
-export OPENSTACK_BIFROST_VERSION=${OPENSTACK_BIFROST_VERSION:-"81e48e7b488c15516503b2b08f087f4a7ae9a673"}
-# HEAD of ironic "master" as of 13.02.2018
-export BIFROST_IRONIC_VERSION=${BIFROST_IRONIC_VERSION:-"9b8440aa318e4883a74ef8640ad5409dd22858a9"}
-# HEAD of ironic-client "master" as of 13.02.2018
-export BIFROST_IRONIC_CLIENT_VERSION=${BIFROST_IRONIC_CLIENT_VERSION:-"1da269b0e99601f8f6395b2ce3f436f5600e8140"}
-# HEAD of ironic-inspector "master" as of 13.02.2018
-export BIFROST_IRONIC_INSPECTOR_VERSION=${BIFROST_IRONIC_INSPECTOR_VERSION:-"84da941fafb905c2debdd9a9ba68ba743af3ce8a"}
-# HEAD of ironic-inspector-client "master" as of 13.02.2018
-export BIFROST_IRONIC_INSPECTOR_CLIENT_VERSION=${BIFROST_IRONIC_INSPECTOR_CLIENT_VERSION:-"b73403fdad3165cfcccbf4b0330d426ae5925e01"}
-# HEAD of osa "stable/queens" as of 06.06.2018
-export OPENSTACK_OSA_VERSION=${OPENSTACK_OSA_VERSION:-"5dbc4c5b67e67c370b04099c7dce56dd2f559288"}
+# HEAD of bifrost "master" as of 02.07.2019
+export OPENSTACK_BIFROST_VERSION=${OPENSTACK_BIFROST_VERSION:-"cd559480c95867d272b8a32240e50c390646665b"}
+# HEAD of ironic "master" as of 02.07.2019
+export BIFROST_IRONIC_VERSION=${BIFROST_IRONIC_VERSION:-"1beb8068f95f90a570c72b82f6e518110312b696"}
+# HEAD of ironic-client "master" as of 02.07.2019
+export BIFROST_IRONIC_CLIENT_VERSION=${BIFROST_IRONIC_CLIENT_VERSION:-"eae60397bfcbed322b2121f77c35ac74d0c6b74c"}
+# HEAD of ironic-inspector "master" as of 02.07.2019
+export BIFROST_IRONIC_INSPECTOR_VERSION=${BIFROST_IRONIC_INSPECTOR_VERSION:-"0b38536d1c9ab92952e6ecd069ea13facf012830"}
+# HEAD of ironic-inspector-client "master" as of 02.07.2019
+export BIFROST_IRONIC_INSPECTOR_CLIENT_VERSION=${BIFROST_IRONIC_INSPECTOR_CLIENT_VERSION:-"81ae133bd570ea7359b4797ee5699d2d4233b445"}
+# HEAD of osa "stable/rocky" as of 04.01.2019
+export OPENSTACK_OSA_VERSION=${OPENSTACK_OSA_VERSION:-"2087cd98f28b35f655ca398d25d2a6c71e38328e"}
+export OPENSTACK_OSH_VERSION="rocky"
+# HEAD of osh "master" as of 17.07.2019
+export OSH_VERSION=${OSH_VERSION:-"dadf9946e076df2b09556f4a18107dc487788cdd"}
+# HEAD of osh-infra "master" as of 16.07.2019
+export OSH_INFRA_VERSION=${OSH_INFRA_VERSION:-"e96bdd9fb6235573acf5d4d1d019dca1e1446b7d"}
export KEEPALIVED_VERSION=$(grep -E '.*name: keepalived' -A 3 \
${XCI_PATH}/xci/installer/osa/files/ansible-role-requirements.yml \
| tail -n1 | sed -n 's/\(^.*: \)\([0-9a-z].*$\)/\2/p')
export HAPROXY_VERSION=$(grep -E '.*name: haproxy_server' -A 3 \
${XCI_PATH}/xci/installer/osa/files/ansible-role-requirements.yml \
| tail -n1 | sed -n 's/\(^.*: \)\([0-9a-z].*$\)/\2/p')
-# HEAD of kubspray "master" as of 16.05.2018
-export KUBESPRAY_VERSION=${KUBESPRAY_VERSION:-"38e727dbe1bdf5316fae8d645718cc8279fbda20"}
+# Kubespray release v2.11.0 dated 31.08.2019
+export KUBESPRAY_VERSION=${KUBESPRAY_VERSION:-"v2.11.0"}
+# Kubernetes version supported by the pinned kubespray version
+# this is needed for pulling in kubectl
+export KUBERNETES_VERSION=${KUBERNETES_VERSION:-"v1.15.3"}
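The KEEPALIVED_VERSION and HAPROXY_VERSION exports above scrape the pinned
role versions out of ansible-role-requirements.yml; a sketch of what the
pipeline matches (entry layout assumed from the grep -A 3 window):

    # given an entry shaped roughly like:
    #   - name: keepalived
    #     scm: git
    #     src: https://github.com/evrardjp/ansible-keepalived
    #     version: <tag-or-sha>
    grep -E '.*name: keepalived' -A 3 ansible-role-requirements.yml \
        | tail -n1 | sed -n 's/\(^.*: \)\([0-9a-z].*$\)/\2/p'   # -> <tag-or-sha>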
diff --git a/xci/config/user-vars b/xci/config/user-vars
index 1f30ec38..d3d7b2f1 100755
--- a/xci/config/user-vars
+++ b/xci/config/user-vars
@@ -21,7 +21,7 @@
# export XCI_FLAVOR="ha"
#-------------------------------------------------------------------------------
export XCI_FLAVOR=${XCI_FLAVOR:-mini}
-export XCI_DISTRO=${XCI_DISTRO:-$(source /etc/os-release &>/dev/null || source /usr/lib/os-release &>/dev/null; echo ${ID,,})}
+export XCI_DISTRO=${XCI_DISTRO:-$(source /etc/os-release &>/dev/null || source /usr/lib/os-release &>/dev/null; ID=${ID%%-*}; echo ${ID,,})}
export XCI_CEPH_ENABLED=${XCI_CEPH_ENABLED:-false}
#-------------------------------------------------------------------------------
@@ -34,6 +34,14 @@ export XCI_CEPH_ENABLED=${XCI_CEPH_ENABLED:-false}
# export INSTALLER_TYPE="kubespray"
export INSTALLER_TYPE=${INSTALLER_TYPE:-osa}
+# Waiting for upstream openstack-helm (OSH) to support openSUSE
+if [ "$XCI_DISTRO" == "opensuse" ] && [ "$INSTALLER_TYPE" == "osh" ]; then
+ export XCI_DISTRO=ubuntu-bionic
+ export OSH_DISTRO=opensuse
+elif [ "$XCI_DISTRO" == "ubuntu" ] && [ "$INSTALLER_TYPE" == "osh" ]; then
+ export OSH_DISTRO=ubuntu
+fi
+
#-------------------------------------------------------------------------------
# Set DEPLOYMENT
#-------------------------------------------------------------------------------
@@ -53,6 +61,6 @@ export INFRA_DEPLOYMENT=${INFRA_DEPLOYMENT:-bifrost}
export XCI_ANSIBLE_PARAMS=${XCI_ANSIBLE_PARAMS:-""}
export RUN_TEMPEST=${RUN_TEMPEST:-false}
export CORE_OPENSTACK_INSTALL=${CORE_OPENSTACK_INSTALL:-false}
-export BIFROST_USE_PREBUILT_IMAGES=${BIFROST_USE_PREBUILT_IMAGES:-false}
+export BIFROST_CREATE_IMAGE_VIA_DIB=${BIFROST_CREATE_IMAGE_VIA_DIB:-true}
# Set this to true to force XCI to re-create the target OS images
export CLEAN_DIB_IMAGES=${CLEAN_DIB_IMAGES:-false}
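The new ID=${ID%%-*} normalization in the XCI_DISTRO export matters on
distributions whose os-release ID carries a suffix; a quick illustration
(example value assumed):

    ID="opensuse-leap"    # as found in /etc/os-release on openSUSE Leap
    ID=${ID%%-*}          # strip everything from the first dash -> "opensuse"
    echo "${ID,,}"        # lowercase, as before -> opensuse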
diff --git a/xci/files/requirements.yml b/xci/files/requirements.yml
index a1b7feb3..1e097b09 100644
--- a/xci/files/requirements.yml
+++ b/xci/files/requirements.yml
@@ -7,4 +7,4 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-- src: peru.proxy_settings
+- src: ruzickap.proxy_settings
diff --git a/xci/files/xci-destroy-env.sh b/xci/files/xci-destroy-env.sh
index c95ea838..058d6569 100755
--- a/xci/files/xci-destroy-env.sh
+++ b/xci/files/xci-destroy-env.sh
@@ -21,20 +21,20 @@ rm -rf /opt/stack
# HOME is normally set by sudo -H
rm -rf ${HOME}/.config/openstack
rm -rf ${HOME}/.ansible
+# keepalived role fails ansible lint when cached
+rm -rf ${HOME}/releng-xci/xci/playbooks/roles/keepalived
# Wipe repos
rm -rf ${XCI_CACHE}/repos
-# bifrost installs everything on venv so we need to look there if virtualbmc is not installed on the host.
-if which vbmc &>/dev/null || { [[ -e ${XCI_VENV}/bifrost/bin/activate ]] && source ${XCI_VENV}/bifrost/bin/activate; }; then
+if which ${XCI_VENV}/bin/vbmc &>/dev/null; then
# Delete all libvirt VMs and hosts from vbmc (look for a port number)
- for vm in $(vbmc list | awk '/[0-9]/{{ print $2 }}'); do
+ for vm in $(${XCI_VENV}/bin/vbmc list | awk '/[0-9]/{{ print $2 }}'); do
if which virsh &>/dev/null; then
- virsh destroy $vm &>/dev/null || true
- virsh undefine $vm &>/dev/null || true
+ virsh destroy $vm || true
+ virsh undefine $vm || true
fi
- vbmc delete $vm
+ ${XCI_VENV}/bin/vbmc delete $vm
done
- which vbmc &>/dev/null || { [[ -e /opt/stack/bifrost/bin/activate ]] && deactivate; }
fi
# Destroy all XCI VMs on all flavors
@@ -42,8 +42,8 @@ for varfile in ${flavors[@]}; do
source ${XCI_PATH}/xci/config/${varfile}-vars
for vm in ${NODE_NAMES}; do
if which virsh &>/dev/null; then
- virsh destroy $vm &>/dev/null || true
- virsh undefine $vm &>/dev/null || true
+ virsh destroy $vm &> /dev/null || true
+ virsh undefine $vm &> /dev/null || true
fi
done
done
@@ -84,5 +84,7 @@ service ironic-conductor start || true
service ironic-inspector restart || true
rm -rf ${XCI_VENV}
+# We also need to clear up previous vbmc config dirs
+rm -rf ${HOME}/.vbmc
# vim: set ts=4 sw=4 expandtab:
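A note on the vbmc change above: the binary is now resolved from the XCI
virtualenv instead of PATH, so the cleanup can be exercised in isolation like
so (a sketch, assuming the default venv location under the checkout):

    XCI_VENV=${XCI_VENV:-~/releng-xci/venv}
    if [ -x "${XCI_VENV}/bin/vbmc" ]; then
        "${XCI_VENV}/bin/vbmc" list    # shows registered BMCs and their ports
    fi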
diff --git a/xci/files/xci-lib.sh b/xci/files/xci-lib.sh
index 060dc267..860153b9 100644
--- a/xci/files/xci-lib.sh
+++ b/xci/files/xci-lib.sh
@@ -10,6 +10,35 @@
# Avoid double sourcing the file
[[ -n ${XCI_LIB_SOURCED:-} ]] && return 0 || export XCI_LIB_SOURCED=1
+function usage() {
+ echo "
+Usage: $(basename ${0}) [-i <idf>] [-p <pdf>]
+
+ -h: This message
+ -i: Installer Descriptor File (IDF). (Default ${XCI_PATH}/xci/var/idf.yml)
+ -p: Pod Descriptor File (PDF). (Default ${XCI_PATH}/xci/var/pdf.yml)
+ "
+ exit 0
+}
+
+function parse_cmdline_opts() {
+ IDF=${XCI_PATH}/xci/var/idf.yml
+ PDF=${XCI_PATH}/xci/var/pdf.yml
+
+ while getopts ":hi:p:" o; do
+ case "${o}" in
+ i) IDF="${OPTARG}" ;;
+ p) PDF="${OPTARG}" ;;
+ h) usage ;;
+ *) echo "ERROR: Invalid option '-${OPTARG}'"; usage ;;
+ esac
+ done
+
+ # Do all the exports
+ export PDF=$(realpath ${PDF})
+ export IDF=$(realpath ${IDF})
+}
+
function bootstrap_xci_env() {
# Declare our virtualenv
export XCI_VENV=${XCI_PATH}/venv/
@@ -23,6 +52,8 @@ function bootstrap_xci_env() {
source "$XCI_PATH/xci/installer/${INSTALLER_TYPE}/env" &>/dev/null || true
# source xci configuration
source $XCI_PATH/xci/config/env-vars
+ # set BAREMETAL to true if the vendor in the PDF is not libvirt
+ grep -o vendor.* ${PDF} | grep -q libvirt && export BAREMETAL=false || export BAREMETAL=true
}
function install_ansible() {
@@ -55,7 +86,7 @@ function install_ansible() {
source /etc/os-release || source /usr/lib/os-release
case ${ID,,} in
- *suse)
+ *suse*)
OS_FAMILY="Suse"
INSTALLER_CMD="sudo -H -E zypper -q install -y --no-recommends"
CHECK_CMD="zypper search --match-exact --installed"
@@ -104,7 +135,7 @@ function install_ansible() {
[curl]=curl
)
EXTRA_PKG_DEPS=( apt-utils )
- sudo apt-get update
+ sudo apt-get update -qq > /dev/null
;;
rhel|fedora|centos)
@@ -127,7 +158,7 @@ function install_ansible() {
[wget]=wget
[curl]=curl
)
- sudo $PKG_MANAGER updateinfo
+ sudo $PKG_MANAGER updateinfo > /dev/null
EXTRA_PKG_DEPS=( deltarpm )
;;
@@ -141,14 +172,7 @@ function install_ansible() {
install_map+=(${EXTRA_PKG_DEPS[@]} )
- ${INSTALLER_CMD} ${install_map[@]}
-
- # Note(cinerama): If pip is linked to pip3, the rest of the install
- # won't work. Remove the alternatives. This is due to ansible's
- # python 2.x requirement.
- if [[ $(readlink -f /etc/alternatives/pip) =~ "pip3" ]]; then
- sudo -H update-alternatives --remove pip $(readlink -f /etc/alternatives/pip)
- fi
+ ${INSTALLER_CMD} ${install_map[@]} > /dev/null
# We need to prepare our virtualenv now
virtualenv --quiet --no-site-packages ${XCI_VENV}
@@ -158,7 +182,7 @@ function install_ansible() {
# We are inside the virtualenv now so we should be good to use pip and python from it.
pip -q install --upgrade pip==9.0.3 # We need a version which supports the '-c' parameter
- pip -q install --upgrade -c $uc -c $osa_uc ara virtualenv pip setuptools shade ansible==$XCI_ANSIBLE_PIP_VERSION ansible-lint==3.4.21
+ pip -q install --upgrade -c $uc -c $osa_uc ara==0.16.4 virtualenv pip setuptools shade ansible==$XCI_ANSIBLE_PIP_VERSION ansible-lint==3.4.21
ara_location=$(python -c "import os,ara; print(os.path.dirname(ara.__file__))")
export ANSIBLE_CALLBACK_PLUGINS="/etc/ansible/roles/plugins/callback:${ara_location}/plugins/callbacks"
@@ -166,9 +190,9 @@ function install_ansible() {
ansible_lint() {
set -eu
- local playbooks_dir=(xci/playbooks xci/installer/osa/playbooks xci/installer/kubespray/playbooks)
+ local playbooks_dir=(xci/playbooks xci/installer/osa/playbooks xci/installer/kubespray/playbooks xci/installer/osh/playbooks)
# Extract role from scenario information
- local testing_role=$(sed -n "/^- scenario: ${DEPLOY_SCENARIO}/,/^$/p" ${XCI_PATH}/xci/opnfv-scenario-requirements.yml | grep role | rev | cut -d '/' -f -1 | rev)
+ local testing_role=$(sed -n "/^- scenario: ${DEPLOY_SCENARIO}$/,/^$/p" ${XCI_PATH}/xci/opnfv-scenario-requirements.yml | grep role | rev | cut -d '/' -f -1 | rev)
# clear XCI_CACHE
rm -rf ${XCI_CACHE}/repos/openstack-ansible-tests
@@ -239,7 +263,7 @@ submit_bug_report() {
echo "xci installer: $INSTALLER_TYPE"
echo "xci scenario: $DEPLOY_SCENARIO"
echo "Environment variables:"
- env | grep --color=never '\(OPNFV\|XCI\|INSTALLER_TYPE\|OPENSTACK\|SCENARIO\|ANSIBLE\)'
+ env | grep --color=never '\(OPNFV\|XCI\|INSTALLER_TYPE\|OPENSTACK\|SCENARIO\|ANSIBLE\|BIFROST\|DIB\)'
echo "-------------------------------------------------------------------------"
}
@@ -262,6 +286,7 @@ log_xci_information() {
[[ "$INFRA_DEPLOYMENT" == "bifrost" ]] && echo "openstack/bifrost version: $OPENSTACK_BIFROST_VERSION"
[[ "$INSTALLER_TYPE" == "osa" ]] && echo "openstack/openstack-ansible version: $OPENSTACK_OSA_VERSION"
[[ "$INSTALLER_TYPE" == "kubespray" ]] && echo "kubespray version: $KUBESPRAY_VERSION"
+ [[ "$INSTALLER_TYPE" == "osh" ]] && echo "kubespray version: $KUBESPRAY_VERSION"
echo "-------------------------------------------------------------------------"
}
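The new usage/parse_cmdline_opts helpers are consumed by the entry script; a
minimal sketch of a caller, assuming XCI_PATH points at the checkout:

    export XCI_PATH=~/releng-xci        # assumed checkout location
    source ${XCI_PATH}/xci/files/xci-lib.sh
    parse_cmdline_opts "$@"             # exports realpath-resolved PDF and IDF
    echo "PDF=${PDF} IDF=${IDF}"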
diff --git a/xci/infra/bifrost/infra-provision.sh b/xci/infra/bifrost/infra-provision.sh
index 17eb4158..b0617733 100644
--- a/xci/infra/bifrost/infra-provision.sh
+++ b/xci/infra/bifrost/infra-provision.sh
@@ -30,14 +30,16 @@ ansible-playbook ${XCI_ANSIBLE_PARAMS} \
-e vm_domain_type=${VM_DOMAIN_TYPE} \
-e baremetal_json_file=/tmp/baremetal.json \
-e xci_distro=${XCI_DISTRO} \
- ${BIFROST_ROOT_DIR}/playbooks/xci-create-virtual.yml
+ -e pdf_file=${PDF} \
+ -e idf_file=${IDF} \
+ ${BIFROST_ROOT_DIR}/playbooks/xci-setup-nodes.yml
ansible-playbook ${XCI_ANSIBLE_PARAMS} \
--private-key=${XCI_PATH}/xci/scripts/vm/id_rsa_for_dib \
--user=devuser \
-i ${XCI_PATH}/xci/playbooks/dynamic_inventory.py \
- ${BIFROST_ROOT_DIR}/playbooks/xci-prepare-virtual.yml
+ ${BIFROST_ROOT_DIR}/playbooks/xci-prepare-env.yml
source ${XCI_CACHE}/repos/bifrost/scripts/bifrost-env.sh
@@ -52,28 +54,33 @@ ansible-playbook ${XCI_ANSIBLE_PARAMS} \
-e testing_user=root \
-e test_vm_num_nodes=${NUM_NODES} \
-e test_vm_cpu='host-model' \
- -e inventory_dhcp=false \
+ -e inventory_dhcp=${BIFROST_INVENTORY_DHCP} \
-e inventory_dhcp_static_ip=false \
-e enable_inspector=true \
-e inspect_nodes=true \
- -e download_ipa=true \
- -e create_ipa_image=false \
+ -e download_ipa=${BIFROST_DOWNLOAD_IPA} \
+ -e create_ipa_image=${BIFROST_CREATE_IPA} \
-e write_interfaces_file=true \
-e ipv4_gateway=192.168.122.1 \
-e wait_timeout=3600 \
-e enable_keystone=false \
- -e ironicinspector_source_install=true \
-e ironicinspector_git_branch=${BIFROST_IRONIC_INSPECTOR_VERSION:-master} \
- -e ironicinspectorclient_source_install=true \
-e ironicinspectorclient_git_branch=${BIFROST_IRONIC_INSPECTOR_CLIENT_VERSION:-master} \
- -e ironicclient_source_install=true \
-e ironicclient_git_branch=${BIFROST_IRONIC_CLIENT_VERSION:-master} \
-e ironic_git_branch=${BIFROST_IRONIC_VERSION:-master} \
- -e use_prebuilt_images=${BIFROST_USE_PREBUILT_IMAGES:-false} \
+ -e create_image_via_dib=${BIFROST_CREATE_IMAGE_VIA_DIB:-true} \
-e xci_distro=${XCI_DISTRO} \
-e ironic_url="http://192.168.122.2:6385/" \
${BIFROST_ROOT_DIR}/playbooks/opnfv-virtual.yml
+
+if [ "${BAREMETAL}" = true ]; then
+ ansible-playbook ${XCI_ANSIBLE_PARAMS} \
+ --user=devuser -i ${XCI_PATH}/xci/playbooks/dynamic_inventory.py \
+ -i ${XCI_CACHE}/repos/bifrost/playbooks/inventory/bifrost_inventory.py \
+ ${BIFROST_ROOT_DIR}/playbooks/wait-for-baremetal.yml
+fi
+
echo "-----------------------------------------------------------------------"
echo "Info: VM nodes are provisioned!"
echo "-----------------------------------------------------------------------"
diff --git a/xci/infra/bifrost/playbooks/opnfv-virtual.yml b/xci/infra/bifrost/playbooks/opnfv-virtual.yml
index 68d76cfc..f97eae4b 100644
--- a/xci/infra/bifrost/playbooks/opnfv-virtual.yml
+++ b/xci/infra/bifrost/playbooks/opnfv-virtual.yml
@@ -52,7 +52,7 @@
mode: '0755'
owner: 'root'
group: 'root'
- when: use_prebuilt_images | bool == true
+ when: create_image_via_dib | bool == false
- name: Ensure /etc/hosts has good defaults
lineinfile:
create: yes
@@ -76,19 +76,17 @@
testing: false
enabled_hardware_types: ipmi
network_interface: "{{ ansible_default_ipv4.interface }}"
- # NOTE(TheJulia): While the next step creates a ramdisk, some elements
- # do not support ramdisk-image-create as they invoke steps to cleanup
- # the ramdisk which causes ramdisk-image-create to believe it failed.
+ # Create the IPA image for ironic to boot the nodes and write the final distro in the hard drive
+ # fedora is used because it is the only one working with ericsson-pod2 (it has support for newer hardware)
- role: bifrost-create-dib-image
dib_imagename: "{{ http_boot_folder }}/ipa"
build_ramdisk: false
- dib_os_element: "{{ ipa_dib_os_element|default('debian') }}"
- dib_os_release: "jessie"
+ dib_os_element: "{{ ipa_dib_os_element|default('fedora') }}"
dib_elements: "ironic-agent {{ ipa_extra_dib_elements | default('') }}"
dib_notmpfs: true
when:
- create_ipa_image | bool == true
- - not use_prebuilt_images | bool == false
+ # Create the final distro image
- role: bifrost-create-dib-image
dib_imagetype: "qcow2"
dib_imagename: "{{deploy_image}}"
@@ -103,7 +101,6 @@
when:
- create_image_via_dib | bool == true
- transform_boot_image | bool == false
- - use_prebuilt_images | bool == false
- role: bifrost-keystone-client-config
clouds:
bifrost:
@@ -128,10 +125,6 @@
setup:
delegate_to: opnfv
delegate_facts: False
- - name: "Override default bifrost DNS if we are behind a proxy"
- set_fact:
- ipv4_nameserver: "192.168.122.1"
- when: lookup('env','http_proxy') != ''
- name: Find network interface in the OPNFV node
set_fact:
network_interface: "{{ ansible_default_ipv4.interface }}"
@@ -147,6 +140,8 @@
- import_role:
name: bifrost-configdrives-dynamic
private: True
+ vars:
+ ipv4_nameserver: "{{ host_info[inventory_hostname]['public']['dns'] | list }}"
delegate_to: opnfv
- import_role:
name: bifrost-deploy-nodes-dynamic
diff --git a/xci/infra/bifrost/playbooks/roles/common/venv_python_path.yml b/xci/infra/bifrost/playbooks/roles/common/venv_python_path.yml
new file mode 100644
index 00000000..7f7ad670
--- /dev/null
+++ b/xci/infra/bifrost/playbooks/roles/common/venv_python_path.yml
@@ -0,0 +1,34 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: "If VENV is set in the environment, enable installation into venv"
+ set_fact:
+ enable_venv: true
+ when: lookup('env', 'VENV') | length > 0
+
+- name: "Retrieve venv python path"
+ shell: "/bin/echo -e \"import sys\\nprint(':'.join(sys.path))\" | {{ ansible_python.get('executable', '/usr/bin/python').split('/')[-1] }}"
+ environment: "{{ bifrost_venv_env | default({}) }}"
+ register: venv_pythonpath_result
+ when: enable_venv
+
+- name: "Compute venv python path"
+ set_fact:
+ venv_pythonpath:
+ PYTHONPATH: "{{ venv_pythonpath_result.get('stdout', '') }}"
+ when: enable_venv
+
+- name: "Compute proper complete venv including proper Python path"
+ set_fact:
+ venv: "{{ venv | default({}) | combine(bifrost_venv_env | default({})) | combine(venv_pythonpath | default({})) }}"
+
diff --git a/xci/infra/bifrost/playbooks/wait-for-baremetal.yml b/xci/infra/bifrost/playbooks/wait-for-baremetal.yml
new file mode 100644
index 00000000..96aab29c
--- /dev/null
+++ b/xci/infra/bifrost/playbooks/wait-for-baremetal.yml
@@ -0,0 +1,17 @@
+# ironic needs to boot the server again to install the OS in the hard drive
+# we are currently modifying opnfv vm networking config while ironic is
+# doing that and it sometimes fails because of networking glitches. We should
+# wait until the OS is installed to do the opnfv config
+
+- hosts: baremetal
+ name: "Wait for baremetal blades to be ready"
+ become: no
+ gather_facts: False
+ tasks:
+ - name: "Wait for nodes to reboot."
+ wait_for: state=stopped port=22 host={{ ipv4_address }} timeout=5000
+ delegate_to: opnfv
+ - name: "Wait for nodes to become available."
+ wait_for: state=started port=22 host={{ ipv4_address }} timeout=5000
+ delegate_to: opnfv
+
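The stopped-then-started wait_for pair above detects the reboot by watching
SSH go down and come back; a rough shell equivalent of the same pattern
(assuming nc is available on the delegate host):

    while nc -z "$NODE_IP" 22; do sleep 5; done    # port drops: node is rebooting
    until nc -z "$NODE_IP" 22; do sleep 5; done    # port returns: OS is installed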
diff --git a/xci/infra/bifrost/playbooks/xci-prepare-virtual.yml b/xci/infra/bifrost/playbooks/xci-prepare-env.yml
index b4ad8c0c..d576324d 100644
--- a/xci/infra/bifrost/playbooks/xci-prepare-virtual.yml
+++ b/xci/infra/bifrost/playbooks/xci-prepare-env.yml
@@ -18,10 +18,6 @@
vars_files:
- "{{ xci_path }}/xci/var/opnfv.yml"
tasks:
- - name: Copy bifrost inventory file
- copy:
- src: /tmp/baremetal.json
- dest: /tmp/baremetal.json
- name: Configure SSH key for devuser
user:
@@ -70,6 +66,25 @@
state: directory
become: yes
+ # Directory must exist before passing the static config
+ - name: "Setup Inventory DHCP Hosts Directory"
+ file:
+ path: "/etc/dnsmasq.d/bifrost.dhcp-hosts.d"
+ state: directory
+ owner: "root"
+ group: "root"
+ mode: 0755
+ become: yes
+
+ - name: Copy bifrost files
+ copy:
+ src: "{{ item.src }}"
+ dest: "{{ item.dst }}"
+ with_items:
+ - { src: '/tmp/baremetal.json', dst: '/tmp/baremetal.json' }
+ - { src: '/tmp/baremetalstaticips', dst: '/etc/dnsmasq.d/bifrost.dhcp-hosts.d/baremetalstaticips' }
+ become: yes
+
- name: Copy original qcow2 image to OPNFV VM
synchronize:
src: "{{ xci_cache }}/{{ item }}"
@@ -91,3 +106,13 @@
become: yes
when: _resolv_conf_netconfig.stat.exists
when: ansible_pkg_mgr == 'zypper'
+
+ #TODO: Find a way to do this with Ansible
+ - name: Make sure the default gateway is correct
+ shell: "ip route del default"
+ become: yes
+
+ #TODO: Find a way to do this with Ansible
+ - name: Make sure the default gateway is correct
+ shell: "ip route add default via {{ host_info[inventory_hostname].public.gateway }}"
+ become: yes
diff --git a/xci/infra/bifrost/playbooks/xci-create-virtual.yml b/xci/infra/bifrost/playbooks/xci-setup-nodes.yml
index 043907fe..a0f92159 100644
--- a/xci/infra/bifrost/playbooks/xci-create-virtual.yml
+++ b/xci/infra/bifrost/playbooks/xci-setup-nodes.yml
@@ -12,15 +12,17 @@
name: "Bootstrap XCI hardware resources and prepare provisioning environment"
gather_facts: yes
vars_files:
- - "{{ xci_path }}/xci/var/pdf.yml"
- - "{{ xci_path }}/xci/var/opnfv_vm.yml"
+ - "{{ pdf_file }}"
+ - "{{ idf_file }}"
+ - "{{ xci_path }}/xci/var/opnfv_vm_pdf.yml"
+ - "{{ xci_path }}/xci/var/opnfv_vm_idf.yml"
- "{{ xci_path }}/xci/var/opnfv.yml"
pre_tasks:
- name: Load distribution variables
include_vars:
file: "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml"
roles:
- - role: create-vm-nodes
+ - role: create-nodes
become: yes
- role: clone-repository
project: "opnfv/bifrost"
@@ -36,7 +38,17 @@
state: started
port: 22
connect_timeout: 10
- timeout: 180
+ timeout: 10180
+
+ # No ansible module for brctl found
+ - name: Add pxe interface to the bridge
+ shell: "brctl addif {{ item.bridge }} {{ item.interface }}"
+ become: true
+ when: baremetal | bool == true
+ with_items:
+ - { bridge: "{{ network_bridge_admin }}", interface: "{{ network_interface_admin }}" }
+ - { bridge: "{{ network_bridge_mgmt }}", interface: "{{ network_interface_mgmt }}" }
+
- name: Load distribution variables
include_vars:
file: "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml"
diff --git a/xci/infra/bifrost/scripts/bifrost-env.sh b/xci/infra/bifrost/scripts/bifrost-env.sh
index 72d1dafe..7d882125 100755
--- a/xci/infra/bifrost/scripts/bifrost-env.sh
+++ b/xci/infra/bifrost/scripts/bifrost-env.sh
@@ -30,4 +30,14 @@ esac
export BIFROST_INVENTORY_SOURCE=/tmp/baremetal.json
+if [ "${BAREMETAL}" = true ]; then
+ export BIFROST_INVENTORY_DHCP=true
+ export BIFROST_DOWNLOAD_IPA=false
+ export BIFROST_CREATE_IPA=true
+else
+ export BIFROST_INVENTORY_DHCP=false
+ export BIFROST_DOWNLOAD_IPA=true
+ export BIFROST_CREATE_IPA=false
+fi
+
pip install -q --upgrade -r "${XCI_CACHE}/repos/bifrost/requirements.txt"
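These toggles feed straight into the bifrost playbook invocation shown
earlier in infra-provision.sh; the effective wiring is:

    source ${XCI_CACHE}/repos/bifrost/scripts/bifrost-env.sh
    ansible-playbook ... \
        -e inventory_dhcp=${BIFROST_INVENTORY_DHCP} \
        -e download_ipa=${BIFROST_DOWNLOAD_IPA} \
        -e create_ipa_image=${BIFROST_CREATE_IPA} \
        ${BIFROST_ROOT_DIR}/playbooks/opnfv-virtual.yml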
diff --git a/xci/installer/kubespray/deploy.sh b/xci/installer/kubespray/deploy.sh
index bcd7dc1d..af80b38f 100755
--- a/xci/installer/kubespray/deploy.sh
+++ b/xci/installer/kubespray/deploy.sh
@@ -33,6 +33,20 @@ echo "-----------------------------------------------------------------------"
echo "Info: Configured localhost for kubespray"
#-------------------------------------------------------------------------------
+# Configure installer
+#-------------------------------------------------------------------------------
+# TODO: summarize what this playbook does
+#-------------------------------------------------------------------------------
+
+echo "Info: Configuring kubespray installer"
+echo "-----------------------------------------------------------------------"
+cd $K8_XCI_PLAYBOOKS
+ansible-playbook ${XCI_ANSIBLE_PARAMS} \
+ -i ${XCI_PLAYBOOKS}/dynamic_inventory.py configure-installer.yml
+echo "-----------------------------------------------------------------------"
+echo "Info: Configured kubespray installer"
+
+#-------------------------------------------------------------------------------
# Configure deployment host, opnfv
#-------------------------------------------------------------------------------
# This playbook
@@ -69,14 +83,17 @@ if [ $XCI_FLAVOR != "aio" ]; then
echo "Info: Configured target hosts for kubespray"
fi
+
echo "Info: Using kubespray to deploy the kubernetes cluster"
echo "-----------------------------------------------------------------------"
ssh root@$OPNFV_HOST_IP "set -o pipefail; export XCI_FLAVOR=$XCI_FLAVOR; export INSTALLER_TYPE=$INSTALLER_TYPE; \
+ export IDF=/root/releng-xci/xci/var/idf.yml; export PDF=/root/releng-xci/xci/var/pdf.yml; \
cd releng-xci/.cache/repos/kubespray/; ansible-playbook \
- -i opnfv_inventory/dynamic_inventory.py cluster.yml -b | tee setup-kubernetes.log"
+ -i inventory/opnfv/dynamic_inventory.py cluster.yml -b | tee setup-kubernetes.log"
scp root@$OPNFV_HOST_IP:~/releng-xci/.cache/repos/kubespray/setup-kubernetes.log \
$LOG_PATH/setup-kubernetes.log
+
cd $K8_XCI_PLAYBOOKS
ansible-playbook ${XCI_ANSIBLE_PARAMS} \
-i ${XCI_PLAYBOOKS}/dynamic_inventory.py configure-kubenet.yml
@@ -85,32 +102,53 @@ echo "-----------------------------------------------------------------------"
echo "Info: Kubernetes installation is successfully completed!"
echo "-----------------------------------------------------------------------"
-# Configure the kubernetes authentication in opnfv host. In future releases
-# kubectl is no longer an artifact so we should not fail if it's not available.
-# This needs to be removed in the future
-ssh root@$OPNFV_HOST_IP "mkdir -p ~/.kube/;\
- cp -f ~/admin.conf ~/.kube/config; \
- cp -f ~/kubectl /usr/local/bin || true"
-
+#-------------------------------------------------------------------------------
+# Execute post-installation tasks
+#-------------------------------------------------------------------------------
+# Playbook post-deployment.yml is used to execute any post-deployment tasks that
+# are required for the scenario under test.
+#-------------------------------------------------------------------------------
+# copy admin.conf
+ssh root@$OPNFV_HOST_IP "mkdir -p ~/.kube/; \
+ cp -f ~/admin.conf ~/.kube/config"
+echo "-----------------------------------------------------------------------"
+echo "Info: Running post-deployment scenario role"
+echo "-----------------------------------------------------------------------"
+cd $K8_XCI_PLAYBOOKS
+ansible-playbook ${XCI_ANSIBLE_PARAMS} -i ${XCI_PLAYBOOKS}/dynamic_inventory.py \
+ post-deployment.yml
+echo "-----------------------------------------------------------------------"
+echo "Info: Post-deployment scenario role execution done"
+echo "-----------------------------------------------------------------------"
+echo
echo "Login opnfv host ssh root@$OPNFV_HOST_IP
according to the user-guide to create a service
https://kubernetes.io/docs/user-guide/walkthrough/k8s201/"
-
echo
echo "-----------------------------------------------------------------------"
echo "Info: Kubernetes login details"
echo "-----------------------------------------------------------------------"
-
-# Get the dashborad URL
-DASHBOARD_SERVICE=$(ssh root@$OPNFV_HOST_IP "kubectl get service -n kube-system |grep kubernetes-dashboard")
+echo
+# Get the dashboard URL
+if ssh-keygen -f "/home/opnfv/.ssh/known_hosts" -F $OPNFV_HOST_IP;
+then
+ssh-keygen -f "/home/opnfv/.ssh/known_hosts" -R $OPNFV_HOST_IP;
+echo "known_hosts entry from opnfv host from previous deployment found and deleted"
+fi
+DASHBOARD_SERVICE=$(ssh -q -o StrictHostKeyChecking=no root@$OPNFV_HOST_IP "kubectl get service -n kube-system |grep kubernetes-dashboard")
DASHBOARD_PORT=$(echo ${DASHBOARD_SERVICE} | awk '{print $5}' |awk -F "[:/]" '{print $2}')
KUBER_SERVER_URL=$(ssh root@$OPNFV_HOST_IP "grep -r server ~/.kube/config")
echo "Info: Kubernetes Dashboard URL:"
echo $KUBER_SERVER_URL | awk '{print $2}'| sed -n "s#:[0-9]*\$#:$DASHBOARD_PORT#p"
-# Get the dashborad user and password
+# Get the dashboard user and password
MASTER_IP=$(echo ${KUBER_SERVER_URL} | awk '{print $2}' |awk -F "[:/]" '{print $4}')
-USER_CSV=$(ssh root@$MASTER_IP " cat /etc/kubernetes/users/known_users.csv")
+if ssh-keygen -f "/home/opnfv/.ssh/known_hosts" -F $MASTER_IP;
+then
+ssh-keygen -f "/home/opnfv/.ssh/known_hosts" -R $MASTER_IP;
+echo "Info: known_hosts entry for master host from previous deployment found and deleted"
+fi
+USER_CSV=$(ssh -q -o StrictHostKeyChecking=no root@$MASTER_IP " cat /etc/kubernetes/users/known_users.csv")
USERNAME=$(echo $USER_CSV |awk -F ',' '{print $2}')
PASSWORD=$(echo $USER_CSV |awk -F ',' '{print $1}')
echo "Info: Dashboard username: ${USERNAME}"
diff --git a/xci/installer/kubespray/playbooks/configure-installer.yml b/xci/installer/kubespray/playbooks/configure-installer.yml
new file mode 100644
index 00000000..d88ee55c
--- /dev/null
+++ b/xci/installer/kubespray/playbooks/configure-installer.yml
@@ -0,0 +1,50 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- hosts: localhost
+ connection: local
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+
+ tasks:
+ - name: delete existing kubespray/inventory/opnfv directory
+ file:
+ path: "{{ xci_path }}/.cache/repos/kubespray/inventory/opnfv"
+ state: absent
+
+ - name: copy kubespray/inventory/sample as kubespray/inventory/opnfv
+ copy:
+ src: "{{ xci_path }}/.cache/repos/kubespray/inventory/sample/"
+ dest: "{{ xci_path }}/.cache/repos/kubespray/inventory/opnfv"
+
+ - name: update kubespray k8s-cluster.yml for xci
+ lineinfile:
+ path: "{{ xci_path }}/.cache/repos/kubespray/inventory/opnfv/group_vars/k8s-cluster/k8s-cluster.yml"
+ regexp: "{{ item.regexp }}"
+ line: "{{ item.line }}"
+ with_items:
+ - { regexp: "kube_version:.*", line: "kube_version: {{ kubernetes_version }}" }
+ - { regexp: "kubeconfig_localhost:.*", line: "kubeconfig_localhost: true" }
+ - { regexp: "kube_basic_auth:.*", line: "kube_basic_auth: true" }
+ - { regexp: "dashboard_enabled:.*", line: "dashboard_enabled: true" }
+
+# NOTE(fdegir): the reason for this task to be separate from the task which uses lineinfile
+# module is that escaping curly braces does not work with with_items. what happens is that
+# ansible tries to resolve {{ ansible_env.HOME }} which we don't want since it should point
+# to home folder of the user executing this task at runtime.
+ - name: update kubespray artifacts_dir
+ lineinfile:
+ path: "{{ xci_path }}/.cache/repos/kubespray/inventory/opnfv/group_vars/k8s-cluster/k8s-cluster.yml"
+ regexp: "artifacts_dir:.*"
+ line: "artifacts_dir: '{{ '{{' }} ansible_env.HOME {{ '}}' }}'"
+
+ - name: change dashboard server type to NodePort
+ lineinfile:
+ path: "{{ xci_path }}/.cache/repos/kubespray/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2"
+ insertafter: 'targetPort'
+ line: " type: NodePort"
diff --git a/xci/installer/kubespray/playbooks/configure-kubenet.yml b/xci/installer/kubespray/playbooks/configure-kubenet.yml
index 3b1cb013..18a126c1 100644
--- a/xci/installer/kubespray/playbooks/configure-kubenet.yml
+++ b/xci/installer/kubespray/playbooks/configure-kubenet.yml
@@ -38,14 +38,14 @@
with_items: "{{ kubenet_xci_static_routes }}"
loop_control:
label: "{{ item.network }}"
- when: deploy_scenario == 'k8-nosdn-nofeature'
+ when: deploy_scenario.find('k8-nosdn-') != -1
- name: Ensure rp_filter is disabled on localhost
sysctl:
name: net.ipv4.conf.all.rp_filter
sysctl_set: yes
state: present
- value: "{{ deploy_scenario == 'k8-nosdn-nofeature' | ternary(0, 1) }}"
+ value: "{{ (kubenet_xci_static_routes is defined) | ternary(0, 1) }}"
reload: yes
delegate_to: localhost
run_once: True
diff --git a/xci/installer/kubespray/playbooks/configure-opnfvhost.yml b/xci/installer/kubespray/playbooks/configure-opnfvhost.yml
index 00a8053f..52e42b06 100644
--- a/xci/installer/kubespray/playbooks/configure-opnfvhost.yml
+++ b/xci/installer/kubespray/playbooks/configure-opnfvhost.yml
@@ -28,29 +28,44 @@
configure_network: xci_flavor != 'aio'
tasks:
+ - name: Create list of files to copy
+ shell: |
+ git ls-tree -r --name-only HEAD > {{ xci_cache }}/releng-xci.files
+ echo ".git/" >> {{ xci_cache }}/releng-xci.files
+ echo ".cache/repos/" >> {{ xci_cache }}/releng-xci.files
+ echo ".cache/xci.env" >> {{ xci_cache }}/releng-xci.files
+ args:
+ executable: /bin/bash
+ chdir: "{{ xci_path }}"
+ changed_when: False
+ delegate_to: 127.0.0.1
+ tags:
+ - skip_ansible_lint
+
- name: Copy releng-xci to remote host
synchronize:
+ archive: yes
src: "{{ xci_path }}/"
dest: "{{ remote_xci_path }}"
- recursive: yes
delete: yes
+ rsync_opts:
+ - "--recursive"
+ - "--files-from={{ xci_cache }}/releng-xci.files"
- - name: delete the opnfv_inventory directory
- file:
- path: "{{ remote_xci_path }}/.cache/repos/kubespray/opnfv_inventory"
- state: absent
-
- - name: make sure kubespray/opnfv_inventory/group_vars/ exist
- file:
- path: "{{ remote_xci_path }}/.cache/repos/kubespray/opnfv_inventory/group_vars"
- state: directory
-
- - name: copy kubespray inventory directory
+ - name: link xci dynamic inventory to kubespray/inventory/opnfv directory
file:
src: "{{ remote_xci_playbooks }}/dynamic_inventory.py"
- path: "{{ remote_xci_path }}/.cache/repos/kubespray/opnfv_inventory/dynamic_inventory.py"
+ path: "{{ remote_xci_path }}/.cache/repos/kubespray/inventory/opnfv/dynamic_inventory.py"
state: link
+ - name: Download kubectl and place it to /usr/local/bin
+ get_url:
+ url: "https://storage.googleapis.com/kubernetes-release/release/{{ kubernetes_version }}/bin/linux/amd64/kubectl"
+ dest: /usr/local/bin/kubectl
+ owner: root
+ group: root
+ mode: 0755
+
- name: Reload XCI deployment host facts
setup:
filter: ansible_local
@@ -65,15 +80,9 @@
package:
name: "{{ (ansible_pkg_mgr == 'zypper') | ternary('dbus-1', 'dbus') }}"
state: present
- update_cache: "{{ (ansible_pkg_mgr == 'apt') | ternary('yes', omit) }}"
+ update_cache: "{{ (ansible_pkg_mgr in ['apt', 'zypper']) | ternary('yes', omit) }}"
when: xci_flavor == 'aio'
- - name: change dashboard server type to NodePort
- lineinfile:
- path: "{{ remote_xci_path }}/.cache/repos/kubespray/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2"
- insertafter: 'targetPort'
- line: " type: NodePort"
-
- name: pip install required packages
pip:
name: "{{ item.name }}"
@@ -83,9 +92,6 @@
- { name: 'netaddr' }
- { name: 'ansible-modules-hashivault' }
- - name: Configure SSL certificates
- include_tasks: "{{ xci_path }}/xci/playbooks/manage-ssl-certs.yml"
-
- name: fetch xci environment
copy:
src: "{{ xci_path }}/.cache/xci.env"
diff --git a/xci/installer/kubespray/playbooks/configure-targethosts.yml b/xci/installer/kubespray/playbooks/configure-targethosts.yml
index 7989bfb6..2fde9877 100644
--- a/xci/installer/kubespray/playbooks/configure-targethosts.yml
+++ b/xci/installer/kubespray/playbooks/configure-targethosts.yml
@@ -22,7 +22,7 @@
package:
name: "{{ (ansible_pkg_mgr == 'zypper') | ternary('dbus-1', 'dbus') }}"
state: present
- update_cache: "{{ (ansible_pkg_mgr == 'apt') | ternary('yes', omit) }}"
+ update_cache: "{{ (ansible_pkg_mgr in ['apt', 'zypper']) | ternary('yes', omit) }}"
- hosts: kube-master
remote_user: root
@@ -37,6 +37,4 @@
when: xci_flavor == 'ha'
- role: "haproxy_server"
haproxy_service_configs: "{{ haproxy_default_services}}"
- haproxy_user_ssl_cert: "/etc/ssl/certs/xci.crt"
- haproxy_user_ssl_key: "/etc/ssl/private/xci.key"
when: xci_flavor == 'ha'
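
The update_cache ternary above now requests a package cache refresh on zypper systems as well as apt ones; other package managers fall back to omit. A minimal sketch of the selected behaviour in shell terms, with PKG_MGR standing in for ansible_pkg_mgr:

    case "$PKG_MGR" in
        apt)    apt-get update && apt-get install -y dbus ;;
        zypper) zypper refresh && zypper install -y dbus-1 ;;
        *)      yum install -y dbus ;;  # update_cache omitted
    esac
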
diff --git a/xci/installer/kubespray/playbooks/post-deployment.yml b/xci/installer/kubespray/playbooks/post-deployment.yml
new file mode 100644
index 00000000..5c2f7f36
--- /dev/null
+++ b/xci/installer/kubespray/playbooks/post-deployment.yml
@@ -0,0 +1,42 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- hosts: opnfv
+ remote_user: root
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+
+ pre_tasks:
+ - name: Load distribution variables
+ include_vars:
+ file: "{{ item }}"
+ with_items:
+ - "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml"
+ - name: Set facts for remote deployment
+ set_fact:
+ remote_xci_scenario_path: "{{ ansible_env.HOME }}/releng-xci/.cache/repos/scenarios/{{ deploy_scenario }}/scenarios/{{ deploy_scenario }}"
+
+ tasks:
+ - name: Reload XCI deployment host facts
+ setup:
+ filter: ansible_local
+ gather_subset: "!all"
+ delegate_to: 127.0.0.1
+
+ - name: Check if any post-deployment tasks are defined for {{ deploy_scenario }} role
+ stat:
+ path: "{{ remote_xci_scenario_path }}/role/{{ deploy_scenario }}/tasks/post-deployment.yml"
+ register: post_deployment_yml
+
+ - name: Execute post-deployment tasks of {{ deploy_scenario }} role
+ include_role:
+ name: "{{ hostvars['opnfv'].ansible_local.xci.scenarios.role }}"
+ tasks_from: post-deployment
+ when:
+ - post_deployment_yml.stat.exists
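
The stat/include_role pair makes the scenario hook optional: the role's post-deployment tasks run only when the scenario actually ships a post-deployment.yml file. The gating logic, sketched in shell with hypothetical variable names:

    hook="$REMOTE_XCI_SCENARIO_PATH/role/$DEPLOY_SCENARIO/tasks/post-deployment.yml"
    if [ -f "$hook" ]; then
        echo "Info: running post-deployment tasks for $DEPLOY_SCENARIO"
        # include_role with tasks_from: post-deployment happens here
    fi
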
diff --git a/xci/installer/osa/deploy.sh b/xci/installer/osa/deploy.sh
index 4542a4e3..8b3a67d0 100755
--- a/xci/installer/osa/deploy.sh
+++ b/xci/installer/osa/deploy.sh
@@ -68,7 +68,6 @@ echo "Info: Configured opnfv deployment host for openstack-ansible"
#-------------------------------------------------------------------------------
# This playbook is only run for all flavors except aio, since aio is configured
# by an upstream script.
-
# This playbook
# - adds public keys to target hosts
# - configures network
@@ -161,6 +160,22 @@ echo
echo "Info: OpenStack installation is successfully completed!"
#-------------------------------------------------------------------------------
+# Execute post-installation tasks
+#-------------------------------------------------------------------------------
+# Playbook post-deployment.yml is used to execute any post-deployment tasks that
+# are required for the scenario under test.
+#-------------------------------------------------------------------------------
+echo "-----------------------------------------------------------------------"
+echo "Info: Running post-deployment scenario role"
+echo "-----------------------------------------------------------------------"
+cd $OSA_XCI_PLAYBOOKS
+ansible-playbook ${XCI_ANSIBLE_PARAMS} -i ${XCI_PLAYBOOKS}/dynamic_inventory.py \
+ post-deployment.yml
+echo "-----------------------------------------------------------------------"
+echo
+echo "Info: Post-deployment scenario role execution done"
+
+#-------------------------------------------------------------------------------
# - Getting OpenStack login information
#-------------------------------------------------------------------------------
echo "Info: Openstack login details"
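
Because the post-deployment stage is a standalone playbook, it can also be re-run on its own after a failed or modified scenario, using the same invocation the script performs:

    cd "$OSA_XCI_PLAYBOOKS"
    ansible-playbook ${XCI_ANSIBLE_PARAMS} \
        -i "${XCI_PLAYBOOKS}/dynamic_inventory.py" post-deployment.yml
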
diff --git a/xci/installer/osa/files/ansible-role-requirements.yml b/xci/installer/osa/files/ansible-role-requirements.yml
index f24e5103..e787aff5 100644
--- a/xci/installer/osa/files/ansible-role-requirements.yml
+++ b/xci/installer/osa/files/ansible-role-requirements.yml
@@ -7,181 +7,180 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# these versions are based on the osa commit 85714acedb50ea65d7e7684c127984c8dc56afe4 on 2018-04-03
-# https://git.openstack.org/cgit/openstack/openstack-ansible/commit/?id=90d0679d209cb494b9a71817c56e2c26c7fc5ca1
+# these versions are based on the osa commit e41b0c40501ea8906fcbdcc7d37ff6ef0cd5cf02 on 2018-12-11
+# https://git.openstack.org/cgit/openstack/openstack-ansible/commit/?h=refs/heads/stable/rocky&id=e41b0c40501ea8906fcbdcc7d37ff6ef0cd5cf02
- name: ansible-hardening
scm: git
src: https://git.openstack.org/openstack/ansible-hardening
- version: 3f870c24f9bcd88ec1f1d7815c30cf2abfac39e5
+ version: 14e6bb6a411b6b03bf258144be66845a5831705c
- name: apt_package_pinning
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-apt_package_pinning
- version: b488ec5ee3092ba5b6765b5888c9ad2e44922ec5
+ version: 4b2584d699c79ac65acfeb2157a97327df6f0fd6
- name: pip_install
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-pip_install
- version: 78e615c712771e33c1a7436e05bc91644318ece1
+ version: 671e7129ad3dcf20bdda942842f9f76203bf5a5e
- name: galera_client
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-galera_client
- version: 4bc75a7b141fa0ff0ff1f35d26c09163df482b34
+ version: 6dbac51e5b74ffdee429375f6c22739e7a5ef017
- name: galera_server
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-galera_server
- version: 9b2c2e8098f0f02e206c0498fa466a6798f7c89d
+ version: 7a7036f6d15ce3117a925217b66cba806034bb96
- name: ceph_client
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-ceph_client
- version: 50ea8b644c0713d007f6f172cd7bbc850f44a55a
+ version: 278aaca502533b33b9714393e47b536654055c58
- name: haproxy_server
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-haproxy_server
- version: 0c0c9453e8760fcbb0a126e6c97de83f004ae06b
+ version: 6bc259471283162b3cb8ec0c4bc736f81254d050
- name: keepalived
scm: git
src: https://github.com/evrardjp/ansible-keepalived
- version: 2b4a1f36c29b06b832bc4e6d112ca5559a98fd4a
+ version: 64764d25ab868417f1138a7b9605f2eb94cbfd02
- name: lxc_container_create
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-lxc_container_create
- version: 3d1e70d1be8d10a54da35ad97c3e750384f8a73b
+ version: 14a74f2fb60fa7865cf34f75e3196e802847b9d1
- name: lxc_hosts
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-lxc_hosts
- version: e77c3a923bc8aae4a264917f592b58b5d1c79aed
+ version: 83e20af591b00fc796eba0e0e1c7650faaa20cd7
- name: memcached_server
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-memcached_server
- version: 67ff6cd34d8158dde56a7a59b8ccbdd079effde5
+ version: e058c81a44859c7bcd3eeaac49a8f25b423e38a4
- name: openstack_hosts
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-openstack_hosts
- version: 372be6bfc1169131c6607c4f0f5758563dc1265f
+ version: 0028cedcccc4913bd1c604404c84be16164d1fe5
- name: os_keystone
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_keystone
- version: 48019740f86570f8bcb14068a0e253b05ffb4336
+ version: 5a54cc6ba50875c4068e4cdfe3cb23ae1603e257
- name: openstack_openrc
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-openstack_openrc
- version: e86c73ef9af547b30a4aab0d39aca96359bf5ce4
+ version: 805ef5349db7d8af0132b546ff56a36ec80ea7db
- name: os_aodh
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_aodh
- version: 75c8a1f07c0b0f8e8baa68198be789efd453183e
+ version: 9b8d7483d69e60f4ae71ceb6a3336ff81f355c38
- name: os_barbican
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_barbican
- version: bd8b72cb68c2629f3d1c032f315eb9c25931920e
+ version: f9ce44edb809c92735fa093334fa1d79cc538126
- name: os_ceilometer
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_ceilometer
- version: c9b2115cf7c38a5861a8126d45eddef9ea03d1ad
+ version: 221dcccfef3efa1a187678f71c59d81d7e930a92
- name: os_cinder
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_cinder
- version: 948305aa6bfeeb2abdda7351aa0a8ff292810e56
+ version: a824d8d4dc6de6563f186449838e94c69a869e02
+- name: os_congress
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-os_congress
+ version: 0e6ccb63dba466bb1b7a11e94db7a420c716c06d
- name: os_designate
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_designate
- version: b1a08cc7e897e5b600415a69280a64f8f61dd66c
+ version: 74c33e9788607f772d8402c4f5cfc79eb379278b
- name: os_glance
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_glance
- version: 9f2aa6478dadab3a4ec0cee6d23ffc86fa76a99b
+ version: 7ec6a11b98715530e3cd5adbf682c2834e3122a8
- name: os_gnocchi
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_gnocchi
- version: 5beb9ca451812959f09c9f9235eee529c42b3805
+ version: db881f143223723b38f5d197e8e4b6dd4e057c6f
- name: os_heat
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_heat
- version: 7eff32af7fae96096694d582589389c66d10a8a3
+ version: 14b8927123aa9b0cf47f365c1ab9f82147ce4bdc
- name: os_horizon
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_horizon
- version: ea9a27931e6d7f22df23ea02e1c0938ba576fada
+ version: b088034eeaa73ac781fe271588ba03871c88118e
- name: os_ironic
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_ironic
- version: 8c33498070489e2ff645cc1286df535e2b16726b
+ version: 6ecf38f1296080a33366528ad40d513539138925
- name: os_magnum
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_magnum
- version: 06087d8d193f4985ed8c33e996b02fa717628c27
-- name: os_molteniron
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_molteniron
- version: 5102381790218c390438011f64e763016d335c61
+ version: 316f22626d242e33ce56fad367ef3570e0d8ab8b
- name: os_neutron
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_neutron
- version: 4bbb681632f2d577585905982d81fa019332f993
+ version: 3032836715b4055041554583fa2ed685ab076c25
- name: os_nova
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_nova
- version: d104fb16bdc349382fd05265ff9e19b6b0133fac
- refspec: refs/changes/17/572417/1
+ version: 9db5bf5ab6f82c1947d05a1ec7cd6e3ef304760f
- name: os_octavia
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_octavia
- version: c4cdbc5f36c43591cf729a5ce0f2a1e605c30be0
+ version: 508ea6d834153d0eb6da5bd32d10472f483c6dfa
- name: os_rally
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_rally
- version: 083bbb8c1290506797d49c51ee91a344a481d25c
+ version: 8e98112b858ecffbb92c6ae342237af87416b7fa
- name: os_sahara
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_sahara
- version: 9b5111884ebd64ec8088bbdfb3b9a58cdabf1edb
+ version: ed7aa2d64a2ea3508c7d88a9e869524fdf0e9353
- name: os_swift
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_swift
- version: 5e88210fdd42d40960a14767fc662b3bd8a73c8a
+ version: a88edf84964819870ef990d25b3bfa514186249a
- name: os_tacker
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_tacker
- version: d4acca1ce9ec3ce0c599a3424fa3c92ee318d270
+ version: bbce8657c13d2545bf632eb81bb78329a5479798
- name: os_tempest
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_tempest
- version: f34582d887e8e26e99710b29ac35306938ca857c
+ version: 08341f4a19b2ed2231b790496c9f7cf2b4eda2e6
- name: os_trove
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_trove
- version: 6cd21b625d9f3da5c537e98064f67001173c9174
+ version: eaca0137de0d3d7bd57a68eecfecf52e3171f591
- name: plugins
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-plugins
- version: e1f330786d5f09bb02c3f06b3484a089223419ad
+ version: a84ae0d744047fe41a0c028213de8daa52f72aee
- name: rabbitmq_server
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-rabbitmq_server
- version: cffd1ebd45e20331ee505568cd34c277d3225138
+ version: deccf93bdda1aa873b956418168368284509c99b
- name: repo_build
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-repo_build
- version: a84d11ab04c911c788b534cd61d33e6e2b71dd0b
+ version: 630a6dfdcb46ba719ddb7fd7a4875259c5602b15
- name: repo_server
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-repo_server
- version: 5979a638eade8523f113714f9fd5c0fb59353277
+ version: dd143b381b2fb94a3ba435f951e8b9338353a48d
- name: rsyslog_client
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-rsyslog_client
- version: ed8e178c38a28cab87b8d9bd4396caccf8c0e790
+ version: ed5e61c8bc2aabb905918bb2751ae985b1cfe229
- name: rsyslog_server
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-rsyslog_server
- version: d401a62d2f8ff7c8e6924b6fae0086e47ab37fa6
+ version: 9318bafbe60fed5f026c1e216d693bce745b9f99
- name: sshd
scm: git
src: https://github.com/willshersystems/ansible-sshd
- version: 537b9b2bc2fd7f23301222098344727f8161993c
+ version: d2ba81107ade1cf53c8b93590465c21ad2bc4530
- name: bird
scm: git
src: https://github.com/logan2211/ansible-bird
- version: 21d7d8de5af9e73c0853d3434a4b3d3f8dd39a70
+ version: 0fdb4848b5aca949ffade9be5a2ae254979e673e
- name: etcd
scm: git
src: https://github.com/logan2211/ansible-etcd
@@ -189,20 +188,44 @@
- name: unbound
scm: git
src: https://github.com/logan2211/ansible-unbound
- version: 7be67d6b60718896f0c17a7d4a14b912f72a59ae
+ version: 3bb7414f46b757e943507b65ca4c9f1080a008b0
- name: resolvconf
scm: git
src: https://github.com/logan2211/ansible-resolvconf
- version: d48dd3eea22094b6ecc6aa6ea07279c8e68e28b5
+ version: '1.4'
- name: ceph-ansible
scm: git
src: https://github.com/ceph/ceph-ansible
- version: 0be60456ce98d11ca6acf73d7f7a76c4f9dc5309
+ version: a5aca6ebbc341feb34b9ec0d73e16aeeedae63ac
- name: opendaylight
scm: git
src: https://github.com/opendaylight/integration-packaging-ansible-opendaylight
- version: 9d5951c39da7722c71632a10ec53e7ab93b8ac9b
+ version: 0aebbc250b34ac5ac14b37bdf9b1a2e1cfaa5a76
- name: haproxy_endpoints
scm: git
src: https://github.com/logan2211/ansible-haproxy-endpoints
- version: 49901861b16b8afaa9bccdbc649ac956610ff22b
+ version: 8e3a24a35beb16d717072dc83895c5a1f92689fb
+- name: nspawn_container_create
+ src: https://git.openstack.org/openstack/openstack-ansible-nspawn_container_create
+ scm: git
+ version: 2bcf03f1cca550731789d5b53c7d0806ef5f5ff7
+- name: nspawn_hosts
+ src: https://git.openstack.org/openstack/openstack-ansible-nspawn_hosts
+ scm: git
+ version: f69e101b9191682986272b766747f107b8a7a136
+- name: systemd_service
+ src: https://git.openstack.org/openstack/ansible-role-systemd_service
+ scm: git
+ version: a085a50c338b2eeaa87ed50eaaa22564d7c12968
+- name: systemd_mount
+ src: https://git.openstack.org/openstack/ansible-role-systemd_mount
+ scm: git
+ version: ee6263b3ce6502712ff4d6fb56474066df1773e4
+- name: systemd_networkd
+ src: https://git.openstack.org/openstack/ansible-role-systemd_networkd
+ scm: git
+ version: b024d0a3d97caf06b962a1f19450511b108dc5eb
+- name: python_venv_build
+ src: https://git.openstack.org/openstack/ansible-role-python_venv_build
+ scm: git
+ version: 5fdd8e00633f28606fc531a449d741e8c772a9fc
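
Every entry above pins a role to an exact commit, so a bumped sha can be sanity-checked by confirming it exists in the upstream repository before a deployment is attempted; for example, for ansible-hardening (sha taken from the list above):

    git clone https://git.openstack.org/openstack/ansible-hardening /tmp/ansible-hardening
    cd /tmp/ansible-hardening
    # exits non-zero if the pinned commit is unknown to the repo
    git cat-file -e "14e6bb6a411b6b03bf258144be66845a5831705c^{commit}" && echo "pin resolves"
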
diff --git a/xci/installer/osa/files/global-requirement-pins.txt b/xci/installer/osa/files/global-requirement-pins.txt
index fd401854..ec198a79 100644
--- a/xci/installer/osa/files/global-requirement-pins.txt
+++ b/xci/installer/osa/files/global-requirement-pins.txt
@@ -5,10 +5,17 @@
#
# Use this file with caution!
#
+
+### Pinned for gnocchi's dependency pycradox
+# https://github.com/sileht/pycradox/commit/2209f89fd65ecf31bea8eac6405acce2543e7b84
+Cython<0.28
+
###
### These are pinned to ensure exactly the same behaviour forever! ###
### These pins are updated through the sources-branch-updater script ###
###
-pip==9.0.1
-setuptools==38.5.1
-wheel==0.30.0
+# Bumping pip to version 10 breaks tempest, which fails when trying to
+# install packages from an empty list.
+pip==18.0
+setuptools==40.0.0
+wheel==0.31.1
diff --git a/xci/installer/osa/files/ha/openstack_user_config.yml b/xci/installer/osa/files/ha/openstack_user_config.yml
index 360aa5cb..dc2ec183 100644
--- a/xci/installer/osa/files/ha/openstack_user_config.yml
+++ b/xci/installer/osa/files/ha/openstack_user_config.yml
@@ -77,18 +77,18 @@ shared-infra_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# repository (apt cache, python packages, etc)
repo-infra_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# load balancer
# Ideally the load balancer should not use the Infrastructure hosts.
@@ -97,9 +97,9 @@ haproxy_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# rsyslog server
# log_hosts:
@@ -115,18 +115,18 @@ identity_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# cinder api services
storage-infra_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# glance
# The settings here are repeated for each infra host.
@@ -139,27 +139,27 @@ image_hosts:
container_vars:
limit_container_types: glance
glance_nfs_client:
- - server: "172.29.244.14"
+ - server: "172.29.244.12"
remote_path: "/images"
local_path: "/var/lib/glance/images"
type: "nfs"
options: "_netdev,auto"
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
container_vars:
limit_container_types: glance
glance_nfs_client:
- - server: "172.29.244.14"
+ - server: "172.29.244.12"
remote_path: "/images"
local_path: "/var/lib/glance/images"
type: "nfs"
options: "_netdev,auto"
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
container_vars:
limit_container_types: glance
glance_nfs_client:
- - server: "172.29.244.14"
+ - server: "172.29.244.12"
remote_path: "/images"
local_path: "/var/lib/glance/images"
type: "nfs"
@@ -170,43 +170,43 @@ compute-infra_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# heat
orchestration_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# horizon
dashboard_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# neutron server, agents (L3, etc)
network_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# nova hypervisors
compute_hosts:
compute00:
- ip: 172.29.236.14
+ ip: 172.29.236.12
compute01:
- ip: 172.29.236.15
+ ip: 172.29.236.13
# cinder volume hosts (NFS-backed)
# The settings here are repeated for each infra host.
@@ -225,10 +225,10 @@ storage_hosts:
nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
nfs_shares_config: /etc/cinder/nfs_shares
shares:
- - ip: "172.29.244.14"
+ - ip: "172.29.244.12"
share: "/volumes"
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
container_vars:
cinder_backends:
limit_container_types: cinder_volume
@@ -238,10 +238,10 @@ storage_hosts:
nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
nfs_shares_config: /etc/cinder/nfs_shares
shares:
- - ip: "172.29.244.14"
+ - ip: "172.29.244.12"
share: "/volumes"
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
container_vars:
cinder_backends:
limit_container_types: cinder_volume
@@ -251,5 +251,5 @@ storage_hosts:
nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
nfs_shares_config: /etc/cinder/nfs_shares
shares:
- - ip: "172.29.244.14"
+ - ip: "172.29.244.12"
share: "/volumes"
diff --git a/xci/installer/osa/files/ha/user_variables.yml b/xci/installer/osa/files/ha/user_variables.yml
index c51a6e12..8c2e9f0c 100644
--- a/xci/installer/osa/files/ha/user_variables.yml
+++ b/xci/installer/osa/files/ha/user_variables.yml
@@ -154,7 +154,7 @@ trove_wsgi_processes: 1
sahara_api_workers_max: 2
sahara_api_workers: 1
-openrc_os_auth_url: "http://192.168.122.220:5000/v3"
+openrc_os_auth_url: "https://192.168.122.220:5000/v3"
keystone_auth_admin_password: "opnfv-secret-password"
openrc_os_password: "opnfv-secret-password"
openrc_os_domain_name: "Default"
@@ -163,9 +163,6 @@ openrc_nova_endpoint_type: "publicURL"
openrc_os_endpoint_type: "publicURL"
openrc_clouds_yml_interface: "public"
openrc_region_name: RegionOne
-haproxy_ssl: false
-openstack_service_publicuri_proto: http
-haproxy_user_ssl_cert: "/etc/ssl/certs/xci.crt"
-haproxy_user_ssl_key: "/etc/ssl/private/xci.key"
+openrc_insecure: true
keystone_service_adminuri_insecure: true
keystone_service_internaluri_insecure: true
diff --git a/xci/installer/osa/files/mini/user_variables.yml b/xci/installer/osa/files/mini/user_variables.yml
index ef56dd2c..b4d847bc 100644
--- a/xci/installer/osa/files/mini/user_variables.yml
+++ b/xci/installer/osa/files/mini/user_variables.yml
@@ -154,7 +154,7 @@ trove_wsgi_processes: 1
sahara_api_workers_max: 2
sahara_api_workers: 1
-openrc_os_auth_url: "http://192.168.122.3:5000/v3"
+openrc_os_auth_url: "https://192.168.122.3:5000/v3"
keystone_auth_admin_password: "opnfv-secret-password"
openrc_os_password: "opnfv-secret-password"
openrc_os_domain_name: "Default"
@@ -163,9 +163,6 @@ openrc_nova_endpoint_type: "publicURL"
openrc_os_endpoint_type: "publicURL"
openrc_clouds_yml_interface: "public"
openrc_region_name: RegionOne
-haproxy_ssl: false
-openstack_service_publicuri_proto: http
-haproxy_user_ssl_cert: "/etc/ssl/certs/xci.crt"
-haproxy_user_ssl_key: "/etc/ssl/private/xci.key"
+openrc_insecure: true
keystone_service_adminuri_insecure: true
keystone_service_internaluri_insecure: true
diff --git a/xci/installer/osa/files/noha/user_variables.yml b/xci/installer/osa/files/noha/user_variables.yml
index 4e578819..5e7ed83c 100644
--- a/xci/installer/osa/files/noha/user_variables.yml
+++ b/xci/installer/osa/files/noha/user_variables.yml
@@ -154,7 +154,7 @@ trove_wsgi_processes: 1
sahara_api_workers_max: 2
sahara_api_workers: 1
-openrc_os_auth_url: "http://192.168.122.3:5000/v3"
+openrc_os_auth_url: "https://192.168.122.3:5000/v3"
keystone_auth_admin_password: "opnfv-secret-password"
openrc_os_password: "opnfv-secret-password"
openrc_os_domain_name: "Default"
@@ -163,9 +163,6 @@ openrc_nova_endpoint_type: "publicURL"
openrc_os_endpoint_type: "publicURL"
openrc_clouds_yml_interface: "public"
openrc_region_name: RegionOne
-haproxy_ssl: false
-openstack_service_publicuri_proto: http
-haproxy_user_ssl_cert: "/etc/ssl/certs/xci.crt"
-haproxy_user_ssl_key: "/etc/ssl/private/xci.key"
+openrc_insecure: true
keystone_service_adminuri_insecure: true
keystone_service_internaluri_insecure: true
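
With the auth URL switched to https and XCI using self-signed certificates, openrc_insecure: true tells the generated openrc to disable certificate verification, matching the keystone *_insecure flags. Interactive client use follows the same pattern; a sketch, assuming python-openstackclient is available on the opnfv host:

    source ~/openrc
    # --insecure mirrors the openrc setting for self-signed endpoints
    openstack --insecure catalog list
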
diff --git a/xci/installer/osa/files/openstack_services.yml b/xci/installer/osa/files/openstack_services.yml
index f8a2a95b..64718e33 100644
--- a/xci/installer/osa/files/openstack_services.yml
+++ b/xci/installer/osa/files/openstack_services.yml
@@ -31,210 +31,270 @@
## Global Requirements
requirements_git_repo: https://git.openstack.org/openstack/requirements
-requirements_git_install_branch: 207ac2e166f0874b7ff891535bdb78ecf36cabc6 # HEAD of "stable/queens" as of 01.03.2018
+requirements_git_install_branch: 32f8fa388d3b8367320a3308a350f28254a82d65 # HEAD of "stable/rocky" as of 11.12.2018
+requirements_git_track_branch: stable/rocky
## Aodh service
aodh_git_repo: https://git.openstack.org/openstack/aodh
-aodh_git_install_branch: f549faea0ea19dad5bb3f1871b7d66ae5d9d80f2 # HEAD of "stable/queens" as of 01.03.2018
+aodh_git_install_branch: ae5e710cd5ade867ebd0e6666bad95f82d130210 # HEAD of "stable/rocky" as of 11.12.2018
aodh_git_project_group: aodh_all
+aodh_git_track_branch: stable/rocky
## Barbican service
barbican_git_repo: https://git.openstack.org/openstack/barbican
-barbican_git_install_branch: 5b525f6b0a7cf5342a9ffa3ca3618028d6d53649 # HEAD of "stable/queens" as of 01.03.2018
+barbican_git_install_branch: 0a1a9917e791d0c6fc8534a052700af5f5cbe9d0 # HEAD of "stable/rocky" as of 11.12.2018
barbican_git_project_group: barbican_all
+barbican_git_track_branch: stable/rocky
## Ceilometer service
ceilometer_git_repo: https://git.openstack.org/openstack/ceilometer
-ceilometer_git_install_branch: 24caac82528be7678165bf12fb5b997852727ecd # HEAD of "stable/queens" as of 01.03.2018
-ceilometer_git_project_group: ceilometer_all
+ceilometer_git_install_branch: 018ff32fe0200a041297c386eb8b381f1bec0e71 # HEAD of "stable/rocky" as of 11.12.2018
+ceilometer_git_project_group: all
+ceilometer_git_track_branch: stable/rocky
## Cinder service
cinder_git_repo: https://git.openstack.org/openstack/cinder
-cinder_git_install_branch: b61a02de56c1b9cc6d5003b5304ce66ee930f37b # HEAD of "stable/queens" as of 01.03.2018
+cinder_git_install_branch: 8dbf5d7882a6271514a3075a02cd080e44b709d5 # HEAD of "stable/rocky" as of 11.12.2018
cinder_git_project_group: cinder_all
+cinder_git_track_branch: stable/rocky
## Designate service
designate_git_repo: https://git.openstack.org/openstack/designate
-designate_git_install_branch: 6ca9446bdcf04ba80787348892937cf19eefbf5a # HEAD of "stable/queens" as of 01.03.2018
+designate_git_install_branch: af1bb8a36a704bb1a226fe5154f828e152ef23e1 # HEAD of "stable/rocky" as of 11.12.2018
designate_git_project_group: designate_all
+designate_git_track_branch: stable/rocky
## Horizon Designate dashboard plugin
designate_dashboard_git_repo: https://git.openstack.org/openstack/designate-dashboard
-designate_dashboard_git_install_branch: 5570a2dd51ccd3750012bfde9991f0689a02323b # HEAD of "stable/queens" as of 01.03.2018
+designate_dashboard_git_install_branch: faa67c87ad3cd5563da722f13b3adaee5bfe350f # HEAD of "stable/rocky" as of 11.12.2018
designate_dashboard_git_project_group: horizon_all
+designate_dashboard_git_track_branch: stable/rocky
## Dragonflow service
+# please update the sha (and this comment) when stable/rocky is branched in this repo.
dragonflow_git_repo: https://git.openstack.org/openstack/dragonflow
-dragonflow_git_install_branch: a2f50a8e8222ae1de04e44a6fd6f7e00d5864fc0 # HEAD of "master" as of 01.03.2018
+dragonflow_git_install_branch: 945b1e368c651ffa3655f42df724d9f13a7b6b96 # FROZEN HEAD of "master" as of 17.08.2018
dragonflow_git_project_group: neutron_all
+dragonflow_git_track_branch: None
## Glance service
glance_git_repo: https://git.openstack.org/openstack/glance
-glance_git_install_branch: 968f4ae9ce244d9372cb3e8f45acea9d557f317d # HEAD of "stable/queens" as of 01.03.2018
+glance_git_install_branch: 4982c24f0aeb64f9d20159e543a90e31fc325dce # HEAD of "stable/rocky" as of 11.12.2018
glance_git_project_group: glance_all
+glance_git_track_branch: stable/rocky
## Heat service
heat_git_repo: https://git.openstack.org/openstack/heat
-heat_git_install_branch: 43f122be13736f15fbc38cb6e6ce29545f784c86 # HEAD of "stable/queens" as of 01.03.2018
+heat_git_install_branch: 98eea44d5d91b74e1ab28c052e4fbc4b533d5f83 # HEAD of "stable/rocky" as of 11.12.2018
heat_git_project_group: heat_all
+heat_git_track_branch: stable/rocky
+## Horizon Heat dashboard plugin
+# please update the sha (and this comment) when stable/rocky is branched in this repo.
+heat_dashboard_git_repo: https://git.openstack.org/openstack/heat-dashboard
+heat_dashboard_git_install_branch: bc7f5068bbb6f7974eaffa2d865a859ff0fd0069 # FROZEN HEAD of "master" as of 17.08.2018
+heat_dashboard_git_project_group: horizon_all
+heat_dashboard_git_track_branch: None
## Horizon service
horizon_git_repo: https://git.openstack.org/openstack/horizon
-horizon_git_install_branch: d017fde2a0fdc48e4687f0f5ae0362ba6c5ad66a # HEAD of "stable/queens" as of 01.03.2018
+horizon_git_install_branch: 0ccfce882749998f3a6a7f9bfc6fa74ea346ca53 # HEAD of "stable/rocky" as of 11.12.2018
horizon_git_project_group: horizon_all
+horizon_git_track_branch: stable/rocky
## Horizon Ironic dashboard plugin
ironic_dashboard_git_repo: https://git.openstack.org/openstack/ironic-ui
-ironic_dashboard_git_install_branch: 1c4cbd2b90270f65d04b91ddc5f86efa35bbc622 # HEAD of "stable/queens" as of 01.03.2018
+ironic_dashboard_git_install_branch: c700f3a613f3d78875caf7588e7bdf42a5db83cb # HEAD of "stable/rocky" as of 11.12.2018
ironic_dashboard_git_project_group: horizon_all
+ironic_dashboard_git_track_branch: stable/rocky
## Horizon Magnum dashboard plugin
magnum_dashboard_git_repo: https://git.openstack.org/openstack/magnum-ui
-magnum_dashboard_git_install_branch: 051408e5b86615f74e5fa4cd2e4284b6d1e6a3f2 # HEAD of "stable/queens" as of 01.03.2018
+magnum_dashboard_git_install_branch: 2e9cb253eaee45a57f07369e432369dbff8fc173 # HEAD of "stable/rocky" as of 11.12.2018
magnum_dashboard_git_project_group: horizon_all
+magnum_dashboard_git_track_branch: stable/rocky
## Horizon LBaaS dashboard plugin
neutron_lbaas_dashboard_git_repo: https://git.openstack.org/openstack/neutron-lbaas-dashboard
-neutron_lbaas_dashboard_git_install_branch: a42434a21bf95566472dc6c8ce078ca84432423d # HEAD of "stable/queens" as of 01.03.2018
+neutron_lbaas_dashboard_git_install_branch: 84fd20a474e8165ddbf5cf4bd14b7eb7da63ed41 # HEAD of "stable/rocky" as of 11.12.2018
neutron_lbaas_dashboard_git_project_group: horizon_all
+neutron_lbaas_dashboard_git_track_branch: stable/rocky
## Horizon FWaaS dashboard plugin
-neutron_fwaas_dashboard_git_repo: https://git.openstack.org/openstack/neutron-fwaas-dashboard
-neutron_fwaas_dashboard_git_install_branch: a710e7c4f48afe0261ef25efc44088346124de1c # HEAD of "stable/queens" as of 01.03.2018
+neutron_fwaas_dashboard_git_repo: https://git.openstack.org/openstack/neutron-fwaas-dashboard
+neutron_fwaas_dashboard_git_install_branch: 4adf5599211ef90696da94b2fee3aac730f3b7bc # HEAD of "stable/rocky" as of 11.12.2018
neutron_fwaas_dashboard_git_project_group: horizon_all
+neutron_fwaas_dashboard_git_track_branch: stable/rocky
## Horizon Sahara dashboard plugin
sahara_dashboard_git_repo: https://git.openstack.org/openstack/sahara-dashboard
-sahara_dashboard_git_install_branch: 707059ff4e372ae66b21b82050a9e16295176782 # HEAD of "stable/queens" as of 01.03.2018
+sahara_dashboard_git_install_branch: 6e3f7538ce7779612d8e82b069597c06c2225a77 # HEAD of "stable/rocky" as of 11.12.2018
sahara_dashboard_git_project_group: horizon_all
+sahara_dashboard_git_track_branch: stable/rocky
## Keystone service
keystone_git_repo: https://git.openstack.org/openstack/keystone
-keystone_git_install_branch: c06d74fcf4cf5338db6572265c609036f6817466 # HEAD of "stable/queens" as of 01.03.2018
+keystone_git_install_branch: 295ccda8190b39a505c397d2f4d9e4896dc538cf # HEAD of "stable/rocky" as of 11.12.2018
keystone_git_project_group: keystone_all
+keystone_git_track_branch: stable/rocky
## Neutron service
neutron_git_repo: https://git.openstack.org/openstack/neutron
-neutron_git_install_branch: abb60c6175af435964028ce7c97bb4803aeab004 # HEAD of "stable/queens" as of 01.03.2018
+neutron_git_install_branch: ae2ef681403d1f103170ea70df1010f006244752 # HEAD of "stable/rocky" as of 11.12.2018
neutron_git_project_group: neutron_all
+neutron_git_track_branch: stable/rocky
neutron_lbaas_git_repo: https://git.openstack.org/openstack/neutron-lbaas
-neutron_lbaas_git_install_branch: f6b8b5b0ad2c19ddf6a7c102c706cbfdb0b2bf05 # HEAD of "stable/queens" as of 01.03.2018
+neutron_lbaas_git_install_branch: 1353bad713fd97418a9984016da49df8cfa8825b # HEAD of "stable/rocky" as of 11.12.2018
neutron_lbaas_git_project_group: neutron_all
+neutron_lbaas_git_track_branch: stable/rocky
neutron_vpnaas_git_repo: https://git.openstack.org/openstack/neutron-vpnaas
-neutron_vpnaas_git_install_branch: 8b01dcabb456d2d0bdf905b23f0bdb3ff2530f4d # HEAD of "stable/queens" as of 01.03.2018
+neutron_vpnaas_git_install_branch: 0876f4dfe7e2f57305110e035efa753bfb711a3f # HEAD of "stable/rocky" as of 11.12.2018
neutron_vpnaas_git_project_group: neutron_all
+neutron_vpnaas_git_track_branch: stable/rocky
neutron_fwaas_git_repo: https://git.openstack.org/openstack/neutron-fwaas
-neutron_fwaas_git_install_branch: 43f56b794b19bb0f362e1d0a1449ee24bb16156e # HEAD of "stable/queens" as of 01.03.2018
+neutron_fwaas_git_install_branch: 5ece265b65247ee81a9335d5a685fa9f0a68b0fc # HEAD of "stable/rocky" as of 11.12.2018
neutron_fwaas_git_project_group: neutron_all
+neutron_fwaas_git_track_branch: stable/rocky
neutron_dynamic_routing_git_repo: https://git.openstack.org/openstack/neutron-dynamic-routing
-neutron_dynamic_routing_git_install_branch: 386b5e4c33ab765eb7a72e9a9d4ffc1524d7d0c8 # HEAD of "stable/queens" as of 01.03.2018
+neutron_dynamic_routing_git_install_branch: ae3a01ca1fd6270fc27b3c6bae11afc0f17563d5 # HEAD of "stable/rocky" as of 11.12.2018
neutron_dynamic_routing_git_project_group: neutron_all
+neutron_dynamic_routing_git_track_branch: stable/rocky
+# Networking Calico is following master
networking_calico_git_repo: https://git.openstack.org/openstack/networking-calico
-networking_calico_git_install_branch: 10626324b597585cc781197133d4b12f890b8081 # HEAD of "master" as of 01.03.2018
+networking_calico_git_install_branch: 79c7e00360ddb5fd3c38e60e5bbb3399928d9172 # HEAD of "master" as of 11.12.2018
networking_calico_git_project_group: neutron_all
+networking_calico_git_track_branch: stable/rocky
networking_odl_git_repo: https://git.openstack.org/openstack/networking-odl
-networking_odl_git_install_branch: 8733cf68cbc827a4dd458e3328b5fd2c23a07bcf # HEAD of "stable/queens" as of 01.03.2018
+networking_odl_git_install_branch: 1cef1f0939a405eea4cb87e712794e8fa26b5166 # HEAD of "stable/rocky" as of 11.12.2018
networking_odl_git_project_group: neutron_all
+networking_odl_git_track_branch: stable/rocky
+networking_ovn_git_repo: https://git.openstack.org/openstack/networking-ovn
+networking_ovn_git_install_branch: e077aa93b1dc244b59864236d7c673f852e4e3ba # HEAD of "stable/rocky" as of 11.12.2018
+networking_ovn_git_project_group: neutron_all
+
+# BGPVPN is frozen until further notice due to
+# https://github.com/openstack/networking-bgpvpn/commit/e9a0ea199b47f76f69545e04bdb4db44869c388b#diff-b4ef698db8ca845e5845c4618278f29a
networking_bgpvpn_git_repo: https://git.openstack.org/openstack/networking-bgpvpn
-networking_bgpvpn_git_install_branch: a15c091d8a616c1fd1d3741f32c5d135b5db594f # HEAD of "stable/queens" as of 01.03.2018
+networking_bgpvpn_git_install_branch: 3b93ddacd390d92fb144e5660324d4da064ad9a4 # FROZEN HEAD of "stable/rocky" as of 31.03.2018
networking_bgpvpn_git_project_group: neutron_all
+networking_bgpvpn_git_track_branch: None
networking_sfc_git_repo: https://git.openstack.org/openstack/networking-sfc
-networking_sfc_git_install_branch: cbb68837a38428766ed4d22c5adfe3b2bc6c5f99 # HEAD of "stable/queens" as of 01.03.2018
+networking_sfc_git_install_branch: f0eddef3d53bbad417038f9d32b196ace2ebd0b2 # HEAD of "stable/rocky" as of 11.12.2018
networking_sfc_git_project_group: neutron_all
+networking_sfc_git_track_branch: stable/rocky
## Nova service
nova_git_repo: https://git.openstack.org/openstack/nova
-nova_git_install_branch: 5039511840bd64151f3111d9c8d7d8a01344193b # HEAD of "stable/queens" as of 01.03.2018
+nova_git_install_branch: 8066142a1e381536291232250b3237e5c01ed1f4 # HEAD of "stable/rocky" as of 11.12.2018
nova_git_project_group: nova_all
+nova_git_track_branch: stable/rocky
## PowerVM Virt Driver
nova_powervm_git_repo: https://git.openstack.org/openstack/nova-powervm
-nova_powervm_git_install_branch: 2999bff2d0e651cc091757d0501f82af2691daf6 # HEAD of "stable/queens" as of 01.03.2018
+nova_powervm_git_install_branch: 984b122668161703eee33918d570c61ae9c5b1ca # HEAD of "stable/rocky" as of 11.12.2018
nova_powervm_git_project_group: nova_all
+nova_powervm_git_track_branch: stable/rocky
## LXD Virt Driver
+# please update the sha (and this comment) when stable/rocky is branched in this repo.
nova_lxd_git_repo: https://git.openstack.org/openstack/nova-lxd
-nova_lxd_git_install_branch: 01b6a8e07558678505e3fa2b6f9ea2d10f821642 # HEAD of "stable/queens" as of 01.03.2018
+nova_lxd_git_install_branch: bc8d540c95b3209321658000fd74b0e5065a7ee2 # FROZEN HEAD of "master" as of 17.08.2018
nova_lxd_git_project_group: nova_all
+nova_lxd_git_track_branch: None
## Sahara service
sahara_git_repo: https://git.openstack.org/openstack/sahara
-sahara_git_install_branch: abcc07a70f2da288548aa96abb16c8380e46dcf9 # HEAD of "stable/queens" as of 01.03.2018
+sahara_git_install_branch: ddb518fd81b82308bdd01e58ebf6ed7a48c544ae # HEAD of "stable/rocky" as of 11.12.2018
sahara_git_project_group: sahara_all
+sahara_git_track_branch: stable/rocky
## Swift service
swift_git_repo: https://git.openstack.org/openstack/swift
-swift_git_install_branch: bd4b3c5dc9256fc0d6cca8f925705740c2395efd # HEAD of "stable/queens" as of 01.03.2018
+swift_git_install_branch: 7fdf66ab70da705774a4ae9c328a3e762bb2f3b4 # HEAD of "stable/rocky" as of 11.12.2018
swift_git_project_group: swift_all
+swift_git_track_branch: stable/rocky
## Swift3 middleware
+# please remove this once the swift role is configured without this middleware (and uses swift code only)
swift_swift3_git_repo: https://git.openstack.org/openstack/swift3
-swift_swift3_git_install_branch: 1c117c96dda8113c3398c16e68b61efef397de74 # HEAD of "master" as of 01.03.2018
+swift_swift3_git_install_branch: 90db5d1510b2a770387961e7bf0fbeae8101ba45 # FROZEN HEAD of "master" as of 17.08.2018
swift_swift3_git_project_group: swift_all
+swift_swift3_git_track_branch: None
## Ironic service
ironic_git_repo: https://git.openstack.org/openstack/ironic
-ironic_git_install_branch: 4c3a611ac3803a17dd584eb319f0bb40d6ee5ba3 # HEAD of "stable/queens" as of 01.03.2018
+ironic_git_install_branch: 6a6c0d882fe8ac299d18df75d2bbd111b170ad48 # HEAD of "stable/rocky" as of 11.12.2018
ironic_git_project_group: ironic_all
+ironic_git_track_branch: stable/rocky
## Magnum service
magnum_git_repo: https://git.openstack.org/openstack/magnum
-magnum_git_install_branch: 0b3133280fd7dbde65c8581b7be03cd1e3686bc4 # HEAD of "stable/queens" as of 01.03.2018
+magnum_git_install_branch: 765e207a5d3a45b8523cb2c34e5d74541da481e6 # HEAD of "stable/rocky" as of 11.12.2018
magnum_git_project_group: magnum_all
+magnum_git_track_branch: stable/rocky
## Trove service
trove_git_repo: https://git.openstack.org/openstack/trove
-trove_git_install_branch: 43d2b96f86a5365d69c885738ea1c3642f4e5aa1 # HEAD of "stable/queens" as of 01.03.2018
+trove_git_install_branch: 2953676e81fc22099e72ea7d0f27002a59aa779f # HEAD of "stable/rocky" as of 11.12.2018
trove_git_project_group: trove_all
+trove_git_track_branch: stable/rocky
## Horizon Trove dashboard plugin
trove_dashboard_git_repo: https://git.openstack.org/openstack/trove-dashboard
-trove_dashboard_git_install_branch: f7cf9d5bbe8b04fc9ea95e79b9bec21842d324f9 # HEAD of "stable/queens" as of 01.03.2018
+trove_dashboard_git_install_branch: c6482d8f7ebeb980a99cc89593245be381675984 # HEAD of "stable/rocky" as of 11.12.2018
trove_dashboard_git_project_group: horizon_all
+trove_dashboard_git_track_branch: stable/rocky
## Octavia service
octavia_git_repo: https://git.openstack.org/openstack/octavia
-octavia_git_install_branch: 9f379aef7c0665d4183ac549ed7a0dbc0e5d3aca # HEAD of "stable/queens" as of 01.03.2018
+octavia_git_install_branch: ec4c88e23ebeb786491158682f9a7dd42928f97a # HEAD of "stable/rocky" as of 14.12.2018
octavia_git_project_group: octavia_all
-
-
-## Molteniron service
-molteniron_git_repo: https://git.openstack.org/openstack/molteniron
-molteniron_git_install_branch: 094276cda77d814d07ad885e7d63de8d1243750a # HEAD of "master" as of 01.03.2018
-molteniron_git_project_group: molteniron_all
+octavia_git_track_branch: stable/rocky
## Tacker service
tacker_git_repo: https://git.openstack.org/openstack/tacker
-tacker_git_install_branch: 6932f5642598d53d93f94514eaed55cc93ea19d7 # HEAD of "stable/queens" as of 01.03.2018
+tacker_git_install_branch: 279b1a2840b9f28377476e0d11ca83ce2e88a0b2 # HEAD of "stable/rocky" as of 11.12.2018
tacker_git_project_group: tacker_all
+tacker_git_track_branch: stable/rocky
+
+## Congress service
+congress_git_repo: https://git.openstack.org/openstack/congress
+congress_git_install_branch: 6862ac9f356a5403e1e37050e12f032f661bae96 # HEAD of "stable/rocky" as of 11.12.2018
+congress_git_project_group: congress_all
+congress_git_track_branch: stable/rocky
+
+## Horizon Octavia dashboard plugin
+octavia_dashboard_git_repo: https://git.openstack.org/openstack/octavia-dashboard
+octavia_dashboard_git_install_branch: 80766f9390492c24de38911d7240c5490c7ef562 # HEAD of "stable/rocky" as of 11.12.2018
+octavia_dashboard_git_project_group: horizon_all
+octavia_dashboard_git_track_branch: stable/rocky
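
Each service now carries both an exact install sha and a *_git_track_branch; whether a pinned sha is actually reachable from its track branch can be verified before bumping, e.g. for keystone (values from the list above):

    git clone --branch stable/rocky https://git.openstack.org/openstack/keystone /tmp/keystone
    cd /tmp/keystone
    git merge-base --is-ancestor 295ccda8190b39a505c397d2f4d9e4896dc538cf HEAD \
        && echo "pin is on stable/rocky"
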
diff --git a/xci/installer/osa/files/setup-openstack.yml b/xci/installer/osa/files/setup-openstack.yml
index 544a9999..904215b7 100644
--- a/xci/installer/osa/files/setup-openstack.yml
+++ b/xci/installer/osa/files/setup-openstack.yml
@@ -19,11 +19,13 @@
- include: os-nova-install.yml
- include: os-neutron-install.yml
- include: os-heat-install.yml
+- include: os-ceilometer-install.yml
- include: os-horizon-install.yml
when: not core_openstack | default(False)
- include: os-swift-install.yml
- include: os-ironic-install.yml
when: not core_openstack | default(False)
+- include: os-barbican-install.yml
- include: os-tacker-install.yml
- include: os-tempest-install.yml
when: (tempest_install | default(False)) | bool or (tempest_run | default(False)) | bool
diff --git a/xci/installer/osa/files/user_variables_xci.yml b/xci/installer/osa/files/user_variables_xci.yml
index 65e09bb4..1d69f532 100644
--- a/xci/installer/osa/files/user_variables_xci.yml
+++ b/xci/installer/osa/files/user_variables_xci.yml
@@ -13,5 +13,5 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-opensuse_mirror_obs: "http://ftp.gwdg.de/pub/opensuse"
-opensuse_mirror: "http://mirrors.rit.edu/opensuse"
+debug: False
+install_method: source
diff --git a/xci/installer/osa/playbooks/configure-opnfvhost.yml b/xci/installer/osa/playbooks/configure-opnfvhost.yml
index c92abd97..07ad683b 100644
--- a/xci/installer/osa/playbooks/configure-opnfvhost.yml
+++ b/xci/installer/osa/playbooks/configure-opnfvhost.yml
@@ -35,19 +35,36 @@
roles:
- role: bootstrap-host
configure_network: xci_flavor != 'aio'
- - role: peru.proxy_settings
+ - role: ruzickap.proxy_settings
proxy_settings_http_proxy: "{{ lookup('env','http_proxy') }}"
proxy_settings_https_proxy: "{{ lookup('env','https_proxy') }}"
proxy_settings_ftp_proxy: "{{ lookup('env','ftp_proxy') }}"
proxy_settings_no_proxy: "{{ lookup('env','no_proxy') }}"
tasks:
+ - name: Create list of files to copy
+ shell: |
+ git ls-tree -r --name-only HEAD > {{ xci_cache }}/releng-xci.files
+ echo ".git/" >> {{ xci_cache }}/releng-xci.files
+ echo ".cache/repos/" >> {{ xci_cache }}/releng-xci.files
+ echo ".cache/xci.env" >> {{ xci_cache }}/releng-xci.files
+ args:
+ executable: /bin/bash
+ chdir: "{{ xci_path }}"
+ changed_when: False
+ delegate_to: 127.0.0.1
+ tags:
+ - skip_ansible_lint
+
- name: Copy releng-xci to remote host
synchronize:
+ archive: yes
src: "{{ xci_path }}/"
dest: "{{ remote_xci_path }}"
- recursive: yes
delete: yes
+ rsync_opts:
+ - "--recursive"
+ - "--files-from={{ xci_cache }}/releng-xci.files"
- name: Re-create OpenStack-Ansible /etc directory
file:
@@ -135,6 +152,7 @@
- name: Install ARA callback plugin in OSA virtualenv
pip:
name: ara
+ version: 0.16.4
state: present
extra_args: '-c https://raw.githubusercontent.com/openstack/requirements/{{ requirements_git_install_branch }}/upper-constraints.txt'
executable: '/opt/ansible-runtime/bin/pip'
@@ -158,11 +176,6 @@
chdir: "{{openstack_osa_path}}/scripts"
changed_when: True
- - name: Configure SSL certificates
- include_tasks: "{{ xci_path }}/xci/playbooks/manage-ssl-certs.yml"
- vars:
- extra_args: "-c https://raw.githubusercontent.com/openstack/requirements/{{ requirements_git_install_branch }}/upper-constraints.txt"
-
- name: fetch xci environment
copy:
src: "{{ xci_path }}/.cache/xci.env"
@@ -176,12 +189,6 @@
include_role:
name: "openstack-ansible-openstack_openrc"
- - name: add extra insecure flag to generated openrc
- blockinfile:
- dest: "{{ ansible_env.HOME }}/openrc"
- block: |
- export OS_INSECURE=true
-
- name: fetch generated openrc
fetch:
src: "{{ ansible_env.HOME }}/openrc"
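
The ARA pin added above can be reproduced by hand against the OSA runtime virtualenv, using the same upper-constraints file as the task; REQUIREMENTS_SHA stands in for the requirements_git_install_branch value:

    /opt/ansible-runtime/bin/pip install 'ara==0.16.4' \
        -c "https://raw.githubusercontent.com/openstack/requirements/${REQUIREMENTS_SHA}/upper-constraints.txt"
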
diff --git a/xci/installer/osa/playbooks/configure-targethosts.yml b/xci/installer/osa/playbooks/configure-targethosts.yml
index a5d2923c..dfa17696 100644
--- a/xci/installer/osa/playbooks/configure-targethosts.yml
+++ b/xci/installer/osa/playbooks/configure-targethosts.yml
@@ -18,7 +18,7 @@
with_items:
- "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml"
roles:
- - role: peru.proxy_settings
+ - role: ruzickap.proxy_settings
proxy_settings_http_proxy: "{{ lookup('env','http_proxy') }}"
proxy_settings_https_proxy: "{{ lookup('env','https_proxy') }}"
proxy_settings_ftp_proxy: "{{ lookup('env','ftp_proxy') }}"
diff --git a/xci/installer/osa/playbooks/post-deployment.yml b/xci/installer/osa/playbooks/post-deployment.yml
new file mode 100644
index 00000000..36c052c9
--- /dev/null
+++ b/xci/installer/osa/playbooks/post-deployment.yml
@@ -0,0 +1,66 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- hosts: opnfv
+ remote_user: root
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+ - "{{ xci_path }}/xci/installer/osa/files/openstack_services.yml"
+ - "{{ xci_path }}/xci/installer/osa/files/{{ xci_flavor }}/user_variables.yml"
+
+ environment:
+ http_proxy: "{{ lookup('env','http_proxy') }}"
+ https_proxy: "{{ lookup('env','https_proxy') }}"
+ no_proxy: "{{ lookup('env','no_proxy') }}"
+ HTTP_PROXY: "{{ lookup('env','http_proxy') }}"
+ HTTPS_PROXY: "{{ lookup('env','https_proxy') }}"
+ NO_PROXY: "{{ lookup('env','no_proxy') }}"
+ pre_tasks:
+ - name: Load distribution variables
+ include_vars:
+ file: "{{ item }}"
+ with_items:
+ - "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml"
+ - name: Set facts for remote deployment
+ set_fact:
+ remote_xci_scenario_path: "{{ ansible_env.HOME }}/releng-xci/.cache/repos/scenarios/{{ deploy_scenario }}/scenarios/{{ deploy_scenario }}"
+
+ roles:
+ - role: ruzickap.proxy_settings
+ proxy_settings_http_proxy: "{{ lookup('env','http_proxy') }}"
+ proxy_settings_https_proxy: "{{ lookup('env','https_proxy') }}"
+ proxy_settings_ftp_proxy: "{{ lookup('env','ftp_proxy') }}"
+ proxy_settings_no_proxy: "{{ lookup('env','no_proxy') }}"
+
+ tasks:
+ - name: "Configure http_proxy_env_url"
+ lineinfile:
+ path: "{{openstack_osa_etc_path}}/user_variables_proxy.yml"
+ regexp: "^http_proxy_env_url:.*"
+ line: "{{ 'http_proxy_env_url: ' + lookup('env','http_proxy') }}"
+ when:
+ - lookup('env','http_proxy') != ""
+
+ - name: Reload XCI deployment host facts
+ setup:
+ filter: ansible_local
+ gather_subset: "!all"
+ delegate_to: 127.0.0.1
+
+ - name: Check if any post-deployment tasks are defined for {{ deploy_scenario }} role
+ stat:
+ path: "{{ remote_xci_scenario_path }}/role/{{ deploy_scenario }}/tasks/post-deployment.yml"
+ register: post_deployment_yml
+
+ - name: Execute post-deployment tasks of {{ deploy_scenario }} role
+ include_role:
+ name: "{{ hostvars['opnfv'].ansible_local.xci.scenarios.role }}"
+ tasks_from: post-deployment
+ when:
+ - post_deployment_yml.stat.exists
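
The lineinfile task above keeps a single http_proxy_env_url entry in user_variables_proxy.yml in sync with the caller's proxy environment. A rough shell equivalent, with the file path and variable names assumed from the task:

    f="$OPENSTACK_OSA_ETC_PATH/user_variables_proxy.yml"
    if [ -n "${http_proxy:-}" ]; then
        if grep -q '^http_proxy_env_url:' "$f"; then
            sed -i "s|^http_proxy_env_url:.*|http_proxy_env_url: $http_proxy|" "$f"
        else
            echo "http_proxy_env_url: $http_proxy" >> "$f"
        fi
    fi
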
diff --git a/xci/installer/osh/README b/xci/installer/osh/README
new file mode 100644
index 00000000..902ac10e
--- /dev/null
+++ b/xci/installer/osh/README
@@ -0,0 +1,50 @@
+Requirements:
+ 1. Performance of hosts
+ There are no official performance requirements. The following settings are recommended:
+ - VM_CPU=6
+ - VM_DISK=80GB
+ - VM_MEMORY_SIZE=16GB
+
+ 2. Distributions
+ - Ubuntu 16.04
+
+ 3. Packages:
+ - Ansible v2.4 (or newer) and python-netaddr must be installed on the machine that will run Ansible commands
+ - Jinja 2.9 (or newer) is required to run the Ansible Playbooks
+
+ 4. Others:
+ - The target servers must have access to the Internet in order to pull docker images.
+ - The target servers must be configured to allow IPv4 forwarding.
+ - Your ssh key must be copied to all the servers that are part of your inventory.
+ - Firewalls are not managed by the deployment; you will need to implement your own rules as usual. To avoid issues during the deployment, disable the firewall on the target servers.
+
+Flavors:
+ 1. mini: One deployment host, 1 master host and 1 node host.
+ 2. noha: One deployment host, 1 master host and 2 node hosts.
+
+Components Installed:
+ 1. etcd
+ 2. network plugins:
+ - calico
+ 3. kubernetes
+ 4. docker
+
+How to use:
+
+Clone the OPNFV Releng repository
+
+ git clone https://gerrit.opnfv.org/gerrit/releng-xci.git
+
+Change into the directory where the sandbox script is located
+
+ cd releng-xci/xci
+
+Set the variables to run openstack-helm
+
+ export INSTALLER_TYPE=osh
+ export DEPLOY_SCENARIO=k8-calico-nofeature
+ export XCI_FLAVOR=mini
+
+Execute sandbox script
+
+ ./xci-deploy.sh
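
After the script completes, a quick sanity check can be run from the machine that performed the deployment, assuming OPNFV_HOST_IP is still exported and the opnfv host holds the admin kubeconfig:

    ssh root@$OPNFV_HOST_IP kubectl get nodes
    ssh root@$OPNFV_HOST_IP kubectl get pods --all-namespaces
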
diff --git a/xci/installer/osh/deploy.sh b/xci/installer/osh/deploy.sh
new file mode 100755
index 00000000..e56845b8
--- /dev/null
+++ b/xci/installer/osh/deploy.sh
@@ -0,0 +1,170 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2017 Huawei
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+set -o errexit
+set -o nounset
+set -o pipefail
+
+OSH_XCI_PLAYBOOKS="$(dirname $(realpath ${BASH_SOURCE[0]}))/playbooks"
+export ANSIBLE_ROLES_PATH=$HOME/.ansible/roles:/etc/ansible/roles:${XCI_PATH}/xci/playbooks/roles
+
+#-------------------------------------------------------------------------------
+# Configure localhost
+#-------------------------------------------------------------------------------
+# This playbook
+# - removes directories that were created by the previous xci run
+# - clones opnfv/releng-xci repository
+# - clones kubernetes-incubator/kubespray repository
+# - creates log directory
+#-------------------------------------------------------------------------------
+
+echo "Info: Configuring localhost for kubespray"
+echo "-----------------------------------------------------------------------"
+cd $XCI_PLAYBOOKS
+ansible-playbook ${XCI_ANSIBLE_PARAMS} -e XCI_PATH="${XCI_PATH}" \
+ -i dynamic_inventory.py configure-localhost.yml
+echo "-----------------------------------------------------------------------"
+echo "Info: Configured localhost for kubespray"
+
+#-------------------------------------------------------------------------------
+# Configure installer
+#-------------------------------------------------------------------------------
+# This playbook
+# - re-creates the kubespray/inventory/opnfv directory from the sample inventory
+# - updates kubespray's k8s-cluster.yml for xci
+#-------------------------------------------------------------------------------
+
+echo "Info: Configuring kubespray installer"
+echo "-----------------------------------------------------------------------"
+cd $OSH_XCI_PLAYBOOKS
+ansible-playbook ${XCI_ANSIBLE_PARAMS} \
+ -i ${XCI_PLAYBOOKS}/dynamic_inventory.py configure-installer.yml
+echo "-----------------------------------------------------------------------"
+echo "Info: Configured kubespray installer"
+
+#-------------------------------------------------------------------------------
+# Configure deployment host, opnfv
+#-------------------------------------------------------------------------------
+# This playbook
+# - removes directories that were created by the previous xci run
+# - synchronizes the opnfv/releng-xci and kubernetes-incubator/kubespray repositories
+# - generates/prepares ssh keys
+# - copies flavor files to be used by kubespray
+# - installs packages required by kubespray
+#-------------------------------------------------------------------------------
+echo "Info: Configuring opnfv deployment host for kubespray"
+echo "-----------------------------------------------------------------------"
+cd $OSH_XCI_PLAYBOOKS
+ansible-playbook ${XCI_ANSIBLE_PARAMS} \
+ -i ${XCI_PLAYBOOKS}/dynamic_inventory.py configure-opnfvhost.yml
+echo "-----------------------------------------------------------------------"
+echo "Info: Configured opnfv deployment host for kubespray"
+
+#-------------------------------------------------------------------------------
+# Configure target hosts for kubespray
+#-------------------------------------------------------------------------------
+# This playbook is only run for all flavors except aio, since aio is configured by configure-opnfvhost.yml
+# This playbook
+# - adds public keys to target hosts
+# - installs packages required by kubespray
+# - configures haproxy service
+#-------------------------------------------------------------------------------
+if [ $XCI_FLAVOR != "aio" ]; then
+ echo "Info: Configuring target hosts for kubespray"
+ echo "-----------------------------------------------------------------------"
+ cd $OSH_XCI_PLAYBOOKS
+ ansible-playbook ${XCI_ANSIBLE_PARAMS} \
+ -i ${XCI_PLAYBOOKS}/dynamic_inventory.py configure-targethosts.yml
+ echo "-----------------------------------------------------------------------"
+ echo "Info: Configured target hosts for kubespray"
+fi
+
+
+echo "Info: Using kubespray to deploy the kubernetes cluster"
+echo "-----------------------------------------------------------------------"
+ssh root@$OPNFV_HOST_IP "set -o pipefail; export XCI_FLAVOR=$XCI_FLAVOR; export INSTALLER_TYPE=$INSTALLER_TYPE; \
+ export IDF=/root/releng-xci/xci/var/idf.yml; export PDF=/root/releng-xci/xci/var/pdf.yml; \
+ cd releng-xci/.cache/repos/kubespray/; ansible-playbook \
+ -i inventory/opnfv/dynamic_inventory.py cluster.yml -b | tee setup-kubernetes.log"
+scp root@$OPNFV_HOST_IP:~/releng-xci/.cache/repos/kubespray/setup-kubernetes.log \
+ $LOG_PATH/setup-kubernetes.log
+
+
+cd $OSH_XCI_PLAYBOOKS
+ansible-playbook ${XCI_ANSIBLE_PARAMS} \
+ -i ${XCI_PLAYBOOKS}/dynamic_inventory.py configure-kubenet.yml
+echo
+echo "-----------------------------------------------------------------------"
+echo "Info: Kubernetes installation is successfully completed!"
+echo "-----------------------------------------------------------------------"
+
+#-------------------------------------------------------------------------------
+# Execute post-installation tasks
+#-------------------------------------------------------------------------------
+# Playbook post-deployment.yml is used to execute any post-deployment tasks that
+# are required for the scenario under test.
+#-------------------------------------------------------------------------------
+# copy admin.conf
+ssh root@$OPNFV_HOST_IP "mkdir -p ~/.kube/;\
+ cp -f ~/admin.conf ~/.kube/config;"
+
+echo "-----------------------------------------------------------------------"
+echo "Info: Running post-deployment scenario role"
+echo "-----------------------------------------------------------------------"
+cd $OSH_XCI_PLAYBOOKS
+ansible-playbook ${XCI_ANSIBLE_PARAMS} -i ${XCI_PLAYBOOKS}/dynamic_inventory.py \
+ post-deployment.yml
+echo "-----------------------------------------------------------------------"
+echo "Info: Post-deployment scenario role execution done"
+echo "-----------------------------------------------------------------------"
+echo
+echo "Log in to the opnfv host with ssh root@$OPNFV_HOST_IP
+and follow the user guide to create a service:
+https://kubernetes.io/docs/user-guide/walkthrough/k8s201/"
+echo
+echo "-----------------------------------------------------------------------"
+echo "Info: Kubernetes login details"
+echo "-----------------------------------------------------------------------"
+echo
+# Get the dashboard URL
+if ssh-keygen -f "/home/opnfv/.ssh/known_hosts" -F $OPNFV_HOST_IP; then
+    ssh-keygen -f "/home/opnfv/.ssh/known_hosts" -R $OPNFV_HOST_IP
+    echo "Info: known_hosts entry for opnfv host from previous deployment found and deleted"
+fi
+DASHBOARD_SERVICE=$(ssh -q -o StrictHostKeyChecking=no root@$OPNFV_HOST_IP "kubectl get service -n kube-system | grep kubernetes-dashboard")
+DASHBOARD_PORT=$(echo ${DASHBOARD_SERVICE} | awk '{print $5}' | awk -F "[:/]" '{print $2}')
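+# Example service line (illustrative): "kubernetes-dashboard NodePort 10.233.1.2 <none> 443:31443/TCP 5m";
+# field 5 is "443:31443/TCP" and the NodePort (31443) sits between ':' and '/'.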
+KUBER_SERVER_URL=$(ssh root@$OPNFV_HOST_IP "grep -r server ~/.kube/config")
+echo "Info: Kubernetes Dashboard URL:"
+echo $KUBER_SERVER_URL | awk '{print $2}'| sed -n "s#:[0-9]*\$#:$DASHBOARD_PORT#p"
+
+# Get the dashboard user and password
+MASTER_IP=$(echo ${KUBER_SERVER_URL} | awk '{print $2}' | awk -F "[:/]" '{print $4}')
+if ssh-keygen -f "/home/opnfv/.ssh/known_hosts" -F $MASTER_IP; then
+    ssh-keygen -f "/home/opnfv/.ssh/known_hosts" -R $MASTER_IP
+    echo "Info: known_hosts entry for master host from previous deployment found and deleted"
+fi
+USER_CSV=$(ssh -q -o StrictHostKeyChecking=no root@$MASTER_IP "cat /etc/kubernetes/users/known_users.csv")
+USERNAME=$(echo $USER_CSV | awk -F ',' '{print $2}')
+PASSWORD=$(echo $USER_CSV | awk -F ',' '{print $1}')
+echo "Info: Dashboard username: ${USERNAME}"
+echo "Info: Dashboard password: ${PASSWORD}"
+
+echo "-----------------------------------------------------------------------"
+echo "Info: Continue with running the openstack-helm installation"
+echo "-----------------------------------------------------------------------"
+cd $OSH_XCI_PLAYBOOKS
+ansible-playbook ${XCI_ANSIBLE_PARAMS} -v -i ${XCI_PLAYBOOKS}/dynamic_inventory.py \
+ install-openstack-helm.yml
+echo "-----------------------------------------------------------------------"
+echo "Info: Openstack-helm installation execution done"
+echo "-----------------------------------------------------------------------"
+echo
+
+
+# vim: set ts=4 sw=4 expandtab:
diff --git a/xci/installer/osh/files/ha/inventory/group_vars/all.yml b/xci/installer/osh/files/ha/inventory/group_vars/all.yml
new file mode 100644
index 00000000..d1b946a7
--- /dev/null
+++ b/xci/installer/osh/files/ha/inventory/group_vars/all.yml
@@ -0,0 +1,8 @@
+---
+loadbalancer_apiserver:
+ address: 192.168.122.222
+ port: 8383
+
+apiserver_loadbalancer_domain_name: 192.168.122.222
+supplementary_addresses_in_ssl_keys:
+ - 192.168.122.222
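+
+# 192.168.122.222 is the keepalived VIP and 8383 the haproxy frontend port
+# (see xci/installer/osh/playbooks/group_vars/all.yml); haproxy forwards API
+# server traffic to the kube-master backends on port 6443.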
diff --git a/xci/installer/osh/playbooks/configure-installer.yml b/xci/installer/osh/playbooks/configure-installer.yml
new file mode 100644
index 00000000..383f55fc
--- /dev/null
+++ b/xci/installer/osh/playbooks/configure-installer.yml
@@ -0,0 +1,51 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2019 Ericsson Software Technology and Others
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- hosts: localhost
+ connection: local
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+
+ tasks:
+ - name: delete existing kubespray/inventory/opnfv directory
+ file:
+ path: "{{ xci_path }}/.cache/repos/kubespray/inventory/opnfv"
+ state: absent
+
+ - name: copy kubespray/inventory/sample as kubespray/inventory/opnfv
+ copy:
+ src: "{{ xci_path }}/.cache/repos/kubespray/inventory/sample/"
+ dest: "{{ xci_path }}/.cache/repos/kubespray/inventory/opnfv"
+
+ - name: update kubespray k8s-cluster.yml for xci
+ lineinfile:
+ path: "{{ xci_path }}/.cache/repos/kubespray/inventory/opnfv/group_vars/k8s-cluster/k8s-cluster.yml"
+ regexp: "{{ item.regexp }}"
+ line: "{{ item.line }}"
+ with_items:
+ - { regexp: "kube_version:.*", line: "kube_version: {{ kubernetes_version }}" }
+ - { regexp: "kubeconfig_localhost:.*", line: "kubeconfig_localhost: true" }
+ - { regexp: "kube_basic_auth:.*", line: "kube_basic_auth: true" }
+ - { regexp: "dashboard_enabled:.*", line: "dashboard_enabled: true" }
+
+# NOTE(fdegir): this task is separate from the lineinfile task above because
+# escaping curly braces does not work with with_items: Ansible would resolve
+# {{ ansible_env.HOME }} right away, while it should point to the home folder
+# of the user executing this task at runtime.
+ - name: update kubespray artifacts_dir
+ lineinfile:
+ path: "{{ xci_path }}/.cache/repos/kubespray/inventory/opnfv/group_vars/k8s-cluster/k8s-cluster.yml"
+ regexp: "artifacts_dir:.*"
+ line: "artifacts_dir: '{{ '{{' }} ansible_env.HOME {{ '}}' }}'"
+
+ - name: change dashboard service type to NodePort
+ lineinfile:
+ path: "{{ xci_path }}/.cache/repos/kubespray/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2"
+ insertafter: 'targetPort'
+ line: " type: NodePort"
diff --git a/xci/installer/osh/playbooks/configure-kubenet.yml b/xci/installer/osh/playbooks/configure-kubenet.yml
new file mode 100644
index 00000000..18a126c1
--- /dev/null
+++ b/xci/installer/osh/playbooks/configure-kubenet.yml
@@ -0,0 +1,51 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 SUSE LINUX GmbH and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# NOTE(hwoarang) Kubenet expects networking to be prepared by the administrator so it's necessary
+# to do that as part of the node configuration. All we need is to add static routes on every node
+# so cbr0 interfaces can talk to each other.
+- name: Prepare networking for kubenet
+ hosts: k8s-cluster
+ remote_user: root
+ gather_facts: True
+ become: yes
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+ tasks:
+ - name: Configure static routes
+ block:
+ - name: Collect cbr0 information from the nodes
+ set_fact:
+ kubenet_xci_static_routes: |-
+ {% set static_routes = [] %}
+ {% for host in groups['k8s-cluster']|select("ne", inventory_hostname) %}
+ {%- set _ = static_routes.append(
+ {'network': (hostvars[host]['ansible_cbr0']['ipv4']['network']+'/'+
+ hostvars[host]['ansible_cbr0']['ipv4']['netmask'])|ipaddr('net'),
+ 'gateway': hostvars[host]['ansible_default_ipv4']['address']}) -%}
+ {% endfor %}
+ {{ static_routes }}
+
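+ # The fact set above ends up as a list like (addresses are illustrative):
+ # [{'network': '10.233.64.0/24', 'gateway': '192.168.122.4'}, ...]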
+ - name: Add static routes on each node
+ shell: "ip route show | grep -q {{ item.network }} || ip route add {{ item.network }} via {{ item.gateway }}"
+ with_items: "{{ kubenet_xci_static_routes }}"
+ loop_control:
+ label: "{{ item.network }}"
+ when: deploy_scenario.find('k8-nosdn-') != -1
+
+ - name: Ensure rp_filter is disabled on localhost
+ sysctl:
+ name: net.ipv4.conf.all.rp_filter
+ sysctl_set: yes
+ state: present
+ value: "{{ (kubenet_xci_static_routes is defined) | ternary(0, 1) }}"
+ reload: yes
+ delegate_to: localhost
+ run_once: True
diff --git a/xci/installer/osh/playbooks/configure-opnfvhost.yml b/xci/installer/osh/playbooks/configure-opnfvhost.yml
new file mode 100644
index 00000000..52e42b06
--- /dev/null
+++ b/xci/installer/osh/playbooks/configure-opnfvhost.yml
@@ -0,0 +1,101 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- hosts: opnfv
+ remote_user: root
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+
+ pre_tasks:
+ - name: Load distribution variables
+ include_vars:
+ file: "{{ item }}"
+ with_items:
+ - "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml"
+ - name: Set facts for remote deployment
+ set_fact:
+ remote_xci_path: "{{ ansible_env.HOME }}/releng-xci"
+ remote_xci_flavor_files: "{{ ansible_env.HOME }}/releng-xci/xci/installer/{{ installer_type }}/files/{{ xci_flavor }}"
+ remote_xci_playbooks: "{{ ansible_env.HOME }}/releng-xci/xci/playbooks"
+
+ roles:
+ - role: bootstrap-host
+ configure_network: xci_flavor != 'aio'
+
+ tasks:
+ - name: Create list of files to copy
+ shell: |
+ git ls-tree -r --name-only HEAD > {{ xci_cache }}/releng-xci.files
+ echo ".git/" >> {{ xci_cache }}/releng-xci.files
+ echo ".cache/repos/" >> {{ xci_cache }}/releng-xci.files
+ echo ".cache/xci.env" >> {{ xci_cache }}/releng-xci.files
+ args:
+ executable: /bin/bash
+ chdir: "{{ xci_path }}"
+ changed_when: False
+ delegate_to: 127.0.0.1
+ tags:
+ - skip_ansible_lint
+
+ - name: Copy releng-xci to remote host
+ synchronize:
+ archive: yes
+ src: "{{ xci_path }}/"
+ dest: "{{ remote_xci_path }}"
+ delete: yes
+ rsync_opts:
+ - "--recursive"
+ - "--files-from={{ xci_cache }}/releng-xci.files"
+
+ - name: link xci dynamic inventory to kubespray/inventory/opnfv directory
+ file:
+ src: "{{ remote_xci_playbooks }}/dynamic_inventory.py"
+ path: "{{ remote_xci_path }}/.cache/repos/kubespray/inventory/opnfv/dynamic_inventory.py"
+ state: link
+
+ - name: Download kubectl and place it to /usr/local/bin
+ get_url:
+ url: "https://storage.googleapis.com/kubernetes-release/release/{{ kubernetes_version }}/bin/linux/amd64/kubectl"
+ dest: /usr/local/bin/kubectl
+ owner: root
+ group: root
+ mode: 0755
+
+ - name: Reload XCI deployment host facts
+ setup:
+ filter: ansible_local
+ gather_subset: "!all"
+ delegate_to: 127.0.0.1
+
+ - name: Prepare everything to run the {{ deploy_scenario }} role
+ include_role:
+ name: "{{ hostvars['opnfv'].ansible_local.xci.scenarios.role }}"
+
+ - name: Install required packages
+ package:
+ name: "{{ (ansible_pkg_mgr == 'zypper') | ternary('dbus-1', 'dbus') }}"
+ state: present
+ update_cache: "{{ (ansible_pkg_mgr in ['apt', 'zypper']) | ternary('yes', omit) }}"
+ when: xci_flavor == 'aio'
+
+ - name: pip install required packages
+ pip:
+ name: "{{ item.name }}"
+ version: "{{ item.version | default(omit) }}"
+ with_items:
+ - { name: 'ansible', version: "{{ xci_kube_ansible_pip_version }}" }
+ - { name: 'netaddr' }
+ - { name: 'ansible-modules-hashivault' }
+
+ - name: fetch xci environment
+ copy:
+ src: "{{ xci_path }}/.cache/xci.env"
+ dest: /root/xci.env
+
+ - name: Manage SSH keys
+ include_tasks: "{{ xci_path }}/xci/playbooks/manage-ssh-keys.yml"
diff --git a/xci/installer/osh/playbooks/configure-targethosts.yml b/xci/installer/osh/playbooks/configure-targethosts.yml
new file mode 100644
index 00000000..2fde9877
--- /dev/null
+++ b/xci/installer/osh/playbooks/configure-targethosts.yml
@@ -0,0 +1,40 @@
+---
+- hosts: k8s-cluster
+ remote_user: root
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+
+ pre_tasks:
+ - name: Load distribution variables
+ include_vars:
+ file: "{{ item }}"
+ with_items:
+ - "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml"
+
+ roles:
+ - role: bootstrap-host
+
+ tasks:
+ - name: Manage SSH keys
+ include_tasks: "{{ xci_path }}/xci/playbooks/manage-ssh-keys.yml"
+
+ - name: Install dbus
+ package:
+ name: "{{ (ansible_pkg_mgr == 'zypper') | ternary('dbus-1', 'dbus') }}"
+ state: present
+ update_cache: "{{ (ansible_pkg_mgr in ['apt', 'zypper']) | ternary('yes', omit) }}"
+
+- hosts: kube-master
+ remote_user: root
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+ pre_tasks:
+ - name: Load distribution variables
+ include_vars:
+ file: "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml"
+ roles:
+ - role: "keepalived"
+ when: xci_flavor == 'ha'
+ - role: "haproxy_server"
+ haproxy_service_configs: "{{ haproxy_default_services }}"
+ when: xci_flavor == 'ha'
diff --git a/xci/installer/osh/playbooks/group_vars/all.yml b/xci/installer/osh/playbooks/group_vars/all.yml
new file mode 100644
index 00000000..7453bdab
--- /dev/null
+++ b/xci/installer/osh/playbooks/group_vars/all.yml
@@ -0,0 +1,55 @@
+---
+keepalived_ubuntu_src: "uca"
+keepalived_uca_apt_repo_url: "{{ uca_apt_repo_url | default('http://ubuntu-cloud.archive.canonical.com/ubuntu') }}"
+
+keepalived_sync_groups:
+ haproxy:
+ instances:
+ - external
+
+haproxy_keepalived_external_interface: "{{ ansible_default_ipv4.interface }}"
+haproxy_keepalived_authentication_password: 'keepalived'
+keepalived_instances:
+ external:
+ interface: "{{ haproxy_keepalived_external_interface }}"
+ state: "BACKUP"
+ virtual_router_id: "{{ haproxy_keepalived_external_virtual_router_id | default('10') }}"
+ priority: "{{ ((ansible_play_hosts|length-ansible_play_hosts.index(inventory_hostname))*100)-((ansible_play_hosts|length-ansible_play_hosts.index(inventory_hostname))*50) }}"
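+ # The expression above reduces to (number_of_play_hosts - host_index) * 50,
+ # so the first host in the play gets the highest keepalived priority.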
+ authentication_password: "{{ haproxy_keepalived_authentication_password }}"
+ vips:
+ - "{{ haproxy_keepalived_external_vip_cidr | default('192.168.122.222/32') }} dev {{ haproxy_keepalived_external_interface }}"
+
+haproxy_default_services:
+ - service:
+ haproxy_service_name: proxy-apiserver
+ haproxy_backend_nodes: "{{ groups['kube-master'] | default([]) }}"
+ haproxy_port: 8383
+ haproxy_backend_port: 6443
+ haproxy_balance_type: tcp
+
+haproxy_bind_on_non_local: "True"
+haproxy_use_keepalived: "True"
+keepalived_selinux_compile_rules:
+ - keepalived_ping
+ - keepalived_haproxy_pid_file
+
+# Ensure that the package state matches the global setting
+haproxy_package_state: "latest"
+
+haproxy_whitelist_networks:
+ - 192.168.0.0/16
+ - 172.16.0.0/12
+ - 10.0.0.0/8
+
+haproxy_galera_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_glance_registry_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_keystone_admin_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_nova_metadata_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_rabbitmq_management_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_repo_git_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_repo_cache_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_octavia_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_ssl: false
+
+internal_lb_vip_address: "192.168.122.222"
+external_lb_vip_address: "{{ internal_lb_vip_address }}"
diff --git a/xci/installer/osh/playbooks/install-openstack-helm.yml b/xci/installer/osh/playbooks/install-openstack-helm.yml
new file mode 100644
index 00000000..a16572a5
--- /dev/null
+++ b/xci/installer/osh/playbooks/install-openstack-helm.yml
@@ -0,0 +1,24 @@
+---
+- hosts: kube-node
+ remote_user: root
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+
+ roles:
+ - role: prepare-kube-nodes-osh
+
+- hosts: opnfv
+ remote_user: root
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+ roles:
+ - role: prepare-opnfvhost-osh
+ - role: prepare-osh
+ - role: install-osh-mini
+ when: xci_flavor == 'mini'
+ environment:
+ - CONTAINER_DISTRO_NAME: "{{ container_distro_name }}"
+ - CONTAINER_DISTRO_VERSION: "{{ container_distro_version }}"
+ - OPENSTACK_RELEASE: "{{ openstack_osh_version }}"
+ - role: install-osh-noha
+ when: xci_flavor == 'noha'
diff --git a/xci/installer/osh/playbooks/post-deployment.yml b/xci/installer/osh/playbooks/post-deployment.yml
new file mode 100644
index 00000000..5c2f7f36
--- /dev/null
+++ b/xci/installer/osh/playbooks/post-deployment.yml
@@ -0,0 +1,42 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- hosts: opnfv
+ remote_user: root
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+
+ pre_tasks:
+ - name: Load distribution variables
+ include_vars:
+ file: "{{ item }}"
+ with_items:
+ - "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml"
+ - name: Set facts for remote deployment
+ set_fact:
+ remote_xci_scenario_path: "{{ ansible_env.HOME }}/releng-xci/.cache/repos/scenarios/{{ deploy_scenario }}/scenarios/{{ deploy_scenario }}"
+
+ tasks:
+ - name: Reload XCI deployment host facts
+ setup:
+ filter: ansible_local
+ gather_subset: "!all"
+ delegate_to: 127.0.0.1
+
+ - name: Check if any post-deployment tasks are defined for {{ deploy_scenario }} role
+ stat:
+ path: "{{ remote_xci_scenario_path }}/role/{{ deploy_scenario }}/tasks/post-deployment.yml"
+ register: post_deployment_yml
+
+ - name: Execute post-deployment tasks of {{ deploy_scenario }} role
+ include_role:
+ name: "{{ hostvars['opnfv'].ansible_local.xci.scenarios.role }}"
+ tasks_from: post-deployment
+ when:
+ - post_deployment_yml.stat.exists
diff --git a/xci/installer/osh/playbooks/roles/install-osh-mini/tasks/main.yml b/xci/installer/osh/playbooks/roles/install-osh-mini/tasks/main.yml
new file mode 100644
index 00000000..e5df54fa
--- /dev/null
+++ b/xci/installer/osh/playbooks/roles/install-osh-mini/tasks/main.yml
@@ -0,0 +1,109 @@
+---
+
+- name: Setup Clients
+ command: ./tools/deployment/common/setup-client.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy the ingress controller
+ command: ./tools/deployment/component/common/ingress.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy MariaDB
+ command: ./tools/deployment/component/common/mariadb.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy memcached
+ command: ./tools/deployment/component/common/memcached.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy RabbitMQ
+ command: ./tools/deployment/component/common/rabbitmq.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Update nfs-provisioner helm-chart
+ shell: helm dependency update nfs-provisioner
+ args:
+ chdir: /root/repos/openstack-helm-infra
+ executable: /bin/bash
+ tags:
+ - skip_ansible_lint
+
+- name: Deploy nfs-provisioner
+ command: ./tools/deployment/component/nfs-provisioner/nfs-provisioner.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Keystone
+ command: ./tools/deployment/component/keystone/keystone.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Heat
+ command: ./tools/deployment/component/heat/heat.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Glance
+ command: ./tools/deployment/component/glance/glance.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy OpenvSwitch
+ command: ./tools/deployment/component/compute-kit/openvswitch.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Libvirt
+ command: ./tools/deployment/component/compute-kit/libvirt.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Add br-vxlan as the tunnel interface
+ lineinfile:
+ path: /root/repos/openstack-helm/tools/deployment/component/compute-kit/compute-kit.sh
+ regexp: 'tunnel: docker0'
+ line: ' tunnel: br-vxlan'
+
+- name: Deploy Compute Kit (Nova and Neutron)
+ command: ./tools/deployment/component/compute-kit/compute-kit.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Copy script to the worker node
+ command: "scp -o \"StrictHostKeyChecking no\" tools/deployment/developer/ceph/170-setup-gateway.sh root@{{ hostvars.node1.ip }}:170-setup-gateway.sh"
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Setup the gateway to the public network at worker node
+ command: /root/170-setup-gateway.sh
+ changed_when: false
+ delegate_to: node1
+
+- name: Add a route from opnfv to worker node for the public network
+ command: ip route add 172.24.4.0/24 via 192.168.122.4
+ changed_when: false
+
+# Deployment validation
+- name: Exercise the cloud
+ command: ./tools/deployment/developer/common/900-use-it.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
diff --git a/xci/installer/osh/playbooks/roles/install-osh-mini/vars/main.yml b/xci/installer/osh/playbooks/roles/install-osh-mini/vars/main.yml
new file mode 100644
index 00000000..03c02a83
--- /dev/null
+++ b/xci/installer/osh/playbooks/roles/install-osh-mini/vars/main.yml
@@ -0,0 +1,18 @@
+---
+# Copyright 2019, SUSE Linux GmbH
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+cacheable: yes
+container_distro_name: "{{ (osh_distro=='opensuse') | ternary('opensuse', 'ubuntu') }}"
+container_distro_version: "{{ (osh_distro=='opensuse') | ternary('15', 'xenial') }}"
diff --git a/xci/installer/osh/playbooks/roles/install-osh-noha/tasks/main.yml b/xci/installer/osh/playbooks/roles/install-osh-noha/tasks/main.yml
new file mode 100644
index 00000000..befdcfce
--- /dev/null
+++ b/xci/installer/osh/playbooks/roles/install-osh-noha/tasks/main.yml
@@ -0,0 +1,130 @@
+---
+- name: Setup Clients
+ command: ./tools/deployment/multinode/010-setup-client.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy the ingress controller
+ command: ./tools/deployment/multinode/020-ingress.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Ceph
+ command: ./tools/deployment/multinode/030-ceph.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Activate the openstack namespace to be able to use Ceph
+ command: ./tools/deployment/multinode/040-ceph-ns-activate.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy MariaDB
+ command: ./tools/deployment/multinode/050-mariadb.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy RabbitMQ
+ command: ./tools/deployment/multinode/060-rabbitmq.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy memcached
+ command: ./tools/deployment/multinode/070-memcached.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Keystone
+ command: ./tools/deployment/multinode/080-keystone.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Horizon
+ command: ./tools/deployment/multinode/085-horizon.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Rados Gateway for object store
+ command: ./tools/deployment/multinode/090-ceph-radosgateway.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Glance
+ command: ./tools/deployment/multinode/100-glance.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Cinder
+ command: ./tools/deployment/multinode/110-cinder.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy OpenvSwitch
+ command: ./tools/deployment/multinode/120-openvswitch.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Libvirt
+ command: ./tools/deployment/multinode/130-libvirt.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Add br-vxlan as the tunnel interface
+ lineinfile:
+ path: /root/repos/openstack-helm/tools/deployment/multinode/140-compute-kit.sh
+ regexp: 'NETWORK_TUNNEL_DEV="\$\(network_tunnel_dev\)"'
+ line: 'NETWORK_TUNNEL_DEV=br-vxlan'
+
+- name: Deploy Compute Kit (Nova and Neutron)
+ command: ./tools/deployment/multinode/140-compute-kit.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Heat
+ command: ./tools/deployment/multinode/150-heat.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Barbican
+ command: ./tools/deployment/multinode/160-barbican.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Copy script to the worker node
+ command: "scp -o \"StrictHostKeyChecking no\" tools/deployment/developer/ceph/170-setup-gateway.sh root@{{ hostvars.node1.ip }}:170-setup-gateway.sh"
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Setup the gateway to the public network at worker node
+ command: /root/170-setup-gateway.sh
+ changed_when: false
+ delegate_to: node1
+
+- name: Add a route from opnfv to worker node for the public network
+ command: ip route add 172.24.4.0/24 via 192.168.122.4
+ changed_when: false
+
+# Deployment validation
+- name: Exercise the cloud
+ command: ./tools/deployment/developer/common/900-use-it.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
diff --git a/xci/installer/osh/playbooks/roles/prepare-kube-nodes-osh/tasks/main.yml b/xci/installer/osh/playbooks/roles/prepare-kube-nodes-osh/tasks/main.yml
new file mode 100644
index 00000000..ff0aff60
--- /dev/null
+++ b/xci/installer/osh/playbooks/roles/prepare-kube-nodes-osh/tasks/main.yml
@@ -0,0 +1,12 @@
+---
+- name: Install packages in kubernetes nodes
+ package:
+ name: "{{ packages }}"
+ state: present
+ changed_when: false
+ vars:
+ packages:
+ - ceph-common
+ - rbd-nbd
+ - apparmor
+ - nfs-common
diff --git a/xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/files/helm-serve.service b/xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/files/helm-serve.service
new file mode 100644
index 00000000..c3988d6f
--- /dev/null
+++ b/xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/files/helm-serve.service
@@ -0,0 +1,11 @@
+[Unit]
+Description=Helm Server
+After=network.target
+
+[Service]
+User=root
+Restart=always
+ExecStart=/usr/bin/helm serve
+
+[Install]
+WantedBy=multi-user.target
diff --git a/xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/tasks/main.yml b/xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/tasks/main.yml
new file mode 100644
index 00000000..72ae821f
--- /dev/null
+++ b/xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/tasks/main.yml
@@ -0,0 +1,130 @@
+---
+- name: Set kubernetes service account permissions
+ command: "kubectl create clusterrolebinding add-on-cluster-admin --clusterrole=cluster-admin --serviceaccount=kube-system:default"
+ changed_when: false
+
+- name: Set kubernetes node labels
+ command: "kubectl label nodes {{ item }} {{ node_labels[item]|join(' ') }}"
+ changed_when: false
+ with_items: "{{ groups['kube-node'] }}"
+
+- name: Create directories
+ file:
+ path: /root/{{ item }}
+ state: directory
+ with_items:
+ ['repos','tmp', '.helm/repository/local']
+
+- name: Move the bifrost clouds file out of the way so it does not take precedence
+ command: "mv .config/openstack/clouds.yaml .config/openstack/clouds.yaml.bifrost"
+ changed_when: false
+
+- name: Clone openstack-helm
+ git:
+ repo: "{{ osh_git_url }}"
+ dest: /root/repos/openstack-helm
+ version: "{{ osh_version }}"
+ update: true
+ force: true
+ register: git_clone
+ until: git_clone is success
+ retries: 2
+ delay: 5
+
+- name: Fix dns nameserver for openstack installation (mini flavor)
+ lineinfile:
+ path: /root/repos/openstack-helm/tools/gate/files/heat-public-net-deployment.yaml
+ regexp: '10\.96\.0\.10'
+ line: " - 10.233.0.3"
+
+- name: Fix dns nameserver for openstack installation (noha flavor)
+ lineinfile:
+ path: /root/repos/openstack-helm/tempest/values.yaml
+ regexp: 'dns_servers'
+ line: " dns_servers: 10.233.0.3"
+
+- name: Clone openstack-helm-infra
+ git:
+ repo: "{{ osh_infra_git_url }}"
+ dest: /root/repos/openstack-helm-infra
+ version: "{{ osh_infra_version }}"
+ update: true
+ force: true
+ register: git_clone
+ until: git_clone is success
+ retries: 2
+ delay: 5
+
+- name: Get helm
+ get_url:
+ url: "{{ osh_helm_binary_url }}/helm-{{ osh_helm_binary_version }}-linux-amd64.tar.gz"
+ dest: tmp
+
+- name: Uncompress helm package
+ command: "tar zxvf tmp/helm-{{ osh_helm_binary_version }}-linux-amd64.tar.gz --strip-components=1 -C tmp/"
+ changed_when: false
+ tags:
+ - skip_ansible_lint
+
+- name: Put helm in system binaries
+ copy:
+ src: tmp/helm
+ dest: /usr/bin/helm
+ remote_src: yes
+ mode: 0755
+
+- name: Create helm-serve service file
+ copy:
+ src: helm-serve.service
+ dest: "/etc/systemd/system/helm-serve.service"
+ mode: 0640
+
+- name: Start helm-serve service
+ service:
+ name: helm-serve
+ state: started
+ enabled: yes
+
+- name: Wait for helm-serve service to start
+ wait_for:
+ port: 8879
+ host: 127.0.0.1
+
+- name: Install pyhelm
+ pip:
+ name: pyhelm
+
+- name: Init helm
+ command: "helm init"
+ changed_when: false
+
+- name: Remove stable (external) service from helm
+ command: "helm repo remove stable"
+ changed_when: false
+
+- name: Add local repositories service to helm
+ command: "helm repo add local http://localhost:8879/charts"
+ changed_when: false
+
+- name: Make charts from infra
+ make:
+ chdir: /root/repos/openstack-helm-infra
+ target: "{{ item }}"
+ with_items:
+ - helm-toolkit
+ - ingress
+ - mariadb
+ - rabbitmq
+ - memcached
+ - ceph-mon
+ - ceph-osd
+ - ceph-client
+ - ceph-provisioners
+ - ceph-rgw
+ - openvswitch
+ - libvirt
+
+- name: Install packages
+ package:
+ name: "{{ required_packages }}"
+ state: present
diff --git a/xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/vars/main.yml b/xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/vars/main.yml
new file mode 100644
index 00000000..979c3329
--- /dev/null
+++ b/xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/vars/main.yml
@@ -0,0 +1,31 @@
+---
+required_packages:
+- patch
+- ipcalc
+- jq
+- nmap
+- bc
+
+node_labels:
+ node1:
+ - openstack-control-plane=enabled
+ - openstack-compute-node={{ (xci_flavor == 'mini') | ternary('enabled', 'disable') }}
+ - openstack-helm-node-class=primary
+ - openvswitch=enabled
+ - linuxbridge=enabled
+ - ceph-mon=enabled
+ - ceph-osd=enabled
+ - ceph-mds=enabled
+ - ceph-mgr=enabled
+ - ceph-rgw=enabled
+ node2:
+ - openstack-control-plane={{ (xci_flavor == 'noha') | ternary('disable', 'enabled') }}
+ - openstack-compute-node=enabled
+ - openstack-helm-node-class=secondary
+ - openvswitch=enabled
+ - linuxbridge=enabled
+ - ceph-mon=enabled
+ - ceph-osd=enabled
+ - ceph-mds=enabled
+ - ceph-mgr=enabled
+ - ceph-rgw=enabled
diff --git a/xci/installer/osh/playbooks/roles/prepare-osh/tasks/main.yml b/xci/installer/osh/playbooks/roles/prepare-osh/tasks/main.yml
new file mode 100644
index 00000000..453a815c
--- /dev/null
+++ b/xci/installer/osh/playbooks/roles/prepare-osh/tasks/main.yml
@@ -0,0 +1,33 @@
+---
+- name: Write new resolv.conf file
+ template:
+ src: resolv.conf.j2
+ dest: /etc/resolv.conf
+
+- name: Make resolv.conf immutable
+ shell: "chattr +i /etc/resolv.conf"
+ changed_when: false
+ args:
+ executable: /bin/bash
+ tags:
+ - skip_ansible_lint
+
+# TODO: Fetch the value from a file generated by the k8s deployer
+- name: Get kube service addresses
+ shell: "grep -r 'kube_service_addresses:' /root/releng-xci/.cache/repos/kubespray/inventory/opnfv/group_vars/k8s-cluster/k8s-cluster.yml | awk '{print $2}'"
+ changed_when: false
+ args:
+ executable: /bin/bash
+ register: kube_service_addresses
+ tags:
+ - skip_ansible_lint
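+# Example: with "kube_service_addresses: 10.233.0.0/18" in k8s-cluster.yml, the
+# registered kube_service_addresses.stdout is "10.233.0.0/18".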
+
+# This route allows the openstack client in the OPNFV VM to reach openstack
+- name: Update routing table with kube service addresses
+ shell: "ip route add {{ kube_service_addresses.stdout }} via 192.168.122.3 dev br-vlan onlink"
+ changed_when: false
+ args:
+ executable: /bin/bash
+ tags:
+ - skip_ansible_lint
+
diff --git a/xci/installer/osh/playbooks/roles/prepare-osh/templates/resolv.conf.j2 b/xci/installer/osh/playbooks/roles/prepare-osh/templates/resolv.conf.j2
new file mode 100644
index 00000000..ae706e02
--- /dev/null
+++ b/xci/installer/osh/playbooks/roles/prepare-osh/templates/resolv.conf.j2
@@ -0,0 +1,4 @@
+{{ dns_var }}
+{% for nameserver in external_dns_nameservers %}
+nameserver {{ nameserver }}
+{% endfor %}
diff --git a/xci/installer/osh/playbooks/roles/prepare-osh/vars/main.yml b/xci/installer/osh/playbooks/roles/prepare-osh/vars/main.yml
new file mode 100644
index 00000000..4d6f9cbb
--- /dev/null
+++ b/xci/installer/osh/playbooks/roles/prepare-osh/vars/main.yml
@@ -0,0 +1,7 @@
+---
+kube_dns_ip: "10.233.0.3"
+external_dns_nameservers:
+- '{{ kube_dns_ip }}'
+- '192.168.122.1'
+dns_var: "search svc.cluster.local cluster.local"
+
diff --git a/xci/opnfv-scenario-requirements.yml b/xci/opnfv-scenario-requirements.yml
index a43127b5..98abf528 100644
--- a/xci/opnfv-scenario-requirements.yml
+++ b/xci/opnfv-scenario-requirements.yml
@@ -28,6 +28,26 @@
- opensuse
- ubuntu
- centos
+ - installer: osh
+ flavors:
+ - mini
+ - noha
+ distros:
+ - ubuntu
+ - opensuse
+ - ubuntu-bionic
+
+- scenario: os-nosdn-osm
+ scm: git
+ src: https://gerrit.opnfv.org/gerrit/releng-xci-scenarios
+ version: master
+ role: scenarios/os-nosdn-osm/role/os-nosdn-osm
+ installers:
+ - installer: osa
+ flavors:
+ - mini
+ distros:
+ - ubuntu
- scenario: os-odl-nofeature
scm: git
@@ -59,6 +79,20 @@
- opensuse
- ubuntu
+- scenario: os-odl-sfc_osm
+ scm: git
+ src: https://gerrit.opnfv.org/gerrit/sfc
+ version: master
+ role: scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm
+ installers:
+ - installer: osa
+ flavors:
+ - ha
+ - mini
+ - noha
+ distros:
+ - ubuntu
+
- scenario: os-odl-bgpvpn
scm: git
src: https://gerrit.opnfv.org/gerrit/sdnvpn
@@ -123,6 +157,14 @@
- ubuntu
- centos
- opensuse
+ - installer: osh
+ flavors:
+ - mini
+ - noha
+ distros:
+ - ubuntu
+ - opensuse
+ - ubuntu-bionic
- scenario: k8-flannel-nofeature
scm: git
@@ -155,3 +197,19 @@
- ubuntu
- centos
- opensuse
+
+- scenario: k8-nosdn-istio
+ scm: git
+ src: https://gerrit.opnfv.org/gerrit/releng-xci-scenarios
+ version: master
+ role: scenarios/k8-nosdn-istio/role/k8-nosdn-istio
+ installers:
+ - installer: kubespray
+ flavors:
+ - ha
+ - mini
+ - noha
+ distros:
+ - ubuntu
+ - centos
+ - opensuse
diff --git a/xci/playbooks/configure-localhost.yml b/xci/playbooks/configure-localhost.yml
index 5b64c785..7aab18f3 100644
--- a/xci/playbooks/configure-localhost.yml
+++ b/xci/playbooks/configure-localhost.yml
@@ -46,21 +46,21 @@
repo: "{{ kubespray_git_url }}"
dest: "{{ xci_cache }}/repos/kubespray"
version: "{{ kubespray_version }}"
- when: installer_type == "kubespray"
+ when: installer_type in ["kubespray", "osh"]
- role: clone-repository
project: "openstack/openstack-ansible-haproxy_server"
repo: "{{ openstack_osa_haproxy_git_url }}"
dest: roles/haproxy_server
version: "{{ haproxy_version }}"
when:
- - installer_type == "kubespray"
+ - installer_type == "kubespray" or installer_type == "osh"
- role: clone-repository
project: "ansible-keepalived"
repo: "{{ keepalived_git_url }}"
dest: roles/keepalived
version: "{{ keepalived_version }}"
when:
- - installer_type == "kubespray"
+ - installer_type == "kubespray" or installer_type == "osh"
tasks:
- name: create log directory {{log_path}}
@@ -99,3 +99,18 @@
args:
executable: /bin/bash
creates: "{{ xci_path }}/.cache/xci.env"
+
+ # TODO: Create an Ansible variable for
+ # kube_service_addresses (10.233.0.0/18)
+ - name: Update iptables for kube service addresses
+ command: "iptables -t nat -I POSTROUTING 3 -s 192.168.122.0/24 -d 10.233.0.0/18 -j RETURN"
+ become: true
+ tags:
+ - skip_ansible_lint
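+ # The RETURN target exempts this traffic from the MASQUERADE rules libvirt
+ # adds for the 192.168.122.0/24 network, so packets towards the cluster
+ # service network keep their original source address.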
+
+ # Provide access to the external network (for tests)
+ - name: Update iptables for the external network
+ command: "iptables -t nat -I POSTROUTING 3 -s 192.168.122.0/24 -d 172.24.4.0/24 -j RETURN"
+ become: true
+ tags:
+ - skip_ansible_lint
diff --git a/xci/playbooks/dynamic_inventory.py b/xci/playbooks/dynamic_inventory.py
index bf9483da..ed63141c 100755
--- a/xci/playbooks/dynamic_inventory.py
+++ b/xci/playbooks/dynamic_inventory.py
@@ -21,6 +21,12 @@ import json
class XCIInventory(object):
+ """
+
+ Generates the ansible inventory based on the idf and pdf files provided
+ when executing the deployment script
+
+ """
def __init__(self):
super(XCIInventory, self).__init__()
self.inventory = {}
@@ -42,12 +48,12 @@ class XCIInventory(object):
self.opnfv_networks = {}
self.opnfv_networks['opnfv'] = {}
- self.opnfv_networks['opnfv']['admin'] = {}
- self.opnfv_networks['opnfv']['admin']['address'] = '172.29.236.10/22'
+ self.opnfv_networks['opnfv']['mgmt'] = {}
+ self.opnfv_networks['opnfv']['mgmt']['address'] = '172.29.236.10/22'
self.opnfv_networks['opnfv']['public'] = {}
self.opnfv_networks['opnfv']['public']['address'] = '192.168.122.2/24'
self.opnfv_networks['opnfv']['public']['gateway'] = '192.168.122.1'
- self.opnfv_networks['opnfv']['public']['dns'] = '192.168.122.1'
+ self.opnfv_networks['opnfv']['public']['dns'] = ['192.168.122.1']
self.opnfv_networks['opnfv']['private'] = {}
self.opnfv_networks['opnfv']['private']['address'] = '172.29.240.10/22'
self.opnfv_networks['opnfv']['storage'] = {}
@@ -74,8 +80,10 @@ class XCIInventory(object):
self.args = parser.parse_args()
def read_pdf_idf(self):
- pdf_file = os.path.dirname(os.path.realpath(__file__)) + "/../var/pdf.yml"
- idf_file = os.path.dirname(os.path.realpath(__file__)) + "/../var/idf.yml"
+ pdf_file = os.environ['PDF']
+ idf_file = os.environ['IDF']
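+ # PDF and IDF paths come from the environment; the deploy scripts export them
+ # before invoking ansible-playbook (see xci/installer/osh/deploy.sh).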
+ opnfv_file = os.path.dirname(os.path.realpath(__file__)) + "/../var/opnfv_vm_pdf.yml"
+ opnfv_idf_file = os.path.dirname(os.path.realpath(__file__)) + "/../var/opnfv_vm_idf.yml"
nodes = []
host_networks = {}
@@ -93,19 +101,34 @@ class XCIInventory(object):
print(e)
sys.exit(1)
- valid_host = (host for host in idf['xci'][self.installer]['nodes_roles'] \
+ with open(opnfv_file) as f:
+ try:
+ opnfv_pdf = yaml.safe_load(f)
+ except yaml.YAMLError as e:
+ print(e)
+ sys.exit(1)
+
+ with open(opnfv_idf_file) as f:
+ try:
+ opnfv_idf = yaml.safe_load(f)
+ except yaml.YAMLError as e:
+ print(e)
+ sys.exit(1)
+
+
+ valid_host = (host for host in idf['xci']['installers'][self.installer]['nodes_roles'] \
if host in idf['xci']['flavors'][self.flavor] \
and host != 'opnfv')
for host in valid_host:
nodes.append(host)
- hostname = idf['xci'][self.installer]['hostnames'][host]
+ hostname = idf['xci']['installers'][self.installer]['hostnames'][host]
self.add_host(hostname)
- for role in idf['xci'][self.installer]['nodes_roles'][host]:
+ for role in idf['xci']['installers'][self.installer]['nodes_roles'][host]:
self.add_to_group(role, hostname)
- pdf_host_info = filter(lambda x: x['name'] == host, pdf['nodes'])[0]
- native_vlan_if = filter(lambda x: x['vlan'] == 'native', pdf_host_info['interfaces'])
+ pdf_host_info = list(filter(lambda x: x['name'] == host, pdf['nodes']))[0]
+ native_vlan_if = list(filter(lambda x: x['vlan'] == 'native', pdf_host_info['interfaces']))
self.add_hostvar(hostname, 'ansible_host', native_vlan_if[0]['address'])
self.add_hostvar(hostname, 'ip', native_vlan_if[0]['address'])
host_networks[hostname] = {}
@@ -117,15 +140,41 @@ class XCIInventory(object):
if 'gateway' in ndata.keys():
host_networks[hostname][network]['gateway'] = str(ndata['gateway']) + "/" + str(ndata['mask'])
if 'dns' in ndata.keys():
- host_networks[hostname][network]['dns'] = str(ndata['dns'])
+ host_networks[hostname][network]['dns'] = []
+ for d in ndata['dns']:
+ host_networks[hostname][network]['dns'].append(str(d))
+
+ # Get also vlan and mac_address from pdf
+ host_networks[hostname][network]['mac_address'] = str(pdf_host_info['interfaces'][int(network_interface_num)]['mac_address'])
+ host_networks[hostname][network]['vlan'] = str(pdf_host_info['interfaces'][int(network_interface_num)]['vlan'])
+
+ # Get also vlan and mac_address from opnfv_pdf
+ mgmt_idf_index = int(opnfv_idf['opnfv_vm_idf']['net_config']['mgmt']['interface'])
+ opnfv_mgmt = opnfv_pdf['opnfv_vm_pdf']['interfaces'][mgmt_idf_index]
+ admin_idf_index = int(opnfv_idf['opnfv_vm_idf']['net_config']['admin']['interface'])
+ opnfv_public = opnfv_pdf['opnfv_vm_pdf']['interfaces'][admin_idf_index]
+ self.opnfv_networks['opnfv']['mgmt']['mac_address'] = str(opnfv_mgmt['mac_address'])
+ self.opnfv_networks['opnfv']['mgmt']['vlan'] = str(opnfv_mgmt['vlan'])
+ self.opnfv_networks['opnfv']['public']['mac_address'] = str(opnfv_public['mac_address'])
+ self.opnfv_networks['opnfv']['public']['vlan'] = str(opnfv_public['vlan'])
+
+ # Add the interfaces from idf
+
host_networks.update(self.opnfv_networks)
self.add_groupvar('all', 'host_info', host_networks)
+ if 'deployment_host_interfaces' in idf['xci']['installers'][self.installer]['network']:
+ mgmt_idf_index = int(opnfv_idf['opnfv_vm_idf']['net_config']['mgmt']['interface'])
+ admin_idf_index = int(opnfv_idf['opnfv_vm_idf']['net_config']['admin']['interface'])
+ self.add_hostvar('deployment_host', 'network_interface_admin', idf['xci']['installers'][self.installer]['network']['deployment_host_interfaces'][admin_idf_index])
+ self.add_hostvar('deployment_host', 'network_interface_mgmt', idf['xci']['installers'][self.installer]['network']['deployment_host_interfaces'][mgmt_idf_index])
+
# Now add the additional groups
- for parent in idf['xci'][self.installer]['groups'].keys():
- map(lambda x: self.add_group(x, parent), idf['xci'][self.installer]['groups'][parent])
+ for parent in idf['xci']['installers'][self.installer]['groups'].keys():
+ for host in idf['xci']['installers'][self.installer]['groups'][parent]:
+ self.add_group(host, parent)
# Read additional group variables
self.read_additional_group_vars()
diff --git a/xci/playbooks/manage-ssl-certs.yml b/xci/playbooks/manage-ssl-certs.yml
deleted file mode 100644
index d0c5c518..00000000
--- a/xci/playbooks/manage-ssl-certs.yml
+++ /dev/null
@@ -1,32 +0,0 @@
-# SPDX-license-identifier: Apache-2.0
-##############################################################################
-# Copyright (c) 2018 SUSE Linux GmbH and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-- name: Install required pip packages for SSL
- pip:
- name: pyOpenSSL
- state: present
- extra_args: "{{ extra_args | default(omit) }}"
-
-- name: Generate XCI private key
- openssl_privatekey:
- path: /etc/ssl/private/xci.key
- size: 2048
-
-- name: Generate XCI certificate request
- openssl_csr:
- privatekey_path: /etc/ssl/private/xci.key
- path: /etc/ssl/private/xci.csr
- common_name: "{{ xci_ssl_subject }}"
-
-- name: Generate XCI self signed certificate
- openssl_certificate:
- path: /etc/ssl/certs/xci.crt
- privatekey_path: /etc/ssl/private/xci.key
- csr_path: /etc/ssl/private/xci.csr
- provider: selfsigned
- selfsigned_not_after: 20800101000000Z
diff --git a/xci/playbooks/prepare-functest.yml b/xci/playbooks/prepare-tests.yml
index a4cb664b..1a1935aa 100644
--- a/xci/playbooks/prepare-functest.yml
+++ b/xci/playbooks/prepare-tests.yml
@@ -13,11 +13,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-- name: Prepare the environment for functest
+- name: Prepare the environment for testing
hosts: opnfv
user: root
vars_files:
- ../var/opnfv.yml
- ../installer/osa/files/openstack_services.yml
roles:
- - role: "prepare-functest"
+ - role: "prepare-tests"
diff --git a/xci/playbooks/roles/bootstrap-host/tasks/network_debian.yml b/xci/playbooks/roles/bootstrap-host/tasks/network_debian.yml
index 3cac1e22..176c7eb1 100644
--- a/xci/playbooks/roles/bootstrap-host/tasks/network_debian.yml
+++ b/xci/playbooks/roles/bootstrap-host/tasks/network_debian.yml
@@ -45,14 +45,54 @@
- { name: "{{ ansible_local.xci.network.xci_interface }}.10", vlan_id: 10 }
- { name: "{{ ansible_local.xci.network.xci_interface }}.30", vlan_id: 30 }
- { name: "{{ ansible_local.xci.network.xci_interface }}.20", vlan_id: 20 }
- - { name: "br-mgmt", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}.10", network: "{{ host_info[inventory_hostname].admin }}" }
+ - { name: "br-mgmt", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}.10", network: "{{ host_info[inventory_hostname].mgmt }}" }
- { name: "br-vxlan", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}.30", network: "{{ host_info[inventory_hostname].private }}" }
- { name: "br-vlan", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}", network: "{{ host_info[inventory_hostname].public }}" }
- { name: "br-storage", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}.20", network: "{{ host_info[inventory_hostname].storage }}" }
loop_control:
label: "{{ item.name }}"
+ when: baremetal | bool != true
+
+
+- name: "Configure baremetal networking for blade: {{ inventory_hostname }}"
+ template:
+ src: "{{ installer_type }}/debian.interface.j2"
+ dest: "/etc/network/interfaces.d/{{ item.name }}.cfg"
+ with_items:
+ - { name: "{{ admin_interface }}", network: "{{ host_info[inventory_hostname].admin }}" }
+ - { name: "{{ mgmt_interface }}", vlan_id: "{{ (mgmt_vlan == 'native') | ternary(omit, mgmt_vlan) }}" }
+ - { name: "{{ storage_interface }}", vlan_id: "{{ (storage_vlan == 'native') | ternary(omit, storage_vlan) }}" }
+ - { name: "{{ public_interface }}", vlan_id: "{{ (public_vlan == 'native') | ternary(omit, public_vlan) }}" }
+ - { name: "{{ private_interface }}", vlan_id: "{{ (private_vlan == 'native') | ternary(omit, private_vlan) }}" }
+ - { name: "br-mgmt", bridge_ports: "{{ mgmt_interface }}", network: "{{ host_info[inventory_hostname].mgmt }}" }
+ - { name: "br-vxlan", bridge_ports: "{{ private_interface }}", network: "{{ host_info[inventory_hostname].private }}" }
+ - { name: "br-vlan", bridge_ports: "{{ public_interface }}", network: "{{ host_info[inventory_hostname].public }}" }
+ - { name: "br-storage", bridge_ports: "{{ storage_interface }}", network: "{{ host_info[inventory_hostname].storage }}" }
+ loop_control:
+ label: "{{ item.name }}"
+ when:
+ - baremetal | bool == true
+ - "'opnfv' not in inventory_hostname"
+
+- name: "Configure baremetal networking for VM: {{ inventory_hostname }}"
+ template:
+ src: "{{ installer_type }}/debian.interface.j2"
+ dest: "/etc/network/interfaces.d/{{ item.name }}.cfg"
+ with_items:
+ - { name: "{{ mgmt_interface }}", vlan_id: "{{ (mgmt_vlan == 'native') | ternary(omit, mgmt_vlan) }}" }
+ - { name: "{{ public_interface }}", vlan_id: "{{ (public_vlan == 'native') | ternary(omit, public_vlan) }}" }
+ - { name: "br-mgmt", bridge_ports: "{{ mgmt_interface }}", network: "{{ host_info[inventory_hostname].mgmt }}" }
+ - { name: "br-vlan", bridge_ports: "{{ public_interface }}", network: "{{ host_info[inventory_hostname].public }}" }
+ loop_control:
+ label: "{{ item.name }}"
+ when:
+ - baremetal | bool == true
+ - "'opnfv' in inventory_hostname"
- name: restart network service
- shell: "/sbin/ifconfig {{ ansible_local.xci.network.xci_interface }} 0 && /sbin/ifdown -a && /sbin/ifup -a"
+ shell: "/sbin/ip addr flush dev {{ item }}; /sbin/ifdown -a; /sbin/ifup -a"
async: 15
poll: 0
+ with_items:
+ - "{{ public_interface }}"
+ - "{{ mgmt_interface }}"
diff --git a/xci/playbooks/roles/bootstrap-host/tasks/network_redhat.yml b/xci/playbooks/roles/bootstrap-host/tasks/network_redhat.yml
index c360f15d..288fdf65 100644
--- a/xci/playbooks/roles/bootstrap-host/tasks/network_redhat.yml
+++ b/xci/playbooks/roles/bootstrap-host/tasks/network_redhat.yml
@@ -18,7 +18,7 @@
- { name: "{{ ansible_local.xci.network.xci_interface }}.20", bridge: "br-storage", vlan_id: 20 }
- { name: "{{ ansible_local.xci.network.xci_interface }}.30", bridge: "br-vxlan" , vlan_id: 30 }
- { name: "br-vlan" , network: "{{ host_info[inventory_hostname].public }}" }
- - { name: "br-mgmt" , network: "{{ host_info[inventory_hostname].admin }}" }
+ - { name: "br-mgmt" , network: "{{ host_info[inventory_hostname].mgmt }}" }
- { name: "br-storage", network: "{{ host_info[inventory_hostname].storage }}" }
- { name: "br-vxlan" , network: "{{ host_info[inventory_hostname].private }}" }
loop_control:
diff --git a/xci/playbooks/roles/bootstrap-host/tasks/network_suse.yml b/xci/playbooks/roles/bootstrap-host/tasks/network_suse.yml
index e2b5aa4a..a8f1bf59 100644
--- a/xci/playbooks/roles/bootstrap-host/tasks/network_suse.yml
+++ b/xci/playbooks/roles/bootstrap-host/tasks/network_suse.yml
@@ -17,12 +17,52 @@
- { name: "{{ ansible_local.xci.network.xci_interface }}.10", vlan_id: 10 }
- { name: "{{ ansible_local.xci.network.xci_interface }}.30", vlan_id: 30 }
- { name: "{{ ansible_local.xci.network.xci_interface }}.20", vlan_id: 20 }
- - { name: "br-mgmt", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}.10", network: "{{ host_info[inventory_hostname].admin }}" }
+ - { name: "br-mgmt", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}.10", network: "{{ host_info[inventory_hostname].mgmt }}" }
- { name: "br-vxlan", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}.30", network: "{{ host_info[inventory_hostname].private }}" }
- { name: "br-vlan", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}", network: "{{ host_info[inventory_hostname].public }}" }
- { name: "br-storage", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}.20", network: "{{ host_info[inventory_hostname].storage }}" }
loop_control:
label: "{{ item.name }}"
+ when: baremetal | bool != true
+
+- name: "Configure baremetal networking for blade: {{ inventory_hostname }}"
+ template:
+ src: "{{ installer_type }}/{{ ansible_os_family | lower }}.interface.j2"
+ dest: "/etc/sysconfig/network/ifcfg-{{ item.name }}"
+ with_items:
+ - { name: "{{ admin_interface }}", network: "{{ host_info[inventory_hostname].admin }}" }
+ - { name: "{{ mgmt_interface }}", vlan_id: "{{ (mgmt_vlan == 'native') | ternary(omit, mgmt_vlan) }}" }
+ - { name: "{{ storage_interface }}", vlan_id: "{{ (storage_vlan == 'native') | ternary(omit, storage_vlan) }}" }
+ - { name: "{{ public_interface }}", vlan_id: "{{ (public_vlan == 'native') | ternary(omit, public_vlan) }}" }
+ - { name: "{{ private_interface }}", vlan_id: "{{ (private_vlan == 'native') | ternary(omit, private_vlan) }}" }
+ - { name: "br-mgmt", bridge_ports: "{{ mgmt_interface }}", network: "{{ host_info[inventory_hostname].mgmt }}" }
+ - { name: "br-vxlan", bridge_ports: "{{ private_interface }}", network: "{{ host_info[inventory_hostname].private }}" }
+ - { name: "br-vlan", bridge_ports: "{{ public_interface }}", network: "{{ host_info[inventory_hostname].public }}" }
+ - { name: "br-storage", bridge_ports: "{{ storage_interface }}", network: "{{ host_info[inventory_hostname].storage }}" }
+ loop_control:
+ label: "{{ item.name }}"
+ when:
+ - baremetal | bool == true
+ - "'opnfv' not in inventory_hostname"
+
+- name: "Configure baremetal networking for VM: {{ inventory_hostname }}"
+ template:
+ src: "{{ installer_type }}/{{ ansible_os_family | lower }}.interface.j2"
+ dest: "/etc/sysconfig/network/ifcfg-{{ item.name }}"
+ with_items:
+ - { name: "{{ mgmt_interface }}", vlan_id: "{{ (mgmt_vlan == 'native') | ternary(omit, mgmt_vlan) }}" }
+ - { name: "{{ mgmt_interface }}.30", vlan_id: 30 }
+ - { name: "{{ mgmt_interface }}.20", vlan_id: 20 }
+ - { name: "{{ public_interface }}", vlan_id: "{{ (public_vlan == 'native') | ternary(omit, public_vlan) }}" }
+ - { name: "br-mgmt", bridge_ports: "{{ mgmt_interface }}", network: "{{ host_info[inventory_hostname].mgmt }}" }
+ - { name: "br-vlan", bridge_ports: "{{ public_interface }}", network: "{{ host_info[inventory_hostname].public }}" }
+ - { name: "br-vxlan", bridge_ports: "{{ mgmt_interface }}.30", network: "{{ host_info[inventory_hostname].private }}" }
+ - { name: "br-storage", bridge_ports: "{{ mgmt_interface }}.20", network: "{{ host_info[inventory_hostname].storage }}" }
+ loop_control:
+ label: "{{ item.name }}"
+ when:
+ - baremetal | bool == true
+ - "'opnfv' in inventory_hostname"
- name: Add postup/postdown scripts on SUSE
copy:
@@ -33,7 +73,7 @@
- name: Configure static DNS on SUSE
lineinfile:
regexp: '^NETCONFIG_DNS_STATIC_SERVERS=.*'
- line: "NETCONFIG_DNS_STATIC_SERVERS={{ host_info[inventory_hostname]['public']['dns'] }}"
+ line: "NETCONFIG_DNS_STATIC_SERVERS=\"{{ host_info[inventory_hostname]['public']['dns'] | join(' ') }}\""
path: "/etc/sysconfig/network/config"
state: present
when: host_info[inventory_hostname]['public']['dns'] is defined
diff --git a/xci/playbooks/roles/bootstrap-host/templates/osa/debian.interface.j2 b/xci/playbooks/roles/bootstrap-host/templates/osa/debian.interface.j2
index f9e4d8df..2f976002 100644
--- a/xci/playbooks/roles/bootstrap-host/templates/osa/debian.interface.j2
+++ b/xci/playbooks/roles/bootstrap-host/templates/osa/debian.interface.j2
@@ -33,7 +33,7 @@ iface {{ item.name }} inet static
gateway {{ item.network.gateway | ipaddr('address') }}
{% endif %}
{% if item.network is defined and item.network.dns is defined %}
- dns-nameservers {{ item.network.dns }}
+ dns-nameservers {{ item.network.dns | join(' ') }}
{% endif %}
{% endif %}
diff --git a/xci/playbooks/roles/bootstrap-host/templates/osa/redhat.interface.j2 b/xci/playbooks/roles/bootstrap-host/templates/osa/redhat.interface.j2
index 3a51eb86..525686d9 100644
--- a/xci/playbooks/roles/bootstrap-host/templates/osa/redhat.interface.j2
+++ b/xci/playbooks/roles/bootstrap-host/templates/osa/redhat.interface.j2
@@ -21,6 +21,6 @@ IPADDR={{ item.network.address }}
GATEWAY="{{ host_info[inventory_hostname]['public']['gateway'] | ipaddr('address') }}"
{% endif %}
{% if item.network is defined and item.network.dns is defined %}
-DNS="{{ host_info[inventory_hostname]['public']['dns'] }}"
+DNS="{{ host_info[inventory_hostname]['public']['dns'] | join(' ') }}"
{% endif %}
{% endif %}
diff --git a/xci/playbooks/roles/bootstrap-host/templates/osa/suse.interface.j2 b/xci/playbooks/roles/bootstrap-host/templates/osa/suse.interface.j2
index 70811a09..7c2929d6 100644
--- a/xci/playbooks/roles/bootstrap-host/templates/osa/suse.interface.j2
+++ b/xci/playbooks/roles/bootstrap-host/templates/osa/suse.interface.j2
@@ -1,8 +1,7 @@
STARTMODE='auto'
BOOTPROTO='static'
{% if item.vlan_id is defined %}
-ETHERDEVICE={{ ansible_default_ipv4.interface }}
-VLAN_ID={{ item.vlan_id }}
+ETHERDEVICE={{ item.name.split('.')[0] }}
{% endif %}
{% if item.bridge_ports is defined %}
BRIDGE='yes'
diff --git a/xci/playbooks/roles/bootstrap-host/templates/osh b/xci/playbooks/roles/bootstrap-host/templates/osh
new file mode 120000
index 00000000..f820fd11
--- /dev/null
+++ b/xci/playbooks/roles/bootstrap-host/templates/osh
@@ -0,0 +1 @@
+osa \ No newline at end of file
diff --git a/xci/playbooks/roles/bootstrap-host/vars/main.yml b/xci/playbooks/roles/bootstrap-host/vars/main.yml
new file mode 100644
index 00000000..1730ad57
--- /dev/null
+++ b/xci/playbooks/roles/bootstrap-host/vars/main.yml
@@ -0,0 +1,70 @@
+---
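+# The *_interface variables below resolve each network's device name by
+# matching the MAC address from the pdf against the Ansible interface facts,
+# appending ".<vlan>" unless the network uses the native VLAN.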
+# admin network information
+admin_mac: "{{ host_info[inventory_hostname].admin.mac_address }}"
+admin_interface: >-
+ {%- for x in (ansible_interfaces | map('regex_replace', '-', '_') | map('regex_replace', '^', 'ansible_') | map('extract', hostvars[inventory_hostname]) | selectattr('macaddress','defined')) -%}
+ {%- if x.macaddress == admin_mac -%}
+ {%- if admin_vlan == 'native' -%}
+ {{ x.device }}
+ {%- else -%}
+ {{ x.device }}.{{ admin_vlan }}
+ {%- endif -%}
+ {%- endif -%}
+ {%- endfor -%}
+admin_vlan: "{{ host_info[inventory_hostname].admin.vlan }}"
+
+# mgmt network information
+mgmt_mac: "{{ host_info[inventory_hostname].mgmt.mac_address }}"
+mgmt_interface: >-
+ {%- for x in (ansible_interfaces | map('regex_replace', '-', '_') | map('regex_replace', '^', 'ansible_') | map('extract', hostvars[inventory_hostname]) | selectattr('macaddress','defined')) -%}
+ {%- if x.macaddress == mgmt_mac -%}
+ {%- if mgmt_vlan == 'native' -%}
+ {{ x.device }}
+ {%- else -%}
+ {{ x.device }}.{{ mgmt_vlan }}
+ {%- endif -%}
+ {%- endif -%}
+ {%- endfor -%}
+mgmt_vlan: "{{ host_info[inventory_hostname].mgmt.vlan }}"
+
+# storage network information
+storage_mac: "{{ host_info[inventory_hostname].storage.mac_address }}"
+storage_interface: >-
+ {%- for x in (ansible_interfaces | map('regex_replace', '-', '_') | map('regex_replace', '^', 'ansible_') | map('extract', hostvars[inventory_hostname]) | selectattr('macaddress','defined')) -%}
+ {%- if x.macaddress == storage_mac -%}
+ {%- if storage_vlan == 'native' -%}
+ {{ x.device }}
+ {%- else -%}
+ {{ x.device }}.{{ storage_vlan }}
+ {%- endif -%}
+ {%- endif -%}
+ {%- endfor -%}
+storage_vlan: "{{ host_info[inventory_hostname].storage.vlan }}"
+
+# public vlan network information
+public_mac: "{{ host_info[inventory_hostname].public.mac_address }}"
+public_interface: >-
+ {%- for x in (ansible_interfaces | map('regex_replace', '-', '_') | map('regex_replace', '^', 'ansible_') | map('extract', hostvars[inventory_hostname]) | selectattr('macaddress','defined')) -%}
+ {%- if x.macaddress == public_mac -%}
+ {%- if public_vlan == 'native' -%}
+ {{ x.device }}
+ {%- else -%}
+ {{ x.device }}.{{ public_vlan }}
+ {%- endif -%}
+ {%- endif -%}
+ {%- endfor -%}
+public_vlan: "{{ host_info[inventory_hostname].public.vlan }}"
+
+# private vxlan network information
+private_mac: "{{ host_info[inventory_hostname].private.mac_address }}"
+private_interface: >-
+ {%- for x in (ansible_interfaces | map('regex_replace', '-', '_') | map('regex_replace', '^', 'ansible_') | map('extract', hostvars[inventory_hostname]) | selectattr('macaddress','defined')) -%}
+ {%- if x.macaddress == private_mac -%}
+ {%- if private_vlan == 'native' -%}
+ {{ x.device }}
+ {%- else -%}
+        {{ x.device }}.{{ private_vlan }}
+ {%- endif -%}
+ {%- endif -%}
+ {%- endfor -%}
+private_vlan: "{{ host_info[inventory_hostname].private.vlan }}"
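Note: each *_interface value above resolves a pdf MAC address to a live device
name by scanning the gathered Ansible facts. A minimal, standalone sketch of
the same lookup (the MAC below is hypothetical):

    - hosts: localhost
      gather_facts: true
      vars:
        lookup_mac: "52:54:00:aa:bb:cc"  # hypothetical MAC to resolve
      tasks:
        - name: Resolve the device that owns lookup_mac
          debug:
            msg: >-
              {%- for x in (ansible_interfaces
                  | map('regex_replace', '-', '_')
                  | map('regex_replace', '^', 'ansible_')
                  | map('extract', hostvars[inventory_hostname])
                  | selectattr('macaddress', 'defined')) -%}
              {%- if x.macaddress == lookup_mac -%}{{ x.device }}{%- endif -%}
              {%- endfor -%}

The dash-to-underscore replacement mirrors how Ansible names its per-interface
fact variables (e.g. br-admin is reported as ansible_br_admin).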
diff --git a/xci/playbooks/roles/create-vm-nodes/README.md b/xci/playbooks/roles/create-nodes/README.md
index d96a2981..bf190296 100644
--- a/xci/playbooks/roles/create-vm-nodes/README.md
+++ b/xci/playbooks/roles/create-nodes/README.md
@@ -1,14 +1,16 @@
-create-vm-nodes
+create-nodes
================
-This role creates the XCI VMs used to deploy scenarios. It is a branch from the
-bifrost role "bifrost-create-vm-nodes":
+This role creates all the nodes required for the XCI deployment. In a baremetal
+deployment, it creates the OPNFV VM and provisions the physical servers. In a
+non-baremetal deployment, it creates the OPNFV VM and the rest of the VMs used
+to deploy scenarios. It is based on the bifrost role:
https://github.com/openstack/bifrost/tree/master/playbooks/roles/bifrost-create-vm-nodes
-It creates the VMs based on the pdf and idf document which describes the
-characteristics of the VMs or physical servers. For more information check the
-spec:
+It creates the VMs or provisions the physical servers based on the pdf and idf
+documents, which describe the characteristics of the VMs or physical servers.
+For more information, check the spec:
https://github.com/opnfv/releng-xci/blob/master/docs/specs/infra_manager.rst
@@ -19,9 +21,9 @@ Flow
The script xci/infra/bifrost/scripts/bifrost-provision.sh will call the
playbook that starts executing the role:
-xci-create-vms.yaml
+xci-setup-nodes.yaml
-Note that at this stage the pdf and the opnfv_vm.yml are loaded.
+Note that at this stage the pdf and the opnfv_pdf_vm.yml are loaded.
Some distro specific tasks related to variables are done and then the
prepare_libvirt playbook is run. This playbook, as the name says,
@@ -32,7 +34,9 @@ the data and finally dump it all into the baremetal_json_file which will be
read by bifrost in the subsequent role.
The opnfv vm and the rest of vms get created using the xml libvirt template,
-which gets filled with the pdf and opnfv_vm.yml variables.
+which gets filled with the pdf and opnfv_pdf_vm.yml variables. In a
+baremetal deployment, nodes_json_data is filled in by the
+baremetalhoststojson.yml playbook, which essentially reads the pdf info.
Finally nodes_json_data is dumped.
@@ -49,18 +53,9 @@ The following packages are required and ensured to be present:
Warning
-------
-- It is currently assumed that the OS for the VM will be installed in the first
-disk of the node described by the pdf. That's why there is a [0] in:
-
- - name: create volume for vm
- command: >
- virsh --connect {{ vm_libvirt_uri }}
- vol-create-as {{ node_storage_pool }} {{ vm_name }}.qcow2
- {{ item.disks[0].disk_capacity }}
- --format qcow2 {{ prealloc|default("") }}
-
- It is assumed that the opnfv VM characteristics are not described in the pdf
-but in a similar document called opnfv_vm.yml
+but in a similar document called opnfv_pdf_vm.yml. There is also an idf
+document, opnfv_idf_vm.yml.
- All references to csv from bifrost-create-vm-nodes were removed
@@ -76,7 +71,7 @@ vm_disk_cache: Disk cache mode to use by VMs disk.
if that is not set, to 'writeback'.
node_names: Space-separated names for nodes to be created.
- Defaults to shell variable 'NODE_NAMES'.
+ It is taken from the hostnames variable in idf.
If not set, VM names will be autogenerated.
Note that independent on the number of names in this list,
at most 'test_vm_num_nodes' VMs will be created.
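For orientation, a trimmed node entry of the kind this role reads from the pdf
could look as follows (all values are illustrative; the authoritative schema
lives in the pod descriptor files under xci/var/):

    nodes:
      - name: node1
        node:
          arch: x86_64
          cpus: 2
          memory: 128G
          cpu_cflags: haswell
        disks:
          - name: disk1
            disk_capacity: 100G
        interfaces:
          - name: nic0
            mac_address: "52:54:00:aa:bb:01"  # hypothetical
            address: 192.168.122.3
            vlan: native
        remote_management:
          type: ipmi
          address: 172.16.2.12
          user: admin      # hypothetical
          pass: password   # hypothetical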
diff --git a/xci/playbooks/roles/create-vm-nodes/defaults/main.yml b/xci/playbooks/roles/create-nodes/defaults/main.yml
index 6ac266a5..889f9c10 100644
--- a/xci/playbooks/roles/create-vm-nodes/defaults/main.yml
+++ b/xci/playbooks/roles/create-nodes/defaults/main.yml
@@ -4,16 +4,16 @@ baremetal_json_file: '/tmp/baremetal.json'
# We collect these parameters from the pdf
vm_nic: "virtio"
-vm_groups: {}
-vm_default_groups: "{{ lookup('env', 'DEFAULT_HOST_GROUPS').split() | default(['baremetal'], true) }}"
vm_disk_cache: unsafe
-node_names: "{{ lookup('env', 'NODE_NAMES').split() }}"
+node_groups: {}
+node_default_groups: "{{ lookup('env', 'DEFAULT_HOST_GROUPS').split() | default(['baremetal'], true) }}"
+
+network_bridge_admin: 'br-admin'
+network_bridge_mgmt: 'br-mgmt'
+
+vm_network_admin: "{{ lookup('env', 'VM_NET_BRIDGE') | default('admin', true) }}"
+vm_network_mgmt: "{{ lookup('env', 'VM_NET_BRIDGE_MGMT') | default('mgmt', true) }}"
-# NOTE(pas-ha) name and default are chosen to be the same
-# as in 'bifrost-ironic-install' role
-network_interface: "virbr0"
-# NOTE(pas-ha) these correspond to settings for the libvirt network created by default
-vm_network: "{{ lookup('env', 'VM_NET_BRIDGE') | default('default', true) }}"
node_network_netmask: "255.255.255.0"
node_storage_pool: "{{ lookup('env', 'LIBVIRT_STORAGE_POOL') | default('default', true) }}"
@@ -25,3 +25,7 @@ vm_emulator: "/usr/bin/qemu-system-x86_64"
vm_libvirt_uri: "{{ lookup('env', 'LIBVIRT_CONNECT_URI') | default('qemu:///system', true) }}"
opnfv_image_path: "/var/lib/libvirt/images"
+
+vms_to_create: "{{ (baremetal | bool) | ternary([opnfv_vm_pdf], [opnfv_vm_pdf] + nodes) }}"
+baremetal_nodes: "{{ (baremetal | bool) | ternary(nodes, omit) }}"
+libvirt_networks: "{{ (baremetal | bool) | ternary([vm_network_admin,vm_network_mgmt],[vm_network_admin]) }}"
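The ternary-driven defaults above are what switch the role between the
baremetal and fully-virtual modes; a self-contained sketch of the behaviour
(inventory values are illustrative):

    - hosts: localhost
      gather_facts: false
      vars:
        baremetal: true
        opnfv_vm_pdf: {name: opnfv}
        nodes:
          - {name: node1}
          - {name: node2}
        vms_to_create: "{{ (baremetal | bool) | ternary([opnfv_vm_pdf], [opnfv_vm_pdf] + nodes) }}"
      tasks:
        - name: Show which machines would be created as VMs
          debug:
            var: vms_to_create  # only the opnfv VM when baremetal is true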
diff --git a/xci/playbooks/roles/create-nodes/files/virtualbmc.conf b/xci/playbooks/roles/create-nodes/files/virtualbmc.conf
new file mode 100644
index 00000000..f8351dc1
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/files/virtualbmc.conf
@@ -0,0 +1,3 @@
+[log]
+logfile: /var/log/vbmc.log
+debug: true
diff --git a/xci/playbooks/roles/create-nodes/tasks/baremetalhoststojson.yml b/xci/playbooks/roles/create-nodes/tasks/baremetalhoststojson.yml
new file mode 100644
index 00000000..ef6ec345
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/tasks/baremetalhoststojson.yml
@@ -0,0 +1,91 @@
+---
+# Copyright 2018, SUSE Linux GmbH
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This playbook builds the json file with information about the baremetal nodes,
+# which is read by ironic to start PXE booting.
+
+
+- name: BAREMETAL - Create file for static ip
+ file:
+ path: /tmp/baremetalstaticips
+ state: touch
+ group: root
+ owner: root
+ mode: 0644
+
+- name: "Generating the json describing baremetal nodes"
+ block:
+
+ - set_fact:
+ node_name: "{{ idf.kubespray.hostnames[item.name] }}"
+ when: installer_type == "kubespray"
+
+ - set_fact:
+ node_name: "{{ idf.osa.hostnames[item.name] }}"
+ when: installer_type == "osa"
+
+ - set_fact:
+ node_name: "{{ idf.osh.hostnames[item.name] }}"
+ when: installer_type == "osh"
+
+ - set_fact:
+ host_group: "{{ node_default_groups }}"
+
+ - set_fact:
+ host_group: "{{ node_default_groups | union(node_groups[node_name]) }}"
+ when: node_groups[node_name] is defined
+
+ - name: BAREMETAL - Fetch the ip
+ set_fact:
+ admin_ip: "{{ item.interfaces[idf.net_config.admin.interface].address }}"
+
+ - name: BAREMETAL - Fetch the mac
+ set_fact:
+ admin_mac: "{{ item.interfaces[idf.net_config.admin.interface].mac_address }}"
+
+ - name: BAREMETAL - set the json entry for baremetal nodes
+ set_fact:
+ node_data:
+ name: "{{ node_name }}"
+ uuid: "{{ node_name | to_uuid }}"
+ host_groups: "{{ host_group }}"
+ driver: "ipmi"
+ driver_info:
+ power:
+ ipmi_address: "{{ item.remote_management.address }}"
+          ipmi_port: "{{ virtual_ipmi_port | default('623') }}"
+ ipmi_username: "{{ item.remote_management.user }}"
+ ipmi_password: "{{ item.remote_management.pass }}"
+ nics:
+ - mac: "{{ admin_mac }}"
+ ansible_ssh_host: "{{ admin_ip }}"
+ ipv4_address: "{{ admin_ip }}"
+ properties:
+ cpu_arch: "{{ item.node.arch }}"
+ ram: "{{ item.node.memory.rstrip('G') }}"
+ cpus: "{{ item.node.cpus }}"
+ disk_size: "{{ item.disks[0].disk_capacity.rstrip('G') }}"
+
+ - name: BAREMETAL - Static ip config for dnsmasq
+ lineinfile:
+ path: /tmp/baremetalstaticips
+ state: present
+ line: '{{ admin_mac }},{{ admin_ip }}'
+
+ - name: BAREMETAL - add created node info
+ set_fact:
+ nodes_json_data: "{{ nodes_json_data | combine({node_name: node_data}) }}"
+
+ when: (num_nodes | int) > (nodes_json_data | length | int) + 1
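As a sketch, a single rendered entry in nodes_json_data could look like the
following (every value is illustrative):

    controller00:
      name: controller00
      uuid: "{{ 'controller00' | to_uuid }}"  # deterministic UUID from the name
      host_groups: [baremetal, openstack]
      driver: ipmi
      driver_info:
        power:
          ipmi_address: 172.16.2.12
          ipmi_port: 623
          ipmi_username: admin     # hypothetical
          ipmi_password: password  # hypothetical
      nics:
        - mac: "52:54:00:aa:bb:01" # hypothetical
      ansible_ssh_host: 192.168.122.3
      ipv4_address: 192.168.122.3
      properties:
        cpu_arch: x86_64
        ram: "128"   # '128G' with the unit stripped
        cpus: 2
        disk_size: "100"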
diff --git a/xci/playbooks/roles/create-vm-nodes/tasks/create_vm.yml b/xci/playbooks/roles/create-nodes/tasks/create_vm.yml
index d8169c2f..ac55bf32 100644
--- a/xci/playbooks/roles/create-vm-nodes/tasks/create_vm.yml
+++ b/xci/playbooks/roles/create-nodes/tasks/create_vm.yml
@@ -1,21 +1,25 @@
---
-# Create a VM and volume for it, save its MAC address
-- shell: "sudo virsh list --all | grep 'shut off' | wc -l"
- register: num_vms
-
- name: "Creating VM"
block:
- # NOTE(pas-ha) item here refers to name of the vm
- set_fact:
- vm_name: "{{ node_names[num_vms.stdout | int] }}"
+ vm_name: "{{ idf.kubespray.hostnames[item.1.name] }}"
+ when: installer_type == "kubespray"
+
+ - set_fact:
+ vm_name: "{{ idf.osa.hostnames[item.1.name] }}"
+ when: installer_type == "osa"
+
+ - set_fact:
+ vm_name: "{{ idf.osh.hostnames[item.1.name] }}"
+ when: installer_type == "osh"
- set_fact:
vm_log_file: "{{ node_logdir }}/{{ vm_name }}_console.log"
- vm_host_group: "{{ vm_default_groups }}"
+ vm_host_group: "{{ node_default_groups }}"
- set_fact:
- vm_host_group: "{{ vm_default_groups | union(vm_groups[vm_name]) }}"
- when: vm_groups[vm_name] is defined
+ vm_host_group: "{{ node_default_groups | union(node_groups[vm_name]) }}"
+ when: node_groups[vm_name] is defined
- name: set prealloc arg for Debian
set_fact:
@@ -40,13 +44,19 @@
path: "{{ opnfv_image_path }}/{{ vm_name }}.qcow2"
register: _vm_volume_prepared
+ - name: Resize opnfv VM image to {{ item.1.disks[0].disk_capacity }}
+ command: "qemu-img resize {{ opnfv_image_path }}/opnfv.qcow2 {{ item.1.disks[0].disk_capacity }}"
+ when:
+ - vm_name == 'opnfv'
+ - _vm_volume_prepared.stat.exists
+
# NOTE(pas-ha) Ansible still lacks modules to operate on libvirt volumes
# mbuil: Assuming there is only one disk [0]
- name: create volume for vm
command: >
virsh --connect {{ vm_libvirt_uri }}
vol-create-as {{ node_storage_pool }} {{ vm_name }}.qcow2
- {{ item.disks[0].disk_capacity }}
+ {{ item.1.disks[0].disk_capacity }}
--format qcow2 {{ prealloc|default("") }}
when:
- not _vm_volume_prepared.stat.exists
@@ -97,21 +107,39 @@
# with a custom Ansible module using vbmc Python API
- name: get list of nodes from virtualbmc
command: vbmc list
+ environment:
+ PATH: "{{ lookup('env', 'XCI_VENV') }}/bin"
register: vbmc_list
+ - debug: var=vbmc_list
+
# NOTE(NobodyCam): Space at the end of the find clause is required for proper matching.
- name: delete vm from virtualbmc if it is there
command: vbmc delete {{ vm_name }}
+ environment:
+ PATH: "{{ lookup('env', 'XCI_VENV') }}/bin"
when: vbmc_list.stdout.find(vm_name) != -1
- set_fact:
- virtual_ipmi_port: "{{ (vm_ipmi_port_start|default(623) | int ) + (num_vms.stdout | int ) }}"
+      virtual_ipmi_port: "{{ (vm_ipmi_port_start | default(623) | int) + (item.0 | int) }}"
- name: plug vm into vbmc
command: vbmc add {{ vm_name }} --libvirt-uri {{ vm_libvirt_uri }} --port {{ virtual_ipmi_port }}
+ environment:
+ PATH: "{{ lookup('env', 'XCI_VENV') }}/bin"
- name: start virtualbmc
command: vbmc start {{ vm_name }}
+ environment:
+ PATH: "{{ lookup('env', 'XCI_VENV') }}/bin"
+
+ - name: get list of nodes from virtualbmc
+ command: vbmc list
+ environment:
+ PATH: "{{ lookup('env', 'XCI_VENV') }}/bin"
+ register: vbmc_list2
+
+ - debug: var=vbmc_list2
- name: get XML of the vm
virt:
@@ -119,9 +147,13 @@
command: get_xml
register: vm_xml
+ - name: Fetch the index for admin network
+ set_fact:
+ admin_index: "{{ (vm_name == 'opnfv') | ternary(opnfv_vm_idf.net_config.admin.interface, idf.net_config.admin.interface) | int }}"
+
- name: Fetch the ip
set_fact:
- vm_ip: "{%- for interface in item.interfaces %}{%- if 'native' in (interface.vlan | string) %}{{ interface.address }}{%- endif %}{%- endfor %}"
+ vm_ip: "{{ item.1.interfaces[admin_index | int].address }}"
# Assumes there is only a single NIC per VM
- name: get MAC from vm XML
@@ -136,22 +168,22 @@
name: "{{ vm_name }}"
uuid: "{{ vm_name | to_uuid }}"
host_groups: "{{ vm_host_group }}"
- driver: "{{ vm_node_driver|default('ipmi') }}"
+ driver: "ipmi"
driver_info:
power:
ipmi_address: "192.168.122.1"
ipmi_port: "{{ virtual_ipmi_port }}"
- ipmi_username: "{{ item.remote_management.user }}"
- ipmi_password: "{{ item.remote_management.pass }}"
+ ipmi_username: "{{ item.1.remote_management.user }}"
+ ipmi_password: "{{ item.1.remote_management.pass }}"
nics:
- mac: "{{ vm_mac }}"
ansible_ssh_host: "{{ vm_ip }}"
ipv4_address: "{{ vm_ip }}"
properties:
- cpu_arch: "{{ item.node.arch }}"
- ram: "{{ item.node.memory.rstrip('G') }}"
- cpus: "{{ item.node.cpus }}"
- disk_size: "{{ item.disks[0].disk_capacity.rstrip('G') }}"
+ cpu_arch: "{{ item.1.node.arch }}"
+ ram: "{{ item.1.node.memory.rstrip('G') }}"
+ cpus: "{{ item.1.node.cpus }}"
+ disk_size: "{{ item.1.disks[0].disk_capacity.rstrip('G') }}"
- name: add created vm info
set_fact:
@@ -163,4 +195,4 @@
opnfv_vm_ip: "{{ vm_ip }}"
when: vm_name == 'opnfv'
- when: (num_nodes | int) > (num_vms.stdout | int)
+ when: (num_nodes | int) > (item.0 | int)
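The vbmc calls above always follow the same lifecycle: add the domain, start
the emulated BMC, then talk to it over IPMI on the per-VM port. A standalone
sketch, assuming vbmc is installed in the XCI_VENV virtualenv (node name,
port and credentials are illustrative; admin/password are vbmc's defaults):

    - hosts: localhost
      gather_facts: false
      environment:
        PATH: "{{ lookup('env', 'XCI_VENV') }}/bin:/usr/bin:/bin"
      tasks:
        - name: Plug a VM into virtualbmc on a unique IPMI port
          command: vbmc add node1 --libvirt-uri qemu:///system --port 6230
        - name: Start the emulated BMC
          command: vbmc start node1
        - name: Check the power state over IPMI
          command: ipmitool -I lanplus -H 192.168.122.1 -p 6230 -U admin -P password power status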
diff --git a/xci/playbooks/roles/create-vm-nodes/tasks/download_opnfvimage.yml b/xci/playbooks/roles/create-nodes/tasks/download_opnfvimage.yml
index a227bc4f..a227bc4f 100644
--- a/xci/playbooks/roles/create-vm-nodes/tasks/download_opnfvimage.yml
+++ b/xci/playbooks/roles/create-nodes/tasks/download_opnfvimage.yml
diff --git a/xci/playbooks/roles/create-vm-nodes/tasks/main.yml b/xci/playbooks/roles/create-nodes/tasks/main.yml
index 7e0090e4..607ac494 100644
--- a/xci/playbooks/roles/create-vm-nodes/tasks/main.yml
+++ b/xci/playbooks/roles/create-nodes/tasks/main.yml
@@ -8,8 +8,12 @@
- name: "Install required packages"
package:
name: "{{ required_packages }}"
+ update_cache: "{{ (ansible_pkg_mgr in ['apt', 'zypper']) | ternary('yes', omit) }}"
+ state: present
- include_tasks: prepare_libvirt.yml
+ with_items: "{{ libvirt_networks }}"
+
- include_tasks: download_opnfvimage.yml
- name: create placeholder var for vm entries in JSON format
@@ -18,10 +22,13 @@
# First we create the opnfv_vm
- include_tasks: create_vm.yml
- with_items: "{{ [opnfv_vm] + nodes }}"
+ with_indexed_items: "{{ vms_to_create }}"
+
+- include_tasks: baremetalhoststojson.yml
+ with_items: "{{ baremetal_nodes }}"
- name: Start the opnfv vm
- virt:
+ virt:
command: start
name: opnfv
@@ -36,8 +43,6 @@
dest: "{{ baremetal_json_file }}"
content: "{{ nodes_json_data | to_nice_json }}"
-- debug: var=nodes_json_data
-
- name: >
"Set file permissions such that the baremetal data file
can be read by the user executing Ansible"
diff --git a/xci/playbooks/roles/create-vm-nodes/tasks/prepare_libvirt.yml b/xci/playbooks/roles/create-nodes/tasks/prepare_libvirt.yml
index e09e2d6b..06afaec3 100644
--- a/xci/playbooks/roles/create-vm-nodes/tasks/prepare_libvirt.yml
+++ b/xci/playbooks/roles/create-nodes/tasks/prepare_libvirt.yml
@@ -46,15 +46,24 @@
# with the default 192.168.122/0/24 network
- name: destroy libvirt network
virt_net:
- name: "{{ vm_network }}"
+ name: "{{ item }}"
+ state: absent
+ uri: "{{ vm_libvirt_uri }}"
+
+# Ubuntu creates a default network when installing libvirt.
+# This network uses the 192.168.122.0/24 range and thus
+# conflicts with our admin network
+- name: destroy libvirt network
+ virt_net:
+ name: "default"
state: absent
uri: "{{ vm_libvirt_uri }}"
- name: ensure libvirt network is present
virt_net:
- name: "{{ vm_network }}"
+ name: "{{ item }}"
state: present
- xml: "{{ lookup('template', 'net.xml.j2') }}"
+ xml: "{{ lookup('template', 'net-'+item+'.xml.j2') }}"
uri: "{{ vm_libvirt_uri }}"
- name: find facts on libvirt networks
@@ -62,29 +71,28 @@
command: facts
uri: "{{ vm_libvirt_uri }}"
-# NOTE(pas-ha) yet another place where non-local libvirt will not work
- name: "Delete network interface if virtual network is not active"
- command: ip link del {{ ansible_libvirt_networks[vm_network].bridge }}
+ command: ip link del {{ ansible_libvirt_networks[item].bridge }}
when:
- - ansible_libvirt_networks[vm_network].state != 'active'
+ - ansible_libvirt_networks[item].state != 'active'
- vm_libvirt_uri == 'qemu:///system'
ignore_errors: yes
- name: set libvirt network to autostart
virt_net:
- name: "{{ vm_network }}"
+ name: "{{ item }}"
autostart: yes
uri: "{{ vm_libvirt_uri }}"
- name: ensure libvirt network is running
virt_net:
- name: "{{ vm_network }}"
+ name: "{{ item }}"
state: active
uri: "{{ vm_libvirt_uri }}"
- name: get libvirt network status
virt_net:
- name: "{{ vm_network }}"
+ name: "{{ item }}"
command: status
uri: "{{ vm_libvirt_uri }}"
register: test_vm_net_status
@@ -117,3 +125,15 @@
- name: install virtualbmc
pip:
name: virtualbmc
+ version: 1.5 # >1.3 needs zmq dependency.
+ virtualenv: "{{ lookup('env', 'XCI_VENV') }}"
+
+- name: Create directory for the config of vbmc
+ file:
+ path: /etc/virtualbmc
+ state: directory
+
+- name: Place the config for virtualbmc
+ copy:
+ src: virtualbmc.conf
+ dest: /etc/virtualbmc/virtualbmc.conf
diff --git a/xci/playbooks/roles/create-nodes/templates/net-admin.xml.j2 b/xci/playbooks/roles/create-nodes/templates/net-admin.xml.j2
new file mode 100644
index 00000000..aedbbeb7
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/templates/net-admin.xml.j2
@@ -0,0 +1,14 @@
+<network>
+ <name>{{ item }}</name>
+ <forward mode='nat'>
+ <nat>
+ <port start='1024' end='65535'/>
+ </nat>
+ </forward>
+ <bridge name='br-{{ item }}' stp='on' delay='0'/>
+ <ip address='{{ opnfv_vm_pdf.interfaces[opnfv_vm_idf.net_config.admin.interface].gateway }}' netmask='255.255.255.0'>
+ <dhcp>
+ <host mac="{{ opnfv_vm_pdf.interfaces[opnfv_vm_idf.net_config.admin.interface].mac_address }}" ip="{{ opnfv_vm_pdf.interfaces[opnfv_vm_idf.net_config.admin.interface].address }}"/>
+ </dhcp>
+ </ip>
+</network>
diff --git a/xci/playbooks/roles/create-nodes/templates/net-mgmt.xml.j2 b/xci/playbooks/roles/create-nodes/templates/net-mgmt.xml.j2
new file mode 100644
index 00000000..4a9964c3
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/templates/net-mgmt.xml.j2
@@ -0,0 +1,11 @@
+<network>
+ <name>{{ item }}</name>
+ <forward mode='route'>
+ </forward>
+ <bridge name='br-{{ item }}' stp='on' delay='0'/>
+ <ip address='{{ opnfv_vm_pdf.interfaces[opnfv_vm_idf.net_config.mgmt.interface].gateway }}' netmask='255.255.255.0'>
+ <dhcp>
+ <host mac="{{ opnfv_vm_pdf.interfaces[opnfv_vm_idf.net_config.mgmt.interface].mac_address }}" ip="{{ opnfv_vm_pdf.interfaces[opnfv_vm_idf.net_config.mgmt.interface].address }}"/>
+ </dhcp>
+ </ip>
+</network>
diff --git a/xci/playbooks/roles/create-nodes/templates/net.xml.j2 b/xci/playbooks/roles/create-nodes/templates/net.xml.j2
new file mode 100644
index 00000000..7e372ffe
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/templates/net.xml.j2
@@ -0,0 +1,14 @@
+<network>
+ <name>{{ vm_network }}</name>
+ <forward mode='nat'>
+ <nat>
+ <port start='1024' end='65535'/>
+ </nat>
+ </forward>
+ <bridge name='{{ network_interface }}' stp='on' delay='0'/>
+ <ip address='{{ opnfv_vm_pdf.interfaces[opnfv_vm_idf.net_config.admin.interface].gateway }}' netmask='{{ node_network_netmask }}'>
+ <dhcp>
+ <host mac="{{ opnfv_vm_pdf.interfaces[opnfv_vm_idf.net_config.admin.interface].mac_address }}" ip="{{ opnfv_vm_pdf.interfaces[opnfv_vm_idf.net_config.admin.interface].address }}"/>
+ </dhcp>
+ </ip>
+</network>
diff --git a/xci/playbooks/roles/create-vm-nodes/templates/pool_dir.xml.j2 b/xci/playbooks/roles/create-nodes/templates/pool_dir.xml.j2
index e4645deb..e4645deb 100644
--- a/xci/playbooks/roles/create-vm-nodes/templates/pool_dir.xml.j2
+++ b/xci/playbooks/roles/create-nodes/templates/pool_dir.xml.j2
diff --git a/xci/playbooks/roles/create-vm-nodes/templates/vm.xml.j2 b/xci/playbooks/roles/create-nodes/templates/vm.xml.j2
index c44fa6aa..9fad42b8 100644
--- a/xci/playbooks/roles/create-vm-nodes/templates/vm.xml.j2
+++ b/xci/playbooks/roles/create-nodes/templates/vm.xml.j2
@@ -1,9 +1,9 @@
<domain type='{{ vm_domain_type }}'>
<name>{{ vm_name }}</name>
- <memory unit='GiB'>{{ item.node.memory.rstrip('G') }}</memory>
- <vcpu>{{ item.node.cpus }}</vcpu>
+ <memory unit='GiB'>{{ item.1.node.memory.rstrip('G') }}</memory>
+ <vcpu>{{ item.1.node.cpus }}</vcpu>
<os>
- <type arch='{{ item.node.arch }}' machine='{{ item.node.model }}'>hvm</type>
+ <type arch='{{ item.1.node.arch }}' machine='{{ item.1.node.model }}'>hvm</type>
{%- if 'opnfv' in vm_name -%}
<boot dev='hd'/>
{%- else -%}
@@ -17,7 +17,7 @@
<apic/>
<pae/>
</features>
- <cpu mode='{{ item.node.cpu_cflags }}'>
+ <cpu mode='{{ item.1.node.cpu_cflags }}'>
<model fallback='allow'/>
</cpu>
<clock offset='utc'/>
@@ -35,19 +35,22 @@
<controller type='ide' index='0'>
<address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x1'/>
</controller>
- {% set native_interfaces = [] %}
- {%- for interface in item.interfaces %}
- {%- if 'native' in (interface.vlan | string) %}
- {%- set _ = native_interfaces.append(interface) %}
- {%- endif %}
- {%- endfor %}
- {%- for interface in native_interfaces -%}
<interface type='network'>
- <source network='{{ vm_network }}'/>
+ <source network='{{ vm_network_admin }}'/>
<model type='{{ vm_nic }}'/>
- <mac address='{{ interface.mac_address }}'/>
+ {%- if vm_name == 'opnfv' -%}
+ <mac address='{{ item.1.interfaces[opnfv_vm_idf.net_config.admin.interface].mac_address }}'/>
+ {%- else -%}
+ <mac address='{{ item.1.interfaces[idf.net_config.admin.interface].mac_address }}'/>
+ {%- endif -%}
</interface>
- {% endfor -%}
+ {%- if baremetal | bool -%}
+ <interface type='network'>
+ <source network='{{ vm_network_mgmt }}'/>
+ <model type='{{ vm_nic }}'/>
+ <mac address='{{ item.1.interfaces[opnfv_vm_idf.net_config.mgmt.interface].mac_address }}'/>
+ </interface>
+ {%- endif -%}
<input type='mouse' bus='ps2'/>
<graphics type='vnc' port='-1' autoport='yes'/>
<video>
@@ -56,19 +59,9 @@
</video>
<serial type='file'>
<source path='{{ vm_log_file }}'/>
- <target port='0'/>
- <alias name='serial0'/>
- </serial>
- <serial type='pty'>
- <source path='/dev/pts/49'/>
<target port='1'/>
<alias name='serial1'/>
</serial>
- <console type='file'>
- <source path='{{ vm_log_file }}'/>
- <target type='serial' port='0'/>
- <alias name='serial0'/>
- </console>
<memballoon model='virtio'>
<address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
</memballoon>
diff --git a/xci/playbooks/roles/create-vm-nodes/vars/debian.yml b/xci/playbooks/roles/create-nodes/vars/debian.yml
index bcfc47d5..bcfc47d5 100644
--- a/xci/playbooks/roles/create-vm-nodes/vars/debian.yml
+++ b/xci/playbooks/roles/create-nodes/vars/debian.yml
diff --git a/xci/playbooks/roles/create-vm-nodes/vars/redhat.yml b/xci/playbooks/roles/create-nodes/vars/redhat.yml
index 2b285110..2b285110 100644
--- a/xci/playbooks/roles/create-vm-nodes/vars/redhat.yml
+++ b/xci/playbooks/roles/create-nodes/vars/redhat.yml
diff --git a/xci/playbooks/roles/create-vm-nodes/vars/suse.yml b/xci/playbooks/roles/create-nodes/vars/suse.yml
index 7e4c41ef..7e4c41ef 100644
--- a/xci/playbooks/roles/create-vm-nodes/vars/suse.yml
+++ b/xci/playbooks/roles/create-nodes/vars/suse.yml
diff --git a/xci/playbooks/roles/create-vm-nodes/templates/net.xml.j2 b/xci/playbooks/roles/create-vm-nodes/templates/net.xml.j2
deleted file mode 100644
index 3c082170..00000000
--- a/xci/playbooks/roles/create-vm-nodes/templates/net.xml.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-<network>
- <name>{{ vm_network }}</name>
- <forward mode='nat'>
- <nat>
- <port start='1024' end='65535'/>
- </nat>
- </forward>
- <bridge name='{{ network_interface }}' stp='on' delay='0'/>
- <ip address='{{ nodes[0].remote_management.address.split(':')[0] }}' netmask='{{ node_network_netmask }}'>
- <dhcp>
- {%- for interface in opnfv_vm.interfaces %}
- {%- if 'native' in (interface.vlan | string) %}
- <host mac="{{ interface.mac_address }}" ip="{{ interface.address }}"/>
- {%- endif %}
- {%- endfor %}
- </dhcp>
- </ip>
-</network>
diff --git a/xci/playbooks/roles/prepare-functest/defaults/main.yml b/xci/playbooks/roles/prepare-functest/defaults/main.yml
deleted file mode 100644
index a3638302..00000000
--- a/xci/playbooks/roles/prepare-functest/defaults/main.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-# Gateway parameters
-gateway_ip: "10.10.10.1"
-gateway_ip_mask: "10.10.10.1/24"
-broadcast_ip: "10.10.10.255"
-gateway_interface: "br-vlan"
-
-# Network parameters
-external_network: "ext-net"
-
-# Subnet parameters
-subnet_name: "ext-subnet"
-allocation_pool: "start=10.10.10.5,end=10.10.10.254"
-subnet_cidr: "10.10.10.0/24"
diff --git a/xci/playbooks/roles/prepare-functest/templates/run-functest.sh.j2 b/xci/playbooks/roles/prepare-functest/templates/run-functest.sh.j2
deleted file mode 100644
index 52bca30b..00000000
--- a/xci/playbooks/roles/prepare-functest/templates/run-functest.sh.j2
+++ /dev/null
@@ -1,84 +0,0 @@
-#!/bin/bash
-
-# Variables that we need to pass from XCI to functest
-XCI_ENV=(INSTALLER_TYPE XCI_FLAVOR OPENSTACK_OSA_VERSION CI_LOOP BUILD_TAG NODE_NAME FUNCTEST_MODE FUNCTEST_SUITE_NAME OPNFV_FUNCTEST_HEALTHCHECK_DOCKER_IMAGE_DIGEST)
-
-# Create directory to store functest logs
-mkdir -p ~/results/
-
-# Extract variables from xci.env file
-if [[ -e /root/xci.env ]]; then
- for x in ${XCI_ENV[@]}; do
- grep "^${x}=" /root/xci.env >> /root/env
- done
- # Parse the XCI's DEPLOY_SCENARIO and XCI_FLAVOR variables and
- # set the functest container's DEPLOY_SCENARIO variable in the
- # following format <scenario>-<flavor>. But the XCI's mini flavor
- # is converted into noha.
- DEPLOY_SCENARIO=`grep -Po '(?<=DEPLOY_SCENARIO=).*' /root/xci.env`
- XCI_FLAVOR=`grep -Po '(?<=XCI_FLAVOR=).*' /root/xci.env`
- XCI_FLAVOR=${XCI_FLAVOR/mini/noha}
- echo "DEPLOY_SCENARIO=$DEPLOY_SCENARIO-$XCI_FLAVOR" >> /root/env
-fi
-
-# Dump the env file
-echo "------------------------------------------------------"
-echo "------------- functest environment file --------------"
-cat /root/env
-echo "------------------------------------------------------"
-
-# we need to ensure the necessary environment variables are sourced
-source /root/env
-
-{% if 'os-' in deploy_scenario %}
-{# stuff needed for OpenStack based scenarios #}
-source /root/openrc
-
-openstack --insecure network create --external \
- --provider-physical-network flat \
- --provider-network-type flat {{ external_network }}
-
-openstack --insecure subnet create --network {{ external_network }} \
- --allocation-pool {{ allocation_pool }} \
- --subnet-range {{ subnet_cidr }} --gateway {{ gateway_ip }} \
- --no-dhcp {{ subnet_name }}
-
-# the needed images differ between the suites so avoid downloading unnecessary images
-if [[ "$FUNCTEST_SUITE_NAME" =~ "healthcheck" ]]; then
- mkdir ~/images && cd ~/images && wget -q http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img && cd ~
-elif [[ "$FUNCTEST_SUITE_NAME" =~ "smoke" ]]; then
- mkdir -p images && wget -q -O- https://git.opnfv.org/functest/plain/functest/ci/download_images.sh | bash -s -- images && ls -1 images/*
-else
- echo "Unsupported test suite for functest"
- exit 1
-fi
-
-# docker image to use will be different for healthcheck and smoke test
-DOCKER_IMAGE_NAME="opnfv/functest-${FUNCTEST_SUITE_NAME}"
-
-sudo docker run --env-file env \
- -v $(pwd)/openrc:/home/opnfv/functest/conf/env_file \
- -v $(pwd)/images:/home/opnfv/functest/images \
- -v $(pwd)/results:/home/opnfv/functest/results \
- ${DOCKER_IMAGE_NAME}@${OPNFV_FUNCTEST_HEALTHCHECK_DOCKER_IMAGE_DIGEST}
-{% else %}
-{# stuff needed for Kubernetes based scenarios #}
-# Create k8s.creds file for functest
-KUBE_MASTER_URL=$(grep -r server ~/.kube/config | awk '{print $2}')
-KUBE_MASTER_IP=$(echo $KUBE_MASTER_URL | awk -F "[:/]" '{print $4}')
-cat << EOF > ~/k8s.creds
-KUBERNETES_PROVIDER=local
-KUBE_MASTER_URL=$KUBE_MASTER_URL
-KUBE_MASTER_IP=$KUBE_MASTER_IP
-EOF
-
-# docker image to use will be different for healthcheck and smoke test
-DOCKER_IMAGE_NAME="opnfv/functest-kubernetes-${FUNCTEST_SUITE_NAME}"
-
-sudo docker run --env-file env \
- -v $(pwd)/k8s.creds:/home/opnfv/functest/conf/env_file \
- -v $(pwd)/.kube/config:/root/.kube/config \
- -v $(pwd)/results:/home/opnfv/functest/results \
- $DOCKER_IMAGE_NAME
-{% endif %}
-
diff --git a/xci/playbooks/roles/prepare-tests/defaults/main.yml b/xci/playbooks/roles/prepare-tests/defaults/main.yml
new file mode 100644
index 00000000..7002586c
--- /dev/null
+++ b/xci/playbooks/roles/prepare-tests/defaults/main.yml
@@ -0,0 +1,14 @@
+---
+# Gateway parameters
+gateway_ip: "192.168.122.1"
+gateway_ip_mask: "192.168.122.1/24"
+broadcast_ip: "192.168.122.255"
+gateway_interface: "br-vlan"
+
+# Network parameters
+external_network: "ext-net"
+
+# Subnet parameters
+subnet_name: "ext-subnet"
+allocation_pool: "start=192.168.122.100,end=192.168.122.254"
+subnet_cidr: "192.168.122.0/24"
diff --git a/xci/playbooks/roles/prepare-functest/tasks/main.yml b/xci/playbooks/roles/prepare-tests/tasks/main.yml
index c29baca9..a543ac1f 100644
--- a/xci/playbooks/roles/prepare-functest/tasks/main.yml
+++ b/xci/playbooks/roles/prepare-tests/tasks/main.yml
@@ -8,46 +8,49 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-- name: install functest required packages
+- name: install required packages
package:
- name: "{{ functest_required_packages[ansible_pkg_mgr] }}"
+ name: "{{ required_packages[ansible_pkg_mgr] }}"
+ update_cache: "{{ (ansible_pkg_mgr in ['apt', 'zypper']) | ternary('yes', omit) }}"
state: present
-# Docker is needed for functest
+# Docker is needed for test frameworks
- name: Ensure Docker service is started and enabled
service:
name: docker
state: started
enabled: yes
-- name: install functest required pip packages
+- name: install required pip packages
pip:
- name: "{{ functest_required_pip }}"
+ name: "{{ required_pip }}"
state: present
extra_args: '-c https://raw.githubusercontent.com/openstack/requirements/{{ requirements_git_install_branch }}/upper-constraints.txt'
-- name: create public network gateway for functest
- block:
- - name: check if the gateway was already set
- shell: "ip a | grep {{ gateway_ip }}"
- register: gateway_ip_result
- ignore_errors: True
- changed_when: False
-
- - name: add public network gateway
- command: "ip addr add {{ gateway_ip_mask }} brd {{ broadcast_ip }} dev {{ gateway_interface }}"
- changed_when: False
- when: gateway_ip_result|failed
- when: deploy_scenario is match("os-.*")
-
-- name: prepare environment file for functest
+# odl scenarios require to add odl variables to env
+- include_tasks: process_neutron_conf.yml
+ when: "'-odl-' in deploy_scenario"
+
+- name: prepare environment file for tests
template:
src: env.j2
dest: /root/env
mode: 0755
-- name: prepare the script to create networks and run functest
+- name: create the script to prepare for testing
+ template:
+ src: prepare-tests.sh.j2
+ dest: /root/prepare-tests.sh
+ mode: 0755
+
+- name: create the script to run functest
template:
src: run-functest.sh.j2
dest: /root/run-functest.sh
mode: 0755
+
+- name: create the script to run yardstick
+ template:
+ src: run-yardstick.sh.j2
+ dest: /root/run-yardstick.sh
+ mode: 0755
diff --git a/xci/playbooks/roles/prepare-tests/tasks/process_neutron_conf.yml b/xci/playbooks/roles/prepare-tests/tasks/process_neutron_conf.yml
new file mode 100644
index 00000000..45608df3
--- /dev/null
+++ b/xci/playbooks/roles/prepare-tests/tasks/process_neutron_conf.yml
@@ -0,0 +1,19 @@
+---
+- name: Collecting ODL variables
+ block:
+ - name: Fetch odl_password variable
+ shell: "cat /tmp/ml2_conf.ini | grep password | cut -d ' ' -f3"
+ register: odl_password
+
+ - name: Fetch odl_username variable
+ shell: "cat /tmp/ml2_conf.ini | grep username | cut -d ' ' -f3"
+ register: odl_username
+
+ - name: Fetch odl_port variable
+ shell: "cat /tmp/ml2_conf.ini | grep url | cut -d ':' -f3 | cut -d '/' -f1"
+ register: odl_port
+
+ - name: Fetch odl_ip variable
+ shell: "cat /tmp/ml2_conf.ini | grep url | cut -d ':' -f2 | cut -d '/' -f3"
+ register: odl_ip
+ when: "'-odl-' in deploy_scenario"
diff --git a/xci/playbooks/roles/prepare-functest/templates/env.j2 b/xci/playbooks/roles/prepare-tests/templates/env.j2
index d9a3bf32..d4f8f86c 100644
--- a/xci/playbooks/roles/prepare-functest/templates/env.j2
+++ b/xci/playbooks/roles/prepare-tests/templates/env.j2
@@ -5,3 +5,11 @@ ENERGY_RECORDER_API_URL=http://energy.opnfv.fr/resources
{% if 'os-' in deploy_scenario %}
EXTERNAL_NETWORK={{ external_network }}
{% endif %}
+{% if '-odl-' in deploy_scenario %}
+SDN_CONTROLLER_IP={{ odl_ip.stdout }}
+SDN_CONTROLLER_USER={{ odl_username.stdout }}
+SDN_CONTROLLER_PASSWORD={{ odl_password.stdout }}
+SDN_CONTROLLER_RESTCONFPORT={{ odl_port.stdout }}
+SDN_CONTROLLER_WEBPORT={{ odl_port.stdout }}
+{% endif %}
+
diff --git a/xci/playbooks/roles/prepare-tests/templates/prepare-tests.sh.j2 b/xci/playbooks/roles/prepare-tests/templates/prepare-tests.sh.j2
new file mode 100644
index 00000000..1b779cb9
--- /dev/null
+++ b/xci/playbooks/roles/prepare-tests/templates/prepare-tests.sh.j2
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+# Variables that we need to pass from XCI to testing
+XCI_ENV=(INSTALLER_TYPE XCI_FLAVOR OPENSTACK_OSA_VERSION CI_LOOP BUILD_TAG NODE_NAME FUNCTEST_MODE FUNCTEST_SUITE_NAME FUNCTEST_VERSION)
+
+# Extract variables from xci.env file
+if [[ -e /root/xci.env ]]; then
+ for x in ${XCI_ENV[@]}; do
+ grep "^${x}=" /root/xci.env >> /root/env
+ done
+ # Parse the XCI's DEPLOY_SCENARIO and XCI_FLAVOR variables and
+ # set the functest container's DEPLOY_SCENARIO variable in the
+ # following format <scenario>-<flavor>. But the XCI's mini flavor
+ # is converted into noha.
+ DEPLOY_SCENARIO=`grep -Po '(?<=DEPLOY_SCENARIO=).*' /root/xci.env`
+ XCI_FLAVOR=`grep -Po '(?<=XCI_FLAVOR=).*' /root/xci.env`
+ XCI_FLAVOR=${XCI_FLAVOR/mini/noha}
+ echo "DEPLOY_SCENARIO=$DEPLOY_SCENARIO-$XCI_FLAVOR" >> /root/env
+fi
+
+# we need to ensure the necessary environment variables are sourced
+source /root/env
+
+{% if 'os-' in deploy_scenario %}
+{# stuff needed for OpenStack based scenarios #}
+source /root/openrc
+
+openstack --insecure network create --external \
+ --provider-physical-network flat \
+ --provider-network-type flat {{ external_network }}
+
+openstack --insecure subnet create --network {{ external_network }} \
+ --allocation-pool {{ allocation_pool }} \
+ --subnet-range {{ subnet_cidr }} --gateway {{ gateway_ip }} \
+ --no-dhcp {{ subnet_name }}
+{% else %}
+{# stuff needed for Kubernetes based scenarios #}
+# Create k8s.creds file for testing
+KUBE_MASTER_URL=$(grep -r server ~/.kube/config | awk '{print $2}')
+KUBE_MASTER_IP=$(echo $KUBE_MASTER_URL | awk -F "[:/]" '{print $4}')
+cat << EOF > ~/k8s.creds
+KUBERNETES_PROVIDER=local
+KUBE_MASTER_URL=$KUBE_MASTER_URL
+KUBE_MASTER_IP=$KUBE_MASTER_IP
+EOF
+{% endif %}
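Note: the two openstack CLI calls above can also be expressed as Ansible
tasks; a hedged sketch assuming the openstack.cloud collection is installed
and cloud credentials are already sourced:

    - hosts: localhost
      gather_facts: false
      tasks:
        - name: Create the external flat network
          openstack.cloud.network:
            name: ext-net
            external: true
            provider_network_type: flat
            provider_physical_network: flat
        - name: Create its subnet without DHCP
          openstack.cloud.subnet:
            network_name: ext-net
            name: ext-subnet
            cidr: 192.168.122.0/24
            gateway_ip: 192.168.122.1
            enable_dhcp: false
            allocation_pool_start: 192.168.122.100
            allocation_pool_end: 192.168.122.254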
diff --git a/xci/playbooks/roles/prepare-tests/templates/run-functest.sh.j2 b/xci/playbooks/roles/prepare-tests/templates/run-functest.sh.j2
new file mode 100644
index 00000000..b4cf46d7
--- /dev/null
+++ b/xci/playbooks/roles/prepare-tests/templates/run-functest.sh.j2
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+# Create directory to store functest logs
+mkdir -p /root/functest-results/
+
+# Dump the env file
+echo "------------------------------------------------------"
+echo "------------- functest environment file --------------"
+cat /root/env
+echo "------------------------------------------------------"
+
+# we need to ensure the necessary environment variables are sourced
+source /root/env
+
+{% if 'os-' in deploy_scenario %}
+{# stuff needed for OpenStack based scenarios #}
+# the needed images differ between the suites so avoid downloading unnecessary images
+echo "Downloading the images needed for functest-$FUNCTEST_SUITE_NAME"
+mkdir ~/images && cd ~/images
+if [[ "$FUNCTEST_SUITE_NAME" =~ "healthcheck" ]]; then
+ wget -q http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img
+elif [[ "$FUNCTEST_SUITE_NAME" =~ "smoke" ]]; then
+ wget -q http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img \
+ http://testresults.opnfv.org/functest/shaker-image.qcow2 \
+ https://cloud-images.ubuntu.com/releases/14.04/release/ubuntu-14.04-server-cloudimg-amd64-disk1.img
+else
+ echo "Unsupported test suite for functest"
+ exit 1
+fi
+echo "------------------------------------------------------"
+ls -al . && cd ~
+echo "------------------------------------------------------"
+
+# docker image to use will be different for healthcheck and smoke test
+DOCKER_IMAGE_NAME="opnfv/functest-${FUNCTEST_SUITE_NAME}:${FUNCTEST_VERSION}"
+
+sudo docker run --env-file env \
+ -v $(pwd)/openrc:/home/opnfv/functest/conf/env_file \
+ -v $(pwd)/images:/home/opnfv/functest/images \
+ -v $(pwd)/functest-results:/home/opnfv/functest/results \
+ ${DOCKER_IMAGE_NAME}
+{% else %}
+{# stuff needed for Kubernetes based scenarios #}
+# docker image to use will be different for healthcheck and smoke test
+DOCKER_IMAGE_NAME="opnfv/functest-kubernetes-${FUNCTEST_SUITE_NAME}"
+
+sudo docker run --env-file env \
+ -v $(pwd)/k8s.creds:/home/opnfv/functest/conf/env_file \
+ -v $(pwd)/.kube/config:/root/.kube/config \
+ -v $(pwd)/functest-results:/home/opnfv/functest/results \
+ $DOCKER_IMAGE_NAME
+{% endif %}
diff --git a/xci/playbooks/roles/prepare-tests/templates/run-yardstick.sh.j2 b/xci/playbooks/roles/prepare-tests/templates/run-yardstick.sh.j2
new file mode 100644
index 00000000..6a7fd8be
--- /dev/null
+++ b/xci/playbooks/roles/prepare-tests/templates/run-yardstick.sh.j2
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+# Create directory to store yardstick logs
+mkdir -p /root/yardstick-results/
+
+# Dump the env file
+echo "------------------------------------------------------"
+echo "------------- yardstick environment file --------------"
+cat /root/env
+echo "------------------------------------------------------"
+
+# we need to ensure the necessary environment variables are sourced
+source /root/env
+
+{% if 'os-' in deploy_scenario %}
+{# stuff needed for OpenStack based scenarios #}
+rc_file_vol="-v /root/openrc:/etc/yardstick/openstack.creds"
+{% else %}
+{# k8 scenario name is hardcoded for the time being until we clarify #}
+{# which suite name we should use for the scenarios without yardstick suites #}
+DEPLOY_SCENARIO="k8-nosdn-nofeature-noha"
+rc_file_vol="-v /root/admin.conf:/etc/yardstick/admin.conf"
+{% endif %}
+
+OS_CACERT="/etc/ssl/certs/haproxy.cert"
+DOCKER_IMAGE_NAME="opnfv/yardstick"
+YARDSTICK_SCENARIO_SUITE_NAME="opnfv_${DEPLOY_SCENARIO}_daily.yaml"
+
+# add OS_CACERT to openrc
+echo "export OS_CACERT=/etc/yardstick/os_cacert" >> ~/openrc
+
+opts="--privileged=true --rm"
+envs="-e INSTALLER_TYPE=$INSTALLER_TYPE -e INSTALLER_IP=$INSTALLER_IP \
+ -e NODE_NAME=$NODE_NAME -e EXTERNAL_NETWORK=$EXTERNAL_NETWORK \
+ -e YARDSTICK_BRANCH=master -e BRANCH=master \
+ -e DEPLOY_SCENARIO=$DEPLOY_SCENARIO -e CI_DEBUG=true"
+cacert_file_vol="-v $OS_CACERT:/etc/yardstick/os_cacert"
+map_log_dir="-v /root/yardstick-results:/tmp/yardstick"
+sshkey="-v /root/.ssh/id_rsa:/root/.ssh/id_rsa"
+cmd="sudo docker run ${opts} ${envs} ${rc_file_vol} ${cacert_file_vol} \
+ ${map_log_dir} ${sshkey} ${DOCKER_IMAGE_NAME} \
+ exec_tests.sh ${YARDSTICK_SCENARIO_SUITE_NAME}"
+echo "Running yardstick with the command"
+echo "------------------------------------------------------"
+echo $cmd
+echo "------------------------------------------------------"
+$cmd
diff --git a/xci/playbooks/roles/prepare-functest/vars/main.yml b/xci/playbooks/roles/prepare-tests/vars/main.yml
index 3a6c8a4d..83638466 100644
--- a/xci/playbooks/roles/prepare-functest/vars/main.yml
+++ b/xci/playbooks/roles/prepare-tests/vars/main.yml
@@ -1,14 +1,17 @@
---
-functest_required_packages:
+required_packages:
apt:
- docker.io
- wget
+ - xz-utils
zypper:
- docker
- wget
+ - xz
yum:
- docker
- wget
+ - xz
-functest_required_pip:
+required_pip:
- docker-py
diff --git a/xci/scripts/update-osa-version-files.sh b/xci/scripts/update-osa-version-files.sh
index 42405a3f..bb0d82ab 100755
--- a/xci/scripts/update-osa-version-files.sh
+++ b/xci/scripts/update-osa-version-files.sh
@@ -76,20 +76,20 @@ cat $tempdir/openstack-ansible/ansible-role-requirements.yml >> $releng_xci_base
# Update the pinned OSA version
sed -i -e "/^export OPENSTACK_OSA_VERSION/s@:-\"[a-z0-9]*@:-\"${1}@" \
- -e "s/\(^# HEAD of osa.*of \).*/\1$(date +%d\.%m\.%Y)/" $releng_xci_base/config/pinned-versions
+ -e "s@\(^# HEAD of osa \).*@\1\"${OPENSTACK_OSA_VERSION:-master}\" as of $(date +%d\.%m\.%Y)@" $releng_xci_base/config/pinned-versions
# Update the pinned bifrost version
if [[ -n ${2:-} ]]; then
echo "Updating bifrost..."
sed -i -e "/^export OPENSTACK_BIFROST_VERSION/s@:-\"[a-z0-9]*@:-\"${2}@" \
- -e "s/\(^# HEAD of bifrost.*of \).*/\1$(date +%d\.%m\.%Y)/" $releng_xci_base/config/pinned-versions
+ -e "s/\(^# HEAD of bifrost \).*/\1\"${OPENSTACK_OSA_VERSION:-master}\" as of $(date +%d\.%m\.%Y)/" $releng_xci_base/config/pinned-versions
# Get ironic shas
for ironic in ironic_git_url ironic_client_git_url ironic_inspector_git_url ironic_inspector_client_git_url; do
- ironic_sha=$(git ls-remote ${!ironic} | grep master | awk '{print $1}')
+ ironic_sha=$(git ls-remote ${!ironic} | grep "${OPENSTACK_OSA_VERSION:-master}" | awk '{print $1}')
ironic=${ironic/_git*/}
echo "... updating ${ironic}"
sed -i -e "/^export BIFROST_${ironic^^}_VERSION/s@:-\"[a-z0-9]*@:-\"${ironic_sha}@" \
- -e "s/\(^# HEAD of ${ironic/_/-}.*of \).*/\1$(date +%d\.%m\.%Y)/" $releng_xci_base/config/pinned-versions
+ -e "s/\(^# HEAD of ${ironic/_/-} \).*/\1\"${OPENSTACK_OSA_VERSION:-master}\" as of $(date +%d\.%m\.%Y)/" $releng_xci_base/config/pinned-versions
done
fi
diff --git a/xci/scripts/vm/start-new-vm.sh b/xci/scripts/vm/start-new-vm.sh
index ecc2998d..965cfe4c 100755
--- a/xci/scripts/vm/start-new-vm.sh
+++ b/xci/scripts/vm/start-new-vm.sh
@@ -119,17 +119,14 @@ COMMON_DISTRO_PKGS=(vim strace gdb htop dnsmasq docker iptables ebtables virt-ma
case ${ID,,} in
*suse*)
- pkg_mgr_cmd="sudo zypper -q -n ref"
- pkg_mgr_cmd+=" && sudo zypper -q -n install ${COMMON_DISTRO_PKGS[@]} qemu-tools libvirt-daemon libvirt-client libvirt-daemon-driver-qemu"
+ pkg_mgr_cmd="sudo zypper -q -n install ${COMMON_DISTRO_PKGS[@]} qemu-tools libvirt-daemon libvirt-client libvirt-daemon-driver-qemu > /dev/null"
;;
centos)
- pkg_mgr_cmd="yum updateinfo"
- pkg_mgr_cmd+=" && sudo yum install -q -y epel-release"
- pkg_mgr_cmd+=" && sudo yum install -q -y in ${COMMON_DISTRO_PKGS[@]} qemu-kvm-tools qemu-img libvirt-daemon-kvm"
+ pkg_mgr_cmd="sudo yum install -C -q -y epel-release > /dev/null"
+        pkg_mgr_cmd+=" && sudo yum install -C -q -y ${COMMON_DISTRO_PKGS[@]} qemu-kvm-tools qemu-img libvirt-daemon-kvm > /dev/null"
;;
ubuntu)
- pkg_mgr_cmd="sudo apt-get update"
- pkg_mgr_cmd+=" && sudo apt-get install -y -q=3 ${COMMON_DISTRO_PKGS[@]} libvirt-bin qemu-utils docker.io"
+ pkg_mgr_cmd="sudo apt-get install --no-upgrade -y -q=3 ${COMMON_DISTRO_PKGS[@]} libvirt-bin qemu-utils docker.io > /dev/null"
;;
esac
@@ -370,7 +367,7 @@ if [[ $? != 0 ]]; then
#!/bin/bash
set -o pipefail
export XCI_FLAVOR=mini
-export BIFROST_USE_PREBUILT_IMAGES=true
+export BIFROST_CREATE_IMAGE_VIA_DIB=false
cd ~/releng-xci/xci
./xci-deploy.sh | ts
EOF
diff --git a/xci/var/ericsson-pod2-idf.yml b/xci/var/ericsson-pod2-idf.yml
new file mode 100644
index 00000000..2839b120
--- /dev/null
+++ b/xci/var/ericsson-pod2-idf.yml
@@ -0,0 +1,187 @@
+##############################################################################
+# Copyright (c) 2018 Ericsson AB, Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+### ERICSSON POD 2 installer descriptor file ###
+idf:
+ version: 0.1
+ installer: ['apex', 'compass4nfv', 'daisy', 'osa', 'osh']
+ net_config: &net_config
+ admin:
+ interface: 2
+ network: 192.168.122.0 # Untagged, 'PXE/Admin' on wiki, different IP
+ mask: 24
+ mgmt:
+ interface: 1
+ network: 172.29.236.0
+ mask: 22
+ storage:
+ interface: 3
+      network: 172.29.240.0 # Tagged, not the same as 'storage' on the wiki
+ mask: 22
+ private:
+ interface: 4
+      network: 172.29.242.0 # Tagged, not the same as 'private' on the wiki
+ mask: 22
+ public:
+ interface: 2
+ network: 192.168.122.0 # Untagged, 'public' on wiki
+ mask: 24
+ gateway: 192.168.122.1
+ dns:
+ - 8.8.8.8
+ - 8.8.4.4
+ osa: &idf_osa
+ nodes_roles:
+ opnfv: [deployment]
+ node1: [controller]
+ node2: [compute, storage]
+ node3: [compute, storage]
+ node4: [controller]
+ node5: [controller]
+ groups:
+ openstack:
+ - controller
+ - compute
+ - storage
+ hostnames:
+ opnfv: opnfv
+ node1: controller00
+ node2: compute00
+ node3: compute01
+ node4: controller01
+ node5: controller02
+ network:
+ # network mapping
+ network_mapping:
+ # Management network used by installer components to communicate
+ net-mgmt: admin
+ # Storage Network
+ net-storage: storage
+ # Internal network for communication between VNF
+ net-internal: private
+      # Public network for VNF remote access (ext-net in OpenStack)
+ net-vnf: public
+ deployment_host_interfaces:
+ # Ordered-list, index should be in sync with interface index in PDF
+      - 'ens1f1' # should be eno49 but it is currently broken
+ - 'ens1f0'
+ - 'ens1f1'
+ - 'ens2f0'
+ - 'ens2f1'
+ osh: &idf_osh
+ nodes_roles:
+      opnfv: [opnfv]
+ node1: [kube-master, etcd, vault]
+ node2: [kube-node]
+ node3: [kube-node]
+ node4: [kube-master, etcd, vault]
+ node5: [kube-master, etcd, vault]
+ groups:
+ k8s-cluster:
+ - kube-node
+ - kube-master
+ hostnames:
+ opnfv: opnfv
+ node1: master1
+ node2: node1
+ node3: node2
+ node4: master2
+ node5: master3
+ network:
+ # network mapping
+ network_mapping:
+ # Management network used by installer components to communicate
+ net-mgmt: admin
+ # Storage Network
+ net-storage: storage
+ # Internal network for communication between VNF
+ net-internal: private
+      # Public network for VNF remote access (ext-net in OpenStack)
+ net-vnf: public
+ deployment_host_interfaces:
+ # Ordered-list, index should be in sync with interface index in PDF
+      - 'ens1f1' # should be eno49 but it is currently broken
+ - 'ens1f0'
+ - 'ens1f1'
+ - 'ens2f0'
+ - 'ens2f1'
+ kubespray: &idf_kubespray
+ nodes_roles:
+      opnfv: [opnfv]
+ node1: [kube-master, etcd, vault]
+ node2: [kube-node]
+ node3: [kube-node]
+ node4: [kube-master, etcd, vault]
+ node5: [kube-master, etcd, vault]
+ groups:
+ k8s-cluster:
+ - kube-node
+ - kube-master
+ hostnames:
+ opnfv: opnfv
+ node1: master1
+ node2: node1
+ node3: node2
+ node4: master2
+ node5: master3
+ network:
+ # network mapping
+ network_mapping:
+ # Management network used by installer components to communicate
+ net-mgmt: admin
+ # Storage Network
+ net-storage: storage
+ # Internal network for communication between VNF
+ net-internal: private
+      # Public network for VNF remote access (ext-net in OpenStack)
+ net-vnf: public
+ deployment_host_interfaces:
+ # Ordered-list, index should be in sync with interface index in PDF
+      - 'ens1f1' # should be eno49 but it is currently broken
+ - 'ens1f0'
+ - 'ens1f1'
+ - 'ens2f0'
+ - 'ens2f1'
+
+xci:
+ pod_name: pod1
+ net_config: *net_config
+ flavors:
+ mini:
+ - opnfv
+ - node1
+ - node2
+ noha:
+ - opnfv
+ - node1
+ - node2
+ - node3
+ ha:
+ - opnfv
+ - node1
+ - node2
+ - node3
+ - node4
+ - node5
+
+ # net_config network to be used by the PXE
+ pxe_network: public
+
+  # As the MACs of the generated bridges are not predictable, we use a list
+  # of local bridges to create the libvirt networks
+ jumphost_interfaces_bridges:
+ - name: virbr0
+ ip: 192.168.122.1
+
+ extra_addresses:
+ opnfv: 192.168.122.2
+
+ installers:
+ osa: *idf_osa
+ kubespray: *idf_kubespray
+ osh: *idf_osh
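The &net_config/&idf_osa anchors used in this file keep the per-installer
blocks in sync without duplication; the pattern in miniature:

    # An anchor (&) defines a block once, an alias (*) reuses it,
    # and the merge key (<<:) lets a consumer override single fields.
    common: &common
      mask: 24
      gateway: 192.168.122.1
    admin:
      <<: *common
      network: 192.168.122.0
    public:
      <<: *common
      network: 192.168.0.0  # illustrative override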
diff --git a/xci/var/ericsson-pod2-pdf.yml b/xci/var/ericsson-pod2-pdf.yml
new file mode 100644
index 00000000..4c7271ec
--- /dev/null
+++ b/xci/var/ericsson-pod2-pdf.yml
@@ -0,0 +1,269 @@
+---
+### POD descriptor file ###
+
+version: 1.0
+details:
+ pod_owner: Jose Lausuch
+ contact: jose.lausuch@ericsson.com
+ lab: Ericsson
+ location: Rosersberg, Sweden
+ type: production
+ link: https://wiki.opnfv.org/display/pharos/CI-ERICSSON-POD2
+##############################################################################
+jumphost:
+ name: CI-POD2-HOST
+ node: &nodeparams
+ type: baremetal
+ vendor: HP
+ model: ProLiant BL460c Gen9
+ arch: x86_64
+ cpus: 2
+ cpu_cflags: haswell
+ cores: 12
+ memory: 128G
+ disks: &disks
+ - name: 'disk1'
+ disk_capacity: 1200G
+ disk_type: hdd
+ disk_interface: scsi
+ disk_rotation: 15000
+ os: ubuntu-16.04
+ remote_params: &remoteparas
+ type: ipmi
+ versions:
+ - 1.0
+ - 2.0
+ user: opnfv
+ pass: Winter2017
+ remote_management:
+ <<: *remoteparas
+ address: 172.16.2.11
+ mac_address: "58:20:B1:01:8A:F2"
+ interfaces:
+ - name: 'nic0'
+ speed: 1gb
+ features: 'dpdk|sriov'
+ address: 172.16.2.11
+ mac_address: "ec:b1:d7:a1:a1:10"
+ vlan: native
+ - name: 'nic1'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ address: 172.29.236.10
+ mac_address: "5c:b9:01:8b:9f:e8"
+ vlan: native
+ - name: 'nic2'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ address: 192.168.122.2
+ mac_address: "5c:b9:01:8b:9f:e9"
+ vlan: native
+ - name: 'nic3'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ address: 172.29.240.10
+ mac_address: "5c:b9:01:8b:9f:ec"
+ vlan: 3010
+ - name: 'nic4'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ address: 172.29.242.10
+ mac_address: "5c:b9:01:8b:9f:ed"
+ vlan: 3010
+##############################################################################
+nodes:
+ - name: node1
+ node: *nodeparams
+ disks: *disks
+ remote_management:
+ <<: *remoteparas
+ address: 172.16.2.12
+ mac_address: "58:20:B1:01:8B:F0"
+ interfaces:
+ - name: 'nic0'
+ speed: 1gb
+ features: 'dpdk|sriov'
+ mac_address: "ec:b1:d7:a2:44:a0"
+ address: "192.168.122.3"
+ vlan: native
+ - name: 'nic1'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:a6:94"
+ address: "172.29.236.11"
+ vlan: native
+ - name: 'nic2'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:a6:95"
+ address: "192.168.122.3"
+ vlan: native
+ - name: 'nic3'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:a6:80"
+ address: "172.29.240.11"
+ vlan: 3010
+ - name: 'nic4'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:a6:81"
+ address: "172.29.242.11"
+ vlan: 3010
+ ############################################################################
+ - name: node2
+ node: *nodeparams
+ disks: *disks
+ remote_management:
+ <<: *remoteparas
+ address: 172.16.2.13
+ mac_address: "58:20:B1:01:8E:FC"
+ interfaces:
+ - name: 'nic0'
+ speed: 1gb
+ features: 'dpdk|sriov'
+ mac_address: "ec:b1:d7:a2:44:80"
+ address: "192.168.122.4"
+ vlan: native
+ - name: 'nic1'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:a6:30"
+ address: "172.29.236.12"
+ vlan: native
+ - name: 'nic2'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:a6:31"
+ address: "192.168.122.4"
+ vlan: native
+ - name: 'nic3'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:99:64"
+ address: "172.29.240.12"
+ vlan: 3010
+ - name: 'nic4'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:99:65"
+ address: "172.29.242.12"
+ vlan: 3010
+ ############################################################################
+ - name: node3
+ node: *nodeparams
+ disks: *disks
+ remote_management:
+ <<: *remoteparas
+ address: 172.16.2.14
+ mac_address: "58:20:B1:01:8D:32"
+ interfaces:
+ - name: 'nic0'
+ speed: 1gb
+ features: 'dpdk|sriov'
+ mac_address: "ec:b1:d7:a2:43:c0"
+ address: "192.168.122.5"
+ vlan: native
+ - name: 'nic1'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:9d:4c"
+ address: "172.29.236.13"
+ vlan: native
+ - name: 'nic2'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:9d:4d"
+ address: "192.168.122.5"
+ vlan: native
+ - name: 'nic3'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:9d:6c"
+ address: "172.29.240.13"
+ vlan: 3010
+ - name: 'nic4'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:9d:6d"
+ address: "172.29.242.13"
+ vlan: 3010
+ ############################################################################
+ - name: node4
+ node: *nodeparams
+ disks: *disks
+ remote_management:
+ <<: *remoteparas
+ address: 172.16.2.15
+ mac_address: "58:20:B1:01:8B:FC"
+ interfaces:
+ - name: 'nic0'
+ speed: 1gb
+ features: 'dpdk|sriov'
+ mac_address: "ec:b1:d7:a1:8b:d0"
+ address: "192.168.122.6"
+ vlan: native
+ - name: 'nic1'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:a5:fc"
+ address: "172.29.236.14"
+ vlan: native
+ - name: 'nic2'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:a5:fd"
+ address: "192.168.122.6"
+ vlan: native
+ - name: 'nic3'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:a6:08"
+ address: "172.29.240.14"
+ vlan: 3010
+ - name: 'nic4'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:a6:09"
+ address: "172.29.242.14"
+ vlan: 3010
+ ############################################################################
+ - name: node5
+ node: *nodeparams
+ disks: *disks
+ remote_management:
+ <<: *remoteparas
+ address: 172.16.2.16
+ mac_address: "58:20:B1:01:8F:EA"
+ interfaces:
+ - name: 'nic0'
+ speed: 1gb
+ features: 'dpdk|sriov'
+ mac_address: "ec:b1:d7:a1:bd:60"
+ address: "192.168.122.7"
+ vlan: native
+ - name: 'nic1'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:a6:e8"
+ address: "172.29.236.15"
+ vlan: native
+ - name: 'nic2'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:a6:e9"
+ address: "192.168.122.7"
+ vlan: native
+ - name: 'nic3'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:97:14"
+ address: "172.29.240.15"
+ vlan: 3010
+ - name: 'nic4'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:97:15"
+ address: "172.29.242.15"
+ vlan: 3010
+
diff --git a/xci/var/idf.yml b/xci/var/idf.yml
index 4d743cda..8ed55f6f 100644
--- a/xci/var/idf.yml
+++ b/xci/var/idf.yml
@@ -13,6 +13,10 @@ idf:
version: 0.1
net_config: &net_config
admin:
+ interface: 2
+ network: 192.168.122.0
+ mask: 22
+ mgmt:
interface: 0
network: 172.29.236.0
mask: 22
@@ -25,34 +29,13 @@ idf:
network: 192.168.122.0
mask: 24
gateway: 192.168.122.1
- dns: 192.168.122.1
+ dns:
+ - 192.168.122.1
private:
interface: 3
network: 172.29.244.0
mask: 22
-
-xci:
- pod_name: vpod1
- net_config: *net_config
- flavors:
- mini:
- - opnfv
- - node1
- - node2
- noha:
- - opnfv
- - node1
- - node2
- - node3
- ha:
- - opnfv
- - node1
- - node2
- - node3
- - node4
- - node5
-
- osa:
+ osa: &idf_osa
nodes_roles:
opnfv: [deployment]
node1: [controller]
@@ -72,9 +55,20 @@ xci:
node3: compute01
node4: controller01
node5: controller02
- kubespray:
+ network:
+ # network mapping
+ network_mapping:
+ # Management network used by installer components to communicate
+ net-mgmt: mgmt
+ # Storage Network
+ net-storage: storage
+ # Internal network for communication between VNF
+ net-internal: private
+      # Public network for VNF remote access (ext-net in OpenStack)
+ net-vnf: public
+ kubespray: &idf_kubespray
nodes_roles:
- opnvf: [opnfv]
+ opnfv: [opnfv]
node1: [kube-master, etcd, vault]
node2: [kube-node]
node3: [kube-node]
@@ -91,7 +85,67 @@ xci:
node3: node2
node4: master2
node5: master3
-
+ network:
+ # network mapping
+ network_mapping:
+ # Management network used by installer components to communicate
+ net-mgmt: mgmt
+ # Storage Network
+ net-storage: storage
+ # Internal network for communication between VNFs
+ net-internal: private
+ # Public network for VNF remote access (ext-net in OpenStack)
+ net-vnf: public
+ osh: &idf_osh
+ nodes_roles:
+ opnfv: [opnfv]
+ node1: [kube-master, etcd, vault]
+ node2: [kube-node]
+ node3: [kube-node]
+ node4: [kube-master, etcd, vault]
+ node5: [kube-master, etcd, vault]
+ groups:
+ k8s-cluster:
+ - kube-node
+ - kube-master
+ hostnames:
+ opnfv: opnfv
+ node1: master1
+ node2: node1
+ node3: node2
+ node4: master2
+ node5: master3
+ network:
+ # network mapping
+ network_mapping:
+ # Management network used by installer components to communicate
+ net-mgmt: mgmt
+ # Storage Network
+ net-storage: storage
+ # Internal network for communication between VNFs
+ net-internal: private
+ # Public network for VNF remote access (ext-net in OpenStack)
+ net-vnf: public
+xci:
+ pod_name: vpod1
+ net_config: *net_config
+ flavors:
+ mini:
+ - opnfv
+ - node1
+ - node2
+ noha:
+ - opnfv
+ - node1
+ - node2
+ - node3
+ ha:
+ - opnfv
+ - node1
+ - node2
+ - node3
+ - node4
+ - node5
# net_config network to be used by the PXE
pxe_network: public
@@ -104,13 +158,7 @@ xci:
extra_addresses:
opnfv: 192.168.122.2
- # network mapping
- network_mapping:
- # Management network used by installer components to communicate
- net-mgmt: admin
- # Storage Network
- net-storage: storage
- # Internal network for communication between VNF
- net-internal: private
- # Public network for VNF remote acces (ext-net in Openstack)
- net-vnf: public
+ installers:
+ osa: *idf_osa
+ kubespray: *idf_kubespray
+ osh: *idf_osh
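This hunk turns idf.yml into a define-once/reference-many layout: each installer's settings (nodes_roles, groups, hostnames and the new network_mapping) are anchored where they are defined under idf: and then aliased from xci.installers, so both paths resolve to the same map once the file is loaded. Stripped to its structure, the pattern is:

    idf:
      osa: &idf_osa        # installer config defined once under idf...
        nodes_roles:
          opnfv: [deployment]
    xci:
      installers:
        osa: *idf_osa      # ...and aliased here; both paths yield the same map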
diff --git a/xci/var/lf-pod4-idf.yml b/xci/var/lf-pod4-idf.yml
new file mode 100644
index 00000000..55ca6b63
--- /dev/null
+++ b/xci/var/lf-pod4-idf.yml
@@ -0,0 +1,222 @@
+##############################################################################
+# Copyright (c) 2018 Linux Foundation, Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+### LF POD 4 installer descriptor file ###
+
+idf:
+ version: 0.1
+ installer: ['apex', 'compass4nfv', 'daisy', 'fuel', 'osa', 'osh']
+ net_config: &net_config
+ oob:
+ interface: 0
+ ip-range: 172.30.8.83-172.30.8.88
+ vlan: 410
+ mask: 24
+ admin:
+ interface: 0
+ vlan: native
+ network: 192.168.122.0
+ gateway: 192.168.122.1
+ dns: 8.8.8.8
+ mask: 24
+ mgmt:
+ interface: 1
+ network: 172.29.236.0
+ mask: 22
+ storage:
+ interface: 3
+ network: 172.29.240.0
+ mask: 24
+ private:
+ interface: 2
+ network: 172.29.242.0
+ mask: 24
+ public:
+ interface: 4
+ network: 192.168.122.0
+ mask: 24
+ gateway: 192.168.122.1
+ dns:
+ - 8.8.8.8
+ - 8.8.4.4
+ osa: &idf_osa
+ nodes_roles:
+ opnfv: [deployment]
+ pod4-node1: [controller]
+ pod4-node2: [compute, storage]
+ pod4-node3: [compute, storage]
+ pod4-node4: [controller]
+ pod4-node5: [controller]
+ groups:
+ openstack:
+ - controller
+ - compute
+ - storage
+ hostnames:
+ opnfv: opnfv
+ pod4-node1: controller00
+ pod4-node2: compute00
+ pod4-node3: compute01
+ pod4-node4: controller01
+ pod4-node5: controller02
+ network:
+ # network mapping
+ network_mapping:
+ # Management network used by installer components to communicate
+ net-mgmt: admin
+ # Storage Network
+ net-storage: storage
+ # Internal network for communication between VNFs
+ net-internal: private
+ # Public network for VNF remote access (ext-net in OpenStack)
+ net-vnf: public
+ deployment_host_interfaces:
+ # Ordered-list, index should be in sync with interface index in PDF
+ - 'eno1'
+ - 'eno3.450'
+ osh: &idf_osh
+ nodes_roles:
+ opnfv: [opnfv]
+ pod4-node1: [kube-master, etcd, vault]
+ pod4-node2: [kube-node]
+ pod4-node3: [kube-node]
+ pod4-node4: [kube-master, etcd, vault]
+ pod4-node5: [kube-master, etcd, vault]
+ groups:
+ k8s-cluster:
+ - kube-node
+ - kube-master
+ hostnames:
+ opnfv: opnfv
+ pod4-node1: master1
+ pod4-node2: node1
+ pod4-node3: node2
+ pod4-node4: master2
+ pod4-node5: master3
+ network:
+ # network mapping
+ network_mapping:
+ # Management network used by installer components to communicate
+ net-mgmt: admin
+ # Storage Network
+ net-storage: storage
+ # Internal network for communication between VNFs
+ net-internal: private
+ # Public network for VNF remote access (ext-net in OpenStack)
+ net-vnf: public
+ deployment_host_interfaces:
+ # Ordered-list, index should be in sync with interface index in PDF
+ - 'eno1'
+ - 'eno3.450'
+ kubespray: &idf_kubespray
+ nodes_roles:
+ opnfv: [opnfv]
+ pod4-node1: [kube-master, etcd, vault]
+ pod4-node2: [kube-node]
+ pod4-node3: [kube-node]
+ pod4-node4: [kube-master, etcd, vault]
+ pod4-node5: [kube-master, etcd, vault]
+ groups:
+ k8s-cluster:
+ - kube-node
+ - kube-master
+ hostnames:
+ opnfv: opnfv
+ pod4-node1: master1
+ pod4-node2: node1
+ pod4-node3: node2
+ pod4-node4: master2
+ pod4-node5: master3
+ network:
+ # network mapping
+ network_mapping:
+ # Management network used by installer components to communicate
+ net-mgmt: admin
+ # Storage Network
+ net-storage: storage
+ # Internal network for communication between VNFs
+ net-internal: private
+ # Public network for VNF remote access (ext-net in OpenStack)
+ net-vnf: public
+ deployment_host_interfaces:
+ # Ordered-list, index should be in sync with interface index in PDF
+ - 'eno1'
+ - 'eno3.450'
+ fuel:
+ jumphost:
+ bridges:
+ admin: 'pxebr'
+ mgmt: 'br-ctl'
+ private: ~
+ public: ~
+ network:
+ node:
+ # Ordered-list, index should be in sync with node index in PDF
+ - interfaces: &interfaces
+ # Ordered-list, index should be in sync with interface index in PDF
+ - 'eno1'
+ - 'eno3'
+ - 'eno4'
+ busaddr: &busaddr
+ # Bus-info reported by `ethtool -i ethX`
+ - '0000:04:00.0'
+ - '0000:02:00.0'
+ - '0000:02:00.1'
+ - interfaces: *interfaces
+ busaddr: *busaddr
+ - interfaces: *interfaces
+ busaddr: *busaddr
+ - interfaces: *interfaces
+ busaddr: *busaddr
+ - interfaces: *interfaces
+ busaddr: *busaddr
+xci:
+ pod_name: lf-pod4
+ net_config: *net_config
+ nodes_roles:
+ opnfv_host: [opnfv_host]
+ pod4-node1: [compute, storage]
+ pod4-node2: [compute, storage]
+ pod4-node3: [controller, storage]
+ pod4-node4: [controller, storage]
+ pod4-node5: [controller, storage]
+
+ # net_config network to be used by the PXE
+ pxe_network: admin
+
+ # Since bridge MAC addresses are auto-generated, we use a list of local
+ # bridges to create the libvirt networks
+ jumphost_interfaces_bridges:
+ - name: br_admin
+ ip:
+
+ extra_addresses:
+ opnfv_host: 192.168.12.2
+
+ flavors:
+ mini:
+ - opnfv
+ - pod4-node1
+ - pod4-node2
+ noha:
+ - opnfv
+ - pod4-node1
+ - pod4-node2
+ - pod4-node3
+ ha:
+ - opnfv
+ - pod4-node1
+ - pod4-node2
+ - pod4-node3
+ - pod4-node4
+ - pod4-node5
+
+ installers:
+ osa: *idf_osa
+ kubespray: *idf_kubespray
+ osh: *idf_osh
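The network_mapping blocks translate the logical network names an installer expects into sections of net_config, which carry the actual subnets. In lf-pod4's osa entry, for instance, net-mgmt: admin resolves through net_config.admin to 192.168.122.0/24, and net-storage: storage to 172.29.240.0/24. An illustrative resolved view:

    # what the installer effectively sees once the mapping is applied
    net-mgmt:               # -> net_config.admin
      network: 192.168.122.0
      mask: 24
    net-storage:            # -> net_config.storage
      network: 172.29.240.0
      mask: 24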
diff --git a/xci/var/lf-pod4-pdf.yml b/xci/var/lf-pod4-pdf.yml
new file mode 100644
index 00000000..9607e4db
--- /dev/null
+++ b/xci/var/lf-pod4-pdf.yml
@@ -0,0 +1,198 @@
+##############################################################################
+# Copyright (c) 2018 Linux Foundation, Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+### LF POD 4 descriptor file ###
+
+version: 1.0
+details:
+ pod_owner: Trevor Bramwell
+ contact: tbramwell@linuxfoundation.org
+ lab: Linux Foundation
+ location: Portland, Oregon, USA
+ type: development
+ link: https://wiki.opnfv.org/display/pharos/LF+POD+4
+jumphost:
+ name: pod4-jump
+ node: &nodeparams
+ type: baremetal
+ vendor: Intel Corporation
+ model: S2600WT2R
+ arch: x86_64
+ cpus: 88
+ cpu_cflags: haswell
+ cores: 22
+ memory: 62G
+ disks: &disks
+ - name: 'disk1'
+ disk_capacity: 480G
+ disk_type: ssd
+ disk_interface: sata
+ disk_rotation: 0
+ os: centos-7
+ remote_params: &remote_params
+ type: ipmi
+ versions:
+ - 2.0
+ user: admin
+ pass: octopus
+ remote_management:
+ <<: *remote_params
+ address: 172.30.8.83
+ mac_address: "a4:bf:01:01:b0:bb"
+ interfaces:
+ - name: nic1
+ speed: 1gb
+ features: 'dpdk|sriov'
+ vlan: native
+ mac_address: "a4:bf:01:01:b0:b9"
+ address: 192.168.12.1
+ - name: nic2
+ speed: 10gb
+ features: 'dpdk|sriov'
+ vlan: 450
+ mac_address: "00:1e:67:fd:9a:04"
+ address: 192.168.0.2
+ - name: nic3
+ speed: 10gb
+ features: 'dpdk|sriov'
+ vlan: 452
+ mac_address: "00:1e:67:fd:9a:04"
+ address: 192.168.2.2
+ - name: nic4
+ speed: 10gb
+ features: 'dpdk|sriov'
+ vlan: 451
+ mac_address: "00:1e:67:fd:9a:05"
+ address: 192.168.1.2
+ - name: nic5
+ speed: 10gb
+ features: 'dpdk|sriov'
+ vlan: 414
+ mac_address: "00:1e:67:fd:9a:05"
+ address: 172.30.12.83
+##############################################################################
+nodes:
+ - name: pod4-node1
+ node: *nodeparams
+ disks: *disks
+ remote_management:
+ <<: *remote_params
+ address: 172.30.8.84
+ mac_address: "a4:bf:01:01:ab:b6"
+ interfaces:
+ - mac_address: "a4:bf:01:01:ab:b4"
+ address: 192.168.122.3
+ vlan: native
+ - mac_address: "00:1e:67:fd:9b:32"
+ address: 172.29.236.11
+ vlan: 450
+ - mac_address: "00:1e:67:fd:9b:32"
+ address: 192.168.122.3
+ vlan: 452
+ - mac_address: "00:1e:67:fd:9b:33"
+ address: 172.29.240.11
+ vlan: 451
+ - mac_address: "00:1e:67:fd:9b:33"
+ address: 172.29.242.11
+ vlan: 414
+ ############################################################################
+ - name: pod4-node2
+ node: *nodeparams
+ disks: *disks
+ remote_management:
+ <<: *remote_params
+ address: 172.30.8.85
+ mac_address: "a4:bf:01:01:b6:97"
+ interfaces:
+ - mac_address: "a4:bf:01:01:b6:95"
+ address: 192.168.122.4
+ vlan: native
+ - mac_address: "00:1e:67:fd:98:e2"
+ address: 172.29.236.12
+ vlan: 450
+ - mac_address: "00:1e:67:fd:98:e2"
+ address: 192.168.122.4
+ vlan: 452
+ - mac_address: "00:1e:67:fd:98:e3"
+ address: 172.29.240.12
+ vlan: 451
+ - mac_address: "00:1e:67:fd:98:e3"
+ address: 172.29.242.12
+ vlan: 414
+ ############################################################################
+ - name: pod4-node3
+ node: *nodeparams
+ disks: *disks
+ remote_management:
+ <<: *remote_params
+ address: 172.30.8.86
+ mac_address: "a4:bf:01:01:66:fe"
+ interfaces:
+ - mac_address: "a4:bf:01:01:66:fc"
+ address: 192.168.122.5
+ vlan: native
+ - mac_address: "00:1e:67:fd:9c:c8"
+ address: 172.29.236.13
+ vlan: 450
+ - mac_address: "00:1e:67:fd:9c:c8"
+ address: 192.168.122.5
+ vlan: 452
+ - mac_address: "00:1e:67:fd:9c:c9"
+ address: 172.29.240.13
+ vlan: 451
+ - mac_address: "00:1e:67:fd:9c:c9"
+ address: 172.29.242.13
+ vlan: 414
+ ############################################################################
+ - name: pod4-node4
+ node: *nodeparams
+ disks: *disks
+ remote_management:
+ <<: *remote_params
+ address: 172.30.8.87
+ mac_address: "a4:bf:01:01:b2:f5"
+ interfaces:
+ - mac_address: "a4:bf:01:01:b2:f3"
+ address: 192.168.122.6
+ vlan: native
+ - mac_address: "00:1e:67:fd:9b:38"
+ address: 172.29.236.14
+ vlan: 450
+ - mac_address: "00:1e:67:fd:9b:38"
+ address: 192.168.122.6
+ vlan: 452
+ - mac_address: "00:1e:67:fd:9b:39"
+ address: 172.29.240.14
+ vlan: 451
+ - mac_address: "00:1e:67:fd:9b:39"
+ address: 172.29.242.14
+ vlan: 414
+ ############################################################################
+ - name: pod4-node5
+ node: *nodeparams
+ disks: *disks
+ remote_management:
+ <<: *remote_params
+ address: 172.30.8.88
+ mac_address: "a4:bf:01:01:b5:11"
+ interfaces:
+ - mac_address: "a4:bf:01:01:b5:0f"
+ address: 192.168.122.7
+ vlan: native
+ - mac_address: "00:1e:67:fd:99:40"
+ address: 172.29.236.15
+ vlan: 450
+ - mac_address: "00:1e:67:fd:99:40"
+ address: 192.168.122.7
+ vlan: 452
+ - mac_address: "00:1e:67:fd:99:41"
+ address: 172.29.240.15
+ vlan: 451
+ - mac_address: "00:1e:67:fd:99:41"
+ address: 172.29.242.15
+ vlan: 414
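Each node's five interface entries describe only three physical ports: the repeated MAC addresses mark the two 10G ports, each carrying two tagged VLANs on top of the untagged 1G admin port. For pod4-node1 the entries above fold down to:

    # port 1  a4:bf:01:01:ab:b4  untagged  -> 192.168.122.3   (admin)
    # port 2  00:1e:67:fd:9b:32  vlan 450  -> 172.29.236.11   (mgmt)
    #                            vlan 452  -> 192.168.122.3
    # port 3  00:1e:67:fd:9b:33  vlan 451  -> 172.29.240.11   (storage)
    #                            vlan 414  -> 172.29.242.11   (private)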
diff --git a/xci/var/opnfv.yml b/xci/var/opnfv.yml
index e7e3b76c..91b9ee38 100644
--- a/xci/var/opnfv.yml
+++ b/xci/var/opnfv.yml
@@ -28,8 +28,18 @@ openstack_osa_haproxy_git_url: "{{ lookup('env','OPENSTACK_OSA_HAPROXY_GIT_URL')
# kubespray variables
kubespray_git_url: "{{ lookup('env','KUBESPRAY_GIT_URL') }}"
kubespray_version: "{{ lookup('env','KUBESPRAY_VERSION') }}"
+kubernetes_version: "{{ lookup('env','KUBERNETES_VERSION') }}"
xci_kube_ansible_pip_version: "{{ lookup('env','XCI_KUBE_ANSIBLE_PIP_VERSION') }}"
+# openstack-helm variables
+osh_git_url: "{{ lookup('env','OSH_GIT_URL') }}"
+osh_version: "{{ lookup('env','OSH_VERSION') }}"
+osh_infra_git_url: "{{ lookup('env','OSH_INFRA_GIT_URL') }}"
+osh_infra_version: "{{ lookup('env','OSH_INFRA_VERSION') }}"
+osh_helm_binary_url: "{{ lookup('env','OSH_HELM_BINARY_URL') }}"
+osh_helm_binary_version: "{{ lookup('env','OSH_HELM_BINARY_VERSION') }}"
+openstack_osh_version: "{{ lookup('env','OPENSTACK_OSH_VERSION') }}"
+
# variables for other components
keepalived_git_url: "{{ lookup('env','KEEPALIVED_GIT_URL') }}"
haproxy_version: "{{ lookup('env','HAPROXY_VERSION') }}"
@@ -49,3 +59,7 @@ run_tempest: "{{ lookup('env', 'RUN_TEMPEST') }}"
core_openstack_install: "{{ lookup('env', 'CORE_OPENSTACK_INSTALL') }}"
deploy_scenario: "{{ lookup('env','DEPLOY_SCENARIO') }}"
installer_type: "{{ lookup('env','INSTALLER_TYPE') }}"
+osh_distro: "{{ lookup('env', 'OSH_DISTRO') }}"
+
+# baremetal variables
+baremetal: "{{ lookup('env','BAREMETAL') }}"
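All of these variables are resolved from the environment at run time, so values exported by the scripts under xci/config reach the playbooks without being hard-coded here. A hypothetical task consuming the new openstack-helm variables (the checkout path is illustrative):

    - name: Fetch openstack-helm at the pinned version
      git:
        repo: "{{ osh_git_url }}"
        version: "{{ osh_version }}"
        dest: /opt/openstack-helm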
diff --git a/xci/var/opnfv_vm_idf.yml b/xci/var/opnfv_vm_idf.yml
new file mode 100644
index 00000000..fa647287
--- /dev/null
+++ b/xci/var/opnfv_vm_idf.yml
@@ -0,0 +1,19 @@
+---
+##############################################################################
+# Copyright (c) 2017 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+opnfv_vm_idf:
+ version: 0.1
+ net_config: &net_config
+ admin:
+ interface: 0
+ network: 192.168.122.0
+ mask: 24
+ mgmt:
+ interface: 1
+ network: 172.29.236.0
+ mask: 22
diff --git a/xci/var/opnfv_vm.yml b/xci/var/opnfv_vm_pdf.yml
index 17f5038c..51371388 100644
--- a/xci/var/opnfv_vm.yml
+++ b/xci/var/opnfv_vm_pdf.yml
@@ -6,7 +6,7 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-opnfv_vm:
+opnfv_vm_pdf:
name: opnfv
node: &nodeparams
type: virtual
@@ -36,32 +36,18 @@ opnfv_vm:
name: nic1
speed:
features:
- vlan: 10
+ vlan: native
interface_common_nic2: &interface_common_nic2
name: nic2
speed:
features:
- vlan: 20
- interface_common_nic3: &interface_common_nic3
- name: nic3
- speed:
- features:
vlan: native
- interface_common_nic4: &interface_common_nic4
- name: nic4
- speed:
- features:
- vlan: 30
interfaces:
- - mac_address: "52:54:00:33:82:d0"
- address: 172.29.236.10
- <<: *interface_common_nic1
- - mac_address: "52:54:00:33:82:d0"
- address: 172.29.244.10
- <<: *interface_common_nic2
- - mac_address: "52:54:00:33:82:d0"
- address: 192.168.122.2
- <<: *interface_common_nic3
- - mac_address: "52:54:00:33:82:d0"
- address: 172.29.240.10
- <<: *interface_common_nic4
+ - mac_address: "52:54:00:33:82:d0"
+ address: 192.168.122.2
+ gateway: 192.168.122.1
+ <<: *interface_common_nic1
+ - mac_address: "52:54:00:33:82:d1"
+ address: 172.29.236.10
+ gateway: 172.29.236.1
+ <<: *interface_common_nic2
diff --git a/xci/xci-deploy.sh b/xci/xci-deploy.sh
index c1654151..d9c41968 100755
--- a/xci/xci-deploy.sh
+++ b/xci/xci-deploy.sh
@@ -50,6 +50,11 @@ done
unset user_local_dev_vars local_user_var
#
+# Parse command line options
+#
+parse_cmdline_opts "$@"
+
+#
# Bootstrap environment for XCI Deployment
#
echo "Info: Preparing host environment for the XCI deployment"