Diffstat (limited to 'xci')
-rwxr-xr-x xci/config/pinned-versions | 5
-rw-r--r-- xci/files/install-lib.sh | 4
-rwxr-xr-x xci/files/xci-destroy-env.sh | 2
-rwxr-xr-x xci/installer/kubespray/deploy.sh | 4
-rw-r--r-- xci/installer/kubespray/playbooks/configure-kubenet.yml | 1
-rw-r--r-- xci/installer/kubespray/playbooks/configure-opnfvhost.yml | 15
-rwxr-xr-x xci/installer/osa/deploy.sh | 12
-rw-r--r-- xci/installer/osa/files/aio/flavor-vars.yml | 3
-rw-r--r-- xci/installer/osa/files/aio/inventory | 2
-rw-r--r-- xci/installer/osa/files/ansible-role-requirements.yml | 6
-rw-r--r-- xci/installer/osa/files/ha/flavor-vars.yml | 39
-rw-r--r-- xci/installer/osa/files/ha/inventory | 15
-rw-r--r-- xci/installer/osa/files/mini/flavor-vars.yml | 21
-rw-r--r-- xci/installer/osa/files/mini/inventory | 12
-rw-r--r-- xci/installer/osa/files/noha/flavor-vars.yml | 27
-rw-r--r-- xci/installer/osa/files/noha/inventory | 13
-rw-r--r-- xci/installer/osa/files/user_variables_xci.yml (renamed from xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/ha/user_variables_os-odl-nofeature-ha.yml) | 11
-rw-r--r-- xci/installer/osa/playbooks/configure-opnfvhost.yml | 13
-rw-r--r-- xci/installer/osa/playbooks/configure-targethosts.yml | 1
-rw-r--r-- xci/opnfv-scenario-requirements.yml | 38
-rw-r--r-- xci/playbooks/bootstrap-scenarios.yml | 43
-rw-r--r-- xci/playbooks/configure-localhost.yml | 1
-rwxr-xr-x xci/playbooks/dynamic_inventory.py | 161
-rw-r--r-- xci/playbooks/get-opnfv-scenario-requirements.yml | 177
-rw-r--r-- xci/playbooks/roles/bootstrap-host/tasks/network.yml | 95
-rw-r--r-- xci/playbooks/roles/bootstrap-host/tasks/network_debian.yml | 58
-rw-r--r-- xci/playbooks/roles/bootstrap-host/tasks/network_redhat.yml | 37
-rw-r--r-- xci/playbooks/roles/bootstrap-host/tasks/network_suse.yml | 45
-rw-r--r-- xci/playbooks/roles/bootstrap-host/templates/debian/compute00.interface.j2 | 75
l--------- xci/playbooks/roles/bootstrap-host/templates/debian/compute01.interface.j2 | 1
-rw-r--r-- xci/playbooks/roles/bootstrap-host/templates/debian/controller00.interface.j2 | 74
l--------- xci/playbooks/roles/bootstrap-host/templates/debian/controller01.interface.j2 | 1
l--------- xci/playbooks/roles/bootstrap-host/templates/debian/controller02.interface.j2 | 1
-rw-r--r-- xci/playbooks/roles/bootstrap-host/templates/debian/opnfv.interface.j2 | 66
-rw-r--r-- xci/playbooks/roles/bootstrap-host/templates/osa/debian.interface.j2 | 36
-rw-r--r-- xci/playbooks/roles/bootstrap-host/templates/osa/redhat.interface.j2 | 19
-rw-r--r-- xci/playbooks/roles/bootstrap-host/templates/osa/suse.interface.j2 (renamed from xci/playbooks/roles/bootstrap-host/templates/suse/suse.interface.j2) | 4
-rw-r--r-- xci/playbooks/roles/bootstrap-host/templates/osa/suse.routes.j2 | 1
-rw-r--r-- xci/playbooks/roles/bootstrap-host/templates/redhat/bridge.ifcfg.j2 | 9
-rw-r--r-- xci/playbooks/roles/bootstrap-host/templates/redhat/interface.ifcfg.j2 | 10
-rw-r--r-- xci/playbooks/roles/bootstrap-host/templates/suse/suse.routes.j2 | 1
-rw-r--r-- xci/scenarios/README.rst | 1
-rw-r--r-- xci/scenarios/k8-calico-nofeature/role/k8-calico-nofeature/files/k8s-cluster.yml | 292
-rw-r--r-- xci/scenarios/k8-calico-nofeature/role/k8-calico-nofeature/tasks/main.yml | 14
-rw-r--r-- xci/scenarios/k8-canal-nofeature/role/k8-canal-nofeature/files/k8s-cluster.yml | 292
-rw-r--r-- xci/scenarios/k8-canal-nofeature/role/k8-canal-nofeature/tasks/main.yml | 14
-rw-r--r-- xci/scenarios/k8-flannel-nofeature/role/k8-flannel-nofeature/files/k8-cluster.yml | 292
-rw-r--r-- xci/scenarios/k8-flannel-nofeature/role/k8-flannel-nofeature/tasks/main.yml | 14
-rw-r--r-- xci/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature/files/k8s-cluster.yml | 292
-rw-r--r-- xci/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature/tasks/.gitkeep | 0
-rw-r--r-- xci/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature/tasks/main.yml | 14
-rw-r--r-- xci/scenarios/os-nosdn-nofeature/README.rst | 2
-rw-r--r-- xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/ha/openstack_user_config.yml | 255
-rw-r--r-- xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/mini/openstack_user_config.yml | 170
-rw-r--r-- xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/noha/openstack_user_config.yml | 172
-rw-r--r-- xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/user_variables_os-nosdn-nofeature.yml | 35
-rw-r--r-- xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/tasks/main.yml | 18
-rw-r--r-- xci/scenarios/os-odl-nofeature/.gitkeep | 0
-rw-r--r-- xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/ha/openstack_user_config.yml | 256
-rw-r--r-- xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/mini/openstack_user_config.yml | 171
-rw-r--r-- xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/noha/openstack_user_config.yml | 173
-rw-r--r-- xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/tasks/main.yml | 26
-rw-r--r-- xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/templates/user_variables_os-odl-nofeature.yml.j2 | 45
-rw-r--r-- xci/scenarios/os-odl-nofeature/vars/main.yml | 2
-rw-r--r-- xci/scenarios/os-odl-nofeature/xci_overrides | 7
-rwxr-xr-x xci/scripts/vm/start-new-vm.sh | 2
-rwxr-xr-x xci/xci-deploy.sh | 50
-rw-r--r-- xci/var/idf.yml | 69
68 files changed, 599 insertions, 3238 deletions
diff --git a/xci/config/pinned-versions b/xci/config/pinned-versions
index 72a0ff61..ccfc2704 100755
--- a/xci/config/pinned-versions
+++ b/xci/config/pinned-versions
@@ -43,6 +43,5 @@ export KEEPALIVED_VERSION=$(grep -E '.*name: keepalived' -A 3 \
export HAPROXY_VERSION=$(grep -E '.*name: haproxy_server' -A 3 \
${XCI_PATH}/xci/installer/osa/files/ansible-role-requirements.yml \
| tail -n1 | sed -n 's/\(^.*: \)\([0-9a-z].*$\)/\2/p')
-# HEAD of kubspray "master" as of 27.02.2018
-# kubespray's bug Reference: https://github.com/kubernetes-incubator/kubespray/issues/2400
-export KUBESPRAY_VERSION=${KUBESPRAY_VERSION:-"5d9bb300d716880610c34dd680c167d2d728984d"}
+# HEAD of kubespray "master" as of 16.05.2018
+export KUBESPRAY_VERSION=${KUBESPRAY_VERSION:-"38e727dbe1bdf5316fae8d645718cc8279fbda20"}
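The new pin moves Kubespray forward to the 16.05.2018 HEAD and drops the now-resolved bug reference. Because the export uses the ${KUBESPRAY_VERSION:-...} expansion, the pin is only a default; a minimal sketch of overriding it from the environment when invoking the deploy entry point (xci-deploy.sh appears later in this change set):

    # Use the pinned SHA (the default above):
    ./xci-deploy.sh

    # Try a different Kubespray revision without editing pinned-versions;
    # "master" is illustrative -- any SHA, branch, or tag accepted by git works.
    KUBESPRAY_VERSION=master ./xci-deploy.sh
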
diff --git a/xci/files/install-lib.sh b/xci/files/install-lib.sh
index 43e1213e..af86be41 100644
--- a/xci/files/install-lib.sh
+++ b/xci/files/install-lib.sh
@@ -34,6 +34,7 @@ function install_ansible() {
python
venv
wget
+ curl
)
source /etc/os-release || source /usr/lib/os-release
@@ -54,6 +55,7 @@ function install_ansible() {
[python-devel]=python-devel
[venv]=python-virtualenv
[wget]=wget
+ [curl]=curl
)
EXTRA_PKG_DEPS=( python-xml )
sudo zypper -n ref
@@ -81,6 +83,7 @@ function install_ansible() {
[python-devel]=libpython-dev
[venv]=python-virtualenv
[wget]=wget
+ [curl]=curl
)
EXTRA_PKG_DEPS=( apt-utils )
sudo apt-get update
@@ -103,6 +106,7 @@ function install_ansible() {
[python-devel]=python-devel
[venv]=python-virtualenv
[wget]=wget
+ [curl]=curl
)
sudo $PKG_MANAGER updateinfo
EXTRA_PKG_DEPS=( deltarpm )
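curl has to be added once per distribution branch because install_ansible() resolves generic package names through a per-distro map before installing. A minimal standalone sketch of that pattern, assuming bash 4+ associative arrays as the function already uses (package values here follow the Debian branch):

    declare -A PKG_MAP=(
        [python-devel]=libpython-dev   # distro-specific value; the zypper/yum maps differ
        [wget]=wget
        [curl]=curl                    # the newly added generic dependency
    )
    # Resolve generic names to distro packages, then install in one transaction.
    to_install=()
    for pkg in python-devel wget curl; do
        to_install+=("${PKG_MAP[$pkg]}")
    done
    sudo apt-get install -y "${to_install[@]}"
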
diff --git a/xci/files/xci-destroy-env.sh b/xci/files/xci-destroy-env.sh
index 97b76c7c..3de21795 100755
--- a/xci/files/xci-destroy-env.sh
+++ b/xci/files/xci-destroy-env.sh
@@ -21,6 +21,8 @@ rm -rf /opt/stack
# HOME is normally set by sudo -H
rm -rf ${HOME}/.config/openstack
rm -rf ${HOME}/.ansible
+# Wipe repos
+rm -rf ${XCI_CACHE}/repos
# bifrost installs everything on venv so we need to look there if virtualbmc is not installed on the host.
if which vbmc &>/dev/null || { [[ -e ${XCI_VENV}/bifrost/bin/activate ]] && source ${XCI_VENV}/bifrost/bin/activate; }; then
diff --git a/xci/installer/kubespray/deploy.sh b/xci/installer/kubespray/deploy.sh
index 1a0b34bc..02a9d430 100755
--- a/xci/installer/kubespray/deploy.sh
+++ b/xci/installer/kubespray/deploy.sh
@@ -75,13 +75,13 @@ fi
echo "Info: Using kubespray to deploy the kubernetes cluster"
echo "-----------------------------------------------------------------------"
ssh root@$OPNFV_HOST_IP "set -o pipefail; cd releng-xci/.cache/repos/kubespray;\
- ansible-playbook ${XCI_ANSIBLE_PARAMS} \
+ ansible-playbook \
-i opnfv_inventory/inventory.cfg cluster.yml -b | tee setup-kubernetes.log"
scp root@$OPNFV_HOST_IP:~/releng-xci/.cache/repos/kubespray/setup-kubernetes.log \
$LOG_PATH/setup-kubernetes.log
cd $K8_XCI_PLAYBOOKS
-ansible-playbook ${XCI_ANSIBLE_PARAMS} -e XCI_PATH="${XCI_PATH}" \
+ansible-playbook ${XCI_ANSIBLE_PARAMS} \
-i ${XCI_FLAVOR_ANSIBLE_FILE_PATH}/inventory/inventory.cfg \
configure-kubenet.yml
echo
diff --git a/xci/installer/kubespray/playbooks/configure-kubenet.yml b/xci/installer/kubespray/playbooks/configure-kubenet.yml
index 1c3740b2..3b1cb013 100644
--- a/xci/installer/kubespray/playbooks/configure-kubenet.yml
+++ b/xci/installer/kubespray/playbooks/configure-kubenet.yml
@@ -13,6 +13,7 @@
# so cbr0 interfaces can talk to each other.
- name: Prepare networking for kubenet
hosts: k8s-cluster
+ remote_user: root
gather_facts: True
become: yes
vars_files:
diff --git a/xci/installer/kubespray/playbooks/configure-opnfvhost.yml b/xci/installer/kubespray/playbooks/configure-opnfvhost.yml
index ac8988da..0b38060b 100644
--- a/xci/installer/kubespray/playbooks/configure-opnfvhost.yml
+++ b/xci/installer/kubespray/playbooks/configure-opnfvhost.yml
@@ -43,10 +43,20 @@
file:
path: "{{ remote_xci_path }}/.cache/repos/kubespray/opnfv_inventory/group_vars"
state: directory
- - include: "{{ xci_path }}/xci/playbooks/bootstrap-scenarios.yml"
+
+ - name: Reload XCI deployment host facts
+ setup:
+ filter: ansible_local
+ gather_subset: "!all"
+ delegate_to: 127.0.0.1
+
+ - name: Prepare everything to run the {{ deploy_scenario }} role
+ include_role:
+ name: "{{ hostvars['opnfv'].ansible_local.xci.scenarios.role }}"
+
- name: Install required packages
package:
- name: "{{ kube_require_packages[ansible_pkg_mgr] }}"
+ name: "{{ (ansible_pkg_mgr == 'zypper') | ternary('dbus-1', 'dbus') }}"
state: present
update_cache: "{{ (ansible_pkg_mgr == 'apt') | ternary('yes', omit) }}"
when: xci_flavor == 'aio'
@@ -64,6 +74,7 @@
with_items:
- { name: 'ansible', version: "{{ xci_kube_ansible_pip_version }}" }
- { name: 'netaddr' }
+ - { name: 'ansible-modules-hashivault' }
- name: Configure SSL certificates
include_tasks: "{{ xci_path }}/xci/playbooks/manage-ssl-certs.yml"
diff --git a/xci/installer/osa/deploy.sh b/xci/installer/osa/deploy.sh
index 6dada3f5..4542a4e3 100755
--- a/xci/installer/osa/deploy.sh
+++ b/xci/installer/osa/deploy.sh
@@ -58,7 +58,7 @@ echo "Info: Configuring opnfv deployment host for openstack-ansible"
echo "-----------------------------------------------------------------------"
cd $OSA_XCI_PLAYBOOKS
ansible-galaxy install -r ${XCI_PATH}/xci/files/requirements.yml -p $HOME/.ansible/roles
-ansible-playbook ${XCI_ANSIBLE_PARAMS} -i ${XCI_FLAVOR_ANSIBLE_FILE_PATH}/inventory \
+ansible-playbook ${XCI_ANSIBLE_PARAMS} -i ${XCI_PLAYBOOKS}/dynamic_inventory.py \
configure-opnfvhost.yml
echo "-----------------------------------------------------------------------"
echo "Info: Configured opnfv deployment host for openstack-ansible"
@@ -78,7 +78,7 @@ if [[ $XCI_FLAVOR != "aio" ]]; then
echo "Info: Configuring target hosts for openstack-ansible"
echo "-----------------------------------------------------------------------"
cd $OSA_XCI_PLAYBOOKS
- ansible-playbook ${XCI_ANSIBLE_PARAMS} -i ${XCI_FLAVOR_ANSIBLE_FILE_PATH}/inventory \
+ ansible-playbook ${XCI_ANSIBLE_PARAMS} -i ${XCI_PLAYBOOKS}/dynamic_inventory.py \
configure-targethosts.yml
echo "-----------------------------------------------------------------------"
echo "Info: Configured target hosts"
@@ -91,7 +91,7 @@ fi
#-------------------------------------------------------------------------------
echo "Info: Setting up target hosts for openstack-ansible"
echo "-----------------------------------------------------------------------"
-ssh root@$OPNFV_HOST_IP "set -o pipefail; openstack-ansible ${XCI_ANSIBLE_PARAMS} \
+ssh root@$OPNFV_HOST_IP "set -o pipefail; openstack-ansible \
releng-xci/.cache/repos/openstack-ansible/playbooks/setup-hosts.yml | tee setup-hosts.log "
scp root@$OPNFV_HOST_IP:~/setup-hosts.log $LOG_PATH/setup-hosts.log
echo "-----------------------------------------------------------------------"
@@ -113,7 +113,7 @@ echo "Info: Set up target hosts for openstack-ansible successfuly"
echo "Info: Gathering facts"
echo "-----------------------------------------------------------------------"
ssh root@$OPNFV_HOST_IP "set -o pipefail; cd releng-xci/.cache/repos/openstack-ansible/playbooks; \
- ansible ${XCI_ANSIBLE_PARAMS} -m setup -a gather_subset=network,hardware,virtual all"
+ ansible -m setup -a gather_subset=network,hardware,virtual all"
echo "-----------------------------------------------------------------------"
#-------------------------------------------------------------------------------
@@ -124,7 +124,7 @@ echo "-----------------------------------------------------------------------"
echo "Info: Setting up infrastructure"
echo "-----------------------------------------------------------------------"
echo "xci: running ansible playbook setup-infrastructure.yml"
-ssh root@$OPNFV_HOST_IP "set -o pipefail; openstack-ansible ${XCI_ANSIBLE_PARAMS} \
+ssh root@$OPNFV_HOST_IP "set -o pipefail; openstack-ansible \
releng-xci/.cache/repos/openstack-ansible/playbooks/setup-infrastructure.yml | tee setup-infrastructure.log"
scp root@$OPNFV_HOST_IP:~/setup-infrastructure.log $LOG_PATH/setup-infrastructure.log
echo "-----------------------------------------------------------------------"
@@ -153,7 +153,7 @@ echo "Info: Database cluster verification successful!"
#-------------------------------------------------------------------------------
echo "Info: Installing OpenStack on target hosts"
echo "-----------------------------------------------------------------------"
-ssh root@$OPNFV_HOST_IP "set -o pipefail; openstack-ansible ${XCI_ANSIBLE_PARAMS} \
+ssh root@$OPNFV_HOST_IP "set -o pipefail; openstack-ansible \
releng-xci/.cache/repos/openstack-ansible/playbooks/setup-openstack.yml | tee opnfv-setup-openstack.log"
scp root@$OPNFV_HOST_IP:~/opnfv-setup-openstack.log $LOG_PATH/opnfv-setup-openstack.log
echo "-----------------------------------------------------------------------"
diff --git a/xci/installer/osa/files/aio/flavor-vars.yml b/xci/installer/osa/files/aio/flavor-vars.yml
deleted file mode 100644
index 6ac1e0fe..00000000
--- a/xci/installer/osa/files/aio/flavor-vars.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-# this file is added intentionally in order to simplify putting files in place
-# in future, it might contain vars specific to this flavor
diff --git a/xci/installer/osa/files/aio/inventory b/xci/installer/osa/files/aio/inventory
deleted file mode 100644
index fa2a1009..00000000
--- a/xci/installer/osa/files/aio/inventory
+++ /dev/null
@@ -1,2 +0,0 @@
-[deployment]
-opnfv ansible_ssh_host=192.168.122.2
diff --git a/xci/installer/osa/files/ansible-role-requirements.yml b/xci/installer/osa/files/ansible-role-requirements.yml
index 5ecbf155..f631c839 100644
--- a/xci/installer/osa/files/ansible-role-requirements.yml
+++ b/xci/installer/osa/files/ansible-role-requirements.yml
@@ -48,7 +48,7 @@
- name: lxc_hosts
scm: git
src: https://github.com/openstack/openstack-ansible-lxc_hosts
- version: 400f0c80b9c531a792dc01ff12cf1f3b3bd69a2d
+ version: e77c3a923bc8aae4a264917f592b58b5d1c79aed
- name: memcached_server
scm: git
src: https://github.com/openstack/openstack-ansible-memcached_server
@@ -160,7 +160,7 @@
- name: repo_build
scm: git
src: https://github.com/openstack/openstack-ansible-repo_build
- version: 0e50a282b09f62670494ada2f7d42509c148067f
+ version: ec5210e812e48dac30eca4c188a9d159c5cdacbf
- name: repo_server
scm: git
src: https://github.com/openstack/openstack-ansible-repo_server
@@ -200,7 +200,7 @@
- name: opendaylight
scm: git
src: https://github.com/opendaylight/integration-packaging-ansible-opendaylight
- version: 1f0f943499dcdd28a1b6971992c46bb4513ce8fb
+ version: 9d5951c39da7722c71632a10ec53e7ab93b8ac9b
- name: haproxy_endpoints
scm: git
src: https://github.com/logan2211/ansible-haproxy-endpoints
diff --git a/xci/installer/osa/files/ha/flavor-vars.yml b/xci/installer/osa/files/ha/flavor-vars.yml
deleted file mode 100644
index 167502c9..00000000
--- a/xci/installer/osa/files/ha/flavor-vars.yml
+++ /dev/null
@@ -1,39 +0,0 @@
----
-host_info: {
- 'opnfv': {
- 'VLAN_IP': '192.168.122.2',
- 'MGMT_IP': '172.29.236.10',
- 'VXLAN_IP': '172.29.240.10',
- 'STORAGE_IP': '172.29.244.10'
- },
- 'controller00': {
- 'VLAN_IP': '192.168.122.3',
- 'MGMT_IP': '172.29.236.11',
- 'VXLAN_IP': '172.29.240.11',
- 'STORAGE_IP': '172.29.244.11'
- },
- 'controller01': {
- 'VLAN_IP': '192.168.122.4',
- 'MGMT_IP': '172.29.236.12',
- 'VXLAN_IP': '172.29.240.12',
- 'STORAGE_IP': '172.29.244.12'
- },
- 'controller02': {
- 'VLAN_IP': '192.168.122.5',
- 'MGMT_IP': '172.29.236.13',
- 'VXLAN_IP': '172.29.240.13',
- 'STORAGE_IP': '172.29.244.13'
- },
- 'compute00': {
- 'VLAN_IP': '192.168.122.6',
- 'MGMT_IP': '172.29.236.14',
- 'VXLAN_IP': '172.29.240.14',
- 'STORAGE_IP': '172.29.244.14'
- },
- 'compute01': {
- 'VLAN_IP': '192.168.122.7',
- 'MGMT_IP': '172.29.236.15',
- 'VXLAN_IP': '172.29.240.15',
- 'STORAGE_IP': '172.29.244.15'
- }
-}
diff --git a/xci/installer/osa/files/ha/inventory b/xci/installer/osa/files/ha/inventory
deleted file mode 100644
index f5d882ef..00000000
--- a/xci/installer/osa/files/ha/inventory
+++ /dev/null
@@ -1,15 +0,0 @@
-[deployment]
-opnfv ansible_ssh_host=192.168.122.2
-
-[controller]
-controller00 ansible_ssh_host=192.168.122.3
-controller01 ansible_ssh_host=192.168.122.4
-controller02 ansible_ssh_host=192.168.122.5
-
-[compute]
-compute00 ansible_ssh_host=192.168.122.6
-compute01 ansible_ssh_host=192.168.122.7
-
-[openstack:children]
-controller
-compute
diff --git a/xci/installer/osa/files/mini/flavor-vars.yml b/xci/installer/osa/files/mini/flavor-vars.yml
deleted file mode 100644
index 0d446ba2..00000000
--- a/xci/installer/osa/files/mini/flavor-vars.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-host_info: {
- 'opnfv': {
- 'VLAN_IP': '192.168.122.2',
- 'MGMT_IP': '172.29.236.10',
- 'VXLAN_IP': '172.29.240.10',
- 'STORAGE_IP': '172.29.244.10'
- },
- 'controller00': {
- 'VLAN_IP': '192.168.122.3',
- 'MGMT_IP': '172.29.236.11',
- 'VXLAN_IP': '172.29.240.11',
- 'STORAGE_IP': '172.29.244.11'
- },
- 'compute00': {
- 'VLAN_IP': '192.168.122.4',
- 'MGMT_IP': '172.29.236.12',
- 'VXLAN_IP': '172.29.240.12',
- 'STORAGE_IP': '172.29.244.12'
- },
-}
diff --git a/xci/installer/osa/files/mini/inventory b/xci/installer/osa/files/mini/inventory
deleted file mode 100644
index 4224131f..00000000
--- a/xci/installer/osa/files/mini/inventory
+++ /dev/null
@@ -1,12 +0,0 @@
-[deployment]
-opnfv ansible_ssh_host=192.168.122.2
-
-[controller]
-controller00 ansible_ssh_host=192.168.122.3
-
-[compute]
-compute00 ansible_ssh_host=192.168.122.4
-
-[openstack:children]
-controller
-compute
diff --git a/xci/installer/osa/files/noha/flavor-vars.yml b/xci/installer/osa/files/noha/flavor-vars.yml
deleted file mode 100644
index 3c69a34b..00000000
--- a/xci/installer/osa/files/noha/flavor-vars.yml
+++ /dev/null
@@ -1,27 +0,0 @@
----
-host_info: {
- 'opnfv': {
- 'VLAN_IP': '192.168.122.2',
- 'MGMT_IP': '172.29.236.10',
- 'VXLAN_IP': '172.29.240.10',
- 'STORAGE_IP': '172.29.244.10'
- },
- 'controller00': {
- 'VLAN_IP': '192.168.122.3',
- 'MGMT_IP': '172.29.236.11',
- 'VXLAN_IP': '172.29.240.11',
- 'STORAGE_IP': '172.29.244.11'
- },
- 'compute00': {
- 'VLAN_IP': '192.168.122.4',
- 'MGMT_IP': '172.29.236.12',
- 'VXLAN_IP': '172.29.240.12',
- 'STORAGE_IP': '172.29.244.12'
- },
- 'compute01': {
- 'VLAN_IP': '192.168.122.5',
- 'MGMT_IP': '172.29.236.13',
- 'VXLAN_IP': '172.29.240.13',
- 'STORAGE_IP': '172.29.244.13'
- }
-}
diff --git a/xci/installer/osa/files/noha/inventory b/xci/installer/osa/files/noha/inventory
deleted file mode 100644
index 0e3b8d84..00000000
--- a/xci/installer/osa/files/noha/inventory
+++ /dev/null
@@ -1,13 +0,0 @@
-[deployment]
-opnfv ansible_ssh_host=192.168.122.2
-
-[controller]
-controller00 ansible_ssh_host=192.168.122.3
-
-[compute]
-compute00 ansible_ssh_host=192.168.122.4
-compute01 ansible_ssh_host=192.168.122.5
-
-[openstack:children]
-controller
-compute
diff --git a/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/ha/user_variables_os-odl-nofeature-ha.yml b/xci/installer/osa/files/user_variables_xci.yml
index 25cd6839..65e09bb4 100644
--- a/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/ha/user_variables_os-odl-nofeature-ha.yml
+++ b/xci/installer/osa/files/user_variables_xci.yml
@@ -1,5 +1,5 @@
---
-# Copyright (c) 2017 Ericsson AB and others.
+# Copyright 2018, SUSE LINUX GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,10 +13,5 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# ##
-# ## This file contains commonly used overrides for convenience. Please inspect
-# ## the defaults for each role to find additional override options.
-# ##
-
-# Enable clustering for opendaylight
-cluster: true \ No newline at end of file
+opensuse_mirror_obs: "http://ftp.gwdg.de/pub/opensuse"
+opensuse_mirror: "http://mirrors.rit.edu/opensuse"
diff --git a/xci/installer/osa/playbooks/configure-opnfvhost.yml b/xci/installer/osa/playbooks/configure-opnfvhost.yml
index 96bd9e5e..c92abd97 100644
--- a/xci/installer/osa/playbooks/configure-opnfvhost.yml
+++ b/xci/installer/osa/playbooks/configure-opnfvhost.yml
@@ -26,7 +26,6 @@
file: "{{ item }}"
with_items:
- "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml"
- - "{{ xci_flavor_ansible_file_path }}/flavor-vars.yml"
- name: Set facts for remote deployment
set_fact:
remote_xci_path: "{{ ansible_env.HOME }}/releng-xci"
@@ -74,13 +73,13 @@
- { src: "{{ openstack_osa_path }}/etc/openstack_deploy/env.d", dest: "{{ openstack_osa_etc_path }}" }
- { src: "{{ openstack_osa_path }}/etc/openstack_deploy/conf.d", dest: "{{ openstack_osa_etc_path }}" }
- { src: "{{ openstack_osa_path }}/etc/openstack_deploy/user_secrets.yml", dest: "{{ openstack_osa_etc_path }}" }
- - { src: "{{ remote_xci_flavor_files }}/inventory", dest: "{{ remote_xci_playbooks }}" }
- { src: "{{ remote_xci_flavor_files }}/openstack_user_config.yml", dest: "{{ openstack_osa_etc_path }}" }
- { src: "{{ remote_xci_flavor_files }}/user_variables.yml", dest: "{{ openstack_osa_etc_path }}" }
- { src: "{{ remote_xci_flavor_files }}/ceph.yml", dest: "{{ openstack_osa_etc_path }}/conf.d/", cond: xci_ceph_enabled }
- { src: "{{ remote_xci_flavor_files }}/user_ceph.yml", dest: "{{ openstack_osa_etc_path }}/user_ceph.yml", cond: xci_ceph_enabled }
- { src: "{{ remote_xci_flavor_files }}/user_variables_ceph.yml", dest: "{{ openstack_osa_etc_path }}/user_variables_ceph.yml", cond: xci_ceph_enabled }
- { src: "{{ remote_xci_path }}/xci/installer/osa/files/cinder.yml", dest: "{{ openstack_osa_etc_path }}/env.d" }
+ - { src: "{{ remote_xci_path }}/xci/installer/osa/files/user_variables_xci.yml", dest: "{{ openstack_osa_etc_path }}/user_variables_xci.yml" }
- { src: "{{ remote_xci_path }}/xci/installer/osa/files/user_variables_proxy.yml", dest: "{{ openstack_osa_etc_path }}/user_variables_proxy.yml", cond: "{{ lookup('env', 'http_proxy') != '' }}" }
- { src: "{{ remote_xci_path }}/xci/installer/osa/files/setup-openstack.yml", dest: "{{ openstack_osa_path }}/playbooks" }
- { src: "{{ remote_xci_path }}/xci/installer/osa/files/ansible-role-requirements.yml", dest: "{{openstack_osa_path}}/ansible-role-requirements.yml", cond: "{{ openstack_osa_version != 'master' }}" }
@@ -108,7 +107,15 @@
when:
- lookup('env','http_proxy') != ""
- - include: "{{ xci_path }}/xci/playbooks/bootstrap-scenarios.yml"
+ - name: Reload XCI deployment host facts
+ setup:
+ filter: ansible_local
+ gather_subset: "!all"
+ delegate_to: 127.0.0.1
+
+ - name: Prepare everything to run the {{ deploy_scenario }} role
+ include_role:
+ name: "{{ hostvars['opnfv'].ansible_local.xci.scenarios.role }}"
- name: bootstrap ansible on opnfv host
command: "/bin/bash ./scripts/bootstrap-ansible.sh"
diff --git a/xci/installer/osa/playbooks/configure-targethosts.yml b/xci/installer/osa/playbooks/configure-targethosts.yml
index cb817cfc..a5d2923c 100644
--- a/xci/installer/osa/playbooks/configure-targethosts.yml
+++ b/xci/installer/osa/playbooks/configure-targethosts.yml
@@ -17,7 +17,6 @@
file: "{{ item }}"
with_items:
- "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml"
- - "{{ xci_flavor_ansible_file_path }}/flavor-vars.yml"
roles:
- role: peru.proxy_settings
proxy_settings_http_proxy: "{{ lookup('env','http_proxy') }}"
diff --git a/xci/opnfv-scenario-requirements.yml b/xci/opnfv-scenario-requirements.yml
index 925789a9..c5f6ae6a 100644
--- a/xci/opnfv-scenario-requirements.yml
+++ b/xci/opnfv-scenario-requirements.yml
@@ -14,7 +14,7 @@
- scenario: os-odl-sfc
scm: git
src: https://gerrit.opnfv.org/gerrit/sfc
- version: master
+ version: 6.0.1
role: scenarios/os-odl-sfc/role/os-odl-sfc
installers:
- installer: osa
@@ -28,9 +28,9 @@
- scenario: os-nosdn-nofeature
scm: git
- src: https://gerrit.opnfv.org/gerrit/releng-xci
- version: master
- role: xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature
+ src: https://gerrit.opnfv.org/gerrit/releng-xci-scenarios
+ version: 6.0.1
+ role: scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature
installers:
- installer: osa
flavors:
@@ -44,9 +44,9 @@
- scenario: os-odl-nofeature
scm: git
- src: https://gerrit.opnfv.org/gerrit/releng-xci
- version: master
- role: xci/scenarios/os-odl-nofeature/role/os-odl-nofeature
+ src: https://gerrit.opnfv.org/gerrit/releng-xci-scenarios
+ version: 6.0.1
+ role: scenarios/os-odl-nofeature/role/os-odl-nofeature
installers:
- installer: osa
flavors:
@@ -59,9 +59,9 @@
- scenario: k8-nosdn-nofeature
scm: git
- src: https://gerrit.opnfv.org/gerrit/releng-xci
- version: master
- role: xci/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature
+ src: https://gerrit.opnfv.org/gerrit/releng-xci-scenarios
+ version: 6.0.1
+ role: scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature
installers:
- installer: kubespray
flavors:
@@ -91,9 +91,9 @@
- scenario: k8-canal-nofeature
scm: git
- src: https://gerrit.opnfv.org/gerrit/releng-xci
- version: master
- role: xci/scenarios/k8-canal-nofeature/role/k8-canal-nofeature
+ src: https://gerrit.opnfv.org/gerrit/releng-xci-scenarios
+ version: 6.0.1
+ role: scenarios/k8-canal-nofeature/role/k8-canal-nofeature
installers:
- installer: kubespray
flavors:
@@ -108,9 +108,9 @@
- scenario: k8-calico-nofeature
scm: git
- src: https://gerrit.opnfv.org/gerrit/releng-xci
- version: master
- role: xci/scenarios/k8-calico-nofeature/role/k8-calico-nofeature
+ src: https://gerrit.opnfv.org/gerrit/releng-xci-scenarios
+ version: 6.0.1
+ role: scenarios/k8-calico-nofeature/role/k8-calico-nofeature
installers:
- installer: kubespray
flavors:
@@ -125,9 +125,9 @@
- scenario: k8-flannel-nofeature
scm: git
- src: https://gerrit.opnfv.org/gerrit/releng-xci
- version: master
- role: xci/scenarios/k8-flannel-nofeature/role/k8-flannel-nofeature
+ src: https://gerrit.opnfv.org/gerrit/releng-xci-scenarios
+ version: 6.0.1
+ role: scenarios/k8-flannel-nofeature/role/k8-flannel-nofeature
installers:
- installer: kubespray
flavors:
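All in-tree scenarios now come from the external releng-xci-scenarios repository pinned at the 6.0.1 tag rather than tracking this repository's master, and their role paths lose the xci/ prefix accordingly. The equivalent manual checkout, useful for inspecting what a pinned role actually contains (URL, tag, and path taken from the entries above):

    git clone https://gerrit.opnfv.org/gerrit/releng-xci-scenarios
    git -C releng-xci-scenarios checkout 6.0.1
    ls releng-xci-scenarios/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature
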
diff --git a/xci/playbooks/bootstrap-scenarios.yml b/xci/playbooks/bootstrap-scenarios.yml
deleted file mode 100644
index d1331252..00000000
--- a/xci/playbooks/bootstrap-scenarios.yml
+++ /dev/null
@@ -1,43 +0,0 @@
----
-#
-# This file is aimed to be used by scenarios to plug into the XCI.
-# Ideally, all they need to do at this point is to include their
-# role using a statement like the following one
-#
-# - name: Include foobar role
-# include_role:
-# name: "foobar"
-# when: deploy_scenario == "foobar"
-
-- name: Prepare everything to run the os-nosdn-nofeature scenario
- include_role:
- name: "os-nosdn-nofeature"
- when: deploy_scenario == 'os-nosdn-nofeature'
-- name: Prepare everything to run the os-odl-nofeature scenario
- include_role:
- name: "os-odl-nofeature"
- when: deploy_scenario == 'os-odl-nofeature'
-- name: Prepare everything to run the os-odl-sfc scenario
- include_role:
- name: "os-odl-sfc"
- when: deploy_scenario == 'os-odl-sfc'
-- name: Prepare everything to run the os-odl-bgpvpn scenario
- include_role:
- name: "os-odl-bgpvpn"
- when: deploy_scenario == 'os-odl-bgpvpn'
-- name: Prepare everything to run the k8-canal-nofeature scenario
- include_role:
- name: "k8-canal-nofeature"
- when: deploy_scenario == 'k8-canal-nofeature'
-- name: Prepare everything to run the k8-canal-nofeature scenario
- include_role:
- name: "k8-calico-nofeature"
- when: deploy_scenario == 'k8-calico-nofeature'
-- name: Prepare everything to run the k8-flannel-nofeature scenario
- include_role:
- name: "k8-flannel-nofeature"
- when: deploy_scenario == 'k8-flannel-nofeature'
-- name: Prepare everything to run the k8-nosdn-nofeature scenario
- include_role:
- name: "k8-nosdn-nofeature"
- when: deploy_scenario == 'k8-nosdn-nofeature'
diff --git a/xci/playbooks/configure-localhost.yml b/xci/playbooks/configure-localhost.yml
index 5f091c92..5b64c785 100644
--- a/xci/playbooks/configure-localhost.yml
+++ b/xci/playbooks/configure-localhost.yml
@@ -25,7 +25,6 @@
state: absent
recurse: no
with_items:
- - "{{ xci_cache }}/repos"
- "{{ log_path }} "
- "{{ opnfv_ssh_host_keys_path }}"
diff --git a/xci/playbooks/dynamic_inventory.py b/xci/playbooks/dynamic_inventory.py
new file mode 100755
index 00000000..552a1337
--- /dev/null
+++ b/xci/playbooks/dynamic_inventory.py
@@ -0,0 +1,161 @@
+#!/usr/bin/python
+# coding: utf-8
+
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 SUSE LINUX GmbH.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+#
+# Based on https://raw.githubusercontent.com/ansible/ansible/devel/contrib/inventory/cobbler.py
+
+import argparse
+import os
+import sys
+import yaml
+import json
+
+
+class XCIInventory(object):
+ def __init__(self):
+ super(XCIInventory, self).__init__()
+ self.inventory = {}
+ self.inventory['all'] = {}
+ self.inventory['all']['hosts'] = []
+ self.inventory['all']['vars'] = {}
+ self.inventory['_meta'] = {}
+ self.inventory['_meta']['hostvars'] = {}
+ self.installer = os.environ.get('INSTALLER_TYPE', 'osa')
+ self.flavor = os.environ.get('XCI_FLAVOR', 'mini')
+
+ # Static information for opnfv host for now
+ self.add_host('opnfv')
+ self.add_hostvar('opnfv', 'ansible_ssh_host', '192.168.122.2')
+ self.add_to_group('deployment', 'opnfv')
+ self.add_to_group('opnfv', 'opnfv')
+
+ self.opnfv_networks = {}
+ self.opnfv_networks['opnfv'] = {}
+ self.opnfv_networks['opnfv']['admin'] = {}
+ self.opnfv_networks['opnfv']['admin']['address'] = '172.29.236.10/22'
+ self.opnfv_networks['opnfv']['public'] = {}
+ self.opnfv_networks['opnfv']['public']['address'] = '192.168.122.2/24'
+ self.opnfv_networks['opnfv']['public']['gateway'] = '192.168.122.1'
+ self.opnfv_networks['opnfv']['private'] = {}
+ self.opnfv_networks['opnfv']['private']['address'] = '172.29.240.10/22'
+ self.opnfv_networks['opnfv']['storage'] = {}
+ self.opnfv_networks['opnfv']['storage']['address'] = '172.29.244.10/24'
+
+ self.read_pdf_idf()
+
+ self.parse_args()
+
+ if self.args.host:
+ self.dump(self.get_host_info(self.args.host))
+ else:
+ self.dump(self.inventory)
+
+ def parse_args(self):
+ parser = argparse.ArgumentParser(description='Produce an Ansible inventory based on PDF/IDF XCI files')
+ parser.add_argument('--list', action='store_true', default=True, help='List XCI hosts (default: True)')
+ parser.add_argument('--host', action='store', help='Get all the variables about a specific host')
+ self.args = parser.parse_args()
+
+ def read_pdf_idf(self):
+ pdf_file = os.path.dirname(os.path.realpath(__file__)) + "/../var/pdf.yml"
+ idf_file = os.path.dirname(os.path.realpath(__file__)) + "/../var/idf.yml"
+ nodes = []
+ host_networks = {}
+
+ with open(pdf_file) as f:
+ try:
+ pdf = yaml.safe_load(f)
+ except yaml.YAMLError as e:
+ print(e)
+ sys.exit(1)
+
+ with open(idf_file) as f:
+ try:
+ idf = yaml.safe_load(f)
+ except yaml.YAMLError as e:
+ print(e)
+ sys.exit(1)
+
+ valid_host = (host for host in idf['xci'][self.installer]['nodes_roles'] \
+ if host in idf['xci']['flavors'][self.flavor] \
+ and host != 'opnfv')
+
+ for host in valid_host:
+ nodes.append(host)
+ hostname = idf['xci'][self.installer]['hostnames'][host]
+ self.add_host(hostname)
+ for role in idf['xci'][self.installer]['nodes_roles'][host]:
+ self.add_to_group(role, hostname)
+
+ pdf_host_info = filter(lambda x: x['name'] == host, pdf['nodes'])[0]
+ native_vlan_if = filter(lambda x: x['vlan'] == 'native', pdf_host_info['interfaces'])
+ self.add_hostvar(hostname, 'ansible_host', native_vlan_if[0]['address'])
+ host_networks[hostname] = {}
+ # And now record the rest of the information
+ for network, ndata in idf['idf']['net_config'].items():
+ network_interface_num = idf['idf']['net_config'][network]['interface']
+ host_networks[hostname][network] = {}
+ host_networks[hostname][network]['address'] = pdf_host_info['interfaces'][int(network_interface_num)]['address'] + "/" + str(ndata['mask'])
+ if 'gateway' in ndata.keys():
+ host_networks[hostname][network]['gateway'] = str(ndata['gateway']) + "/" + str(ndata['mask'])
+
+ host_networks.update(self.opnfv_networks)
+
+ self.add_groupvar('all', 'host_info', host_networks)
+
+ # Now add the additional groups
+ for parent in idf['xci'][self.installer]['groups'].keys():
+ map(lambda x: self.add_group(x, parent), idf['xci'][self.installer]['groups'][parent])
+
+ def dump(self, data):
+ print (json.dumps(data, sort_keys=True, indent=2))
+
+ def add_host(self, host):
+ self.inventory['all']['hosts'].append(host)
+
+ def hosts(self):
+ return self.inventory['all']['hosts']
+
+ def add_group(self, group, parent = 'all'):
+ if parent not in self.inventory.keys():
+ self.inventory[parent] = {}
+ if 'children' not in self.inventory[parent]:
+ self.inventory[parent]['children'] = []
+ self.inventory[parent]['children'].append(group)
+
+ def add_to_group(self, group, host):
+ if group not in self.inventory.keys():
+ self.inventory[group] = []
+ self.inventory[group].append(host)
+
+ def add_hostvar(self, host, param, value):
+ if host not in self.hostvars():
+ self.inventory['_meta']['hostvars'][host] = {}
+ self.inventory['_meta']['hostvars'][host].update({param: value})
+
+ def add_groupvar(self, group, param, value):
+ if group not in self.groupvars(group):
+ self.inventory[group]['vars'] = {}
+ self.inventory[group]['vars'].update({param: value})
+
+ def hostvars(self):
+ return iter(self.inventory['_meta']['hostvars'].keys())
+
+ def groupvars(self, group):
+ return iter(self.inventory[group]['vars'].keys())
+
+ def get_host_info(self, host):
+ return self.inventory['_meta']['hostvars'][host]
+
+if __name__ == '__main__':
+ XCIInventory()
+
+# vim: set ts=4 sw=4 expandtab:
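dynamic_inventory.py implements Ansible's external-inventory protocol: --list dumps the whole inventory as JSON and --host prints one host's variables. It can also be run by hand for debugging; INSTALLER_TYPE and XCI_FLAVOR default to osa and mini as in __init__, and the JSON below is abbreviated and illustrative:

    INSTALLER_TYPE=osa XCI_FLAVOR=mini \
        python xci/playbooks/dynamic_inventory.py --list
    # {
    #   "_meta": {"hostvars": {"opnfv": {"ansible_ssh_host": "192.168.122.2"}, ...}},
    #   "all": {"hosts": ["opnfv", ...], "vars": {"host_info": {...}}},
    #   "deployment": ["opnfv"],
    #   "opnfv": ["opnfv"],
    #   ...
    # }

    # Per-host view, as Ansible itself calls it:
    python xci/playbooks/dynamic_inventory.py --host opnfv
    # {"ansible_ssh_host": "192.168.122.2"}
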
diff --git a/xci/playbooks/get-opnfv-scenario-requirements.yml b/xci/playbooks/get-opnfv-scenario-requirements.yml
index af97ceb2..a9165709 100644
--- a/xci/playbooks/get-opnfv-scenario-requirements.yml
+++ b/xci/playbooks/get-opnfv-scenario-requirements.yml
@@ -31,114 +31,76 @@
loop_control:
label: "{{ item[0].scenario }}"
- - name: Create scenario directories
- file:
- path: "{{ role_path_default }}/{{ item.scenario }}"
- state: directory
- with_items: "{{ scenarios }}"
- loop_control:
- label: "{{ item.scenario }}"
-
- - name: Clone git repos (with git)
- git:
- repo: "{{ item.src }}"
- dest: "{{ scenario_path_default }}/{{ item.scenario | default(item.src | basename) }}"
- version: "{{ item.version | default('master') }}"
- refspec: "{{ item.refspec | default(omit) }}"
- update: true
- force: true
- when:
- - item.scm == "git" or item.scm is undefined
- with_items: "{{ scenarios }}"
- register: git_clone
- until: git_clone | success
- retries: "{{ git_clone_retries }}"
- delay: "{{ git_clone_retry_delay }}"
+ - name: Update scenarios with local overrides
+ set_fact:
+ scenarios: >
+ {%- for z in xci_scenarios_overrides -%}
+ {%- for x in scenarios if x.scenario == z.scenario -%}
+ {%- set _ = x.update(z) -%}
+ {%- endfor -%}
+ {%- endfor -%}
+ {{- scenarios -}}
+ with_items: "{{ xci_scenarios_overrides }}"
loop_control:
label: "{{ item.scenario }}"
+ when: xci_scenarios_overrides is defined
- - name: Check that scenarios exist
- stat:
- path: "{{ scenario_path_default }}/{{ item.scenario }}/{{ item.role }}"
- register: scenarios_list_exists
+  - name: Collect list of known scenarios
+ set_fact:
+ known_scenarios: >
+ {%- set scenario_names = [] -%}
+ {%- for x in scenarios -%}
+ {%- set _ = scenario_names.append(x.scenario) -%}
+ {%- endfor -%}
+ {{- scenario_names -}}
with_items: "{{ scenarios }}"
loop_control:
label: "{{ item.scenario }}"
- - name: Plug in the scenario to XCI
- synchronize:
- src: "{{ scenario_path_default }}/{{ item.item.scenario }}/{{ item.item.role }}/"
- dest: "{{ role_path_default }}/{{ item.item.role | basename }}"
- when: item.stat.exists
- with_items: "{{ scenarios_list_exists.results }}"
- loop_control:
- label: "{{ item.item.scenario }}"
-
- - name: Synchronize local changes to scenarios' master branch
- synchronize:
- src: "{{ xci_path }}/xci/scenarios/{{ item.item.scenario }}/{{ item.item.role | replace('xci/scenarios/' ~ item.item.scenario ~ '/', '') }}/"
- dest: "{{ role_path_default }}/{{ item.item.role | basename }}"
- archive: no
- times: no
- recursive: yes
- checksum: yes
- owner: yes
- group: yes
- perms: yes
- links: yes
- failed_when: false
- when:
- - item.stat.exists
- - item.item.version == 'master'
- with_items: "{{ scenarios_list_exists.results }}"
- loop_control:
- label: "{{ item.item.scenario }}"
+ - name: Fail if 'DEPLOY_SCENARIO' is not defined
+ fail:
+ msg: "DEPLOY_SCENARIO env variable is not defined so no scenario can be deployed"
+ when: deploy_scenario is not defined
- - name: Plug in the scenario to XCI (fallback)
- synchronize:
- src: "{{ xci_path }}/{{ item.item.role }}/"
- dest: "{{ role_path_default }}/{{ item.item.role | basename }}"
- when: not item.stat.exists
- with_items: "{{ scenarios_list_exists.results }}"
- loop_control:
- label: "{{ item.item.scenario }}"
+ - name: Ensure {{ deploy_scenario }} is a known XCI scenario
+ fail:
+ msg: "{{ deploy_scenario }} does not exist"
+ when: deploy_scenario not in known_scenarios
- - name: Gather information about the selected {{ deploy_scenario }} scenario
+ - name: Collect scenario information
set_fact:
- deploy_scenario: "{{ item }}"
- with_items: "{{ scenarios }}"
- loop_control:
- label: "{{ item.scenario }}"
- when: deploy_scenario | lower == item.scenario
+ xci_scenario: >
+ {%- set xci_scenario = {} -%}
+ {%- for x in scenarios if x.scenario == deploy_scenario -%}
+ {%- for z in x.installers if z.installer == installer_type -%}
+ {%- set _ = xci_scenario.update({'flavors': z.flavors}) -%}
+ {%- set _ = xci_scenario.update({'distros': z.distros}) -%}
+ {%- endfor -%}
+ {%- set _ = xci_scenario.update({'role': x.role | basename}) -%}
+ {%- endfor -%}
+ {{ xci_scenario }}
+
+ - name: Ensure local facts directory exists
+ file:
+ path: "/etc/ansible/facts.d"
+ state: directory
+ become: true
- - name: Determine if the selected {{ deploy_scenario }} scenario can be deployed
- block:
- - name: Set scenario installer fact
- set_fact:
- deploy_scenario_installer: "{{ item }}"
- with_items: "{{ deploy_scenario.installers }}"
- loop_control:
- label: "{{ item.installer }}"
- when: item.installer == installer_type
- - name: Set scenario flavor fact
- set_fact:
- deploy_scenario_flavor: "{{ (xci_flavor in deploy_scenario_installer.flavors) | bool }}"
- when:
- - deploy_scenario_installer is defined
- - deploy_scenario_installer
- - name: Set scenario distro flavor fact
- set_fact:
- deploy_scenario_distro: "{{ (xci_distro in deploy_scenario_installer.distros) | bool }}"
- when:
- - deploy_scenario_flavor is defined
- - deploy_scenario_flavor
- when: deploy_scenario is defined
+ - name: Record scenario information
+ ini_file:
+ create: yes
+ section: scenarios
+ state: present
+ option: role
+ value: "{{ xci_scenario.role | basename }}"
+ path: "/etc/ansible/facts.d/xci.fact"
+ become: true
- - name: Fail if {{ deploy_scenario.scenario }} is not supported
+ - name: Fail if {{ deploy_scenario }} is not supported
fail:
msg:
- ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
- - ERROR! The {{ deploy_scenario.scenario }} scenario can't be deployed. This is because
+ - ERROR! The {{ deploy_scenario }} scenario can't be deployed. This is because
- the {{ installer_type }} XCI installer or the {{ xci_flavor }} flavor or the {{ xci_distro }}
- distribution is not supported by this scenario. It may also be possible that
- this scenario doesn't exist at all or it's not listed in {{ scenario_file }}.
@@ -147,9 +109,32 @@
- ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
- ''
when:
- - deploy_scenario is not defined or
- deploy_scenario_distro is not defined or
- (deploy_scenario_distro is defined and not deploy_scenario_distro)
+ (xci_scenario['flavors'] is defined and xci_flavor not in xci_scenario['flavors']) or
+ (xci_scenario['distros'] is defined and xci_distro not in xci_scenario['distros'])
+
+ - name: Clone git repos
+ git:
+ repo: "{{ item.src }}"
+ dest: "{{ scenario_path_default }}/{{ item.scenario | default(item.src | basename) }}"
+ version: "{{ item.version | default('master') }}"
+ refspec: "{{ item.refspec | default(omit) }}"
+ update: true
+ force: true
+ with_items: "{{ scenarios }}"
+ register: git_clone
+ until: git_clone | success
+ retries: "{{ git_clone_retries }}"
+ delay: "{{ git_clone_retry_delay }}"
+ loop_control:
+ label: "{{ item.scenario }}"
+
+ - name: Plug in the scenario Ansible roles to XCI
+ synchronize:
+ src: "{{ scenario_path_default }}/{{ item.scenario }}/{{ item.role }}/"
+ dest: "{{ role_path_default }}/{{ item.role | basename }}"
+ with_items: "{{ scenarios }}"
+ loop_control:
+ label: "{{ item.scenario }}"
vars:
ansible_python_interpreter: "/usr/bin/python"
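The selected scenario's role name is persisted as a custom local fact, which is what the new "Reload XCI deployment host facts" plus include_role tasks in the configure-opnfvhost.yml plays above consume as hostvars['opnfv'].ansible_local.xci.scenarios.role. A sketch of checking that plumbing by hand on the deployment host; the role value shown is illustrative:

    cat /etc/ansible/facts.d/xci.fact
    # [scenarios]
    # role = os-nosdn-nofeature

    # Re-gather only local facts, mirroring what the playbooks do:
    ansible localhost -m setup -a 'filter=ansible_local gather_subset=!all'
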
diff --git a/xci/playbooks/roles/bootstrap-host/tasks/network.yml b/xci/playbooks/roles/bootstrap-host/tasks/network.yml
index 92e9195e..a4f260c4 100644
--- a/xci/playbooks/roles/bootstrap-host/tasks/network.yml
+++ b/xci/playbooks/roles/bootstrap-host/tasks/network.yml
@@ -50,99 +50,8 @@
- name: Run handlers
meta: flush_handlers
-- block:
- - name: configure modules
- lineinfile:
- dest: /etc/modules
- state: present
- create: yes
- line: "8021q"
- - name: add modules
- modprobe:
- name: 8021q
- state: present
- - name: ensure interfaces.d folder is empty
- file:
- state: "{{ item }}"
- path: "/etc/network/interfaces.d"
- with_items:
- - absent
- - directory
- - name: ensure interfaces file is updated
- template:
- src: "{{ ansible_os_family | lower }}/{{ ansible_hostname }}.interface.j2"
- dest: "/etc/network/interfaces"
- - name: restart network service
- shell: "/sbin/ifconfig {{ ansible_local.xci.network.xci_interface }} 0 && /sbin/ifdown -a && /sbin/ifup -a"
- async: 15
- poll: 0
- when: ansible_os_family | lower == "debian"
-
-- block:
- - name: Configure networking on SUSE
- template:
- src: "{{ ansible_os_family | lower }}/suse.interface.j2"
- dest: "/etc/sysconfig/network/ifcfg-{{ item.name }}"
- with_items:
- - { name: "{{ ansible_local.xci.network.xci_interface }}" }
- - { name: "{{ ansible_local.xci.network.xci_interface }}.10", vlan_id: 10 }
- - { name: "{{ ansible_local.xci.network.xci_interface }}.30", vlan_id: 30 }
- - { name: "{{ ansible_local.xci.network.xci_interface }}.20", vlan_id: 20 }
- - { name: "br-mgmt", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}.10", ip: "{{ host_info[inventory_hostname].MGMT_IP }}/22" }
- - { name: "br-vxlan", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}.30", ip: "{{ host_info[inventory_hostname].VXLAN_IP }}/22" }
- - { name: "br-vlan", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}", ip: "{{ host_info[inventory_hostname].VLAN_IP }}/24" }
- - { name: "br-storage", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}.20", ip: "{{ host_info[inventory_hostname].STORAGE_IP }}/22" }
-
- - name: Add postup/postdown scripts on SUSE
- copy:
- src: "network-config-suse"
- dest: "/etc/sysconfig/network/scripts/network-config-suse"
- mode: 0755
-
- - name: Configure routes on SUSE
- template:
- src: "{{ ansible_os_family | lower }}/suse.routes.j2"
- dest: "/etc/sysconfig/network/ifroute-{{ item.name }}"
- with_items:
- - { name: "br-vlan", gateway: "192.168.122.1", route: "default" }
- - name: restart network service
- service:
- name: network
- state: restarted
- async: 15
- poll: 0
- when: ansible_os_family | lower == "suse"
-
-- block:
- - name: Configure networking on CentOS for interfaces
- template:
- src: "{{ ansible_os_family | lower }}/interface.ifcfg.j2"
- dest: "/etc/sysconfig/network-scripts/ifcfg-{{ item.name }}"
- with_items:
- - { name: "{{ ansible_local.xci.network.xci_interface }}" , bridge: "br-vlan" }
- - { name: "{{ ansible_local.xci.network.xci_interface }}.10", bridge: "br-mgmt" , vlan_id: 10 }
- - { name: "{{ ansible_local.xci.network.xci_interface }}.20", bridge: "br-storage", vlan_id: 20 }
- - { name: "{{ ansible_local.xci.network.xci_interface }}.30", bridge: "br-vxlan" , vlan_id: 30 }
- - name: Configure networking on CentOS for bridges
- template:
- src: "{{ ansible_os_family | lower }}/bridge.ifcfg.j2"
- dest: "/etc/sysconfig/network-scripts/ifcfg-{{ item.name }}"
- with_items:
- - { name: "br-vlan" , ip: "{{ host_info[inventory_hostname].VLAN_IP }}", prefix: 24 }
- - { name: "br-mgmt" , ip: "{{ host_info[inventory_hostname].MGMT_IP }}", prefix: 22 }
- - { name: "br-storage", ip: "{{ host_info[inventory_hostname].STORAGE_IP }}", prefix: 22 }
- - { name: "br-vxlan" , ip: "{{ host_info[inventory_hostname].VXLAN_IP }}", prefix: 22 }
- - name: Add default route through br-vlan
- lineinfile:
- path: "/etc/sysconfig/network-scripts/ifcfg-br-vlan"
- line: "GATEWAY=192.168.122.1"
- - name: restart network service
- service:
- name: network
- state: restarted
- async: 15
- poll: 0
- when: ansible_os_family | lower == "redhat"
+- name: "Configure networking on {{ ansible_os_family }}"
+ include_tasks: "network_{{ ansible_os_family | lower }}.yml"
- name: Wait for host to come back to life
local_action:
diff --git a/xci/playbooks/roles/bootstrap-host/tasks/network_debian.yml b/xci/playbooks/roles/bootstrap-host/tasks/network_debian.yml
new file mode 100644
index 00000000..3cac1e22
--- /dev/null
+++ b/xci/playbooks/roles/bootstrap-host/tasks/network_debian.yml
@@ -0,0 +1,58 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 SUSE LINUX GmbH.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+- name: configure modules
+ lineinfile:
+ dest: /etc/modules
+ state: present
+ create: yes
+ line: "8021q"
+
+- name: add modules
+ modprobe:
+ name: 8021q
+ state: present
+
+- name: ensure interfaces.d folder is empty
+ file:
+ state: "{{ item }}"
+ path: "/etc/network/interfaces.d"
+ with_items:
+ - absent
+ - directory
+
+- name: Ensure /etc/interfaces can source additional files
+ copy:
+ content: |
+ auto lo
+ iface lo inet loopback
+ source /etc/network/interfaces.d/*.cfg
+ dest: "/etc/network/interfaces"
+
+- name: "Configure networking for {{ inventory_hostname }}"
+ template:
+ src: "{{ installer_type }}/debian.interface.j2"
+ dest: "/etc/network/interfaces.d/{{ item.name }}.cfg"
+ with_items:
+ - { name: "{{ ansible_local.xci.network.xci_interface }}" }
+ - { name: "{{ ansible_local.xci.network.xci_interface }}.10", vlan_id: 10 }
+ - { name: "{{ ansible_local.xci.network.xci_interface }}.30", vlan_id: 30 }
+ - { name: "{{ ansible_local.xci.network.xci_interface }}.20", vlan_id: 20 }
+ - { name: "br-mgmt", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}.10", network: "{{ host_info[inventory_hostname].admin }}" }
+ - { name: "br-vxlan", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}.30", network: "{{ host_info[inventory_hostname].private }}" }
+ - { name: "br-vlan", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}", network: "{{ host_info[inventory_hostname].public }}" }
+ - { name: "br-storage", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}.20", network: "{{ host_info[inventory_hostname].storage }}" }
+ loop_control:
+ label: "{{ item.name }}"
+
+- name: restart network service
+ shell: "/sbin/ifconfig {{ ansible_local.xci.network.xci_interface }} 0 && /sbin/ifdown -a && /sbin/ifup -a"
+ async: 15
+ poll: 0
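Rather than rendering one monolithic /etc/network/interfaces from per-hostname templates, every interface and bridge now gets its own interfaces.d fragment rendered from the per-installer template, with addresses coming from the host_info structure that the dynamic inventory publishes. Assuming osa/debian.interface.j2 (not shown in this diff) emits bridge stanzas like the deleted per-host templates further below, a generated fragment would look roughly like this:

    # Inspect a generated fragment on a bootstrapped node
    # (contents illustrative, patterned on the deleted templates below):
    cat /etc/network/interfaces.d/br-mgmt.cfg
    # auto br-mgmt
    # iface br-mgmt inet static
    #     bridge_ports ens3.10        <- xci_interface.10, VLAN 10
    #     address 172.29.236.11
    #     netmask 255.255.252.0
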
diff --git a/xci/playbooks/roles/bootstrap-host/tasks/network_redhat.yml b/xci/playbooks/roles/bootstrap-host/tasks/network_redhat.yml
new file mode 100644
index 00000000..b06a8695
--- /dev/null
+++ b/xci/playbooks/roles/bootstrap-host/tasks/network_redhat.yml
@@ -0,0 +1,37 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 SUSE LINUX GmbH.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+- name: "Configure networking on {{ inventory_hostname }}"
+ template:
+ src: "{{ installer_type }}/{{ ansible_os_family | lower }}.ifcfg.j2"
+ dest: "/etc/sysconfig/network-scripts/ifcfg-{{ item.name }}"
+ with_items:
+ - { name: "{{ ansible_local.xci.network.xci_interface }}" , bridge: "br-vlan" }
+ - { name: "{{ ansible_local.xci.network.xci_interface }}.10", bridge: "br-mgmt" , vlan_id: 10 }
+ - { name: "{{ ansible_local.xci.network.xci_interface }}.20", bridge: "br-storage", vlan_id: 20 }
+ - { name: "{{ ansible_local.xci.network.xci_interface }}.30", bridge: "br-vxlan" , vlan_id: 30 }
+ - { name: "br-vlan" , network: "{{ host_info[inventory_hostname].public }}" }
+ - { name: "br-mgmt" , network: "{{ host_info[inventory_hostname].admin }}" }
+ - { name: "br-storage", network: "{{ host_info[inventory_hostname].storage }}" }
+ - { name: "br-vxlan" , network: "{{ host_info[inventory_hostname].private }}" }
+ loop_control:
+ label: "{{ item.name }}"
+
+- name: Add default route through br-vlan
+ lineinfile:
+ path: "/etc/sysconfig/network-scripts/ifcfg-br-vlan"
+ line: "GATEWAY={{ host_info[inventory_hostname]['public']['gateway'] | ipaddr('address') }}"
+
+- name: restart network service
+ service:
+ name: network
+ state: restarted
+ async: 15
+ poll: 0
diff --git a/xci/playbooks/roles/bootstrap-host/tasks/network_suse.yml b/xci/playbooks/roles/bootstrap-host/tasks/network_suse.yml
new file mode 100644
index 00000000..c9c9d83c
--- /dev/null
+++ b/xci/playbooks/roles/bootstrap-host/tasks/network_suse.yml
@@ -0,0 +1,45 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 SUSE LINUX GmbH.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+- name: "Configure networking on {{ inventory_hostname }}"
+ template:
+ src: "{{ installer_type }}/{{ ansible_os_family | lower }}.interface.j2"
+ dest: "/etc/sysconfig/network/ifcfg-{{ item.name }}"
+ with_items:
+ - { name: "{{ ansible_local.xci.network.xci_interface }}" }
+ - { name: "{{ ansible_local.xci.network.xci_interface }}.10", vlan_id: 10 }
+ - { name: "{{ ansible_local.xci.network.xci_interface }}.30", vlan_id: 30 }
+ - { name: "{{ ansible_local.xci.network.xci_interface }}.20", vlan_id: 20 }
+ - { name: "br-mgmt", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}.10", network: "{{ host_info[inventory_hostname].admin }}" }
+ - { name: "br-vxlan", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}.30", network: "{{ host_info[inventory_hostname].private }}" }
+ - { name: "br-vlan", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}", network: "{{ host_info[inventory_hostname].public }}" }
+ - { name: "br-storage", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}.20", network: "{{ host_info[inventory_hostname].storage }}" }
+ loop_control:
+ label: "{{ item.name }}"
+
+- name: Add postup/postdown scripts on SUSE
+ copy:
+ src: "network-config-suse"
+ dest: "/etc/sysconfig/network/scripts/network-config-suse"
+ mode: 0755
+
+- name: Configure routes on SUSE
+ template:
+ src: "{{ installer_type }}/{{ ansible_os_family | lower }}.routes.j2"
+ dest: "/etc/sysconfig/network/ifroute-{{ item.name }}"
+ with_items:
+ - { name: "br-vlan", gateway: "{{ host_info[inventory_hostname]['public']['gateway'] }}", route: "default" }
+
+- name: restart network service
+ service:
+ name: network
+ state: restarted
+ async: 15
+ poll: 0
diff --git a/xci/playbooks/roles/bootstrap-host/templates/debian/compute00.interface.j2 b/xci/playbooks/roles/bootstrap-host/templates/debian/compute00.interface.j2
deleted file mode 100644
index 2da12f20..00000000
--- a/xci/playbooks/roles/bootstrap-host/templates/debian/compute00.interface.j2
+++ /dev/null
@@ -1,75 +0,0 @@
-# {{ ansible_managed }}
-
-# The loopback network interface
-auto lo
-iface lo inet loopback
-
-# Physical interface
-auto {{ ansible_default_ipv4.interface }}
-iface {{ ansible_default_ipv4.interface }} inet manual
-
-# Container/Host management VLAN interface
-auto {{ ansible_default_ipv4.interface }}.10
-iface {{ ansible_default_ipv4.interface }}.10 inet manual
- vlan-raw-device {{ ansible_default_ipv4.interface }}
-
-# OpenStack Networking VXLAN (tunnel/overlay) VLAN interface
-auto {{ ansible_default_ipv4.interface }}.30
-iface {{ ansible_default_ipv4.interface }}.30 inet manual
- vlan-raw-device {{ ansible_default_ipv4.interface }}
-
-# Storage network VLAN interface
-auto {{ ansible_default_ipv4.interface }}.20
-iface {{ ansible_default_ipv4.interface }}.20 inet manual
- vlan-raw-device {{ ansible_default_ipv4.interface }}
-
-# Container/Host management bridge
-auto br-mgmt
-iface br-mgmt inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports {{ ansible_default_ipv4.interface }}.10
- address {{host_info[inventory_hostname].MGMT_IP}}
- netmask 255.255.252.0
-
-# compute1 VXLAN (tunnel/overlay) bridge config
-auto br-vxlan
-iface br-vxlan inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports {{ ansible_default_ipv4.interface }}.30
- address {{host_info[inventory_hostname].VXLAN_IP}}
- netmask 255.255.252.0
-
-# OpenStack Networking VLAN bridge
-auto br-vlan
-iface br-vlan inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports {{ ansible_default_ipv4.interface }}
- address {{host_info[inventory_hostname].VLAN_IP}}
- netmask 255.255.255.0
- gateway 192.168.122.1
- dns-nameserver 8.8.8.8 8.8.4.4
- offload-sg off
- # Create veth pair, don't bomb if already exists
- pre-up ip link add br-vlan-veth type veth peer name eth12 || true
- # Set both ends UP
- pre-up ip link set br-vlan-veth up
- pre-up ip link set eth12 up
- # Delete veth pair on DOWN
- post-down ip link del br-vlan-veth || true
- bridge_ports br-vlan-veth
-
-# OpenStack Storage bridge
-auto br-storage
-iface br-storage inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports {{ ansible_default_ipv4.interface }}.20
- address {{host_info[inventory_hostname].STORAGE_IP}}
- netmask 255.255.252.0
diff --git a/xci/playbooks/roles/bootstrap-host/templates/debian/compute01.interface.j2 b/xci/playbooks/roles/bootstrap-host/templates/debian/compute01.interface.j2
deleted file mode 120000
index a74df1c2..00000000
--- a/xci/playbooks/roles/bootstrap-host/templates/debian/compute01.interface.j2
+++ /dev/null
@@ -1 +0,0 @@
-compute00.interface.j2 \ No newline at end of file
diff --git a/xci/playbooks/roles/bootstrap-host/templates/debian/controller00.interface.j2 b/xci/playbooks/roles/bootstrap-host/templates/debian/controller00.interface.j2
deleted file mode 100644
index c540f66e..00000000
--- a/xci/playbooks/roles/bootstrap-host/templates/debian/controller00.interface.j2
+++ /dev/null
@@ -1,74 +0,0 @@
-# {{ ansible_managed }}
-
-# The loopback network interface
-auto lo
-iface lo inet loopback
-
-# Physical interface
-auto {{ ansible_default_ipv4.interface }}
-iface {{ ansible_default_ipv4.interface }} inet manual
-
-# Container/Host management VLAN interface
-auto {{ ansible_default_ipv4.interface }}.10
-iface {{ ansible_default_ipv4.interface }}.10 inet manual
- vlan-raw-device {{ ansible_default_ipv4.interface }}
-
-# OpenStack Networking VXLAN (tunnel/overlay) VLAN interface
-auto {{ ansible_default_ipv4.interface }}.30
-iface {{ ansible_default_ipv4.interface }}.30 inet manual
- vlan-raw-device {{ ansible_default_ipv4.interface }}
-
-# Storage network VLAN interface (optional)
-auto {{ ansible_default_ipv4.interface }}.20
-iface {{ ansible_default_ipv4.interface }}.20 inet manual
- vlan-raw-device {{ ansible_default_ipv4.interface }}
-
-# Container/Host management bridge
-auto br-mgmt
-iface br-mgmt inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports {{ ansible_default_ipv4.interface }}.10
- address {{host_info[inventory_hostname].MGMT_IP}}
- netmask 255.255.252.0
-
-# OpenStack Networking VXLAN (tunnel/overlay) bridge
-auto br-vxlan
-iface br-vxlan inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports {{ ansible_default_ipv4.interface }}.30
- address {{host_info[inventory_hostname].VXLAN_IP}}
- netmask 255.255.252.0
-
-# OpenStack Networking VLAN bridge
-auto br-vlan
-iface br-vlan inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports {{ ansible_default_ipv4.interface }}
- address {{host_info[inventory_hostname].VLAN_IP}}
- netmask 255.255.255.0
- gateway 192.168.122.1
- dns-nameserver 8.8.8.8 8.8.4.4
- # Create veth pair, don't bomb if already exists
- pre-up ip link add br-vlan-veth type veth peer name eth12 || true
- # Set both ends UP
- pre-up ip link set br-vlan-veth up
- pre-up ip link set eth12 up
- # Delete veth pair on DOWN
- post-down ip link del br-vlan-veth || true
- bridge_ports br-vlan-veth
-
-# OpenStack Storage bridge
-auto br-storage
-iface br-storage inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports {{ ansible_default_ipv4.interface }}.20
- address {{host_info[inventory_hostname].STORAGE_IP}}
- netmask 255.255.252.0
diff --git a/xci/playbooks/roles/bootstrap-host/templates/debian/controller01.interface.j2 b/xci/playbooks/roles/bootstrap-host/templates/debian/controller01.interface.j2
deleted file mode 120000
index e835d7ca..00000000
--- a/xci/playbooks/roles/bootstrap-host/templates/debian/controller01.interface.j2
+++ /dev/null
@@ -1 +0,0 @@
-controller00.interface.j2
\ No newline at end of file
diff --git a/xci/playbooks/roles/bootstrap-host/templates/debian/controller02.interface.j2 b/xci/playbooks/roles/bootstrap-host/templates/debian/controller02.interface.j2
deleted file mode 120000
index e835d7ca..00000000
--- a/xci/playbooks/roles/bootstrap-host/templates/debian/controller02.interface.j2
+++ /dev/null
@@ -1 +0,0 @@
-controller00.interface.j2
\ No newline at end of file
diff --git a/xci/playbooks/roles/bootstrap-host/templates/debian/opnfv.interface.j2 b/xci/playbooks/roles/bootstrap-host/templates/debian/opnfv.interface.j2
deleted file mode 100644
index 03f81dbb..00000000
--- a/xci/playbooks/roles/bootstrap-host/templates/debian/opnfv.interface.j2
+++ /dev/null
@@ -1,66 +0,0 @@
-# {{ ansible_managed }}
-
-# The loopback network interface
-auto lo
-iface lo inet loopback
-
-# Physical interface
-auto {{ ansible_default_ipv4.interface }}
-iface {{ ansible_default_ipv4.interface }} inet manual
-
-# Container/Host management VLAN interface
-auto {{ ansible_default_ipv4.interface }}.10
-iface {{ ansible_default_ipv4.interface }}.10 inet manual
- vlan-raw-device {{ ansible_default_ipv4.interface }}
-
-# OpenStack Networking VXLAN (tunnel/overlay) VLAN interface
-auto {{ ansible_default_ipv4.interface }}.30
-iface {{ ansible_default_ipv4.interface }}.30 inet manual
- vlan-raw-device {{ ansible_default_ipv4.interface }}
-
-# Storage network VLAN interface (optional)
-auto {{ ansible_default_ipv4.interface }}.20
-iface {{ ansible_default_ipv4.interface }}.20 inet manual
- vlan-raw-device {{ ansible_default_ipv4.interface }}
-
-# Container/Host management bridge
-auto br-mgmt
-iface br-mgmt inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports {{ ansible_default_ipv4.interface }}.10
- address {{host_info[inventory_hostname].MGMT_IP}}
- netmask 255.255.252.0
-
-# OpenStack Networking VXLAN (tunnel/overlay) bridge
-auto br-vxlan
-iface br-vxlan inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports {{ ansible_default_ipv4.interface }}.30
- address {{ host_info[inventory_hostname].VXLAN_IP }}
- netmask 255.255.252.0
-
-# OpenStack Networking VLAN bridge
-auto br-vlan
-iface br-vlan inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports {{ ansible_default_ipv4.interface }}
- address {{host_info[inventory_hostname].VLAN_IP}}
- netmask 255.255.255.0
- gateway 192.168.122.1
- dns-nameserver 8.8.8.8 8.8.4.4
-
-# OpenStack Storage bridge
-auto br-storage
-iface br-storage inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports {{ ansible_default_ipv4.interface }}.20
- address {{host_info[inventory_hostname].STORAGE_IP}}
- netmask 255.255.252.0
diff --git a/xci/playbooks/roles/bootstrap-host/templates/osa/debian.interface.j2 b/xci/playbooks/roles/bootstrap-host/templates/osa/debian.interface.j2
new file mode 100644
index 00000000..3eddce45
--- /dev/null
+++ b/xci/playbooks/roles/bootstrap-host/templates/osa/debian.interface.j2
@@ -0,0 +1,36 @@
+# {{ ansible_managed }}
+
+# Physical interface
+{% if item.bridge_ports is not defined %}
+auto {{ item.name }}
+iface {{ item.name }} inet manual
+{% if item.vlan_id is defined %}
+ vlan-raw-device {{ item.name|replace('.' ~ item.vlan_id, '') }}
+{% endif %}
+
+{% else %}
+auto {{ item.name }}
+iface {{ item.name }} inet static
+ bridge_stp off
+ bridge_waitport 0
+ bridge_fd 0
+ bridge_ports {{ item.bridge_ports }}
+{% if item.name == 'br-vlan' %}
+ # Create veth pair, don't bomb if already exists
+ pre-up ip link add br-vlan-veth type veth peer name eth12 || true
+ # Set both ends UP
+ pre-up ip link set br-vlan-veth up
+ pre-up ip link set eth12 up
+ # Delete veth pair on DOWN
+ post-down ip link del br-vlan-veth || true
+ bridge_ports br-vlan-veth
+{% endif %}
+{% if item.network is defined %}
+ address {{ item.network.address | ipaddr('address') }}
+ netmask {{ item.network.address | ipaddr('netmask') }}
+{% endif %}
+{% if item.network is defined and item.network.gateway is defined %}
+ gateway {{ item.network.gateway | ipaddr('address') }}
+{% endif %}
+
+{% endif %}
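The new debian.interface.j2 above replaces the per-host templates deleted earlier in this change with a single data-driven one: each "item" describes either a plain (optionally VLAN-tagged) interface or a bridge, and the br-vlan branch keeps the veth-pair trick from the old templates. A minimal sketch of the kind of list that could feed it, with hypothetical names and addresses (the real values come from host vars not shown in this diff):

    interfaces:
      - name: "eth0.10"               # no bridge_ports -> "inet manual" branch
        vlan_id: 10                   # emits "vlan-raw-device eth0"
      - name: "br-mgmt"               # bridge_ports defined -> "inet static" branch
        bridge_ports: "eth0.10"
        network:
          address: "172.29.236.11/22" # ipaddr('address') -> 172.29.236.11
                                      # ipaddr('netmask') -> 255.255.252.0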
diff --git a/xci/playbooks/roles/bootstrap-host/templates/osa/redhat.interface.j2 b/xci/playbooks/roles/bootstrap-host/templates/osa/redhat.interface.j2
new file mode 100644
index 00000000..fa957764
--- /dev/null
+++ b/xci/playbooks/roles/bootstrap-host/templates/osa/redhat.interface.j2
@@ -0,0 +1,19 @@
+DEVICE={{ item.name }}
+NM_CONTROLLED=no
+ONBOOT=yes
+BOOTPROTO=none
+{% if item.vlan_id is defined %}
+VLAN=yes
+ETHERDEVICE={{ ansible_local.xci.network.xci_interface }}
+VLAN_ID={{ item.vlan_id }}
+{% endif %}
+{% if item.bridge is defined %}
+BRIDGE={{ item.bridge }}
+{% else %}
+TYPE=Bridge
+DELAY=0
+STP=off
+{% endif %}
+{% if item.network is defined %}
+IPADDR={{ item.network.address }}
+{% endif %}
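In the redhat template above, items that carry a "bridge" key are rendered as slaves of that bridge (BRIDGE=...), while items without one are rendered as the bridge device itself (TYPE=Bridge). A hypothetical render for a VLAN slave item such as {name: "eth0.10", vlan_id: 10, bridge: "br-mgmt"}, assuming xci_interface is eth0:

    DEVICE=eth0.10
    NM_CONTROLLED=no
    ONBOOT=yes
    BOOTPROTO=none
    VLAN=yes
    ETHERDEVICE=eth0
    VLAN_ID=10
    BRIDGE=br-mgmt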
diff --git a/xci/playbooks/roles/bootstrap-host/templates/suse/suse.interface.j2 b/xci/playbooks/roles/bootstrap-host/templates/osa/suse.interface.j2
index 27b01eb4..70811a09 100644
--- a/xci/playbooks/roles/bootstrap-host/templates/suse/suse.interface.j2
+++ b/xci/playbooks/roles/bootstrap-host/templates/osa/suse.interface.j2
@@ -10,8 +10,8 @@ BRIDGE_FORWARDDELAY='0'
BRIDGE_STP=off
BRIDGE_PORTS={{ item.bridge_ports }}
{% endif %}
-{% if item.ip is defined %}
-IPADDR={{ item.ip }}
+{% if item.network is defined %}
+IPADDR={{ item.network.address }}
{% endif %}
PRE_UP_SCRIPT="compat:suse:network-config-suse"
POST_DOWN_SCRIPT="compat:suse:network-config-suse"
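This aligns the SUSE template with the "item.network" structure now consumed by the Debian and RedHat templates. SUSE's sysconfig accepts the prefix-length form, so the CIDR value can be written unmodified; with the same hypothetical br-mgmt item as above, the relevant line would render as:

    IPADDR=172.29.236.11/22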
diff --git a/xci/playbooks/roles/bootstrap-host/templates/osa/suse.routes.j2 b/xci/playbooks/roles/bootstrap-host/templates/osa/suse.routes.j2
new file mode 100644
index 00000000..93941fad
--- /dev/null
+++ b/xci/playbooks/roles/bootstrap-host/templates/osa/suse.routes.j2
@@ -0,0 +1 @@
+{{ item.route }} {{ item.gateway | ipaddr('address') }}
diff --git a/xci/playbooks/roles/bootstrap-host/templates/redhat/bridge.ifcfg.j2 b/xci/playbooks/roles/bootstrap-host/templates/redhat/bridge.ifcfg.j2
deleted file mode 100644
index 06b5f177..00000000
--- a/xci/playbooks/roles/bootstrap-host/templates/redhat/bridge.ifcfg.j2
+++ /dev/null
@@ -1,9 +0,0 @@
-DEVICE={{ item.name }}
-NM_CONTROLLED=no
-IPADDR={{ item.ip }}
-PREFIX={{ item.prefix }}
-ONBOOT=yes
-BOOTPROTO=none
-TYPE=Bridge
-DELAY=0
-STP=off
diff --git a/xci/playbooks/roles/bootstrap-host/templates/redhat/interface.ifcfg.j2 b/xci/playbooks/roles/bootstrap-host/templates/redhat/interface.ifcfg.j2
deleted file mode 100644
index a97ad0cf..00000000
--- a/xci/playbooks/roles/bootstrap-host/templates/redhat/interface.ifcfg.j2
+++ /dev/null
@@ -1,10 +0,0 @@
-DEVICE={{ item.name }}
-NM_CONTROLLED=no
-ONBOOT=yes
-BOOTPROTO=none
-{% if item.vlan_id is defined %}
-VLAN=yes
-ETHERDEVICE={{ ansible_default_ipv4.interface }}
-VLAN_ID={{ item.vlan_id }}
-{% endif %}
-BRIDGE={{ item.bridge }}
diff --git a/xci/playbooks/roles/bootstrap-host/templates/suse/suse.routes.j2 b/xci/playbooks/roles/bootstrap-host/templates/suse/suse.routes.j2
deleted file mode 100644
index 7c868447..00000000
--- a/xci/playbooks/roles/bootstrap-host/templates/suse/suse.routes.j2
+++ /dev/null
@@ -1 +0,0 @@
-{{ item.route }} {{ item.gateway }}
diff --git a/xci/scenarios/README.rst b/xci/scenarios/README.rst
deleted file mode 100644
index 5d9bdf06..00000000
--- a/xci/scenarios/README.rst
+++ /dev/null
@@ -1 +0,0 @@
-This folder keeps the roles for the generic scenarios.
diff --git a/xci/scenarios/k8-calico-nofeature/role/k8-calico-nofeature/files/k8s-cluster.yml b/xci/scenarios/k8-calico-nofeature/role/k8-calico-nofeature/files/k8s-cluster.yml
deleted file mode 100644
index 20d3091d..00000000
--- a/xci/scenarios/k8-calico-nofeature/role/k8-calico-nofeature/files/k8s-cluster.yml
+++ /dev/null
@@ -1,292 +0,0 @@
-# Valid bootstrap options (required): ubuntu, coreos, centos, none
-bootstrap_os: none
-
-# Directory where etcd data is stored
-etcd_data_dir: /var/lib/etcd
-
-# Directory where the binaries will be installed
-bin_dir: /usr/local/bin
-
-## The access_ip variable is used to define how other nodes should access
-## the node. This is used in flannel to allow other flannel nodes to see
-## this node for example. The access_ip is really useful in AWS and Google
-## environments where the nodes are accessed remotely by the "public" ip,
-## but don't know about that address themselves.
-#access_ip: 1.1.1.1
-
-### LOADBALANCING AND ACCESS MODES
-## Enable multiaccess to configure etcd clients to access all of the etcd members directly
-## as the "http://hostX:port, http://hostY:port, ..." and ignore the proxy loadbalancers.
-## This may be the case if clients support and loadbalance multiple etcd servers natively.
-#etcd_multiaccess: true
-
-## Internal loadbalancers for apiservers
-#loadbalancer_apiserver_localhost: true
-
-## Local loadbalancer should use this port instead, if defined.
-## Defaults to kube_apiserver_port (6443)
-#nginx_kube_apiserver_port: 8443
-
-### OTHER OPTIONAL VARIABLES
-## For some things, kubelet needs to load kernel modules. For example, dynamic kernel services are needed
-## for mounting persistent volumes into containers. These may not be loaded by preinstall kubernetes
-## processes. For example, ceph and rbd backed volumes. Set to true to allow kubelet to load kernel
-## modules.
-# kubelet_load_modules: false
-
-## Internal network total size. This is the prefix of the
-## entire network. Must be unused in your environment.
-#kube_network_prefix: 18
-
-## With calico it is possible to distribute routes with border routers of the datacenter.
-## Warning: enabling router peering will disable calico's default behavior ('node mesh').
-## The subnets of each node will be distributed by the datacenter router
-#peer_with_router: false
-
-## Upstream dns servers used by dnsmasq
-#upstream_dns_servers:
-# - 8.8.8.8
-# - 8.8.4.4
-
-## There are some changes specific to the cloud providers
-## for instance we need to encapsulate packets with some network plugins
-## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', or 'external'
-## When openstack is used make sure to source in the openstack credentials
-## like you would do when using nova-client before starting the playbook.
-#cloud_provider:
-
-## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (https://github.com/kubernetes/kubernetes/issues/50461)
-#openstack_blockstorage_version: "v1/v2/auto (default)"
-## When OpenStack is used, if LBaaSv2 is available you can enable it with the following variables.
-#openstack_lbaas_enabled: True
-#openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
-#openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
-#openstack_lbaas_create_monitor: "yes"
-#openstack_lbaas_monitor_delay: "1m"
-#openstack_lbaas_monitor_timeout: "30s"
-#openstack_lbaas_monitor_max_retries: "3"
-
-## Uncomment to enable experimental kubeadm deployment mode
-#kubeadm_enabled: false
-#kubeadm_token_first: "{{ lookup('password', 'credentials/kubeadm_token_first length=6 chars=ascii_lowercase,digits') }}"
-#kubeadm_token_second: "{{ lookup('password', 'credentials/kubeadm_token_second length=16 chars=ascii_lowercase,digits') }}"
-#kubeadm_token: "{{ kubeadm_token_first }}.{{ kubeadm_token_second }}"
-#
-## Set these proxy values in order to update package manager and docker daemon to use proxies
-#http_proxy: ""
-#https_proxy: ""
-## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
-#no_proxy: ""
-
-## Uncomment this if you want to force overlay/overlay2 as docker storage driver
-## Please note that overlay2 is only supported on newer kernels
-#docker_storage_options: -s overlay2
-
-# Uncomment this if you have more than 3 nameservers, then we'll only use the first 3.
-#docker_dns_servers_strict: false
-
-## Default packages to install within the cluster, e.g.:
-#kpm_packages:
-# - name: kube-system/grafana
-
-## Certificate Management
-## This setting determines whether certs are generated via scripts or whether a
-## cluster of Hashicorp's Vault is started to issue certificates (using etcd
-## as a backend). Options are "script" or "vault"
-#cert_management: script
-
-# Set to true to allow pre-checks to fail and continue deployment
-#ignore_assert_errors: false
-
-## Etcd auto compaction retention for mvcc key value store in hours
-#etcd_compaction_retention: 0
-
-## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics.
-#etcd_metrics: basic
-
-
-# Kubernetes configuration dirs and system namespace.
-# Those are where all the additional config stuff goes
-# that kubernetes normally puts in /srv/kubernetes.
-# This puts them in a sane location and namespace.
-# Editing those values will almost surely break something.
-kube_config_dir: /etc/kubernetes
-kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
-kube_manifest_dir: "{{ kube_config_dir }}/manifests"
-system_namespace: kube-system
-
-# Logging directory (sysvinit systems)
-kube_log_dir: "/var/log/kubernetes"
-
-# This is where all the cert scripts and certs will be located
-kube_cert_dir: "{{ kube_config_dir }}/ssl"
-
-# This is where all of the bearer tokens will be stored
-kube_token_dir: "{{ kube_config_dir }}/tokens"
-
-# This is where to save basic auth file
-kube_users_dir: "{{ kube_config_dir }}/users"
-
-kube_api_anonymous_auth: false
-
-## Change this to use another Kubernetes version, e.g. a current beta release
-#kube_version: v1.9.0
-
-# Where the binaries will be downloaded.
-# Note: ensure that you have enough disk space (about 1G)
-local_release_dir: "/tmp/releases"
-# Random shifts for retrying failed ops like pushing/downloading
-retry_stagger: 5
-
-# This is the group that the cert creation scripts chgrp the
-# cert files to. Not really changeable...
-kube_cert_group: kube-cert
-
-# Cluster Loglevel configuration
-kube_log_level: 2
-
-# Users to create for basic auth in Kubernetes API via HTTP
-# Optionally add groups for user
-kube_api_pwd: "{{ lookup('password', 'credentials/kube_user length=15 chars=ascii_letters,digits') }}"
-kube_users:
- kube:
- pass: "{{kube_api_pwd}}"
- role: admin
- groups:
- - system:masters
-
-## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth)
-#kube_oidc_auth: false
-kube_basic_auth: true
-#kube_token_auth: false
-
-
-## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/
-## To use OpenID you have to additionally deploy an OpenID Provider (e.g. Dex, Keycloak, ...)
-
-# kube_oidc_url: https:// ...
-# kube_oidc_client_id: kubernetes
-## Optional settings for OIDC
-# kube_oidc_ca_file: {{ kube_cert_dir }}/ca.pem
-# kube_oidc_username_claim: sub
-# kube_oidc_groups_claim: groups
-
-
-# Choose network plugin (calico, contiv, weave or flannel)
-# Can also be set to 'cloud', which lets the cloud provider set up appropriate routing
-kube_network_plugin: calico
-
-# weave's network password for encryption
-# if null then no network encryption
-# you can use --extra-vars to pass the password in command line
-weave_password: EnterPasswordHere
-
-# Weave uses consensus mode by default
-# Enabling seed mode allows you to dynamically add or remove hosts
-# https://www.weave.works/docs/net/latest/ipam/
-weave_mode_seed: false
-
-# These two variables are automatically changed by the weave role; do not manually change these values
-# To reset values :
-# weave_seed: uninitialized
-# weave_peers: uninitialized
-weave_seed: uninitialized
-weave_peers: uninitialized
-
-# Enable kubernetes network policies
-enable_network_policy: false
-
-# Kubernetes internal network for services, unused block of space.
-kube_service_addresses: 10.233.0.0/18
-
-# internal network. When used, it will assign IP
-# addresses from this range to individual pods.
-# This network must be unused in your network infrastructure!
-kube_pods_subnet: 10.233.64.0/18
-
-# internal network node size allocation (optional). This is the size allocated
-# to each node on your network. With these defaults you should have
-# room for 4096 nodes with 254 pods per node.
-kube_network_node_prefix: 24
-
-# The port the API Server will be listening on.
-kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
-kube_apiserver_port: 6443 # (https)
-kube_apiserver_insecure_port: 8080 # (http)
-
-# DNS configuration.
-# Kubernetes cluster name, also will be used as DNS domain
-cluster_name: cluster.local
-# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
-ndots: 2
-# Can be dnsmasq_kubedns, kubedns or none
-dns_mode: kubedns
-# Can be docker_dns, host_resolvconf or none
-resolvconf_mode: docker_dns
-# Deploy netchecker app to verify DNS resolve as an HTTP service
-deploy_netchecker: false
-# Ip address of the kubernetes skydns service
-skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
-dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
-dns_domain: "{{ cluster_name }}"
-
-# Path used to store Docker data
-docker_daemon_graph: "/var/lib/docker"
-
-## A string of extra options to pass to the docker daemon.
-## This string should be exactly as you wish it to appear.
-## An obvious use case is allowing insecure-registry access
-## to self hosted registries like so:
-
-docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} {{ docker_log_opts }}"
-docker_bin_dir: "/usr/bin"
-
-# Settings for containerized control plane (etcd/kubelet/secrets)
-etcd_deployment_type: docker
-kubelet_deployment_type: host
-vault_deployment_type: docker
-helm_deployment_type: host
-
-# K8s image pull policy (imagePullPolicy)
-k8s_image_pull_policy: IfNotPresent
-
-# Kubernetes dashboard
-# RBAC required. see docs/getting-started.md for access details.
-dashboard_enabled: true
-
-# Monitoring apps for k8s
-efk_enabled: false
-
-# Helm deployment
-helm_enabled: false
-
-# Istio deployment
-istio_enabled: false
-
-# Local volume provisioner deployment
-local_volumes_enabled: false
-
-# Add Persistent Volumes Storage Class for corresponding cloud provider ( OpenStack is only supported now )
-persistent_volumes_enabled: false
-
-# Make a copy of kubeconfig on the host that runs Ansible in GITDIR/artifacts
-kubeconfig_localhost: true
-# Download kubectl onto the host that runs Ansible in GITDIR/artifacts
-kubectl_localhost: true
-artifacts_dir: "{{ ansible_env.HOME }}"
-
-# dnsmasq
-# dnsmasq_upstream_dns_servers:
-# - /resolvethiszone.with/10.0.4.250
-# - 8.8.8.8
-
-# Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created. (default true)
-# kubelet_cgroups_per_qos: true
-
-# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet.
-# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
-# kubelet_enforce_node_allocatable: pods
-
-## Supplementary addresses that can be added in kubernetes ssl keys.
-## That can be useful, for example, to set up a keepalived virtual IP
-# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3]
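One detail worth unpacking from the file above: the cluster service VIPs are all derived from kube_service_addresses via ipaddr filters. A worked example with the defaults in this file:

    # kube_service_addresses: 10.233.0.0/18
    #   | ipaddr('net')     -> 10.233.0.0/18   (validated network)
    #   | ipaddr(1)         -> 10.233.0.1/18   (first usable host)
    #   | ipaddr('address') -> 10.233.0.1      (kube_apiserver_ip)
    # likewise skydns_server -> 10.233.0.3 and dnsmasq_dns_server -> 10.233.0.2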
diff --git a/xci/scenarios/k8-calico-nofeature/role/k8-calico-nofeature/tasks/main.yml b/xci/scenarios/k8-calico-nofeature/role/k8-calico-nofeature/tasks/main.yml
deleted file mode 100644
index 5b2939f1..00000000
--- a/xci/scenarios/k8-calico-nofeature/role/k8-calico-nofeature/tasks/main.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-##############################################################################
-# Copyright (c) 2018 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-- name: copy k8s-cluster.yml
- copy:
- src: "k8s-cluster.yml"
- dest: "{{ remote_xci_path }}/.cache/repos/kubespray/opnfv_inventory/group_vars/k8s-cluster.yml"
diff --git a/xci/scenarios/k8-canal-nofeature/role/k8-canal-nofeature/files/k8s-cluster.yml b/xci/scenarios/k8-canal-nofeature/role/k8-canal-nofeature/files/k8s-cluster.yml
deleted file mode 100644
index 7646aefa..00000000
--- a/xci/scenarios/k8-canal-nofeature/role/k8-canal-nofeature/files/k8s-cluster.yml
+++ /dev/null
@@ -1,292 +0,0 @@
-# Valid bootstrap options (required): ubuntu, coreos, centos, none
-bootstrap_os: none
-
-# Directory where etcd data is stored
-etcd_data_dir: /var/lib/etcd
-
-# Directory where the binaries will be installed
-bin_dir: /usr/local/bin
-
-## The access_ip variable is used to define how other nodes should access
-## the node. This is used in flannel to allow other flannel nodes to see
-## this node for example. The access_ip is really useful in AWS and Google
-## environments where the nodes are accessed remotely by the "public" ip,
-## but don't know about that address themselves.
-#access_ip: 1.1.1.1
-
-### LOADBALANCING AND ACCESS MODES
-## Enable multiaccess to configure etcd clients to access all of the etcd members directly
-## as the "http://hostX:port, http://hostY:port, ..." and ignore the proxy loadbalancers.
-## This may be the case if clients support and loadbalance multiple etcd servers natively.
-#etcd_multiaccess: true
-
-## Internal loadbalancers for apiservers
-#loadbalancer_apiserver_localhost: true
-
-## Local loadbalancer should use this port instead, if defined.
-## Defaults to kube_apiserver_port (6443)
-#nginx_kube_apiserver_port: 8443
-
-### OTHER OPTIONAL VARIABLES
-## For some things, kubelet needs to load kernel modules. For example, dynamic kernel services are needed
-## for mounting persistent volumes into containers. These may not be loaded by preinstall kubernetes
-## processes. For example, ceph and rbd backed volumes. Set to true to allow kubelet to load kernel
-## modules.
-# kubelet_load_modules: false
-
-## Internal network total size. This is the prefix of the
-## entire network. Must be unused in your environment.
-#kube_network_prefix: 18
-
-## With calico it is possible to distribute routes with border routers of the datacenter.
-## Warning: enabling router peering will disable calico's default behavior ('node mesh').
-## The subnets of each node will be distributed by the datacenter router
-#peer_with_router: false
-
-## Upstream dns servers used by dnsmasq
-#upstream_dns_servers:
-# - 8.8.8.8
-# - 8.8.4.4
-
-## There are some changes specific to the cloud providers
-## for instance we need to encapsulate packets with some network plugins
-## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', or 'external'
-## When openstack is used make sure to source in the openstack credentials
-## like you would do when using nova-client before starting the playbook.
-#cloud_provider:
-
-## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (https://github.com/kubernetes/kubernetes/issues/50461)
-#openstack_blockstorage_version: "v1/v2/auto (default)"
-## When OpenStack is used, if LBaaSv2 is available you can enable it with the following variables.
-#openstack_lbaas_enabled: True
-#openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
-#openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
-#openstack_lbaas_create_monitor: "yes"
-#openstack_lbaas_monitor_delay: "1m"
-#openstack_lbaas_monitor_timeout: "30s"
-#openstack_lbaas_monitor_max_retries: "3"
-
-## Uncomment to enable experimental kubeadm deployment mode
-#kubeadm_enabled: false
-#kubeadm_token_first: "{{ lookup('password', 'credentials/kubeadm_token_first length=6 chars=ascii_lowercase,digits') }}"
-#kubeadm_token_second: "{{ lookup('password', 'credentials/kubeadm_token_second length=16 chars=ascii_lowercase,digits') }}"
-#kubeadm_token: "{{ kubeadm_token_first }}.{{ kubeadm_token_second }}"
-#
-## Set these proxy values in order to update package manager and docker daemon to use proxies
-#http_proxy: ""
-#https_proxy: ""
-## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
-#no_proxy: ""
-
-## Uncomment this if you want to force overlay/overlay2 as docker storage driver
-## Please note that overlay2 is only supported on newer kernels
-#docker_storage_options: -s overlay2
-
-# Uncomment this if you have more than 3 nameservers, then we'll only use the first 3.
-#docker_dns_servers_strict: false
-
-## Default packages to install within the cluster, e.g.:
-#kpm_packages:
-# - name: kube-system/grafana
-
-## Certificate Management
-## This setting determines whether certs are generated via scripts or whether a
-## cluster of Hashicorp's Vault is started to issue certificates (using etcd
-## as a backend). Options are "script" or "vault"
-#cert_management: script
-
-# Set to true to allow pre-checks to fail and continue deployment
-#ignore_assert_errors: false
-
-## Etcd auto compaction retention for mvcc key value store in hours
-#etcd_compaction_retention: 0
-
-## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics.
-#etcd_metrics: basic
-
-
-# Kubernetes configuration dirs and system namespace.
-# Those are where all the additional config stuff goes
-# that kubernetes normally puts in /srv/kubernetes.
-# This puts them in a sane location and namespace.
-# Editing those values will almost surely break something.
-kube_config_dir: /etc/kubernetes
-kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
-kube_manifest_dir: "{{ kube_config_dir }}/manifests"
-system_namespace: kube-system
-
-# Logging directory (sysvinit systems)
-kube_log_dir: "/var/log/kubernetes"
-
-# This is where all the cert scripts and certs will be located
-kube_cert_dir: "{{ kube_config_dir }}/ssl"
-
-# This is where all of the bearer tokens will be stored
-kube_token_dir: "{{ kube_config_dir }}/tokens"
-
-# This is where to save basic auth file
-kube_users_dir: "{{ kube_config_dir }}/users"
-
-kube_api_anonymous_auth: false
-
-## Change this to use another Kubernetes version, e.g. a current beta release
-#kube_version: v1.9.0
-
-# Where the binaries will be downloaded.
-# Note: ensure that you have enough disk space (about 1G)
-local_release_dir: "/tmp/releases"
-# Random shifts for retrying failed ops like pushing/downloading
-retry_stagger: 5
-
-# This is the group that the cert creation scripts chgrp the
-# cert files to. Not really changeable...
-kube_cert_group: kube-cert
-
-# Cluster Loglevel configuration
-kube_log_level: 2
-
-# Users to create for basic auth in Kubernetes API via HTTP
-# Optionally add groups for user
-kube_api_pwd: "{{ lookup('password', 'credentials/kube_user length=15 chars=ascii_letters,digits') }}"
-kube_users:
- kube:
- pass: "{{kube_api_pwd}}"
- role: admin
- groups:
- - system:masters
-
-## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth)
-#kube_oidc_auth: false
-kube_basic_auth: true
-#kube_token_auth: false
-
-
-## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/
-## To use OpenID you have to additionally deploy an OpenID Provider (e.g. Dex, Keycloak, ...)
-
-# kube_oidc_url: https:// ...
-# kube_oidc_client_id: kubernetes
-## Optional settings for OIDC
-# kube_oidc_ca_file: {{ kube_cert_dir }}/ca.pem
-# kube_oidc_username_claim: sub
-# kube_oidc_groups_claim: groups
-
-
-# Choose network plugin (calico, contiv, weave or flannel)
-# Can also be set to 'cloud', which lets the cloud provider set up appropriate routing
-kube_network_plugin: canal
-
-# weave's network password for encryption
-# if null then no network encryption
-# you can use --extra-vars to pass the password in command line
-weave_password: EnterPasswordHere
-
-# Weave uses consensus mode by default
-# Enabling seed mode allows you to dynamically add or remove hosts
-# https://www.weave.works/docs/net/latest/ipam/
-weave_mode_seed: false
-
-# These two variables are automatically changed by the weave role; do not manually change these values
-# To reset values :
-# weave_seed: uninitialized
-# weave_peers: uninitialized
-weave_seed: uninitialized
-weave_peers: uninitialized
-
-# Enable kubernetes network policies
-enable_network_policy: false
-
-# Kubernetes internal network for services, unused block of space.
-kube_service_addresses: 10.233.0.0/18
-
-# internal network. When used, it will assign IP
-# addresses from this range to individual pods.
-# This network must be unused in your network infrastructure!
-kube_pods_subnet: 10.233.64.0/18
-
-# internal network node size allocation (optional). This is the size allocated
-# to each node on your network. With these defaults you should have
-# room for 4096 nodes with 254 pods per node.
-kube_network_node_prefix: 24
-
-# The port the API Server will be listening on.
-kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
-kube_apiserver_port: 6443 # (https)
-kube_apiserver_insecure_port: 8080 # (http)
-
-# DNS configuration.
-# Kubernetes cluster name, also will be used as DNS domain
-cluster_name: cluster.local
-# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
-ndots: 2
-# Can be dnsmasq_kubedns, kubedns or none
-dns_mode: kubedns
-# Can be docker_dns, host_resolvconf or none
-resolvconf_mode: docker_dns
-# Deploy netchecker app to verify DNS resolve as an HTTP service
-deploy_netchecker: false
-# Ip address of the kubernetes skydns service
-skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
-dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
-dns_domain: "{{ cluster_name }}"
-
-# Path used to store Docker data
-docker_daemon_graph: "/var/lib/docker"
-
-## A string of extra options to pass to the docker daemon.
-## This string should be exactly as you wish it to appear.
-## An obvious use case is allowing insecure-registry access
-## to self hosted registries like so:
-
-docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} {{ docker_log_opts }}"
-docker_bin_dir: "/usr/bin"
-
-# Settings for containerized control plane (etcd/kubelet/secrets)
-etcd_deployment_type: docker
-kubelet_deployment_type: host
-vault_deployment_type: docker
-helm_deployment_type: host
-
-# K8s image pull policy (imagePullPolicy)
-k8s_image_pull_policy: IfNotPresent
-
-# Kubernetes dashboard
-# RBAC required. see docs/getting-started.md for access details.
-dashboard_enabled: true
-
-# Monitoring apps for k8s
-efk_enabled: false
-
-# Helm deployment
-helm_enabled: false
-
-# Istio deployment
-istio_enabled: false
-
-# Local volume provisioner deployment
-local_volumes_enabled: false
-
-# Add Persistent Volumes Storage Class for corresponding cloud provider ( OpenStack is only supported now )
-persistent_volumes_enabled: false
-
-# Make a copy of kubeconfig on the host that runs Ansible in GITDIR/artifacts
-kubeconfig_localhost: true
-# Download kubectl onto the host that runs Ansible in GITDIR/artifacts
-kubectl_localhost: true
-artifacts_dir: "{{ ansible_env.HOME }}"
-
-# dnsmasq
-# dnsmasq_upstream_dns_servers:
-# - /resolvethiszone.with/10.0.4.250
-# - 8.8.8.8
-
-# Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created. (default true)
-# kubelet_cgroups_per_qos: true
-
-# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet.
-# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
-# kubelet_enforce_node_allocatable: pods
-
-## Supplementary addresses that can be added in kubernetes ssl keys.
-## That can be useful, for example, to set up a keepalived virtual IP
-# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3]
diff --git a/xci/scenarios/k8-canal-nofeature/role/k8-canal-nofeature/tasks/main.yml b/xci/scenarios/k8-canal-nofeature/role/k8-canal-nofeature/tasks/main.yml
deleted file mode 100644
index 5b2939f1..00000000
--- a/xci/scenarios/k8-canal-nofeature/role/k8-canal-nofeature/tasks/main.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-##############################################################################
-# Copyright (c) 2018 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-- name: copy k8s-cluster.yml
- copy:
- src: "k8s-cluster.yml"
- dest: "{{ remote_xci_path }}/.cache/repos/kubespray/opnfv_inventory/group_vars/k8s-cluster.yml"
diff --git a/xci/scenarios/k8-flannel-nofeature/role/k8-flannel-nofeature/files/k8-cluster.yml b/xci/scenarios/k8-flannel-nofeature/role/k8-flannel-nofeature/files/k8-cluster.yml
deleted file mode 100644
index 3c3dc5d9..00000000
--- a/xci/scenarios/k8-flannel-nofeature/role/k8-flannel-nofeature/files/k8-cluster.yml
+++ /dev/null
@@ -1,292 +0,0 @@
-# Valid bootstrap options (required): ubuntu, coreos, centos, none
-bootstrap_os: none
-
-# Directory where etcd data is stored
-etcd_data_dir: /var/lib/etcd
-
-# Directory where the binaries will be installed
-bin_dir: /usr/local/bin
-
-## The access_ip variable is used to define how other nodes should access
-## the node. This is used in flannel to allow other flannel nodes to see
-## this node for example. The access_ip is really useful in AWS and Google
-## environments where the nodes are accessed remotely by the "public" ip,
-## but don't know about that address themselves.
-#access_ip: 1.1.1.1
-
-### LOADBALANCING AND ACCESS MODES
-## Enable multiaccess to configure etcd clients to access all of the etcd members directly
-## as the "http://hostX:port, http://hostY:port, ..." and ignore the proxy loadbalancers.
-## This may be the case if clients support and loadbalance multiple etcd servers natively.
-#etcd_multiaccess: true
-
-## Internal loadbalancers for apiservers
-#loadbalancer_apiserver_localhost: true
-
-## Local loadbalancer should use this port instead, if defined.
-## Defaults to kube_apiserver_port (6443)
-#nginx_kube_apiserver_port: 8443
-
-### OTHER OPTIONAL VARIABLES
-## For some things, kubelet needs to load kernel modules. For example, dynamic kernel services are needed
-## for mounting persistent volumes into containers. These may not be loaded by preinstall kubernetes
-## processes. For example, ceph and rbd backed volumes. Set to true to allow kubelet to load kernel
-## modules.
-# kubelet_load_modules: false
-
-## Internal network total size. This is the prefix of the
-## entire network. Must be unused in your environment.
-#kube_network_prefix: 18
-
-## With calico it is possible to distribute routes with border routers of the datacenter.
-## Warning: enabling router peering will disable calico's default behavior ('node mesh').
-## The subnets of each node will be distributed by the datacenter router
-#peer_with_router: false
-
-## Upstream dns servers used by dnsmasq
-#upstream_dns_servers:
-# - 8.8.8.8
-# - 8.8.4.4
-
-## There are some changes specific to the cloud providers
-## for instance we need to encapsulate packets with some network plugins
-## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', or 'external'
-## When openstack is used make sure to source in the openstack credentials
-## like you would do when using nova-client before starting the playbook.
-#cloud_provider:
-
-## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (https://github.com/kubernetes/kubernetes/issues/50461)
-#openstack_blockstorage_version: "v1/v2/auto (default)"
-## When OpenStack is used, if LBaaSv2 is available you can enable it with the following variables.
-#openstack_lbaas_enabled: True
-#openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
-#openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
-#openstack_lbaas_create_monitor: "yes"
-#openstack_lbaas_monitor_delay: "1m"
-#openstack_lbaas_monitor_timeout: "30s"
-#openstack_lbaas_monitor_max_retries: "3"
-
-## Uncomment to enable experimental kubeadm deployment mode
-#kubeadm_enabled: false
-#kubeadm_token_first: "{{ lookup('password', 'credentials/kubeadm_token_first length=6 chars=ascii_lowercase,digits') }}"
-#kubeadm_token_second: "{{ lookup('password', 'credentials/kubeadm_token_second length=16 chars=ascii_lowercase,digits') }}"
-#kubeadm_token: "{{ kubeadm_token_first }}.{{ kubeadm_token_second }}"
-#
-## Set these proxy values in order to update package manager and docker daemon to use proxies
-#http_proxy: ""
-#https_proxy: ""
-## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
-#no_proxy: ""
-
-## Uncomment this if you want to force overlay/overlay2 as docker storage driver
-## Please note that overlay2 is only supported on newer kernels
-#docker_storage_options: -s overlay2
-
-# Uncomment this if you have more than 3 nameservers, then we'll only use the first 3.
-#docker_dns_servers_strict: false
-
-## Default packages to install within the cluster, e.g.:
-#kpm_packages:
-# - name: kube-system/grafana
-
-## Certificate Management
-## This setting determines whether certs are generated via scripts or whether a
-## cluster of Hashicorp's Vault is started to issue certificates (using etcd
-## as a backend). Options are "script" or "vault"
-#cert_management: script
-
-# Set to true to allow pre-checks to fail and continue deployment
-#ignore_assert_errors: false
-
-## Etcd auto compaction retention for mvcc key value store in hours
-#etcd_compaction_retention: 0
-
-## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics.
-#etcd_metrics: basic
-
-
-# Kubernetes configuration dirs and system namespace.
-# Those are where all the additional config stuff goes
-# that kubernetes normally puts in /srv/kubernetes.
-# This puts them in a sane location and namespace.
-# Editing those values will almost surely break something.
-kube_config_dir: /etc/kubernetes
-kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
-kube_manifest_dir: "{{ kube_config_dir }}/manifests"
-system_namespace: kube-system
-
-# Logging directory (sysvinit systems)
-kube_log_dir: "/var/log/kubernetes"
-
-# This is where all the cert scripts and certs will be located
-kube_cert_dir: "{{ kube_config_dir }}/ssl"
-
-# This is where all of the bearer tokens will be stored
-kube_token_dir: "{{ kube_config_dir }}/tokens"
-
-# This is where to save basic auth file
-kube_users_dir: "{{ kube_config_dir }}/users"
-
-kube_api_anonymous_auth: false
-
-## Change this to use another Kubernetes version, e.g. a current beta release
-#kube_version: v1.9.0
-
-# Where the binaries will be downloaded.
-# Note: ensure that you have enough disk space (about 1G)
-local_release_dir: "/tmp/releases"
-# Random shifts for retrying failed ops like pushing/downloading
-retry_stagger: 5
-
-# This is the group that the cert creation scripts chgrp the
-# cert files to. Not really changeable...
-kube_cert_group: kube-cert
-
-# Cluster Loglevel configuration
-kube_log_level: 2
-
-# Users to create for basic auth in Kubernetes API via HTTP
-# Optionally add groups for user
-kube_api_pwd: "{{ lookup('password', 'credentials/kube_user length=15 chars=ascii_letters,digits') }}"
-kube_users:
- kube:
- pass: "{{kube_api_pwd}}"
- role: admin
- groups:
- - system:masters
-
-## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth)
-#kube_oidc_auth: false
-kube_basic_auth: true
-#kube_token_auth: false
-
-
-## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/
-## To use OpenID you have to additionally deploy an OpenID Provider (e.g. Dex, Keycloak, ...)
-
-# kube_oidc_url: https:// ...
-# kube_oidc_client_id: kubernetes
-## Optional settings for OIDC
-# kube_oidc_ca_file: {{ kube_cert_dir }}/ca.pem
-# kube_oidc_username_claim: sub
-# kube_oidc_groups_claim: groups
-
-
-# Choose network plugin (calico, contiv, weave or flannel)
-# Can also be set to 'cloud', which lets the cloud provider set up appropriate routing
-kube_network_plugin: flannel
-
-# weave's network password for encryption
-# if null then no network encryption
-# you can use --extra-vars to pass the password in command line
-weave_password: EnterPasswordHere
-
-# Weave uses consensus mode by default
-# Enabling seed mode allows you to dynamically add or remove hosts
-# https://www.weave.works/docs/net/latest/ipam/
-weave_mode_seed: false
-
-# These two variables are automatically changed by the weave role; do not manually change these values
-# To reset values :
-# weave_seed: uninitialized
-# weave_peers: uninitialized
-weave_seed: uninitialized
-weave_peers: uninitialized
-
-# Enable kubernetes network policies
-enable_network_policy: false
-
-# Kubernetes internal network for services, unused block of space.
-kube_service_addresses: 10.233.0.0/18
-
-# internal network. When used, it will assign IP
-# addresses from this range to individual pods.
-# This network must be unused in your network infrastructure!
-kube_pods_subnet: 10.233.64.0/18
-
-# internal network node size allocation (optional). This is the size allocated
-# to each node on your network. With these defaults you should have
-# room for 4096 nodes with 254 pods per node.
-kube_network_node_prefix: 24
-
-# The port the API Server will be listening on.
-kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
-kube_apiserver_port: 6443 # (https)
-kube_apiserver_insecure_port: 8080 # (http)
-
-# DNS configuration.
-# Kubernetes cluster name, also will be used as DNS domain
-cluster_name: cluster.local
-# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
-ndots: 2
-# Can be dnsmasq_kubedns, kubedns or none
-dns_mode: kubedns
-# Can be docker_dns, host_resolvconf or none
-resolvconf_mode: docker_dns
-# Deploy netchecker app to verify DNS resolve as an HTTP service
-deploy_netchecker: false
-# Ip address of the kubernetes skydns service
-skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
-dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
-dns_domain: "{{ cluster_name }}"
-
-# Path used to store Docker data
-docker_daemon_graph: "/var/lib/docker"
-
-## A string of extra options to pass to the docker daemon.
-## This string should be exactly as you wish it to appear.
-## An obvious use case is allowing insecure-registry access
-## to self hosted registries like so:
-
-docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} {{ docker_log_opts }}"
-docker_bin_dir: "/usr/bin"
-
-# Settings for containerized control plane (etcd/kubelet/secrets)
-etcd_deployment_type: docker
-kubelet_deployment_type: host
-vault_deployment_type: docker
-helm_deployment_type: host
-
-# K8s image pull policy (imagePullPolicy)
-k8s_image_pull_policy: IfNotPresent
-
-# Kubernetes dashboard
-# RBAC required. see docs/getting-started.md for access details.
-dashboard_enabled: true
-
-# Monitoring apps for k8s
-efk_enabled: false
-
-# Helm deployment
-helm_enabled: false
-
-# Istio deployment
-istio_enabled: false
-
-# Local volume provisioner deployment
-local_volumes_enabled: false
-
-# Add Persistent Volumes Storage Class for corresponding cloud provider ( OpenStack is only supported now )
-persistent_volumes_enabled: false
-
-# Make a copy of kubeconfig on the host that runs Ansible in GITDIR/artifacts
-kubeconfig_localhost: true
-# Download kubectl onto the host that runs Ansible in GITDIR/artifacts
-kubectl_localhost: true
-artifacts_dir: "{{ ansible_env.HOME }}"
-
-# dnsmasq
-# dnsmasq_upstream_dns_servers:
-# - /resolvethiszone.with/10.0.4.250
-# - 8.8.8.8
-
-# Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created. (default true)
-# kubelet_cgroups_per_qos: true
-
-# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet.
-# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
-# kubelet_enforce_node_allocatable: pods
-
-## Supplementary addresses that can be added in kubernetes ssl keys.
-## That can be useful, for example, to set up a keepalived virtual IP
-# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3]
diff --git a/xci/scenarios/k8-flannel-nofeature/role/k8-flannel-nofeature/tasks/main.yml b/xci/scenarios/k8-flannel-nofeature/role/k8-flannel-nofeature/tasks/main.yml
deleted file mode 100644
index 5efd7c83..00000000
--- a/xci/scenarios/k8-flannel-nofeature/role/k8-flannel-nofeature/tasks/main.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-##############################################################################
-# Copyright (c) 2018 taseer94@gmail.com & others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-- name: copy the k8-cluster config file
- copy:
- src: k8-cluster.yml
- dest: "{{ remote_xci_path }}/.cache/repos/kubespray/opnfv_inventory/group_vars/k8s-cluster.yml"
diff --git a/xci/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature/files/k8s-cluster.yml b/xci/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature/files/k8s-cluster.yml
deleted file mode 100644
index 614d784e..00000000
--- a/xci/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature/files/k8s-cluster.yml
+++ /dev/null
@@ -1,292 +0,0 @@
-# Valid bootstrap options (required): ubuntu, coreos, centos, none
-bootstrap_os: none
-
-# Directory where etcd data is stored
-etcd_data_dir: /var/lib/etcd
-
-# Directory where the binaries will be installed
-bin_dir: /usr/local/bin
-
-## The access_ip variable is used to define how other nodes should access
-## the node. This is used in flannel to allow other flannel nodes to see
-## this node for example. The access_ip is really useful in AWS and Google
-## environments where the nodes are accessed remotely by the "public" ip,
-## but don't know about that address themselves.
-#access_ip: 1.1.1.1
-
-### LOADBALANCING AND ACCESS MODES
-## Enable multiaccess to configure etcd clients to access all of the etcd members directly
-## as the "http://hostX:port, http://hostY:port, ..." and ignore the proxy loadbalancers.
-## This may be the case if clients support and loadbalance multiple etcd servers natively.
-#etcd_multiaccess: true
-
-## Internal loadbalancers for apiservers
-#loadbalancer_apiserver_localhost: true
-
-## Local loadbalancer should use this port instead, if defined.
-## Defaults to kube_apiserver_port (6443)
-#nginx_kube_apiserver_port: 8443
-
-### OTHER OPTIONAL VARIABLES
-## For some things, kubelet needs to load kernel modules. For example, dynamic kernel services are needed
-## for mounting persistent volumes into containers. These may not be loaded by preinstall kubernetes
-## processes. For example, ceph and rbd backed volumes. Set to true to allow kubelet to load kernel
-## modules.
-# kubelet_load_modules: false
-
-## Internal network total size. This is the prefix of the
-## entire network. Must be unused in your environment.
-#kube_network_prefix: 18
-
-## With calico it is possible to distribute routes with border routers of the datacenter.
-## Warning: enabling router peering will disable calico's default behavior ('node mesh').
-## The subnets of each node will be distributed by the datacenter router
-#peer_with_router: false
-
-## Upstream dns servers used by dnsmasq
-#upstream_dns_servers:
-# - 8.8.8.8
-# - 8.8.4.4
-
-## There are some changes specific to the cloud providers
-## for instance we need to encapsulate packets with some network plugins
-## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', or 'external'
-## When openstack is used make sure to source in the openstack credentials
-## like you would do when using nova-client before starting the playbook.
-#cloud_provider:
-
-## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (https://github.com/kubernetes/kubernetes/issues/50461)
-#openstack_blockstorage_version: "v1/v2/auto (default)"
-## When OpenStack is used, if LBaaSv2 is available you can enable it with the following variables.
-#openstack_lbaas_enabled: True
-#openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
-#openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
-#openstack_lbaas_create_monitor: "yes"
-#openstack_lbaas_monitor_delay: "1m"
-#openstack_lbaas_monitor_timeout: "30s"
-#openstack_lbaas_monitor_max_retries: "3"
-
-## Uncomment to enable experimental kubeadm deployment mode
-#kubeadm_enabled: false
-#kubeadm_token_first: "{{ lookup('password', 'credentials/kubeadm_token_first length=6 chars=ascii_lowercase,digits') }}"
-#kubeadm_token_second: "{{ lookup('password', 'credentials/kubeadm_token_second length=16 chars=ascii_lowercase,digits') }}"
-#kubeadm_token: "{{ kubeadm_token_first }}.{{ kubeadm_token_second }}"
-#
-## Set these proxy values in order to update package manager and docker daemon to use proxies
-#http_proxy: ""
-#https_proxy: ""
-## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
-#no_proxy: ""
-
-## Uncomment this if you want to force overlay/overlay2 as docker storage driver
-## Please note that overlay2 is only supported on newer kernels
-#docker_storage_options: -s overlay2
-
-# Uncomment this if you have more than 3 nameservers, then we'll only use the first 3.
-#docker_dns_servers_strict: false
-
-## Default packages to install within the cluster, e.g.:
-#kpm_packages:
-# - name: kube-system/grafana
-
-## Certificate Management
-## This setting determines whether certs are generated via scripts or whether a
-## cluster of Hashicorp's Vault is started to issue certificates (using etcd
-## as a backend). Options are "script" or "vault"
-#cert_management: script
-
-# Set to true to allow pre-checks to fail and continue deployment
-#ignore_assert_errors: false
-
-## Etcd auto compaction retention for mvcc key value store in hours
-#etcd_compaction_retention: 0
-
-## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics.
-#etcd_metrics: basic
-
-
-# Kubernetes configuration dirs and system namespace.
-# Those are where all the additional config stuff goes
-# that kubernetes normally puts in /srv/kubernetes.
-# This puts them in a sane location and namespace.
-# Editing those values will almost surely break something.
-kube_config_dir: /etc/kubernetes
-kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
-kube_manifest_dir: "{{ kube_config_dir }}/manifests"
-system_namespace: kube-system
-
-# Logging directory (sysvinit systems)
-kube_log_dir: "/var/log/kubernetes"
-
-# This is where all the cert scripts and certs will be located
-kube_cert_dir: "{{ kube_config_dir }}/ssl"
-
-# This is where all of the bearer tokens will be stored
-kube_token_dir: "{{ kube_config_dir }}/tokens"
-
-# This is where to save basic auth file
-kube_users_dir: "{{ kube_config_dir }}/users"
-
-kube_api_anonymous_auth: false
-
-## Change this to use another Kubernetes version, e.g. a current beta release
-#kube_version: v1.9.0
-
-# Where the binaries will be downloaded.
-# Note: ensure that you have enough disk space (about 1G)
-local_release_dir: "/tmp/releases"
-# Random shifts for retrying failed ops like pushing/downloading
-retry_stagger: 5
-
-# This is the group that the cert creation scripts chgrp the
-# cert files to. Not really changeable...
-kube_cert_group: kube-cert
-
-# Cluster Loglevel configuration
-kube_log_level: 2
-
-# Users to create for basic auth in Kubernetes API via HTTP
-# Optionally add groups for user
-kube_api_pwd: "{{ lookup('password', 'credentials/kube_user length=15 chars=ascii_letters,digits') }}"
-kube_users:
- kube:
- pass: "{{kube_api_pwd}}"
- role: admin
- groups:
- - system:masters
-
-## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth)
-#kube_oidc_auth: false
-kube_basic_auth: true
-#kube_token_auth: false
-
-
-## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/
-## To use OpenID you have to additionally deploy an OpenID Provider (e.g. Dex, Keycloak, ...)
-
-# kube_oidc_url: https:// ...
-# kube_oidc_client_id: kubernetes
-## Optional settings for OIDC
-# kube_oidc_ca_file: {{ kube_cert_dir }}/ca.pem
-# kube_oidc_username_claim: sub
-# kube_oidc_groups_claim: groups
-
-
-# Choose network plugin (calico, contiv, weave or flannel)
-# Can also be set to 'cloud', which lets the cloud provider set up appropriate routing
-kube_network_plugin: cloud
-
-# weave's network password for encryption
-# if null then no network encryption
-# you can use --extra-vars to pass the password in command line
-weave_password: EnterPasswordHere
-
-# Weave uses consensus mode by default
-# Enabling seed mode allows you to dynamically add or remove hosts
-# https://www.weave.works/docs/net/latest/ipam/
-weave_mode_seed: false
-
-# These two variables are automatically changed by the weave role; do not manually change these values
-# To reset values :
-# weave_seed: uninitialized
-# weave_peers: uninitialized
-weave_seed: uninitialized
-weave_peers: uninitialized
-
-# Enable kubernetes network policies
-enable_network_policy: false
-
-# Kubernetes internal network for services, unused block of space.
-kube_service_addresses: 10.233.0.0/18
-
-# Kubernetes internal network for pods. When used, it will assign IP
-# addresses from this range to individual pods.
-# This network must be unused in your network infrastructure!
-kube_pods_subnet: 10.233.64.0/18
-
-# internal network node size allocation (optional). This is the size allocated
-# to each node on your network. With these defaults you have
-# room for 64 nodes with 254 pods per node.
-kube_network_node_prefix: 24
-
-# The IP address and ports the API Server will be listening on.
-kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
-kube_apiserver_port: 6443 # (https)
-kube_apiserver_insecure_port: 8080 # (http)
-
-# DNS configuration.
-# Kubernetes cluster name, also used as the DNS domain
-cluster_name: cluster.local
-# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
-ndots: 2
-# Can be dnsmasq_kubedns, kubedns or none
-dns_mode: kubedns
-# Can be docker_dns, host_resolvconf or none
-resolvconf_mode: docker_dns
-# Deploy the netchecker app to verify DNS resolution as an HTTP service
-deploy_netchecker: false
-# IP address of the Kubernetes skydns service
-skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
-dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
-dns_domain: "{{ cluster_name }}"
-
-# Path used to store Docker data
-docker_daemon_graph: "/var/lib/docker"
-
-## A string of extra options to pass to the docker daemon.
-## This string should be exactly as you wish it to appear.
-## An obvious use case is allowing insecure-registry access
-## to self hosted registries like so:
-
-docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} {{ docker_log_opts }}"
-docker_bin_dir: "/usr/bin"
-
-# Settings for containerized control plane (etcd/kubelet/secrets)
-etcd_deployment_type: docker
-kubelet_deployment_type: host
-vault_deployment_type: docker
-helm_deployment_type: host
-
-# K8s image pull policy (imagePullPolicy)
-k8s_image_pull_policy: IfNotPresent
-
-# Kubernetes dashboard
-# RBAC required. see docs/getting-started.md for access details.
-dashboard_enabled: true
-
-# Monitoring apps for k8s
-efk_enabled: false
-
-# Helm deployment
-helm_enabled: false
-
-# Istio deployment
-istio_enabled: false
-
-# Local volume provisioner deployment
-local_volumes_enabled: false
-
-# Add a Persistent Volumes Storage Class for the corresponding cloud provider (only OpenStack is supported for now)
-persistent_volumes_enabled: false
-
-# Make a copy of kubeconfig on the host that runs Ansible in GITDIR/artifacts
-kubeconfig_localhost: true
-# Download kubectl onto the host that runs Ansible in GITDIR/artifacts
-kubectl_localhost: true
-artifacts_dir: "{{ ansible_env.HOME }}"
-
-# dnsmasq
-# dnsmasq_upstream_dns_servers:
-# - /resolvethiszone.with/10.0.4.250
-# - 8.8.8.8
-
-# Enable creation of the QoS cgroup hierarchy; if true, top-level QoS and pod cgroups are created. (default true)
-# kubelet_cgroups_per_qos: true
-
-# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet.
-# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
-# kubelet_enforce_node_allocatable: pods
-
-## Supplementary addresses that can be added in kubernetes ssl keys.
-## This can be useful, for example, to set up a keepalived virtual IP
-# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3]
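The apiserver and DNS service addresses above are not hard-coded; they are derived from kube_service_addresses with Ansible's ipaddr filter, which simply indexes into the service network. A minimal Python sketch of the same arithmetic, assuming the 10.233.0.0/18 default shown above (the stdlib ipaddress module stands in for the ipaddr filter):

    import ipaddress

    # kube_service_addresses default from the deleted k8s-cluster.yml
    service_net = ipaddress.ip_network("10.233.0.0/18")

    # ... | ipaddr('net') | ipaddr(N) | ipaddr('address') picks the Nth address
    kube_apiserver_ip = service_net[1]    # 10.233.0.1
    dnsmasq_dns_server = service_net[2]   # 10.233.0.2
    skydns_server = service_net[3]        # 10.233.0.3

    print(kube_apiserver_ip, dnsmasq_dns_server, skydns_server)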
diff --git a/xci/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature/tasks/.gitkeep b/xci/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature/tasks/.gitkeep
deleted file mode 100644
index e69de29b..00000000
--- a/xci/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature/tasks/.gitkeep
+++ /dev/null
diff --git a/xci/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature/tasks/main.yml b/xci/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature/tasks/main.yml
deleted file mode 100644
index 5b2939f1..00000000
--- a/xci/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature/tasks/main.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-##############################################################################
-# Copyright (c) 2018 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-- name: copy k8s-cluster.yml
- copy:
- src: "k8s-cluster.yml"
- dest: "{{ remote_xci_path }}/.cache/repos/kubespray/opnfv_inventory/group_vars/k8s-cluster.yml"
diff --git a/xci/scenarios/os-nosdn-nofeature/README.rst b/xci/scenarios/os-nosdn-nofeature/README.rst
deleted file mode 100644
index dcdc83fc..00000000
--- a/xci/scenarios/os-nosdn-nofeature/README.rst
+++ /dev/null
@@ -1,2 +0,0 @@
-This scenario is currently incomplete. In order for it to be
-complete, changes for CEPH must be moved here, combining OVS + CEPH.
diff --git a/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/ha/openstack_user_config.yml b/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/ha/openstack_user_config.yml
deleted file mode 100644
index 1aaf84d8..00000000
--- a/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/ha/openstack_user_config.yml
+++ /dev/null
@@ -1,255 +0,0 @@
----
-cidr_networks:
- container: 172.29.236.0/22
- tunnel: 172.29.240.0/22
- storage: 172.29.244.0/22
-
-used_ips:
- - "172.29.236.1,172.29.236.50"
- - "172.29.240.1,172.29.240.50"
- - "172.29.244.1,172.29.244.50"
- - "172.29.248.1,172.29.248.50"
- - "172.29.236.222"
-
-global_overrides:
- internal_lb_vip_address: 172.29.236.222
- external_lb_vip_address: 192.168.122.220
- tunnel_bridge: "br-vxlan"
- management_bridge: "br-mgmt"
- provider_networks:
- - network:
- container_bridge: "br-mgmt"
- container_type: "veth"
- container_interface: "eth1"
- ip_from_q: "container"
- type: "raw"
- group_binds:
- - all_containers
- - hosts
- is_container_address: true
- is_ssh_address: true
- - network:
- container_bridge: "br-vxlan"
- container_type: "veth"
- container_interface: "eth10"
- ip_from_q: "tunnel"
- type: "vxlan"
- range: "1:1000"
- net_name: "vxlan"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth12"
- host_bind_override: "eth12"
- type: "flat"
- net_name: "flat"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth11"
- type: "vlan"
- range: "1:1"
- net_name: "vlan"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-storage"
- container_type: "veth"
- container_interface: "eth2"
- ip_from_q: "storage"
- type: "raw"
- group_binds:
- - glance_api
- - cinder_api
- - cinder_volume
- - nova_compute
-
-# ##
-# ## Infrastructure
-# ##
-
-# galera, memcache, rabbitmq, utility
-shared-infra_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# repository (apt cache, python packages, etc)
-repo-infra_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# load balancer
-# Ideally the load balancer should not use the Infrastructure hosts.
-# Dedicated hardware is best for improved performance and security.
-haproxy_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# rsyslog server
-# log_hosts:
-# log1:
-# ip: 172.29.236.14
-
-# ##
-# ## OpenStack
-# ##
-
-# keystone
-identity_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# cinder api services
-storage-infra_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# glance
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-image_hosts:
- controller00:
- ip: 172.29.236.11
- container_vars:
- limit_container_types: glance
- glance_nfs_client:
- - server: "172.29.244.14"
- remote_path: "/images"
- local_path: "/var/lib/glance/images"
- type: "nfs"
- options: "_netdev,auto"
- controller01:
- ip: 172.29.236.12
- container_vars:
- limit_container_types: glance
- glance_nfs_client:
- - server: "172.29.244.14"
- remote_path: "/images"
- local_path: "/var/lib/glance/images"
- type: "nfs"
- options: "_netdev,auto"
- controller02:
- ip: 172.29.236.13
- container_vars:
- limit_container_types: glance
- glance_nfs_client:
- - server: "172.29.244.14"
- remote_path: "/images"
- local_path: "/var/lib/glance/images"
- type: "nfs"
- options: "_netdev,auto"
-
-# nova api, conductor, etc services
-compute-infra_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# heat
-orchestration_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# horizon
-dashboard_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# neutron server, agents (L3, etc)
-network_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# nova hypervisors
-compute_hosts:
- compute00:
- ip: 172.29.236.14
- compute01:
- ip: 172.29.236.15
-
-# cinder volume hosts (NFS-backed)
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-storage_hosts:
- controller00:
- ip: 172.29.236.11
- container_vars:
- cinder_backends:
- limit_container_types: cinder_volume
- nfs_volume:
- volume_backend_name: NFS_VOLUME1
- volume_driver: cinder.volume.drivers.nfs.NfsDriver
- nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
- nfs_shares_config: /etc/cinder/nfs_shares
- shares:
- - ip: "172.29.244.14"
- share: "/volumes"
- controller01:
- ip: 172.29.236.12
- container_vars:
- cinder_backends:
- limit_container_types: cinder_volume
- nfs_volume:
- volume_backend_name: NFS_VOLUME1
- volume_driver: cinder.volume.drivers.nfs.NfsDriver
- nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
- nfs_shares_config: /etc/cinder/nfs_shares
- shares:
- - ip: "172.29.244.14"
- share: "/volumes"
- controller02:
- ip: 172.29.236.13
- container_vars:
- cinder_backends:
- limit_container_types: cinder_volume
- nfs_volume:
- volume_backend_name: NFS_VOLUME1
- volume_driver: cinder.volume.drivers.nfs.NfsDriver
- nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
- nfs_shares_config: /etc/cinder/nfs_shares
- shares:
- - ip: "172.29.244.14"
- share: "/volumes"
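The used_ips ranges in the openstack_user_config.yml deleted above (and in the near-identical mini/noha variants that follow) keep OSA's container IP allocator away from the statically assigned host addresses and the internal VIP. A short sketch of that reservation logic, assuming the HA values above; the helper is illustrative and is not OSA code:

    import ipaddress

    used_ips = ["172.29.236.1,172.29.236.50", "172.29.236.222"]

    def reserved(addr):
        """True if addr falls in a used_ips entry (single IP or 'start,end' range)."""
        ip = ipaddress.ip_address(addr)
        for entry in used_ips:
            parts = entry.split(",")
            if ipaddress.ip_address(parts[0]) <= ip <= ipaddress.ip_address(parts[-1]):
                return True
        return False

    assert reserved("172.29.236.11")       # controller00 host address
    assert reserved("172.29.236.222")      # internal_lb_vip_address
    assert not reserved("172.29.236.100")  # left for OSA containers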
diff --git a/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/mini/openstack_user_config.yml b/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/mini/openstack_user_config.yml
deleted file mode 100644
index 86b87c15..00000000
--- a/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/mini/openstack_user_config.yml
+++ /dev/null
@@ -1,170 +0,0 @@
----
-cidr_networks:
- container: 172.29.236.0/22
- tunnel: 172.29.240.0/22
- storage: 172.29.244.0/22
-
-used_ips:
- - "172.29.236.1,172.29.236.50"
- - "172.29.240.1,172.29.240.50"
- - "172.29.244.1,172.29.244.50"
- - "172.29.248.1,172.29.248.50"
-
-global_overrides:
- internal_lb_vip_address: 172.29.236.11
- external_lb_vip_address: 192.168.122.3
- tunnel_bridge: "br-vxlan"
- management_bridge: "br-mgmt"
- provider_networks:
- - network:
- container_bridge: "br-mgmt"
- container_type: "veth"
- container_interface: "eth1"
- ip_from_q: "container"
- type: "raw"
- group_binds:
- - all_containers
- - hosts
- is_container_address: true
- is_ssh_address: true
- - network:
- container_bridge: "br-vxlan"
- container_type: "veth"
- container_interface: "eth10"
- ip_from_q: "tunnel"
- type: "vxlan"
- range: "1:1000"
- net_name: "vxlan"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth12"
- host_bind_override: "eth12"
- type: "flat"
- net_name: "flat"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth11"
- type: "vlan"
- range: "1:1"
- net_name: "vlan"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-storage"
- container_type: "veth"
- container_interface: "eth2"
- ip_from_q: "storage"
- type: "raw"
- group_binds:
- - glance_api
- - cinder_api
- - cinder_volume
- - nova_compute
-
-# ##
-# ## Infrastructure
-# ##
-
-# galera, memcache, rabbitmq, utility
-shared-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# repository (apt cache, python packages, etc)
-repo-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# load balancer
-# Ideally the load balancer should not use the Infrastructure hosts.
-# Dedicated hardware is best for improved performance and security.
-haproxy_hosts:
- controller00:
- ip: 172.29.236.11
-
-# rsyslog server
-# log_hosts:
-# log1:
-# ip: 172.29.236.14
-
-# ##
-# ## OpenStack
-# ##
-
-# keystone
-identity_hosts:
- controller00:
- ip: 172.29.236.11
-
-# cinder api services
-storage-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# glance
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-image_hosts:
- controller00:
- ip: 172.29.236.11
- container_vars:
- limit_container_types: glance
- glance_nfs_client:
- - server: "172.29.244.12"
- remote_path: "/images"
- local_path: "/var/lib/glance/images"
- type: "nfs"
- options: "_netdev,auto"
-
-# nova api, conductor, etc services
-compute-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# heat
-orchestration_hosts:
- controller00:
- ip: 172.29.236.11
-
-# horizon
-dashboard_hosts:
- controller00:
- ip: 172.29.236.11
-
-# neutron server, agents (L3, etc)
-network_hosts:
- controller00:
- ip: 172.29.236.11
-
-# nova hypervisors
-compute_hosts:
- compute00:
- ip: 172.29.236.12
-
-# cinder volume hosts (NFS-backed)
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-storage_hosts:
- controller00:
- ip: 172.29.236.11
- container_vars:
- cinder_backends:
- limit_container_types: cinder_volume
- nfs_volume:
- volume_backend_name: NFS_VOLUME1
- volume_driver: cinder.volume.drivers.nfs.NfsDriver
- nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
- nfs_shares_config: /etc/cinder/nfs_shares
- shares:
- - ip: "172.29.244.12"
- share: "/volumes"
diff --git a/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/noha/openstack_user_config.yml b/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/noha/openstack_user_config.yml
deleted file mode 100644
index 99b768c4..00000000
--- a/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/noha/openstack_user_config.yml
+++ /dev/null
@@ -1,172 +0,0 @@
----
-cidr_networks:
- container: 172.29.236.0/22
- tunnel: 172.29.240.0/22
- storage: 172.29.244.0/22
-
-used_ips:
- - "172.29.236.1,172.29.236.50"
- - "172.29.240.1,172.29.240.50"
- - "172.29.244.1,172.29.244.50"
- - "172.29.248.1,172.29.248.50"
-
-global_overrides:
- internal_lb_vip_address: 172.29.236.11
- external_lb_vip_address: 192.168.122.3
- tunnel_bridge: "br-vxlan"
- management_bridge: "br-mgmt"
- provider_networks:
- - network:
- container_bridge: "br-mgmt"
- container_type: "veth"
- container_interface: "eth1"
- ip_from_q: "container"
- type: "raw"
- group_binds:
- - all_containers
- - hosts
- is_container_address: true
- is_ssh_address: true
- - network:
- container_bridge: "br-vxlan"
- container_type: "veth"
- container_interface: "eth10"
- ip_from_q: "tunnel"
- type: "vxlan"
- range: "1:1000"
- net_name: "vxlan"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth12"
- host_bind_override: "eth12"
- type: "flat"
- net_name: "flat"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth11"
- type: "vlan"
- range: "1:1"
- net_name: "vlan"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-storage"
- container_type: "veth"
- container_interface: "eth2"
- ip_from_q: "storage"
- type: "raw"
- group_binds:
- - glance_api
- - cinder_api
- - cinder_volume
- - nova_compute
-
-# ##
-# ## Infrastructure
-# ##
-
-# galera, memcache, rabbitmq, utility
-shared-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# repository (apt cache, python packages, etc)
-repo-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# load balancer
-# Ideally the load balancer should not use the Infrastructure hosts.
-# Dedicated hardware is best for improved performance and security.
-haproxy_hosts:
- controller00:
- ip: 172.29.236.11
-
-# rsyslog server
-# log_hosts:
-# log1:
-# ip: 172.29.236.14
-
-# ##
-# ## OpenStack
-# ##
-
-# keystone
-identity_hosts:
- controller00:
- ip: 172.29.236.11
-
-# cinder api services
-storage-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# glance
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-image_hosts:
- controller00:
- ip: 172.29.236.11
- container_vars:
- limit_container_types: glance
- glance_nfs_client:
- - server: "172.29.244.12"
- remote_path: "/images"
- local_path: "/var/lib/glance/images"
- type: "nfs"
- options: "_netdev,auto"
-
-# nova api, conductor, etc services
-compute-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# heat
-orchestration_hosts:
- controller00:
- ip: 172.29.236.11
-
-# horizon
-dashboard_hosts:
- controller00:
- ip: 172.29.236.11
-
-# neutron server, agents (L3, etc)
-network_hosts:
- controller00:
- ip: 172.29.236.11
-
-# nova hypervisors
-compute_hosts:
- compute00:
- ip: 172.29.236.12
- compute01:
- ip: 172.29.236.13
-
-# cinder volume hosts (NFS-backed)
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-storage_hosts:
- controller00:
- ip: 172.29.236.11
- container_vars:
- cinder_backends:
- limit_container_types: cinder_volume
- nfs_volume:
- volume_backend_name: NFS_VOLUME1
- volume_driver: cinder.volume.drivers.nfs.NfsDriver
- nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
- nfs_shares_config: /etc/cinder/nfs_shares
- shares:
- - ip: "172.29.244.12"
- share: "/volumes"
diff --git a/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/user_variables_os-nosdn-nofeature.yml b/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/user_variables_os-nosdn-nofeature.yml
deleted file mode 100644
index 2f678544..00000000
--- a/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/user_variables_os-nosdn-nofeature.yml
+++ /dev/null
@@ -1,35 +0,0 @@
----
-# Copyright (c) 2017 Ericsson AB and others.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# ##
-# ## This file contains commonly used overrides for convenience. Please inspect
-# ## the defaults for each role to find additional override options.
-# ##
-
-# Ensure the openvswitch kernel module is loaded
-openstack_host_specific_kernel_modules:
- - name: "openvswitch"
- pattern: "CONFIG_OPENVSWITCH"
- group: "network_hosts"
-
-# neutron specific config
-neutron_plugin_type: ml2.ovs
-
-neutron_ml2_drivers_type: "flat,vlan,vxlan"
-
-neutron_provider_networks:
- network_flat_networks: "*"
- network_types: "vxlan"
- network_vxlan_ranges: "1:1000" \ No newline at end of file
diff --git a/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/tasks/main.yml b/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/tasks/main.yml
deleted file mode 100644
index 79aa3aa1..00000000
--- a/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/tasks/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-# SPDX-license-identifier: Apache-2.0
-##############################################################################
-# Copyright (c) 2017 Ericsson AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-- name: copy user_variables_os-nosdn-nofeature.yml
- copy:
- src: "user_variables_os-nosdn-nofeature.yml"
- dest: "{{openstack_osa_etc_path}}/user_variables_os-nosdn-nofeature.yml"
-- name: copy os-nosdn-nofeature scenario specific openstack_user_config.yml
- copy:
- src: "{{xci_flavor}}/openstack_user_config.yml"
- dest: "{{openstack_osa_etc_path}}/openstack_user_config.yml"
diff --git a/xci/scenarios/os-odl-nofeature/.gitkeep b/xci/scenarios/os-odl-nofeature/.gitkeep
deleted file mode 100644
index e69de29b..00000000
--- a/xci/scenarios/os-odl-nofeature/.gitkeep
+++ /dev/null
diff --git a/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/ha/openstack_user_config.yml b/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/ha/openstack_user_config.yml
deleted file mode 100644
index 2ca5a987..00000000
--- a/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/ha/openstack_user_config.yml
+++ /dev/null
@@ -1,256 +0,0 @@
----
-cidr_networks:
- container: 172.29.236.0/22
- tunnel: 172.29.240.0/22
- storage: 172.29.244.0/22
-
-used_ips:
- - "172.29.236.1,172.29.236.50"
- - "172.29.240.1,172.29.240.50"
- - "172.29.244.1,172.29.244.50"
- - "172.29.248.1,172.29.248.50"
- - "172.29.236.222"
-
-global_overrides:
- internal_lb_vip_address: 172.29.236.222
- external_lb_vip_address: 192.168.122.220
- tunnel_bridge: "br-vxlan"
- management_bridge: "br-mgmt"
- provider_networks:
- - network:
- container_bridge: "br-mgmt"
- container_type: "veth"
- container_interface: "eth1"
- ip_from_q: "container"
- type: "raw"
- group_binds:
- - all_containers
- - hosts
- is_container_address: true
- is_ssh_address: true
- - network:
- container_bridge: "br-vxlan"
- container_type: "veth"
- container_interface: "eth10"
- ip_from_q: "tunnel"
- type: "vxlan"
- range: "1:1000"
- net_name: "vxlan"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth12"
- host_bind_override: "eth12"
- type: "flat"
- net_name: "flat"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth11"
- host_bind_override: "eth12"
- type: "vlan"
- range: "102:199"
- net_name: "physnet1"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-storage"
- container_type: "veth"
- container_interface: "eth2"
- ip_from_q: "storage"
- type: "raw"
- group_binds:
- - glance_api
- - cinder_api
- - cinder_volume
- - nova_compute
-
-# ##
-# ## Infrastructure
-# ##
-
-# galera, memcache, rabbitmq, utility
-shared-infra_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# repository (apt cache, python packages, etc)
-repo-infra_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# load balancer
-# Ideally the load balancer should not use the Infrastructure hosts.
-# Dedicated hardware is best for improved performance and security.
-haproxy_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# rsyslog server
-# log_hosts:
-# log1:
-# ip: 172.29.236.14
-
-# ##
-# ## OpenStack
-# ##
-
-# keystone
-identity_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# cinder api services
-storage-infra_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# glance
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-image_hosts:
- controller00:
- ip: 172.29.236.11
- container_vars:
- limit_container_types: glance
- glance_nfs_client:
- - server: "172.29.244.14"
- remote_path: "/images"
- local_path: "/var/lib/glance/images"
- type: "nfs"
- options: "_netdev,auto"
- controller01:
- ip: 172.29.236.12
- container_vars:
- limit_container_types: glance
- glance_nfs_client:
- - server: "172.29.244.14"
- remote_path: "/images"
- local_path: "/var/lib/glance/images"
- type: "nfs"
- options: "_netdev,auto"
- controller02:
- ip: 172.29.236.13
- container_vars:
- limit_container_types: glance
- glance_nfs_client:
- - server: "172.29.244.14"
- remote_path: "/images"
- local_path: "/var/lib/glance/images"
- type: "nfs"
- options: "_netdev,auto"
-
-# nova api, conductor, etc services
-compute-infra_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# heat
-orchestration_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# horizon
-dashboard_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# neutron server, agents (L3, etc)
-network_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# nova hypervisors
-compute_hosts:
- compute00:
- ip: 172.29.236.14
- compute01:
- ip: 172.29.236.15
-
-# cinder volume hosts (NFS-backed)
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-storage_hosts:
- controller00:
- ip: 172.29.236.11
- container_vars:
- cinder_backends:
- limit_container_types: cinder_volume
- nfs_volume:
- volume_backend_name: NFS_VOLUME1
- volume_driver: cinder.volume.drivers.nfs.NfsDriver
- nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
- nfs_shares_config: /etc/cinder/nfs_shares
- shares:
- - ip: "172.29.244.14"
- share: "/volumes"
- controller01:
- ip: 172.29.236.12
- container_vars:
- cinder_backends:
- limit_container_types: cinder_volume
- nfs_volume:
- volume_backend_name: NFS_VOLUME1
- volume_driver: cinder.volume.drivers.nfs.NfsDriver
- nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
- nfs_shares_config: /etc/cinder/nfs_shares
- shares:
- - ip: "172.29.244.14"
- share: "/volumes"
- controller02:
- ip: 172.29.236.13
- container_vars:
- cinder_backends:
- limit_container_types: cinder_volume
- nfs_volume:
- volume_backend_name: NFS_VOLUME1
- volume_driver: cinder.volume.drivers.nfs.NfsDriver
- nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
- nfs_shares_config: /etc/cinder/nfs_shares
- shares:
- - ip: "172.29.244.14"
- share: "/volumes"
diff --git a/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/mini/openstack_user_config.yml b/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/mini/openstack_user_config.yml
deleted file mode 100644
index 0f8ccd18..00000000
--- a/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/mini/openstack_user_config.yml
+++ /dev/null
@@ -1,171 +0,0 @@
----
-cidr_networks:
- container: 172.29.236.0/22
- tunnel: 172.29.240.0/22
- storage: 172.29.244.0/22
-
-used_ips:
- - "172.29.236.1,172.29.236.50"
- - "172.29.240.1,172.29.240.50"
- - "172.29.244.1,172.29.244.50"
- - "172.29.248.1,172.29.248.50"
-
-global_overrides:
- internal_lb_vip_address: 172.29.236.11
- external_lb_vip_address: 192.168.122.3
- tunnel_bridge: "br-vxlan"
- management_bridge: "br-mgmt"
- provider_networks:
- - network:
- container_bridge: "br-mgmt"
- container_type: "veth"
- container_interface: "eth1"
- ip_from_q: "container"
- type: "raw"
- group_binds:
- - all_containers
- - hosts
- is_container_address: true
- is_ssh_address: true
- - network:
- container_bridge: "br-vxlan"
- container_type: "veth"
- container_interface: "eth10"
- ip_from_q: "tunnel"
- type: "vxlan"
- range: "1:1000"
- net_name: "vxlan"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth12"
- host_bind_override: "eth12"
- type: "flat"
- net_name: "flat"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth11"
- host_bind_override: "eth12"
- type: "vlan"
- range: "102:199"
- net_name: "physnet1"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-storage"
- container_type: "veth"
- container_interface: "eth2"
- ip_from_q: "storage"
- type: "raw"
- group_binds:
- - glance_api
- - cinder_api
- - cinder_volume
- - nova_compute
-
-# ##
-# ## Infrastructure
-# ##
-
-# galera, memcache, rabbitmq, utility
-shared-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# repository (apt cache, python packages, etc)
-repo-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# load balancer
-# Ideally the load balancer should not use the Infrastructure hosts.
-# Dedicated hardware is best for improved performance and security.
-haproxy_hosts:
- controller00:
- ip: 172.29.236.11
-
-# rsyslog server
-# log_hosts:
-# log1:
-# ip: 172.29.236.14
-
-# ##
-# ## OpenStack
-# ##
-
-# keystone
-identity_hosts:
- controller00:
- ip: 172.29.236.11
-
-# cinder api services
-storage-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# glance
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-image_hosts:
- controller00:
- ip: 172.29.236.11
- container_vars:
- limit_container_types: glance
- glance_nfs_client:
- - server: "172.29.244.12"
- remote_path: "/images"
- local_path: "/var/lib/glance/images"
- type: "nfs"
- options: "_netdev,auto"
-
-# nova api, conductor, etc services
-compute-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# heat
-orchestration_hosts:
- controller00:
- ip: 172.29.236.11
-
-# horizon
-dashboard_hosts:
- controller00:
- ip: 172.29.236.11
-
-# neutron server, agents (L3, etc)
-network_hosts:
- controller00:
- ip: 172.29.236.11
-
-# nova hypervisors
-compute_hosts:
- compute00:
- ip: 172.29.236.12
-
-# cinder volume hosts (NFS-backed)
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-storage_hosts:
- controller00:
- ip: 172.29.236.11
- container_vars:
- cinder_backends:
- limit_container_types: cinder_volume
- nfs_volume:
- volume_backend_name: NFS_VOLUME1
- volume_driver: cinder.volume.drivers.nfs.NfsDriver
- nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
- nfs_shares_config: /etc/cinder/nfs_shares
- shares:
- - ip: "172.29.244.12"
- share: "/volumes"
diff --git a/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/noha/openstack_user_config.yml b/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/noha/openstack_user_config.yml
deleted file mode 100644
index 7ed9cd32..00000000
--- a/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/noha/openstack_user_config.yml
+++ /dev/null
@@ -1,173 +0,0 @@
----
-cidr_networks:
- container: 172.29.236.0/22
- tunnel: 172.29.240.0/22
- storage: 172.29.244.0/22
-
-used_ips:
- - "172.29.236.1,172.29.236.50"
- - "172.29.240.1,172.29.240.50"
- - "172.29.244.1,172.29.244.50"
- - "172.29.248.1,172.29.248.50"
-
-global_overrides:
- internal_lb_vip_address: 172.29.236.11
- external_lb_vip_address: 192.168.122.3
- tunnel_bridge: "br-vxlan"
- management_bridge: "br-mgmt"
- provider_networks:
- - network:
- container_bridge: "br-mgmt"
- container_type: "veth"
- container_interface: "eth1"
- ip_from_q: "container"
- type: "raw"
- group_binds:
- - all_containers
- - hosts
- is_container_address: true
- is_ssh_address: true
- - network:
- container_bridge: "br-vxlan"
- container_type: "veth"
- container_interface: "eth10"
- ip_from_q: "tunnel"
- type: "vxlan"
- range: "1:1000"
- net_name: "vxlan"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth12"
- host_bind_override: "eth12"
- type: "flat"
- net_name: "flat"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth11"
- host_bind_override: "eth12"
- type: "vlan"
- range: "102:199"
- net_name: "physnet1"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-storage"
- container_type: "veth"
- container_interface: "eth2"
- ip_from_q: "storage"
- type: "raw"
- group_binds:
- - glance_api
- - cinder_api
- - cinder_volume
- - nova_compute
-
-# ##
-# ## Infrastructure
-# ##
-
-# galera, memcache, rabbitmq, utility
-shared-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# repository (apt cache, python packages, etc)
-repo-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# load balancer
-# Ideally the load balancer should not use the Infrastructure hosts.
-# Dedicated hardware is best for improved performance and security.
-haproxy_hosts:
- controller00:
- ip: 172.29.236.11
-
-# rsyslog server
-# log_hosts:
-# log1:
-# ip: 172.29.236.14
-
-# ##
-# ## OpenStack
-# ##
-
-# keystone
-identity_hosts:
- controller00:
- ip: 172.29.236.11
-
-# cinder api services
-storage-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# glance
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-image_hosts:
- controller00:
- ip: 172.29.236.11
- container_vars:
- limit_container_types: glance
- glance_nfs_client:
- - server: "172.29.244.12"
- remote_path: "/images"
- local_path: "/var/lib/glance/images"
- type: "nfs"
- options: "_netdev,auto"
-
-# nova api, conductor, etc services
-compute-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# heat
-orchestration_hosts:
- controller00:
- ip: 172.29.236.11
-
-# horizon
-dashboard_hosts:
- controller00:
- ip: 172.29.236.11
-
-# neutron server, agents (L3, etc)
-network_hosts:
- controller00:
- ip: 172.29.236.11
-
-# nova hypervisors
-compute_hosts:
- compute00:
- ip: 172.29.236.12
- compute01:
- ip: 172.29.236.13
-
-# cinder volume hosts (NFS-backed)
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-storage_hosts:
- controller00:
- ip: 172.29.236.11
- container_vars:
- cinder_backends:
- limit_container_types: cinder_volume
- nfs_volume:
- volume_backend_name: NFS_VOLUME1
- volume_driver: cinder.volume.drivers.nfs.NfsDriver
- nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
- nfs_shares_config: /etc/cinder/nfs_shares
- shares:
- - ip: "172.29.244.12"
- share: "/volumes"
diff --git a/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/tasks/main.yml b/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/tasks/main.yml
deleted file mode 100644
index 7e872787..00000000
--- a/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/tasks/main.yml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-# SPDX-license-identifier: Apache-2.0
-##############################################################################
-# Copyright (c) 2017 Ericsson AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-- name: copy user_variables_os-odl-nofeature.yml
- template:
- src: "user_variables_os-odl-nofeature.yml.j2"
- dest: "{{openstack_osa_etc_path}}/user_variables_os-odl-nofeature.yml"
-
-- name: copy user_variables_os-odl-nofeature-ha.yml
- copy:
- src: "{{xci_flavor}}/user_variables_os-odl-nofeature-ha.yml"
- dest: "{{openstack_osa_etc_path}}/user_variables_os-odl-nofeature-ha.yml"
- when:
- - xci_flavor == "ha"
-
-- name: copy os-odl-nofeature scenario specific openstack_user_config.yml
- copy:
- src: "{{xci_flavor}}/openstack_user_config.yml"
- dest: "{{openstack_osa_etc_path}}/openstack_user_config.yml"
diff --git a/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/templates/user_variables_os-odl-nofeature.yml.j2 b/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/templates/user_variables_os-odl-nofeature.yml.j2
deleted file mode 100644
index eb08adc0..00000000
--- a/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/templates/user_variables_os-odl-nofeature.yml.j2
+++ /dev/null
@@ -1,45 +0,0 @@
----
-# Copyright (c) 2017 Ericsson AB and others.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# ##
-# ## This file contains commonly used overrides for convenience. Please inspect
-# ## the defaults for each role to find additional override options.
-# ##
-
-{% raw %}
-# Ensure the openvswitch kernel module is loaded
-openstack_host_specific_kernel_modules:
- - name: "openvswitch"
- pattern: "CONFIG_OPENVSWITCH"
- group: "network_hosts"
-
-# Use OpenDaylight SDN Controller
-neutron_plugin_type: "ml2.opendaylight"
-neutron_opendaylight_conf_ini_overrides:
- ml2_odl:
- username: "admin"
- password: "admin"
- port_binding_controller: "pseudo-agentdb-binding"
- url: "http://{{ internal_lb_vip_address }}:8180/controller/nb/v2/neutron"
-
-neutron_ml2_drivers_type: "flat,vlan,vxlan"
-
-neutron_plugin_base:
- - odl-router_v2
-{% endraw %}
-
-{% if odl_repo_version is defined %}
-odl_version: "{{ odl_repo_version }}"
-{% endif %}
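The deleted template relies on two-stage rendering: XCI's Ansible renders the .j2 file and resolves odl_repo_version, while the {% raw %} block keeps {{ internal_lb_vip_address }} literal so OpenStack-Ansible can resolve it later from its own variables. A small demonstration of that behaviour, assuming plain Jinja2:

    from jinja2 import Template

    tpl = Template(
        "{% raw %}url: http://{{ internal_lb_vip_address }}:8180{% endraw %}\n"
        "{% if odl_repo_version is defined %}odl_version: {{ odl_repo_version }}{% endif %}"
    )
    print(tpl.render(odl_repo_version="oxygen"))
    # url: http://{{ internal_lb_vip_address }}:8180  <- left for OSA to resolve
    # odl_version: oxygen                             <- resolved by XCI now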
diff --git a/xci/scenarios/os-odl-nofeature/vars/main.yml b/xci/scenarios/os-odl-nofeature/vars/main.yml
deleted file mode 100644
index 629b50c7..00000000
--- a/xci/scenarios/os-odl-nofeature/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-odl_repo_version: "{{ lookup('env','ODL_VERSION') }}"
diff --git a/xci/scenarios/os-odl-nofeature/xci_overrides b/xci/scenarios/os-odl-nofeature/xci_overrides
deleted file mode 100644
index 2c65df0d..00000000
--- a/xci/scenarios/os-odl-nofeature/xci_overrides
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/bash
-
-if [[ $DEPLOY_SCENARIO == "os-odl-nofeature" ]] && [[ $XCI_FLAVOR == "ha" ]]; then
- export VM_MEMORY_SIZE=20480
-elif [[ $DEPLOY_SCENARIO == "os-odl-nofeature" ]]; then
- export VM_MEMORY_SIZE=16384
-fi
diff --git a/xci/scripts/vm/start-new-vm.sh b/xci/scripts/vm/start-new-vm.sh
index f266d64f..8f6effe0 100755
--- a/xci/scripts/vm/start-new-vm.sh
+++ b/xci/scripts/vm/start-new-vm.sh
@@ -118,7 +118,7 @@ fi
COMMON_DISTRO_PKGS=(vim strace gdb htop dnsmasq docker iptables ebtables virt-manager qemu-kvm)
case ${ID,,} in
- *suse)
+ *suse*)
pkg_mgr_cmd="sudo zypper -q -n ref"
pkg_mgr_cmd+=" && sudo zypper -q -n install ${COMMON_DISTRO_PKGS[@]} qemu-tools libvirt-daemon libvirt-client libvirt-daemon-driver-qemu"
;;
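The stricter glob matters because /etc/os-release IDs such as "opensuse-leap" and "opensuse-tumbleweed" carry a suffix after "suse", so the old *suse) pattern never matched them. A quick check of both patterns (Python's fnmatch mirrors shell case globbing closely enough here):

    from fnmatch import fnmatch

    for distro_id in ("suse", "opensuse", "opensuse-leap", "opensuse-tumbleweed"):
        print(distro_id, fnmatch(distro_id, "*suse"), fnmatch(distro_id, "*suse*"))
    # Only "*suse*" matches the hyphenated openSUSE IDs.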
diff --git a/xci/var/idf.yml b/xci/var/idf.yml
index 8d9352b6..148508d9 100644
--- a/xci/var/idf.yml
+++ b/xci/var/idf.yml
@@ -11,9 +11,6 @@
idf:
version: 0.1
- osa:
- kolla:
- k8s:
net_config: &net_config
admin:
interface: 0
@@ -37,13 +34,63 @@ idf:
xci:
pod_name: vpod1
net_config: *net_config
- nodes_roles:
- opnfv_host: [opnfv_host]
- node1: [compute, storage]
- node2: [compute, storage]
- node3: [controller]
- node4: [controller]
- node5: [controller]
+ flavors:
+ mini:
+ - opnfv
+ - node1
+ - node2
+ noha:
+ - opnfv
+ - node1
+ - node2
+ - node3
+ ha:
+ - opnfv
+ - node1
+ - node2
+ - node3
+ - node4
+ - node5
+
+ osa:
+ nodes_roles:
+ opnfv: [deployment]
+ node1: [controller]
+ node2: [compute, storage]
+ node3: [compute, storage]
+ node4: [controller]
+ node5: [controller]
+ groups:
+ openstack:
+ - controller
+ - compute
+ - storage
+ hostnames:
+ opnfv: opnfv
+ node1: controller00
+ node2: compute00
+ node3: compute01
+ node4: controller01
+ node5: controller02
+ kubespray:
+ nodes_roles:
+      opnfv: [opnfv]
+ node1: [kube-master, etcd, vault]
+ node2: [kube-node]
+ node3: [kube-node]
+ node4: [kube-master, etcd, vault]
+ node5: [kube-master, etcd, vault]
+ groups:
+ k8s-cluster:
+ - kube-node
+        - kube-master
+ hostnames:
+ opnfv: opnfv
+ node1: master1
+ node2: node1
+ node3: node2
+ node4: master2
+ node5: master3
# net_config network to be used by the PXE
pxe_network: public
@@ -55,7 +102,7 @@ xci:
ip: 192.168.122.1
extra_addresses:
- opnfv_host: 192.168.122.2
+ opnfv: 192.168.122.2
# network mapping
network_mapping:
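With the idf.yml restructuring above, each installer carries its own nodes_roles/groups/hostnames mapping and the flavors lists select which nodes take part in a deployment. The new dynamic_inventory.py (added elsewhere in this change) consumes this data; the following is a hedged sketch of the lookup the new layout enables, written against the structure shown above rather than the script's actual code:

    import yaml

    xci = yaml.safe_load(open("xci/var/idf.yml"))["xci"]
    installer, flavor = "osa", "noha"

    active = xci["flavors"][flavor]          # nodes participating in this flavor
    roles = xci[installer]["nodes_roles"]
    hostnames = xci[installer]["hostnames"]

    inventory = {}
    for node in active:
        for role in roles.get(node, []):
            inventory.setdefault(role, []).append(hostnames[node])
    print(inventory)  # {'deployment': ['opnfv'], 'controller': ['controller00'], ...}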
diff --git a/xci/xci-deploy.sh b/xci/xci-deploy.sh
index c0c1a8ef..07f4d39d 100755
--- a/xci/xci-deploy.sh
+++ b/xci/xci-deploy.sh
@@ -29,6 +29,28 @@ submit_bug_report() {
echo "-------------------------------------------------------------------------"
}
+log_xci_information() {
+ local scenario_version scenario_sha
+
+ cd ${XCI_SCENARIOS_CACHE}/${DEPLOY_SCENARIO}
+ scenario_sha=$(git rev-parse HEAD)
+ scenario_version=$(git describe --exact 2>/dev/null || echo "master")
+ cd -
+ echo "Info: Starting XCI Deployment"
+ echo "Info: Deployment parameters"
+ echo "-------------------------------------------------------------------------"
+ echo "OPNFV scenario: $DEPLOY_SCENARIO"
+ echo "Scenario version: ${scenario_version} (sha: ${scenario_sha})"
+ echo "xci flavor: $XCI_FLAVOR"
+ echo "xci installer: $INSTALLER_TYPE"
+ echo "infra deployment: $INFRA_DEPLOYMENT"
+ echo "opnfv/releng-xci version: $(git rev-parse HEAD)"
+ [[ "$INFRA_DEPLOYMENT" == "bifrost" ]] && echo "openstack/bifrost version: $OPENSTACK_BIFROST_VERSION"
+ [[ "$INSTALLER_TYPE" == "osa" ]] && echo "openstack/openstack-ansible version: $OPENSTACK_OSA_VERSION"
+ [[ "$INSTALLER_TYPE" == "kubespray" ]] && echo "kubespray version: $KUBESPRAY_VERSION"
+ echo "-------------------------------------------------------------------------"
+}
+
exit_trap() {
submit_bug_report
collect_xci_logs
@@ -98,20 +120,10 @@ trap exit_trap ERR
# We are using sudo so we need to make sure that env_reset is not present
sudo sed -i "s/^Defaults.*env_reset/#&/" /etc/sudoers
-#-------------------------------------------------------------------------------
-# Log info to console
-#-------------------------------------------------------------------------------
-echo "Info: Starting XCI Deployment"
-echo "Info: Deployment parameters"
-echo "-------------------------------------------------------------------------"
-echo "OPNFV scenario: $DEPLOY_SCENARIO"
-echo "xci flavor: $XCI_FLAVOR"
-echo "xci installer: $INSTALLER_TYPE"
-echo "infra deployment: $INFRA_DEPLOYMENT"
-echo "opnfv/releng-xci version: $(git rev-parse HEAD)"
-[[ "$INFRA_DEPLOYMENT" == "bifrost" ]] && echo "openstack/bifrost version: $OPENSTACK_BIFROST_VERSION"
-[[ "$INSTALLER_TYPE" == "osa" ]] && echo "openstack/openstack-ansible version: $OPENSTACK_OSA_VERSION"
-[[ "$INSTALLER_TYPE" == "kubespray" ]] && echo "kubespray version: $KUBESPRAY_VERSION"
+#
+# Bootstrap environment for XCI Deployment
+#
+echo "Info: Preparing host environment for the XCI deployment"
echo "-------------------------------------------------------------------------"
#-------------------------------------------------------------------------------
@@ -150,11 +162,15 @@ echo "-------------------------------------------------------------------------"
ansible_lint
echo "-------------------------------------------------------------------------"
-#-------------------------------------------------------------------------------
# Get scenario variables overrides
#-------------------------------------------------------------------------------
-source $(find $XCI_PATH/xci/scenarios/${DEPLOY_SCENARIO} -name xci_overrides) &>/dev/null || \
- source $(find $XCI_SCENARIOS_CACHE/${DEPLOY_SCENARIO} -name xci_overrides) &>/dev/null || :
+source $(find $XCI_SCENARIOS_CACHE/${DEPLOY_SCENARIO} -name xci_overrides) &>/dev/null &&
+    echo "Sourced ${DEPLOY_SCENARIO} overrides file successfully!" || :
+
+#-------------------------------------------------------------------------------
+# Log info to console
+#-------------------------------------------------------------------------------
+log_xci_information
# Deploy infrastructure based on the selected deployment method
echo "Info: Deploying hardware using '${INFRA_DEPLOYMENT}'"