-rw-r--r--  .gitignore | 6
-rw-r--r--  INFO.yaml | 47
-rw-r--r--  bifrost/playbooks/opnfv-virtual.yaml | 99
-rwxr-xr-x  bifrost/scripts/bifrost-provision.sh | 138
-rw-r--r--  ci/README.md | 7
-rw-r--r--  docs/conf.py | 1
-rw-r--r--  docs/conf.yaml | 3
-rw-r--r--  docs/images/arch-layout-k8s-ha.png | bin 0 -> 56989 bytes
-rw-r--r--  docs/images/arch-layout-k8s-noha.png | bin 0 -> 268126 bytes
-rw-r--r--  docs/requirements.txt | 3
-rw-r--r--  docs/specs/infra_manager.rst | 130
-rw-r--r--  docs/specs/k8-calico-onap.rst | 141
-rw-r--r--  docs/specs/k8-odl-coe.rst | 105
-rw-r--r--  docs/xci-criterias-cls.rst | 74
-rw-r--r--  docs/xci-overview.rst | 2
-rw-r--r--  docs/xci-user-guide.rst | 147
-rw-r--r--  tox.ini | 25
-rw-r--r--  xci/README.rst | 31
-rwxr-xr-x  xci/config/aio-vars | 5
-rwxr-xr-x  xci/config/env-vars | 51
-rwxr-xr-x  xci/config/ha-vars | 7
-rwxr-xr-x  xci/config/mini-vars | 7
-rwxr-xr-x  xci/config/noha-vars | 7
-rwxr-xr-x  xci/config/pinned-versions | 32
-rwxr-xr-x  xci/config/user-vars | 40
-rw-r--r--  xci/files/install-ansible.sh | 161
-rw-r--r--  xci/files/requirements.yml | 10
-rwxr-xr-x  xci/files/xci-destroy-env.sh (renamed from bifrost/scripts/destroy-env.sh) | 45
-rw-r--r--  xci/files/xci-lib.sh | 298
-rw-r--r--  xci/infra/bifrost/README.md (renamed from bifrost/README.md) | 0
-rw-r--r--  xci/infra/bifrost/infra-provision.sh | 86
-rw-r--r--  xci/infra/bifrost/playbooks/opnfv-virtual.yml | 167
-rw-r--r--  xci/infra/bifrost/playbooks/roles/common/venv_python_path.yml | 34
-rw-r--r--  xci/infra/bifrost/playbooks/wait-for-baremetal.yml | 17
-rw-r--r--  xci/infra/bifrost/playbooks/xci-prepare-env.yml | 118
-rw-r--r--  xci/infra/bifrost/playbooks/xci-setup-nodes.yml | 76
-rwxr-xr-x  xci/infra/bifrost/scripts/bifrost-env.sh | 43
-rw-r--r--  xci/infra/bifrost/vars/debian.yml | 19
-rw-r--r--  xci/infra/bifrost/vars/redhat.yml | 19
-rw-r--r--  xci/infra/bifrost/vars/suse.yml | 19
-rw-r--r--  xci/installer/kubespray/README | 64
-rwxr-xr-x  xci/installer/kubespray/deploy.sh | 157
-rw-r--r--  xci/installer/kubespray/files/ha/inventory/group_vars/all.yml | 8
-rw-r--r--  xci/installer/kubespray/playbooks/configure-installer.yml | 50
-rw-r--r--  xci/installer/kubespray/playbooks/configure-kubenet.yml | 51
-rw-r--r--  xci/installer/kubespray/playbooks/configure-opnfvhost.yml | 101
-rw-r--r--  xci/installer/kubespray/playbooks/configure-targethosts.yml | 40
-rw-r--r--  xci/installer/kubespray/playbooks/group_vars/all | 54
-rw-r--r--  xci/installer/kubespray/playbooks/post-deployment.yml | 42
-rwxr-xr-x  xci/installer/osa/deploy.sh | 45
-rw-r--r--  xci/installer/osa/files/aio/flavor-vars.yml | 3
-rw-r--r--  xci/installer/osa/files/aio/inventory | 2
-rw-r--r--  xci/installer/osa/files/ansible-role-requirements.yml | 152
-rw-r--r--  xci/installer/osa/files/global-requirement-pins.txt | 13
-rw-r--r--  xci/installer/osa/files/ha/flavor-vars.yml | 39
-rw-r--r--  xci/installer/osa/files/ha/inventory | 11
-rw-r--r--  xci/installer/osa/files/ha/openstack_user_config.yml | 60
-rw-r--r--  xci/installer/osa/files/ha/user_variables.yml | 7
-rw-r--r--  xci/installer/osa/files/mini/flavor-vars.yml | 21
-rw-r--r--  xci/installer/osa/files/mini/inventory | 8
-rw-r--r--  xci/installer/osa/files/mini/user_variables.yml | 7
-rw-r--r--  xci/installer/osa/files/noha/flavor-vars.yml | 27
-rw-r--r--  xci/installer/osa/files/noha/inventory | 9
-rw-r--r--  xci/installer/osa/files/noha/user_variables.yml | 7
-rw-r--r--  xci/installer/osa/files/openstack_services.yml | 158
-rw-r--r--  xci/installer/osa/files/setup-openstack.yml | 4
-rw-r--r--  xci/installer/osa/files/user_variables_proxy.yml | 22
-rw-r--r--  xci/installer/osa/files/user_variables_xci.yml (renamed from xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/ha/user_variables_os-odl-nofeature-ha.yml) | 11
-rw-r--r--  xci/installer/osa/playbooks/bootstrap-scenarios.yml | 23
-rw-r--r--  xci/installer/osa/playbooks/configure-localhost.yml | 75
-rw-r--r--  xci/installer/osa/playbooks/configure-opnfvhost.yml | 280
-rw-r--r--  xci/installer/osa/playbooks/configure-targethosts.yml | 63
-rw-r--r--  xci/installer/osa/playbooks/post-deployment.yml | 66
-rw-r--r--  xci/installer/osh/README | 50
-rwxr-xr-x  xci/installer/osh/deploy.sh | 170
-rw-r--r--  xci/installer/osh/files/ha/inventory/group_vars/all.yml | 8
-rw-r--r--  xci/installer/osh/playbooks/configure-installer.yml | 51
-rw-r--r--  xci/installer/osh/playbooks/configure-kubenet.yml | 51
-rw-r--r--  xci/installer/osh/playbooks/configure-opnfvhost.yml | 101
-rw-r--r--  xci/installer/osh/playbooks/configure-targethosts.yml | 40
-rw-r--r--  xci/installer/osh/playbooks/group_vars/all.yml | 55
-rw-r--r--  xci/installer/osh/playbooks/install-openstack-helm.yml | 24
-rw-r--r--  xci/installer/osh/playbooks/post-deployment.yml | 42
-rw-r--r--  xci/installer/osh/playbooks/roles/install-osh-mini/tasks/main.yml | 109
-rw-r--r--  xci/installer/osh/playbooks/roles/install-osh-mini/vars/main.yml | 18
-rw-r--r--  xci/installer/osh/playbooks/roles/install-osh-noha/tasks/main.yml | 130
-rw-r--r--  xci/installer/osh/playbooks/roles/prepare-kube-nodes-osh/tasks/main.yml | 12
-rw-r--r--  xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/files/helm-serve.service | 11
-rw-r--r--  xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/tasks/main.yml | 130
-rw-r--r--  xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/vars/main.yml | 31
-rw-r--r--  xci/installer/osh/playbooks/roles/prepare-osh/tasks/main.yml | 33
-rw-r--r--  xci/installer/osh/playbooks/roles/prepare-osh/templates/resolv.conf.j2 | 4
-rw-r--r--  xci/installer/osh/playbooks/roles/prepare-osh/vars/main.yml | 7
-rw-r--r--  xci/opnfv-scenario-requirements.yml | 172
-rw-r--r--  xci/playbooks/configure-localhost.yml | 116
-rwxr-xr-x  xci/playbooks/dynamic_inventory.py | 240
-rw-r--r--  xci/playbooks/get-opnfv-scenario-requirements.yml | 165
-rw-r--r--  xci/playbooks/manage-ssh-keys.yml | 56
-rw-r--r--  xci/playbooks/prepare-tests.yml | 8
-rw-r--r--  xci/playbooks/provision-vm-nodes.yml | 42
-rw-r--r--  xci/playbooks/roles/.gitignore | 8
-rw-r--r--  xci/playbooks/roles/bootstrap-host/defaults/main.yml | 11
-rwxr-xr-x  xci/playbooks/roles/bootstrap-host/files/network-config-suse (renamed from xci/playbooks/roles/configure-network/files/network-config-suse) | 0
-rw-r--r--  xci/playbooks/roles/bootstrap-host/handlers/main.yml | 12
-rw-r--r--  xci/playbooks/roles/bootstrap-host/tasks/main.yml | 15
-rw-r--r--  xci/playbooks/roles/bootstrap-host/tasks/network.yml | 64
-rw-r--r--  xci/playbooks/roles/bootstrap-host/tasks/network_debian.yml | 98
-rw-r--r--  xci/playbooks/roles/bootstrap-host/tasks/network_redhat.yml | 32
-rw-r--r--  xci/playbooks/roles/bootstrap-host/tasks/network_suse.yml | 93
-rw-r--r--  xci/playbooks/roles/bootstrap-host/tasks/time.yml (renamed from xci/playbooks/roles/synchronize-time/tasks/main.yml) | 9
l---------  xci/playbooks/roles/bootstrap-host/templates/kubespray | 1
-rw-r--r--  xci/playbooks/roles/bootstrap-host/templates/osa/debian.interface.j2 | 39
-rw-r--r--  xci/playbooks/roles/bootstrap-host/templates/osa/redhat.interface.j2 | 26
-rw-r--r--  xci/playbooks/roles/bootstrap-host/templates/osa/suse.interface.j2 (renamed from xci/playbooks/roles/configure-network/templates/suse/suse.interface.j2) | 7
-rw-r--r--  xci/playbooks/roles/bootstrap-host/templates/osa/suse.routes.j2 | 1
l---------  xci/playbooks/roles/bootstrap-host/templates/osh | 1
-rw-r--r--  xci/playbooks/roles/bootstrap-host/vars/main.yml | 70
-rw-r--r--  xci/playbooks/roles/clone-repository/tasks/main.yml | 4
-rw-r--r--  xci/playbooks/roles/configure-network/tasks/main.yml | 103
-rw-r--r--  xci/playbooks/roles/configure-network/templates/debian/compute00.interface.j2 | 75
l---------  xci/playbooks/roles/configure-network/templates/debian/compute01.interface.j2 | 1
-rw-r--r--  xci/playbooks/roles/configure-network/templates/debian/controller00.interface.j2 | 66
l---------  xci/playbooks/roles/configure-network/templates/debian/controller01.interface.j2 | 1
l---------  xci/playbooks/roles/configure-network/templates/debian/controller02.interface.j2 | 1
-rw-r--r--  xci/playbooks/roles/configure-network/templates/debian/opnfv.interface.j2 | 66
-rw-r--r--  xci/playbooks/roles/configure-network/templates/redhat/bridge.ifcfg.j2 | 9
-rw-r--r--  xci/playbooks/roles/configure-network/templates/redhat/interface.ifcfg.j2 | 10
-rw-r--r--  xci/playbooks/roles/configure-network/templates/suse/suse.routes.j2 | 1
-rw-r--r--  xci/playbooks/roles/configure-nfs/tasks/main.yml | 2
-rw-r--r--  xci/playbooks/roles/create-nodes/README.md | 160
-rw-r--r--  xci/playbooks/roles/create-nodes/defaults/main.yml | 31
-rw-r--r--  xci/playbooks/roles/create-nodes/files/virtualbmc.conf | 3
-rw-r--r--  xci/playbooks/roles/create-nodes/tasks/baremetalhoststojson.yml | 91
-rw-r--r--  xci/playbooks/roles/create-nodes/tasks/create_vm.yml | 198
-rw-r--r--  xci/playbooks/roles/create-nodes/tasks/download_opnfvimage.yml | 32
-rw-r--r--  xci/playbooks/roles/create-nodes/tasks/main.yml | 54
-rw-r--r--  xci/playbooks/roles/create-nodes/tasks/prepare_libvirt.yml | 139
-rw-r--r--  xci/playbooks/roles/create-nodes/templates/net-admin.xml.j2 | 14
-rw-r--r--  xci/playbooks/roles/create-nodes/templates/net-mgmt.xml.j2 | 11
-rw-r--r--  xci/playbooks/roles/create-nodes/templates/net.xml.j2 | 14
-rw-r--r--  xci/playbooks/roles/create-nodes/templates/pool_dir.xml.j2 | 7
-rw-r--r--  xci/playbooks/roles/create-nodes/templates/vm.xml.j2 | 69
-rw-r--r--  xci/playbooks/roles/create-nodes/vars/debian.yml | 13
-rw-r--r--  xci/playbooks/roles/create-nodes/vars/redhat.yml | 17
-rw-r--r--  xci/playbooks/roles/create-nodes/vars/suse.yml | 15
-rw-r--r--  xci/playbooks/roles/prepare-functest/defaults/main.yml | 14
-rw-r--r--  xci/playbooks/roles/prepare-functest/tasks/main.yml | 32
-rw-r--r--  xci/playbooks/roles/prepare-functest/templates/env.j2 | 4
-rw-r--r--  xci/playbooks/roles/prepare-functest/templates/prepare-functest.sh.j2 | 12
-rw-r--r--  xci/playbooks/roles/prepare-tests/defaults/main.yml | 14
-rw-r--r--  xci/playbooks/roles/prepare-tests/tasks/main.yml | 56
-rw-r--r--  xci/playbooks/roles/prepare-tests/tasks/process_neutron_conf.yml | 19
-rw-r--r--  xci/playbooks/roles/prepare-tests/templates/env.j2 | 15
-rw-r--r--  xci/playbooks/roles/prepare-tests/templates/prepare-tests.sh.j2 | 46
-rw-r--r--  xci/playbooks/roles/prepare-tests/templates/run-functest.sh.j2 | 52
-rw-r--r--  xci/playbooks/roles/prepare-tests/templates/run-yardstick.sh.j2 | 47
-rw-r--r--  xci/playbooks/roles/prepare-tests/vars/main.yml | 17
-rw-r--r--  xci/scenarios/README.rst | 1
-rw-r--r--  xci/scenarios/k8-nosdn-nofeature/.gitkeep | 0
-rw-r--r--  xci/scenarios/os-nosdn-nofeature/README.rst | 2
-rw-r--r--  xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/ha/openstack_user_config.yml | 255
-rw-r--r--  xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/mini/openstack_user_config.yml | 170
-rw-r--r--  xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/noha/openstack_user_config.yml | 172
-rw-r--r--  xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/user_variables_os-nosdn-nofeature.yml | 35
-rw-r--r--  xci/scenarios/os-odl-nofeature/.gitkeep | 0
-rw-r--r--  xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/ha/openstack_user_config.yml | 256
-rw-r--r--  xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/mini/openstack_user_config.yml | 171
-rw-r--r--  xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/noha/openstack_user_config.yml | 173
-rw-r--r--  xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/user_variables_os-odl-nofeature.yml | 39
-rw-r--r--  xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/tasks/main.yml | 26
-rw-r--r--  xci/scenarios/os-odl-nofeature/xci_overrides | 7
-rwxr-xr-x  xci/scripts/update-osa-version-files.sh | 23
-rwxr-xr-x  xci/scripts/vm/start-new-vm.sh | 128
-rw-r--r--  xci/var/Debian.yml | 5
-rw-r--r--  xci/var/RedHat.yml | 5
-rw-r--r--  xci/var/Suse.yml | 5
-rw-r--r--  xci/var/ericsson-pod2-idf.yml | 187
-rw-r--r--  xci/var/ericsson-pod2-pdf.yml | 269
-rw-r--r--  xci/var/idf.yml | 164
-rw-r--r--  xci/var/lf-pod4-idf.yml | 222
-rw-r--r--  xci/var/lf-pod4-pdf.yml | 198
-rw-r--r--  xci/var/opnfv.yml | 88
-rw-r--r--  xci/var/opnfv_vm_idf.yml (renamed from xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/tasks/main.yml) | 21
-rw-r--r--  xci/var/opnfv_vm_pdf.yml | 53
-rw-r--r--  xci/var/pdf.yml | 168
-rwxr-xr-x  xci/xci-deploy.sh | 158
186 files changed, 7680 insertions, 3119 deletions
diff --git a/.gitignore b/.gitignore
index af9d0080..925736c1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,6 @@
*,~
.*.sw?
-/docs_build/
+docs_build/*
/docs_output/
/releng/
.idea
@@ -33,7 +33,7 @@ coverage.xml
nosetests.xml
testapi_venv/
.cache
-.tox
+.tox/
*.retry
job_output/
# Clear VM files
@@ -42,7 +42,7 @@ job_output/
build.log
*.d/
_static/
-conf.py
*.html
html/
xci/logs/
+docs/_build/*
diff --git a/INFO.yaml b/INFO.yaml
new file mode 100644
index 00000000..43c73870
--- /dev/null
+++ b/INFO.yaml
@@ -0,0 +1,47 @@
+---
+subproject: 'Cross Community CI (XCI)'
+project_category: 'Integration & Testing'
+lifecycle_state: 'Incubation'
+subproject_lead: &opnfv_releng_ptl
+ name: 'Fatih Degirmenci'
+ email: 'fatih.degirmenci@ericsson.com'
+ id: 'fdegir'
+ company: 'Ericsson'
+ timezone: 'Europe/Stockholm'
+primary_contact: *opnfv_releng_ptl
+issue_tracking:
+ type: 'jira'
+ url: 'https://jira.opnfv.org/projects/RELENG'
+ key: 'RELENG'
+mailing_list:
+ type: 'mailman2'
+ url: 'opnfv-tech-discuss@lists.opnfv.org'
+ tag: '[xci]'
+realtime_discussion:
+ type: 'irc'
+ server: 'freenode.net'
+ channel: '#opnfv-pharos'
+meetings:
+ - type: 'irc'
+ agenda: 'https://etherpad.opnfv.org/p/xci-meetings'
+ channel: '#opnfv-pharos'
+ repeats: 'weekly'
+ day: 'Wednesday'
+ time: '14:00 UTC'
+committers:
+ - <<: *opnfv_releng_ptl
+ - name: 'Markos Chandras'
+ company: 'SUSE'
+ email: 'mchandras@suse.de'
+ id: 'mchandras'
+ timezone: 'Europe/London'
+ - name: 'Manuel Buil'
+ company: 'SUSE'
+ email: 'mbuil@suse.com'
+ id: 'mbuil'
+ timezone: 'Europe/Madrid'
+ - name: 'Panagiotis Karalis'
+ company: 'Intracom Telecom'
+ email: 'panos.pkaralis@gmail.com'
+ id: 'pkaralis'
+ timezone: 'Europe/Athens'
diff --git a/bifrost/playbooks/opnfv-virtual.yaml b/bifrost/playbooks/opnfv-virtual.yaml
deleted file mode 100644
index 4e985db8..00000000
--- a/bifrost/playbooks/opnfv-virtual.yaml
+++ /dev/null
@@ -1,99 +0,0 @@
-# SPDX-license-identifier: Apache-2.0
-##############################################################################
-# Copyright (c) 2016 RedHat and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-- hosts: localhost
- connection: local
- name: "Setting pre-test conditions"
- become: yes
- ignore_errors: yes
- tasks:
- - name: Remove pre-existing leases file
- file: path=/var/lib/misc/dnsmasq.leases state=absent
-- hosts: localhost
- connection: local
- name: "Executes install, enrollment, and testing in one playbook"
- become: no
- gather_facts: yes
- pre_tasks:
- - name: "Override the ipv4_gateway setting"
- set_fact:
- ipv4_gateway: "192.168.122.1"
- roles:
- - { role: bifrost-prep-for-install, when: skip_install is not defined }
- environment:
- http_proxy: "{{ lookup('env','http_proxy') }}"
- https_proxy: "{{ lookup('env','https_proxy') }}"
-- hosts: localhost
- connection: local
- name: "Executes install, enrollment, and testing in one playbook"
- become: yes
- gather_facts: yes
- roles:
- - role: bifrost-keystone-install
- - role: bifrost-ironic-install
- cleaning: false
- testing: true
- # NOTE(TheJulia): While the next step creates a ramdisk, some elements
- # do not support ramdisk-image-create as they invoke steps to cleanup
- # the ramdisk which causes ramdisk-image-create to believe it failed.
- - role: bifrost-create-dib-image
- dib_imagename: "{{ http_boot_folder }}/ipa"
- build_ramdisk: false
- dib_os_element: "{{ ipa_dib_os_element|default('debian') }}"
- dib_os_release: "jessie"
- dib_elements: "ironic-agent {{ ipa_extra_dib_elements | default('') }}"
- dib_notmpfs: true
- when: create_ipa_image | bool == true
- - role: bifrost-create-dib-image
- dib_imagetype: "qcow2"
- dib_imagename: "{{deploy_image}}"
- dib_env_vars:
- DIB_PYTHON_VERSION: 2
- dib_os_element: "{{ lookup('env','DIB_OS_ELEMENT') }}"
- dib_os_release: "{{ lookup('env', 'DIB_OS_RELEASE') }}"
- extra_dib_elements: "{{ lookup('env', 'EXTRA_DIB_ELEMENTS') | default('') }}"
- dib_elements: "vm enable-serial-console simple-init devuser openssh-server growroot pip-and-virtualenv {{ extra_dib_elements }}"
- dib_packages: "{{ lookup('env', 'DIB_OS_PACKAGES') }}"
- dib_notmpfs: true
- when: create_image_via_dib | bool == true and transform_boot_image | bool == false
- - role: bifrost-keystone-client-config
- user: "{{ ansible_env.SUDO_USER }}"
- clouds:
- bifrost:
- config_username: "{{ ironic.keystone.default_username }}"
- config_password: "{{ ironic.keystone.default_password }}"
- config_project_name: "baremetal"
- config_region_name: "{{ keystone.bootstrap.region_name }}"
- config_auth_url: "{{ keystone.bootstrap.public_url }}"
- environment:
- http_proxy: "{{ lookup('env','http_proxy') }}"
- https_proxy: "{{ lookup('env','https_proxy') }}"
-- hosts: baremetal
- name: "Enroll node with Ironic"
- become: no
- connection: local
- roles:
- - role: ironic-enroll-dynamic
- - { role: ironic-inspect-node, when: inspect_nodes | default('false') | bool == true }
-- hosts: baremetal
- name: "Create configuration drive files and deploy machines"
- vars:
- multinode_testing: "{{ inventory_dhcp | bool == true }}"
- become: no
- connection: local
- roles:
- - role: bifrost-configdrives-dynamic
- - role: bifrost-deploy-nodes-dynamic
-- hosts: baremetal
- name: "Deploy machines."
- become: no
- connection: local
- serial: 1
- roles:
- - role: bifrost-prepare-for-test-dynamic
diff --git a/bifrost/scripts/bifrost-provision.sh b/bifrost/scripts/bifrost-provision.sh
deleted file mode 100755
index 2e887670..00000000
--- a/bifrost/scripts/bifrost-provision.sh
+++ /dev/null
@@ -1,138 +0,0 @@
-#!/bin/bash
-# SPDX-license-identifier: Apache-2.0
-##############################################################################
-# Copyright (c) 2016 Ericsson AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-set -eu
-set -o pipefail
-
-export PYTHONUNBUFFERED=1
-SCRIPT_HOME="$(cd "$(dirname "$0")" && pwd)"
-BIFROST_HOME=$SCRIPT_HOME/..
-ANSIBLE_INSTALL_ROOT=${ANSIBLE_INSTALL_ROOT:-/opt/stack}
-ENABLE_VENV="false"
-USE_DHCP="false"
-USE_VENV="true"
-BUILD_IMAGE=true
-PROVISION_WAIT_TIMEOUT=${PROVISION_WAIT_TIMEOUT:-3600}
-# This is normally exported by XCI env but we should initialize it here
-# in case we run this script on its own for debug purposes
-XCI_ANSIBLE_VERBOSITY=${XCI_ANSIBLE_VERBOSITY:-}
-
-# Ensure the right inventory files is used based on branch
-CURRENT_BIFROST_BRANCH=$(git rev-parse --abbrev-ref HEAD)
-if [ $CURRENT_BIFROST_BRANCH = "master" ]; then
- BAREMETAL_DATA_FILE=${BAREMETAL_DATA_FILE:-'/tmp/baremetal.json'}
- INVENTORY_FILE_FORMAT="baremetal_json_file"
-else
- BAREMETAL_DATA_FILE=${BAREMETAL_DATA_FILE:-'/tmp/baremetal.csv'}
- INVENTORY_FILE_FORMAT="baremetal_csv_file"
-fi
-export BIFROST_INVENTORY_SOURCE=$BAREMETAL_DATA_FILE
-
-# Default settings for VMs
-export TEST_VM_NUM_NODES=${TEST_VM_NUM_NODES:-3}
-export TEST_VM_NODE_NAMES=${TEST_VM_NODE_NAMES:-"opnfv controller00 compute00"}
-export VM_DOMAIN_TYPE=${VM_DOMAIN_TYPE:-kvm}
-export VM_CPU=${VM_CPU:-4}
-export VM_DISK=${VM_DISK:-100}
-export VM_MEMORY_SIZE=${VM_MEMORY_SIZE:-8192}
-export VM_DISK_CACHE=${VM_DISK_CACHE:-none}
-
-# Settings for bifrost
-TEST_PLAYBOOK="opnfv-virtual.yaml"
-USE_INSPECTOR=true
-USE_CIRROS=false
-TESTING_USER=root
-DOWNLOAD_IPA=true
-CREATE_IPA_IMAGE=false
-INSPECT_NODES=true
-INVENTORY_DHCP=false
-INVENTORY_DHCP_STATIC_IP=false
-WRITE_INTERFACES_FILE=true
-
-# Settings for console access
-export DIB_DEV_USER_PWDLESS_SUDO=yes
-export DIB_DEV_USER_PASSWORD=devuser
-
-# Settings for distro: xenial/ubuntu-minimal, 7/centos7, 42.2/suse
-export DIB_OS_RELEASE=${DIB_OS_RELEASE:-xenial}
-export DIB_OS_ELEMENT=${DIB_OS_ELEMENT:-ubuntu-minimal}
-
-# DIB OS packages
-export DIB_OS_PACKAGES=${DIB_OS_PACKAGES:-"vlan,vim,less,bridge-utils,language-pack-en,iputils-ping,rsyslog,curl"}
-
-# Additional dib elements
-export EXTRA_DIB_ELEMENTS=${EXTRA_DIB_ELEMENTS:-"openssh-server"}
-
-if [ ${USE_VENV} = "true" ]; then
- export VENV=/opt/stack/bifrost
- $SCRIPT_HOME/env-setup.sh &>/dev/null
- # Note(cinerama): activate is not compatible with "set -u";
- # disable it just for this line.
- set +u
- source ${VENV}/bin/activate
- set -u
- ANSIBLE=${VENV}/bin/ansible-playbook
- ENABLE_VENV="true"
-else
- $SCRIPT_HOME/env-setup.sh &>/dev/null
- ANSIBLE=${HOME}/.local/bin/ansible-playbook
-fi
-
-# Change working directory
-cd $BIFROST_HOME/playbooks
-
-# NOTE(hwoarang): Disable selinux as we are hitting issues with it from time to
-# time. Remove this when Centos7 is a proper gate on bifrost so we know that
-# selinux works as expected.
-if [[ -e /etc/centos-release ]]; then
- echo "*************************************"
- echo "WARNING: Disabling selinux on CentOS7"
- echo "*************************************"
- sudo setenforce 0
-fi
-
-# Create the VMS
-${ANSIBLE} ${XCI_ANSIBLE_VERBOSITY} \
- -i inventory/localhost \
- test-bifrost-create-vm.yaml \
- -e test_vm_num_nodes=${TEST_VM_NUM_NODES} \
- -e test_vm_cpu='host-passthrough' \
- -e test_vm_memory_size=${VM_MEMORY_SIZE} \
- -e enable_venv=${ENABLE_VENV} \
- -e test_vm_domain_type=${VM_DOMAIN_TYPE} \
- -e ${INVENTORY_FILE_FORMAT}=${BAREMETAL_DATA_FILE}
-
-# Execute the installation and VM startup test
-${ANSIBLE} ${XCI_ANSIBLE_VERBOSITY} \
- -i inventory/bifrost_inventory.py \
- ${TEST_PLAYBOOK} \
- -e use_cirros=${USE_CIRROS} \
- -e testing_user=${TESTING_USER} \
- -e test_vm_num_nodes=${TEST_VM_NUM_NODES} \
- -e test_vm_cpu='host-passthrough' \
- -e inventory_dhcp=${INVENTORY_DHCP} \
- -e inventory_dhcp_static_ip=${INVENTORY_DHCP_STATIC_IP} \
- -e enable_venv=${ENABLE_VENV} \
- -e enable_inspector=${USE_INSPECTOR} \
- -e inspect_nodes=${INSPECT_NODES} \
- -e download_ipa=${DOWNLOAD_IPA} \
- -e create_ipa_image=${CREATE_IPA_IMAGE} \
- -e write_interfaces_file=${WRITE_INTERFACES_FILE} \
- -e ipv4_gateway=192.168.122.1 \
- -e wait_timeout=${PROVISION_WAIT_TIMEOUT} \
- -e enable_keystone=false
-EXITCODE=$?
-
-if [ $EXITCODE != 0 ]; then
- echo "************************************"
- echo "Provisioning failed. See logs folder"
- echo "************************************"
-fi
-
-exit $EXITCODE
diff --git a/ci/README.md b/ci/README.md
new file mode 100644
index 00000000..c0873dad
--- /dev/null
+++ b/ci/README.md
@@ -0,0 +1,7 @@
+The scripts located in this folder are used by OPNFV XCI/Jenkins
+and they are not intended to be used by users or developers.
+
+The scripts are executed by Jenkins jobs directly in execute-shell
+or by simple wrappers, so that most of the XCI-specific logic that is
+developed and maintained for XCI CI gets verified like the rest of
+the XCI scripts.
diff --git a/docs/conf.py b/docs/conf.py
new file mode 100644
index 00000000..86ab8c57
--- /dev/null
+++ b/docs/conf.py
@@ -0,0 +1 @@
+from docs_conf.conf import * # flake8: noqa
diff --git a/docs/conf.yaml b/docs/conf.yaml
new file mode 100644
index 00000000..305b679e
--- /dev/null
+++ b/docs/conf.yaml
@@ -0,0 +1,3 @@
+---
+project_cfg: opnfv
+project: releng-xci
diff --git a/docs/images/arch-layout-k8s-ha.png b/docs/images/arch-layout-k8s-ha.png
new file mode 100644
index 00000000..e0870305
--- /dev/null
+++ b/docs/images/arch-layout-k8s-ha.png
Binary files differ
diff --git a/docs/images/arch-layout-k8s-noha.png b/docs/images/arch-layout-k8s-noha.png
new file mode 100644
index 00000000..0ee8bceb
--- /dev/null
+++ b/docs/images/arch-layout-k8s-noha.png
Binary files differ
diff --git a/docs/requirements.txt b/docs/requirements.txt
new file mode 100644
index 00000000..f26b0414
--- /dev/null
+++ b/docs/requirements.txt
@@ -0,0 +1,3 @@
+lfdocs-conf
+sphinxcontrib-httpdomain
+sphinx-opnfv-theme
diff --git a/docs/specs/infra_manager.rst b/docs/specs/infra_manager.rst
new file mode 100644
index 00000000..a8ecb548
--- /dev/null
+++ b/docs/specs/infra_manager.rst
@@ -0,0 +1,130 @@
+PDF and IDF support in XCI
+###########################
+:date: 2018-04-30
+
+This spec introduces the work required to adapt XCI to use PDF and IDF, which
+will be used for virtual and baremetal deployments.
+
+Definition of Terms
+===================
+* Baremetal deployment: Deployment on physical servers, as opposed to deploying
+software on virtual machines or containers running on the same physical server.
+
+* Virtual deployment: Deployment on virtual machines, i.e. the servers where
+nodes will be deployed are virtualized. For example, in OpenStack, computes and
+controllers will be virtual machines. This deployment is normally done on just
+one physical server.
+
+* PDF: It stands for POD Descriptor File, which is a document that lists the
+hardware characteristics of a set of physical or virtual machines which form
+the infrastructure. Example:
+
+https://git.opnfv.org/pharos/tree/config/pdf/pod1.yaml
+
+* IDF: It stands for Installer Descriptor File, which is a document that
+includes useful information for the installers to accomplish the baremetal
+deployment. Example:
+
+https://git.opnfv.org/fuel/tree/mcp/config/labs/local/idf-pod1.yaml
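+
+For illustration only, a trimmed node entry in the spirit of the Pharos
+examples linked above could look as follows (field names and values here are
+indicative, not the authoritative schema)::
+
+  # hypothetical PDF fragment describing a single node
+  nodes:
+    - name: node1
+      node:
+        type: baremetal
+        cores: 2
+        memory: 64G
+      remote_management:
+        type: ipmi
+        address: 192.168.100.11
+      interfaces:
+        - mac_address: "52:54:00:aa:bb:01"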
+
+Problem description
+===================
+
+Currently, XCI only supports virtualized deployments running on one server. This
+is good when the user has limited resources; however, baremetal is the preferred
+way to deploy NFV platforms in lab or production environments. Besides, this
+limits the scope of the testing greatly because we cannot test hardware-specific
+NFV features such as SR-IOV.
+
+Proposed change
+===============
+
+Introduce the infra_manager tool which will prepare the infrastructure for XCI
+to drive the deployment in a set of virtual or baremetal nodes. This tool will
+execute two tasks:
+
+1 - Creation of virtual nodes or initialization of the preparations for
+baremetal nodes
+2 - OS provisioning on nodes, whether virtual or baremetal
+
+Once those steps are ready, XCI will continue with the deployment of the
+scenario on the provisioned nodes.
+
+The infra_manager tool will consume the PDF and IDF files describing the
+infrastructure as input. It will then use a <yet-to-be-created-tool> to do
+step 1 and bifrost to boot the Operating System on the nodes.
+
+Among other services, Bifrost uses:
+- Disk image builder (dib) to generate the OS images
+- dnsmasq as the DHCP server which will provide the pxe boot mechanism
+- ipmitool to manage the servers
+
+Bifrost will be deployed inside a VM in the jumphost.
+
+For the time being, we will create the infrastructure based on the defined XCI
+flavors; however, the implementation should not hinder the possibility of
+having one pdf and idf per scenario, defining the characteristics and the
+number of nodes to be deployed.
+
+Code impact
+-----------
+
+The new code will be introduced in a new directory called infra_manager under
+releng-xci/xci/prototypes
+
+Tentative User guide
+--------------------
+
+Assuming the user cloned releng-xci in the jumphost, the following should be
+done:
+
+1 - Move the idf and pdf files which describe the infrastructure to
+releng-xci/xci/prototypes/infra_manager/var. There is an example under xci/var
+
+2 - Export the XCI_FLAVOR variable (e.g. export XCI_FLAVOR=noha)
+
+3 - Run the <yet-to-be-created-tool> to create the virtual nodes based on the
+provided PDF information (cpu, ram, disk...) or initialize the preparations for
+baremetal nodes
+
+4 - Start the bifrost process to boot the nodes
+
+5 - Run the VIM deployer script:
+releng-xci/xci/installer/$inst/deploy.sh
+
+where $inst = {osa, kubespray, kolla}
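+
+Put together, the steps above could look like the following sketch (the
+provisioning tool from step 3 does not exist yet and is kept as an explicit
+placeholder; the pdf/idf file names are examples)::
+
+  cd releng-xci/xci
+  cp my-pdf.yml my-idf.yml prototypes/infra_manager/var/
+  export XCI_FLAVOR=noha
+  # <yet-to-be-created-tool>   # step 3: create VMs / prepare baremetal nodes
+  # step 4: bifrost boots the OS on the nodes
+  ./installer/osa/deploy.sh    # step 5; or kubespray/kolla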
+
+In case of problems, the best way to debug is to access the bifrost VM and use:
+
+* bifrost-utils
+* ipmitool
+* check the DHCP messages in /var/log/syslog
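+
+For example (illustrative commands; the BMC address and credentials come
+from your PDF)::
+
+  # query a node's power state over IPMI
+  ipmitool -I lanplus -H 192.168.100.11 -U admin -P secret power status
+  # follow dnsmasq/PXE activity while a node boots
+  grep dnsmasq /var/log/syslog | tail -n 20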
+
+
+Implementation
+==============
+
+Assignee(s)
+-----------
+
+Primary assignee:
+ Manuel Buil (mbuil)
+ Jack Morgan (jmorgan1)
+ Somebody_else_please (niceperson)
+
+Work items
+----------
+
+1. Provide support for a dynamically generated inventory based on PDF and IDF.
+This mechanism could be used for both baremetal and virtual deployments.
+
+2. Contribute the servers-prepare.sh script
+
+3. Contribute the nodes-deploy.sh script
+
+4. Integrate the three previous components correctly
+
+5. Provide support for the XCI supported operating systems (opensuse, Ubuntu,
+centos)
+
+6. Allow pdf and idf per scenario
diff --git a/docs/specs/k8-calico-onap.rst b/docs/specs/k8-calico-onap.rst
new file mode 100644
index 00000000..445e5c71
--- /dev/null
+++ b/docs/specs/k8-calico-onap.rst
@@ -0,0 +1,141 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. SPDX-License-Identifier: CC-BY-4.0
+.. Copyright 2018 Intel Corporation
+
+.. Links
+.. _Open Networking Automation Platform: https://www.onap.org/
+.. _ONAP metric analysis: https://onap.biterg.io/
+.. _ONAP on Kubernetes: http://onap.readthedocs.io/en/latest/submodules/oom.git/docs/oom_quickstart_guide.html
+.. _Helm: https://docs.helm.sh/
+.. _ONAP on OpenStack: https://wiki.onap.org/display/DW/ONAP+Installation+in+Vanilla+OpenStack
+.. _OOM Minimum Hardware Configuration: http://onap.readthedocs.io/en/latest/submodules/oom.git/docs/oom_cloud_setup_guide.html#minimum-hardware-configuration
+.. _OOM Software Requirements: http://onap.readthedocs.io/en/latest/submodules/oom.git/docs/oom_cloud_setup_guide.html#software-requirements
+.. _seed code: https://gitlab.com/Orange-OpenSource/onap_oom_automatic_installation
+.. _Orange ONAP OOM Deployment Resource Requirements: https://gitlab.com/Orange-OpenSource/kubespray_automatic_installation/blob/521fa87b20fdf4643f30fc28e5d70bdf9f1c98f3/vars/pdf.yaml
+
+This spec introduces the work required to include the XCI scenario
+for `Open Networking Automation Platform`_ (ONAP) through the ONAP
+Operations Manager (OOM) tool. This tool provides the ability to manage
+the entire life-cycle of an ONAP installation on top of a Kubernetes
+deployment.
+
+Problem description
+===================
+According to the `ONAP metric analysis`_, more than 26K commit
+changes have been submitted since its announcement. Every patchset
+that is merged raises a Jenkins job for the creation and deployment
+of a Docker container image for the corresponding service. Those new
+images are consumed by deployment methods like `ONAP on Kubernetes`_
+and `ONAP on OpenStack`_ during the installation of ONAP services.
+
+Given that ONAP is constantly changing, early detection of issues can
+be crucial for ensuring the proper operation of the OOM tool.
+
+Minimum Hardware Requirements
+=============================
+
+Initially, the No HA flavor will be the only supported flavor in order to
+bring up a reference implementation of the scenario. Support for other
+flavors will be introduced based on this implementation.
+
+According to the `OOM Minimum Hardware Configuration`_, ONAP requires a
+large amount of resources, especially on Kubernetes Worker nodes.
+
+Given that the No HA flavor has multiple worker nodes, the containers can
+be distributed between the nodes, resulting in a smaller footprint
+of resources.
+
+The No HA scenario consists of 1 Kubernetes master node and 2 Kubernetes
+Worker nodes. Total resource requirements should be calculated based on
+the number of nodes.
+
+This recommendation is work in progress and is based on the Orange
+implementation, which can be seen in
+`Orange ONAP OOM Deployment Resource Requirements`_.
+The resource requirements are subject to change and the scenario will
+be updated as necessary.
+
+Hardware for Kubernetes Master Node(s)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+* RAM: 8GB
+* HD: 150GB
+* vCores: 8
+
+Hardware for Kubernetes Worker Node(s)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+* RAM: 64GB
+* HD: 80GB
+* vCores: 16
+
+Proposed change
+===============
+
+In order to guarantee the proper installation and validation of ONAP
+services, this spec proposes two phases that complement each other:
+
+1. Creation of the k8-calico-onap scenario for the installation of ONAP
+services. This new scenario will be designed to validate the
+installation process provided by the OOM tool.
+2. Adding integration tests to ensure that ONAP is operating
+properly. This process should cover Design and Runtime phases.
+
+Code impact
+-----------
+New code will be created based on the existing k8-calico-nofeature
+scenario and will be placed in the scenarios/k8-calico-onap directory
+in the releng-xci-scenario repo. The ONAP installation should proceed
+once the VIM has been installed and before the OPNFV tests run.
+
+
+The default configuration for the virtual resources (4 vCores, 8GB RAM,
+and 100GB HD) offered by XCI does not satisfy the ONAP needs. The
+scenario override mechanism will be used to bring up nodes with
+the necessary amount of resources. This will be replaced by PDF and
+IDF once they become available. PDF and IDF implementation is a
+separate work item and is not expected to be a dependency for the
+implementation of this scenario.
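+
+As a sketch, such an override could raise the existing XCI VM sizing
+variables toward the worker-node figures above (the exact hook depends on
+the scenario override mechanism)::
+
+  export VM_CPU=16
+  export VM_MEMORY_SIZE=65536   # in MB, i.e. 64GB
+  export VM_DISK=80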
+
+Software Requirements
+---------------------
+
+OOM has gone through significant changes during the Beijing release
+cycle. This resulted in a changed way of installing ONAP.
+
+In its current release, new software is necessary to install ONAP,
+as listed below and in `OOM Software Requirements`_.
+
+* Helm: 2.8.x
+* kubectl: 1.8.10
+
+OOM also provides a Makefile that collects instructions for the
+creation of ONAP packages into the Tiller repository. Which ONAP
+services are going to be enabled is determined through the OOM
+configuration; the new role handling this will be placed in the
+scenarios/k8-calico-onap/role/k8-calico-onap/tasks folder in the
+releng-xci-scenario repository.
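+
+A quick pre-flight check on the deployment host can confirm that both tools
+match the versions listed above (plain upstream CLI calls)::
+
+  helm version --client     # expect a v2.8.x client
+  kubectl version --client  # expect v1.8.10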
+
+Tentative User guide
+--------------------
+TBD
+
+Implementation
+==============
+The Orange team has been working on this scenario for a while; this
+new role can use and adapt their `seed code`_ during the implementation.
+
+Assignee(s)
+-----------
+
+Primary assignee:
+ Victor Morales (electrocucaracha)
+ Fatih Degirmenci (fdegir)
+ Jack Morgan (jmorgan1)
+
+Work items
+----------
+TBD
+
+Glossary
+--------
diff --git a/docs/specs/k8-odl-coe.rst b/docs/specs/k8-odl-coe.rst
new file mode 100644
index 00000000..cd29456c
--- /dev/null
+++ b/docs/specs/k8-odl-coe.rst
@@ -0,0 +1,105 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. SPDX-License-Identifier: CC-BY-4.0
+.. Copyright 2018 Ericsson AB and Others
+
+.. Links
+.. _OpenDaylight COE: https://wiki.opendaylight.org/view/COE:Main
+.. _setting-up-coe-dev-environment: https://github.com/opendaylight/coe/blob/master/docs/setting-up-coe-dev-environment.rst
+.. _ansible-opendaylight: https://git.opendaylight.org/gerrit/gitweb?p=integration/packaging/ansible-opendaylight.git;a=tree
+
+This spec proposes adding a k8-odl-coe XCI scenario with OpenDaylight as the
+networking provider for Kubernetes, using the OpenDaylight COE (Container
+Orchestration Engine) and NetVirt projects.
+
+Problem Description
+===================
+
+Currently OpenDaylight's advanced networking capabilities are not leveraged
+with Kubernetes in any scenarios. This spec proposes a reference platform for
+deployments that want to use OpenDaylight as a networking backend for
+Kubernetes.
+
+Minimum Hardware Requirements
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Hardware for Kubernetes Master Node(s)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+* RAM: 16 GB (20 GB for ha flavor i.e. for OpenDaylight Clustering)
+* HD: 80 GB
+* vCores: 6
+
+Hardware for Kubernetes Worker Node(s)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+* RAM: 12 GB
+* HD: 80 GB
+* vCores: 6
+
+Supported XCI Sandbox Flavors
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+This scenario will support deployments on Mini, No HA and HA XCI Sandbox Flavors.
+
+Proposed Change
+===============
+
+1. Provide Pod Descriptor Files (PDF) and IDF (Installer Descriptor Files)
+ specific to this scenario to install Kubernetes with OpenDaylight COE.
+2. Introduce a new scenario k8-odl-coe in releng-xci-scenarios repository.
+3. Reuse the role from the k8-nosdn-nofeature scenario to install Kubernetes.
+   It sets the kube_network_plugin option to 'cloud' in k8s-cluster.yml so that
+   Kubespray doesn't configure networking between pods. This enables
+   OpenDaylight to be chosen as the networking backend in steps 4-7 (see the
+   sketch after this list).
+4. Enhance the upstream `ansible-opendaylight`_ role to deploy OpenDaylight with
+   the COE Watcher on the k8s master node(s) and the CNI plugin on the k8s
+   master and worker node(s).
+5. Add the required Ansible tasks to the k8-odl-coe role to direct XCI and the
+   ansible-opendaylight role to configure k8s with OpenDaylight as the
+   networking backend for pod connectivity.
+6. Run the Health Check by testing the pods' connectivity.
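+
+The Kubespray side of step 3 amounts to a single group variable; a sketch of
+what the scenario would ensure (the exact group_vars path varies across
+Kubespray versions)::
+
+  # hand pod networking over to OpenDaylight COE instead of Kubespray
+  echo 'kube_network_plugin: cloud' >> inventory/group_vars/k8s-cluster.yml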
+
+The COE Watcher binary and COE CNI plugin are built from the OpenDaylight COE
+source code. The user will have the flexibility to choose its SHA from XCI's
+ansible-role-requirements.yml file.
+
+Code Impact
+-----------
+
+Code specific to the k8-odl-coe scenario will be added to the xci/scenarios
+directory of the releng-xci-scenarios repository.
+
+User Guide
+----------
+
+No user guide will be provided.
+
+Implementation
+==============
+
+See the Proposed Change section.
+
+Assignee(s)
+-----------
+
+Primary assignees:
+
+* Prem Sankar G (premsa)
+* Periyasamy Palanisamy (epalper)
+* Fatih Degirmenci (fdegir)
+
+Work Items
+----------
+
+1. Enhance the akka.conf.j2 in the upstream ansible-opendaylight role to work
+   with k8s deployments (i.e. run the ODL cluster on k8s master nodes).
+   Currently this works only for deployments based on OpenStack-Ansible.
+2. Enhance the upstream ansible-opendaylight role to install the odl-netvirt-coe
+   and odl-restconf Karaf features, and build the COE Watcher and CNI plugin
+   binaries from source.
+3. Implement configure-kubenet.yml to choose OpenDaylight COE as the
+ networking backend.
+4. Implement Health Check tests.
+
+Glossary
+--------
diff --git a/docs/xci-criterias-cls.rst b/docs/xci-criterias-cls.rst
new file mode 100644
index 00000000..0a0f8f97
--- /dev/null
+++ b/docs/xci-criterias-cls.rst
@@ -0,0 +1,74 @@
+.. _xci-criterias-cls:
+
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. SPDX-License-Identifier: CC-BY-4.0
+.. (c) Fatih Degirmenci (fatih.degirmenci@ericsson.com)
+
+=============================================
+XCI Promotion Criteria and Confidence Levels
+=============================================
+
+This document is structured in a way to explain the current Promotion Criteria and Confidence
+Levels XCI uses to test and promote the scenarios. This is followed by other chapters to
+start the conversation around how these criteria can be improved depending on the features
+and scenarios that are onboarded to XCI or have declared interest in participating.
+
+The expectation is to update this document collaboratively with the feature projects, scenario
+owners, the XCI team, test projects and release management to find the right/sufficient/necessary
+level of testing that is relevant to the features and scenarios.
+
+This document should be seen as guidance for the projects taking part in XCI until
+the OPNFV CD-Based Release Model and the criteria set for the CI Loops for that track
+become available. Until this happens, CI Loops will be constructed/updated by taking input
+from this document to provide feedback to the projects based on the test scope set by the
+projects themselves.
+
+The CD-Based Release Model will supersede the information and criteria set in this document.
+
+Existing CI Loops and Promotion Criteria
+=========================================
+
+XCI has determined various CI loops that run for the scenarios that take part in XCI.
+These loops are:
+
+* verify
+* post-merge
+
+Currently, XCI uses the verify and post-merge loops to verify the changes and promote
+the scenarios to the next loop in the CI flow as candidates. The details of what
+each loop currently does are listed below.
+
+verify
+------
+
+The changes and subsequent patches enter this pipeline and get verified against
+the most basic criteria OPNFV has.
+
+* virtual noha deployment
+* functest healthcheck
+
+The checks done within this loop are common for all the scenarios and features, no matter
+whether they are OpenStack or Kubernetes scenarios.
+
+The changes that get Verified+1 from this pipeline are deemed to be good and
+can be merged to master if there are sufficient +2 votes from the XCI and/or project committers.
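+
+A developer can approximate this loop locally with the sandbox defaults (the
+FUNCTEST_* variables below are the ones defined in xci/config/env-vars)::
+
+  export XCI_FLAVOR=noha
+  export FUNCTEST_MODE=tier
+  export FUNCTEST_SUITE_NAME=healthcheck
+  ./xci-deploy.sh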
+
+post-merge
+----------
+
+The changes that are merged to master enter this pipeline and get verified
+against the same criteria as the verify pipeline.
+
+* virtual noha deployment
+* functest healthcheck
+
+The checks done within this loop are common for all the scenarios, no matter
+whether they are OpenStack or Kubernetes scenarios.
+
+The changes that are successfully verified get promoted to the next loop in
+the pipeline.
+
+Evolving CI Loops and Promotion Criteria
+=========================================
+
+TBD
diff --git a/docs/xci-overview.rst b/docs/xci-overview.rst
index 575eb37c..9b225ec1 100644
--- a/docs/xci-overview.rst
+++ b/docs/xci-overview.rst
@@ -138,7 +138,7 @@ Multi-distro Support
--------------------
Giving choice and not imposing things on developers and users are two
-of the important aspects of XCI. This means that if they want to have all in one
+of the important aspects of XCI. This means that if they want to have smaller
deployments, they should be able to do that by using
:ref:`different flavors <sandbox-flavors>` provided by XCI.
diff --git a/docs/xci-user-guide.rst b/docs/xci-user-guide.rst
index 7a411257..5e76ca16 100644
--- a/docs/xci-user-guide.rst
+++ b/docs/xci-user-guide.rst
@@ -41,6 +41,7 @@ The sandbox provides
* multiple OPNFV scenarios to install
* ability to select different versions of upstream components to base the work on
* ability to enable additional OpenStack services or disable others
+* ability to install Kubernetes with different network plugins
One last point to highlight here is that the XCI itself uses the sandbox for
development and test purposes so it is continuously tested to ensure it works
@@ -50,10 +51,11 @@ purposes.
Components of the Sandbox
===================================
-The sandbox uses OpenStack projects for VM node creation, provisioning
-and OpenStack installation. XCI Team provides playbooks, roles, and scripts
-to ensure the components utilized by the sandbox work in a way that serves
-the users in the best possible way.
+The sandbox uses OpenStack tools for VM node creation and provisioning.
+OpenStack and Kubernetes installations are done using the tools from the
+corresponding upstream projects with no changes to them. The XCI Team provides
+playbooks, roles, and scripts to ensure the components utilized by the sandbox
+work in a way that serves the users in the best possible way.
* **openstack/bifrost:** Bifrost (pronounced bye-frost) is a set of Ansible
playbooks that automates the task of deploying a base image onto a set
@@ -70,6 +72,13 @@ the users in the best possible way.
More information about this project can be seen on
`OpenStack Ansible documentation <https://docs.openstack.org/developer/openstack-ansible/>`_.
+* **kubernetes-incubator/kubespray:** Kubespray is a composition of Ansible playbooks,
+  inventory, provisioning tools, and domain knowledge for generic Kubernetes
+  cluster configuration management tasks. The aim of Kubespray is deploying a
+  production-ready Kubernetes cluster.
+  More information about this project can be seen on
+  `Kubespray documentation <https://kubernetes.io/docs/getting-started-guides/kubespray/>`_.
+
* **opnfv/releng-xci:** OPNFV Releng Project provides additional scripts, Ansible
playbooks and configuration options in order for developers to have an easy
way of using openstack/bifrost and openstack/openstack-ansible by just
@@ -85,29 +94,24 @@ deployed using VM nodes.
Available flavors are listed on the table below.
-+------------------+------------------------+---------------------+-------------------------+
-| Flavor | Number of VM Nodes | VM Specs Per Node | Time Estimates |
-+==================+========================+=====================+=========================+
-| All in One (aio) | | 1 VM Node | | vCPUs: 8 | | Provisioning: 10 mins |
-| | | controller & compute | | RAM: 12GB | | Deployment: 90 mins |
-| | | on single/same node | | Disk: 80GB | | Total: 100 mins |
-| | | 1 compute node | | NICs: 1 | | |
-+------------------+------------------------+---------------------+-------------------------+
-| Mini | | 3 VM Nodes | | vCPUs: 6 | | Provisioning: 12 mins |
-| | | 1 deployment node | | RAM: 12GB | | Deployment: 65 mins |
-| | | 1 controller node | | Disk: 80GB | | Total: 77 mins |
-| | | 1 compute node | | NICs: 1 | | |
-+------------------+------------------------+---------------------+-------------------------+
-| No HA | | 4 VM Nodes | | vCPUs: 6 | | Provisioning: 12 mins |
-| | | 1 deployment node | | RAM: 12GB | | Deployment: 70 mins |
-| | | 1 controller node | | Disk: 80GB | | Total: 82 mins |
-| | | 2 compute nodes | | NICs: 1 | | |
-+------------------+------------------------+---------------------+-------------------------+
-| HA | | 6 VM Nodes | | vCPUs: 6 | | Provisioning: 15 mins |
-| | | 1 deployment node | | RAM: 12GB | | Deployment: 105 mins |
-| | | 3 controller nodes | | Disk: 80GB | | Total: 120 mins |
-| | | 2 compute nodes | | NICs: 1 | | |
-+------------------+------------------------+---------------------+-------------------------+
++------------------+------------------------+---------------------+--------------------------+--------------------------+
+| Flavor | Number of VM Nodes | VM Specs Per Node | Time Estimates Openstack | Time Estimates Kubernetes|
++==================+========================+=====================+==========================+==========================+
+| Mini | | 3 VM Nodes | | vCPUs: 6 | | Provisioning: 12 mins | | Provisioning: 12 mins |
+| | | 1 deployment node | | RAM: 12GB | | Deployment: 65 mins | | Deployment: 35 mins |
+| | | 1 controller node | | Disk: 80GB | | Total: 77 mins | | Total: 47 mins |
+| | | 1 compute node | | NICs: 1 | | | | |
++------------------+------------------------+---------------------+--------------------------+--------------------------+
+| No HA | | 4 VM Nodes | | vCPUs: 6 | | Provisioning: 12 mins | | Provisioning: 12 mins |
+| | | 1 deployment node | | RAM: 12GB | | Deployment: 70 mins | | Deployment: 35 mins |
+| | | 1 controller node | | Disk: 80GB | | Total: 82 mins | | Total: 47 mins |
+| | | 2 compute nodes | | NICs: 1 | | | | |
++------------------+------------------------+---------------------+--------------------------+--------------------------+
+| HA | | 6 VM Nodes | | vCPUs: 6 | | Provisioning: 15 mins | | Provisioning: 15 mins |
+| | | 1 deployment node | | RAM: 12GB | | Deployment: 105 mins | | Deployment: 40 mins |
+| | | 3 controller nodes | | Disk: 80GB | | Total: 120 mins | | Total: 55 mins |
+| | | 2 compute nodes | | NICs: 1 | | | | |
++------------------+------------------------+---------------------+--------------------------+--------------------------+
The specs for VMs are configurable and the more vCPU/RAM the better.
@@ -122,8 +126,8 @@ depending on
* installed/activated OpenStack services
* internet connection bandwidth
-Flavor Layouts
---------------
+Flavor Layouts - OpenStack Based Deployments
+--------------------------------------------
All flavors are created and deployed based on the upstream OpenStack Ansible (OSA)
guidelines.
@@ -141,14 +145,6 @@ ongoing.
The differences between the flavors are documented below.
-**All in One**
-
-As shown on the table in the previous section, this flavor consists of a single
-node. All the OpenStack services, including compute run on the same node.
-
-The flavor All in One (aio) is deployed based on the process described in the
-upstream documentation. Please check `OpenStack Ansible Developer Quick Start <https://docs.openstack.org/openstack-ansible/pike/contributor/quickstart-aio.html>`_ for details.
-
**Mini/No HA/HA**
These flavors consist of multiple nodes.
@@ -165,6 +161,38 @@ flavors.
.. image:: images/arch-layout-test.png
:scale: 75 %
+Flavor Layouts - Kubernetes Based Deployments
+---------------------------------------------
+
+All flavors are created and deployed based on the upstream Kubespray guidelines.
+
+Calico is used as the network plugin by default; flannel, weave, contiv, canal
+and cilium are currently supported as well.
+
+The differences between the flavors are documented below.
+
+**Mini/No HA/HA**
+
+These flavors consist of multiple nodes.
+
+* **opnfv**: This node is used for driving the installation towards target nodes
+ in order to ensure the deployment process is isolated from the physical host
+ and always done on a clean machine.
+* **master**: provides the Kubernetes cluster’s control plane.
+* **node**: a worker machine in Kubernetes, previously known as a minion.
+
+The HA flavor has 3 master nodes, and a load balancer is set up as part of the deployment process.
+Access to the Kubernetes cluster is done through the load balancer.
+
+Please see the diagrams below for the host and service layout for these
+flavors.
+
+.. image:: images/arch-layout-k8s-noha.png
+ :scale: 75 %
+
+.. image:: images/arch-layout-k8s-ha.png
+ :scale: 75 %
+
User Guide
==========
@@ -200,12 +228,17 @@ How to Use
| ``cd releng-xci/xci``
-4. Execute the sandbox script
+4. If you want to deploy a Kubernetes based scenario, set the variables as below; otherwise skip this step.
+
+ | ``export INSTALLER_TYPE=kubespray``
+ | ``export DEPLOY_SCENARIO=k8-nosdn-nofeature``
+
+5. Execute the sandbox script
| ``./xci-deploy.sh``
Issuing above command will start the sandbox deployment using the default
-flavor ``aio`` and the verified versions of upstream components.
+flavor ``mini`` and the verified versions of upstream components.
(`pinned-versions <https://git.opnfv.org/releng-xci/tree/xci/config/pinned-versions>`_).
The sandbox should be ready between 1,5 and 2 hours depending on the host
machine.
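+
+For a Kubernetes based scenario, the full sequence from a fresh clone is thus::
+
+  git clone https://gerrit.opnfv.org/gerrit/releng-xci.git
+  cd releng-xci/xci
+  export INSTALLER_TYPE=kubespray
+  export DEPLOY_SCENARIO=k8-nosdn-nofeature
+  ./xci-deploy.sh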
@@ -241,8 +274,14 @@ default.
5. Set the version to use for openstack-ansible
+ 1) if deploying OpenStack based scenario
+
| ``export OPENSTACK_OSA_VERSION=master``
+ 2) if deploying Kubernetes based scenario
+
+ | ``export KUBESPRAY_VERSION=master``
+
6. Set where the logs should be stored
| ``export LOG_PATH=/home/jenkins/xcilogs``
@@ -256,7 +295,7 @@ behaviors, especially if it is changed to ``master``. If you are not
sure about how good the version you intend to use is, it is advisable to
use the pinned versions instead.
-**Verifying the Basic Operation**
+**Verifying the OpenStack Basic Operation**
You can verify the basic operation using the commands below.
@@ -276,6 +315,23 @@ You can also access the Horizon UI by using the URL, username, and
the password displayed on your console upon the completion of the
deployment.
+**Verifying the Kubernetes Basic Operation**
+
+You can verify the basic operation using the commands below.
+
+1. Log in to the opnfv host
+
+ | ``ssh root@192.168.122.2``
+
+2. Issue kubectl commands
+
+ | ``kubectl get nodes``
+
+You can also access the Kubernetes Dashboard UI by using the URL,
+username, and the password displayed on your console upon the
+completion of the deployment.
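+
+A few additional read-only checks can confirm cluster health (standard
+kubectl commands, nothing XCI specific)::
+
+  kubectl cluster-info
+  kubectl get pods --all-namespaces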
+
+
**Debugging Tips**
If ``xci-deploy.sh`` fails midway through and you happen to fix whatever
@@ -295,11 +351,12 @@ Here are steps that take place upon the execution of the sandbox script
2. Installs ansible on the host where sandbox script is executed.
3. Creates and provisions VM nodes based on the flavor chosen by the user.
4. Configures the host where the sandbox script is executed.
-5. Configures the deployment host which the OpenStack installation will
- be driven from.
-6. Configures the target hosts where OpenStack will be installed.
-7. Configures the target hosts as controller(s) and compute(s) nodes.
-8. Starts the OpenStack installation.
+5. Configures the deployment host from which the OpenStack/Kubernetes
+   installation will be driven.
+6. Configures the target hosts where OpenStack/Kubernetes will be installed.
+7. Configures the target hosts as controller(s)/compute(s) or master(s)/worker(s)
+ depending on the deployed scenario.
+8. Starts the OpenStack/Kubernetes installation.
.. image:: images/xci-basic-flow.png
:height: 640px
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 00000000..6aa16066
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,25 @@
+# Tox (http://tox.testrun.org/) is a tool for running tests
+# in multiple virtualenvs. This configuration file will run the
+# test suite on all supported python versions. To use it, "pip install tox"
+# and then run "tox" from this directory.
+
+[tox]
+envlist = docs,docs-linkcheck
+skipsdist = True
+
+[testenv]
+usedevelop = False
+setenv=
+ HOME = {envtmpdir}
+ PYTHONPATH = {toxinidir}
+
+[testenv:docs]
+deps = -r{toxinidir}/docs/requirements.txt
+commands =
+ sphinx-build -b html -n -d {envtmpdir}/doctrees ./docs {toxinidir}/docs/_build/html
+ echo "Generated docs available in {toxinidir}/docs/_build/html"
+whitelist_externals = echo
+
+[testenv:docs-linkcheck]
+deps = -r{toxinidir}/docs/requirements.txt
+commands = sphinx-build -b linkcheck -d {envtmpdir}/doctrees ./docs {toxinidir}/docs/_build/linkcheck
diff --git a/xci/README.rst b/xci/README.rst
index d7555d46..a18d92ee 100644
--- a/xci/README.rst
+++ b/xci/README.rst
@@ -160,6 +160,37 @@ execute sandbox script
./xci-deploy.sh
+Baremetal Usage
+---------------
+
+The previous deployments are based on VMs, i.e. controllers and computes are
+VMs. It is also possible to deploy on baremetal; for that, PDF and IDF files
+which describe the hardware need to be provided to the sandbox script:
+
+clone OPNFV releng-xci repository
+
+ git clone https://gerrit.opnfv.org/gerrit/releng-xci.git
+
+change into directory where the sandbox script is located
+
+ cd releng-xci/xci
+
+set the sandbox flavor
+
+ export XCI_FLAVOR=noha
+
+set the version to use for openstack-ansible
+
+ export OPENSTACK_OSA_VERSION=master
+
+set where the logs should be stored
+
+ export LOG_PATH=/home/jenkins/xcilogs
+
+execute sandbox script
+
+ ./xci-deploy.sh -i var/ericsson-pod2-idf.yml -p var/ericsson-pod2-pdf.yml
+
==============
User Variables
==============
diff --git a/xci/config/aio-vars b/xci/config/aio-vars
index e5a1aee9..cff181a9 100755
--- a/xci/config/aio-vars
+++ b/xci/config/aio-vars
@@ -9,10 +9,9 @@
#-------------------------------------------------------------------------------
# Configure VM Nodes
#-------------------------------------------------------------------------------
-export TEST_VM_NUM_NODES=1
-export TEST_VM_NODE_NAMES=opnfv
+export NUM_NODES=1
+export NODE_NAMES=opnfv
export VM_DOMAIN_TYPE=${VM_DOMAIN_TYPE:-kvm}
export VM_CPU=${VM_CPU:-8}
export VM_DISK=${VM_DISK:-80}
export VM_MEMORY_SIZE=${VM_MEMORY_SIZE:-8192}
-export VM_DISK_CACHE=none
diff --git a/xci/config/env-vars b/xci/config/env-vars
index c7744ca1..a90e8533 100755
--- a/xci/config/env-vars
+++ b/xci/config/env-vars
@@ -2,22 +2,46 @@
# !!! Changing or overriding these will most likely break everything altogether !!!
# Please do not change these settings if you are not developing for XCI!
#-------------------------------------------------------------------------------
-export OPNFV_RELENG_GIT_URL=https://gerrit.opnfv.org/gerrit/releng-xci.git
-export OPENSTACK_BIFROST_GIT_URL=https://git.openstack.org/openstack/bifrost
-export OPENSTACK_OSA_GIT_URL=https://git.openstack.org/openstack/openstack-ansible
-export OPENSTACK_OSA_OPENRC_GIT_URL=https://git.openstack.org/openstack/openstack-ansible-openstack_openrc
+
+# Repositories
+export OPNFV_RELENG_GIT_URL=${OPNFV_RELENG_GIT_URL:-https://gerrit.opnfv.org/gerrit/releng-xci.git}
+export OPENSTACK_BIFROST_GIT_URL=${OPENSTACK_BIFROST_GIT_URL:-https://git.openstack.org/openstack/bifrost}
+export OPENSTACK_OSA_GIT_URL=${OPENSTACK_OSA_GIT_URL:-https://git.openstack.org/openstack/openstack-ansible}
+export OPENSTACK_OSA_OPENRC_GIT_URL=${OPENSTACK_OSA_OPENRC_GIT_URL:-https://git.openstack.org/openstack/openstack-ansible-openstack_openrc}
+export KUBESPRAY_GIT_URL=${KUBESPRAY_GIT_URL:-https://github.com/kubernetes-sigs/kubespray.git}
+export OSH_GIT_URL=${OSH_GIT_URL:-https://github.com/openstack/openstack-helm.git}
+export OSH_INFRA_GIT_URL=${OSH_INFRA_GIT_URL:-https://github.com/openstack/openstack-helm-infra.git}
+export OPENSTACK_OSA_HAPROXY_GIT_URL=${OPENSTACK_OSA_HAPROXY_GIT_URL:-https://git.openstack.org/openstack/openstack-ansible-haproxy_server}
+export KEEPALIVED_GIT_URL=${KEEPALIVED_GIT_URL:-https://github.com/evrardjp/ansible-keepalived}
+
+export OSH_HELM_BINARY_URL=${OSH_HELM_BINARY_URL:-https://storage.googleapis.com/kubernetes-helm}
+export OSH_HELM_BINARY_VERSION=${OSH_HELM_BINARY_VERSION:-v2.13.1}
+
+
+# Configuration
export OPENSTACK_OSA_ETC_PATH=/etc/openstack_deploy
export OPNFV_HOST_IP=192.168.122.2
-export XCI_FLAVOR_ANSIBLE_FILE_PATH=$XCI_PATH/xci/installer/$XCI_INSTALLER/files/$XCI_FLAVOR
-export CI_LOOP=${CI_LOOP:-daily}
-export JOB_NAME=${JOB_NAME:-false}
+export XCI_FLAVOR_ANSIBLE_FILE_PATH=$XCI_PATH/xci/installer/$INSTALLER_TYPE/files/$XCI_FLAVOR
+
# XCI_CACHE is a cache on localhost where repositories and scenarios are cloned.
export XCI_CACHE=${XCI_PATH}/.cache
+
# OPNFV_XCI_CACHE is similar to XCI_CACHE but refers to the remote OPNFV host.
export OPNFV_XCI_CACHE="/root/releng-xci/.cache"
export XCI_SCENARIOS_CACHE="${XCI_CACHE}/repos/scenarios"
export XCI_PLAYBOOKS=${XCI_PATH}/xci/playbooks
+# Functest parameters
+export FUNCTEST_MODE=${FUNCTEST_MODE:-"tier"}
+export FUNCTEST_SUITE_NAME=${FUNCTEST_SUITE_NAME:-"healthcheck"}
+# TODO: Investigate and fix why the env var FUNCTEST_VERSION set by Jenkins job doesn't take effect
+export FUNCTEST_VERSION=${FUNCTEST_VERSION:-"hunter"}
+
+# CI parameters
+export CI_LOOP=${CI_LOOP:-"daily"}
+export BUILD_TAG=${BUILD_TAG:-"notag"}
+export NODE_NAME=${NODE_NAME:-$(hostname)}
+
#-------------------------------------------------------------------------------
# Paths where git repositories of XCI Components will be cloned on the OPNFV host
#-------------------------------------------------------------------------------
@@ -29,8 +53,15 @@ export LOG_PATH=${LOG_PATH:-${XCI_PATH}/xci/logs}
# This currently matches to OSA Ansible version but it doesn't really
# matter since bifrost and OSA will use the Ansible version they need.
# Overall, it's better to use what OSA supports so we can use new features.
-export XCI_ANSIBLE_PIP_VERSION=2.3.2.0
+# OSA currently has 2.5.5 which breaks due to missing
+# https://github.com/ansible/ansible/commit/67859c3476501d5d9839fd904aec55468d09593a
+# This was fixed in 2.5.6 so remove the pin when OSA updates to newer version.
+#export XCI_ANSIBLE_PIP_VERSION=${XCI_ANSIBLE_PIP_VERSION:-$(curl -s https://raw.githubusercontent.com/openstack/openstack-ansible/${OPENSTACK_OSA_VERSION}/scripts/bootstrap-ansible.sh | grep ansible== | sed -n "s/.*ansible==\([0-9.]*\).*/\1/p")}
+export XCI_ANSIBLE_PIP_VERSION="2.7.8"
+
export ANSIBLE_HOST_KEY_CHECKING=False
-# subject of the certificate
-export XCI_SSL_SUBJECT=${XCI_SSL_SUBJECT:-"/C=US/ST=California/L=San Francisco/O=IT/CN=xci.releng.opnfv.org"}
export DEPLOY_SCENARIO=${DEPLOY_SCENARIO:-"os-nosdn-nofeature"}
+# attempt to sync Ansible version used by Kubespray with the rest
+export XCI_KUBE_ANSIBLE_PIP_VERSION=$XCI_ANSIBLE_PIP_VERSION
+# OpenStack global requirements version
+export OPENSTACK_REQUIREMENTS_VERSION=${OPENSTACK_REQUIREMENTS_VERSION:-$(awk '/requirements_git_install_branch:/ {print $2}' ${XCI_PATH}/xci/installer/osa/files/openstack_services.yml)}
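[Editor's note] A sketch of what the OPENSTACK_REQUIREMENTS_VERSION line above extracts; the value shown in the comment is hypothetical:

    # openstack_services.yml is assumed to contain a line like:
    #   requirements_git_install_branch: <sha-or-branch>
    awk '/requirements_git_install_branch:/ {print $2}' \
        xci/installer/osa/files/openstack_services.yml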
diff --git a/xci/config/ha-vars b/xci/config/ha-vars
index 4c7cd872..4c40fb33 100755
--- a/xci/config/ha-vars
+++ b/xci/config/ha-vars
@@ -9,10 +9,11 @@
#-------------------------------------------------------------------------------
# Configure VM Nodes
#-------------------------------------------------------------------------------
-export TEST_VM_NUM_NODES=6
-export TEST_VM_NODE_NAMES="opnfv controller00 controller01 controller02 compute00 compute01"
+export NUM_NODES=6
+[[ "$INSTALLER_TYPE" == "osa" ]] && export NODE_NAMES="opnfv controller00 controller01 controller02 compute00 compute01"
+[[ "$INSTALLER_TYPE" == "kubespray" ]] && export NODE_NAMES="opnfv master1 master2 master3 node1 node2"
+[[ "$INSTALLER_TYPE" == "osh" ]] && export NODE_NAMES="opnfv master1 master2 master3 node1 node2"
export VM_DOMAIN_TYPE=${VM_DOMAIN_TYPE:-kvm}
export VM_CPU=${VM_CPU:-6}
export VM_DISK=${VM_DISK:-80}
export VM_MEMORY_SIZE=${VM_MEMORY_SIZE:-16384}
-export VM_DISK_CACHE=none
diff --git a/xci/config/mini-vars b/xci/config/mini-vars
index 48b38ce8..aaa4cb88 100755
--- a/xci/config/mini-vars
+++ b/xci/config/mini-vars
@@ -9,10 +9,11 @@
#-------------------------------------------------------------------------------
# Configure VM Nodes
#-------------------------------------------------------------------------------
-export TEST_VM_NUM_NODES=3
-export TEST_VM_NODE_NAMES="opnfv controller00 compute00"
+export NUM_NODES=3
+[[ "$INSTALLER_TYPE" == "osa" ]] && export NODE_NAMES="opnfv controller00 compute00"
+[[ "$INSTALLER_TYPE" == "kubespray" ]] && export NODE_NAMES="opnfv master1 node1"
+[[ "$INSTALLER_TYPE" == "osh" ]] && export NODE_NAMES="opnfv master1 node1"
export VM_DOMAIN_TYPE=${VM_DOMAIN_TYPE:-kvm}
export VM_CPU=${VM_CPU:-6}
export VM_DISK=${VM_DISK:-80}
export VM_MEMORY_SIZE=${VM_MEMORY_SIZE:-12288}
-export VM_DISK_CACHE=none
diff --git a/xci/config/noha-vars b/xci/config/noha-vars
index cb8901b8..e887ddb8 100755
--- a/xci/config/noha-vars
+++ b/xci/config/noha-vars
@@ -9,10 +9,11 @@
#-------------------------------------------------------------------------------
# Configure VM Nodes
#-------------------------------------------------------------------------------
-export TEST_VM_NUM_NODES=4
-export TEST_VM_NODE_NAMES="opnfv controller00 compute00 compute01"
+export NUM_NODES=4
+[[ "$INSTALLER_TYPE" == "osa" ]] && export NODE_NAMES="opnfv controller00 compute00 compute01"
+[[ "$INSTALLER_TYPE" == "kubespray" ]] && export NODE_NAMES="opnfv master1 node1 node2"
+[[ "$INSTALLER_TYPE" == "osh" ]] && export NODE_NAMES="opnfv master1 node1 node2"
export VM_DOMAIN_TYPE=${VM_DOMAIN_TYPE:-kvm}
export VM_CPU=${VM_CPU:-6}
export VM_DISK=${VM_DISK:-80}
export VM_MEMORY_SIZE=${VM_MEMORY_SIZE:-12288}
-export VM_DISK_CACHE=none
diff --git a/xci/config/pinned-versions b/xci/config/pinned-versions
index 023c9999..440972ae 100755
--- a/xci/config/pinned-versions
+++ b/xci/config/pinned-versions
@@ -25,7 +25,31 @@
#-------------------------------------------------------------------------------
# use releng-xci from master until the development work with the sandbox is complete
export OPNFV_RELENG_VERSION="master"
-# HEAD of bifrost "master" as of 11.12.2017
-export OPENSTACK_BIFROST_VERSION=${OPENSTACK_BIFROST_VERSION:-"8b4c956bf0ec6c1784e41af2a0598cb49c41461f"}
-# HEAD of osa "master" as of 11.12.2017
-export OPENSTACK_OSA_VERSION=${OPENSTACK_OSA_VERSION:-"7b3aac28a0a87e5966527829f6b0abcbc2303cc7"}
+# HEAD of bifrost "master" as of 02.07.2019
+export OPENSTACK_BIFROST_VERSION=${OPENSTACK_BIFROST_VERSION:-"cd559480c95867d272b8a32240e50c390646665b"}
+# HEAD of ironic "master" as of 02.07.2019
+export BIFROST_IRONIC_VERSION=${BIFROST_IRONIC_VERSION:-"1beb8068f95f90a570c72b82f6e518110312b696"}
+# HEAD of ironic-client "master" as of 02.07.2019
+export BIFROST_IRONIC_CLIENT_VERSION=${BIFROST_IRONIC_CLIENT_VERSION:-"eae60397bfcbed322b2121f77c35ac74d0c6b74c"}
+# HEAD of ironic-inspector "master" as of 02.07.2019
+export BIFROST_IRONIC_INSPECTOR_VERSION=${BIFROST_IRONIC_INSPECTOR_VERSION:-"0b38536d1c9ab92952e6ecd069ea13facf012830"}
+# HEAD of ironic-inspector-client "master" as of 02.07.2019
+export BIFROST_IRONIC_INSPECTOR_CLIENT_VERSION=${BIFROST_IRONIC_INSPECTOR_CLIENT_VERSION:-"81ae133bd570ea7359b4797ee5699d2d4233b445"}
+# HEAD of osa "stable/rocky" as of 04.01.2019
+export OPENSTACK_OSA_VERSION=${OPENSTACK_OSA_VERSION:-"2087cd98f28b35f655ca398d25d2a6c71e38328e"}
+export OPENSTACK_OSH_VERSION="rocky"
+# HEAD of osh "master" as of 17.07.2019
+export OSH_VERSION=${OSH_VERSION:-"dadf9946e076df2b09556f4a18107dc487788cdd"}
+# HEAD of osh-infra "master" as of 16.07.2019
+export OSH_INFRA_VERSION=${OSH_INFRA_VERSION:-"e96bdd9fb6235573acf5d4d1d019dca1e1446b7d"}
+export KEEPALIVED_VERSION=$(grep -E '.*name: keepalived' -A 3 \
+ ${XCI_PATH}/xci/installer/osa/files/ansible-role-requirements.yml \
+ | tail -n1 | sed -n 's/\(^.*: \)\([0-9a-z].*$\)/\2/p')
+export HAPROXY_VERSION=$(grep -E '.*name: haproxy_server' -A 3 \
+ ${XCI_PATH}/xci/installer/osa/files/ansible-role-requirements.yml \
+ | tail -n1 | sed -n 's/\(^.*: \)\([0-9a-z].*$\)/\2/p')
+# Kubespray release v2.11.0 dated 31.08.2019
+export KUBESPRAY_VERSION=${KUBESPRAY_VERSION:-"v2.11.0"}
+# Kubernetes version supported by the pinned kubespray version
+# this is needed for pulling in kubectl
+export KUBERNETES_VERSION=${KUBERNETES_VERSION:-"v1.15.3"}
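[Editor's note] The KEEPALIVED_VERSION and HAPROXY_VERSION pipelines above scrape the pinned version out of the role requirements file; a sketch against a hypothetical stanza:

    # ansible-role-requirements.yml is assumed to contain entries such as:
    #   - name: keepalived
    #     scm: git
    #     src: https://github.com/evrardjp/ansible-keepalived
    #     version: <tag-or-sha>     # this is what the sed expression captures
    grep -E '.*name: keepalived' -A 3 xci/installer/osa/files/ansible-role-requirements.yml \
        | tail -n1 | sed -n 's/\(^.*: \)\([0-9a-z].*$\)/\2/p'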
diff --git a/xci/config/user-vars b/xci/config/user-vars
index 64f2882b..d3d7b2f1 100755
--- a/xci/config/user-vars
+++ b/xci/config/user-vars
@@ -20,10 +20,36 @@
# or
# export XCI_FLAVOR="ha"
#-------------------------------------------------------------------------------
-export XCI_FLAVOR=${XCI_FLAVOR:-aio}
-export XCI_DISTRO=${XCI_DISTRO:-$(source /etc/os-release &>/dev/null || source /usr/lib/os-release &>/dev/null; echo ${ID,,})}
+export XCI_FLAVOR=${XCI_FLAVOR:-mini}
+export XCI_DISTRO=${XCI_DISTRO:-$(source /etc/os-release &>/dev/null || source /usr/lib/os-release &>/dev/null; ID=${ID%%-*}; echo ${ID,,})}
export XCI_CEPH_ENABLED=${XCI_CEPH_ENABLED:-false}
-export XCI_INSTALLER=${XCI_INSTALLER:-osa}
+
+#-------------------------------------------------------------------------------
+# Set INSTALLER
+#-------------------------------------------------------------------------------
+# Currently, there are three kinds of installers in XCI: osa, kubespray and osh
+# Examples:
+# export INSTALLER_TYPE="osa"
+# or
+# export INSTALLER_TYPE="kubespray"
+export INSTALLER_TYPE=${INSTALLER_TYPE:-osa}
+
+# Wait for upstream openstack-helm (OSH) to support openSUSE
+if [ "$XCI_DISTRO" == "opensuse" ] && [ "$INSTALLER_TYPE" == "osh" ]; then
+ export XCI_DISTRO=ubuntu-bionic
+ export OSH_DISTRO=opensuse
+elif [ "$XCI_DISTRO" == "ubuntu" ] && [ "$INSTALLER_TYPE" == "osh" ]; then
+ export OSH_DISTRO=ubuntu
+fi
+
+#-------------------------------------------------------------------------------
+# Set DEPLOYMENT
+#-------------------------------------------------------------------------------
+# Currently, there is a single infra deployment method: bifrost
+# Examples:
+# export INFRA_DEPLOYMENT="bifrost"
+export INFRA_DEPLOYMENT=${INFRA_DEPLOYMENT:-bifrost}
+
#-------------------------------------------------------------------------------
# Configure some other stuff
@@ -31,10 +57,10 @@ export XCI_INSTALLER=${XCI_INSTALLER:-osa}
# Set the verbosity for ansible
#
# Examples:
-# XCI_ANSIBLE_VERBOSITY="-v"
-# or
-# XCI_ANSIBLE_VERBOSITY="-vvvv"
-export XCI_ANSIBLE_VERBOSITY=${XCI_ANSIBLE_VERBOSITY:-""}
+# XCI_ANSIBLE_PARAMS="-v -e foo=bar"
+export XCI_ANSIBLE_PARAMS=${XCI_ANSIBLE_PARAMS:-""}
export RUN_TEMPEST=${RUN_TEMPEST:-false}
+export CORE_OPENSTACK_INSTALL=${CORE_OPENSTACK_INSTALL:-false}
+export BIFROST_CREATE_IMAGE_VIA_DIB=${BIFROST_CREATE_IMAGE_VIA_DIB:-true}
# Set this to true to force XCI to re-create the target OS images
export CLEAN_DIB_IMAGES=${CLEAN_DIB_IMAGES:-false}
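[Editor's note] A short usage sketch of the reworked knobs above; the values are examples only:

    export INSTALLER_TYPE=kubespray              # replaces XCI_INSTALLER
    export XCI_ANSIBLE_PARAMS="-vv -e foo=bar"   # replaces XCI_ANSIBLE_VERBOSITY
    ./xci-deploy.sh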
diff --git a/xci/files/install-ansible.sh b/xci/files/install-ansible.sh
deleted file mode 100644
index 979d9904..00000000
--- a/xci/files/install-ansible.sh
+++ /dev/null
@@ -1,161 +0,0 @@
-#!/bin/bash
-# NOTE(hwoarang): Most parts of this this file were taken from the
-# bifrost repository (scripts/install-deps.sh). This script contains all
-# the necessary distro specific code to install ansible and it's dependencies.
-
-set -eu
-
-declare -A PKG_MAP
-
-# workaround: for latest bindep to work, it needs to use en_US local
-export LANG=c
-
-CHECK_CMD_PKGS=(
- gcc
- libffi
- libopenssl
- lsb-release
- make
- net-tools
- python-devel
- python
- venv
- wget
-)
-
-source /etc/os-release || source /usr/lib/os-release
-case ${ID,,} in
- *suse)
- OS_FAMILY="Suse"
- INSTALLER_CMD="sudo -H -E zypper -q install -y --no-recommends"
- CHECK_CMD="zypper search --match-exact --installed"
- PKG_MAP=(
- [gcc]=gcc
- [libffi]=libffi-devel
- [libopenssl]=libopenssl-devel
- [lsb-release]=lsb-release
- [make]=make
- [net-tools]=net-tools
- [python]=python
- [python-devel]=python-devel
- [venv]=python-virtualenv
- [wget]=wget
- )
- EXTRA_PKG_DEPS=( python-xml )
- sudo zypper -n ref
- # NOTE (cinerama): we can't install python without removing this package
- # if it exists
- if $(${CHECK_CMD} patterns-openSUSE-minimal_base-conflicts &> /dev/null); then
- sudo -H zypper remove -y patterns-openSUSE-minimal_base-conflicts
- fi
- ;;
-
- ubuntu|debian)
- OS_FAMILY="Debian"
- export DEBIAN_FRONTEND=noninteractive
- INSTALLER_CMD="sudo -H -E apt-get -y -q=3 install"
- CHECK_CMD="dpkg -l"
- PKG_MAP=(
- [gcc]=gcc
- [libffi]=libffi-dev
- [libopenssl]=libssl-dev
- [lsb-release]=lsb-release
- [make]=make
- [net-tools]=net-tools
- [python]=python-minimal
- [python-devel]=libpython-dev
- [venv]=python-virtualenv
- [wget]=wget
- )
- EXTRA_PKG_DEPS=()
- sudo apt-get update
- ;;
-
- rhel|fedora|centos)
- OS_FAMILY="RedHat"
- PKG_MANAGER=$(which dnf || which yum)
- INSTALLER_CMD="sudo -H -E ${PKG_MANAGER} -q -y install"
- CHECK_CMD="rpm -q"
- PKG_MAP=(
- [gcc]=gcc
- [libffi]=libffi-devel
- [libopenssl]=openssl-devel
- [lsb-release]=redhat-lsb
- [make]=make
- [net-tools]=net-tools
- [python]=python
- [python-devel]=python-devel
- [venv]=python-virtualenv
- [wget]=wget
- )
- sudo yum updateinfo
- EXTRA_PKG_DEPS=()
- ;;
-
- *) echo "ERROR: Supported package manager not found. Supported: apt, dnf, yum, zypper"; exit 1;;
-esac
-
-if ! $(python --version &>/dev/null); then
- ${INSTALLER_CMD} ${PKG_MAP[python]}
-fi
-if ! $(gcc -v &>/dev/null); then
- ${INSTALLER_CMD} ${PKG_MAP[gcc]}
-fi
-if ! $(wget --version &>/dev/null); then
- ${INSTALLER_CMD} ${PKG_MAP[wget]}
-fi
-
-if ! $(python -m virtualenv --version &>/dev/null); then
- ${INSTALLER_CMD} ${PKG_MAP[venv]}
-fi
-
-for pkg in ${CHECK_CMD_PKGS[@]}; do
- if ! $(${CHECK_CMD} ${PKG_MAP[$pkg]} &>/dev/null); then
- ${INSTALLER_CMD} ${PKG_MAP[$pkg]}
- fi
-done
-
-if [ -n "${EXTRA_PKG_DEPS-}" ]; then
- for pkg in ${EXTRA_PKG_DEPS}; do
- if ! $(${CHECK_CMD} ${pkg} &>/dev/null); then
- ${INSTALLER_CMD} ${pkg}
- fi
- done
-fi
-
-# If we're using a venv, we need to work around sudo not
-# keeping the path even with -E.
-PYTHON=$(which python)
-
-# To install python packages, we need pip.
-#
-# We can't use the apt packaged version of pip since
-# older versions of pip are incompatible with
-# requests, one of our indirect dependencies (bug 1459947).
-#
-# Note(cinerama): We use pip to install an updated pip plus our
-# other python requirements. pip breakages can seriously impact us,
-# so we've chosen to install/upgrade pip here rather than in
-# requirements (which are synced automatically from the global ones)
-# so we can quickly and easily adjust version parameters.
-# See bug 1536627.
-#
-# Note(cinerama): If pip is linked to pip3, the rest of the install
-# won't work. Remove the alternatives. This is due to ansible's
-# python 2.x requirement.
-if [[ $(readlink -f /etc/alternatives/pip) =~ "pip3" ]]; then
- sudo -H update-alternatives --remove pip $(readlink -f /etc/alternatives/pip)
-fi
-
-if ! which pip; then
- wget -O /tmp/get-pip.py https://bootstrap.pypa.io/get-pip.py
- sudo -H -E ${PYTHON} /tmp/get-pip.py
-fi
-
-PIP=$(which pip)
-echo "Using pip: $(${PIP} --version)"
-sudo -H -E ${PIP} -q install --upgrade virtualenv
-sudo -H -E ${PIP} -q install --upgrade pip
-# upgrade setuptools, as latest version is needed to install some projects
-sudo -H -E ${PIP} -q install --upgrade setuptools
-${PIP} install -q --user --upgrade ansible==$XCI_ANSIBLE_PIP_VERSION
diff --git a/xci/files/requirements.yml b/xci/files/requirements.yml
new file mode 100644
index 00000000..1e097b09
--- /dev/null
+++ b/xci/files/requirements.yml
@@ -0,0 +1,10 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 Intel Corporation.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- src: ruzickap.proxy_settings
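[Editor's note] This is a standard ansible-galaxy role requirements file; assuming it is consumed in the usual way, the role would be installed with:

    ansible-galaxy install -r xci/files/requirements.yml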
diff --git a/bifrost/scripts/destroy-env.sh b/xci/files/xci-destroy-env.sh
index 3ee66642..058d6569 100755
--- a/bifrost/scripts/destroy-env.sh
+++ b/xci/files/xci-destroy-env.sh
@@ -14,28 +14,39 @@ if [[ $(whoami) != "root" ]]; then
exit 1
fi
+flavors=(aio mini noha ha)
+
# Start fresh
rm -rf /opt/stack
# HOME is normally set by sudo -H
rm -rf ${HOME}/.config/openstack
+rm -rf ${HOME}/.ansible
+# keepalived role fails ansible lint when cached
+rm -rf ${HOME}/releng-xci/xci/playbooks/roles/keepalived
+# Wipe repos
+rm -rf ${XCI_CACHE}/repos
-# bifrost installs everything on venv so we need to look there if virtualbmc is not installed on the host.
-if which vbmc &>/dev/null || { [[ -e /opt/stack/bifrost/bin/activate ]] && source /opt/stack/bifrost/bin/activate; }; then
- # Delete all libvirt VMs and hosts from vbmc (look for a port number)
- for vm in $(vbmc list | awk '/[0-9]/{{ print $2 }}'); do
- virsh destroy $vm || true
- virsh undefine $vm || true
- vbmc delete $vm
- done
- which vbmc &>/dev/null || { [[ -e /opt/stack/bifrost/bin/activate ]] && deactivate; }
+if which ${XCI_VENV}/bin/vbmc &>/dev/null; then
+ # Delete all libvirt VMs and hosts from vbmc (look for a port number)
+ for vm in $(${XCI_VENV}/bin/vbmc list | awk '/[0-9]/{{ print $2 }}'); do
+ if which virsh &>/dev/null; then
+ virsh destroy $vm || true
+ virsh undefine $vm || true
+ fi
+ ${XCI_VENV}/bin/vbmc delete $vm
+ done
fi
-# Destroy all XCI VMs if the previous operation failed
-[[ -n ${XCI_FLAVOR} ]] && \
- for vm in ${TEST_VM_NODE_NAMES}; do
- virsh destroy $vm || true
- virsh undefine $vm || true
+# Destroy all XCI VMs on all flavors
+for varfile in ${flavors[@]}; do
+ source ${XCI_PATH}/xci/config/${varfile}-vars
+ for vm in ${NODE_NAMES}; do
+ if which virsh &>/dev/null; then
+ virsh destroy $vm &> /dev/null || true
+ virsh undefine $vm &> /dev/null || true
+ fi
done
+done
service ironic-conductor stop || true
@@ -71,3 +82,9 @@ service libvirtd restart
service ironic-api restart || true
service ironic-conductor start || true
service ironic-inspector restart || true
+
+rm -rf ${XCI_VENV}
+# We also need to clear up previous vbmc config dirs
+rm -rf ${HOME}/.vbmc
+
+# vim: set ts=4 sw=4 expandtab:
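[Editor's note] Since the rewritten cleanup script sources every flavor file to enumerate NODE_NAMES, it expects XCI_PATH, XCI_CACHE and XCI_VENV in its environment and must run as root; a hedged invocation sketch from a releng-xci checkout:

    export XCI_PATH=$(pwd) XCI_CACHE=$(pwd)/.cache XCI_VENV=$(pwd)/venv
    sudo -H -E bash xci/files/xci-destroy-env.sh   # -E preserves the XCI_* variables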
diff --git a/xci/files/xci-lib.sh b/xci/files/xci-lib.sh
new file mode 100644
index 00000000..860153b9
--- /dev/null
+++ b/xci/files/xci-lib.sh
@@ -0,0 +1,298 @@
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 SUSE LINUX GmbH.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Avoid double sourcing the file
+[[ -n ${XCI_LIB_SOURCED:-} ]] && return 0 || export XCI_LIB_SOURCED=1
+
+function usage() {
+ echo "
+Usage: $(basename ${0}) [-i <idf>] [-p <pdf>]
+
+ -h: This message
+ -i: Installer Descriptor File (IDF). (Default ${XCI_PATH}/xci/var/idf.yml)
+ -p: Pod Descriptor File (PDF). (Default ${XCI_PATH}/xci/var/pdf.yml)
+ "
+ exit 0
+}
+
+function parse_cmdline_opts() {
+ IDF=${XCI_PATH}/xci/var/idf.yml
+ PDF=${XCI_PATH}/xci/var/pdf.yml
+
+ while getopts ":hi:p:" o; do
+ case "${o}" in
+ i) IDF="${OPTARG}" ;;
+ p) PDF="${OPTARG}" ;;
+ h) usage ;;
+ *) echo "ERROR: Invalid option '-${OPTARG}'"; usage ;;
+ esac
+ done
+
+ # Do all the exports
+ export PDF=$(realpath ${PDF})
+ export IDF=$(realpath ${IDF})
+}
+
+function bootstrap_xci_env() {
+ # Declare our virtualenv
+ export XCI_VENV=${XCI_PATH}/venv/
+ # source user vars
+ source $XCI_PATH/xci/config/user-vars
+ # source pinned versions
+ source $XCI_PATH/xci/config/pinned-versions
+ # source flavor configuration
+ source "$XCI_PATH/xci/config/${XCI_FLAVOR}-vars"
+ # source installer configuration
+ source "$XCI_PATH/xci/installer/${INSTALLER_TYPE}/env" &>/dev/null || true
+ # source xci configuration
+ source $XCI_PATH/xci/config/env-vars
+    # set the baremetal variable to true if the vendor in the PDF is not libvirt
+ grep -o vendor.* ${PDF} | grep -q libvirt && export BAREMETAL=false || export BAREMETAL=true
+}
+
+function install_ansible() {
+ set -eu
+
+ # Use the upper-constraints file from the pinned requirements repository.
+ local uc="https://raw.githubusercontent.com/openstack/requirements/${OPENSTACK_REQUIREMENTS_VERSION}/upper-constraints.txt"
+ local osa_uc="https://raw.githubusercontent.com/openstack/openstack-ansible/${OPENSTACK_OSA_VERSION}/global-requirement-pins.txt"
+ local install_map
+
+ declare -A PKG_MAP
+
+    # workaround: for latest bindep to work, it needs to use the en_US locale
+ export LANG="C"
+
+ CHECK_CMD_PKGS=(
+ gcc
+ libffi
+ libopenssl
+ lsb-release
+ make
+ net-tools
+ python-devel
+ python
+ python-pyyaml
+ venv
+ wget
+ curl
+ )
+
+ source /etc/os-release || source /usr/lib/os-release
+ case ${ID,,} in
+ *suse*)
+ OS_FAMILY="Suse"
+ INSTALLER_CMD="sudo -H -E zypper -q install -y --no-recommends"
+ CHECK_CMD="zypper search --match-exact --installed"
+ PKG_MAP=(
+ [gcc]=gcc
+ [libffi]=libffi-devel
+ [libopenssl]=libopenssl-devel
+ [lsb-release]=lsb-release
+ [make]=make
+ [net-tools]=net-tools
+ [pip]=python-pip
+ [python]=python
+ [python-devel]=python-devel
+ [python-pyyaml]=python-PyYAML
+ [venv]=python-virtualenv
+ [wget]=wget
+ [curl]=curl
+ )
+ EXTRA_PKG_DEPS=( python-xml )
+ sudo zypper -n ref
+ # NOTE (cinerama): we can't install python without removing this package
+ # if it exists
+ if $(${CHECK_CMD} patterns-openSUSE-minimal_base-conflicts &> /dev/null); then
+ sudo -H zypper remove -y patterns-openSUSE-minimal_base-conflicts
+ fi
+ ;;
+
+ ubuntu|debian)
+ OS_FAMILY="Debian"
+ export DEBIAN_FRONTEND=noninteractive
+ INSTALLER_CMD="sudo -H -E apt-get -y -q=3 install"
+ CHECK_CMD="dpkg -l"
+ PKG_MAP=(
+ [gcc]=gcc
+ [libffi]=libffi-dev
+ [libopenssl]=libssl-dev
+ [lsb-release]=lsb-release
+ [make]=make
+ [net-tools]=net-tools
+ [pip]=python-pip
+ [python]=python-minimal
+ [python-devel]=libpython-dev
+ [python-pyyaml]=python-yaml
+ [venv]=python-virtualenv
+ [wget]=wget
+ [curl]=curl
+ )
+ EXTRA_PKG_DEPS=( apt-utils )
+ sudo apt-get update -qq > /dev/null
+ ;;
+
+ rhel|fedora|centos)
+ OS_FAMILY="RedHat"
+ PKG_MANAGER=$(which dnf || which yum)
+ INSTALLER_CMD="sudo -H -E ${PKG_MANAGER} -q -y install"
+ CHECK_CMD="rpm -q"
+ PKG_MAP=(
+ [gcc]=gcc
+ [libffi]=libffi-devel
+ [libopenssl]=openssl-devel
+ [lsb-release]=redhat-lsb
+ [make]=make
+ [net-tools]=net-tools
+ [pip]=python2-pip
+ [python]=python
+ [python-devel]=python-devel
+ [python-pyyaml]=PyYAML
+ [venv]=python-virtualenv
+ [wget]=wget
+ [curl]=curl
+ )
+ sudo $PKG_MANAGER updateinfo > /dev/null
+ EXTRA_PKG_DEPS=( deltarpm )
+ ;;
+
+ *) echo "ERROR: Supported package manager not found. Supported: apt, dnf, yum, zypper"; exit 1;;
+ esac
+
+    # Build installation map
+ for pkgmap in ${CHECK_CMD_PKGS[@]}; do
+ install_map+=(${PKG_MAP[$pkgmap]} )
+ done
+
+ install_map+=(${EXTRA_PKG_DEPS[@]} )
+
+ ${INSTALLER_CMD} ${install_map[@]} > /dev/null
+
+ # We need to prepare our virtualenv now
+ virtualenv --quiet --no-site-packages ${XCI_VENV}
+ set +u
+ source ${XCI_VENV}/bin/activate
+ set -u
+
+ # We are inside the virtualenv now so we should be good to use pip and python from it.
+ pip -q install --upgrade pip==9.0.3 # We need a version which supports the '-c' parameter
+ pip -q install --upgrade -c $uc -c $osa_uc ara==0.16.4 virtualenv pip setuptools shade ansible==$XCI_ANSIBLE_PIP_VERSION ansible-lint==3.4.21
+
+ ara_location=$(python -c "import os,ara; print(os.path.dirname(ara.__file__))")
+ export ANSIBLE_CALLBACK_PLUGINS="/etc/ansible/roles/plugins/callback:${ara_location}/plugins/callbacks"
+}
+
+ansible_lint() {
+ set -eu
+ local playbooks_dir=(xci/playbooks xci/installer/osa/playbooks xci/installer/kubespray/playbooks xci/installer/osh/playbooks)
+ # Extract role from scenario information
+ local testing_role=$(sed -n "/^- scenario: ${DEPLOY_SCENARIO}$/,/^$/p" ${XCI_PATH}/xci/opnfv-scenario-requirements.yml | grep role | rev | cut -d '/' -f -1 | rev)
+
+ # clear XCI_CACHE
+ rm -rf ${XCI_CACHE}/repos/openstack-ansible-tests
+
+ # Clone OSA rules too
+ git clone --quiet --depth 1 https://github.com/openstack/openstack-ansible-tests.git \
+ ${XCI_CACHE}/repos/openstack-ansible-tests
+
+    # Because of https://github.com/willthames/ansible-lint/issues/306, ansible-lint does not understand
+    # imports and includes yet, so we trick it with a fake playbook in order to test our roles. We
+    # only test the role for the scenario we are testing
+ echo "Building testing playbook for role: ${testing_role}"
+ cat > ${XCI_PATH}/xci/playbooks/test-playbook.yml << EOF
+ - name: Testing playbook
+ hosts: localhost
+ roles:
+ - ${testing_role}
+EOF
+
+ # Only check our own playbooks
+ for dir in ${playbooks_dir[@]}; do
+ for play in $(ls ${XCI_PATH}/${dir}/*.yml); do
+ echo -en "Checking '${play}' playbook..."
+ ansible-lint --nocolor -R -r \
+ ${XCI_CACHE}/repos/openstack-ansible-tests/ansible-lint ${play}
+ echo -en "[OK]\n"
+ done
+ done
+
+ # Remove testing playbook
+ rm ${XCI_PATH}/xci/playbooks/test-playbook.yml
+}
+
+collect_xci_logs() {
+ echo "----------------------------------"
+ echo "Info: Collecting XCI logs"
+ echo "----------------------------------"
+
+ # Create the ARA log directory and store the sqlite source database
+ mkdir -p ${LOG_PATH}/ara/ ${LOG_PATH}/opnfv/ara
+
+ rsync -q -a "${HOME}/.ara/ansible.sqlite" "${LOG_PATH}/ara/"
+ rsync -q -a root@${OPNFV_HOST_IP}:releng-xci/${LOG_PATH#$XCI_PATH/}/ ${LOG_PATH}/opnfv/ &> /dev/null || true
+ rsync -q -a root@${OPNFV_HOST_IP}:.ara/ansible.sqlite ${LOG_PATH}/opnfv/ara/ &> /dev/null || true
+
+ sudo -H -E bash -c 'chown ${SUDO_UID}:${SUDO_GID} -R ${LOG_PATH}/'
+}
+
+submit_bug_report() {
+ cd ${XCI_PATH}
+ echo ""
+ echo "-------------------------------------------------------------------------"
+ echo "Oh nooooo! The XCI deployment failed miserably :-("
+ echo ""
+ echo "If you need help, please choose one of the following options"
+ echo "* #opnfv-pharos @ freenode network"
+ echo "* opnfv-tech-discuss mailing list (https://lists.opnfv.org/mailman/listinfo/opnfv-tech-discuss)"
+ echo " - Please prefix the subject with [XCI]"
+ echo "* https://jira.opnfv.org (Release Engineering project)"
+ echo ""
+ echo "Do not forget to submit the following information on your bug report:"
+ echo ""
+ git diff --quiet && echo "releng-xci tree status: clean" || echo "releng-xci tree status: local modifications"
+ echo "opnfv/releng-xci version: $(git rev-parse HEAD)"
+ echo "openstack/bifrost version: $OPENSTACK_BIFROST_VERSION"
+ echo "openstack/openstack-ansible version: $OPENSTACK_OSA_VERSION"
+ echo "xci flavor: $XCI_FLAVOR"
+ echo "xci installer: $INSTALLER_TYPE"
+ echo "xci scenario: $DEPLOY_SCENARIO"
+ echo "Environment variables:"
+ env | grep --color=never '\(OPNFV\|XCI\|INSTALLER_TYPE\|OPENSTACK\|SCENARIO\|ANSIBLE\|BIFROST\|DIB\)'
+ echo "-------------------------------------------------------------------------"
+}
+
+log_xci_information() {
+ local scenario_version scenario_sha
+
+ cd ${XCI_SCENARIOS_CACHE}/${DEPLOY_SCENARIO}
+ scenario_sha=$(git rev-parse HEAD)
+ scenario_version=$(git describe --exact 2>/dev/null || echo "master")
+ cd -
+ echo "Info: Starting XCI Deployment"
+ echo "Info: Deployment parameters"
+ echo "-------------------------------------------------------------------------"
+ echo "OPNFV scenario: $DEPLOY_SCENARIO"
+ echo "Scenario version: ${scenario_version} (sha: ${scenario_sha})"
+ echo "xci flavor: $XCI_FLAVOR"
+ echo "xci installer: $INSTALLER_TYPE"
+ echo "infra deployment: $INFRA_DEPLOYMENT"
+ echo "opnfv/releng-xci version: $(git rev-parse HEAD)"
+ [[ "$INFRA_DEPLOYMENT" == "bifrost" ]] && echo "openstack/bifrost version: $OPENSTACK_BIFROST_VERSION"
+ [[ "$INSTALLER_TYPE" == "osa" ]] && echo "openstack/openstack-ansible version: $OPENSTACK_OSA_VERSION"
+ [[ "$INSTALLER_TYPE" == "kubespray" ]] && echo "kubespray version: $KUBESPRAY_VERSION"
+ [[ "$INSTALLER_TYPE" == "osh" ]] && echo "kubespray version: $KUBESPRAY_VERSION"
+ echo "-------------------------------------------------------------------------"
+}
+
+exit_trap() {
+ submit_bug_report
+ collect_xci_logs
+}
+
+# vim: set ts=4 sw=4 expandtab:
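[Editor's note] A sketch of how a driver script is expected to consume this library; the trap wiring is an assumption based on exit_trap above, the real call site being xci-deploy.sh:

    source xci/files/xci-lib.sh   # guarded against double sourcing
    trap exit_trap ERR            # assumed wiring: report bug info, then collect logs on failure
    parse_cmdline_opts "$@"       # resolves and exports IDF/PDF
    bootstrap_xci_env             # sources user/pinned/flavor/installer/env vars
    install_ansible               # creates ${XCI_VENV} with the pinned ansible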
diff --git a/bifrost/README.md b/xci/infra/bifrost/README.md
index 60d07244..60d07244 100644
--- a/bifrost/README.md
+++ b/xci/infra/bifrost/README.md
diff --git a/xci/infra/bifrost/infra-provision.sh b/xci/infra/bifrost/infra-provision.sh
new file mode 100644
index 00000000..b0617733
--- /dev/null
+++ b/xci/infra/bifrost/infra-provision.sh
@@ -0,0 +1,86 @@
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 SUSE LINUX GmbH.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+#-------------------------------------------------------------------------------
+# Start provisioning VM nodes
+#-------------------------------------------------------------------------------
+# This playbook
+# - removes directories that were created by the previous xci run
+# - clones opnfv/releng-xci and openstack/bifrost repositories
+# - combines opnfv/releng-xci and openstack/bifrost scripts/playbooks
+# - destroys VMs, removes ironic db, leases, logs
+# - creates and provisions VMs for the chosen flavor
+#-------------------------------------------------------------------------------
+
+BIFROST_ROOT_DIR="$(dirname $(realpath ${BASH_SOURCE[0]}))"
+export ANSIBLE_ROLES_PATH="$HOME/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles:${XCI_PATH}/xci/playbooks/roles:${XCI_CACHE}/repos/bifrost/playbooks/roles"
+export ANSIBLE_LIBRARY="$HOME/.ansible/plugins/modules:/usr/share/ansible/plugins/modules:${XCI_CACHE}/repos/bifrost/playbooks/library"
+
+echo "Info: Create XCI VM resources"
+echo "-------------------------------------------------------------------------"
+
+ansible-playbook ${XCI_ANSIBLE_PARAMS} \
+ -i ${XCI_PATH}/xci/playbooks/dynamic_inventory.py \
+ -e num_nodes=${NUM_NODES} \
+ -e vm_domain_type=${VM_DOMAIN_TYPE} \
+ -e baremetal_json_file=/tmp/baremetal.json \
+ -e xci_distro=${XCI_DISTRO} \
+ -e pdf_file=${PDF} \
+ -e idf_file=${IDF} \
+ ${BIFROST_ROOT_DIR}/playbooks/xci-setup-nodes.yml
+
+
+ansible-playbook ${XCI_ANSIBLE_PARAMS} \
+ --private-key=${XCI_PATH}/xci/scripts/vm/id_rsa_for_dib \
+ --user=devuser \
+ -i ${XCI_PATH}/xci/playbooks/dynamic_inventory.py \
+ ${BIFROST_ROOT_DIR}/playbooks/xci-prepare-env.yml
+
+source ${XCI_CACHE}/repos/bifrost/scripts/bifrost-env.sh
+
+# This is hardcoded to delegate to localhost but we really need to delegate to opnfv instead.
+sed -i "/delegate_to:/d" ${XCI_CACHE}/repos/bifrost/playbooks/roles/bifrost-deploy-nodes-dynamic/tasks/main.yml
+
+ansible-playbook ${XCI_ANSIBLE_PARAMS} \
+ --user=devuser \
+ -i ${XCI_PATH}/xci/playbooks/dynamic_inventory.py \
+ -i ${XCI_CACHE}/repos/bifrost/playbooks/inventory/bifrost_inventory.py \
+ -e use_cirros=false \
+ -e testing_user=root \
+ -e test_vm_num_nodes=${NUM_NODES} \
+ -e test_vm_cpu='host-model' \
+ -e inventory_dhcp=${BIFROST_INVENTORY_DHCP} \
+ -e inventory_dhcp_static_ip=false \
+ -e enable_inspector=true \
+ -e inspect_nodes=true \
+ -e download_ipa=${BIFROST_DOWNLOAD_IPA} \
+ -e create_ipa_image=${BIFROST_CREATE_IPA} \
+ -e write_interfaces_file=true \
+ -e ipv4_gateway=192.168.122.1 \
+ -e wait_timeout=3600 \
+ -e enable_keystone=false \
+ -e ironicinspector_git_branch=${BIFROST_IRONIC_INSPECTOR_VERSION:-master} \
+ -e ironicinspectorclient_git_branch=${BIFROST_IRONIC_INSPECTOR_CLIENT_VERSION:-master} \
+ -e ironicclient_git_branch=${BIFROST_IRONIC_CLIENT_VERSION:-master} \
+ -e ironic_git_branch=${BIFROST_IRONIC_VERSION:-master} \
+ -e create_image_via_dib=${BIFROST_CREATE_IMAGE_VIA_DIB:-true} \
+ -e xci_distro=${XCI_DISTRO} \
+ -e ironic_url="http://192.168.122.2:6385/" \
+ ${BIFROST_ROOT_DIR}/playbooks/opnfv-virtual.yml
+
+
+if [ "${BAREMETAL}" = true ]; then
+ ansible-playbook ${XCI_ANSIBLE_PARAMS} \
+ --user=devuser -i ${XCI_PATH}/xci/playbooks/dynamic_inventory.py \
+ -i ${XCI_CACHE}/repos/bifrost/playbooks/inventory/bifrost_inventory.py \
+ ${BIFROST_ROOT_DIR}/playbooks/wait-for-baremetal.yml
+fi
+
+echo "-----------------------------------------------------------------------"
+echo "Info: VM nodes are provisioned!"
+echo "-----------------------------------------------------------------------"
diff --git a/xci/infra/bifrost/playbooks/opnfv-virtual.yml b/xci/infra/bifrost/playbooks/opnfv-virtual.yml
new file mode 100644
index 00000000..f97eae4b
--- /dev/null
+++ b/xci/infra/bifrost/playbooks/opnfv-virtual.yml
@@ -0,0 +1,167 @@
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2016 RedHat and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- hosts: opnfv
+ name: "Host and Ironic bootstrapping"
+ become: yes
+ gather_facts: yes
+ vars_files:
+ - "../vars/{{ ansible_os_family | lower }}.yml"
+ pre_tasks:
+ - name: Remove pre-existing leases file
+ file: path=/var/lib/misc/dnsmasq.leases state=absent
+ - name: "Override the ipv4_gateway setting"
+ set_fact:
+ ipv4_gateway: "192.168.122.1"
+ - name: Prepare the XCI pre-built images
+ block:
+ - name: Create the PXE directory
+ file:
+ path: /httpboot
+ state: directory
+ - name: Download the {{ xci_distro }} image checksum file
+ get_url:
+ dest: /httpboot/deployment_image.qcow2.sha256.txt
+ force: no
+ url: http://artifacts.opnfv.org/releng/xci/images/{{ xci_distro }}.qcow2.sha256.txt
+ timeout: 3000
+ - name: Extract checksum
+ shell: awk '{print $1}' /httpboot/deployment_image.qcow2.sha256.txt
+ register: _image_checksum
+ - fail:
+ msg: "Failed to get image checksum"
+ when: _image_checksum == ''
+ - set_fact:
+ image_checksum: "{{ _image_checksum.stdout }}"
+ - name: Download the {{ xci_distro }} image file
+ get_url:
+ url: http://artifacts.opnfv.org/releng/xci/images/{{ xci_distro }}.qcow2
+ checksum: "sha256:{{ image_checksum }}"
+ timeout: 3000
+ dest: /httpboot/deployment_image.qcow2
+ force: no
+ - name: Set correct mode for {{ xci_distro }}.qcow2 file
+ file:
+ path: /httpboot/deployment_image.qcow2
+ mode: '0755'
+ owner: 'root'
+ group: 'root'
+ when: create_image_via_dib | bool == false
+ - name: Ensure /etc/hosts has good defaults
+ lineinfile:
+ create: yes
+ dest: "/etc/hosts"
+ regexp: "{{ item.regexp }}.*({{ ansible_hostname }}|localhost).*"
+ line: "{{ item.contents }}"
+ with_items:
+ - { regexp: '^127\.0\.0\.1', contents: '127.0.0.1 {{ ansible_hostname }} {{ ansible_fqdn }} localhost' }
+ - { regexp: '^::1', contents: '::1 {{ ansible_hostname }} {{ ansible_fqdn }} localhost ipv6-localhost ipv6-loopback' }
+ - name: Install required packages
+ package:
+ name: "{{ bifrost_required_devel_packages }}"
+ state: present
+
+ roles:
+ - role: bifrost-prep-for-install
+ when: skip_install is not defined
+ - role: bifrost-keystone-install
+ - role: bifrost-ironic-install
+ cleaning: false
+ testing: false
+ enabled_hardware_types: ipmi
+ network_interface: "{{ ansible_default_ipv4.interface }}"
+ # Create the IPA image for ironic to boot the nodes and write the final distro in the hard drive
+ # fedora is used because it is the only one working with ericsson-pod2 (it has support for newer hardware)
+ - role: bifrost-create-dib-image
+ dib_imagename: "{{ http_boot_folder }}/ipa"
+ build_ramdisk: false
+ dib_os_element: "{{ ipa_dib_os_element|default('fedora') }}"
+ dib_elements: "ironic-agent {{ ipa_extra_dib_elements | default('') }}"
+ dib_notmpfs: true
+ when:
+ - create_ipa_image | bool == true
+ # Create the final distro image
+ - role: bifrost-create-dib-image
+ dib_imagetype: "qcow2"
+ dib_imagename: "{{deploy_image}}"
+ dib_env_vars:
+ DIB_PYTHON_VERSION: 2
+ dib_os_element: "{{ lookup('env','DIB_OS_ELEMENT') }}"
+ dib_os_release: "{{ lookup('env', 'DIB_OS_RELEASE') }}"
+ extra_dib_elements: "{{ lookup('env', 'EXTRA_DIB_ELEMENTS') | default('') }}"
+ dib_elements: "vm enable-serial-console simple-init devuser openssh-server growroot pip-and-virtualenv {{ extra_dib_elements }}"
+ dib_packages: "{{ lookup('env', 'DIB_OS_PACKAGES') }}"
+ dib_notmpfs: true
+ when:
+ - create_image_via_dib | bool == true
+ - transform_boot_image | bool == false
+ - role: bifrost-keystone-client-config
+ clouds:
+ bifrost:
+ config_username: "{{ ironic.keystone.default_username }}"
+ config_password: "{{ ironic.keystone.default_password }}"
+ config_project_name: "baremetal"
+ config_region_name: "{{ keystone.bootstrap.region_name }}"
+ config_auth_url: "{{ keystone.bootstrap.public_url }}"
+ environment:
+ http_proxy: "{{ lookup('env','http_proxy') }}"
+ https_proxy: "{{ lookup('env','https_proxy') }}"
+ no_proxy: "{{ lookup('env','no_proxy') }}"
+
+- hosts: baremetal
+ name: "Enrollment and Deployment"
+ vars:
+ multinode_testing: "{{ inventory_dhcp | bool == true }}"
+ become: no
+ gather_facts: False
+ tasks:
+ - name: Gathering facts
+ setup:
+ delegate_to: opnfv
+ delegate_facts: False
+ - name: Find network interface in the OPNFV node
+ set_fact:
+ network_interface: "{{ ansible_default_ipv4.interface }}"
+ - import_role:
+ name: ironic-enroll-dynamic
+ private: True
+ delegate_to: opnfv
+ - import_role:
+ name: ironic-inspect-node
+ private: True
+ delegate_to: opnfv
+ when: inspect_nodes | default('false') | bool == true
+ - import_role:
+ name: bifrost-configdrives-dynamic
+ private: True
+ vars:
+ ipv4_nameserver: "{{ host_info[inventory_hostname]['public']['dns'] | list }}"
+ delegate_to: opnfv
+ - import_role:
+ name: bifrost-deploy-nodes-dynamic
+ private: True
+ delegate_to: opnfv
+ environment:
+ http_proxy: "{{ lookup('env','http_proxy') }}"
+ https_proxy: "{{ lookup('env','https_proxy') }}"
+ no_proxy: "{{ lookup('env','no_proxy') }}"
+
+- hosts: baremetal
+ name: "Deploy machines."
+ become: no
+ serial: 1
+ gather_facts: False
+ tasks:
+ #- name: Gathering facts
+ #setup:
+ #delegate_to: opnfv
+ #delegate_facts: False
+ - import_role:
+ name: bifrost-prepare-for-test-dynamic
+ delegate_to: opnfv
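[Editor's note] For debugging failed image downloads, a manual shell equivalent of the pre-built image tasks above (assuming xci_distro=ubuntu):

    curl -o /httpboot/deployment_image.qcow2.sha256.txt \
        http://artifacts.opnfv.org/releng/xci/images/ubuntu.qcow2.sha256.txt
    checksum=$(awk '{print $1}' /httpboot/deployment_image.qcow2.sha256.txt)
    curl -o /httpboot/deployment_image.qcow2 \
        http://artifacts.opnfv.org/releng/xci/images/ubuntu.qcow2
    echo "${checksum}  /httpboot/deployment_image.qcow2" | sha256sum -c -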
diff --git a/xci/infra/bifrost/playbooks/roles/common/venv_python_path.yml b/xci/infra/bifrost/playbooks/roles/common/venv_python_path.yml
new file mode 100644
index 00000000..7f7ad670
--- /dev/null
+++ b/xci/infra/bifrost/playbooks/roles/common/venv_python_path.yml
@@ -0,0 +1,34 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: "If VENV is set in the environment, enable installation into venv"
+ set_fact:
+ enable_venv: true
+ when: lookup('env', 'VENV') | length > 0
+
+- name: "Retrieve venv python path"
+ shell: "/bin/echo -e \"import sys\\nprint(':'.join(sys.path))\" | {{ ansible_python.get('executable', '/usr/bin/python').split('/')[-1] }}"
+ environment: "{{ bifrost_venv_env | default({}) }}"
+ register: venv_pythonpath_result
+ when: enable_venv
+
+- name: "Compute venv python path"
+ set_fact:
+ venv_pythonpath:
+ PYTHONPATH: "{{ venv_pythonpath_result.get('stdout', '') }}"
+ when: enable_venv
+
+- name: "Compute proper complete venv including proper Python path"
+ set_fact:
+ venv: "{{ venv | default({}) | combine(bifrost_venv_env | default({})) | combine(venv_pythonpath | default({})) }}"
+
diff --git a/xci/infra/bifrost/playbooks/wait-for-baremetal.yml b/xci/infra/bifrost/playbooks/wait-for-baremetal.yml
new file mode 100644
index 00000000..96aab29c
--- /dev/null
+++ b/xci/infra/bifrost/playbooks/wait-for-baremetal.yml
@@ -0,0 +1,17 @@
+# ironic needs to boot the server again to install the OS on the hard drive.
+# We are currently modifying the opnfv VM networking config while ironic is
+# doing that, and it sometimes fails because of networking glitches. We should
+# wait until the OS is installed before doing the opnfv config.
+
+- hosts: baremetal
+ name: "Wait for baremetal blades to be ready"
+ become: no
+ gather_facts: False
+ tasks:
+ - name: "Wait for nodes to reboot."
+ wait_for: state=stopped port=22 host={{ ipv4_address }} timeout=5000
+ delegate_to: opnfv
+ - name: "Wait for nodes to become available."
+ wait_for: state=started port=22 host={{ ipv4_address }} timeout=5000
+ delegate_to: opnfv
+
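[Editor's note] The two wait_for tasks implement a "port drops, then port returns" handshake around the reboot; roughly the following shell loop, shown only as a sketch:

    # wait for sshd to go away (node starts rebooting) ...
    while nc -z "${ipv4_address}" 22; do sleep 5; done
    # ... then wait for the freshly installed OS to come up
    until nc -z "${ipv4_address}" 22; do sleep 5; done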
diff --git a/xci/infra/bifrost/playbooks/xci-prepare-env.yml b/xci/infra/bifrost/playbooks/xci-prepare-env.yml
new file mode 100644
index 00000000..d576324d
--- /dev/null
+++ b/xci/infra/bifrost/playbooks/xci-prepare-env.yml
@@ -0,0 +1,118 @@
+- name: Prepare deployment host
+ hosts: deployment_host
+ gather_facts: True
+ tasks:
+ - name: Ensure common private key has correct permissions
+ file:
+ path: "{{ xci_path }}/xci/scripts/vm/id_rsa_for_dib"
+ mode: "0600"
+
+ - name: Remove host from known_hosts file if necessary
+ shell:
+ ssh-keygen -R {{ hostvars['opnfv'].ip }}
+ failed_when: false
+
+- name: Prepare the OPNFV host
+ hosts: opnfv
+ gather_facts: True
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+ tasks:
+
+ - name: Configure SSH key for devuser
+ user:
+ name: devuser
+ generate_ssh_key: yes
+ ssh_key_bits: 2048
+ ssh_key_comment: xci
+ ssh_key_type: rsa
+ state: present
+
+ - name: Determine local user
+ become: no
+ local_action: command whoami
+ changed_when: False
+ register: _ansible_user
+
+ - name: Fetch local SSH key
+ delegate_to: localhost
+ become: no
+ slurp:
+ src: "/home/{{ _ansible_user.stdout }}/.ssh/id_rsa.pub"
+ register: _local_ssh_key
+
+ - name: "Configure {{ inventory_hostname }} authorized_keys file (devuser)"
+ authorized_key:
+ exclusive: no
+ user: devuser
+ state: present
+ manage_dir: yes
+ key: "{{ _local_ssh_key['content'] | b64decode }}"
+ comment: "deployer's key"
+
+ - name: "Configure {{ inventory_hostname }} authorized_keys file (root)"
+ authorized_key:
+ exclusive: no
+ user: root
+ state: present
+ manage_dir: yes
+ key: "{{ _local_ssh_key['content'] | b64decode }}"
+ comment: "deployer's key"
+ become: yes
+
+ - name: Ensure /httpboot directory exists
+ file:
+ path: /httpboot
+ state: directory
+ become: yes
+
+ # Directory must exist before passing the static config
+ - name: "Setup Inventory DHCP Hosts Directory"
+ file:
+ path: "/etc/dnsmasq.d/bifrost.dhcp-hosts.d"
+ state: directory
+ owner: "root"
+ group: "root"
+ mode: 0755
+ become: yes
+
+ - name: Copy bifrost files
+ copy:
+ src: "{{ item.src }}"
+ dest: "{{ item.dst }}"
+ with_items:
+ - { src: '/tmp/baremetal.json', dst: '/tmp/baremetal.json' }
+ - { src: '/tmp/baremetalstaticips', dst: '/etc/dnsmasq.d/bifrost.dhcp-hosts.d/baremetalstaticips' }
+ become: yes
+
+ - name: Copy original qcow2 image to OPNFV VM
+ synchronize:
+ src: "{{ xci_cache }}/{{ item }}"
+ dest: /httpboot/
+ recursive: yes
+ delete: yes
+ with_items:
+ - "deployment_image.qcow2"
+ - "deployment_image.qcow2.sha256.txt"
+ become: yes
+
+ - name: Configure DNS on openSUSE
+ block:
+ - stat:
+ path: /etc/resolv.conf.netconfig
+ register: _resolv_conf_netconfig
+ - shell: |
+ mv /etc/resolv.conf.netconfig /etc/resolv.conf
+ become: yes
+ when: _resolv_conf_netconfig.stat.exists
+ when: ansible_pkg_mgr == 'zypper'
+
+ #TODO: Find a way to do this with Ansible
+    - name: Remove the existing default route
+ shell: "ip route del default"
+ become: yes
+
+ #TODO: Find a way to do this with Ansible
+ - name: Make sure the default gateway is correct
+ shell: "ip route add default via {{ host_info[inventory_hostname].public.gateway }}"
+ become: yes
diff --git a/xci/infra/bifrost/playbooks/xci-setup-nodes.yml b/xci/infra/bifrost/playbooks/xci-setup-nodes.yml
new file mode 100644
index 00000000..a0f92159
--- /dev/null
+++ b/xci/infra/bifrost/playbooks/xci-setup-nodes.yml
@@ -0,0 +1,76 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 SUSE LINUX GmbH.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+- hosts: deployment_host
+ name: "Bootstrap XCI hardware resources and prepare provisioning environment"
+ gather_facts: yes
+ vars_files:
+ - "{{ pdf_file }}"
+ - "{{ idf_file }}"
+ - "{{ xci_path }}/xci/var/opnfv_vm_pdf.yml"
+ - "{{ xci_path }}/xci/var/opnfv_vm_idf.yml"
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+ pre_tasks:
+ - name: Load distribution variables
+ include_vars:
+ file: "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml"
+ roles:
+ - role: create-nodes
+ become: yes
+ - role: clone-repository
+ project: "opnfv/bifrost"
+ repo: "{{ openstack_bifrost_git_url }}"
+ dest: "{{ xci_cache }}/repos/bifrost"
+ version: "{{ openstack_bifrost_version }}"
+ tasks:
+ - name: Wait for host to come back to life
+ local_action:
+ module: wait_for
+ host: "{{ opnfv_vm_ip }}"
+ delay: 15
+ state: started
+ port: 22
+ connect_timeout: 10
+ timeout: 10180
+
+ # No ansible module for brctl found
+ - name: Add pxe interface to the bridge
+ shell: "brctl addif {{ item.bridge }} {{ item.interface }}"
+ become: true
+ when: baremetal | bool == true
+ with_items:
+ - { bridge: "{{ network_bridge_admin }}", interface: "{{ network_interface_admin }}" }
+ - { bridge: "{{ network_bridge_mgmt }}", interface: "{{ network_interface_mgmt }}" }
+
+ - name: Load distribution variables
+ include_vars:
+ file: "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml"
+ - name: Synchronize local development bifrost repository to XCI paths
+    # the synchronize module is much faster than the copy module
+ synchronize:
+ src: "{{ openstack_bifrost_dev_path }}"
+ dest: "{{ xci_cache }}/repos/bifrost"
+ recursive: yes
+ delete: yes
+ when:
+ - openstack_bifrost_dev_path != ""
+ - name: combine opnfv/releng-xci and openstack/bifrost scripts/playbooks
+ copy:
+ src: "{{ xci_path}}/xci/infra/bifrost/"
+ dest: "{{ xci_cache }}/repos/bifrost"
+ - name: "Ensure /etc/hosts has good defaults"
+ lineinfile:
+ dest: "/etc/hosts"
+ regexp: "{{ item.regexp }}.*({{ ansible_hostname }}|localhost).*"
+ line: "{{ item.contents }}"
+ become: yes
+ with_items:
+ - { regexp: '^127\.0\.0\.1', contents: '127.0.0.1 {{ ansible_hostname }} {{ ansible_fqdn }} localhost' }
+ - { regexp: '^::1', contents: '::1 {{ ansible_hostname }} {{ ansible_fqdn }} localhost ipv6-localhost ipv6-loopback' }
diff --git a/xci/infra/bifrost/scripts/bifrost-env.sh b/xci/infra/bifrost/scripts/bifrost-env.sh
new file mode 100755
index 00000000..7d882125
--- /dev/null
+++ b/xci/infra/bifrost/scripts/bifrost-env.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2016 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# dib configuration
+case ${XCI_DISTRO,,} in
+ # These should ideally match the CI jobs
+ ubuntu)
+ export DIB_OS_RELEASE="${DIB_OS_RELEASE:-xenial}"
+ export DIB_OS_ELEMENT="${DIB_OS_ELEMENT:-ubuntu-minimal}"
+ export DIB_OS_PACKAGES="${DIB_OS_PACKAGES:-vlan,vim,less,bridge-utils,language-pack-en,iputils-ping,rsyslog,curl,iptables}"
+ ;;
+ centos)
+ export DIB_OS_RELEASE="${DIB_OS_RELEASE:-7}"
+ export DIB_OS_ELEMENT="${DIB_OS_ELEMENT:-centos-minimal}"
+ export DIB_OS_PACKAGES="${DIB_OS_PACKAGES:-vim,less,bridge-utils,iputils,rsyslog,curl,iptables}"
+ ;;
+ opensuse)
+ export DIB_OS_RELEASE="${DIB_OS_RELEASE:-42.3}"
+ export DIB_OS_ELEMENT="${DIB_OS_ELEMENT:-opensuse-minimal}"
+ export DIB_OS_PACKAGES="${DIB_OS_PACKAGES:-vim,less,bridge-utils,iputils,rsyslog,curl,iptables}"
+ ;;
+esac
+
+export BIFROST_INVENTORY_SOURCE=/tmp/baremetal.json
+
+if [ "${BAREMETAL}" = true ]; then
+ export BIFROST_INVENTORY_DHCP=true
+ export BIFROST_DOWNLOAD_IPA=false
+ export BIFROST_CREATE_IPA=true
+else
+ export BIFROST_INVENTORY_DHCP=false
+ export BIFROST_DOWNLOAD_IPA=true
+ export BIFROST_CREATE_IPA=false
+fi
+
+pip install -q --upgrade -r "${XCI_CACHE}/repos/bifrost/requirements.txt"
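[Editor's note] Every DIB variable above uses the ${VAR:-default} pattern, so the image contents can be overridden before sourcing the script; for example (bionic as a release name is an assumption about diskimage-builder support):

    export XCI_DISTRO=ubuntu
    export DIB_OS_RELEASE=bionic   # override the xenial default
    source xci/infra/bifrost/scripts/bifrost-env.sh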
diff --git a/xci/infra/bifrost/vars/debian.yml b/xci/infra/bifrost/vars/debian.yml
new file mode 100644
index 00000000..95303b38
--- /dev/null
+++ b/xci/infra/bifrost/vars/debian.yml
@@ -0,0 +1,19 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 SUSE Linux GmbH.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+bifrost_required_devel_packages:
+ - gcc
+ - libffi-dev
+ - libssl-dev
+ - lsb-release
+ - make
+ - net-tools
+ - libpython-dev
+ - wget
+ - iptables
diff --git a/xci/infra/bifrost/vars/redhat.yml b/xci/infra/bifrost/vars/redhat.yml
new file mode 100644
index 00000000..056c4d61
--- /dev/null
+++ b/xci/infra/bifrost/vars/redhat.yml
@@ -0,0 +1,19 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 SUSE Linux GmbH.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+bifrost_required_devel_packages:
+ - gcc
+ - libffi-devel
+ - openssl-devel
+ - redhat-lsb
+ - make
+ - net-tools
+ - python-devel
+ - wget
+ - iptables
diff --git a/xci/infra/bifrost/vars/suse.yml b/xci/infra/bifrost/vars/suse.yml
new file mode 100644
index 00000000..8e2e9041
--- /dev/null
+++ b/xci/infra/bifrost/vars/suse.yml
@@ -0,0 +1,19 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 SUSE Linux GmbH.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+bifrost_required_devel_packages:
+ - gcc
+ - libffi-devel
+ - libopenssl-devel
+ - make
+ - net-tools
+ - python-devel
+ - python-xml
+ - wget
+ - iptables
diff --git a/xci/installer/kubespray/README b/xci/installer/kubespray/README
new file mode 100644
index 00000000..04202c28
--- /dev/null
+++ b/xci/installer/kubespray/README
@@ -0,0 +1,64 @@
+The xci/installer/kubespray/files/k8s-cluster.yml file is obtained from kubespray.
+You can change its parameters according to your needs.
+When the deployment starts, it is copied to the right directory and used by kubespray.
+
+For example:
+ kube_network_plugin: calico
+ docker_storage_options: -s overlay2
+ kube_service_addresses: 10.233.0.0/18
+
+Requirements:
+ 1. Performance of hosts
+     There are no official performance requirements; the following settings are recommended:
+ - VM_CPU=6
+ - VM_DISK=80GB
+ - VM_MEMORY_SIZE=16GB
+
+ 2. Distributions
+ - Ubuntu 16.04
+
+ 3. Packages:
+     - Ansible v2.4 (or newer) and python-netaddr are installed on the machine that will run Ansible commands
+ - Jinja 2.9 (or newer) is required to run the Ansible Playbooks
+
+ 4. Others:
+ - The target servers must have access to the Internet in order to pull docker images.
+ - The target servers are configured to allow IPv4 forwarding.
+     - Your ssh key must be copied to all the servers that are part of your inventory.
+     - The firewalls are not managed; you will need to implement your own rules as you normally do. To avoid any issues during the deployment, you should disable your firewall.
+
+Flavors:
+ 1. aio: Single host which acts as the deployment host, master and node.
+ 2. mini: One deployment host, 1 master host and 1 node host.
+ 3. noha: One deployment host, 1 master host and 2 node hosts.
+ 4. ha: One deployment host, 3 master hosts and 2 node hosts.
+
+Components Installed:
+ 1. etcd
+  2. network plugins (one of the following can be chosen; the default is calico):
+ - calico
+ - flannel
+     - contiv
+ - weave
+ 3. kubernetes
+ 4. docker
+
+How to use:
+
+Clone the OPNFV releng-xci repository
+
+ git clone https://gerrit.opnfv.org/gerrit/releng-xci.git
+
+Change into the directory where the sandbox script is located
+
+ cd releng-xci/xci
+
+Set the variable to run kubespray
+
+ export INSTALLER_TYPE=kubespray
+ export DEPLOY_SCENARIO=k8-nosdn-nofeature
+ export XCI_FLAVOR=mini
+
+Execute sandbox script
+
+ ./xci-deploy.sh
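[Editor's note] After a successful run, a quick sanity check can be done from the deployment host, assuming the default OPNFV_HOST_IP of 192.168.122.2 and the admin.conf copy performed by deploy.sh:

    ssh root@192.168.122.2 kubectl get nodes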
diff --git a/xci/installer/kubespray/deploy.sh b/xci/installer/kubespray/deploy.sh
new file mode 100755
index 00000000..af80b38f
--- /dev/null
+++ b/xci/installer/kubespray/deploy.sh
@@ -0,0 +1,157 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2017 Huawei
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+set -o errexit
+set -o nounset
+set -o pipefail
+
+K8_XCI_PLAYBOOKS="$(dirname $(realpath ${BASH_SOURCE[0]}))/playbooks"
+export ANSIBLE_ROLES_PATH=$HOME/.ansible/roles:/etc/ansible/roles:${XCI_PATH}/xci/playbooks/roles
+
+#-------------------------------------------------------------------------------
+# Configure localhost
+#-------------------------------------------------------------------------------
+# This playbook
+# - removes directories that were created by the previous xci run
+# - clones opnfv/releng-xci repository
+# - clones kubernetes-incubator/kubespray repository
+# - creates log directory
+#-------------------------------------------------------------------------------
+
+echo "Info: Configuring localhost for kubespray"
+echo "-----------------------------------------------------------------------"
+cd $XCI_PLAYBOOKS
+ansible-playbook ${XCI_ANSIBLE_PARAMS} -e XCI_PATH="${XCI_PATH}" \
+ -i dynamic_inventory.py configure-localhost.yml
+echo "-----------------------------------------------------------------------"
+echo "Info: Configured localhost for kubespray"
+
+#-------------------------------------------------------------------------------
+# Configure installer
+#-------------------------------------------------------------------------------
+# TODO: summarize what this playbook does
+#-------------------------------------------------------------------------------
+
+echo "Info: Configuring kubespray installer"
+echo "-----------------------------------------------------------------------"
+cd $K8_XCI_PLAYBOOKS
+ansible-playbook ${XCI_ANSIBLE_PARAMS} \
+ -i ${XCI_PLAYBOOKS}/dynamic_inventory.py configure-installer.yml
+echo "-----------------------------------------------------------------------"
+echo "Info: Configured kubespray installer"
+
+#-------------------------------------------------------------------------------
+# Configure deployment host, opnfv
+#-------------------------------------------------------------------------------
+# This playbook
+# - removes directories that were created by the previous xci run
+# - synchronizes opnfv/releng-xci and kubernetes-incubator/kubespray repositories
+# - generates/prepares ssh keys
+# - copies flavor files to be used by kubespray
+# - installs packages required by kubespray
+#-------------------------------------------------------------------------------
+echo "Info: Configuring opnfv deployment host for kubespray"
+echo "-----------------------------------------------------------------------"
+cd $K8_XCI_PLAYBOOKS
+ansible-playbook ${XCI_ANSIBLE_PARAMS} \
+ -i ${XCI_PLAYBOOKS}/dynamic_inventory.py configure-opnfvhost.yml
+echo "-----------------------------------------------------------------------"
+echo "Info: Configured opnfv deployment host for kubespray"
+
+#-------------------------------------------------------------------------------
+# Configure target hosts for kubespray
+#-------------------------------------------------------------------------------
+# This playbook is only run for all flavors except aio, since aio is configured by configure-opnfvhost.yml
+# This playbook
+# - adds public keys to target hosts
+# - installs packages required by kubespray
+# - configures haproxy service
+#-------------------------------------------------------------------------------
+if [ $XCI_FLAVOR != "aio" ]; then
+ echo "Info: Configuring target hosts for kubespray"
+ echo "-----------------------------------------------------------------------"
+ cd $K8_XCI_PLAYBOOKS
+ ansible-playbook ${XCI_ANSIBLE_PARAMS} \
+ -i ${XCI_PLAYBOOKS}/dynamic_inventory.py configure-targethosts.yml
+ echo "-----------------------------------------------------------------------"
+ echo "Info: Configured target hosts for kubespray"
+fi
+
+
+echo "Info: Using kubespray to deploy the kubernetes cluster"
+echo "-----------------------------------------------------------------------"
+ssh root@$OPNFV_HOST_IP "set -o pipefail; export XCI_FLAVOR=$XCI_FLAVOR; export INSTALLER_TYPE=$INSTALLER_TYPE; \
+ export IDF=/root/releng-xci/xci/var/idf.yml; export PDF=/root/releng-xci/xci/var/pdf.yml; \
+ cd releng-xci/.cache/repos/kubespray/; ansible-playbook \
+ -i inventory/opnfv/dynamic_inventory.py cluster.yml -b | tee setup-kubernetes.log"
+scp root@$OPNFV_HOST_IP:~/releng-xci/.cache/repos/kubespray/setup-kubernetes.log \
+ $LOG_PATH/setup-kubernetes.log
+
+
+cd $K8_XCI_PLAYBOOKS
+ansible-playbook ${XCI_ANSIBLE_PARAMS} \
+ -i ${XCI_PLAYBOOKS}/dynamic_inventory.py configure-kubenet.yml
+echo
+echo "-----------------------------------------------------------------------"
+echo "Info: Kubernetes installation is successfully completed!"
+echo "-----------------------------------------------------------------------"
+
+#-------------------------------------------------------------------------------
+# Execute post-installation tasks
+#-------------------------------------------------------------------------------
+# Playbook post-deployment.yml is used to execute any post-deployment tasks that
+# are required for the scenario under test.
+#-------------------------------------------------------------------------------
+# copy admin.conf
+ssh root@$OPNFV_HOST_IP "mkdir -p ~/.kube/; \
+ cp -f ~/admin.conf ~/.kube/config"
+echo "-----------------------------------------------------------------------"
+echo "Info: Running post-deployment scenario role"
+echo "-----------------------------------------------------------------------"
+cd $K8_XCI_PLAYBOOKS
+ansible-playbook ${XCI_ANSIBLE_PARAMS} -i ${XCI_PLAYBOOKS}/dynamic_inventory.py \
+ post-deployment.yml
+echo "-----------------------------------------------------------------------"
+echo "Info: Post-deployment scenario role execution done"
+echo "-----------------------------------------------------------------------"
+echo
+echo "Login opnfv host ssh root@$OPNFV_HOST_IP
+according to the user-guide to create a service
+https://kubernetes.io/docs/user-guide/walkthrough/k8s201/"
+echo
+echo "-----------------------------------------------------------------------"
+echo "Info: Kubernetes login details"
+echo "-----------------------------------------------------------------------"
+echo
+# Get the dashboard URL
+if ssh-keygen -f "/home/opnfv/.ssh/known_hosts" -F $OPNFV_HOST_IP; then
+    ssh-keygen -f "/home/opnfv/.ssh/known_hosts" -R $OPNFV_HOST_IP
+    echo "Info: known_hosts entry for opnfv host from previous deployment found and deleted"
+fi
+DASHBOARD_SERVICE=$(ssh -q -o StrictHostKeyChecking=no root@$OPNFV_HOST_IP "kubectl get service -n kube-system |grep kubernetes-dashboard")
+DASHBOARD_PORT=$(echo ${DASHBOARD_SERVICE} | awk '{print $5}' |awk -F "[:/]" '{print $2}')
+KUBER_SERVER_URL=$(ssh root@$OPNFV_HOST_IP "grep -r server ~/.kube/config")
+echo "Info: Kubernetes Dashboard URL:"
+echo $KUBER_SERVER_URL | awk '{print $2}'| sed -n "s#:[0-9]*\$#:$DASHBOARD_PORT#p"
+
+# Get the dashboard user and password
+MASTER_IP=$(echo ${KUBER_SERVER_URL} | awk '{print $2}' |awk -F "[:/]" '{print $4}')
+if ssh-keygen -f "/home/opnfv/.ssh/known_hosts" -F $MASTER_IP; then
+    ssh-keygen -f "/home/opnfv/.ssh/known_hosts" -R $MASTER_IP
+    echo "Info: known_hosts entry for master host from previous deployment found and deleted"
+fi
+USER_CSV=$(ssh -q -o StrictHostKeyChecking=no root@$MASTER_IP "cat /etc/kubernetes/users/known_users.csv")
+USERNAME=$(echo $USER_CSV |awk -F ',' '{print $2}')
+PASSWORD=$(echo $USER_CSV |awk -F ',' '{print $1}')
+echo "Info: Dashboard username: ${USERNAME}"
+echo "Info: Dashboard password: ${PASSWORD}"
+
+# vim: set ts=4 sw=4 expandtab:
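
The dashboard URL logic above splices the dashboard NodePort into the kube API server URL taken from ~/.kube/config. A standalone sketch of the same awk/sed pipeline, using made-up service output and server line:

    # hypothetical values for illustration only
    KUBER_SERVER_URL="server: https://192.168.122.3:6443"
    DASHBOARD_SERVICE="kubernetes-dashboard NodePort 10.233.1.2 <none> 443:31796/TCP 5m"
    DASHBOARD_PORT=$(echo ${DASHBOARD_SERVICE} | awk '{print $5}' | awk -F "[:/]" '{print $2}')  # 31796
    echo $KUBER_SERVER_URL | awk '{print $2}' | sed -n "s#:[0-9]*\$#:$DASHBOARD_PORT#p"
    # prints: https://192.168.122.3:31796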
diff --git a/xci/installer/kubespray/files/ha/inventory/group_vars/all.yml b/xci/installer/kubespray/files/ha/inventory/group_vars/all.yml
new file mode 100644
index 00000000..d1b946a7
--- /dev/null
+++ b/xci/installer/kubespray/files/ha/inventory/group_vars/all.yml
@@ -0,0 +1,8 @@
+---
+loadbalancer_apiserver:
+ address: 192.168.122.222
+ port: 8383
+
+apiserver_loadbalancer_domain_name: 192.168.122.222
+supplementary_addresses_in_ssl_keys:
+ - 192.168.122.222
diff --git a/xci/installer/kubespray/playbooks/configure-installer.yml b/xci/installer/kubespray/playbooks/configure-installer.yml
new file mode 100644
index 00000000..d88ee55c
--- /dev/null
+++ b/xci/installer/kubespray/playbooks/configure-installer.yml
@@ -0,0 +1,50 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- hosts: localhost
+ connection: local
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+
+ tasks:
+ - name: delete existing kubespray/inventory/opnfv directory
+ file:
+ path: "{{ xci_path }}/.cache/repos/kubespray/inventory/opnfv"
+ state: absent
+
+ - name: copy kubespray/inventory/sample as kubespray/inventory/opnfv
+ copy:
+ src: "{{ xci_path }}/.cache/repos/kubespray/inventory/sample/"
+ dest: "{{ xci_path }}/.cache/repos/kubespray/inventory/opnfv"
+
+ - name: update kubespray k8s-cluster.yml for xci
+ lineinfile:
+ path: "{{ xci_path }}/.cache/repos/kubespray/inventory/opnfv/group_vars/k8s-cluster/k8s-cluster.yml"
+ regexp: "{{ item.regexp }}"
+ line: "{{ item.line }}"
+ with_items:
+ - { regexp: "kube_version:.*", line: "kube_version: {{ kubernetes_version }}" }
+ - { regexp: "kubeconfig_localhost:.*", line: "kubeconfig_localhost: true" }
+ - { regexp: "kube_basic_auth:.*", line: "kube_basic_auth: true" }
+ - { regexp: "dashboard_enabled:.*", line: "dashboard_enabled: true" }
+
+# NOTE(fdegir): the reason this task is separate from the lineinfile task above is that
+# escaping curly braces does not work with with_items. what happens is that ansible tries
+# to resolve {{ ansible_env.HOME }} immediately, which we don't want since it should point
+# to the home folder of the user executing this task at runtime.
+ - name: update kubespray artifacts_dir
+ lineinfile:
+ path: "{{ xci_path }}/.cache/repos/kubespray/inventory/opnfv/group_vars/k8s-cluster/k8s-cluster.yml"
+ regexp: "artifacts_dir:.*"
+ line: "artifacts_dir: '{{ '{{' }} ansible_env.HOME {{ '}}' }}'"
+
+ - name: change dashboard server type to NodePort
+ lineinfile:
+ path: "{{ xci_path }}/.cache/repos/kubespray/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2"
+ insertafter: 'targetPort'
+ line: " type: NodePort"
diff --git a/xci/installer/kubespray/playbooks/configure-kubenet.yml b/xci/installer/kubespray/playbooks/configure-kubenet.yml
new file mode 100644
index 00000000..18a126c1
--- /dev/null
+++ b/xci/installer/kubespray/playbooks/configure-kubenet.yml
@@ -0,0 +1,51 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 SUSE LINUX GmbH and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# NOTE(hwoarang) Kubenet expects networking to be prepared by the administrator so it's necessary
+# to do that as part of the node configuration. All we need is to add static routes on every node
+# so cbr0 interfaces can talk to each other.
+- name: Prepare networking for kubenet
+ hosts: k8s-cluster
+ remote_user: root
+ gather_facts: True
+ become: yes
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+ tasks:
+ - name: Configure static routes
+ block:
+ - name: Collect cbr0 information from the nodes
+ set_fact:
+ kubenet_xci_static_routes: |-
+ {% set static_routes = [] %}
+ {% for host in groups['k8s-cluster']|select("ne", inventory_hostname) %}
+ {%- set _ = static_routes.append(
+ {'network': (hostvars[host]['ansible_cbr0']['ipv4']['network']+'/'+
+ hostvars[host]['ansible_cbr0']['ipv4']['netmask'])|ipaddr('net'),
+ 'gateway': hostvars[host]['ansible_default_ipv4']['address']}) -%}
+ {% endfor %}
+ {{ static_routes }}
+
+ - name: Add static routes on each node
+ shell: "ip route show | grep -q {{ item.network }} || ip route add {{ item.network }} via {{ item.gateway }}"
+ with_items: "{{ kubenet_xci_static_routes }}"
+ loop_control:
+ label: "{{ item.network }}"
+ when: deploy_scenario.find('k8-nosdn-') != -1
+
+ - name: Ensure rp_filter is disabled on localhost
+ sysctl:
+ name: net.ipv4.conf.all.rp_filter
+ sysctl_set: yes
+ state: present
+ value: "{{ (kubenet_xci_static_routes is defined) | ternary(0, 1) }}"
+ reload: yes
+ delegate_to: localhost
+ run_once: True
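
Per peer, the shell task above is idempotent: a route is only added when none exists for that peer's cbr0 network. Expanded for a single hypothetical peer whose cbr0 network is 10.233.65.0/24 behind node address 192.168.122.4, it amounts to:

    # illustration with made-up addresses
    ip route show | grep -q 10.233.65.0/24 || ip route add 10.233.65.0/24 via 192.168.122.4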
diff --git a/xci/installer/kubespray/playbooks/configure-opnfvhost.yml b/xci/installer/kubespray/playbooks/configure-opnfvhost.yml
new file mode 100644
index 00000000..52e42b06
--- /dev/null
+++ b/xci/installer/kubespray/playbooks/configure-opnfvhost.yml
@@ -0,0 +1,101 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- hosts: opnfv
+ remote_user: root
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+
+ pre_tasks:
+ - name: Load distribution variables
+ include_vars:
+ file: "{{ item }}"
+ with_items:
+ - "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml"
+ - name: Set facts for remote deployment
+ set_fact:
+ remote_xci_path: "{{ ansible_env.HOME }}/releng-xci"
+ remote_xci_flavor_files: "{{ ansible_env.HOME }}/releng-xci/xci/installer/{{ installer_type }}/files/{{ xci_flavor }}"
+ remote_xci_playbooks: "{{ ansible_env.HOME }}/releng-xci/xci/playbooks"
+
+ roles:
+ - role: bootstrap-host
+ configure_network: xci_flavor != 'aio'
+
+ tasks:
+ - name: Create list of files to copy
+ shell: |
+ git ls-tree -r --name-only HEAD > {{ xci_cache }}/releng-xci.files
+ echo ".git/" >> {{ xci_cache }}/releng-xci.files
+ echo ".cache/repos/" >> {{ xci_cache }}/releng-xci.files
+ echo ".cache/xci.env" >> {{ xci_cache }}/releng-xci.files
+ args:
+ executable: /bin/bash
+ chdir: "{{ xci_path }}"
+ changed_when: False
+ delegate_to: 127.0.0.1
+ tags:
+ - skip_ansible_lint
+
+ - name: Copy releng-xci to remote host
+ synchronize:
+ archive: yes
+ src: "{{ xci_path }}/"
+ dest: "{{ remote_xci_path }}"
+ delete: yes
+ rsync_opts:
+ - "--recursive"
+ - "--files-from={{ xci_cache }}/releng-xci.files"
+
+ - name: link xci dynamic inventory to kubespray/inventory/opnfv directory
+ file:
+ src: "{{ remote_xci_playbooks }}/dynamic_inventory.py"
+ path: "{{ remote_xci_path }}/.cache/repos/kubespray/inventory/opnfv/dynamic_inventory.py"
+ state: link
+
+ - name: Download kubectl and place it to /usr/local/bin
+ get_url:
+ url: "https://storage.googleapis.com/kubernetes-release/release/{{ kubernetes_version }}/bin/linux/amd64/kubectl"
+ dest: /usr/local/bin/kubectl
+ owner: root
+ group: root
+ mode: 0755
+
+ - name: Reload XCI deployment host facts
+ setup:
+ filter: ansible_local
+ gather_subset: "!all"
+ delegate_to: 127.0.0.1
+
+ - name: Prepare everything to run the {{ deploy_scenario }} role
+ include_role:
+ name: "{{ hostvars['opnfv'].ansible_local.xci.scenarios.role }}"
+
+ - name: Install required packages
+ package:
+ name: "{{ (ansible_pkg_mgr == 'zypper') | ternary('dbus-1', 'dbus') }}"
+ state: present
+ update_cache: "{{ (ansible_pkg_mgr in ['apt', 'zypper']) | ternary('yes', omit) }}"
+ when: xci_flavor == 'aio'
+
+ - name: pip install required packages
+ pip:
+ name: "{{ item.name }}"
+ version: "{{ item.version | default(omit) }}"
+ with_items:
+ - { name: 'ansible', version: "{{ xci_kube_ansible_pip_version }}" }
+ - { name: 'netaddr' }
+ - { name: 'ansible-modules-hashivault' }
+
+ - name: fetch xci environment
+ copy:
+ src: "{{ xci_path }}/.cache/xci.env"
+ dest: /root/xci.env
+
+ - name: Manage SSH keys
+ include_tasks: "{{ xci_path }}/xci/playbooks/manage-ssh-keys.yml"
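
The file-list plus synchronize combination above copies only what git tracks, plus the cached repos and environment file. It is roughly equivalent to the following rsync call (local path assumed for illustration):

    rsync -a --delete --recursive --files-from=/home/user/releng-xci/.cache/releng-xci.files \
        /home/user/releng-xci/ root@$OPNFV_HOST_IP:~/releng-xci/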
diff --git a/xci/installer/kubespray/playbooks/configure-targethosts.yml b/xci/installer/kubespray/playbooks/configure-targethosts.yml
new file mode 100644
index 00000000..2fde9877
--- /dev/null
+++ b/xci/installer/kubespray/playbooks/configure-targethosts.yml
@@ -0,0 +1,40 @@
+---
+- hosts: k8s-cluster
+ remote_user: root
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+
+ pre_tasks:
+ - name: Load distribution variables
+ include_vars:
+ file: "{{ item }}"
+ with_items:
+ - "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml"
+
+ roles:
+ - role: bootstrap-host
+
+ tasks:
+ - name: Manage SSH keys
+ include_tasks: "{{ xci_path }}/xci/playbooks/manage-ssh-keys.yml"
+
+ - name: Install dbus
+ package:
+ name: "{{ (ansible_pkg_mgr == 'zypper') | ternary('dbus-1', 'dbus') }}"
+ state: present
+ update_cache: "{{ (ansible_pkg_mgr in ['apt', 'zypper']) | ternary('yes', omit) }}"
+
+- hosts: kube-master
+ remote_user: root
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+ pre_tasks:
+ - name: Load distribution variables
+ include_vars:
+ file: "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml"
+ roles:
+ - role: "keepalived"
+ when: xci_flavor == 'ha'
+ - role: "haproxy_server"
+ haproxy_service_configs: "{{ haproxy_default_services }}"
+ when: xci_flavor == 'ha'
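
For the ha flavor, the keepalived and haproxy_server roles above place the API virtual IP on one of the masters (the variables live in group_vars/all, shown next). A quick way to verify on a master node, assuming the default VIP:

    ip addr show | grep 192.168.122.222    # present only on the current keepalived MASTER
    systemctl is-active haproxy keepalived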
diff --git a/xci/installer/kubespray/playbooks/group_vars/all b/xci/installer/kubespray/playbooks/group_vars/all
new file mode 100644
index 00000000..328f8dba
--- /dev/null
+++ b/xci/installer/kubespray/playbooks/group_vars/all
@@ -0,0 +1,54 @@
+keepalived_ubuntu_src: "uca"
+keepalived_uca_apt_repo_url: "{{ uca_apt_repo_url | default('http://ubuntu-cloud.archive.canonical.com/ubuntu') }}"
+
+keepalived_sync_groups:
+ haproxy:
+ instances:
+ - external
+
+haproxy_keepalived_external_interface: "{{ ansible_default_ipv4.interface }}"
+haproxy_keepalived_authentication_password: 'keepalived'
+keepalived_instances:
+ external:
+ interface: "{{ haproxy_keepalived_external_interface }}"
+ state: "BACKUP"
+ virtual_router_id: "{{ haproxy_keepalived_external_virtual_router_id | default ('10') }}"
+ priority: "{{ ((ansible_play_hosts|length-ansible_play_hosts.index(inventory_hostname))*100)-((ansible_play_hosts|length-ansible_play_hosts.index(inventory_hostname))*50) }}"
+ authentication_password: "{{ haproxy_keepalived_authentication_password }}"
+ vips:
+ - "{{ haproxy_keepalived_external_vip_cidr | default('192.168.122.222/32') }} dev {{ haproxy_keepalived_external_interface }}"
+
+haproxy_default_services:
+ - service:
+ haproxy_service_name: proxy-apiserver
+ haproxy_backend_nodes: "{{ groups['kube-master'] | default([]) }}"
+ haproxy_port: 8383
+ haproxy_backend_port: 6443
+ haproxy_balance_type: tcp
+
+haproxy_bind_on_non_local: "True"
+haproxy_use_keepalived: "True"
+keepalived_selinux_compile_rules:
+ - keepalived_ping
+ - keepalived_haproxy_pid_file
+
+# Ensure that the package state matches the global setting
+haproxy_package_state: "latest"
+
+haproxy_whitelist_networks:
+ - 192.168.0.0/16
+ - 172.16.0.0/12
+ - 10.0.0.0/8
+
+haproxy_galera_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_glance_registry_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_keystone_admin_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_nova_metadata_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_rabbitmq_management_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_repo_git_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_repo_cache_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_octavia_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_ssl: false
+
+internal_lb_vip_address: "192.168.122.222"
+external_lb_vip_address: "{{ internal_lb_vip_address }}"
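
Putting the pieces together: haproxy listens on the VIP at port 8383 and balances across the kube-master apiservers on port 6443, and the ha inventory group_vars shown earlier add the same VIP to the apiserver SSL certificates. A minimal connectivity check under those defaults:

    curl -k https://192.168.122.222:8383/healthz
    # expect "ok" (newer kubernetes versions may require credentials for /healthz)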
diff --git a/xci/installer/kubespray/playbooks/post-deployment.yml b/xci/installer/kubespray/playbooks/post-deployment.yml
new file mode 100644
index 00000000..5c2f7f36
--- /dev/null
+++ b/xci/installer/kubespray/playbooks/post-deployment.yml
@@ -0,0 +1,42 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- hosts: opnfv
+ remote_user: root
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+
+ pre_tasks:
+ - name: Load distribution variables
+ include_vars:
+ file: "{{ item }}"
+ with_items:
+ - "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml"
+ - name: Set facts for remote deployment
+ set_fact:
+ remote_xci_scenario_path: "{{ ansible_env.HOME }}/releng-xci/.cache/repos/scenarios/{{ deploy_scenario }}/scenarios/{{ deploy_scenario }}"
+
+ tasks:
+ - name: Reload XCI deployment host facts
+ setup:
+ filter: ansible_local
+ gather_subset: "!all"
+ delegate_to: 127.0.0.1
+
+ - name: Check if any post-deployment task defined for {{ deploy_scenario }} role
+ stat:
+ path: "{{ remote_xci_scenario_path }}/role/{{ deploy_scenario }}/tasks/post-deployment.yml"
+ register: post_deployment_yml
+
+ - name: Execute post-deployment tasks of {{ deploy_scenario }} role
+ include_role:
+ name: "{{ hostvars['opnfv'].ansible_local.xci.scenarios.role }}"
+ tasks_from: post-deployment
+ when:
+ - post_deployment_yml.stat.exists
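
The stat/include_role pair above only runs a scenario's post-deployment tasks when the file actually exists. For an assumed deploy_scenario of k8-nosdn-nofeature, the path being checked expands to:

    ls ~/releng-xci/.cache/repos/scenarios/k8-nosdn-nofeature/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature/tasks/post-deployment.yml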
diff --git a/xci/installer/osa/deploy.sh b/xci/installer/osa/deploy.sh
index b8637f22..8b3a67d0 100755
--- a/xci/installer/osa/deploy.sh
+++ b/xci/installer/osa/deploy.sh
@@ -38,8 +38,8 @@ fi
echo "Info: Configuring localhost for openstack-ansible"
echo "-----------------------------------------------------------------------"
-cd $OSA_XCI_PLAYBOOKS
-ansible-playbook ${XCI_ANSIBLE_VERBOSITY} -e XCI_PATH="${XCI_PATH}" -i inventory configure-localhost.yml
+cd $XCI_PLAYBOOKS
+ansible-playbook ${XCI_ANSIBLE_PARAMS} -i "localhost," configure-localhost.yml
echo "-----------------------------------------------------------------------"
echo "Info: Configured localhost host for openstack-ansible"
@@ -57,7 +57,8 @@ echo "Info: Configured localhost host for openstack-ansible"
echo "Info: Configuring opnfv deployment host for openstack-ansible"
echo "-----------------------------------------------------------------------"
cd $OSA_XCI_PLAYBOOKS
-ansible-playbook ${XCI_ANSIBLE_VERBOSITY} -e XCI_PATH="${XCI_PATH}" -i ${XCI_FLAVOR_ANSIBLE_FILE_PATH}/inventory \
+ansible-galaxy install -r ${XCI_PATH}/xci/files/requirements.yml -p $HOME/.ansible/roles
+ansible-playbook ${XCI_ANSIBLE_PARAMS} -i ${XCI_PLAYBOOKS}/dynamic_inventory.py \
configure-opnfvhost.yml
echo "-----------------------------------------------------------------------"
echo "Info: Configured opnfv deployment host for openstack-ansible"
@@ -67,7 +68,6 @@ echo "Info: Configured opnfv deployment host for openstack-ansible"
#-------------------------------------------------------------------------------
 # This playbook is only run for all flavors except aio, since aio is configured
# by an upstream script.
-
# This playbook
# - adds public keys to target hosts
# - configures network
@@ -77,7 +77,7 @@ if [[ $XCI_FLAVOR != "aio" ]]; then
echo "Info: Configuring target hosts for openstack-ansible"
echo "-----------------------------------------------------------------------"
cd $OSA_XCI_PLAYBOOKS
- ansible-playbook ${XCI_ANSIBLE_VERBOSITY} -e XCI_PATH="${XCI_PATH}" -i ${XCI_FLAVOR_ANSIBLE_FILE_PATH}/inventory \
+ ansible-playbook ${XCI_ANSIBLE_PARAMS} -i ${XCI_PLAYBOOKS}/dynamic_inventory.py \
configure-targethosts.yml
echo "-----------------------------------------------------------------------"
echo "Info: Configured target hosts"
@@ -90,7 +90,7 @@ fi
#-------------------------------------------------------------------------------
echo "Info: Setting up target hosts for openstack-ansible"
echo "-----------------------------------------------------------------------"
-ssh root@$OPNFV_HOST_IP "set -o pipefail; openstack-ansible ${XCI_ANSIBLE_VERBOSITY} \
+ssh root@$OPNFV_HOST_IP "set -o pipefail; openstack-ansible \
releng-xci/.cache/repos/openstack-ansible/playbooks/setup-hosts.yml | tee setup-hosts.log "
scp root@$OPNFV_HOST_IP:~/setup-hosts.log $LOG_PATH/setup-hosts.log
echo "-----------------------------------------------------------------------"
@@ -112,7 +112,7 @@ echo "Info: Set up target hosts for openstack-ansible successfuly"
echo "Info: Gathering facts"
echo "-----------------------------------------------------------------------"
ssh root@$OPNFV_HOST_IP "set -o pipefail; cd releng-xci/.cache/repos/openstack-ansible/playbooks; \
- ansible ${XCI_ANSIBLE_VERBOSITY} -m setup -a gather_subset=network,hardware,virtual all"
+ ansible -m setup -a gather_subset=network,hardware,virtual all"
echo "-----------------------------------------------------------------------"
#-------------------------------------------------------------------------------
@@ -123,15 +123,10 @@ echo "-----------------------------------------------------------------------"
echo "Info: Setting up infrastructure"
echo "-----------------------------------------------------------------------"
echo "xci: running ansible playbook setup-infrastructure.yml"
-ssh root@$OPNFV_HOST_IP "set -o pipefail; openstack-ansible ${XCI_ANSIBLE_VERBOSITY} \
+ssh root@$OPNFV_HOST_IP "set -o pipefail; openstack-ansible \
releng-xci/.cache/repos/openstack-ansible/playbooks/setup-infrastructure.yml | tee setup-infrastructure.log"
scp root@$OPNFV_HOST_IP:~/setup-infrastructure.log $LOG_PATH/setup-infrastructure.log
echo "-----------------------------------------------------------------------"
-# check the log to see if we have any error
-if grep -q 'failed=1\|unreachable=1' $LOG_PATH/setup-infrastructure.log; then
- echo "Error: OpenStack node setup failed!"
- exit 1
-fi
#-------------------------------------------------------------------------------
# Verify database cluster
@@ -157,18 +152,30 @@ echo "Info: Database cluster verification successful!"
#-------------------------------------------------------------------------------
echo "Info: Installing OpenStack on target hosts"
echo "-----------------------------------------------------------------------"
-ssh root@$OPNFV_HOST_IP "set -o pipefail; openstack-ansible ${XCI_ANSIBLE_VERBOSITY} \
+ssh root@$OPNFV_HOST_IP "set -o pipefail; openstack-ansible \
releng-xci/.cache/repos/openstack-ansible/playbooks/setup-openstack.yml | tee opnfv-setup-openstack.log"
scp root@$OPNFV_HOST_IP:~/opnfv-setup-openstack.log $LOG_PATH/opnfv-setup-openstack.log
echo "-----------------------------------------------------------------------"
-# check the log to see if we have any error
-if grep -q 'failed=1\|unreachable=1' $LOG_PATH/opnfv-setup-openstack.log; then
- echo "Error: OpenStack installation failed!"
- exit 1
-fi
+echo
echo "Info: OpenStack installation is successfully completed!"
#-------------------------------------------------------------------------------
+# Execute post-installation tasks
+#-------------------------------------------------------------------------------
+# Playbook post-deployment.yml is used to execute any post-deployment tasks that
+# are required for the scenario under test.
+#-------------------------------------------------------------------------------
+echo "-----------------------------------------------------------------------"
+echo "Info: Running post-deployment scenario role"
+echo "-----------------------------------------------------------------------"
+cd $OSA_XCI_PLAYBOOKS
+ansible-playbook ${XCI_ANSIBLE_PARAMS} -i ${XCI_PLAYBOOKS}/dynamic_inventory.py \
+ post-deployment.yml
+echo "-----------------------------------------------------------------------"
+echo
+echo "Info: Post-deployment scenario role execution done"
+
+#-------------------------------------------------------------------------------
# - Getting OpenStack login information
#-------------------------------------------------------------------------------
echo "Info: Openstack login details"
diff --git a/xci/installer/osa/files/aio/flavor-vars.yml b/xci/installer/osa/files/aio/flavor-vars.yml
deleted file mode 100644
index 6ac1e0fe..00000000
--- a/xci/installer/osa/files/aio/flavor-vars.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-# this file is added intentionally in order to simplify putting files in place
-# in future, it might contain vars specific to this flavor
diff --git a/xci/installer/osa/files/aio/inventory b/xci/installer/osa/files/aio/inventory
deleted file mode 100644
index 9a3dd9ee..00000000
--- a/xci/installer/osa/files/aio/inventory
+++ /dev/null
@@ -1,2 +0,0 @@
-[opnfv]
-opnfv ansible_ssh_host=192.168.122.2
diff --git a/xci/installer/osa/files/ansible-role-requirements.yml b/xci/installer/osa/files/ansible-role-requirements.yml
index 195244a0..e787aff5 100644
--- a/xci/installer/osa/files/ansible-role-requirements.yml
+++ b/xci/installer/osa/files/ansible-role-requirements.yml
@@ -7,180 +7,180 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# these versions are based on the osa commit 7b3aac28a0a87e5966527829f6b0abcbc2303cc7 on 2017-12-11
-# https://review.openstack.org/gitweb?p=openstack/openstack-ansible.git;a=commit;h=7b3aac28a0a87e5966527829f6b0abcbc2303cc7
+# these versions are based on the osa commit e41b0c40501ea8906fcbdcc7d37ff6ef0cd5cf02 on 2018-12-11
+# https://git.openstack.org/cgit/openstack/openstack-ansible/commit/?h=refs/heads/stable/rocky&id=e41b0c40501ea8906fcbdcc7d37ff6ef0cd5cf02
- name: ansible-hardening
scm: git
src: https://git.openstack.org/openstack/ansible-hardening
- version: 46a94c72518f83d27b25a5fa960dde7130956215
+ version: 14e6bb6a411b6b03bf258144be66845a5831705c
- name: apt_package_pinning
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-apt_package_pinning
- version: eba07d7dd7962d90301c49fc088551f9b35f367a
+ version: 4b2584d699c79ac65acfeb2157a97327df6f0fd6
- name: pip_install
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-pip_install
- version: 32c27505c6e0ee00ea0fb4a1c62240c60f17a0e3
+ version: 671e7129ad3dcf20bdda942842f9f76203bf5a5e
- name: galera_client
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-galera_client
- version: 9a8302cbba24ea4e5907567e5f93e874d30d79df
+ version: 6dbac51e5b74ffdee429375f6c22739e7a5ef017
- name: galera_server
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-galera_server
- version: f2bfbd38513ac8d61ba4e02a4d5ef6cbbca259cc
+ version: 7a7036f6d15ce3117a925217b66cba806034bb96
- name: ceph_client
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-ceph_client
- version: 34a04f7b24c80297866bc5ab56618e2211b1d5f9
+ version: 278aaca502533b33b9714393e47b536654055c58
- name: haproxy_server
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-haproxy_server
- version: 9966fd96fede46c3b00c9e069e402eae90c66f17
+ version: 6bc259471283162b3cb8ec0c4bc736f81254d050
- name: keepalived
scm: git
src: https://github.com/evrardjp/ansible-keepalived
- version: 5deafcab39de162ac1550c58246963974e8dcf4e
+ version: 64764d25ab868417f1138a7b9605f2eb94cbfd02
- name: lxc_container_create
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-lxc_container_create
- version: 68f81c679be88577633f98e8b9252a62bdcef754
+ version: 14a74f2fb60fa7865cf34f75e3196e802847b9d1
- name: lxc_hosts
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-lxc_hosts
- version: 6b529de0315fe6cd12f6e78c00a5f2f2d3a01e28
+ version: 83e20af591b00fc796eba0e0e1c7650faaa20cd7
- name: memcached_server
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-memcached_server
- version: ae6f721dc0342e1e7b45ff2448ab51f7539dc01f
+ version: e058c81a44859c7bcd3eeaac49a8f25b423e38a4
- name: openstack_hosts
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-openstack_hosts
- version: 05c7f09d181de1809fd596cc0d879c49e3f86bbf
+ version: 0028cedcccc4913bd1c604404c84be16164d1fe5
- name: os_keystone
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_keystone
- version: cd9d4ef7d8614d241fa40ba33c1c205fd2b47fa1
+ version: 5a54cc6ba50875c4068e4cdfe3cb23ae1603e257
- name: openstack_openrc
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-openstack_openrc
- version: d594c2debc249daa5b7f6f2890f546093efd1ee5
+ version: 805ef5349db7d8af0132b546ff56a36ec80ea7db
- name: os_aodh
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_aodh
- version: ce871dee75511f94bfd24dde8f97e573cf6d3ead
+ version: 9b8d7483d69e60f4ae71ceb6a3336ff81f355c38
- name: os_barbican
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_barbican
- version: c3e191037d0978479e3cb95a59b2986adab28c69
+ version: f9ce44edb809c92735fa093334fa1d79cc538126
- name: os_ceilometer
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_ceilometer
- version: 55bb04eaad4dd5c7fdad742b3557dc30dc9d45bf
+ version: 221dcccfef3efa1a187678f71c59d81d7e930a92
- name: os_cinder
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_cinder
- version: 536dd3446e0fc7fc68ab42b982ac9affc4215787
+ version: a824d8d4dc6de6563f186449838e94c69a869e02
+- name: os_congress
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-os_congress
+ version: 0e6ccb63dba466bb1b7a11e94db7a420c716c06d
- name: os_designate
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_designate
- version: a65d7a3394aef340ff94587dd0bb48133ed00763
+ version: 74c33e9788607f772d8402c4f5cfc79eb379278b
- name: os_glance
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_glance
- version: 43aa00424f233a6125f7a9216cec42da1d8ca4c5
+ version: 7ec6a11b98715530e3cd5adbf682c2834e3122a8
- name: os_gnocchi
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_gnocchi
- version: b1f7574dc529f8298a983d8d0e09520e90b571a8
+ version: db881f143223723b38f5d197e8e4b6dd4e057c6f
- name: os_heat
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_heat
- version: 0b3eb9348d55d6b1cf077a2c45b297f9a1be730d
+ version: 14b8927123aa9b0cf47f365c1ab9f82147ce4bdc
- name: os_horizon
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_horizon
- version: da72526dc1757688ecec8914344e330aaa0be720
+ version: b088034eeaa73ac781fe271588ba03871c88118e
- name: os_ironic
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_ironic
- version: a90558f7a216e5e661c5d1a4048dbe30559542d1
+ version: 6ecf38f1296080a33366528ad40d513539138925
- name: os_magnum
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_magnum
- version: 736d1707339cb99396578018a6bda7af9184fb02
-- name: os_molteniron
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_molteniron
- version: 9b4c104a252c453bcd798fec9dbae7224b3d8001
+ version: 316f22626d242e33ce56fad367ef3570e0d8ab8b
- name: os_neutron
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_neutron
- version: 962cd92243641092412b6ef09a41bbf5e698c4a1
+ version: 3032836715b4055041554583fa2ed685ab076c25
- name: os_nova
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_nova
- version: 53df001c9034f198b9349def3c9158f8bbe43ff3
+ version: 9db5bf5ab6f82c1947d05a1ec7cd6e3ef304760f
- name: os_octavia
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_octavia
- version: 02ad3c68802287a1ba54cf10de085dcd14c324d8
+ version: 508ea6d834153d0eb6da5bd32d10472f483c6dfa
- name: os_rally
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_rally
- version: bc9075dba204e64d11cb397017d32b0c2297eed0
+ version: 8e98112b858ecffbb92c6ae342237af87416b7fa
- name: os_sahara
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_sahara
- version: 3c45121050ba21bd284f054d7b82a338f347157f
+ version: ed7aa2d64a2ea3508c7d88a9e869524fdf0e9353
- name: os_swift
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_swift
- version: f31217bb097519f15755f2337165657d7eb6b014
+ version: a88edf84964819870ef990d25b3bfa514186249a
- name: os_tacker
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_tacker
- version: d95902891c4e6200510509c066006c921cfff8df
+ version: bbce8657c13d2545bf632eb81bb78329a5479798
- name: os_tempest
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_tempest
- version: 866dedbcba180ca82c3c93823cef3db2d3241d1b
+ version: 08341f4a19b2ed2231b790496c9f7cf2b4eda2e6
- name: os_trove
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_trove
- version: b425fa316999d0863a44126f239a33d8c3fec3a6
+ version: eaca0137de0d3d7bd57a68eecfecf52e3171f591
- name: plugins
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-plugins
- version: d2f60237761646968a4b39b15185fb5c84e7386f
+ version: a84ae0d744047fe41a0c028213de8daa52f72aee
- name: rabbitmq_server
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-rabbitmq_server
- version: 311f76890c8f99cb0b46958775d84de614609323
+ version: deccf93bdda1aa873b956418168368284509c99b
- name: repo_build
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-repo_build
- version: 59a3f444c263235d8f0f584da8768656179fa02a
+ version: 630a6dfdcb46ba719ddb7fd7a4875259c5602b15
- name: repo_server
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-repo_server
- version: 7889f37cdd2a90b4b98e8ef2e886f1fd4950fc0a
+ version: dd143b381b2fb94a3ba435f951e8b9338353a48d
- name: rsyslog_client
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-rsyslog_client
- version: 310cfe9506d3742be10790533ad0d16100d81498
+ version: ed5e61c8bc2aabb905918bb2751ae985b1cfe229
- name: rsyslog_server
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-rsyslog_server
- version: ba7bb699c0c874c7977add86ca308ca18be8f9a8
+ version: 9318bafbe60fed5f026c1e216d693bce745b9f99
- name: sshd
scm: git
src: https://github.com/willshersystems/ansible-sshd
- version: 537b9b2bc2fd7f23301222098344727f8161993c
+ version: d2ba81107ade1cf53c8b93590465c21ad2bc4530
- name: bird
scm: git
src: https://github.com/logan2211/ansible-bird
- version: 5033c412398cf6f98097a9ac274a6f12810c807e
+ version: 0fdb4848b5aca949ffade9be5a2ae254979e673e
- name: etcd
scm: git
src: https://github.com/logan2211/ansible-etcd
@@ -188,40 +188,44 @@
- name: unbound
scm: git
src: https://github.com/logan2211/ansible-unbound
- version: 7be67d6b60718896f0c17a7d4a14b912f72a59ae
+ version: 3bb7414f46b757e943507b65ca4c9f1080a008b0
- name: resolvconf
scm: git
src: https://github.com/logan2211/ansible-resolvconf
- version: d48dd3eea22094b6ecc6aa6ea07279c8e68e28b5
-- name: ceph-defaults
+ version: '1.4'
+- name: ceph-ansible
scm: git
- src: https://github.com/ceph/ansible-ceph-defaults
- version: 19884aaac1bc58921952af955c66602ccca89e93
-- name: ceph-common
+ src: https://github.com/ceph/ceph-ansible
+ version: a5aca6ebbc341feb34b9ec0d73e16aeeedae63ac
+- name: opendaylight
scm: git
- src: https://github.com/ceph/ansible-ceph-common
- version: 08804bd46dff42ebff64e7f27c86f2265fe4d6fc
-- name: ceph-config
+ src: https://github.com/opendaylight/integration-packaging-ansible-opendaylight
+ version: 0aebbc250b34ac5ac14b37bdf9b1a2e1cfaa5a76
+- name: haproxy_endpoints
scm: git
- src: https://github.com/ceph/ansible-ceph-config
- version: e070537f443c3ae5d262835c8b0a7a992850283b
-- name: ceph-mon
+ src: https://github.com/logan2211/ansible-haproxy-endpoints
+ version: 8e3a24a35beb16d717072dc83895c5a1f92689fb
+- name: nspawn_container_create
+ src: https://git.openstack.org/openstack/openstack-ansible-nspawn_container_create
scm: git
- src: https://github.com/ceph/ansible-ceph-mon
- version: 309b7e339e057d56d9dd38bdd61998b900f45ba8
-- name: ceph-mgr
+ version: 2bcf03f1cca550731789d5b53c7d0806ef5f5ff7
+- name: nspawn_hosts
+ src: https://git.openstack.org/openstack/openstack-ansible-nspawn_hosts
scm: git
- src: https://github.com/ceph/ansible-ceph-mgr
- version: fe8f0864500b54cc7c9f897b871ba2cdf1d37096
-- name: ceph-osd
+ version: f69e101b9191682986272b766747f107b8a7a136
+- name: systemd_service
+ src: https://git.openstack.org/openstack/ansible-role-systemd_service
scm: git
- src: https://github.com/ceph/ansible-ceph-osd
- version: e022d6773bc827e75ad051b429dec786a75d68f4
-- name: opendaylight
+ version: a085a50c338b2eeaa87ed50eaaa22564d7c12968
+- name: systemd_mount
+ src: https://git.openstack.org/openstack/ansible-role-systemd_mount
scm: git
- src: https://github.com/opendaylight/integration-packaging-ansible-opendaylight
- version: ef1367ad15ad10ac8cc9416f6fd49fd8b350d377
-- name: haproxy_endpoints
+ version: ee6263b3ce6502712ff4d6fb56474066df1773e4
+- name: systemd_networkd
+ src: https://git.openstack.org/openstack/ansible-role-systemd_networkd
scm: git
- src: https://github.com/logan2211/ansible-haproxy-endpoints
- version: 49901861b16b8afaa9bccdbc649ac956610ff22b
+ version: b024d0a3d97caf06b962a1f19450511b108dc5eb
+- name: python_venv_build
+ src: https://git.openstack.org/openstack/ansible-role-python_venv_build
+ scm: git
+ version: 5fdd8e00633f28606fc531a449d741e8c772a9fc
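
A requirements file in this scm/src/version format is consumable directly by ansible-galaxy, and the pinned SHAs keep the role set reproducible across runs. For example (target role path assumed):

    ansible-galaxy install --force -p /etc/ansible/roles \
        -r xci/installer/osa/files/ansible-role-requirements.yml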
diff --git a/xci/installer/osa/files/global-requirement-pins.txt b/xci/installer/osa/files/global-requirement-pins.txt
index aa3b1169..ec198a79 100644
--- a/xci/installer/osa/files/global-requirement-pins.txt
+++ b/xci/installer/osa/files/global-requirement-pins.txt
@@ -5,10 +5,17 @@
#
# Use this file with caution!
#
+
+### Pinned for gnocchi's dependency pycradox
+# https://github.com/sileht/pycradox/commit/2209f89fd65ecf31bea8eac6405acce2543e7b84
+Cython<0.28
+
###
### These are pinned to ensure exactly the same behaviour forever! ###
### These pins are updated through the sources-branch-updater script ###
###
-pip==9.0.1
-setuptools==36.6.0
-wheel==0.30.0
+# Bumping pip to version 10 fails in tempest when trying to install
+# packages with an empty list.
+pip==18.0
+setuptools==40.0.0
+wheel==0.31.1
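
Pins like these are meant to be applied before anything else touches the virtualenvs, so that every later pip invocation sees the same toolchain. Done by hand, the equivalent would be:

    pip install pip==18.0 setuptools==40.0.0 wheel==0.31.1 'Cython<0.28'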
diff --git a/xci/installer/osa/files/ha/flavor-vars.yml b/xci/installer/osa/files/ha/flavor-vars.yml
deleted file mode 100644
index 167502c9..00000000
--- a/xci/installer/osa/files/ha/flavor-vars.yml
+++ /dev/null
@@ -1,39 +0,0 @@
----
-host_info: {
- 'opnfv': {
- 'VLAN_IP': '192.168.122.2',
- 'MGMT_IP': '172.29.236.10',
- 'VXLAN_IP': '172.29.240.10',
- 'STORAGE_IP': '172.29.244.10'
- },
- 'controller00': {
- 'VLAN_IP': '192.168.122.3',
- 'MGMT_IP': '172.29.236.11',
- 'VXLAN_IP': '172.29.240.11',
- 'STORAGE_IP': '172.29.244.11'
- },
- 'controller01': {
- 'VLAN_IP': '192.168.122.4',
- 'MGMT_IP': '172.29.236.12',
- 'VXLAN_IP': '172.29.240.12',
- 'STORAGE_IP': '172.29.244.12'
- },
- 'controller02': {
- 'VLAN_IP': '192.168.122.5',
- 'MGMT_IP': '172.29.236.13',
- 'VXLAN_IP': '172.29.240.13',
- 'STORAGE_IP': '172.29.244.13'
- },
- 'compute00': {
- 'VLAN_IP': '192.168.122.6',
- 'MGMT_IP': '172.29.236.14',
- 'VXLAN_IP': '172.29.240.14',
- 'STORAGE_IP': '172.29.244.14'
- },
- 'compute01': {
- 'VLAN_IP': '192.168.122.7',
- 'MGMT_IP': '172.29.236.15',
- 'VXLAN_IP': '172.29.240.15',
- 'STORAGE_IP': '172.29.244.15'
- }
-}
diff --git a/xci/installer/osa/files/ha/inventory b/xci/installer/osa/files/ha/inventory
deleted file mode 100644
index 94b1d074..00000000
--- a/xci/installer/osa/files/ha/inventory
+++ /dev/null
@@ -1,11 +0,0 @@
-[opnfv]
-opnfv ansible_ssh_host=192.168.122.2
-
-[controller]
-controller00 ansible_ssh_host=192.168.122.3
-controller01 ansible_ssh_host=192.168.122.4
-controller02 ansible_ssh_host=192.168.122.5
-
-[compute]
-compute00 ansible_ssh_host=192.168.122.6
-compute01 ansible_ssh_host=192.168.122.7
diff --git a/xci/installer/osa/files/ha/openstack_user_config.yml b/xci/installer/osa/files/ha/openstack_user_config.yml
index 360aa5cb..dc2ec183 100644
--- a/xci/installer/osa/files/ha/openstack_user_config.yml
+++ b/xci/installer/osa/files/ha/openstack_user_config.yml
@@ -77,18 +77,18 @@ shared-infra_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# repository (apt cache, python packages, etc)
repo-infra_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# load balancer
# Ideally the load balancer should not use the Infrastructure hosts.
@@ -97,9 +97,9 @@ haproxy_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# rsyslog server
# log_hosts:
@@ -115,18 +115,18 @@ identity_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# cinder api services
storage-infra_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# glance
# The settings here are repeated for each infra host.
@@ -139,27 +139,27 @@ image_hosts:
container_vars:
limit_container_types: glance
glance_nfs_client:
- - server: "172.29.244.14"
+ - server: "172.29.244.12"
remote_path: "/images"
local_path: "/var/lib/glance/images"
type: "nfs"
options: "_netdev,auto"
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
container_vars:
limit_container_types: glance
glance_nfs_client:
- - server: "172.29.244.14"
+ - server: "172.29.244.12"
remote_path: "/images"
local_path: "/var/lib/glance/images"
type: "nfs"
options: "_netdev,auto"
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
container_vars:
limit_container_types: glance
glance_nfs_client:
- - server: "172.29.244.14"
+ - server: "172.29.244.12"
remote_path: "/images"
local_path: "/var/lib/glance/images"
type: "nfs"
@@ -170,43 +170,43 @@ compute-infra_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# heat
orchestration_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# horizon
dashboard_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# neutron server, agents (L3, etc)
network_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# nova hypervisors
compute_hosts:
compute00:
- ip: 172.29.236.14
+ ip: 172.29.236.12
compute01:
- ip: 172.29.236.15
+ ip: 172.29.236.13
# cinder volume hosts (NFS-backed)
# The settings here are repeated for each infra host.
@@ -225,10 +225,10 @@ storage_hosts:
nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
nfs_shares_config: /etc/cinder/nfs_shares
shares:
- - ip: "172.29.244.14"
+ - ip: "172.29.244.12"
share: "/volumes"
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
container_vars:
cinder_backends:
limit_container_types: cinder_volume
@@ -238,10 +238,10 @@ storage_hosts:
nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
nfs_shares_config: /etc/cinder/nfs_shares
shares:
- - ip: "172.29.244.14"
+ - ip: "172.29.244.12"
share: "/volumes"
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
container_vars:
cinder_backends:
limit_container_types: cinder_volume
@@ -251,5 +251,5 @@ storage_hosts:
nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
nfs_shares_config: /etc/cinder/nfs_shares
shares:
- - ip: "172.29.244.14"
+ - ip: "172.29.244.12"
share: "/volumes"
diff --git a/xci/installer/osa/files/ha/user_variables.yml b/xci/installer/osa/files/ha/user_variables.yml
index 72960a01..8c2e9f0c 100644
--- a/xci/installer/osa/files/ha/user_variables.yml
+++ b/xci/installer/osa/files/ha/user_variables.yml
@@ -21,6 +21,9 @@
# # Debug and Verbose options.
debug: false
+# package_state: present should give us a better chance to finish
+package_state: present
+
# Allow root logins
security_sshd_permit_root_login: yes
@@ -62,6 +65,7 @@ barbican_wsgi_processes: 2
barbican_wsgi_threads: 1
## Cinder
+cinder_volume_clear: none
cinder_wsgi_processes_max: 2
cinder_wsgi_threads: 1
cinder_wsgi_buffer_size: 16384
@@ -159,7 +163,6 @@ openrc_nova_endpoint_type: "publicURL"
openrc_os_endpoint_type: "publicURL"
openrc_clouds_yml_interface: "public"
openrc_region_name: RegionOne
-haproxy_user_ssl_cert: "/etc/ssl/certs/xci.crt"
-haproxy_user_ssl_key: "/etc/ssl/private/xci.key"
+openrc_insecure: true
keystone_service_adminuri_insecure: true
keystone_service_internaluri_insecure: true
diff --git a/xci/installer/osa/files/mini/flavor-vars.yml b/xci/installer/osa/files/mini/flavor-vars.yml
deleted file mode 100644
index 0d446ba2..00000000
--- a/xci/installer/osa/files/mini/flavor-vars.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-host_info: {
- 'opnfv': {
- 'VLAN_IP': '192.168.122.2',
- 'MGMT_IP': '172.29.236.10',
- 'VXLAN_IP': '172.29.240.10',
- 'STORAGE_IP': '172.29.244.10'
- },
- 'controller00': {
- 'VLAN_IP': '192.168.122.3',
- 'MGMT_IP': '172.29.236.11',
- 'VXLAN_IP': '172.29.240.11',
- 'STORAGE_IP': '172.29.244.11'
- },
- 'compute00': {
- 'VLAN_IP': '192.168.122.4',
- 'MGMT_IP': '172.29.236.12',
- 'VXLAN_IP': '172.29.240.12',
- 'STORAGE_IP': '172.29.244.12'
- },
-}
diff --git a/xci/installer/osa/files/mini/inventory b/xci/installer/osa/files/mini/inventory
deleted file mode 100644
index eb73e5e3..00000000
--- a/xci/installer/osa/files/mini/inventory
+++ /dev/null
@@ -1,8 +0,0 @@
-[opnfv]
-opnfv ansible_ssh_host=192.168.122.2
-
-[controller]
-controller00 ansible_ssh_host=192.168.122.3
-
-[compute]
-compute00 ansible_ssh_host=192.168.122.4
diff --git a/xci/installer/osa/files/mini/user_variables.yml b/xci/installer/osa/files/mini/user_variables.yml
index 9ec9e405..b4d847bc 100644
--- a/xci/installer/osa/files/mini/user_variables.yml
+++ b/xci/installer/osa/files/mini/user_variables.yml
@@ -21,6 +21,9 @@
# # Debug and Verbose options.
debug: false
+# package_state: present should give us a better chance to finish
+package_state: present
+
# Allow root logins
security_sshd_permit_root_login: yes
@@ -62,6 +65,7 @@ barbican_wsgi_processes: 2
barbican_wsgi_threads: 1
## Cinder
+cinder_volume_clear: none
cinder_wsgi_processes_max: 2
cinder_wsgi_threads: 1
cinder_wsgi_buffer_size: 16384
@@ -159,7 +163,6 @@ openrc_nova_endpoint_type: "publicURL"
openrc_os_endpoint_type: "publicURL"
openrc_clouds_yml_interface: "public"
openrc_region_name: RegionOne
-haproxy_user_ssl_cert: "/etc/ssl/certs/xci.crt"
-haproxy_user_ssl_key: "/etc/ssl/private/xci.key"
+openrc_insecure: true
keystone_service_adminuri_insecure: true
keystone_service_internaluri_insecure: true
diff --git a/xci/installer/osa/files/noha/flavor-vars.yml b/xci/installer/osa/files/noha/flavor-vars.yml
deleted file mode 100644
index 3c69a34b..00000000
--- a/xci/installer/osa/files/noha/flavor-vars.yml
+++ /dev/null
@@ -1,27 +0,0 @@
----
-host_info: {
- 'opnfv': {
- 'VLAN_IP': '192.168.122.2',
- 'MGMT_IP': '172.29.236.10',
- 'VXLAN_IP': '172.29.240.10',
- 'STORAGE_IP': '172.29.244.10'
- },
- 'controller00': {
- 'VLAN_IP': '192.168.122.3',
- 'MGMT_IP': '172.29.236.11',
- 'VXLAN_IP': '172.29.240.11',
- 'STORAGE_IP': '172.29.244.11'
- },
- 'compute00': {
- 'VLAN_IP': '192.168.122.4',
- 'MGMT_IP': '172.29.236.12',
- 'VXLAN_IP': '172.29.240.12',
- 'STORAGE_IP': '172.29.244.12'
- },
- 'compute01': {
- 'VLAN_IP': '192.168.122.5',
- 'MGMT_IP': '172.29.236.13',
- 'VXLAN_IP': '172.29.240.13',
- 'STORAGE_IP': '172.29.244.13'
- }
-}
diff --git a/xci/installer/osa/files/noha/inventory b/xci/installer/osa/files/noha/inventory
deleted file mode 100644
index b4f9f6d0..00000000
--- a/xci/installer/osa/files/noha/inventory
+++ /dev/null
@@ -1,9 +0,0 @@
-[opnfv]
-opnfv ansible_ssh_host=192.168.122.2
-
-[controller]
-controller00 ansible_ssh_host=192.168.122.3
-
-[compute]
-compute00 ansible_ssh_host=192.168.122.4
-compute01 ansible_ssh_host=192.168.122.5
diff --git a/xci/installer/osa/files/noha/user_variables.yml b/xci/installer/osa/files/noha/user_variables.yml
index 66573428..5e7ed83c 100644
--- a/xci/installer/osa/files/noha/user_variables.yml
+++ b/xci/installer/osa/files/noha/user_variables.yml
@@ -21,6 +21,9 @@
# # Debug and Verbose options.
debug: false
+# package_state: present should give us a better chance to finish
+package_state: present
+
# Allow root logins
security_sshd_permit_root_login: yes
@@ -62,6 +65,7 @@ barbican_wsgi_processes: 2
barbican_wsgi_threads: 1
## Cinder
+cinder_volume_clear: none
cinder_wsgi_processes_max: 2
cinder_wsgi_threads: 1
cinder_wsgi_buffer_size: 16384
@@ -159,7 +163,6 @@ openrc_nova_endpoint_type: "publicURL"
openrc_os_endpoint_type: "publicURL"
openrc_clouds_yml_interface: "public"
openrc_region_name: RegionOne
-haproxy_user_ssl_cert: "/etc/ssl/certs/xci.crt"
-haproxy_user_ssl_key: "/etc/ssl/private/xci.key"
+openrc_insecure: true
keystone_service_adminuri_insecure: true
keystone_service_internaluri_insecure: true
diff --git a/xci/installer/osa/files/openstack_services.yml b/xci/installer/osa/files/openstack_services.yml
index 86501634..64718e33 100644
--- a/xci/installer/osa/files/openstack_services.yml
+++ b/xci/installer/osa/files/openstack_services.yml
@@ -31,192 +31,270 @@
## Global Requirements
requirements_git_repo: https://git.openstack.org/openstack/requirements
-requirements_git_install_branch: 691711c0effddd9cbaaadba3d494c15bc422fdd5 # HEAD of "master" as of 24.11.2017
+requirements_git_install_branch: 32f8fa388d3b8367320a3308a350f28254a82d65 # HEAD of "stable/rocky" as of 11.12.2018
+requirements_git_track_branch: stable/rocky
## Aodh service
aodh_git_repo: https://git.openstack.org/openstack/aodh
-aodh_git_install_branch: 359043dc774be847cb539d18d13e336d40453e72 # HEAD of "master" as of 24.11.2017
+aodh_git_install_branch: ae5e710cd5ade867ebd0e6666bad95f82d130210 # HEAD of "stable/rocky" as of 11.12.2018
aodh_git_project_group: aodh_all
+aodh_git_track_branch: stable/rocky
## Barbican service
barbican_git_repo: https://git.openstack.org/openstack/barbican
-barbican_git_install_branch: 5617d605f2e12840933e4a9d6417912cdbb811d5 # HEAD of "master" as of 24.11.2017
+barbican_git_install_branch: 0a1a9917e791d0c6fc8534a052700af5f5cbe9d0 # HEAD of "stable/rocky" as of 11.12.2018
barbican_git_project_group: barbican_all
+barbican_git_track_branch: stable/rocky
## Ceilometer service
ceilometer_git_repo: https://git.openstack.org/openstack/ceilometer
-ceilometer_git_install_branch: bd464f1f572ba150f52e284de430d13045dc6c18 # HEAD of "master" as of 24.11.2017
-ceilometer_git_project_group: ceilometer_all
+ceilometer_git_install_branch: 018ff32fe0200a041297c386eb8b381f1bec0e71 # HEAD of "stable/rocky" as of 11.12.2018
+ceilometer_git_project_group: all
+ceilometer_git_track_branch: stable/rocky
## Cinder service
cinder_git_repo: https://git.openstack.org/openstack/cinder
-cinder_git_install_branch: 80558687d0fa55f2adf699e7369ebe3dbc3591bf # HEAD of "master" as of 24.11.2017
+cinder_git_install_branch: 8dbf5d7882a6271514a3075a02cd080e44b709d5 # HEAD of "stable/rocky" as of 11.12.2018
cinder_git_project_group: cinder_all
+cinder_git_track_branch: stable/rocky
## Designate service
designate_git_repo: https://git.openstack.org/openstack/designate
-designate_git_install_branch: 2f75586379e8d611f37e06d385e79d0bc2c84ca1 # HEAD of "master" as of 24.11.2017
+designate_git_install_branch: af1bb8a36a704bb1a226fe5154f828e152ef23e1 # HEAD of "stable/rocky" as of 11.12.2018
designate_git_project_group: designate_all
+designate_git_track_branch: stable/rocky
## Horizon Designate dashboard plugin
designate_dashboard_git_repo: https://git.openstack.org/openstack/designate-dashboard
-designate_dashboard_git_install_branch: 571e127e5f853aa4dbdd377d831e32f8ff81eafe # HEAD of "master" as of 24.11.2017
+designate_dashboard_git_install_branch: faa67c87ad3cd5563da722f13b3adaee5bfe350f # HEAD of "stable/rocky" as of 11.12.2018
designate_dashboard_git_project_group: horizon_all
+designate_dashboard_git_track_branch: stable/rocky
## Dragonflow service
+# please update the branch (sha) and the comment when stable/rocky is branched on this repo.
dragonflow_git_repo: https://git.openstack.org/openstack/dragonflow
-dragonflow_git_install_branch: 7bf00cf315659252f03f6c65f6159a924da6f978 # HEAD of "master" as of 24.11.2017
+dragonflow_git_install_branch: 945b1e368c651ffa3655f42df724d9f13a7b6b96 # FROZEN HEAD of "master" as of 17.08.2018
dragonflow_git_project_group: neutron_all
+dragonflow_git_track_branch: None
## Glance service
glance_git_repo: https://git.openstack.org/openstack/glance
-glance_git_install_branch: d88bd2ca8ef95810441dae640d3c6b9e79eca353 # HEAD of "master" as of 24.11.2017
+glance_git_install_branch: 4982c24f0aeb64f9d20159e543a90e31fc325dce # HEAD of "stable/rocky" as of 11.12.2018
glance_git_project_group: glance_all
+glance_git_track_branch: stable/rocky
## Heat service
heat_git_repo: https://git.openstack.org/openstack/heat
-heat_git_install_branch: f4a06c2a92a361dbb401107b4ea1ab60972f473e # HEAD of "master" as of 24.11.2017
+heat_git_install_branch: 98eea44d5d91b74e1ab28c052e4fbc4b533d5f83 # HEAD of "stable/rocky" as of 11.12.2018
heat_git_project_group: heat_all
+heat_git_track_branch: stable/rocky
+## Horizon Heat dashboard plugin
+# please update the branch (sha) and the comment when stable/rocky is branched on this repo.
+heat_dashboard_git_repo: https://git.openstack.org/openstack/heat-dashboard
+heat_dashboard_git_install_branch: bc7f5068bbb6f7974eaffa2d865a859ff0fd0069 # FROZEN HEAD of "master" as of 17.08.2018
+heat_dashboard_git_project_group: horizon_all
+heat_dashboard_git_track_branch: None
## Horizon service
horizon_git_repo: https://git.openstack.org/openstack/horizon
-horizon_git_install_branch: 846d269d90e01e463b510474040e0ad984a5679f # HEAD of "master" as of 24.11.2017
+horizon_git_install_branch: 0ccfce882749998f3a6a7f9bfc6fa74ea346ca53 # HEAD of "stable/rocky" as of 11.12.2018
horizon_git_project_group: horizon_all
+horizon_git_track_branch: stable/rocky
## Horizon Ironic dashboard plugin
ironic_dashboard_git_repo: https://git.openstack.org/openstack/ironic-ui
-ironic_dashboard_git_install_branch: d6199d51171e6c8700663b0b0618ee0adf033b4d # HEAD of "master" as of 24.11.2017
+ironic_dashboard_git_install_branch: c700f3a613f3d78875caf7588e7bdf42a5db83cb # HEAD of "stable/rocky" as of 11.12.2018
ironic_dashboard_git_project_group: horizon_all
+ironic_dashboard_git_track_branch: stable/rocky
## Horizon Magnum dashboard plugin
magnum_dashboard_git_repo: https://git.openstack.org/openstack/magnum-ui
-magnum_dashboard_git_install_branch: 6160d903fae9c652b459c93c218e0ea75924a85d # HEAD of "master" as of 24.11.2017
+magnum_dashboard_git_install_branch: 2e9cb253eaee45a57f07369e432369dbff8fc173 # HEAD of "stable/rocky" as of 11.12.2018
magnum_dashboard_git_project_group: horizon_all
+magnum_dashboard_git_track_branch: stable/rocky
## Horizon LBaaS dashboard plugin
neutron_lbaas_dashboard_git_repo: https://git.openstack.org/openstack/neutron-lbaas-dashboard
-neutron_lbaas_dashboard_git_install_branch: ef650294bcc7447d441e6a710c39d64e384e1b27 # HEAD of "master" as of 24.11.2017
+neutron_lbaas_dashboard_git_install_branch: 84fd20a474e8165ddbf5cf4bd14b7eb7da63ed41 # HEAD of "stable/rocky" as of 11.12.2018
neutron_lbaas_dashboard_git_project_group: horizon_all
+neutron_lbaas_dashboard_git_track_branch: stable/rocky
## Horizon FWaaS dashboard plugin
neutron_fwaas_dashboard_git_repo: https://git.openstack.org//openstack/neutron-fwaas-dashboard
-neutron_fwaas_dashboard_git_install_branch: 6de122d4753a6db24d2dc4c22a71e702ed980e82 # HEAD of "master" as of 24.11.2017
+neutron_fwaas_dashboard_git_install_branch: 4adf5599211ef90696da94b2fee3aac730f3b7bc # HEAD of "stable/rocky" as of 11.12.2018
neutron_fwaas_dashboard_git_project_group: horizon_all
+neutron_fwaas_dashboard_git_track_branch: stable/rocky
## Horizon Sahara dashboard plugin
sahara_dashboard_git_repo: https://git.openstack.org/openstack/sahara-dashboard
-sahara_dashboard_git_install_branch: 3e5c59e6229dac8b303029058fcee9d61200ebc8 # HEAD of "master" as of 24.11.2017
+sahara_dashboard_git_install_branch: 6e3f7538ce7779612d8e82b069597c06c2225a77 # HEAD of "stable/rocky" as of 11.12.2018
sahara_dashboard_git_project_group: horizon_all
+sahara_dashboard_git_track_branch: stable/rocky
## Keystone service
keystone_git_repo: https://git.openstack.org/openstack/keystone
-keystone_git_install_branch: 70fe4ec09b55def21361a32c8fa7f12e7c891ab1 # HEAD of "master" as of 24.11.2017
+keystone_git_install_branch: 295ccda8190b39a505c397d2f4d9e4896dc538cf # HEAD of "stable/rocky" as of 11.12.2018
keystone_git_project_group: keystone_all
+keystone_git_track_branch: stable/rocky
## Neutron service
neutron_git_repo: https://git.openstack.org/openstack/neutron
-neutron_git_install_branch: d1277c1630570ca45b490c48371e3f7e97be78c3 # HEAD of "master" as of 24.11.2017
+neutron_git_install_branch: ae2ef681403d1f103170ea70df1010f006244752 # HEAD of "stable/rocky" as of 11.12.2018
neutron_git_project_group: neutron_all
+neutron_git_track_branch: stable/rocky
neutron_lbaas_git_repo: https://git.openstack.org/openstack/neutron-lbaas
-neutron_lbaas_git_install_branch: b1123e7a759248dfa63afdf8b86aafd692572ebd # HEAD of "master" as of 24.11.2017
+neutron_lbaas_git_install_branch: 1353bad713fd97418a9984016da49df8cfa8825b # HEAD of "stable/rocky" as of 11.12.2018
neutron_lbaas_git_project_group: neutron_all
+neutron_lbaas_git_track_branch: stable/rocky
neutron_vpnaas_git_repo: https://git.openstack.org/openstack/neutron-vpnaas
-neutron_vpnaas_git_install_branch: 79e4eb81dd05588bcf68b92d46c62f0d26153542 # HEAD of "master" as of 24.11.2017
+neutron_vpnaas_git_install_branch: 0876f4dfe7e2f57305110e035efa753bfb711a3f # HEAD of "stable/rocky" as of 11.12.2018
neutron_vpnaas_git_project_group: neutron_all
+neutron_vpnaas_git_track_branch: stable/rocky
neutron_fwaas_git_repo: https://git.openstack.org/openstack/neutron-fwaas
-neutron_fwaas_git_install_branch: 74eac2ca2980e6162d9c88ee6bd48830386c392a # HEAD of "master" as of 24.11.2017
+neutron_fwaas_git_install_branch: 5ece265b65247ee81a9335d5a685fa9f0a68b0fc # HEAD of "stable/rocky" as of 11.12.2018
neutron_fwaas_git_project_group: neutron_all
+neutron_fwaas_git_track_branch: stable/rocky
neutron_dynamic_routing_git_repo: https://git.openstack.org/openstack/neutron-dynamic-routing
-neutron_dynamic_routing_git_install_branch: 183c3fa4840d22be1974534eb9e1b28b552f4a42 # HEAD of "master" as of 24.11.2017
+neutron_dynamic_routing_git_install_branch: ae3a01ca1fd6270fc27b3c6bae11afc0f17563d5 # HEAD of "stable/rocky" as of 11.12.2018
neutron_dynamic_routing_git_project_group: neutron_all
+neutron_dynamic_routing_git_track_branch: stable/rocky
+# Networking Calico is following master
networking_calico_git_repo: https://git.openstack.org/openstack/networking-calico
-networking_calico_git_install_branch: 9688df1a3d1d8b3fd9ba367e82fe6b0559416728 # HEAD of "master" as of 24.11.2017
+networking_calico_git_install_branch: 79c7e00360ddb5fd3c38e60e5bbb3399928d9172 # HEAD of "master" as of 11.12.2018
networking_calico_git_project_group: neutron_all
+networking_calico_git_track_branch: stable/rocky
+
+networking_odl_git_repo: https://git.openstack.org/openstack/networking-odl
+networking_odl_git_install_branch: 1cef1f0939a405eea4cb87e712794e8fa26b5166 # HEAD of "stable/rocky" as of 11.12.2018
+networking_odl_git_project_group: neutron_all
+networking_odl_git_track_branch: stable/rocky
+
+networking_ovn_git_repo: https://git.openstack.org/openstack/networking-ovn
+networking_ovn_git_install_branch: e077aa93b1dc244b59864236d7c673f852e4e3ba # HEAD of "stable/rocky" as of 11.12.2018
+networking_ovn_git_project_group: neutron_all
+
+# BGPVPN is frozen until further notice due to
+# https://github.com/openstack/networking-bgpvpn/commit/e9a0ea199b47f76f69545e04bdb4db44869c388b#diff-b4ef698db8ca845e5845c4618278f29a
+networking_bgpvpn_git_repo: https://git.openstack.org/openstack/networking-bgpvpn
+networking_bgpvpn_git_install_branch: 3b93ddacd390d92fb144e5660324d4da064ad9a4 # FROZEN HEAD of "stable/rocky" as of 31.03.2018
+networking_bgpvpn_git_project_group: neutron_all
+networking_bgpvpn_git_track_branch: None
+
+networking_sfc_git_repo: https://git.openstack.org/openstack/networking-sfc
+networking_sfc_git_install_branch: f0eddef3d53bbad417038f9d32b196ace2ebd0b2 # HEAD of "stable/rocky" as of 11.12.2018
+networking_sfc_git_project_group: neutron_all
+networking_sfc_git_track_branch: stable/rocky
+
## Nova service
nova_git_repo: https://git.openstack.org/openstack/nova
-nova_git_install_branch: 22a790ef45b0523e8cf2ed97d14e050431c90fd9 # HEAD of "master" as of 24.11.2017
+nova_git_install_branch: 8066142a1e381536291232250b3237e5c01ed1f4 # HEAD of "stable/rocky" as of 11.12.2018
nova_git_project_group: nova_all
+nova_git_track_branch: stable/rocky
## PowerVM Virt Driver
nova_powervm_git_repo: https://git.openstack.org/openstack/nova-powervm
-nova_powervm_git_install_branch: f2de4441e39b0f66cf31f854b228e9e7037f04de # HEAD of "master" as of 24.11.2017
+nova_powervm_git_install_branch: 984b122668161703eee33918d570c61ae9c5b1ca # HEAD of "stable/rocky" as of 11.12.2018
nova_powervm_git_project_group: nova_all
+nova_powervm_git_track_branch: stable/rocky
## LXD Virt Driver
+# please update the branch (sha) and the comment when stable/rocky is branched on this repo.
nova_lxd_git_repo: https://git.openstack.org/openstack/nova-lxd
-nova_lxd_git_install_branch: e498de603b31c189fd32a6067d45a36575b96b0a # HEAD of "master" as of 24.11.2017
+nova_lxd_git_install_branch: bc8d540c95b3209321658000fd74b0e5065a7ee2 # FROZEN HEAD of "master" as of 17.08.2018
nova_lxd_git_project_group: nova_all
+nova_lxd_git_track_branch: None
## Sahara service
sahara_git_repo: https://git.openstack.org/openstack/sahara
-sahara_git_install_branch: 395856c513b1efad82db8fa78fb1cbfe0f3a6749 # HEAD of "master" as of 24.11.2017
+sahara_git_install_branch: ddb518fd81b82308bdd01e58ebf6ed7a48c544ae # HEAD of "stable/rocky" as of 11.12.2018
sahara_git_project_group: sahara_all
+sahara_git_track_branch: stable/rocky
## Swift service
swift_git_repo: https://git.openstack.org/openstack/swift
-swift_git_install_branch: 3135878d2fe9909f49fcadeeb9cc6c6933d06127 # HEAD of "master" as of 24.11.2017
+swift_git_install_branch: 7fdf66ab70da705774a4ae9c328a3e762bb2f3b4 # HEAD of "stable/rocky" as of 11.12.2018
swift_git_project_group: swift_all
+swift_git_track_branch: stable/rocky
## Swift3 middleware
+# please remove this when the swift role is configured without this middleware (and uses swift code only)
swift_swift3_git_repo: https://git.openstack.org/openstack/swift3
-swift_swift3_git_install_branch: 1fb6a30ee59a16cd4b6c49bab963ff9e3f974580 # HEAD of "master" as of 24.11.2017
+swift_swift3_git_install_branch: 90db5d1510b2a770387961e7bf0fbeae8101ba45 # FROZEN HEAD of "master" as of 17.08.2018
swift_swift3_git_project_group: swift_all
+swift_swift3_git_track_branch: None
## Ironic service
ironic_git_repo: https://git.openstack.org/openstack/ironic
-ironic_git_install_branch: 27ce77142bfb9ac56e85db37e0923a0eb47f2f7a # HEAD of "master" as of 24.11.2017
+ironic_git_install_branch: 6a6c0d882fe8ac299d18df75d2bbd111b170ad48 # HEAD of "stable/rocky" as of 11.12.2018
ironic_git_project_group: ironic_all
+ironic_git_track_branch: stable/rocky
+
## Magnum service
magnum_git_repo: https://git.openstack.org/openstack/magnum
-magnum_git_install_branch: 4bf3b3263870a4ec81cf372713cacec446b3ee84 # HEAD of "master" as of 24.11.2017
+magnum_git_install_branch: 765e207a5d3a45b8523cb2c34e5d74541da481e6 # HEAD of "stable/rocky" as of 11.12.2018
magnum_git_project_group: magnum_all
+magnum_git_track_branch: stable/rocky
+
## Trove service
trove_git_repo: https://git.openstack.org/openstack/trove
-trove_git_install_branch: b09d0eb3135047891a369d3c0eb2c6e9ae649f5b # HEAD of "master" as of 24.11.2017
+trove_git_install_branch: 2953676e81fc22099e72ea7d0f27002a59aa779f # HEAD of "stable/rocky" as of 11.12.2018
trove_git_project_group: trove_all
+trove_git_track_branch: stable/rocky
## Horizon Trove dashboard plugin
trove_dashboard_git_repo: https://git.openstack.org/openstack/trove-dashboard
-trove_dashboard_git_install_branch: 14a4609606d42cae827b8fc6b44453caea258976 # HEAD of "master" as of 24.11.2017
+trove_dashboard_git_install_branch: c6482d8f7ebeb980a99cc89593245be381675984 # HEAD of "stable/rocky" as of 11.12.2018
trove_dashboard_git_project_group: horizon_all
+trove_dashboard_git_track_branch: stable/rocky
+
## Octavia service
octavia_git_repo: https://git.openstack.org/openstack/octavia
-octavia_git_install_branch: bb9bb2d05b268cff9846e0a09ad3940be5fe5a80 # HEAD of "master" as of 24.11.2017
+octavia_git_install_branch: ec4c88e23ebeb786491158682f9a7dd42928f97a # HEAD of "stable/rocky" as of 14.12.2018
octavia_git_project_group: octavia_all
+octavia_git_track_branch: stable/rocky
-## Molteniron service
-molteniron_git_repo: https://git.openstack.org/openstack/molteniron
-molteniron_git_install_branch: 094276cda77d814d07ad885e7d63de8d1243750a # HEAD of "master" as of 24.11.2017
-molteniron_git_project_group: molteniron_all
## Tacker service
tacker_git_repo: https://git.openstack.org/openstack/tacker
-tacker_git_install_branch: cc03b5d952527b8cad2e2e309a97d55afb1ca559 # HEAD of "master" as of 24.11.2017
+tacker_git_install_branch: 279b1a2840b9f28377476e0d11ca83ce2e88a0b2 # HEAD of "stable/rocky" as of 11.12.2018
tacker_git_project_group: tacker_all
+tacker_git_track_branch: stable/rocky
+
+## Congress service
+congress_git_repo: https://git.openstack.org/openstack/congress
+congress_git_install_branch: 6862ac9f356a5403e1e37050e12f032f661bae96 # HEAD of "stable/rocky" as of 11.12.2018
+congress_git_project_group: congress_all
+congress_git_track_branch: stable/rocky
+
+## Horizon Octavia dashboard plugin
+octavia_dashboard_git_repo: https://git.openstack.org/openstack/octavia-dashboard
+octavia_dashboard_git_install_branch: 80766f9390492c24de38911d7240c5490c7ef562 # HEAD of "stable/rocky" as of 11.12.2018
+octavia_dashboard_git_project_group: horizon_all
+octavia_dashboard_git_track_branch: stable/rocky
diff --git a/xci/installer/osa/files/setup-openstack.yml b/xci/installer/osa/files/setup-openstack.yml
index c2cb1c79..904215b7 100644
--- a/xci/installer/osa/files/setup-openstack.yml
+++ b/xci/installer/osa/files/setup-openstack.yml
@@ -19,9 +19,13 @@
- include: os-nova-install.yml
- include: os-neutron-install.yml
- include: os-heat-install.yml
+- include: os-ceilometer-install.yml
- include: os-horizon-install.yml
+ when: not core_openstack | default(False)
- include: os-swift-install.yml
- include: os-ironic-install.yml
+ when: not core_openstack | default(False)
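+# Note: core_openstack is set from core_openstack_install (see
+# configure-opnfvhost.yml); when it is true, optional services such as
+# horizon and ironic above are skipped.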
+- include: os-barbican-install.yml
- include: os-tacker-install.yml
- include: os-tempest-install.yml
when: (tempest_install | default(False)) | bool or (tempest_run | default(False)) | bool
diff --git a/xci/installer/osa/files/user_variables_proxy.yml b/xci/installer/osa/files/user_variables_proxy.yml
new file mode 100644
index 00000000..d25c3181
--- /dev/null
+++ b/xci/installer/osa/files/user_variables_proxy.yml
@@ -0,0 +1,22 @@
+---
+# Copyright 2018, Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
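+# http_proxy_env_url is populated at deployment time from the deployment
+# host's http_proxy environment variable (see configure-opnfvhost.yml), and
+# no_proxy_env covers the load balancer VIPs plus every container address so
+# that internal traffic bypasses the proxy.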
+http_proxy_env_url: ""
+no_proxy_env: "localhost,127.0.0.1,{{ internal_lb_vip_address }},{{ external_lb_vip_address }},{% for host in groups['all_containers'] %}{{ hostvars[host]['container_address'] }}{% if not loop.last %},{% endif %}{% endfor %}"
+global_environment_variables:
+ HTTP_PROXY: "{{ http_proxy_env_url }}"
+ NO_PROXY: "{{ no_proxy_env }}"
+ http_proxy: "{{ http_proxy_env_url }}"
+ no_proxy: "{{ no_proxy_env }}"
diff --git a/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/ha/user_variables_os-odl-nofeature-ha.yml b/xci/installer/osa/files/user_variables_xci.yml
index 25cd6839..1d69f532 100644
--- a/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/ha/user_variables_os-odl-nofeature-ha.yml
+++ b/xci/installer/osa/files/user_variables_xci.yml
@@ -1,5 +1,5 @@
---
-# Copyright (c) 2017 Ericsson AB and others.
+# Copyright 2018, SUSE LINUX GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,10 +13,5 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# ##
-# ## This file contains commonly used overrides for convenience. Please inspect
-# ## the defaults for each role to find additional override options.
-# ##
-
-# Enable clustering for opendaylight
-cluster: true
\ No newline at end of file
+debug: False
+install_method: source
diff --git a/xci/installer/osa/playbooks/bootstrap-scenarios.yml b/xci/installer/osa/playbooks/bootstrap-scenarios.yml
deleted file mode 100644
index 98acf73b..00000000
--- a/xci/installer/osa/playbooks/bootstrap-scenarios.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-#
-# This file is aimed to be used by scenarios to plug into the XCI.
-# Ideally, all they need to do at this point is to include their
-# role using a statement like the following one
-#
-# - name: Include foobar role
-# include_role:
-# name: "foobar"
-# when: DEPLOY_SCENARIO == "foobar"
-
-- name: Prepare everything to run the os-nosdn-nofeature scenario
- include_role:
- name: "os-nosdn-nofeature"
- when: DEPLOY_SCENARIO == 'os-nosdn-nofeature'
-- name: Prepare everything to run the os-odl-nofeature scenario
- include_role:
- name: "os-odl-nofeature"
- when: DEPLOY_SCENARIO == 'os-odl-nofeature'
-- name: Prepare everything to run the os-odl-sfc scenario
- include_role:
- name: "os-odl-sfc"
- when: DEPLOY_SCENARIO == 'os-odl-sfc'
diff --git a/xci/installer/osa/playbooks/configure-localhost.yml b/xci/installer/osa/playbooks/configure-localhost.yml
deleted file mode 100644
index caa5d673..00000000
--- a/xci/installer/osa/playbooks/configure-localhost.yml
+++ /dev/null
@@ -1,75 +0,0 @@
----
-# SPDX-license-identifier: Apache-2.0
-##############################################################################
-# Copyright (c) 2017 Ericsson AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-- hosts: localhost
- connection: local
-
- pre_tasks:
- - name: Load distribution variables
- include_vars:
- file: "{{ item }}"
- failed_when: false
- with_items:
- - "{{ XCI_PATH }}/xci/var/opnfv.yml"
- - "{{ XCI_PATH }}/xci/var/{{ ansible_os_family }}.yml"
-
- - name: cleanup leftovers of previous deployment
- file:
- path: "{{ item }}"
- state: absent
- recurse: no
- with_items:
- - "{{ XCI_CACHE }}/repos"
- - "{{ LOG_PATH }} "
- - "{{ OPNFV_SSH_HOST_KEYS_PATH }}"
-
- roles:
- - role: clone-repository
- project: "openstack/openstack-ansible-openstack_openrc"
- repo: "{{ OPENSTACK_OSA_OPENRC_GIT_URL }}"
- dest: roles/openstack-ansible-openstack_openrc
- version: "master"
- - role: clone-repository
- project: "openstack/openstack-ansible"
- repo: "{{ OPENSTACK_OSA_GIT_URL }}"
- dest: "{{ XCI_CACHE }}/repos/openstack-ansible"
- version: "{{ OPENSTACK_OSA_VERSION }}"
-
- tasks:
- - name: create log directory {{LOG_PATH}}
- file:
- path: "{{LOG_PATH}}"
- state: directory
- recurse: no
- - name: check if certificate directory /etc/ssl/certs exists already
- stat: path=/etc/ssl/certs
- register: check_etc_ssl_certs
- - name: create certificate directory /etc/ssl/certs
- become: true
- file:
- path: "/etc/ssl/certs"
- state: directory
- when: check_etc_ssl_certs.stat.exists == false
- - name: create key directory /etc/ssl/private
- become: true
- file:
- path: "/etc/ssl/private"
- state: directory
- - name: generate self signed certificate
- command: openssl req -new -nodes -x509 -subj "{{ XCI_SSL_SUBJECT }}" -days 3650 -keyout "/etc/ssl/private/xci.key" -out "/etc/ssl/certs/xci.crt" -extensions v3_ca
- become: true
- - name: Synchronize local development OSA repository to XCI paths
- # command module is much faster than the copy module
- synchronize:
- src: "{{ OPENSTACK_OSA_DEV_PATH }}"
- dest: "{{ XCI_CACHE }}/repos/openstack-ansible"
- recursive: yes
- delete: yes
- when:
- - OPENSTACK_OSA_DEV_PATH != ""
diff --git a/xci/installer/osa/playbooks/configure-opnfvhost.yml b/xci/installer/osa/playbooks/configure-opnfvhost.yml
index de922d3c..07ad683b 100644
--- a/xci/installer/osa/playbooks/configure-opnfvhost.yml
+++ b/xci/installer/osa/playbooks/configure-opnfvhost.yml
@@ -10,176 +10,190 @@
- hosts: opnfv
remote_user: root
vars_files:
- - "{{ XCI_PATH }}/xci/var/opnfv.yml"
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+ - "{{ xci_path }}/xci/installer/osa/files/openstack_services.yml"
+ environment:
+ http_proxy: "{{ lookup('env','http_proxy') }}"
+ https_proxy: "{{ lookup('env','https_proxy') }}"
+ no_proxy: "{{ lookup('env','no_proxy') }}"
+ HTTP_PROXY: "{{ lookup('env','http_proxy') }}"
+ HTTPS_PROXY: "{{ lookup('env','https_proxy') }}"
+ NO_PROXY: "{{ lookup('env','no_proxy') }}"
pre_tasks:
- name: Load distribution variables
include_vars:
file: "{{ item }}"
with_items:
- - "{{ XCI_PATH }}/xci/var/{{ ansible_os_family }}.yml"
- - "{{ XCI_FLAVOR_ANSIBLE_FILE_PATH }}/flavor-vars.yml"
+ - "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml"
- name: Set facts for remote deployment
set_fact:
remote_xci_path: "{{ ansible_env.HOME }}/releng-xci"
- remote_xci_flavor_files: "{{ ansible_env.HOME }}/releng-xci/xci/installer/{{XCI_INSTALLER}}/files/{{ XCI_FLAVOR }}"
+ remote_xci_flavor_files: "{{ ansible_env.HOME }}/releng-xci/xci/installer/{{installer_type}}/files/{{ xci_flavor }}"
remote_xci_playbooks: "{{ ansible_env.HOME }}/releng-xci/xci/playbooks"
roles:
- - role: configure-network
- when: XCI_FLAVOR != "aio"
+ - role: bootstrap-host
+ configure_network: xci_flavor != 'aio'
+ - role: ruzickap.proxy_settings
+ proxy_settings_http_proxy: "{{ lookup('env','http_proxy') }}"
+ proxy_settings_https_proxy: "{{ lookup('env','https_proxy') }}"
+ proxy_settings_ftp_proxy: "{{ lookup('env','ftp_proxy') }}"
+ proxy_settings_no_proxy: "{{ lookup('env','no_proxy') }}"
tasks:
- - name: generate SSH keys
- shell: ssh-keygen -b 2048 -t rsa -f /root/.ssh/id_rsa -q -N ""
+ - name: Create list of files to copy
+ shell: |
+ git ls-tree -r --name-only HEAD > {{ xci_cache }}/releng-xci.files
+ echo ".git/" >> {{ xci_cache }}/releng-xci.files
+ echo ".cache/repos/" >> {{ xci_cache }}/releng-xci.files
+ echo ".cache/xci.env" >> {{ xci_cache }}/releng-xci.files
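+ # besides the git-tracked files, also ship the .git dir, the cached repos
+ # and the saved xci environment to the remote host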
args:
- creates: "{{ ansible_env.HOME }}/.ssh/id_rsa"
- - name: fetch public key
- fetch:
- src: "{{ ansible_env.HOME }}/.ssh/id_rsa.pub"
- dest: "{{ XCI_PATH }}/xci/files/authorized_keys"
- flat: yes
+ executable: /bin/bash
+ chdir: "{{ xci_path }}"
+ changed_when: False
+ delegate_to: 127.0.0.1
+ tags:
+ - skip_ansible_lint
+
- name: Copy releng-xci to remote host
synchronize:
- src: "{{ XCI_PATH }}/"
+ archive: yes
+ src: "{{ xci_path }}/"
dest: "{{ remote_xci_path }}"
- recursive: yes
delete: yes
- - name: copy flavor inventory
- shell: "/bin/cp -rf {{ remote_xci_flavor_files }}/inventory {{ remote_xci_playbooks }}"
- - name: copy openstack_deploy
- shell: "/bin/cp -rf {{OPENSTACK_OSA_PATH}}/etc/openstack_deploy {{OPENSTACK_OSA_ETC_PATH}}"
- - name: copy openstack_user_config.yml
- shell: "/bin/cp -rf {{ remote_xci_flavor_files }}/openstack_user_config.yml {{OPENSTACK_OSA_ETC_PATH}}"
- failed_when: false
- - name: copy all user override files
- shell: "/bin/cp -rf {{ remote_xci_flavor_files }}/user_variables.yml {{OPENSTACK_OSA_ETC_PATH}}"
- failed_when: false
- - name: copy cinder.yml
- shell: "/bin/cp -rf {{ remote_xci_path }}/xci/installer/osa/files/cinder.yml {{OPENSTACK_OSA_ETC_PATH}}/env.d"
- - name: Configure AIO tempest
+ rsync_opts:
+ - "--recursive"
+ - "--files-from={{ xci_cache }}/releng-xci.files"
+
+ - name: Re-create OpenStack-Ansible /etc directory
+ file:
+ path: "{{ openstack_osa_etc_path }}"
+ state: "{{ item }}"
+ with_items:
+ - absent
+ - directory
+
+ - name: Remove upstream OpenStack-Ansible files
+ file:
+ path: "{{ openstack_osa_path }}/playbooks/{{ item }}"
+ state: absent
+ with_items:
+ - inventory
+ - setup-openstack.yml
+
+ - name: Copy OpenStack-Ansible configuration files
+ command: "/bin/cp -rf {{ item.src }} {{ item.dest }}"
+ args:
+ creates: "{{ item.dest }}/{{ item.src | basename }}"
+ with_items:
+ - { src: "{{ openstack_osa_path }}/etc/openstack_deploy/env.d", dest: "{{ openstack_osa_etc_path }}" }
+ - { src: "{{ openstack_osa_path }}/etc/openstack_deploy/conf.d", dest: "{{ openstack_osa_etc_path }}" }
+ - { src: "{{ openstack_osa_path }}/etc/openstack_deploy/user_secrets.yml", dest: "{{ openstack_osa_etc_path }}" }
+ - { src: "{{ remote_xci_flavor_files }}/openstack_user_config.yml", dest: "{{ openstack_osa_etc_path }}" }
+ - { src: "{{ remote_xci_flavor_files }}/user_variables.yml", dest: "{{ openstack_osa_etc_path }}" }
+ - { src: "{{ remote_xci_flavor_files }}/ceph.yml", dest: "{{ openstack_osa_etc_path }}/conf.d/", cond: xci_ceph_enabled }
+ - { src: "{{ remote_xci_flavor_files }}/user_ceph.yml", dest: "{{ openstack_osa_etc_path }}/user_ceph.yml", cond: xci_ceph_enabled }
+ - { src: "{{ remote_xci_flavor_files }}/user_variables_ceph.yml", dest: "{{ openstack_osa_etc_path }}/user_variables_ceph.yml", cond: xci_ceph_enabled }
+ - { src: "{{ remote_xci_path }}/xci/installer/osa/files/cinder.yml", dest: "{{ openstack_osa_etc_path }}/env.d" }
+ - { src: "{{ remote_xci_path }}/xci/installer/osa/files/user_variables_xci.yml", dest: "{{ openstack_osa_etc_path }}/user_variables_xci.yml" }
+ - { src: "{{ remote_xci_path }}/xci/installer/osa/files/user_variables_proxy.yml", dest: "{{ openstack_osa_etc_path }}/user_variables_proxy.yml", cond: "{{ lookup('env', 'http_proxy') != '' }}" }
+ - { src: "{{ remote_xci_path }}/xci/installer/osa/files/setup-openstack.yml", dest: "{{ openstack_osa_path }}/playbooks" }
+ - { src: "{{ remote_xci_path }}/xci/installer/osa/files/ansible-role-requirements.yml", dest: "{{openstack_osa_path}}/ansible-role-requirements.yml", cond: "{{ openstack_osa_version != 'master' }}" }
+ - { src: "{{ remote_xci_path }}/xci/installer/osa/files/global-requirement-pins.txt", dest: "{{openstack_osa_path}}/global-requirement-pins.txt", cond: "{{ openstack_osa_version != 'master' }}" }
+ - { src: "{{ remote_xci_path }}/xci/installer/osa/files/openstack_services.yml", dest: "{{ openstack_osa_path }}/playbooks/defaults/repo_packages/openstack_services.yml", cond: "{{ openstack_osa_version != 'master' }}" }
+ when: item.cond is not defined or (item.cond is defined and item.cond | bool)
+ loop_control:
+ label: "{{ item.src }}"
+
+ - name: Configure OpenStack-Ansible components
lineinfile:
- path: "{{ OPENSTACK_OSA_ETC_PATH }}/user_variables.yml"
- line: "{{ item }}: {{ RUN_TEMPEST | bool }}"
+ path: "{{ openstack_osa_etc_path }}/user_variables.yml"
+ line: "{{ item.component }}: {{ item.value }}"
state: present
with_items:
- - "tempest_install"
- - "tempest_run"
- - block:
- - name: copy ceph.yml
- shell: "/bin/cp -rf {{ remote_xci_flavor_files }}/ceph.yml {{OPENSTACK_OSA_ETC_PATH}}/conf.d/"
- - name: copy user_ceph.yml
- shell: "/bin/cp -rf {{ remote_xci_flavor_files }}/user_ceph.yml {{OPENSTACK_OSA_ETC_PATH}}/user_ceph.yml"
- - name: copy user_variables_ceph.yml
- shell: "/bin/cp -rf {{ remote_xci_flavor_files }}/user_variables_ceph.yml {{OPENSTACK_OSA_ETC_PATH}}/user_variables_ceph.yml"
- when: XCI_CEPH_ENABLED == "true"
- # TODO: We need to get rid of this as soon as the issue is fixed upstream
- - name: change the haproxy state from disable to enable
- replace:
- dest: "{{OPENSTACK_OSA_PATH}}/playbooks/os-keystone-install.yml"
- regexp: '(\s+)haproxy_state: disabled'
- replace: '\1haproxy_state: enabled'
- - name: copy OPNFV OpenStack playbook
- shell: "/bin/cp -rf {{ remote_xci_path }}/xci/installer/osa/files/setup-openstack.yml {{OPENSTACK_OSA_PATH}}/playbooks"
- - name: copy pinned versions of OSA Roles and global requirements
- shell: "/bin/cp -rf {{ remote_xci_path }}/xci/installer/osa/files/{{ item }} {{OPENSTACK_OSA_PATH}}/{{ item }}"
- with_items:
- - "ansible-role-requirements.yml"
- - "global-requirement-pins.txt"
- when:
- - OPENSTACK_OSA_VERSION != "master"
- - name: copy pinned versions of OpenStack services
- shell: "/bin/cp -rf {{ remote_xci_path }}/xci/installer/osa/files/openstack_services.yml {{OPENSTACK_OSA_PATH}}/playbooks/defaults/repo_packages/openstack_services.yml"
+ - { component: "tempest_install", value: "{{ run_tempest | bool }}" }
+ - { component: "tempest_run", value: "{{ run_tempest | bool }}" }
+ - { component: "core_openstack", value: "{{ core_openstack_install | bool }}" }
+
+ - name: "Configure http_proxy_env_url"
+ lineinfile:
+ path: "{{openstack_osa_etc_path}}/user_variables_proxy.yml"
+ regexp: "^http_proxy_env_url:.*"
+ line: "{{ 'http_proxy_env_url: ' + lookup('env','http_proxy') }}"
when:
- - OPENSTACK_OSA_VERSION != "master"
- - include: bootstrap-scenarios.yml
+ - lookup('env','http_proxy') != ""
+
+ - name: Reload XCI deployment host facts
+ setup:
+ filter: ansible_local
+ gather_subset: "!all"
+ delegate_to: 127.0.0.1
+
+ - name: Prepare everything to run the {{ deploy_scenario }} role
+ include_role:
+ name: "{{ hostvars['opnfv'].ansible_local.xci.scenarios.role }}"
+
- name: bootstrap ansible on opnfv host
command: "/bin/bash ./scripts/bootstrap-ansible.sh"
args:
- chdir: "{{OPENSTACK_OSA_PATH}}"
- - name: install python Crypto module
- package:
- name: "{{ python_crypto_package_name }}"
- - name: install PyYAML
+ creates: "/usr/local/bin/openstack-ansible"
+ chdir: "{{openstack_osa_path}}"
+
+ - name: install opnfv pip required packages
pip:
- name: pyyaml
+ name: "{{ item }}"
state: present
- - name: generate password token
- command: "python pw-token-gen.py --file {{OPENSTACK_OSA_ETC_PATH}}/user_secrets.yml"
- args:
- chdir: "{{OPENSTACK_OSA_PATH}}/scripts"
- - name: check if certificate directory /etc/ssl/certs exists already
- stat: path=/etc/ssl/certs
- register: check_etc_ssl_certs
- - name: create certificate directory /etc/ssl/certs
+ extra_args: '-c https://raw.githubusercontent.com/openstack/requirements/{{ requirements_git_install_branch }}/upper-constraints.txt'
+ with_items:
+ - pyyaml
+ - python-neutronclient
+ - python-openstackclient
+ - name: Install ARA callback plugin in OSA virtualenv
+ pip:
+ name: ara
+ version: 0.16.4
+ state: present
+ extra_args: '-c https://raw.githubusercontent.com/openstack/requirements/{{ requirements_git_install_branch }}/upper-constraints.txt'
+ executable: '/opt/ansible-runtime/bin/pip'
+ - name: Determine ARA callback location
+ command: "/opt/ansible-runtime/bin/python -c 'import os,ara; print(os.path.dirname(ara.__file__))'"
+ changed_when: False
+ register: _ara_install_dir
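+ # ARA ships its callback plugins inside the installed package; symlinking
+ # them under ~/.ansible/plugins/callback lets the deployment host's
+ # Ansible pick them up and record the run.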
+ - name: Create local Ansible plugins directory
file:
- path: "/etc/ssl/certs"
+ path: "{{ ansible_env.HOME }}/.ansible/plugins/callback/ara"
state: directory
- when: check_etc_ssl_certs.stat.exists == false
- - name: create key directory /etc/ssl/private
+ - name: Configure ARA callback
file:
- path: "/etc/ssl/private"
- state: directory
- - name: copy certificate to /etc/ssl/certs
- copy:
- src: "/etc/ssl/certs/xci.crt"
- dest: "/etc/ssl/certs/"
- - name: read remote key from /etc/ssl/private
- set_fact:
- xci_ssl_key: "{{ lookup('pipe', 'sudo cat /etc/ssl/private/xci.key' ) }}"
- - name: copy key to /etc/ssl/private
- copy:
- content: "{{ xci_ssl_key }}"
- dest: "/etc/ssl/private/xci.key"
- become: true
- - name: install opnfv required packages
- package:
- name: "{{ opnfv_required_packages }}"
- state: latest
- # Docker is needed for functest
- - name: Ensure Docker service is started and enabled
- service:
- name: "{{ docker_service_name }}"
- state: started
- enabled: yes
- - name: install opnfv required pip packages
- pip:
- name: "{{ opnfv_required_pip }}"
- state: present
-
-- hosts: localhost
- remote_user: root
-
- tasks:
- - name: Append public keys to authorized_keys
- shell: "/bin/cat {{ ansible_env.HOME }}/.ssh/id_rsa.pub >> {{ XCI_PATH }}/xci/files/authorized_keys"
+ path: "{{ ansible_env.HOME }}/.ansible/plugins/callback/ara/callbacks"
+ src: "{{ _ara_install_dir.stdout }}/plugins/callbacks"
+ force: yes
+ state: link
+ - name: generate password token
+ command: "python pw-token-gen.py --file {{openstack_osa_etc_path}}/user_secrets.yml"
+ args:
+ chdir: "{{openstack_osa_path}}/scripts"
+ changed_when: True
-- hosts: opnfv
- remote_user: root
- vars_files:
- - "{{ XCI_PATH }}/xci/var/opnfv.yml"
+ - name: fetch xci environment
+ copy:
+ src: "{{ xci_path }}/.cache/xci.env"
+ dest: /root/xci.env
- pre_tasks:
- - name: Load distribution variables
+ - name: Reload OpenStack-Ansible variables
include_vars:
- file: "{{ item }}"
- failed_when: false
- with_items:
- - "{{ XCI_PATH }}/xci/var/{{ ansible_os_family }}.yml"
- - "{{ XCI_FLAVOR_ANSIBLE_FILE_PATH }}/flavor-vars.yml"
- - "{{ XCI_FLAVOR_ANSIBLE_FILE_PATH }}/user_variables.yml"
- roles:
- - role: "openstack-ansible-openstack_openrc"
+ file: "{{ xci_flavor_ansible_file_path }}/user_variables.yml"
- tasks:
- - name: add extra insecure flag to generated openrc
- blockinfile:
- dest: "{{ ansible_env.HOME }}/openrc"
- block: |
- export OS_INSECURE=true
+ - name: Generate openrc
+ include_role:
+ name: "openstack-ansible-openstack_openrc"
- name: fetch generated openrc
fetch:
src: "{{ ansible_env.HOME }}/openrc"
- dest: "{{ XCI_PATH }}/.cache/openrc"
+ dest: "{{ xci_path }}/.cache/openrc"
flat: true
+
+ - name: Manage SSH keys
+ include_tasks: "{{ xci_path }}/xci/playbooks/manage-ssh-keys.yml"
diff --git a/xci/installer/osa/playbooks/configure-targethosts.yml b/xci/installer/osa/playbooks/configure-targethosts.yml
index fb43a920..dfa17696 100644
--- a/xci/installer/osa/playbooks/configure-targethosts.yml
+++ b/xci/installer/osa/playbooks/configure-targethosts.yml
@@ -1,49 +1,36 @@
---
-- hosts: all
- remote_user: root
- tasks:
- - name: add public key to host
- copy:
- src: "{{ XCI_PATH }}/xci/files/authorized_keys"
- dest: /root/.ssh/authorized_keys
-
-- hosts: controller
+- hosts: openstack
+ environment:
+ http_proxy: "{{ lookup('env','http_proxy') }}"
+ https_proxy: "{{ lookup('env','https_proxy') }}"
+ no_proxy: "{{ lookup('env','no_proxy') }}"
+ HTTP_PROXY: "{{ lookup('env','http_proxy') }}"
+ HTTPS_PROXY: "{{ lookup('env','https_proxy') }}"
+ NO_PROXY: "{{ lookup('env','no_proxy') }}"
remote_user: root
vars_files:
- - "{{ XCI_PATH }}/xci/var/opnfv.yml"
+ - "{{ xci_path }}/xci/var/opnfv.yml"
pre_tasks:
- name: Load distribution variables
include_vars:
file: "{{ item }}"
with_items:
- - "{{ XCI_PATH }}/xci/var/{{ ansible_os_family }}.yml"
- - "{{ XCI_FLAVOR_ANSIBLE_FILE_PATH }}/flavor-vars.yml"
- roles:
- - role: configure-network
- # we need to force sync time with ntp or the nodes will be out of sync timewise
- - role: synchronize-time
-
-- hosts: compute
- remote_user: root
- vars_files:
- - "{{ XCI_PATH }}/xci/var/opnfv.yml"
-
- pre_tasks:
- - name: Load distribution variables
- include_vars:
- file: "{{ item }}"
- with_items:
- - "{{ XCI_PATH }}/xci/var/{{ ansible_os_family }}.yml"
- - "{{ XCI_FLAVOR_ANSIBLE_FILE_PATH }}/flavor-vars.yml"
- roles:
- - role: configure-network
- # we need to force sync time with ntp or the nodes will be out of sync timewise
- - role: synchronize-time
- - role: configure-ceph
- when: XCI_CEPH_ENABLED == "true"
-
-- hosts: compute00
- remote_user: root
+ - "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml"
roles:
+ - role: ruzickap.proxy_settings
+ proxy_settings_http_proxy: "{{ lookup('env','http_proxy') }}"
+ proxy_settings_https_proxy: "{{ lookup('env','https_proxy') }}"
+ proxy_settings_ftp_proxy: "{{ lookup('env','ftp_proxy') }}"
+ proxy_settings_no_proxy: "{{ lookup('env','no_proxy') }}"
+ - role: bootstrap-host
- role: configure-nfs
+ when:
+ - "'compute' in group_names"
+ - role: configure-ceph
+ when:
+ - xci_ceph_enabled == "true"
+ - "'compute' in group_names"
+ tasks:
+ - name: Manage SSH keys
+ include_tasks: "{{ xci_path }}/xci/playbooks/manage-ssh-keys.yml"
diff --git a/xci/installer/osa/playbooks/post-deployment.yml b/xci/installer/osa/playbooks/post-deployment.yml
new file mode 100644
index 00000000..36c052c9
--- /dev/null
+++ b/xci/installer/osa/playbooks/post-deployment.yml
@@ -0,0 +1,66 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- hosts: opnfv
+ remote_user: root
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+ - "{{ xci_path }}/xci/installer/osa/files/openstack_services.yml"
+ - "{{ xci_path }}/xci/installer/osa/files/{{ xci_flavor }}/user_variables.yml"
+
+ environment:
+ http_proxy: "{{ lookup('env','http_proxy') }}"
+ https_proxy: "{{ lookup('env','https_proxy') }}"
+ no_proxy: "{{ lookup('env','no_proxy') }}"
+ HTTP_PROXY: "{{ lookup('env','http_proxy') }}"
+ HTTPS_PROXY: "{{ lookup('env','https_proxy') }}"
+ NO_PROXY: "{{ lookup('env','no_proxy') }}"
+ pre_tasks:
+ - name: Load distribution variables
+ include_vars:
+ file: "{{ item }}"
+ with_items:
+ - "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml"
+ - name: Set facts for remote deployment
+ set_fact:
+ remote_xci_scenario_path: "{{ ansible_env.HOME }}/releng-xci/.cache/repos/scenarios/{{ deploy_scenario }}/scenarios/{{ deploy_scenario }}"
+
+ roles:
+ - role: ruzickap.proxy_settings
+ proxy_settings_http_proxy: "{{ lookup('env','http_proxy') }}"
+ proxy_settings_https_proxy: "{{ lookup('env','https_proxy') }}"
+ proxy_settings_ftp_proxy: "{{ lookup('env','ftp_proxy') }}"
+ proxy_settings_no_proxy: "{{ lookup('env','no_proxy') }}"
+
+ tasks:
+ - name: "Configure http_proxy_env_url"
+ lineinfile:
+ path: "{{openstack_osa_etc_path}}/user_variables_proxy.yml"
+ regexp: "^http_proxy_env_url:.*"
+ line: "{{ 'http_proxy_env_url: ' + lookup('env','http_proxy') }}"
+ when:
+ - lookup('env','http_proxy') != ""
+
+ - name: Reload XCI deployment host facts
+ setup:
+ filter: ansible_local
+ gather_subset: "!all"
+ delegate_to: 127.0.0.1
+
+ - name: Check if any post-deployment task defined for {{ deploy_scenario }} role
+ stat:
+ path: "{{ remote_xci_scenario_path }}/role/{{ deploy_scenario }}/tasks/post-deployment.yml"
+ register: post_deployment_yml
+
+ - name: Execute post-deployment tasks of {{ deploy_scenario }} role
+ include_role:
+ name: "{{ hostvars['opnfv'].ansible_local.xci.scenarios.role }}"
+ tasks_from: post-deployment
+ when:
+ - post_deployment_yml.stat.exists
diff --git a/xci/installer/osh/README b/xci/installer/osh/README
new file mode 100644
index 00000000..902ac10e
--- /dev/null
+++ b/xci/installer/osh/README
@@ -0,0 +1,50 @@
+Requirements:
+ 1. Performance of hosts
+ There are no official performance requirements. The following settings are recommended:
+ - VM_CPU=6
+ - VM_DISK=80GB
+ - VM_MEMORY_SIZE=16GB
+
+ 2. Distributions
+ - Ubuntu 16.04
+
+ 3. Packages:
+ - Ansible v2.4 (or newer) and python-netaddr are installed on the machine that will run Ansible commands
+ - Jinja 2.9 (or newer) is required to run the Ansible Playbooks
+
+ 4. Others:
+ - The target servers must have access to the Internet in order to pull docker images.
+ - The target servers are configured to allow IPv4 forwarding.
+ - Your ssh key must be copied to all servers that are part of your inventory.
+ - The firewalls are not managed; you will need to implement your own rules as you normally would. To avoid issues during the deployment, you should disable your firewall.
+
+Flavors:
+ 1. mini: One deployment host, 1 master host and 1 node host.
+ 2. noha: One deployment host, 1 master host and 2 node hosts.
+
+Components Installed:
+ 1. etcd
+ 2. network plugins:
+ - calico
+ 3. kubernetes
+ 4. docker
+
+How to use:
+
+Clone the OPNFV Releng repository
+
+ git clone https://gerrit.opnfv.org/gerrit/releng-xci.git
+
+Change into the directory where the sandbox script is located
+
+ cd releng-xci/xci
+
+Set the variables to run openstack-helm
+
+ export INSTALLER_TYPE=osh
+ export DEPLOY_SCENARIO=k8-calico-nofeature
+ export XCI_FLAVOR=mini
+
+Execute the sandbox script
+
+ ./xci-deploy.sh
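+
+Once the deployment completes, the cluster can be verified from the opnfv
+host, for example (host address depends on your environment):
+
+  ssh root@<opnfv host> kubectl get nodes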
diff --git a/xci/installer/osh/deploy.sh b/xci/installer/osh/deploy.sh
new file mode 100755
index 00000000..e56845b8
--- /dev/null
+++ b/xci/installer/osh/deploy.sh
@@ -0,0 +1,170 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2017 Huawei
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+set -o errexit
+set -o nounset
+set -o pipefail
+
+OSH_XCI_PLAYBOOKS="$(dirname $(realpath ${BASH_SOURCE[0]}))/playbooks"
+export ANSIBLE_ROLES_PATH=$HOME/.ansible/roles:/etc/ansible/roles:${XCI_PATH}/xci/playbooks/roles
+
+#-------------------------------------------------------------------------------
+# Configure localhost
+#-------------------------------------------------------------------------------
+# This playbook
+# - removes directories that were created by the previous xci run
+# - clones opnfv/releng-xci repository
+# - clones kubernetes-incubator/kubespray repository
+# - creates log directory
+#-------------------------------------------------------------------------------
+
+echo "Info: Configuring localhost for kubespray"
+echo "-----------------------------------------------------------------------"
+cd $XCI_PLAYBOOKS
+ansible-playbook ${XCI_ANSIBLE_PARAMS} -e XCI_PATH="${XCI_PATH}" \
+ -i dynamic_inventory.py configure-localhost.yml
+echo "-----------------------------------------------------------------------"
+echo "Info: Configured localhost for kubespray"
+
+#-------------------------------------------------------------------------------
+# Configure installer
+#-------------------------------------------------------------------------------
+# This playbook
+# - re-creates kubespray/inventory/opnfv from the sample inventory
+# - updates kubespray's k8s-cluster.yml and dashboard template for xci
+#-------------------------------------------------------------------------------
+
+echo "Info: Configuring kubespray installer"
+echo "-----------------------------------------------------------------------"
+cd $OSH_XCI_PLAYBOOKS
+ansible-playbook ${XCI_ANSIBLE_PARAMS} \
+ -i ${XCI_PLAYBOOKS}/dynamic_inventory.py configure-installer.yml
+echo "-----------------------------------------------------------------------"
+echo "Info: Configured kubespray installer"
+
+#-------------------------------------------------------------------------------
+# Configure deployment host, opnfv
+#-------------------------------------------------------------------------------
+# This playbook
+# - removes directories that were created by the previous xci run
+# - synchronizes opnfv/releng-xci and kubernetes-incubator/kubespray repositories
+# - generates/prepares ssh keys
+# - copies flavor files to be used by kubespray
+# - installs packages required by kubespray
+#-------------------------------------------------------------------------------
+echo "Info: Configuring opnfv deployment host for kubespray"
+echo "-----------------------------------------------------------------------"
+cd $OSH_XCI_PLAYBOOKS
+ansible-playbook ${XCI_ANSIBLE_PARAMS} \
+ -i ${XCI_PLAYBOOKS}/dynamic_inventory.py configure-opnfvhost.yml
+echo "-----------------------------------------------------------------------"
+echo "Info: Configured opnfv deployment host for kubespray"
+
+#-------------------------------------------------------------------------------
+# Configure target hosts for kubespray
+#-------------------------------------------------------------------------------
+# This playbook is run for all flavors except aio, since aio is configured by configure-opnfvhost.yml
+# This playbook
+# - adds public keys to target hosts
+# - installs packages required by kubespray
+# - configures haproxy service
+#-------------------------------------------------------------------------------
+if [ $XCI_FLAVOR != "aio" ]; then
+ echo "Info: Configuring target hosts for kubespray"
+ echo "-----------------------------------------------------------------------"
+ cd $OSH_XCI_PLAYBOOKS
+ ansible-playbook ${XCI_ANSIBLE_PARAMS} \
+ -i ${XCI_PLAYBOOKS}/dynamic_inventory.py configure-targethosts.yml
+ echo "-----------------------------------------------------------------------"
+ echo "Info: Configured target hosts for kubespray"
+fi
+
+
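+# Run kubespray's cluster.yml on the opnfv host and mirror its output into
+# setup-kubernetes.log, which is then copied back under $LOG_PATH.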
+echo "Info: Using kubespray to deploy the kubernetes cluster"
+echo "-----------------------------------------------------------------------"
+ssh root@$OPNFV_HOST_IP "set -o pipefail; export XCI_FLAVOR=$XCI_FLAVOR; export INSTALLER_TYPE=$INSTALLER_TYPE; \
+ export IDF=/root/releng-xci/xci/var/idf.yml; export PDF=/root/releng-xci/xci/var/pdf.yml; \
+ cd releng-xci/.cache/repos/kubespray/; ansible-playbook \
+ -i inventory/opnfv/dynamic_inventory.py cluster.yml -b | tee setup-kubernetes.log"
+scp root@$OPNFV_HOST_IP:~/releng-xci/.cache/repos/kubespray/setup-kubernetes.log \
+ $LOG_PATH/setup-kubernetes.log
+
+
+cd $OSH_XCI_PLAYBOOKS
+ansible-playbook ${XCI_ANSIBLE_PARAMS} \
+ -i ${XCI_PLAYBOOKS}/dynamic_inventory.py configure-kubenet.yml
+echo
+echo "-----------------------------------------------------------------------"
+echo "Info: Kubernetes installation is successfully completed!"
+echo "-----------------------------------------------------------------------"
+
+#-------------------------------------------------------------------------------
+# Execute post-installation tasks
+#-------------------------------------------------------------------------------
+# Playbook post.yml is used in order to execute any post-deployment tasks that
+# are required for the scenario under test.
+#-------------------------------------------------------------------------------
+# copy admin.conf
+ssh root@$OPNFV_HOST_IP "mkdir -p ~/.kube/;\
+ cp -f ~/admin.conf ~/.kube/config;"
+
+echo "-----------------------------------------------------------------------"
+echo "Info: Running post-deployment scenario role"
+echo "-----------------------------------------------------------------------"
+cd $OSH_XCI_PLAYBOOKS
+ansible-playbook ${XCI_ANSIBLE_PARAMS} -i ${XCI_PLAYBOOKS}/dynamic_inventory.py \
+ post-deployment.yml
+echo "-----------------------------------------------------------------------"
+echo "Info: Post-deployment scenario role execution done"
+echo "-----------------------------------------------------------------------"
+echo
+echo "Login opnfv host ssh root@$OPNFV_HOST_IP
+according to the user-guide to create a service
+https://kubernetes.io/docs/user-guide/walkthrough/k8s201/"
+echo
+echo "-----------------------------------------------------------------------"
+echo "Info: Kubernetes login details"
+echo "-----------------------------------------------------------------------"
+echo
+# Get the dashboard URL
+if ssh-keygen -f "/home/opnfv/.ssh/known_hosts" -F $OPNFV_HOST_IP;
+then
+ssh-keygen -f "/home/opnfv/.ssh/known_hosts" -R $OPNFV_HOST_IP;
+echo "Info: known_hosts entry for opnfv host from previous deployment found and deleted"
+fi
+DASHBOARD_SERVICE=$(ssh -q -o StrictHostKeyChecking=no root@$OPNFV_HOST_IP "kubectl get service -n kube-system |grep kubernetes-dashboard")
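+# e.g. a service line such as "kubernetes-dashboard NodePort 10.233.0.2 <none> 443:31443/TCP"
+# (values illustrative) yields DASHBOARD_PORT=31443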
+DASHBOARD_PORT=$(echo ${DASHBOARD_SERVICE} | awk '{print $5}' |awk -F "[:/]" '{print $2}')
+KUBER_SERVER_URL=$(ssh root@$OPNFV_HOST_IP "grep -r server ~/.kube/config")
+echo "Info: Kubernetes Dashboard URL:"
+echo $KUBER_SERVER_URL | awk '{print $2}'| sed -n "s#:[0-9]*\$#:$DASHBOARD_PORT#p"
+
+# Get the dashboard user and password
+MASTER_IP=$(echo ${KUBER_SERVER_URL} | awk '{print $2}' |awk -F "[:/]" '{print $4}')
+if ssh-keygen -f "/home/opnfv/.ssh/known_hosts" -F $MASTER_IP;
+then
+ssh-keygen -f "/home/opnfv/.ssh/known_hosts" -R $MASTER_IP;
+echo "Info: known_hosts entry for master host from previous deployment found and deleted"
+fi
+USER_CSV=$(ssh -q -o StrictHostKeyChecking=no root@$MASTER_IP " cat /etc/kubernetes/users/known_users.csv")
+USERNAME=$(echo $USER_CSV |awk -F ',' '{print $2}')
+PASSWORD=$(echo $USER_CSV |awk -F ',' '{print $1}')
+echo "Info: Dashboard username: ${USERNAME}"
+echo "Info: Dashboard password: ${PASSWORD}"
+
+echo "-----------------------------------------------------------------------"
+echo "Info: Continue with running the openstack-helm installation"
+echo "-----------------------------------------------------------------------"
+cd $OSH_XCI_PLAYBOOKS
+ansible-playbook ${XCI_ANSIBLE_PARAMS} -v -i ${XCI_PLAYBOOKS}/dynamic_inventory.py \
+ install-openstack-helm.yml
+echo "-----------------------------------------------------------------------"
+echo "Info: Openstack-helm installation execution done"
+echo "-----------------------------------------------------------------------"
+echo
+
+
+# vim: set ts=4 sw=4 expandtab:
diff --git a/xci/installer/osh/files/ha/inventory/group_vars/all.yml b/xci/installer/osh/files/ha/inventory/group_vars/all.yml
new file mode 100644
index 00000000..d1b946a7
--- /dev/null
+++ b/xci/installer/osh/files/ha/inventory/group_vars/all.yml
@@ -0,0 +1,8 @@
+---
+loadbalancer_apiserver:
+ address: 192.168.122.222
+ port: 8383
+
+apiserver_loadbalancer_domain_name: 192.168.122.222
+supplementary_addresses_in_ssl_keys:
+ - 192.168.122.222
diff --git a/xci/installer/osh/playbooks/configure-installer.yml b/xci/installer/osh/playbooks/configure-installer.yml
new file mode 100644
index 00000000..383f55fc
--- /dev/null
+++ b/xci/installer/osh/playbooks/configure-installer.yml
@@ -0,0 +1,51 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2019 Ericsson Software Technology and Others
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- hosts: localhost
+ connection: local
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+
+ tasks:
+ - name: delete existing kubespray/inventory/opnfv directory
+ file:
+ path: "{{ xci_path }}/.cache/repos/kubespray/inventory/opnfv"
+ state: absent
+
+ - name: copy kubespray/inventory/sample as kubespray/inventory/opnfv
+ copy:
+ src: "{{ xci_path }}/.cache/repos/kubespray/inventory/sample/"
+ dest: "{{ xci_path }}/.cache/repos/kubespray/inventory/opnfv"
+
+ - name: update kubespray k8s-cluster.yml for xci
+ lineinfile:
+ path: "{{ xci_path }}/.cache/repos/kubespray/inventory/opnfv/group_vars/k8s-cluster/k8s-cluster.yml"
+ regexp: "{{ item.regexp }}"
+ line: "{{ item.line }}"
+ with_items:
+ - { regexp: "kube_version:.*", line: "kube_version: {{ kubernetes_version }}" }
+ - { regexp: "kubeconfig_localhost:.*", line: "kubeconfig_localhost: true" }
+ - { regexp: "kube_basic_auth:.*", line: "kube_basic_auth: true" }
+ - { regexp: "dashboard_enabled:.*", line: "dashboard_enabled: true" }
+
+# NOTE(fdegir): the reason this task is separate from the task above that uses the lineinfile
+# module is that escaping curly braces does not work with with_items. What happens is that
+# ansible tries to resolve {{ ansible_env.HOME }}, which we don't want since it should point
+# to the home folder of the user executing this task at runtime.
+ - name: update kubespray artifacts_dir
+ lineinfile:
+ path: "{{ xci_path }}/.cache/repos/kubespray/inventory/opnfv/group_vars/k8s-cluster/k8s-cluster.yml"
+ regexp: "artifacts_dir:.*"
+ line: "artifacts_dir: '{{ '{{' }} ansible_env.HOME {{ '}}' }}'"
+
+ - name: change dashboard server type to NodePort
+ lineinfile:
+ path: "{{ xci_path }}/.cache/repos/kubespray/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2"
+ insertafter: 'targetPort'
+ line: " type: NodePort"
diff --git a/xci/installer/osh/playbooks/configure-kubenet.yml b/xci/installer/osh/playbooks/configure-kubenet.yml
new file mode 100644
index 00000000..18a126c1
--- /dev/null
+++ b/xci/installer/osh/playbooks/configure-kubenet.yml
@@ -0,0 +1,51 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 SUSE LINUX GmbH and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# NOTE(hwoarang) Kubenet expects networking to be prepared by the administrator so it's necessary
+# to do that as part of the node configuration. All we need is to add static routes on every node
+# so cbr0 interfaces can talk to each other.
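+# For example, if another node's cbr0 network is 10.233.65.0/24 and that
+# node's default address is 192.168.122.5, the route added below would be
+# "ip route add 10.233.65.0/24 via 192.168.122.5" (addresses illustrative).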
+- name: Prepare networking for kubenet
+ hosts: k8s-cluster
+ remote_user: root
+ gather_facts: True
+ become: yes
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+ tasks:
+ - name: Configure static routes
+ block:
+ - name: Collect cbr0 information from the nodes
+ set_fact:
+ kubenet_xci_static_routes: |-
+ {% set static_routes = [] %}
+ {% for host in groups['k8s-cluster']|select("ne", inventory_hostname) %}
+ {%- set _ = static_routes.append(
+ {'network': (hostvars[host]['ansible_cbr0']['ipv4']['network']+'/'+
+ hostvars[host]['ansible_cbr0']['ipv4']['netmask'])|ipaddr('net'),
+ 'gateway': hostvars[host]['ansible_default_ipv4']['address']}) -%}
+ {% endfor %}
+ {{ static_routes }}
+
+ - name: Add static routes on each node
+ shell: "ip route show | grep -q {{ item.network }} || ip route add {{ item.network }} via {{ item.gateway }}"
+ with_items: "{{ kubenet_xci_static_routes }}"
+ loop_control:
+ label: "{{ item.network }}"
+ when: deploy_scenario.find('k8-nosdn-') != -1
+
+ - name: Ensure rp_filter is disabled on localhost
+ sysctl:
+ name: net.ipv4.conf.all.rp_filter
+ sysctl_set: yes
+ state: present
+ value: "{{ (kubenet_xci_static_routes is defined) | ternary(0, 1) }}"
+ reload: yes
+ delegate_to: localhost
+ run_once: True
diff --git a/xci/installer/osh/playbooks/configure-opnfvhost.yml b/xci/installer/osh/playbooks/configure-opnfvhost.yml
new file mode 100644
index 00000000..52e42b06
--- /dev/null
+++ b/xci/installer/osh/playbooks/configure-opnfvhost.yml
@@ -0,0 +1,101 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- hosts: opnfv
+ remote_user: root
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+
+ pre_tasks:
+ - name: Load distribution variables
+ include_vars:
+ file: "{{ item }}"
+ with_items:
+ - "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml"
+ - name: Set facts for remote deployment
+ set_fact:
+ remote_xci_path: "{{ ansible_env.HOME }}/releng-xci"
+ remote_xci_flavor_files: "{{ ansible_env.HOME }}/releng-xci/xci/installer/{{ installer_type }}/files/{{ xci_flavor }}"
+ remote_xci_playbooks: "{{ ansible_env.HOME }}/releng-xci/xci/playbooks"
+
+ roles:
+ - role: bootstrap-host
+ configure_network: xci_flavor != 'aio'
+
+ tasks:
+ - name: Create list of files to copy
+ shell: |
+ git ls-tree -r --name-only HEAD > {{ xci_cache }}/releng-xci.files
+ echo ".git/" >> {{ xci_cache }}/releng-xci.files
+ echo ".cache/repos/" >> {{ xci_cache }}/releng-xci.files
+ echo ".cache/xci.env" >> {{ xci_cache }}/releng-xci.files
+ args:
+ executable: /bin/bash
+ chdir: "{{ xci_path }}"
+ changed_when: False
+ delegate_to: 127.0.0.1
+ tags:
+ - skip_ansible_lint
+
+ - name: Copy releng-xci to remote host
+ synchronize:
+ archive: yes
+ src: "{{ xci_path }}/"
+ dest: "{{ remote_xci_path }}"
+ delete: yes
+ rsync_opts:
+ - "--recursive"
+ - "--files-from={{ xci_cache }}/releng-xci.files"
+
+ - name: link xci dynamic inventory to kubespray/inventory/opnfv directory
+ file:
+ src: "{{ remote_xci_playbooks }}/dynamic_inventory.py"
+ path: "{{ remote_xci_path }}/.cache/repos/kubespray/inventory/opnfv/dynamic_inventory.py"
+ state: link
+
+ - name: Download kubectl and place it to /usr/local/bin
+ get_url:
+ url: "https://storage.googleapis.com/kubernetes-release/release/{{ kubernetes_version }}/bin/linux/amd64/kubectl"
+ dest: /usr/local/bin/kubectl
+ owner: root
+ group: root
+ mode: 0755
+
+ - name: Reload XCI deployment host facts
+ setup:
+ filter: ansible_local
+ gather_subset: "!all"
+ delegate_to: 127.0.0.1
+
+ - name: Prepare everything to run the {{ deploy_scenario }} role
+ include_role:
+ name: "{{ hostvars['opnfv'].ansible_local.xci.scenarios.role }}"
+
+ - name: Install required packages
+ package:
+ name: "{{ (ansible_pkg_mgr == 'zypper') | ternary('dbus-1', 'dbus') }}"
+ state: present
+ update_cache: "{{ (ansible_pkg_mgr in ['apt', 'zypper']) | ternary('yes', omit) }}"
+ when: xci_flavor == 'aio'
+
+ - name: pip install required packages
+ pip:
+ name: "{{ item.name }}"
+ version: "{{ item.version | default(omit) }}"
+ with_items:
+ - { name: 'ansible', version: "{{ xci_kube_ansible_pip_version }}" }
+ - { name: 'netaddr' }
+ - { name: 'ansible-modules-hashivault' }
+
+ - name: fetch xci environment
+ copy:
+ src: "{{ xci_path }}/.cache/xci.env"
+ dest: /root/xci.env
+
+ - name: Manage SSH keys
+ include_tasks: "{{ xci_path }}/xci/playbooks/manage-ssh-keys.yml"
diff --git a/xci/installer/osh/playbooks/configure-targethosts.yml b/xci/installer/osh/playbooks/configure-targethosts.yml
new file mode 100644
index 00000000..2fde9877
--- /dev/null
+++ b/xci/installer/osh/playbooks/configure-targethosts.yml
@@ -0,0 +1,40 @@
+---
+- hosts: k8s-cluster
+ remote_user: root
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+
+ pre_tasks:
+ - name: Load distribution variables
+ include_vars:
+ file: "{{ item }}"
+ with_items:
+ - "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml"
+
+ roles:
+ - role: bootstrap-host
+
+ tasks:
+ - name: Manage SSH keys
+ include_tasks: "{{ xci_path }}/xci/playbooks/manage-ssh-keys.yml"
+
+ - name: Install dbus
+ package:
+ name: "{{ (ansible_pkg_mgr == 'zypper') | ternary('dbus-1', 'dbus') }}"
+ state: present
+ update_cache: "{{ (ansible_pkg_mgr in ['apt', 'zypper']) | ternary('yes', omit) }}"
+
+- hosts: kube-master
+ remote_user: root
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+ pre_tasks:
+ - name: Load distribution variables
+ include_vars:
+ file: "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml"
+ roles:
+ - role: "keepalived"
+ when: xci_flavor == 'ha'
+ - role: "haproxy_server"
+ haproxy_service_configs: "{{ haproxy_default_services }}"
+ when: xci_flavor == 'ha'
diff --git a/xci/installer/osh/playbooks/group_vars/all.yml b/xci/installer/osh/playbooks/group_vars/all.yml
new file mode 100644
index 00000000..7453bdab
--- /dev/null
+++ b/xci/installer/osh/playbooks/group_vars/all.yml
@@ -0,0 +1,55 @@
+---
+keepalived_ubuntu_src: "uca"
+keepalived_uca_apt_repo_url: "{{ uca_apt_repo_url | default('http://ubuntu-cloud.archive.canonical.com/ubuntu') }}"
+
+keepalived_sync_groups:
+ haproxy:
+ instances:
+ - external
+
+haproxy_keepalived_external_interface: "{{ ansible_default_ipv4.interface }}"
+haproxy_keepalived_authentication_password: 'keepalived'
+keepalived_instances:
+ external:
+ interface: "{{ haproxy_keepalived_external_interface }}"
+ state: "BACKUP"
+ virtual_router_id: "{{ haproxy_keepalived_external_virtual_router_id | default ('10') }}"
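+ # The expression below reduces to (play hosts - host index) * 50, so hosts
+ # earlier in the play get a higher keepalived priority.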
+ priority: "{{ ((ansible_play_hosts|length-ansible_play_hosts.index(inventory_hostname))*100)-((ansible_play_hosts|length-ansible_play_hosts.index(inventory_hostname))*50) }}"
+ authentication_password: "{{ haproxy_keepalived_authentication_password }}"
+ vips:
+ - "{{ haproxy_keepalived_external_vip_cidr | default('192.168.122.222/32') }} dev {{ haproxy_keepalived_external_interface }}"
+
+haproxy_default_services:
+ - service:
+ haproxy_service_name: proxy-apiserver
+ haproxy_backend_nodes: "{{ groups['kube-master'] | default([]) }}"
+ haproxy_port: 8383
+ haproxy_backend_port: 6443
+ haproxy_balance_type: tcp
+
+haproxy_bind_on_non_local: "True"
+haproxy_use_keepalived: "True"
+keepalived_selinux_compile_rules:
+ - keepalived_ping
+ - keepalived_haproxy_pid_file
+
+# Ensure that the package state matches the global setting
+haproxy_package_state: "latest"
+
+haproxy_whitelist_networks:
+ - 192.168.0.0/16
+ - 172.16.0.0/12
+ - 10.0.0.0/8
+
+haproxy_galera_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_glance_registry_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_keystone_admin_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_nova_metadata_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_rabbitmq_management_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_repo_git_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_repo_cache_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_octavia_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_ssl: false
+
+internal_lb_vip_address: "192.168.122.222"
+external_lb_vip_address: "{{ internal_lb_vip_address }}"
diff --git a/xci/installer/osh/playbooks/install-openstack-helm.yml b/xci/installer/osh/playbooks/install-openstack-helm.yml
new file mode 100644
index 00000000..a16572a5
--- /dev/null
+++ b/xci/installer/osh/playbooks/install-openstack-helm.yml
@@ -0,0 +1,24 @@
+---
+- hosts: kube-node
+ remote_user: root
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+
+ roles:
+ - role: prepare-kube-nodes-osh
+
+- hosts: opnfv
+ remote_user: root
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+ roles:
+ - role: prepare-opnfvhost-osh
+ - role: prepare-osh
+ - role: install-osh-mini
+ when: xci_flavor == 'mini'
+ environment:
+ - CONTAINER_DISTRO_NAME: "{{ container_distro_name }}"
+ - CONTAINER_DISTRO_VERSION: "{{ container_distro_version }}"
+ - OPENSTACK_RELEASE: "{{ openstack_osh_version }}"
+ - role: install-osh-noha
+ when: xci_flavor == 'noha'
diff --git a/xci/installer/osh/playbooks/post-deployment.yml b/xci/installer/osh/playbooks/post-deployment.yml
new file mode 100644
index 00000000..5c2f7f36
--- /dev/null
+++ b/xci/installer/osh/playbooks/post-deployment.yml
@@ -0,0 +1,42 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- hosts: opnfv
+ remote_user: root
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+
+ pre_tasks:
+ - name: Load distribution variables
+ include_vars:
+ file: "{{ item }}"
+ with_items:
+ - "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml"
+ - name: Set facts for remote deployment
+ set_fact:
+ remote_xci_scenario_path: "{{ ansible_env.HOME }}/releng-xci/.cache/repos/scenarios/{{ deploy_scenario }}/scenarios/{{ deploy_scenario }}"
+
+ tasks:
+ - name: Reload XCI deployment host facts
+ setup:
+ filter: ansible_local
+ gather_subset: "!all"
+ delegate_to: 127.0.0.1
+
+ - name: Check if any post-deployment task defined for {{ deploy_scenario }} role
+ stat:
+ path: "{{ remote_xci_scenario_path }}/role/{{ deploy_scenario }}/tasks/post-deployment.yml"
+ register: post_deployment_yml
+
+ - name: Execute post-deployment tasks of {{ deploy_scenario }} role
+ include_role:
+ name: "{{ hostvars['opnfv'].ansible_local.xci.scenarios.role }}"
+ tasks_from: post-deployment
+ when:
+ - post_deployment_yml.stat.exists
diff --git a/xci/installer/osh/playbooks/roles/install-osh-mini/tasks/main.yml b/xci/installer/osh/playbooks/roles/install-osh-mini/tasks/main.yml
new file mode 100644
index 00000000..e5df54fa
--- /dev/null
+++ b/xci/installer/osh/playbooks/roles/install-osh-mini/tasks/main.yml
@@ -0,0 +1,109 @@
+---
+
+- name: Setup Clients
+ command: ./tools/deployment/common/setup-client.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy the ingress controller
+ command: ./tools/deployment/component/common/ingress.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy MariaDB
+ command: ./tools/deployment/component/common/mariadb.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy memcached
+ command: ./tools/deployment/component/common/memcached.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy RabbitMQ
+ command: ./tools/deployment/component/common/rabbitmq.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Update nfs-provisioner helm-chart
+ shell: helm dependency update nfs-provisioner
+ args:
+ chdir: /root/repos/openstack-helm-infra
+ executable: /bin/bash
+ tags:
+ - skip_ansible_lint
+
+- name: Deploy nfs-provisioner
+ command: ./tools/deployment/component/nfs-provisioner/nfs-provisioner.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Keystone
+ command: ./tools/deployment/component/keystone/keystone.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Heat
+ command: ./tools/deployment/component/heat/heat.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Glance
+ command: ./tools/deployment/component/glance/glance.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy OpenvSwitch
+ command: ./tools/deployment/component/compute-kit/openvswitch.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Libvirt
+ command: ./tools/deployment/component/compute-kit/libvirt.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
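+# compute-kit.sh defaults the Neutron tunnel interface to docker0; point it
+# at the br-vxlan bridge that XCI creates on the nodes instead.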
+- name: Add br-vxlan as the tunnel interface
+ lineinfile:
+ path: /root/repos/openstack-helm/tools/deployment/component/compute-kit/compute-kit.sh
+ regexp: 'tunnel: docker0'
+ line: ' tunnel: br-vxlan'
+
+- name: Deploy Compute Kit (Nova and Neutron)
+ command: ./tools/deployment/component/compute-kit/compute-kit.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Copy script to the worker node
+ command: "scp -o \"StrictHostKeyChecking no\" tools/deployment/developer/ceph/170-setup-gateway.sh root@{{ hostvars.node1.ip }}:170-setup-gateway.sh"
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Setup the gateway to the public network at worker node
+ command: /root/170-setup-gateway.sh
+ changed_when: false
+ delegate_to: node1
+
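+# 172.24.4.0/24 is the public (floating) network used by the tests; route it
+# through the worker node that was just configured as its gateway.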
+- name: Add a route from opnfv to worker node for the public network
+ command: ip route add 172.24.4.0/24 via 192.168.122.4
+ changed_when: false
+
+# Deployment validation
+- name: Exercise the cloud
+ command: ./tools/deployment/developer/common/900-use-it.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
diff --git a/xci/installer/osh/playbooks/roles/install-osh-mini/vars/main.yml b/xci/installer/osh/playbooks/roles/install-osh-mini/vars/main.yml
new file mode 100644
index 00000000..03c02a83
--- /dev/null
+++ b/xci/installer/osh/playbooks/roles/install-osh-mini/vars/main.yml
@@ -0,0 +1,18 @@
+---
+# Copyright 2019, SUSE Linux GmbH
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+cacheable: yes
+container_distro_name: "{{ (osh_distro=='opensuse') | ternary('opensuse', 'ubuntu') }}"
+container_distro_version: "{{ (osh_distro=='opensuse') | ternary('15', 'xenial') }}"
diff --git a/xci/installer/osh/playbooks/roles/install-osh-noha/tasks/main.yml b/xci/installer/osh/playbooks/roles/install-osh-noha/tasks/main.yml
new file mode 100644
index 00000000..befdcfce
--- /dev/null
+++ b/xci/installer/osh/playbooks/roles/install-osh-noha/tasks/main.yml
@@ -0,0 +1,130 @@
+---
+- name: Setup Clients
+ command: ./tools/deployment/multinode/010-setup-client.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy the ingress controller
+ command: ./tools/deployment/multinode/020-ingress.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Ceph
+ command: ./tools/deployment/multinode/030-ceph.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Activate the openstack namespace to be able to use Ceph
+ command: ./tools/deployment/multinode/040-ceph-ns-activate.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy MariaDB
+ command: ./tools/deployment/multinode/050-mariadb.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy RabbitMQ
+ command: ./tools/deployment/multinode/060-rabbitmq.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy memcached
+ command: ./tools/deployment/multinode/070-memcached.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Keystone
+ command: ./tools/deployment/multinode/080-keystone.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Horizon
+ command: ./tools/deployment/multinode/085-horizon.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Rados Gateway for object store
+ command: ./tools/deployment/multinode/090-ceph-radosgateway.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Glance
+ command: ./tools/deployment/multinode/100-glance.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Cinder
+ command: ./tools/deployment/multinode/110-cinder.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy OpenvSwitch
+ command: ./tools/deployment/multinode/120-openvswitch.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Libvirt
+ command: ./tools/deployment/multinode/130-libvirt.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
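+# Same fix as the mini flavor: use XCI's br-vxlan bridge as the tunnel
+# interface instead of letting the script auto-detect one.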
+- name: Add br-vxlan as the tunnel interface
+ lineinfile:
+ path: /root/repos/openstack-helm/tools/deployment/multinode/140-compute-kit.sh
+ regexp: 'NETWORK_TUNNEL_DEV="\$\(network_tunnel_dev\)"'
+ line: 'NETWORK_TUNNEL_DEV=br-vxlan'
+
+- name: Deploy Compute Kit (Nova and Neutron)
+ command: ./tools/deployment/multinode/140-compute-kit.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Heat
+ command: ./tools/deployment/multinode/150-heat.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Deploy Barbican
+ command: ./tools/deployment/multinode/160-barbican.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Copy script to the worker node
+ command: "scp -o \"StrictHostKeyChecking no\" tools/deployment/developer/ceph/170-setup-gateway.sh root@{{ hostvars.node1.ip }}:170-setup-gateway.sh"
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
+
+- name: Setup the gateway to the public network at worker node
+ command: /root/170-setup-gateway.sh
+ changed_when: false
+ delegate_to: node1
+
+- name: Add a route from opnfv to worker node for the public network
+ command: ip route add 172.24.4.0/24 via 192.168.122.4
+ changed_when: false
+
+# Deployment validation
+- name: Exercise the cloud
+ command: ./tools/deployment/developer/common/900-use-it.sh
+ changed_when: false
+ args:
+ chdir: /root/repos/openstack-helm
diff --git a/xci/installer/osh/playbooks/roles/prepare-kube-nodes-osh/tasks/main.yml b/xci/installer/osh/playbooks/roles/prepare-kube-nodes-osh/tasks/main.yml
new file mode 100644
index 00000000..ff0aff60
--- /dev/null
+++ b/xci/installer/osh/playbooks/roles/prepare-kube-nodes-osh/tasks/main.yml
@@ -0,0 +1,12 @@
+---
+- name: Install packages in kubernetes nodes
+ package:
+ name: "{{ packages }}"
+ state: present
+ changed_when: false
+ vars:
+ packages:
+ - ceph-common
+ - rbd-nbd
+ - apparmor
+ - nfs-common
diff --git a/xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/files/helm-serve.service b/xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/files/helm-serve.service
new file mode 100644
index 00000000..c3988d6f
--- /dev/null
+++ b/xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/files/helm-serve.service
@@ -0,0 +1,11 @@
+[Unit]
+Description=Helm Server
+After=network.target
+
+[Service]
+User=root
+Restart=always
+ExecStart=/usr/bin/helm serve
+
+[Install]
+WantedBy=multi-user.target
diff --git a/xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/tasks/main.yml b/xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/tasks/main.yml
new file mode 100644
index 00000000..72ae821f
--- /dev/null
+++ b/xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/tasks/main.yml
@@ -0,0 +1,130 @@
+---
+- name: Set kubernetes service account permissions
+ command: "kubectl create clusterrolebinding add-on-cluster-admin --clusterrole=cluster-admin --serviceaccount=kube-system:default"
+ changed_when: false
+
+- name: Set kubernetes node labels
+ command: "kubectl label nodes {{ item }} {{ node_labels[item]|join(' ') }}"
+ changed_when: false
+ with_items: "{{ groups['kube-node'] }}"
+
+- name: Create directories
+ file:
+ path: /root/{{ item }}
+ state: directory
+ with_items:
+ ['repos', 'tmp', '.helm/repository/local']
+
+- name: Rename bifrost clouds file so it no longer takes precedence
+ command: "mv .config/openstack/clouds.yaml .config/openstack/clouds.yaml.bifrost"
+ changed_when: false
+
+- name: Clone openstack-helm
+ git:
+ repo: "{{ osh_git_url }}"
+ dest: /root/repos/openstack-helm
+ version: "{{ osh_version }}"
+ update: true
+ force: true
+ register: git_clone
+ until: git_clone is success
+ retries: 2
+ delay: 5
+
+- name: Fix dns nameserver for openstack installation (mini flavor)
+ lineinfile:
+ path: /root/repos/openstack-helm/tools/gate/files/heat-public-net-deployment.yaml
+ regexp: '10\.96\.0\.10'
+ line: " - 10.233.0.3"
+
+- name: Fix dns nameserver for openstack installation (noha flavor)
+ lineinfile:
+ path: /root/repos/openstack-helm/tempest/values.yaml
+ regexp: 'dns_servers'
+ line: " dns_servers: 10.233.0.3"
+
+- name: Clone openstack-helm-infra
+ git:
+ repo: "{{ osh_infra_git_url }}"
+ dest: /root/repos/openstack-helm-infra
+ version: "{{ osh_infra_version }}"
+ update: true
+ force: true
+ register: git_clone
+ until: git_clone is success
+ retries: 2
+ delay: 5
+
+- name: Get helm
+ get_url:
+ url: "{{ osh_helm_binary_url }}/helm-{{ osh_helm_binary_version }}-linux-amd64.tar.gz"
+ dest: tmp
+
+- name: Uncompress helm package
+ command: "tar zxvf tmp/helm-{{ osh_helm_binary_version }}-linux-amd64.tar.gz --strip-components=1 -C tmp/"
+ changed_when: false
+ tags:
+ - skip_ansible_lint
+
+- name: Put helm in system binaries
+ copy:
+ src: tmp/helm
+ dest: /usr/bin/helm
+ remote_src: yes
+ mode: 0755
+
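+# 'helm serve' exposes the local chart repository on 127.0.0.1:8879; the
+# charts built from openstack-helm-infra below are served from it.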
+- name: Create helm-serve service file
+ copy:
+ src: helm-serve.service
+ dest: "/etc/systemd/system/helm-serve.service"
+ mode: 0640
+
+- name: Start helm-serve service
+ service:
+ name: helm-serve
+ state: started
+ enabled: yes
+
+- name: Wait for helm-serve service to start
+ wait_for:
+ port: 8879
+ host: 127.0.0.1
+
+- name: Install pyhelm
+ pip:
+ name: pyhelm
+
+- name: Init helm
+ command: "helm init"
+ changed_when: false
+
+- name: Remove stable (external) service from helm
+ command: "helm repo remove stable"
+ changed_when: false
+
+- name: Add local repositories service to helm
+ command: "helm repo add local http://localhost:8879/charts"
+ changed_when: false
+
+- name: Make charts from infra
+ make:
+ chdir: /root/repos/openstack-helm-infra
+ target: "{{ item }}"
+ with_items:
+ - helm-toolkit
+ - ingress
+ - mariadb
+ - rabbitmq
+ - memcached
+ - ceph-mon
+ - ceph-osd
+ - ceph-client
+ - ceph-provisioners
+ - ceph-rgw
+ - openvswitch
+ - libvirt
+
+- name: Install packages
+ package:
+ name: "{{ required_packages }}"
+ state: present
diff --git a/xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/vars/main.yml b/xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/vars/main.yml
new file mode 100644
index 00000000..979c3329
--- /dev/null
+++ b/xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/vars/main.yml
@@ -0,0 +1,31 @@
+---
+required_packages:
+- patch
+- ipcalc
+- jq
+- nmap
+- bc
+
+node_labels:
+ node1:
+ - openstack-control-plane=enabled
+ - openstack-compute-node={{ (xci_flavor == 'mini') | ternary('enabled', 'disabled') }}
+ - openstack-helm-node-class=primary
+ - openvswitch=enabled
+ - linuxbridge=enabled
+ - ceph-mon=enabled
+ - ceph-osd=enabled
+ - ceph-mds=enabled
+ - ceph-mgr=enabled
+ - ceph-rgw=enabled
+ node2:
+ - openstack-control-plane={{ (xci_flavor == 'noha') | ternary('disabled', 'enabled') }}
+ - openstack-compute-node=enabled
+ - openstack-helm-node-class=secondary
+ - openvswitch=enabled
+ - linuxbridge=enabled
+ - ceph-mon=enabled
+ - ceph-osd=enabled
+ - ceph-mds=enabled
+ - ceph-mgr=enabled
+ - ceph-rgw=enabled
diff --git a/xci/installer/osh/playbooks/roles/prepare-osh/tasks/main.yml b/xci/installer/osh/playbooks/roles/prepare-osh/tasks/main.yml
new file mode 100644
index 00000000..453a815c
--- /dev/null
+++ b/xci/installer/osh/playbooks/roles/prepare-osh/tasks/main.yml
@@ -0,0 +1,33 @@
+---
+- name: Write new resolv.conf file
+ template:
+ src: resolv.conf.j2
+ dest: /etc/resolv.conf
+
+- name: Make resolv.conf immutable
+ shell: "chattr +i /etc/resolv.conf"
+ changed_when: false
+ args:
+ executable: /bin/bash
+ tags:
+ - skip_ansible_lint
+
+#TODO Fetch the value from a file generated by k8s deployer
+- name: Get kube service addresses
+ shell: "grep -r 'kube_service_addresses:' /root/releng-xci/.cache/repos/kubespray/inventory/opnfv/group_vars/k8s-cluster/k8s-cluster.yml | awk '{print $2}'"
+ changed_when: false
+ args:
+ executable: /bin/bash
+ register: kube_service_addresses
+ tags:
+ - skip_ansible_lint
+
+# This rule lets the OpenStack client in the OPNFV VM reach OpenStack
+- name: Update routing table with kube service addresses
+ shell: "ip route add {{ kube_service_addresses.stdout }} via 192.168.122.3 dev br-vlan onlink"
+ changed_when: false
+ args:
+ executable: /bin/bash
+ tags:
+ - skip_ansible_lint
+
diff --git a/xci/installer/osh/playbooks/roles/prepare-osh/templates/resolv.conf.j2 b/xci/installer/osh/playbooks/roles/prepare-osh/templates/resolv.conf.j2
new file mode 100644
index 00000000..ae706e02
--- /dev/null
+++ b/xci/installer/osh/playbooks/roles/prepare-osh/templates/resolv.conf.j2
@@ -0,0 +1,4 @@
+{{ dns_var }}
+{% for nameserver in external_dns_nameservers %}
+nameserver {{ nameserver }}
+{% endfor %}
diff --git a/xci/installer/osh/playbooks/roles/prepare-osh/vars/main.yml b/xci/installer/osh/playbooks/roles/prepare-osh/vars/main.yml
new file mode 100644
index 00000000..4d6f9cbb
--- /dev/null
+++ b/xci/installer/osh/playbooks/roles/prepare-osh/vars/main.yml
@@ -0,0 +1,7 @@
+---
+kube_dns_ip: "10.233.0.3"
+external_dns_nameservers:
+- '{{ kube_dns_ip }}'
+- '192.168.122.1'
+dns_var: "search svc.cluster.local cluster.local"
+
diff --git a/xci/opnfv-scenario-requirements.yml b/xci/opnfv-scenario-requirements.yml
index 929d88eb..98abf528 100644
--- a/xci/opnfv-scenario-requirements.yml
+++ b/xci/opnfv-scenario-requirements.yml
@@ -11,6 +11,59 @@
# OPNFV scenarios participating in XCI must create their own entry in this file so
# XCI can make use of them.
#
+
+# OpenStack based scenarios
+- scenario: os-nosdn-nofeature
+ scm: git
+ src: https://gerrit.opnfv.org/gerrit/releng-xci-scenarios
+ version: master
+ role: scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature
+ installers:
+ - installer: osa
+ flavors:
+ - ha
+ - mini
+ - noha
+ distros:
+ - opensuse
+ - ubuntu
+ - centos
+ - installer: osh
+ flavors:
+ - mini
+ - noha
+ distros:
+ - ubuntu
+ - opensuse
+ - ubuntu-bionic
+
+- scenario: os-nosdn-osm
+ scm: git
+ src: https://gerrit.opnfv.org/gerrit/releng-xci-scenarios
+ version: master
+ role: scenarios/os-nosdn-osm/role/os-nosdn-osm
+ installers:
+ - installer: osa
+ flavors:
+ - mini
+ distros:
+ - ubuntu
+
+- scenario: os-odl-nofeature
+ scm: git
+ src: https://gerrit.opnfv.org/gerrit/releng-xci-scenarios
+ version: master
+ role: scenarios/os-odl-nofeature/role/os-odl-nofeature
+ installers:
+ - installer: osa
+ flavors:
+ - ha
+ - mini
+ - noha
+ distros:
+ - opensuse
+ - ubuntu
+
- scenario: os-odl-sfc
scm: git
src: https://gerrit.opnfv.org/gerrit/sfc
@@ -26,11 +79,11 @@
- opensuse
- ubuntu
-- scenario: os-nosdn-nofeature
+- scenario: os-odl-sfc_osm
scm: git
- src: https://git.opnfv.org/releng-xci
+ src: https://gerrit.opnfv.org/gerrit/sfc
version: master
- role: xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature
+ role: scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm
installers:
- installer: osa
flavors:
@@ -38,14 +91,13 @@
- mini
- noha
distros:
- - opensuse
- ubuntu
-- scenario: os-odl-nofeature
+- scenario: os-odl-bgpvpn
scm: git
- src: https://git.opnfv.org/releng-xci
+ src: https://gerrit.opnfv.org/gerrit/sdnvpn
version: master
- role: xci/scenarios/os-odl-nofeature/role/os-odl-nofeature
+ role: scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn
installers:
- installer: osa
flavors:
@@ -53,5 +105,111 @@
- mini
- noha
distros:
+ - ubuntu
+ - centos
+
+
+# Kubernetes based scenarios
+- scenario: k8-nosdn-nofeature
+ scm: git
+ src: https://gerrit.opnfv.org/gerrit/releng-xci-scenarios
+ version: master
+ role: scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature
+ installers:
+ - installer: kubespray
+ flavors:
+ - ha
+ - mini
+ - noha
+ distros:
+ - ubuntu
+ - centos
+ - opensuse
+
+- scenario: k8-canal-nofeature
+ scm: git
+ src: https://gerrit.opnfv.org/gerrit/releng-xci-scenarios
+ version: master
+ role: scenarios/k8-canal-nofeature/role/k8-canal-nofeature
+ installers:
+ - installer: kubespray
+ flavors:
+ - ha
+ - mini
+ - noha
+ distros:
+ - ubuntu
+ - centos
+ - opensuse
+
+- scenario: k8-calico-nofeature
+ scm: git
+ src: https://gerrit.opnfv.org/gerrit/releng-xci-scenarios
+ version: master
+ role: scenarios/k8-calico-nofeature/role/k8-calico-nofeature
+ installers:
+ - installer: kubespray
+ flavors:
+ - ha
+ - mini
+ - noha
+ distros:
+ - ubuntu
+ - centos
- opensuse
+ - installer: osh
+ flavors:
+ - mini
+ - noha
+ distros:
- ubuntu
+ - opensuse
+ - ubuntu-bionic
+
+- scenario: k8-flannel-nofeature
+ scm: git
+ src: https://gerrit.opnfv.org/gerrit/releng-xci-scenarios
+ version: master
+ role: scenarios/k8-flannel-nofeature/role/k8-flannel-nofeature
+ installers:
+ - installer: kubespray
+ flavors:
+ - ha
+ - noha
+ - mini
+ distros:
+ - ubuntu
+ - centos
+ - opensuse
+
+- scenario: k8-contiv-nofeature
+ scm: git
+ src: https://gerrit.opnfv.org/gerrit/releng-xci-scenarios
+ version: master
+ role: scenarios/k8-contiv-nofeature/role/k8-contiv-nofeature
+ installers:
+ - installer: kubespray
+ flavors:
+ - ha
+ - noha
+ - mini
+ distros:
+ - ubuntu
+ - centos
+ - opensuse
+
+- scenario: k8-nosdn-istio
+ scm: git
+ src: https://gerrit.opnfv.org/gerrit/releng-xci-scenarios
+ version: master
+ role: scenarios/k8-nosdn-istio/role/k8-nosdn-istio
+ installers:
+ - installer: kubespray
+ flavors:
+ - ha
+ - mini
+ - noha
+ distros:
+ - ubuntu
+ - centos
+ - opensuse
diff --git a/xci/playbooks/configure-localhost.yml b/xci/playbooks/configure-localhost.yml
new file mode 100644
index 00000000..7aab18f3
--- /dev/null
+++ b/xci/playbooks/configure-localhost.yml
@@ -0,0 +1,116 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2017 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- hosts: localhost
+ connection: local
+
+ pre_tasks:
+ - name: Load distribution variables
+ include_vars:
+ file: "{{ item }}"
+ failed_when: false
+ with_items:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+ - "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml"
+
+ - name: cleanup leftovers of previous deployment
+ file:
+ path: "{{ item }}"
+ state: absent
+ recurse: no
+ with_items:
+ - "{{ log_path }} "
+ - "{{ opnfv_ssh_host_keys_path }}"
+
+ roles:
+ - role: clone-repository
+ project: "openstack/openstack-ansible-openstack_openrc"
+ repo: "{{ openstack_osa_openrc_git_url }}"
+ dest: roles/openstack-ansible-openstack_openrc
+ version: "master"
+ when: installer_type == "osa"
+ - role: clone-repository
+ project: "openstack/openstack-ansible"
+ repo: "{{ openstack_osa_git_url }}"
+ dest: "{{ xci_cache }}/repos/openstack-ansible"
+ version: "{{ openstack_osa_version }}"
+ when: installer_type == "osa"
+ - role: clone-repository
+ project: "kubernetes-incubator/kubespray"
+ repo: "{{ kubespray_git_url }}"
+ dest: "{{ xci_cache }}/repos/kubespray"
+ version: "{{ kubespray_version }}"
+ when: installer_type in ["kubespray", "osh"]
+ - role: clone-repository
+ project: "openstack/openstack-ansible-haproxy_server"
+ repo: "{{ openstack_osa_haproxy_git_url }}"
+ dest: roles/haproxy_server
+ version: "{{ haproxy_version }}"
+ when:
+ - installer_type == "kubespray" or installer_type == "osh"
+ - role: clone-repository
+ project: "ansible-keepalived"
+ repo: "{{ keepalived_git_url }}"
+ dest: roles/keepalived
+ version: "{{ keepalived_version }}"
+ when:
+ - installer_type == "kubespray" or installer_type == "osh"
+
+ tasks:
+ - name: create log directory {{ log_path }}
+ file:
+ path: "{{ log_path }}"
+ state: directory
+ recurse: no
+
+ - name: Synchronize local development OSA repository to XCI paths
+ # command module is much faster than the copy module
+ synchronize:
+ src: "{{ openstack_osa_dev_path }}"
+ dest: "{{ xci_cache }}/repos/openstack-ansible"
+ recursive: yes
+ delete: yes
+ when:
+ - openstack_osa_dev_path != ""
+ - installer_type == "osa"
+
+ - name: Configure SSH key for local user
+ user:
+ name: "{{ ansible_env.USER }}"
+ createhome: yes
+ home: "/home/{{ ansible_env.USER }}"
+ move_home: yes
+ shell: /bin/bash
+ generate_ssh_key: yes
+ ssh_key_bits: 2048
+ ssh_key_comment: xci
+ ssh_key_type: rsa
+ ssh_key_file: .ssh/id_rsa
+ state: present
+
+ - name: Dump XCI execution environment to a file
+ shell: env > "{{ xci_path }}/.cache/xci.env"
+ args:
+ executable: /bin/bash
+ creates: "{{ xci_path }}/.cache/xci.env"
+
+ #TODO: Create an Ansible variable for
+ # kube_service_addresses(10.233.0.0/18)
+ - name: Update iptables
+ command: "iptables -t nat -I POSTROUTING 3 -s 192.168.122.0/24 -d 10.233.0.0/18 -j RETURN"
+ become: true
+ tags:
+ - skip_ansible_lint
+
+ #Provide access to the external network (for tests)
+ - name: Update iptables
+ command: "iptables -t nat -I POSTROUTING 3 -s 192.168.122.0/24 -d 172.24.4.0/24 -j RETURN"
+ become: true
+ tags:
+ - skip_ansible_lint
diff --git a/xci/playbooks/dynamic_inventory.py b/xci/playbooks/dynamic_inventory.py
new file mode 100755
index 00000000..ed63141c
--- /dev/null
+++ b/xci/playbooks/dynamic_inventory.py
@@ -0,0 +1,240 @@
+#!/usr/bin/python
+# coding utf-8
+
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 SUSE LINUX GmbH.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+#
+# Based on https://raw.githubusercontent.com/ansible/ansible/devel/contrib/inventory/cobbler.py
+
+import argparse
+import glob
+import os
+import sys
+import yaml
+import json
+
+
+class XCIInventory(object):
+ """
+
+ Generates the ansible inventory based on the idf and pdf files provided
+ when executing the deployment script
+
+ """
+ def __init__(self):
+ super(XCIInventory, self).__init__()
+ self.inventory = {}
+ self.inventory['all'] = {}
+ self.inventory['all']['hosts'] = []
+ self.inventory['all']['vars'] = {}
+ self.inventory['_meta'] = {}
+ self.inventory['_meta']['hostvars'] = {}
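+ # The '_meta.hostvars' block lets Ansible fetch all host variables from a
+ # single --list call instead of invoking --host once per node.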
+ self.installer = os.environ.get('INSTALLER_TYPE', 'osa')
+ self.flavor = os.environ.get('XCI_FLAVOR', 'mini')
+ self.flavor_files = os.path.dirname(os.path.realpath(__file__)) + "/../installer/" + self.installer + "/files/" + self.flavor
+
+ # Static information for opnfv host for now
+ self.add_host('opnfv')
+ self.add_hostvar('opnfv', 'ansible_host', '192.168.122.2')
+ self.add_hostvar('opnfv', 'ip', '192.168.122.2')
+ self.add_to_group('deployment', 'opnfv')
+ self.add_to_group('opnfv', 'opnfv')
+
+ self.opnfv_networks = {}
+ self.opnfv_networks['opnfv'] = {}
+ self.opnfv_networks['opnfv']['mgmt'] = {}
+ self.opnfv_networks['opnfv']['mgmt']['address'] = '172.29.236.10/22'
+ self.opnfv_networks['opnfv']['public'] = {}
+ self.opnfv_networks['opnfv']['public']['address'] = '192.168.122.2/24'
+ self.opnfv_networks['opnfv']['public']['gateway'] = '192.168.122.1'
+ self.opnfv_networks['opnfv']['public']['dns'] = ['192.168.122.1']
+ self.opnfv_networks['opnfv']['private'] = {}
+ self.opnfv_networks['opnfv']['private']['address'] = '172.29.240.10/22'
+ self.opnfv_networks['opnfv']['storage'] = {}
+ self.opnfv_networks['opnfv']['storage']['address'] = '172.29.244.10/24'
+
+ # Add localhost
+ self.add_host('deployment_host')
+ self.add_hostvar('deployment_host', 'ansible_ssh_host', '127.0.0.1')
+ self.add_hostvar('deployment_host', 'ansible_connection', 'local')
+
+ self.read_pdf_idf()
+
+ self.parse_args()
+
+ if self.args.host:
+ self.dump(self.get_host_info(self.args.host))
+ else:
+ self.dump(self.inventory)
+
+ def parse_args(self):
+ parser = argparse.ArgumentParser(description='Produce an Ansible inventory based on PDF/IDF XCI files')
+ parser.add_argument('--list', action='store_true', default=True, help='List XCI hosts (default: True)')
+ parser.add_argument('--host', action='store', help='Get all the variables about a specific host')
+ self.args = parser.parse_args()
+
+ def read_pdf_idf(self):
+ pdf_file = os.environ['PDF']
+ idf_file = os.environ['IDF']
+ opnfv_file = os.path.dirname(os.path.realpath(__file__)) + "/../var/opnfv_vm_pdf.yml"
+ opnfv_idf_file = os.path.dirname(os.path.realpath(__file__)) + "/../var/opnfv_vm_idf.yml"
+ nodes = []
+ host_networks = {}
+
+ with open(pdf_file) as f:
+ try:
+ pdf = yaml.safe_load(f)
+ except yaml.YAMLError as e:
+ print(e)
+ sys.exit(1)
+
+ with open(idf_file) as f:
+ try:
+ idf = yaml.safe_load(f)
+ except yaml.YAMLError as e:
+ print(e)
+ sys.exit(1)
+
+ with open(opnfv_file) as f:
+ try:
+ opnfv_pdf = yaml.safe_load(f)
+ except yaml.YAMLError as e:
+ print(e)
+ sys.exit(1)
+
+ with open(opnfv_idf_file) as f:
+ try:
+ opnfv_idf = yaml.safe_load(f)
+ except yaml.YAMLError as e:
+ print(e)
+ sys.exit(1)
+
+
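+ # Keep only hosts that have a role for this installer and belong to the
+ # selected flavor; the opnfv VM is added statically above.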
+ valid_host = (host for host in idf['xci']['installers'][self.installer]['nodes_roles'] \
+ if host in idf['xci']['flavors'][self.flavor] \
+ and host != 'opnfv')
+
+ for host in valid_host:
+ nodes.append(host)
+ hostname = idf['xci']['installers'][self.installer]['hostnames'][host]
+ self.add_host(hostname)
+ for role in idf['xci']['installers'][self.installer]['nodes_roles'][host]:
+ self.add_to_group(role, hostname)
+
+ pdf_host_info = list(filter(lambda x: x['name'] == host, pdf['nodes']))[0]
+ native_vlan_if = list(filter(lambda x: x['vlan'] == 'native', pdf_host_info['interfaces']))
+ self.add_hostvar(hostname, 'ansible_host', native_vlan_if[0]['address'])
+ self.add_hostvar(hostname, 'ip', native_vlan_if[0]['address'])
+ host_networks[hostname] = {}
+ # And now record the rest of the information
+ for network, ndata in idf['idf']['net_config'].items():
+ network_interface_num = idf['idf']['net_config'][network]['interface']
+ host_networks[hostname][network] = {}
+ host_networks[hostname][network]['address'] = pdf_host_info['interfaces'][int(network_interface_num)]['address'] + "/" + str(ndata['mask'])
+ if 'gateway' in ndata.keys():
+ host_networks[hostname][network]['gateway'] = str(ndata['gateway']) + "/" + str(ndata['mask'])
+ if 'dns' in ndata.keys():
+ host_networks[hostname][network]['dns'] = []
+ for d in ndata['dns']:
+ host_networks[hostname][network]['dns'].append(str(d))
+
+ # Get also vlan and mac_address from pdf
+ host_networks[hostname][network]['mac_address'] = str(pdf_host_info['interfaces'][int(network_interface_num)]['mac_address'])
+ host_networks[hostname][network]['vlan'] = str(pdf_host_info['interfaces'][int(network_interface_num)]['vlan'])
+
+ # Get also vlan and mac_address from opnfv_pdf
+ mgmt_idf_index = int(opnfv_idf['opnfv_vm_idf']['net_config']['mgmt']['interface'])
+ opnfv_mgmt = opnfv_pdf['opnfv_vm_pdf']['interfaces'][mgmt_idf_index]
+ admin_idf_index = int(opnfv_idf['opnfv_vm_idf']['net_config']['admin']['interface'])
+ opnfv_public = opnfv_pdf['opnfv_vm_pdf']['interfaces'][admin_idf_index]
+ self.opnfv_networks['opnfv']['mgmt']['mac_address'] = str(opnfv_mgmt['mac_address'])
+ self.opnfv_networks['opnfv']['mgmt']['vlan'] = str(opnfv_mgmt['vlan'])
+ self.opnfv_networks['opnfv']['public']['mac_address'] = str(opnfv_public['mac_address'])
+ self.opnfv_networks['opnfv']['public']['vlan'] = str(opnfv_public['vlan'])
+
+ # Add the interfaces from idf
+
+
+ host_networks.update(self.opnfv_networks)
+
+ self.add_groupvar('all', 'host_info', host_networks)
+
+ if 'deployment_host_interfaces' in idf['xci']['installers'][self.installer]['network']:
+ mgmt_idf_index = int(opnfv_idf['opnfv_vm_idf']['net_config']['mgmt']['interface'])
+ admin_idf_index = int(opnfv_idf['opnfv_vm_idf']['net_config']['admin']['interface'])
+ self.add_hostvar('deployment_host', 'network_interface_admin', idf['xci']['installers'][self.installer]['network']['deployment_host_interfaces'][admin_idf_index])
+ self.add_hostvar('deployment_host', 'network_interface_mgmt', idf['xci']['installers'][self.installer]['network']['deployment_host_interfaces'][mgmt_idf_index])
+
+ # Now add the additional groups
+ for parent in idf['xci']['installers'][self.installer]['groups'].keys():
+ for host in idf['xci']['installers'][self.installer]['groups'][parent]:
+ self.add_group(host, parent)
+
+ # Read additional group variables
+ self.read_additional_group_vars()
+
+ def read_additional_group_vars(self):
+ if not os.path.exists(self.flavor_files + "/inventory/group_vars"):
+ return
+ group_dir = self.flavor_files + "/inventory/group_vars/*.yml"
+ group_file = glob.glob(group_dir)
+ for g in group_file:
+ with open(g) as f:
+ try:
+ group_vars = yaml.safe_load(f)
+ except yaml.YAMLError as e:
+ print(e)
+ sys.exit(1)
+ for k, v in group_vars.items():
+ self.add_groupvar(os.path.basename(g.replace('.yml', '')), k, v)
+
+ def dump(self, data):
+ print(json.dumps(data, sort_keys=True, indent=2))
+
+ def add_host(self, host):
+ self.inventory['all']['hosts'].append(host)
+
+ def hosts(self):
+ return self.inventory['all']['hosts']
+
+ def add_group(self, group, parent='all'):
+ if parent not in self.inventory.keys():
+ self.inventory[parent] = {}
+ if 'children' not in self.inventory[parent]:
+ self.inventory[parent]['children'] = []
+ self.inventory[parent]['children'].append(group)
+
+ def add_to_group(self, group, host):
+ if group not in self.inventory.keys():
+ self.inventory[group] = []
+ self.inventory[group].append(host)
+
+ def add_hostvar(self, host, param, value):
+ if host not in self.hostvars():
+ self.inventory['_meta']['hostvars'][host] = {}
+ self.inventory['_meta']['hostvars'][host].update({param: value})
+
+ def add_groupvar(self, group, param, value):
+ if param not in self.groupvars(group):
+ self.inventory[group]['vars'][param] = {}
+ self.inventory[group]['vars'].update({param: value})
+
+ def hostvars(self):
+ return iter(self.inventory['_meta']['hostvars'].keys())
+
+ def groupvars(self, group):
+ return iter(self.inventory[group]['vars'].keys())
+
+ def get_host_info(self, host):
+ return self.inventory['_meta']['hostvars'][host]
+
+if __name__ == '__main__':
+ XCIInventory()
+
+# vim: set ts=4 sw=4 expandtab:
diff --git a/xci/playbooks/get-opnfv-scenario-requirements.yml b/xci/playbooks/get-opnfv-scenario-requirements.yml
index 7eaa43de..a9165709 100644
--- a/xci/playbooks/get-opnfv-scenario-requirements.yml
+++ b/xci/playbooks/get-opnfv-scenario-requirements.yml
@@ -31,15 +31,88 @@
loop_control:
label: "{{ item[0].scenario }}"
- - name: Create scenario directories
- file:
- path: "{{ role_path_default }}/{{ item.scenario }}"
- state: directory
+ - name: Update scenarios with local overrides
+ set_fact:
+ scenarios: >
+ {%- for z in xci_scenarios_overrides -%}
+ {%- for x in scenarios if x.scenario == z.scenario -%}
+ {%- set _ = x.update(z) -%}
+ {%- endfor -%}
+ {%- endfor -%}
+ {{- scenarios -}}
+ with_items: "{{ xci_scenarios_overrides }}"
+ loop_control:
+ label: "{{ item.scenario }}"
+ when: xci_scenarios_overrides is defined
+
+ - name: Collect list of known scenarios
+ set_fact:
+ known_scenarios: >
+ {%- set scenario_names = [] -%}
+ {%- for x in scenarios -%}
+ {%- set _ = scenario_names.append(x.scenario) -%}
+ {%- endfor -%}
+ {{- scenario_names -}}
with_items: "{{ scenarios }}"
loop_control:
label: "{{ item.scenario }}"
- - name: Clone git repos (with git)
+ - name: Fail if 'DEPLOY_SCENARIO' is not defined
+ fail:
+ msg: "DEPLOY_SCENARIO env variable is not defined so no scenario can be deployed"
+ when: deploy_scenario is not defined
+
+ - name: Ensure {{ deploy_scenario }} is a known XCI scenario
+ fail:
+ msg: "{{ deploy_scenario }} does not exist"
+ when: deploy_scenario not in known_scenarios
+
+ - name: Collect scenario information
+ set_fact:
+ xci_scenario: >
+ {%- set xci_scenario = {} -%}
+ {%- for x in scenarios if x.scenario == deploy_scenario -%}
+ {%- for z in x.installers if z.installer == installer_type -%}
+ {%- set _ = xci_scenario.update({'flavors': z.flavors}) -%}
+ {%- set _ = xci_scenario.update({'distros': z.distros}) -%}
+ {%- endfor -%}
+ {%- set _ = xci_scenario.update({'role': x.role | basename}) -%}
+ {%- endfor -%}
+ {{ xci_scenario }}
+
+ - name: Ensure local facts directory exists
+ file:
+ path: "/etc/ansible/facts.d"
+ state: directory
+ become: true
+
+ - name: Record scenario information
+ ini_file:
+ create: yes
+ section: scenarios
+ state: present
+ option: role
+ value: "{{ xci_scenario.role | basename }}"
+ path: "/etc/ansible/facts.d/xci.fact"
+ become: true
+
+ - name: Fail if {{ deploy_scenario }} is not supported
+ fail:
+ msg:
+ - ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ - ERROR! The {{ deploy_scenario }} scenario can't be deployed. This is because
+ - the {{ installer_type }} XCI installer or the {{ xci_flavor }} flavor or the {{ xci_distro }}
+ - distribution is not supported by this scenario. It may also be possible that
+ - this scenario doesn't exist at all or it's not listed in {{ scenario_file }}.
+ - ''
+ - This is a great chance for you to contribute to XCI ;-)
+ - ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ - ''
+ when:
+ (xci_scenario['flavors'] is defined and xci_flavor not in xci_scenario['flavors']) or
+ (xci_scenario['distros'] is defined and xci_distro not in xci_scenario['distros'])
+
+ - name: Clone git repos
git:
repo: "{{ item.src }}"
dest: "{{ scenario_path_default }}/{{ item.scenario | default(item.src | basename) }}"
@@ -47,8 +120,6 @@
refspec: "{{ item.refspec | default(omit) }}"
update: true
force: true
- when:
- - item.scm == "git" or item.scm is undefined
with_items: "{{ scenarios }}"
register: git_clone
until: git_clone | success
@@ -57,91 +128,19 @@
loop_control:
label: "{{ item.scenario }}"
- - name: Check that scenarios exist
- stat:
- path: "{{ scenario_path_default }}/{{ item.scenario }}/{{ item.role }}"
- register: scenarios_list_exists
- with_items: "{{ scenarios }}"
- loop_control:
- label: "{{ item.scenario }}"
-
- - name: Plug in the scenario to XCI
- synchronize:
- src: "{{ scenario_path_default }}/{{ item.item.scenario }}/{{ item.item.role }}/"
- dest: "{{ role_path_default }}/{{ item.item.scenario }}"
- when: item.stat.exists
- with_items: "{{ scenarios_list_exists.results }}"
- loop_control:
- label: "{{ item.item.scenario }}"
-
- - name: Synchronize local changes to scenarios' master branch
+ - name: Plug in the scenario Ansible roles to XCI
synchronize:
- src: "{{ XCI_PATH }}/{{ item.item.role }}/"
- dest: "{{ role_path_default }}/{{ item.item.scenario }}"
- failed_when: false
- when:
- - item.stat.exists
- - item.item.version == 'master'
- with_items: "{{ scenarios_list_exists.results }}"
- loop_control:
- label: "{{ item.item.scenario }}"
-
- - name: Plug in the scenario to XCI (fallback)
- synchronize:
- src: "{{ XCI_PATH }}/{{ item.item.role }}/"
- dest: "{{ role_path_default }}/{{ item.item.scenario }}"
- when: not item.stat.exists
- with_items: "{{ scenarios_list_exists.results }}"
- loop_control:
- label: "{{ item.item.scenario }}"
-
- - name: Gather information about the selected {{ DEPLOY_SCENARIO }} scenario
- set_fact:
- deploy_scenario: "{{ item }}"
+ src: "{{ scenario_path_default }}/{{ item.scenario }}/{{ item.role }}/"
+ dest: "{{ role_path_default }}/{{ item.role | basename }}"
with_items: "{{ scenarios }}"
loop_control:
label: "{{ item.scenario }}"
- when: DEPLOY_SCENARIO | lower == item.scenario
-
- - name: Determine if the selected {{ DEPLOY_SCENARIO }} scenario can be deployed
- block:
- - set_fact:
- deploy_scenario_installer: "{{ item }}"
- with_items: "{{ deploy_scenario.installers }}"
- loop_control:
- label: "{{ item.installer }}"
- when: item.installer == XCI_INSTALLER
- - set_fact:
- deploy_scenario_flavor: "{{ (XCI_FLAVOR in deploy_scenario_installer.flavors) | bool }}"
- when:
- - deploy_scenario_installer
- - set_fact:
- deploy_scenario_distro: "{{ (XCI_DISTRO in deploy_scenario_installer.distros) | bool }}"
- when:
- - deploy_scenario_installer
- - deploy_scenario_flavor
- when: deploy_scenario is defined
-
- - name: Fail if {{ DEPLOY_SCENARIO }} is not supported
- fail:
- msg:
- - ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
- - ERROR! The {{ DEPLOY_SCENARIO }} scenario can't be deployed. This is because
- - the {{ XCI_INSTALLER }} XCI installer or the {{ XCI_FLAVOR }} flavor or the {{ XCI_DISTRO }}
- - distribution is not supported by this scenario. It may also be possible that
- - this scenario doesn't exist at all or it's not listed in {{ scenario_file }}.
- - ''
- - This is a great chance for you to contribute to XCI ;-)
- - ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
- - ''
- when:
- - deploy_scenario is not defined or not deploy_scenario_distro
vars:
ansible_python_interpreter: "/usr/bin/python"
scenarios: "{{ lookup('file', scenario_file) | from_yaml }}"
scenario_file: '../opnfv-scenario-requirements.yml'
- scenario_path_default: "{{ XCI_SCENARIOS_CACHE }}"
+ scenario_path_default: "{{ xci_scenarios_cache }}"
role_path_default: "{{ playbook_dir }}/roles"
git_clone_retries: 2
git_clone_retry_delay: 5
diff --git a/xci/playbooks/manage-ssh-keys.yml b/xci/playbooks/manage-ssh-keys.yml
new file mode 100644
index 00000000..999215d8
--- /dev/null
+++ b/xci/playbooks/manage-ssh-keys.yml
@@ -0,0 +1,56 @@
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 SUSE Linux GmbH and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- name: Configure SSH key for devuser
+ user:
+ name: devuser
+ generate_ssh_key: yes
+ ssh_key_bits: 2048
+ ssh_key_comment: xci
+ ssh_key_type: rsa
+ state: present
+
+- name: Configure SSH key for root user
+ user:
+ name: root
+ generate_ssh_key: yes
+ ssh_key_bits: 2048
+ ssh_key_comment: xci
+ ssh_key_type: rsa
+ state: present
+
+- name: Determine local user
+ become: no
+ local_action: command whoami
+ changed_when: False
+ register: _ansible_user
+
+- name: Fetch local SSH key
+ delegate_to: localhost
+ become: no
+ slurp:
+ src: "/home/{{ _ansible_user.stdout }}/.ssh/id_rsa.pub"
+ register: _local_ssh_key
+
+- name: Fetch OPNFV SSH key
+ delegate_to: opnfv
+ slurp:
+ src: "{{ ansible_env.HOME }}/.ssh/id_rsa.pub"
+ register: _opnfv_ssh_key
+
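+# The local user's key is written exclusively (resetting authorized_keys);
+# the opnfv host key is then appended alongside it.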
+- name: "Configure {{ inventory_hostname }} authorized_keys file"
+ authorized_key:
+ exclusive: "{{ item.exclusive }}"
+ user: root
+ state: present
+ manage_dir: yes
+ key: "{{ item.key }}"
+ comment: "{{ item.comment }}"
+ with_items:
+ - { key: "{{ _local_ssh_key['content'] | b64decode }}", comment: "{{ _ansible_user.stdout }} key", exclusive: yes }
+ - { key: "{{ _opnfv_ssh_key['content'] | b64decode }}", comment: "opnfv host key", exclusive: no }
diff --git a/xci/playbooks/prepare-tests.yml b/xci/playbooks/prepare-tests.yml
index ee30094d..1a1935aa 100644
--- a/xci/playbooks/prepare-tests.yml
+++ b/xci/playbooks/prepare-tests.yml
@@ -13,7 +13,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-- name: Setup functest installing required packages and create the public network
+- name: Prepare the environment for testing
hosts: opnfv
+ user: root
+ vars_files:
+ - ../var/opnfv.yml
+ - ../installer/osa/files/openstack_services.yml
roles:
- - role: "prepare-functest"
+ - role: "prepare-tests"
diff --git a/xci/playbooks/provision-vm-nodes.yml b/xci/playbooks/provision-vm-nodes.yml
deleted file mode 100644
index 8b8bb30d..00000000
--- a/xci/playbooks/provision-vm-nodes.yml
+++ /dev/null
@@ -1,42 +0,0 @@
----
-# SPDX-license-identifier: Apache-2.0
-##############################################################################
-# Copyright (c) 2017 Ericsson AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-- hosts: localhost
- connection: local
- gather_facts: true
- vars_files:
- - ../var/opnfv.yml
- pre_tasks:
- - name: Load distribution variables
- include_vars:
- file: ../var/{{ ansible_os_family }}.yml
- roles:
- - role: clone-repository
- project: "opnfv/bifrost"
- repo: "{{ OPENSTACK_BIFROST_GIT_URL }}"
- dest: "{{ XCI_CACHE }}/repos/bifrost"
- version: "{{ OPENSTACK_BIFROST_VERSION }}"
-
- tasks:
- - name: Load distribution variables
- include_vars:
- file: ../var/{{ ansible_os_family }}.yml
- - name: Synchronize local development bifrost repository to XCI paths
- # command module is much faster than the copy module
- synchronize:
- src: "{{ OPENSTACK_BIFROST_DEV_PATH }}"
- dest: "{{ XCI_CACHE }}/repos/bifrost"
- recursive: yes
- delete: yes
- when:
- - OPENSTACK_BIFROST_DEV_PATH != ""
- - name: combine opnfv/releng-xci and openstack/bifrost scripts/playbooks
- copy:
- src: "{{ XCI_PATH}}/bifrost/"
- dest: "{{ XCI_CACHE }}/repos/bifrost"
diff --git a/xci/playbooks/roles/.gitignore b/xci/playbooks/roles/.gitignore
deleted file mode 100644
index e0b47770..00000000
--- a/xci/playbooks/roles/.gitignore
+++ /dev/null
@@ -1,8 +0,0 @@
-*
-!.gitignore
-!clone-repository/
-!configure-network/
-!configure-nfs/
-!prepare-functest/
-!remote-folders/
-!synchronize-time/
diff --git a/xci/playbooks/roles/bootstrap-host/defaults/main.yml b/xci/playbooks/roles/bootstrap-host/defaults/main.yml
new file mode 100644
index 00000000..8e5a0e34
--- /dev/null
+++ b/xci/playbooks/roles/bootstrap-host/defaults/main.yml
@@ -0,0 +1,11 @@
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 SUSE Linux GmbH and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+configure_network: yes
+configure_time: yes
diff --git a/xci/playbooks/roles/configure-network/files/network-config-suse b/xci/playbooks/roles/bootstrap-host/files/network-config-suse
index 02cdd998..02cdd998 100755
--- a/xci/playbooks/roles/configure-network/files/network-config-suse
+++ b/xci/playbooks/roles/bootstrap-host/files/network-config-suse
diff --git a/xci/playbooks/roles/bootstrap-host/handlers/main.yml b/xci/playbooks/roles/bootstrap-host/handlers/main.yml
new file mode 100644
index 00000000..b9103233
--- /dev/null
+++ b/xci/playbooks/roles/bootstrap-host/handlers/main.yml
@@ -0,0 +1,12 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 SUSE Linux GmbH and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- name: Reload facts
+ setup:
+ filter: ansible_local
diff --git a/xci/playbooks/roles/bootstrap-host/tasks/main.yml b/xci/playbooks/roles/bootstrap-host/tasks/main.yml
new file mode 100644
index 00000000..7d6d259e
--- /dev/null
+++ b/xci/playbooks/roles/bootstrap-host/tasks/main.yml
@@ -0,0 +1,15 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 SUSE Linux GmbH and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+- include: network.yml
+ when: configure_network
+
+- include: time.yml
+ when: configure_time
diff --git a/xci/playbooks/roles/bootstrap-host/tasks/network.yml b/xci/playbooks/roles/bootstrap-host/tasks/network.yml
new file mode 100644
index 00000000..a4f260c4
--- /dev/null
+++ b/xci/playbooks/roles/bootstrap-host/tasks/network.yml
@@ -0,0 +1,64 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2017 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- name: ensure glean rules are removed
+ file:
+ path: "/etc/udev/rules.d/99-glean.rules"
+ state: absent
+
+- name: Determine required packages
+ set_fact:
+ network_packages:
+ - bridge-utils
+ - "{{ (ansible_pkg_mgr in ['zypper', 'apt']) | ternary('iproute2', 'iproute') }}"
+ - "{{ (ansible_pkg_mgr == 'apt') | ternary('vlan', 'bridge-utils') }}"
+ - iptables
+
+- name: Ensure networking packages are present
+ package:
+ name: "{{ network_packages }}"
+ state: present
+
+- name: Ensure local facts directory exists
+ file:
+ path: "/etc/ansible/facts.d"
+ state: directory
+
+# NOTE(hwoarang) We have to check all levels of the local fact before we add it
+# otherwise Ansible will fail.
+- name: Record initial active interface
+ ini_file:
+ create: yes
+ section: network
+ state: present
+ option: xci_interface
+ value: "{{ ansible_default_ipv4.interface }}"
+ path: "/etc/ansible/facts.d/xci.fact"
+ when: ansible_local is not defined
+ or (ansible_local is defined and ansible_local.xci is not defined)
+ or (ansible_local is defined and ansible_local.xci is defined and ansible_local.xci.network is not defined)
+ or (ansible_local is defined and ansible_local.xci is defined and ansible_local.xci.network is defined and ansible_local.xci.network.xci_interface is not defined)
+ notify:
+ - Reload facts
+
+- name: Run handlers
+ meta: flush_handlers
+
+- name: "Configure networking on {{ ansible_os_family }}"
+ include_tasks: "network_{{ ansible_os_family | lower }}.yml"
+
+- name: Wait for host to come back to life
+ local_action:
+ module: wait_for
+ host: "{{ ansible_host }}"
+ delay: 15
+ state: started
+ port: 22
+ connect_timeout: 10
+ timeout: 180
diff --git a/xci/playbooks/roles/bootstrap-host/tasks/network_debian.yml b/xci/playbooks/roles/bootstrap-host/tasks/network_debian.yml
new file mode 100644
index 00000000..176c7eb1
--- /dev/null
+++ b/xci/playbooks/roles/bootstrap-host/tasks/network_debian.yml
@@ -0,0 +1,98 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 SUSE LINUX GmbH.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+- name: configure modules
+ lineinfile:
+ dest: /etc/modules
+ state: present
+ create: yes
+ line: "8021q"
+
+- name: add modules
+ modprobe:
+ name: 8021q
+ state: present
+
+- name: ensure interfaces.d folder is empty
+ file:
+ state: "{{ item }}"
+ path: "/etc/network/interfaces.d"
+ with_items:
+ - absent
+ - directory
+
+- name: Ensure /etc/network/interfaces can source additional files
+ copy:
+ content: |
+ auto lo
+ iface lo inet loopback
+ source /etc/network/interfaces.d/*.cfg
+ dest: "/etc/network/interfaces"
+
+- name: "Configure networking for {{ inventory_hostname }}"
+ template:
+ src: "{{ installer_type }}/debian.interface.j2"
+ dest: "/etc/network/interfaces.d/{{ item.name }}.cfg"
+ with_items:
+ - { name: "{{ ansible_local.xci.network.xci_interface }}" }
+ - { name: "{{ ansible_local.xci.network.xci_interface }}.10", vlan_id: 10 }
+ - { name: "{{ ansible_local.xci.network.xci_interface }}.30", vlan_id: 30 }
+ - { name: "{{ ansible_local.xci.network.xci_interface }}.20", vlan_id: 20 }
+ - { name: "br-mgmt", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}.10", network: "{{ host_info[inventory_hostname].mgmt }}" }
+ - { name: "br-vxlan", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}.30", network: "{{ host_info[inventory_hostname].private }}" }
+ - { name: "br-vlan", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}", network: "{{ host_info[inventory_hostname].public }}" }
+ - { name: "br-storage", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}.20", network: "{{ host_info[inventory_hostname].storage }}" }
+ loop_control:
+ label: "{{ item.name }}"
+ when: baremetal | bool != true
+
+
+- name: "Configure baremetal networking for blade: {{ inventory_hostname }}"
+ template:
+ src: "{{ installer_type }}/debian.interface.j2"
+ dest: "/etc/network/interfaces.d/{{ item.name }}.cfg"
+ with_items:
+ - { name: "{{ admin_interface }}", network: "{{ host_info[inventory_hostname].admin }}" }
+ - { name: "{{ mgmt_interface }}", vlan_id: "{{ (mgmt_vlan == 'native') | ternary(omit, mgmt_vlan) }}" }
+ - { name: "{{ storage_interface }}", vlan_id: "{{ (storage_vlan == 'native') | ternary(omit, storage_vlan) }}" }
+ - { name: "{{ public_interface }}", vlan_id: "{{ (public_vlan == 'native') | ternary(omit, public_vlan) }}" }
+ - { name: "{{ private_interface }}", vlan_id: "{{ (private_vlan == 'native') | ternary(omit, private_vlan) }}" }
+ - { name: "br-mgmt", bridge_ports: "{{ mgmt_interface }}", network: "{{ host_info[inventory_hostname].mgmt }}" }
+ - { name: "br-vxlan", bridge_ports: "{{ private_interface }}", network: "{{ host_info[inventory_hostname].private }}" }
+ - { name: "br-vlan", bridge_ports: "{{ public_interface }}", network: "{{ host_info[inventory_hostname].public }}" }
+ - { name: "br-storage", bridge_ports: "{{ storage_interface }}", network: "{{ host_info[inventory_hostname].storage }}" }
+ loop_control:
+ label: "{{ item.name }}"
+ when:
+ - baremetal | bool == true
+ - "'opnfv' not in inventory_hostname"
+
+- name: "Configure baremetal networking for VM: {{ inventory_hostname }}"
+ template:
+ src: "{{ installer_type }}/debian.interface.j2"
+ dest: "/etc/network/interfaces.d/{{ item.name }}.cfg"
+ with_items:
+ - { name: "{{ mgmt_interface }}", vlan_id: "{{ (mgmt_vlan == 'native') | ternary(omit, mgmt_vlan) }}" }
+ - { name: "{{ public_interface }}", vlan_id: "{{ (public_vlan == 'native') | ternary(omit, public_vlan) }}" }
+ - { name: "br-mgmt", bridge_ports: "{{ mgmt_interface }}", network: "{{ host_info[inventory_hostname].mgmt }}" }
+ - { name: "br-vlan", bridge_ports: "{{ public_interface }}", network: "{{ host_info[inventory_hostname].public }}" }
+ loop_control:
+ label: "{{ item.name }}"
+ when:
+ - baremetal | bool == true
+ - "'opnfv' in inventory_hostname"
+
+- name: restart network service
+ shell: "/sbin/ip addr flush dev {{ item }}; /sbin/ifdown -a; /sbin/ifup -a"
+ async: 15
+ poll: 0
+ with_items:
+ - "{{ public_interface }}"
+ - "{{ mgmt_interface }}"
diff --git a/xci/playbooks/roles/bootstrap-host/tasks/network_redhat.yml b/xci/playbooks/roles/bootstrap-host/tasks/network_redhat.yml
new file mode 100644
index 00000000..288fdf65
--- /dev/null
+++ b/xci/playbooks/roles/bootstrap-host/tasks/network_redhat.yml
@@ -0,0 +1,32 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 SUSE LINUX GmbH.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+- name: "Configure networking on {{ inventory_hostname }}"
+ template:
+ src: "{{ installer_type }}/{{ ansible_os_family | lower }}.ifcfg.j2"
+ dest: "/etc/sysconfig/network-scripts/ifcfg-{{ item.name }}"
+ with_items:
+ - { name: "{{ ansible_local.xci.network.xci_interface }}" , bridge: "br-vlan" }
+ - { name: "{{ ansible_local.xci.network.xci_interface }}.10", bridge: "br-mgmt" , vlan_id: 10 }
+ - { name: "{{ ansible_local.xci.network.xci_interface }}.20", bridge: "br-storage", vlan_id: 20 }
+ - { name: "{{ ansible_local.xci.network.xci_interface }}.30", bridge: "br-vxlan" , vlan_id: 30 }
+ - { name: "br-vlan" , network: "{{ host_info[inventory_hostname].public }}" }
+ - { name: "br-mgmt" , network: "{{ host_info[inventory_hostname].mgmt }}" }
+ - { name: "br-storage", network: "{{ host_info[inventory_hostname].storage }}" }
+ - { name: "br-vxlan" , network: "{{ host_info[inventory_hostname].private }}" }
+ loop_control:
+ label: "{{ item.name }}"
+
+- name: restart network service
+ service:
+ name: network
+ state: restarted
+ async: 15
+ poll: 0
diff --git a/xci/playbooks/roles/bootstrap-host/tasks/network_suse.yml b/xci/playbooks/roles/bootstrap-host/tasks/network_suse.yml
new file mode 100644
index 00000000..a8f1bf59
--- /dev/null
+++ b/xci/playbooks/roles/bootstrap-host/tasks/network_suse.yml
@@ -0,0 +1,93 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 SUSE LINUX GmbH.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+- name: "Configure networking on {{ inventory_hostname }}"
+ template:
+ src: "{{ installer_type }}/{{ ansible_os_family | lower }}.interface.j2"
+ dest: "/etc/sysconfig/network/ifcfg-{{ item.name }}"
+ with_items:
+ - { name: "{{ ansible_local.xci.network.xci_interface }}" }
+ - { name: "{{ ansible_local.xci.network.xci_interface }}.10", vlan_id: 10 }
+ - { name: "{{ ansible_local.xci.network.xci_interface }}.30", vlan_id: 30 }
+ - { name: "{{ ansible_local.xci.network.xci_interface }}.20", vlan_id: 20 }
+ - { name: "br-mgmt", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}.10", network: "{{ host_info[inventory_hostname].mgmt }}" }
+ - { name: "br-vxlan", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}.30", network: "{{ host_info[inventory_hostname].private }}" }
+ - { name: "br-vlan", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}", network: "{{ host_info[inventory_hostname].public }}" }
+ - { name: "br-storage", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}.20", network: "{{ host_info[inventory_hostname].storage }}" }
+ loop_control:
+ label: "{{ item.name }}"
+ when: baremetal | bool != true
+
+- name: "Configure baremetal networking for blade: {{ inventory_hostname }}"
+ template:
+ src: "{{ installer_type }}/{{ ansible_os_family | lower }}.interface.j2"
+ dest: "/etc/sysconfig/network/ifcfg-{{ item.name }}"
+ with_items:
+ - { name: "{{ admin_interface }}", network: "{{ host_info[inventory_hostname].admin }}" }
+ - { name: "{{ mgmt_interface }}", vlan_id: "{{ (mgmt_vlan == 'native') | ternary(omit, mgmt_vlan) }}" }
+ - { name: "{{ storage_interface }}", vlan_id: "{{ (storage_vlan == 'native') | ternary(omit, storage_vlan) }}" }
+ - { name: "{{ public_interface }}", vlan_id: "{{ (public_vlan == 'native') | ternary(omit, public_vlan) }}" }
+ - { name: "{{ private_interface }}", vlan_id: "{{ (private_vlan == 'native') | ternary(omit, private_vlan) }}" }
+ - { name: "br-mgmt", bridge_ports: "{{ mgmt_interface }}", network: "{{ host_info[inventory_hostname].mgmt }}" }
+ - { name: "br-vxlan", bridge_ports: "{{ private_interface }}", network: "{{ host_info[inventory_hostname].private }}" }
+ - { name: "br-vlan", bridge_ports: "{{ public_interface }}", network: "{{ host_info[inventory_hostname].public }}" }
+ - { name: "br-storage", bridge_ports: "{{ storage_interface }}", network: "{{ host_info[inventory_hostname].storage }}" }
+ loop_control:
+ label: "{{ item.name }}"
+ when:
+ - baremetal | bool == true
+ - "'opnfv' not in inventory_hostname"
+
+- name: "Configure baremetal networking for VM: {{ inventory_hostname }}"
+ template:
+ src: "{{ installer_type }}/{{ ansible_os_family | lower }}.interface.j2"
+ dest: "/etc/sysconfig/network/ifcfg-{{ item.name }}"
+ with_items:
+ - { name: "{{ mgmt_interface }}", vlan_id: "{{ (mgmt_vlan == 'native') | ternary(omit, mgmt_vlan) }}" }
+ - { name: "{{ mgmt_interface }}.30", vlan_id: 30 }
+ - { name: "{{ mgmt_interface }}.20", vlan_id: 20 }
+ - { name: "{{ public_interface }}", vlan_id: "{{ (public_vlan == 'native') | ternary(omit, public_vlan) }}" }
+ - { name: "br-mgmt", bridge_ports: "{{ mgmt_interface }}", network: "{{ host_info[inventory_hostname].mgmt }}" }
+ - { name: "br-vlan", bridge_ports: "{{ public_interface }}", network: "{{ host_info[inventory_hostname].public }}" }
+ - { name: "br-vxlan", bridge_ports: "{{ mgmt_interface }}.30", network: "{{ host_info[inventory_hostname].private }}" }
+ - { name: "br-storage", bridge_ports: "{{ mgmt_interface }}.20", network: "{{ host_info[inventory_hostname].storage }}" }
+ loop_control:
+ label: "{{ item.name }}"
+ when:
+ - baremetal | bool == true
+ - "'opnfv' in inventory_hostname"
+
+- name: Add postup/postdown scripts on SUSE
+ copy:
+ src: "network-config-suse"
+ dest: "/etc/sysconfig/network/scripts/network-config-suse"
+ mode: 0755
+
+- name: Configure static DNS on SUSE
+ lineinfile:
+ regexp: '^NETCONFIG_DNS_STATIC_SERVERS=.*'
+ line: "NETCONFIG_DNS_STATIC_SERVERS=\"{{ host_info[inventory_hostname]['public']['dns'] | join(' ') }}\""
+ path: "/etc/sysconfig/network/config"
+ state: present
+ when: host_info[inventory_hostname]['public']['dns'] is defined
+
+- name: Configure routes on SUSE
+ template:
+ src: "{{ installer_type }}/{{ ansible_os_family | lower }}.routes.j2"
+ dest: "/etc/sysconfig/network/ifroute-{{ item.name }}"
+ with_items:
+ - { name: "br-vlan", gateway: "{{ host_info[inventory_hostname]['public']['gateway'] }}", route: "default" }
+
+- name: restart network service
+ service:
+ name: network
+ state: restarted
+ async: 15
+ poll: 0
diff --git a/xci/playbooks/roles/synchronize-time/tasks/main.yml b/xci/playbooks/roles/bootstrap-host/tasks/time.yml
index 8f94d33f..9eca769d 100644
--- a/xci/playbooks/roles/synchronize-time/tasks/main.yml
+++ b/xci/playbooks/roles/bootstrap-host/tasks/time.yml
@@ -10,14 +10,21 @@
- name: install chrony
package:
name: "chrony"
- state: latest
+ state: present
- name: restart chrony
service:
name: "{{ (ansible_pkg_mgr == 'apt') | ternary('chrony', 'chronyd') }}"
state: restarted
- name: synchronize time
shell: "chronyc -a 'burst 4/4' && chronyc -a makestep"
+ args:
+ executable: /bin/bash
+ changed_when: True
register: chrony_got_time
until: chrony_got_time.rc == 0
retries: 5
delay: 5
+ environment:
+ http_proxy: "{{ lookup('env','http_proxy') }}"
+ https_proxy: "{{ lookup('env','https_proxy') }}"
+ no_proxy: "{{ lookup('env','no_proxy') }}"
diff --git a/xci/playbooks/roles/bootstrap-host/templates/kubespray b/xci/playbooks/roles/bootstrap-host/templates/kubespray
new file mode 120000
index 00000000..f820fd11
--- /dev/null
+++ b/xci/playbooks/roles/bootstrap-host/templates/kubespray
@@ -0,0 +1 @@
+osa \ No newline at end of file
diff --git a/xci/playbooks/roles/bootstrap-host/templates/osa/debian.interface.j2 b/xci/playbooks/roles/bootstrap-host/templates/osa/debian.interface.j2
new file mode 100644
index 00000000..2f976002
--- /dev/null
+++ b/xci/playbooks/roles/bootstrap-host/templates/osa/debian.interface.j2
@@ -0,0 +1,39 @@
+# {{ ansible_managed }}
+
+# Physical interface
+{% if item.bridge_ports is not defined %}
+auto {{ item.name }}
+iface {{ item.name }} inet manual
+{% if item.vlan_id is defined %}
+ vlan-raw-device {{ item.name|replace('.' ~ item.vlan_id, '') }}
+{% endif %}
+
+{% else %}
+auto {{ item.name }}
+iface {{ item.name }} inet static
+ bridge_stp off
+ bridge_waitport 0
+ bridge_fd 0
+ bridge_ports {{ item.bridge_ports }}
+{% if item.name == 'br-vlan' %}
+ # Create veth pair, don't bomb if already exists
+ pre-up ip link add br-vlan-veth type veth peer name eth12 || true
+ # Set both ends UP
+ pre-up ip link set br-vlan-veth up
+ pre-up ip link set eth12 up
+ # Delete veth pair on DOWN
+ post-down ip link del br-vlan-veth || true
+ bridge_ports br-vlan-veth
+{% endif %}
+{% if item.network is defined %}
+ address {{ item.network.address | ipaddr('address') }}
+ netmask {{ item.network.address | ipaddr('netmask') }}
+{% endif %}
+{% if item.network is defined and item.network.gateway is defined %}
+ gateway {{ item.network.gateway | ipaddr('address') }}
+{% endif %}
+{% if item.network is defined and item.network.dns is defined %}
+ dns-nameservers {{ item.network.dns | join(' ') }}
+{% endif %}
+
+{% endif %}
diff --git a/xci/playbooks/roles/bootstrap-host/templates/osa/redhat.interface.j2 b/xci/playbooks/roles/bootstrap-host/templates/osa/redhat.interface.j2
new file mode 100644
index 00000000..525686d9
--- /dev/null
+++ b/xci/playbooks/roles/bootstrap-host/templates/osa/redhat.interface.j2
@@ -0,0 +1,25 @@
+DEVICE={{ item.name }}
+NM_CONTROLLED=no
+ONBOOT=yes
+BOOTPROTO=none
+{% if item.vlan_id is defined %}
+VLAN=yes
+ETHERDEVICE={{ ansible_local.xci.network.xci_interface }}
+VLAN_ID={{ item.vlan_id }}
+{% endif %}
+{% if item.bridge is defined %}
+BRIDGE={{ item.bridge }}
+{% else %}
+TYPE=Bridge
+DELAY=0
+STP=off
+{% endif %}
+{% if item.network is defined %}
+IPADDR={{ item.network.address }}
+{% endif %}
+{% if item.network is defined and item.network.gateway is defined %}
+GATEWAY="{{ host_info[inventory_hostname]['public']['gateway'] | ipaddr('address') }}"
+{% endif %}
+{% if item.network is defined and item.network.dns is defined %}
+DNS="{{ host_info[inventory_hostname]['public']['dns'] | join(' ') }}"
+{% endif %}
diff --git a/xci/playbooks/roles/configure-network/templates/suse/suse.interface.j2 b/xci/playbooks/roles/bootstrap-host/templates/osa/suse.interface.j2
index ffa418d4..7c2929d6 100644
--- a/xci/playbooks/roles/configure-network/templates/suse/suse.interface.j2
+++ b/xci/playbooks/roles/bootstrap-host/templates/osa/suse.interface.j2
@@ -1,8 +1,7 @@
STARTMODE='auto'
BOOTPROTO='static'
{% if item.vlan_id is defined %}
-ETHERDEVICE={{ interface }}
-VLAN_ID={{ item.vlan_id }}
+ETHERDEVICE={{ item.name.split('.')[0] }}
{% endif %}
{% if item.bridge_ports is defined %}
BRIDGE='yes'
@@ -10,8 +9,8 @@ BRIDGE_FORWARDDELAY='0'
BRIDGE_STP=off
BRIDGE_PORTS={{ item.bridge_ports }}
{% endif %}
-{% if item.ip is defined %}
-IPADDR={{ item.ip }}
+{% if item.network is defined %}
+IPADDR={{ item.network.address }}
{% endif %}
PRE_UP_SCRIPT="compat:suse:network-config-suse"
POST_DOWN_SCRIPT="compat:suse:network-config-suse"
diff --git a/xci/playbooks/roles/bootstrap-host/templates/osa/suse.routes.j2 b/xci/playbooks/roles/bootstrap-host/templates/osa/suse.routes.j2
new file mode 100644
index 00000000..93941fad
--- /dev/null
+++ b/xci/playbooks/roles/bootstrap-host/templates/osa/suse.routes.j2
@@ -0,0 +1 @@
+{{ item.route }} {{ item.gateway | ipaddr('address') }}
diff --git a/xci/playbooks/roles/bootstrap-host/templates/osh b/xci/playbooks/roles/bootstrap-host/templates/osh
new file mode 120000
index 00000000..f820fd11
--- /dev/null
+++ b/xci/playbooks/roles/bootstrap-host/templates/osh
@@ -0,0 +1 @@
+osa \ No newline at end of file
diff --git a/xci/playbooks/roles/bootstrap-host/vars/main.yml b/xci/playbooks/roles/bootstrap-host/vars/main.yml
new file mode 100644
index 00000000..1730ad57
--- /dev/null
+++ b/xci/playbooks/roles/bootstrap-host/vars/main.yml
@@ -0,0 +1,73 @@
+---
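+# Each *_interface variable below resolves to the host NIC whose MAC address
+# matches the one recorded in the pdf for that network, with the VLAN id
+# appended as a suffix (e.g. eth0.10) unless the network uses the native VLAN.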
+# admin network information
+admin_mac: "{{ host_info[inventory_hostname].admin.mac_address }}"
+admin_interface: >-
+ {%- for x in (ansible_interfaces | map('regex_replace', '-', '_') | map('regex_replace', '^', 'ansible_') | map('extract', hostvars[inventory_hostname]) | selectattr('macaddress','defined')) -%}
+ {%- if x.macaddress == admin_mac -%}
+ {%- if admin_vlan == 'native' -%}
+ {{ x.device }}
+ {%- else -%}
+ {{ x.device }}.{{ admin_vlan }}
+ {%- endif -%}
+ {%- endif -%}
+ {%- endfor -%}
+admin_vlan: "{{ host_info[inventory_hostname].admin.vlan }}"
+
+# mgmt network information
+mgmt_mac: "{{ host_info[inventory_hostname].mgmt.mac_address }}"
+mgmt_interface: >-
+ {%- for x in (ansible_interfaces | map('regex_replace', '-', '_') | map('regex_replace', '^', 'ansible_') | map('extract', hostvars[inventory_hostname]) | selectattr('macaddress','defined')) -%}
+ {%- if x.macaddress == mgmt_mac -%}
+ {%- if mgmt_vlan == 'native' -%}
+ {{ x.device }}
+ {%- else -%}
+ {{ x.device }}.{{ mgmt_vlan }}
+ {%- endif -%}
+ {%- endif -%}
+ {%- endfor -%}
+mgmt_vlan: "{{ host_info[inventory_hostname].mgmt.vlan }}"
+
+# storage network information
+storage_mac: "{{ host_info[inventory_hostname].storage.mac_address }}"
+storage_interface: >-
+ {%- for x in (ansible_interfaces | map('regex_replace', '-', '_') | map('regex_replace', '^', 'ansible_') | map('extract', hostvars[inventory_hostname]) | selectattr('macaddress','defined')) -%}
+ {%- if x.macaddress == storage_mac -%}
+ {%- if storage_vlan == 'native' -%}
+ {{ x.device }}
+ {%- else -%}
+ {{ x.device }}.{{ storage_vlan }}
+ {%- endif -%}
+ {%- endif -%}
+ {%- endfor -%}
+storage_vlan: "{{ host_info[inventory_hostname].storage.vlan }}"
+
+# public vlan network information
+public_mac: "{{ host_info[inventory_hostname].public.mac_address }}"
+public_interface: >-
+ {%- for x in (ansible_interfaces | map('regex_replace', '-', '_') | map('regex_replace', '^', 'ansible_') | map('extract', hostvars[inventory_hostname]) | selectattr('macaddress','defined')) -%}
+ {%- if x.macaddress == public_mac -%}
+ {%- if public_vlan == 'native' -%}
+ {{ x.device }}
+ {%- else -%}
+ {{ x.device }}.{{ public_vlan }}
+ {%- endif -%}
+ {%- endif -%}
+ {%- endfor -%}
+public_vlan: "{{ host_info[inventory_hostname].public.vlan }}"
+
+# private vxlan network information
+private_mac: "{{ host_info[inventory_hostname].private.mac_address }}"
+private_interface: >-
+ {%- for x in (ansible_interfaces | map('regex_replace', '-', '_') | map('regex_replace', '^', 'ansible_') | map('extract', hostvars[inventory_hostname]) | selectattr('macaddress','defined')) -%}
+ {%- if x.macaddress == private_mac -%}
+ {%- if private_vlan == 'native' -%}
+ {{ x.device }}
+ {%- else -%}
+ {{ x.device }}.{{ private_vlan }}
+ {%- endif -%}
+ {%- endif -%}
+ {%- endfor -%}
+private_vlan: "{{ host_info[inventory_hostname].private.vlan }}"
diff --git a/xci/playbooks/roles/clone-repository/tasks/main.yml b/xci/playbooks/roles/clone-repository/tasks/main.yml
index a124003d..0ba80c0a 100644
--- a/xci/playbooks/roles/clone-repository/tasks/main.yml
+++ b/xci/playbooks/roles/clone-repository/tasks/main.yml
@@ -13,3 +13,7 @@
dest: "{{ dest }}"
version: "{{ version }}"
force: yes
+ environment:
+ http_proxy: "{{ lookup('env','http_proxy') }}"
+ https_proxy: "{{ lookup('env','https_proxy') }}"
+ no_proxy: "{{ lookup('env','no_proxy') }}"
diff --git a/xci/playbooks/roles/configure-network/tasks/main.yml b/xci/playbooks/roles/configure-network/tasks/main.yml
deleted file mode 100644
index 65abaa40..00000000
--- a/xci/playbooks/roles/configure-network/tasks/main.yml
+++ /dev/null
@@ -1,103 +0,0 @@
----
-# SPDX-license-identifier: Apache-2.0
-##############################################################################
-# Copyright (c) 2017 Ericsson AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-- name: ensure glean rules are removed
- file:
- path: "/etc/udev/rules.d/99-glean.rules"
- state: absent
-
-- block:
- - name: configure modules
- lineinfile:
- dest: /etc/modules
- state: present
- create: yes
- line: "8021q"
- - name: add modules
- modprobe:
- name: 8021q
- state: present
- - name: ensure interfaces.d folder is empty
- shell: "/bin/rm -rf /etc/network/interfaces.d/*"
- - name: ensure interfaces file is updated
- template:
- src: "{{ ansible_os_family | lower }}/{{ ansible_hostname }}.interface.j2"
- dest: "/etc/network/interfaces"
- - name: restart network service
- shell: "/sbin/ifconfig {{ interface }} 0 && /sbin/ifdown -a && /sbin/ifup -a"
- when: ansible_os_family | lower == "debian"
-
-- block:
- - name: Remove existing network configuration
- file:
- path: "/etc/sysconfig/network/{{ item }}"
- state: absent
- with_items:
- - "ifcfg-eth0"
- - "ifroute-eth0"
-
- - name: Configure networking on SUSE
- template:
- src: "{{ ansible_os_family | lower }}/suse.interface.j2"
- dest: "/etc/sysconfig/network/ifcfg-{{ item.name }}"
- with_items:
- - { name: "{{ interface }}" }
- - { name: "{{ interface }}.10", vlan_id: 10 }
- - { name: "{{ interface }}.30", vlan_id: 30 }
- - { name: "{{ interface }}.20", vlan_id: 20 }
- - { name: "br-mgmt", bridge_ports: "{{ interface }}.10", ip: "{{ host_info[inventory_hostname].MGMT_IP }}/22" }
- - { name: "br-vxlan", bridge_ports: "{{ interface }}.30", ip: "{{ host_info[inventory_hostname].VXLAN_IP }}/22" }
- - { name: "br-vlan", bridge_ports: "{{ interface }}", ip: "{{ host_info[inventory_hostname].VLAN_IP }}/24" }
- - { name: "br-storage", bridge_ports: "{{ interface }}.20", ip: "{{ host_info[inventory_hostname].STORAGE_IP }}/22" }
-
- - name: Add postup/postdown scripts on SUSE
- copy:
- src: "network-config-suse"
- dest: "/etc/sysconfig/network/scripts/network-config-suse"
- mode: 0755
-
- - name: Configure routes on SUSE
- template:
- src: "{{ ansible_os_family | lower }}/suse.routes.j2"
- dest: "/etc/sysconfig/network/ifroute-{{ item.name }}"
- with_items:
- - { name: "br-vlan", gateway: "192.168.122.1", route: "default" }
-
- - name: restart network service
- shell: "/usr/sbin/wicked ifreload all"
- when: ansible_os_family | lower == "suse"
-
-- block:
- - name: Configure networking on CentOS for interfaces
- template:
- src: "{{ ansible_os_family | lower }}/interface.ifcfg.j2"
- dest: "/etc/sysconfig/network-scripts/ifcfg-{{ item.name }}"
- with_items:
- - { name: "{{ interface }}" , bridge: "br-vlan" }
- - { name: "{{ interface }}.10", bridge: "br-mgmt" , vlan_id: 10 }
- - { name: "{{ interface }}.20", bridge: "br-storage", vlan_id: 20 }
- - { name: "{{ interface }}.30", bridge: "br-vxlan" , vlan_id: 30 }
- - name: Configure networking on CentOS for bridges
- template:
- src: "{{ ansible_os_family | lower }}/bridge.ifcfg.j2"
- dest: "/etc/sysconfig/network-scripts/ifcfg-{{ item.name }}"
- with_items:
- - { name: "br-vlan" , ip: "{{ host_info[inventory_hostname].VLAN_IP }}", prefix: 24 }
- - { name: "br-mgmt" , ip: "{{ host_info[inventory_hostname].MGMT_IP }}", prefix: 22 }
- - { name: "br-storage", ip: "{{ host_info[inventory_hostname].STORAGE_IP }}", prefix: 22 }
- - { name: "br-vxlan" , ip: "{{ host_info[inventory_hostname].VXLAN_IP }}", prefix: 22 }
- - name: Add default route through br-vlan
- lineinfile:
- path: "/etc/sysconfig/network-scripts/ifcfg-br-vlan"
- line: "GATEWAY=192.168.122.1"
- - name: Restart networking
- command: "systemctl restart network"
- - name: wait for the server to come back
- wait_for_connection:
- when: ansible_os_family | lower == "redhat"
diff --git a/xci/playbooks/roles/configure-network/templates/debian/compute00.interface.j2 b/xci/playbooks/roles/configure-network/templates/debian/compute00.interface.j2
deleted file mode 100644
index 6d6a3835..00000000
--- a/xci/playbooks/roles/configure-network/templates/debian/compute00.interface.j2
+++ /dev/null
@@ -1,75 +0,0 @@
-# {{ ansible_managed }}
-
-# The loopback network interface
-auto lo
-iface lo inet loopback
-
-# Physical interface
-auto {{ interface }}
-iface {{ interface }} inet manual
-
-# Container/Host management VLAN interface
-auto {{ interface }}.10
-iface {{ interface }}.10 inet manual
- vlan-raw-device {{ interface }}
-
-# OpenStack Networking VXLAN (tunnel/overlay) VLAN interface
-auto {{ interface }}.30
-iface {{ interface }}.30 inet manual
- vlan-raw-device {{ interface }}
-
-# Storage network VLAN interface
-auto {{ interface }}.20
-iface {{ interface }}.20 inet manual
- vlan-raw-device {{ interface }}
-
-# Container/Host management bridge
-auto br-mgmt
-iface br-mgmt inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports {{ interface }}.10
- address {{host_info[inventory_hostname].MGMT_IP}}
- netmask 255.255.252.0
-
-# compute1 VXLAN (tunnel/overlay) bridge config
-auto br-vxlan
-iface br-vxlan inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports {{ interface }}.30
- address {{host_info[inventory_hostname].VXLAN_IP}}
- netmask 255.255.252.0
-
-# OpenStack Networking VLAN bridge
-auto br-vlan
-iface br-vlan inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports {{ interface }}
- address {{host_info[inventory_hostname].VLAN_IP}}
- netmask 255.255.255.0
- gateway 192.168.122.1
- dns-nameserver 8.8.8.8 8.8.4.4
- offload-sg off
- # Create veth pair, don't bomb if already exists
- pre-up ip link add br-vlan-veth type veth peer name eth12 || true
- # Set both ends UP
- pre-up ip link set br-vlan-veth up
- pre-up ip link set eth12 up
- # Delete veth pair on DOWN
- post-down ip link del br-vlan-veth || true
- bridge_ports br-vlan-veth
-
-# OpenStack Storage bridge
-auto br-storage
-iface br-storage inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports {{ interface }}.20
- address {{host_info[inventory_hostname].STORAGE_IP}}
- netmask 255.255.252.0
diff --git a/xci/playbooks/roles/configure-network/templates/debian/compute01.interface.j2 b/xci/playbooks/roles/configure-network/templates/debian/compute01.interface.j2
deleted file mode 120000
index a74df1c2..00000000
--- a/xci/playbooks/roles/configure-network/templates/debian/compute01.interface.j2
+++ /dev/null
@@ -1 +0,0 @@
-compute00.interface.j2 \ No newline at end of file
diff --git a/xci/playbooks/roles/configure-network/templates/debian/controller00.interface.j2 b/xci/playbooks/roles/configure-network/templates/debian/controller00.interface.j2
deleted file mode 100644
index 5d42a5d2..00000000
--- a/xci/playbooks/roles/configure-network/templates/debian/controller00.interface.j2
+++ /dev/null
@@ -1,66 +0,0 @@
-# {{ ansible_managed }}
-
-# The loopback network interface
-auto lo
-iface lo inet loopback
-
-# Physical interface
-auto {{ interface }}
-iface {{ interface }} inet manual
-
-# Container/Host management VLAN interface
-auto {{ interface }}.10
-iface {{ interface }}.10 inet manual
- vlan-raw-device {{ interface }}
-
-# OpenStack Networking VXLAN (tunnel/overlay) VLAN interface
-auto {{ interface }}.30
-iface {{ interface }}.30 inet manual
- vlan-raw-device {{ interface }}
-
-# Storage network VLAN interface (optional)
-auto {{ interface }}.20
-iface {{ interface }}.20 inet manual
- vlan-raw-device {{ interface }}
-
-# Container/Host management bridge
-auto br-mgmt
-iface br-mgmt inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports {{ interface }}.10
- address {{host_info[inventory_hostname].MGMT_IP}}
- netmask 255.255.252.0
-
-# OpenStack Networking VXLAN (tunnel/overlay) bridge
-auto br-vxlan
-iface br-vxlan inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports {{ interface }}.30
- address {{host_info[inventory_hostname].VXLAN_IP}}
- netmask 255.255.252.0
-
-# OpenStack Networking VLAN bridge
-auto br-vlan
-iface br-vlan inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports {{ interface }}
- address {{host_info[inventory_hostname].VLAN_IP}}
- netmask 255.255.255.0
- gateway 192.168.122.1
- dns-nameserver 8.8.8.8 8.8.4.4
-
-# OpenStack Storage bridge
-auto br-storage
-iface br-storage inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports {{ interface }}.20
- address {{host_info[inventory_hostname].STORAGE_IP}}
- netmask 255.255.252.0
diff --git a/xci/playbooks/roles/configure-network/templates/debian/controller01.interface.j2 b/xci/playbooks/roles/configure-network/templates/debian/controller01.interface.j2
deleted file mode 120000
index e835d7ca..00000000
--- a/xci/playbooks/roles/configure-network/templates/debian/controller01.interface.j2
+++ /dev/null
@@ -1 +0,0 @@
-controller00.interface.j2 \ No newline at end of file
diff --git a/xci/playbooks/roles/configure-network/templates/debian/controller02.interface.j2 b/xci/playbooks/roles/configure-network/templates/debian/controller02.interface.j2
deleted file mode 120000
index e835d7ca..00000000
--- a/xci/playbooks/roles/configure-network/templates/debian/controller02.interface.j2
+++ /dev/null
@@ -1 +0,0 @@
-controller00.interface.j2 \ No newline at end of file
diff --git a/xci/playbooks/roles/configure-network/templates/debian/opnfv.interface.j2 b/xci/playbooks/roles/configure-network/templates/debian/opnfv.interface.j2
deleted file mode 100644
index 42826414..00000000
--- a/xci/playbooks/roles/configure-network/templates/debian/opnfv.interface.j2
+++ /dev/null
@@ -1,66 +0,0 @@
-# {{ ansible_managed }}
-
-# The loopback network interface
-auto lo
-iface lo inet loopback
-
-# Physical interface
-auto {{ interface }}
-iface {{ interface }} inet manual
-
-# Container/Host management VLAN interface
-auto {{ interface }}.10
-iface {{ interface }}.10 inet manual
- vlan-raw-device {{ interface }}
-
-# OpenStack Networking VXLAN (tunnel/overlay) VLAN interface
-auto {{ interface }}.30
-iface {{ interface }}.30 inet manual
- vlan-raw-device {{ interface }}
-
-# Storage network VLAN interface (optional)
-auto {{ interface }}.20
-iface {{ interface }}.20 inet manual
- vlan-raw-device {{ interface }}
-
-# Container/Host management bridge
-auto br-mgmt
-iface br-mgmt inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports {{ interface }}.10
- address {{host_info[inventory_hostname].MGMT_IP}}
- netmask 255.255.252.0
-
-# OpenStack Networking VXLAN (tunnel/overlay) bridge
-auto br-vxlan
-iface br-vxlan inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports {{ interface }}.30
- address {{ host_info[inventory_hostname].VXLAN_IP }}
- netmask 255.255.252.0
-
-# OpenStack Networking VLAN bridge
-auto br-vlan
-iface br-vlan inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports {{ interface }}
- address {{host_info[inventory_hostname].VLAN_IP}}
- netmask 255.255.255.0
- gateway 192.168.122.1
- dns-nameserver 8.8.8.8 8.8.4.4
-
-# OpenStack Storage bridge
-auto br-storage
-iface br-storage inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports {{ interface }}.20
- address {{host_info[inventory_hostname].STORAGE_IP}}
- netmask 255.255.252.0
diff --git a/xci/playbooks/roles/configure-network/templates/redhat/bridge.ifcfg.j2 b/xci/playbooks/roles/configure-network/templates/redhat/bridge.ifcfg.j2
deleted file mode 100644
index 06b5f177..00000000
--- a/xci/playbooks/roles/configure-network/templates/redhat/bridge.ifcfg.j2
+++ /dev/null
@@ -1,9 +0,0 @@
-DEVICE={{ item.name }}
-NM_CONTROLLED=no
-IPADDR={{ item.ip }}
-PREFIX={{ item.prefix }}
-ONBOOT=yes
-BOOTPROTO=none
-TYPE=Bridge
-DELAY=0
-STP=off
diff --git a/xci/playbooks/roles/configure-network/templates/redhat/interface.ifcfg.j2 b/xci/playbooks/roles/configure-network/templates/redhat/interface.ifcfg.j2
deleted file mode 100644
index b0dea0f5..00000000
--- a/xci/playbooks/roles/configure-network/templates/redhat/interface.ifcfg.j2
+++ /dev/null
@@ -1,10 +0,0 @@
-DEVICE={{ item.name }}
-NM_CONTROLLED=no
-ONBOOT=yes
-BOOTPROTO=none
-{% if item.vlan_id is defined %}
-VLAN=yes
-ETHERDEVICE={{ interface }}
-VLAN_ID={{ item.vlan_id }}
-{% endif %}
-BRIDGE={{ item.bridge }}
diff --git a/xci/playbooks/roles/configure-network/templates/suse/suse.routes.j2 b/xci/playbooks/roles/configure-network/templates/suse/suse.routes.j2
deleted file mode 100644
index 7c868447..00000000
--- a/xci/playbooks/roles/configure-network/templates/suse/suse.routes.j2
+++ /dev/null
@@ -1 +0,0 @@
-{{ item.route }} {{ item.gateway }}
diff --git a/xci/playbooks/roles/configure-nfs/tasks/main.yml b/xci/playbooks/roles/configure-nfs/tasks/main.yml
index 25e81496..3b349ad6 100644
--- a/xci/playbooks/roles/configure-nfs/tasks/main.yml
+++ b/xci/playbooks/roles/configure-nfs/tasks/main.yml
@@ -47,7 +47,7 @@
- name: Install the NFS server package
package:
name: "{{ nfs_server_package }}"
- state: latest
+ state: present
- name: restart NFS service
service:
diff --git a/xci/playbooks/roles/create-nodes/README.md b/xci/playbooks/roles/create-nodes/README.md
new file mode 100644
index 00000000..bf190296
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/README.md
@@ -0,0 +1,199 @@
+create-nodes
+================
+
+This role creates all the nodes required for the XCI deployment. In a baremetal
+deployment, it creates the OPNFV VM and provisions the physical servers. In a
+non-baremetal deployment, it creates the OPNFV VM and the rest of the VMs used
+to deploy scenarios. It is based on the bifrost role:
+
+https://github.com/openstack/bifrost/tree/master/playbooks/roles/bifrost-create-vm-nodes
+
+It creates the VMs or provisions the physical servers based on the pdf and idf
+documents, which describe the characteristics of the VMs or physical servers.
+For more information, check the spec:
+
+https://github.com/opnfv/releng-xci/blob/master/docs/specs/infra_manager.rst
+
+
+Flow
+----
+
+The script xci/infra/bifrost/scripts/bifrost-provision.sh calls the playbook
+that starts executing the role:
+
+xci-setup-nodes.yaml
+
+Note that at this stage the pdf and the opnfv_pdf_vm.yml are loaded.
+
+Some distro-specific variable tasks are performed first, and then the
+prepare_libvirt playbook is run. This playbook, as the name suggests,
+gets everything ready to run libvirt.
+
+After that, the nodes_json_data dictionary is initialized. It collects the
+data for every node and is finally dumped into the baremetal_json_file, which
+is read by bifrost in the subsequent role.
+
+The OPNFV VM and the rest of the VMs are created from the libvirt XML
+template, which is filled in with the pdf and opnfv_pdf_vm.yml variables. In a
+baremetal deployment, nodes_json_data is filled in by the
+baremetalhoststojson.yml playbook, which essentially reads the pdf info.
+
+Finally, nodes_json_data is dumped; a sketch of one entry follows.
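+
+A minimal, hypothetical sketch of a single entry in that file (structure taken
+from this role's vm_data/node_data facts; addresses, MAC and sizes below are
+illustrative only):
+
+ node1:
+ name: "node1"
+ uuid: "{{ 'node1' | to_uuid }}"
+ host_groups: ["baremetal"]
+ driver: "ipmi"
+ driver_info:
+ power:
+ ipmi_address: "192.168.122.1"
+ ipmi_port: "625"
+ ipmi_username: "admin"
+ ipmi_password: "password"
+ nics:
+ - mac: "52:54:00:aa:bb:cc"
+ ansible_ssh_host: "192.168.122.2"
+ ipv4_address: "192.168.122.2"
+ properties:
+ cpu_arch: "x86_64"
+ ram: "8"
+ cpus: "4"
+ disk_size: "80"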
+
+Requirements
+------------
+
+The following packages are required and ensured to be present:
+- libvirt-bin
+- qemu-utils
+- qemu-kvm
+- sgabios
+
+
+Warning
+-------
+
+- It is assumed that the opnfv VM characteristics are not described in the pdf
+but in a similar document called opnfv_pdf_vm.yml. There is also an idf
+document, opnfv_idf_vm.yml.
+
+- All references to csv from bifrost-create-vm-nodes were removed.
+
+Role Variables
+--------------
+
+baremetal_json_file: Defaults to '/tmp/baremetal.json'. It contains the
+ required information for bifrost to configure the
+ VMs appropriately
+
+vm_disk_cache: Disk cache mode used by the VM disks.
+ Defaults to 'unsafe', as set in this role's
+ defaults/main.yml.
+
+node_names: Space-separated names for nodes to be created.
+ It is taken from the hostnames variable in idf.
+ If not set, VM names will be autogenerated.
+ Note that regardless of the number of names in this list,
+ at most 'num_nodes' VMs will be created.
+
+vm_network: Name of the libvirt network to create the nodes on.
+ Defaults to shell variable 'VM_NET_BRIDGE', or,
+ if that is not set, to 'default'.
+
+node_storage_pool: Name of the libvirt storage pool to create disks
+ for VMs in.
+ Defaults to shell variable 'LIBVIRT_STORAGE_POOL', or,
+ if that is not set, to 'default'.
+ If absent, this pool will be created.
+
+node_storage_pool_path: Path used by the libvirt storage pool
+ 'node_storage_pool' if it has to be created.
+ Defaults to "/var/lib/libvirt/images".
+
+node_logdir: Folder where to store VM logs.
+ Defaults to "/var/log/libvirt/baremetal_logs".
+
+vm_emulator: Path to emulator executable used to define VMs in libvirt.
+ Defaults to "/usr/bin/qemu-system-x86_64".
+ Generally users should not need to modify this setting,
+ as it is OS-specific and is overwritten by
+ os/distribution-specific defaults in this role when needed.
+
+vm_libvirt_uri: URI to connect to libvirt for networks, storage and VM
+ related actions.
+ Defaults to shell variable 'LIBVIRT_CONNECT_URI', or,
+ if that is not set, to 'qemu:///system'.
+ Note that currently connecting to remote libvirt is
+ not tested and is unsupported.
+
+network_interface: Name of the bridge to create when creating
+ 'vm_network' libvirt network.
+ Defaults to "virbr0".
+ Name and default of this option are chosen to be the same
+ as in 'bifrost-ironic-install' role.
+
+opnfv_vm_network_ip: IP for the 'network_interface' bridge.
+ Defaults to '192.168.122.1'.
+ This setting is applied only when 'vm_network'
+ was absent and is created from scratch.
+
+node_network_netmask: Subnet mask for 'network_interface' bridge.
+ Defaults to '255.255.255.0'.
+ This setting is applied only when 'vm_network'
+ was absent and is created from scratch.
+
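+A short, hypothetical example of overriding a few of these variables when
+applying the role (values are illustrative, not recommendations):
+
+ - hosts: localhost
+ connection: local
+ become: yes
+ gather_facts: yes
+ roles:
+ - role: create-nodes
+ vars:
+ node_storage_pool: 'xci'
+ node_storage_pool_path: '/srv/libvirt/images'
+ node_logdir: '/var/log/libvirt/baremetal_logs'
+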
+Dependencies
+------------
+
+None at this time.
+
+Example Playbook
+----------------
+
+- hosts: localhost
+ connection: local
+ become: yes
+ gather_facts: yes
+ roles:
+ - role: create-nodes
+
+License
+-------
+
+Copyright (c) 2018 SUSE Linux GmbH.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Author Information
+------------------
+
+mbuil@suse.com
diff --git a/xci/playbooks/roles/create-nodes/defaults/main.yml b/xci/playbooks/roles/create-nodes/defaults/main.yml
new file mode 100644
index 00000000..889f9c10
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/defaults/main.yml
@@ -0,0 +1,33 @@
+---
+# defaults file for the create-nodes role (based on bifrost-create-vm-nodes)
+baremetal_json_file: '/tmp/baremetal.json'
+
+# We collect these parameters from the pdf
+vm_nic: "virtio"
+vm_disk_cache: unsafe
+node_groups: {}
+node_default_groups: "{{ lookup('env', 'DEFAULT_HOST_GROUPS').split() | default(['baremetal'], true) }}"
+
+network_bridge_admin: 'br-admin'
+network_bridge_mgmt: 'br-mgmt'
+
+vm_network_admin: "{{ lookup('env', 'VM_NET_BRIDGE') | default('admin', true) }}"
+vm_network_mgmt: "{{ lookup('env', 'VM_NET_BRIDGE_MGMT') | default('mgmt', true) }}"
+
+node_network_netmask: "255.255.255.0"
+
+node_storage_pool: "{{ lookup('env', 'LIBVIRT_STORAGE_POOL') | default('default', true) }}"
+node_storage_pool_path: "/var/lib/libvirt/images"
+node_logdir: "/var/log/libvirt/baremetal_logs"
+# NOTE(pas-ha) next two are generic values for most OSes, overridden by distro-specifc vars
+vm_emulator: "/usr/bin/qemu-system-x86_64"
+# NOTE(pas-ha) not really tested with non-local qemu connections
+vm_libvirt_uri: "{{ lookup('env', 'LIBVIRT_CONNECT_URI') | default('qemu:///system', true) }}"
+
+opnfv_image_path: "/var/lib/libvirt/images"
+
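+# In a baremetal deployment only the OPNFV VM is created as a libvirt VM and
+# the physical nodes are handed over to bifrost; otherwise every node is a VM.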
+vms_to_create: "{{ (baremetal | bool) | ternary([opnfv_vm_pdf], [opnfv_vm_pdf] + nodes) }}"
+baremetal_nodes: "{{ (baremetal | bool) | ternary(nodes, omit) }}"
+libvirt_networks: "{{ (baremetal | bool) | ternary([vm_network_admin,vm_network_mgmt],[vm_network_admin]) }}"
diff --git a/xci/playbooks/roles/create-nodes/files/virtualbmc.conf b/xci/playbooks/roles/create-nodes/files/virtualbmc.conf
new file mode 100644
index 00000000..f8351dc1
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/files/virtualbmc.conf
@@ -0,0 +1,3 @@
+[log]
+logfile: /var/log/vbmc.log
+debug: true
diff --git a/xci/playbooks/roles/create-nodes/tasks/baremetalhoststojson.yml b/xci/playbooks/roles/create-nodes/tasks/baremetalhoststojson.yml
new file mode 100644
index 00000000..ef6ec345
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/tasks/baremetalhoststojson.yml
@@ -0,0 +1,93 @@
+---
+# Copyright 2018, SUSE Linux GmbH
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This playbook builds the JSON file with information about the baremetal
+# nodes, which ironic reads to start PXE booting.
+
+
+- name: BAREMETAL - Create file for static ip
+ file:
+ path: /tmp/baremetalstaticips
+ state: touch
+ group: root
+ owner: root
+ mode: 0644
+
+- name: "Generating the json describing baremetal nodes"
+ block:
+
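+ # The pdf node name maps to a deployment hostname under a per-installer
+ # key in the idf (idf.<installer>.hostnames).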
+ - set_fact:
+ node_name: "{{ idf.kubespray.hostnames[item.name] }}"
+ when: installer_type == "kubespray"
+
+ - set_fact:
+ node_name: "{{ idf.osa.hostnames[item.name] }}"
+ when: installer_type == "osa"
+
+ - set_fact:
+ node_name: "{{ idf.osh.hostnames[item.name] }}"
+ when: installer_type == "osh"
+
+ - set_fact:
+ host_group: "{{ node_default_groups }}"
+
+ - set_fact:
+ host_group: "{{ node_default_groups | union(node_groups[node_name]) }}"
+ when: node_groups[node_name] is defined
+
+ - name: BAREMETAL - Fetch the ip
+ set_fact:
+ admin_ip: "{{ item.interfaces[idf.net_config.admin.interface].address }}"
+
+ - name: BAREMETAL - Fetch the mac
+ set_fact:
+ admin_mac: "{{ item.interfaces[idf.net_config.admin.interface].mac_address }}"
+
+ - name: BAREMETAL - set the json entry for baremetal nodes
+ set_fact:
+ node_data:
+ name: "{{ node_name }}"
+ uuid: "{{ node_name | to_uuid }}"
+ host_groups: "{{ host_group }}"
+ driver: "ipmi"
+ driver_info:
+ power:
+ ipmi_address: "{{ item.remote_management.address }}"
+ ipmi_port: "{{ virtual_ipmi_port | default('623') }}"
+ ipmi_username: "{{ item.remote_management.user }}"
+ ipmi_password: "{{ item.remote_management.pass }}"
+ nics:
+ - mac: "{{ admin_mac }}"
+ ansible_ssh_host: "{{ admin_ip }}"
+ ipv4_address: "{{ admin_ip }}"
+ properties:
+ cpu_arch: "{{ item.node.arch }}"
+ ram: "{{ item.node.memory.rstrip('G') }}"
+ cpus: "{{ item.node.cpus }}"
+ disk_size: "{{ item.disks[0].disk_capacity.rstrip('G') }}"
+
+ - name: BAREMETAL - Static ip config for dnsmasq
+ lineinfile:
+ path: /tmp/baremetalstaticips
+ state: present
+ line: '{{ admin_mac }},{{ admin_ip }}'
+
+ - name: BAREMETAL - add created node info
+ set_fact:
+ nodes_json_data: "{{ nodes_json_data | combine({node_name: node_data}) }}"
+
+ when: (num_nodes | int) > (nodes_json_data | length | int) + 1
diff --git a/xci/playbooks/roles/create-nodes/tasks/create_vm.yml b/xci/playbooks/roles/create-nodes/tasks/create_vm.yml
new file mode 100644
index 00000000..ac55bf32
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/tasks/create_vm.yml
@@ -0,0 +1,200 @@
+---
+- name: "Creating VM"
+ block:
+ - set_fact:
+ vm_name: "{{ idf.kubespray.hostnames[item.1.name] }}"
+ when: installer_type == "kubespray"
+
+ - set_fact:
+ vm_name: "{{ idf.osa.hostnames[item.1.name] }}"
+ when: installer_type == "osa"
+
+ - set_fact:
+ vm_name: "{{ idf.osh.hostnames[item.1.name] }}"
+ when: installer_type == "osh"
+
+ - set_fact:
+ vm_log_file: "{{ node_logdir }}/{{ vm_name }}_console.log"
+ vm_host_group: "{{ node_default_groups }}"
+
+ - set_fact:
+ vm_host_group: "{{ node_default_groups | union(node_groups[vm_name]) }}"
+ when: node_groups[vm_name] is defined
+
+ - name: set prealloc arg for Debian
+ set_fact:
+ prealloc: "--prealloc-metadata"
+ when:
+ - ansible_os_family == 'Debian'
+ - vm_libvirt_uri == 'qemu:///system'
+
+ - name: list info on pools
+ virt_pool:
+ command: facts
+ uri: "{{ vm_libvirt_uri }}"
+
+ - name: list existing vms
+ virt:
+ command: list_vms
+ register: existing_vms
+
+ - block:
+ - name: Check if volume exists
+ stat:
+ path: "{{ opnfv_image_path }}/{{ vm_name }}.qcow2"
+ register: _vm_volume_prepared
+
+ - name: Resize opnfv VM image to {{ item.1.disks[0].disk_capacity }}
+ command: "qemu-img resize {{ opnfv_image_path }}/opnfv.qcow2 {{ item.1.disks[0].disk_capacity }}"
+ when:
+ - vm_name == 'opnfv'
+ - _vm_volume_prepared.stat.exists
+
+ # NOTE(pas-ha) Ansible still lacks modules to operate on libvirt volumes
+ # mbuil: Assuming there is only one disk [0]
+ - name: create volume for vm
+ command: >
+ virsh --connect {{ vm_libvirt_uri }}
+ vol-create-as {{ node_storage_pool }} {{ vm_name }}.qcow2
+ {{ item.1.disks[0].disk_capacity }}
+ --format qcow2 {{ prealloc|default("") }}
+ when:
+ - not _vm_volume_prepared.stat.exists
+ - (vm_name + '.qcow2') not in ansible_libvirt_pools[node_storage_pool].volumes
+
+ - name: set path to the volume created
+ set_fact:
+ vm_volume_path: "{{ ansible_libvirt_pools[node_storage_pool].path }}/{{ vm_name }}.qcow2"
+
+ - name: pre-touch the vm volume
+ file:
+ state: touch
+ path: "{{ vm_volume_path }}"
+ when: vm_libvirt_uri == 'qemu:///system'
+
+ # NOTE(TheJulia): CentOS default installs with an XFS root, and chattr
+ # fails to set +C on XFS. This could be more elegant, however the use
+ # case is for CI testing.
+ - name: set copy-on-write for volume on non-CentOS systems
+ command: chattr +C {{ vm_volume_path }}
+ ignore_errors: yes
+ when:
+ - ansible_distribution != 'CentOS'
+ - vm_libvirt_uri == 'qemu:///system'
+
+ # Fetches the xml descriptor from the template
+ - name: create_vm
+ virt:
+ command: define
+ name: "{{ vm_name }}"
+ uri: "{{ vm_libvirt_uri }}"
+ xml: "{{ lookup('template', 'vm.xml.j2') }}"
+
+ rescue:
+ - name: "Execute `dmesg` to collect debugging output should VM creation fail."
+ command: dmesg
+ - name: >
+ "Execute `virsh capabilities` to collect debugging output
+ should VM creation fail."
+ command: virsh capabilities
+ - name: "Abort due to failed VM creation"
+ fail: >
+ msg="VM creation step failed, please review dmesg
+ output for additional details"
+ when: vm_name not in existing_vms.list_vms
+
+ # TODO(pas-ha) replace 'command: vbmc ...' tasks
+ # with a custom Ansible module using vbmc Python API
+ - name: get list of nodes from virtualbmc
+ command: vbmc list
+ environment:
+ PATH: "{{ lookup('env', 'XCI_VENV') }}/bin"
+ register: vbmc_list
+
+ - debug: var=vbmc_list
+
+ # NOTE(NobodyCam): only delete the node from virtualbmc when its name appears in the 'vbmc list' output.
+ - name: delete vm from virtualbmc if it is there
+ command: vbmc delete {{ vm_name }}
+ environment:
+ PATH: "{{ lookup('env', 'XCI_VENV') }}/bin"
+ when: vbmc_list.stdout.find(vm_name) != -1
+
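+ # Give each VM a unique virtual IPMI port: the start port (623 by
+ # default) plus the node's index in the loop.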
+ - set_fact:
+ virtual_ipmi_port: "{{ (vm_ipmi_port_start|default(623) | int ) + (item.0 | int) }}"
+
+ - name: plug vm into vbmc
+ command: vbmc add {{ vm_name }} --libvirt-uri {{ vm_libvirt_uri }} --port {{ virtual_ipmi_port }}
+ environment:
+ PATH: "{{ lookup('env', 'XCI_VENV') }}/bin"
+
+ - name: start virtualbmc
+ command: vbmc start {{ vm_name }}
+ environment:
+ PATH: "{{ lookup('env', 'XCI_VENV') }}/bin"
+
+ - name: get list of nodes from virtualbmc
+ command: vbmc list
+ environment:
+ PATH: "{{ lookup('env', 'XCI_VENV') }}/bin"
+ register: vbmc_list2
+
+ - debug: var=vbmc_list2
+
+ - name: get XML of the vm
+ virt:
+ name: "{{ vm_name }}"
+ command: get_xml
+ register: vm_xml
+
+ - name: Fetch the index for admin network
+ set_fact:
+ admin_index: "{{ (vm_name == 'opnfv') | ternary(opnfv_vm_idf.net_config.admin.interface, idf.net_config.admin.interface) | int }}"
+
+ - name: Fetch the ip
+ set_fact:
+ vm_ip: "{{ item.1.interfaces[admin_index | int].address }}"
+
+ # Assumes there is only a single NIC per VM
+ - name: get MAC from vm XML
+ set_fact:
+ vm_mac: "{{ (vm_xml.get_xml | regex_findall(\"<mac address='.*'/>\") | first).split('=') | last | regex_replace(\"['/>]\", '') }}"
+
+ # NOTE(pas-ha) using default username and password set by virtualbmc - "admin" and "password" respectively
+ # see vbmc add --help
+ - name: set the json entry for vm
+ set_fact:
+ vm_data:
+ name: "{{ vm_name }}"
+ uuid: "{{ vm_name | to_uuid }}"
+ host_groups: "{{ vm_host_group }}"
+ driver: "ipmi"
+ driver_info:
+ power:
+ ipmi_address: "192.168.122.1"
+ ipmi_port: "{{ virtual_ipmi_port }}"
+ ipmi_username: "{{ item.1.remote_management.user }}"
+ ipmi_password: "{{ item.1.remote_management.pass }}"
+ nics:
+ - mac: "{{ vm_mac }}"
+ ansible_ssh_host: "{{ vm_ip }}"
+ ipv4_address: "{{ vm_ip }}"
+ properties:
+ cpu_arch: "{{ item.1.node.arch }}"
+ ram: "{{ item.1.node.memory.rstrip('G') }}"
+ cpus: "{{ item.1.node.cpus }}"
+ disk_size: "{{ item.1.disks[0].disk_capacity.rstrip('G') }}"
+
+ - name: add created vm info
+ set_fact:
+ nodes_json_data: "{{ nodes_json_data | combine({vm_name: vm_data}) }}"
+ when: vm_name != 'opnfv'
+
+ - name: Record OPNFV VM ip
+ set_fact:
+ opnfv_vm_ip: "{{ vm_ip }}"
+ when: vm_name == 'opnfv'
+
+ when: (num_nodes | int) > (item.0 | int)
diff --git a/xci/playbooks/roles/create-nodes/tasks/download_opnfvimage.yml b/xci/playbooks/roles/create-nodes/tasks/download_opnfvimage.yml
new file mode 100644
index 00000000..a227bc4f
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/tasks/download_opnfvimage.yml
@@ -0,0 +1,32 @@
+---
+- name: Download the {{ xci_distro }} image checksum file
+ get_url:
+ dest: "{{ xci_cache }}/deployment_image.qcow2.sha256.txt"
+ force: no
+ url: http://artifacts.opnfv.org/releng/xci/images/{{ xci_distro }}.qcow2.sha256.txt
+ timeout: 3000
+- name: Extract checksum
+ shell: awk '{print $1}' "{{ xci_cache }}/deployment_image.qcow2.sha256.txt"
+ register: _image_checksum
+- fail:
+ msg: "Failed to get image checksum"
+ when: _image_checksum.stdout == ''
+- set_fact:
+ image_checksum: "{{ _image_checksum.stdout }}"
+- name: Download the {{ xci_distro }} image file
+ get_url:
+ url: http://artifacts.opnfv.org/releng/xci/images/{{ xci_distro }}.qcow2
+ checksum: "sha256:{{ image_checksum }}"
+ timeout: 3000
+ dest: "{{ xci_cache }}/deployment_image.qcow2"
+ force: no
+- name: Set correct mode for deployment_image.qcow2 file
+ file:
+ path: "{{ xci_cache }}/deployment_image.qcow2"
+ mode: '0755'
+ owner: 'root'
+ group: 'root'
+
+- name: Create copy of original deployment image
+ shell: "cp {{ xci_cache }}/deployment_image.qcow2 {{ opnfv_image_path }}/opnfv.qcow2"
+ become: yes
diff --git a/xci/playbooks/roles/create-nodes/tasks/main.yml b/xci/playbooks/roles/create-nodes/tasks/main.yml
new file mode 100644
index 00000000..607ac494
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/tasks/main.yml
@@ -0,0 +1,54 @@
+---
+# baremetal_json_file could be the file coming from pdf/idf
+
+- name: "Load distribution defaults"
+ include_vars: "{{ ansible_os_family | lower }}.yml"
+
+# Required packages come from the distribution defaults loaded above
+- name: "Install required packages"
+ package:
+ name: "{{ required_packages }}"
+ update_cache: "{{ (ansible_pkg_mgr in ['apt', 'zypper']) | ternary('yes', omit) }}"
+ state: present
+
+- include_tasks: prepare_libvirt.yml
+ with_items: "{{ libvirt_networks }}"
+
+- include_tasks: download_opnfvimage.yml
+
+- name: create placeholder var for vm entries in JSON format
+ set_fact:
+ nodes_json_data: {}
+
+# First we create the opnfv_vm
+- include_tasks: create_vm.yml
+ with_indexed_items: "{{ vms_to_create }}"
+
+- include_tasks: baremetalhoststojson.yml
+ with_items: "{{ baremetal_nodes }}"
+
+- name: Start the opnfv vm
+ virt:
+ command: start
+ name: opnfv
+
+- name: remove previous baremetal data file
+ file:
+ state: absent
+ path: "{{ baremetal_json_file }}"
+
+# We got nodes_json_data from the create_vm playbook
+- name: write to baremetal json file
+ copy:
+ dest: "{{ baremetal_json_file }}"
+ content: "{{ nodes_json_data | to_nice_json }}"
+
+- name: >
+ "Set file permissions such that the baremetal data file
+ can be read by the user executing Ansible"
+ file:
+ path: "{{ baremetal_json_file }}"
+ owner: "{{ ansible_env.SUDO_USER }}"
+ when: >
+ ansible_env.SUDO_USER is defined and
+ baremetal_json_file != ""
diff --git a/xci/playbooks/roles/create-nodes/tasks/prepare_libvirt.yml b/xci/playbooks/roles/create-nodes/tasks/prepare_libvirt.yml
new file mode 100644
index 00000000..06afaec3
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/tasks/prepare_libvirt.yml
@@ -0,0 +1,139 @@
+---
+- name: "Restart libvirt service"
+ service: name="{{ libvirt_service_name }}" state=restarted
+
+# NOTE(Shrews) We need to enable ip forwarding for the libvirt bridge to
+# operate properly with dnsmasq. This should be done before starting dnsmasq.
+- name: "Enable IP forwarding in sysctl"
+ sysctl:
+ name: "net.ipv4.ip_forward"
+ value: 1
+ sysctl_set: yes
+ state: present
+ reload: yes
+
+# NOTE(Shrews) Ubuntu packaging+apparmor issue prevents libvirt from loading
+# the ROM from /usr/share/misc.
+- name: "Look for sgabios in {{ sgabios_dir }}"
+ stat: path={{ sgabios_dir }}/sgabios.bin
+ register: test_sgabios_qemu
+
+- name: "Look for sgabios in /usr/share/misc"
+ stat: path=/usr/share/misc/sgabios.bin
+ register: test_sgabios_misc
+
+- name: "Place sgabios.bin"
+ command: cp /usr/share/misc/sgabios.bin /usr/share/qemu/sgabios.bin
+ when: >
+ not test_sgabios_qemu.stat.exists and
+ test_sgabios_misc.stat.exists
+
+# NOTE(TheJulia): In order to prevent conflicts, stop
+# dnsmasq to prevent conflicts with libvirt restarting.
+# TODO(TheJulia): We shouldn't need to do this, but the
+# libvirt dhcp instance conflicts with our specific config
+# and taking this path allows us to not refactor dhcp at
+# this moment. Our DHCP serving should be refactored
+# so we don't need to do this.
+- name: "Stop default dnsmasq service"
+ service:
+ name: dnsmasq
+ state: stopped
+ ignore_errors: true
+
+# NOTE(TheJulia): Seems if you test in a VM, this might
+# be helpful if you installed your host originally
+# with the default 192.168.122.0/24 network
+- name: destroy libvirt network
+ virt_net:
+ name: "{{ item }}"
+ state: absent
+ uri: "{{ vm_libvirt_uri }}"
+
+# Ubuntu creates a default network when installing libvirt.
+# This network uses the 192.168.122.0/24 range and thus
+# conflicts with our admin network
+- name: destroy libvirt network
+ virt_net:
+ name: "default"
+ state: absent
+ uri: "{{ vm_libvirt_uri }}"
+
+- name: ensure libvirt network is present
+ virt_net:
+ name: "{{ item }}"
+ state: present
+ xml: "{{ lookup('template', 'net-'+item+'.xml.j2') }}"
+ uri: "{{ vm_libvirt_uri }}"
+
+- name: find facts on libvirt networks
+ virt_net:
+ command: facts
+ uri: "{{ vm_libvirt_uri }}"
+
+- name: "Delete network interface if virtual network is not active"
+ command: ip link del {{ ansible_libvirt_networks[item].bridge }}
+ when:
+ - ansible_libvirt_networks[item].state != 'active'
+ - vm_libvirt_uri == 'qemu:///system'
+ ignore_errors: yes
+
+- name: set libvirt network to autostart
+ virt_net:
+ name: "{{ item }}"
+ autostart: yes
+ uri: "{{ vm_libvirt_uri }}"
+
+- name: ensure libvirt network is running
+ virt_net:
+ name: "{{ item }}"
+ state: active
+ uri: "{{ vm_libvirt_uri }}"
+
+- name: get libvirt network status
+ virt_net:
+ name: "{{ item }}"
+ command: status
+ uri: "{{ vm_libvirt_uri }}"
+ register: test_vm_net_status
+
+- name: fail if libvirt network is not active
+ assert:
+ that: test_vm_net_status.status == 'active'
+
+- name: define a libvirt pool if not set
+ virt_pool:
+ name: "{{ node_storage_pool }}"
+ state: present
+ uri: "{{ vm_libvirt_uri }}"
+ xml: "{{ lookup('template', 'pool_dir.xml.j2') }}"
+
+- name: ensure libvirt pool is running
+ virt_pool:
+ name: "{{ node_storage_pool }}"
+ state: active
+ autostart: yes
+ uri: "{{ vm_libvirt_uri }}"
+
+- name: create dir for bm logs
+ file:
+ state: directory
+ path: "{{ node_logdir }}"
+ recurse: yes
+ mode: "0755"
+
+- name: install virtualbmc
+ pip:
+ name: virtualbmc
+ version: 1.5 # versions newer than 1.3 need the zmq dependency.
+ virtualenv: "{{ lookup('env', 'XCI_VENV') }}"
+
+- name: Create directory for the config of vbmc
+ file:
+ path: /etc/virtualbmc
+ state: directory
+
+- name: Place the config for virtualbmc
+ copy:
+ src: virtualbmc.conf
+ dest: /etc/virtualbmc/virtualbmc.conf
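Once this role completes, virtualbmc reads its settings from /etc/virtualbmc/virtualbmc.conf and each node VM can be fronted by a virtual BMC. A quick manual sanity check could look like the sketch below; the domain name, port and credentials are illustrative assumptions, not values this role configures:

    # activate the virtualenv the role installed vbmc into
    source "$XCI_VENV/bin/activate"
    # front a libvirt domain with a virtual BMC and start it
    vbmc add node1 --port 6230 --username admin --password password
    vbmc start node1
    vbmc list            # node1 should be listed as "running" on port 6230
    # IPMI should now respond on the chosen port
    ipmitool -I lanplus -H 127.0.0.1 -p 6230 -U admin -P password power status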
diff --git a/xci/playbooks/roles/create-nodes/templates/net-admin.xml.j2 b/xci/playbooks/roles/create-nodes/templates/net-admin.xml.j2
new file mode 100644
index 00000000..aedbbeb7
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/templates/net-admin.xml.j2
@@ -0,0 +1,14 @@
+<network>
+ <name>{{ item }}</name>
+ <forward mode='nat'>
+ <nat>
+ <port start='1024' end='65535'/>
+ </nat>
+ </forward>
+ <bridge name='br-{{ item }}' stp='on' delay='0'/>
+ <ip address='{{ opnfv_vm_pdf.interfaces[opnfv_vm_idf.net_config.admin.interface].gateway }}' netmask='255.255.255.0'>
+ <dhcp>
+ <host mac="{{ opnfv_vm_pdf.interfaces[opnfv_vm_idf.net_config.admin.interface].mac_address }}" ip="{{ opnfv_vm_pdf.interfaces[opnfv_vm_idf.net_config.admin.interface].address }}"/>
+ </dhcp>
+ </ip>
+</network>
diff --git a/xci/playbooks/roles/create-nodes/templates/net-mgmt.xml.j2 b/xci/playbooks/roles/create-nodes/templates/net-mgmt.xml.j2
new file mode 100644
index 00000000..4a9964c3
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/templates/net-mgmt.xml.j2
@@ -0,0 +1,11 @@
+<network>
+ <name>{{ item }}</name>
+ <forward mode='route'>
+ </forward>
+ <bridge name='br-{{ item }}' stp='on' delay='0'/>
+ <ip address='{{ opnfv_vm_pdf.interfaces[opnfv_vm_idf.net_config.mgmt.interface].gateway }}' netmask='255.255.255.0'>
+ <dhcp>
+ <host mac="{{ opnfv_vm_pdf.interfaces[opnfv_vm_idf.net_config.mgmt.interface].mac_address }}" ip="{{ opnfv_vm_pdf.interfaces[opnfv_vm_idf.net_config.mgmt.interface].address }}"/>
+ </dhcp>
+ </ip>
+</network>
diff --git a/xci/playbooks/roles/create-nodes/templates/net.xml.j2 b/xci/playbooks/roles/create-nodes/templates/net.xml.j2
new file mode 100644
index 00000000..7e372ffe
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/templates/net.xml.j2
@@ -0,0 +1,14 @@
+<network>
+ <name>{{ vm_network }}</name>
+ <forward mode='nat'>
+ <nat>
+ <port start='1024' end='65535'/>
+ </nat>
+ </forward>
+ <bridge name='{{ network_interface }}' stp='on' delay='0'/>
+ <ip address='{{ opnfv_vm_pdf.interfaces[opnfv_vm_idf.net_config.admin.interface].gateway }}' netmask='{{ node_network_netmask }}'>
+ <dhcp>
+ <host mac="{{ opnfv_vm_pdf.interfaces[opnfv_vm_idf.net_config.admin.interface].mac_address }}" ip="{{ opnfv_vm_pdf.interfaces[opnfv_vm_idf.net_config.admin.interface].address }}"/>
+ </dhcp>
+ </ip>
+</network>
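All three network templates above are rendered by the virt_net tasks in prepare_libvirt.yml. If a network fails to come up, the definition libvirt actually received can be inspected directly; a minimal check, assuming the template was rendered with item set to 'admin':

    virsh --connect qemu:///system net-list --all    # admin should show as active
    virsh --connect qemu:///system net-dumpxml admin # the rendered <network> definition
    ip link show br-admin                            # the bridge created from the template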
diff --git a/xci/playbooks/roles/create-nodes/templates/pool_dir.xml.j2 b/xci/playbooks/roles/create-nodes/templates/pool_dir.xml.j2
new file mode 100644
index 00000000..e4645deb
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/templates/pool_dir.xml.j2
@@ -0,0 +1,7 @@
+<pool type='dir'>
+ <name>{{ node_storage_pool }}</name>
+ <target>
+ <path>{{ node_storage_pool_path }}</path>
+ </target>
+</pool>
+
diff --git a/xci/playbooks/roles/create-nodes/templates/vm.xml.j2 b/xci/playbooks/roles/create-nodes/templates/vm.xml.j2
new file mode 100644
index 00000000..9fad42b8
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/templates/vm.xml.j2
@@ -0,0 +1,69 @@
+<domain type='{{ vm_domain_type }}'>
+ <name>{{ vm_name }}</name>
+ <memory unit='GiB'>{{ item.1.node.memory.rstrip('G') }}</memory>
+ <vcpu>{{ item.1.node.cpus }}</vcpu>
+ <os>
+ <type arch='{{ item.1.node.arch }}' machine='{{ item.1.node.model }}'>hvm</type>
+ {%- if 'opnfv' in vm_name -%}
+ <boot dev='hd'/>
+ {%- else -%}
+ <boot dev='network'/>
+ {% endif -%}
+ <bootmenu enable='no'/>
+ <bios useserial='yes' rebootTimeout='10000'/>
+ </os>
+ <features>
+ <acpi/>
+ <apic/>
+ <pae/>
+ </features>
+ <cpu mode='{{ item.1.node.cpu_cflags }}'>
+ <model fallback='allow'/>
+ </cpu>
+ <clock offset='utc'/>
+ <on_poweroff>destroy</on_poweroff>
+ <on_reboot>restart</on_reboot>
+ <on_crash>restart</on_crash>
+ <devices>
+ <emulator>{{ vm_emulator }}</emulator>
+ <disk type='file' device='disk'>
+ <driver name='qemu' type='qcow2' cache='{{ vm_disk_cache }}'/>
+ <source file='{{ vm_volume_path }}'/>
+ <target dev='vda' bus='virtio'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
+ </disk>
+ <controller type='ide' index='0'>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x1'/>
+ </controller>
+ <interface type='network'>
+ <source network='{{ vm_network_admin }}'/>
+ <model type='{{ vm_nic }}'/>
+ {%- if vm_name == 'opnfv' -%}
+ <mac address='{{ item.1.interfaces[opnfv_vm_idf.net_config.admin.interface].mac_address }}'/>
+ {%- else -%}
+ <mac address='{{ item.1.interfaces[idf.net_config.admin.interface].mac_address }}'/>
+ {%- endif -%}
+ </interface>
+ {%- if baremetal | bool -%}
+ <interface type='network'>
+ <source network='{{ vm_network_mgmt }}'/>
+ <model type='{{ vm_nic }}'/>
+ <mac address='{{ item.1.interfaces[opnfv_vm_idf.net_config.mgmt.interface].mac_address }}'/>
+ </interface>
+ {%- endif -%}
+ <input type='mouse' bus='ps2'/>
+ <graphics type='vnc' port='-1' autoport='yes'/>
+ <video>
+ <model type='cirrus' vram='9216' heads='1'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
+ </video>
+ <serial type='file'>
+ <source path='{{ vm_log_file }}'/>
+ <target port='1'/>
+ <alias name='serial1'/>
+ </serial>
+ <memballoon model='virtio'>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
+ </memballoon>
+ </devices>
+</domain>
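The domain template is rendered once per node and handed to libvirt; when debugging a template change it can help to replay that step by hand. A rough equivalent, with placeholder paths and names:

    # rendered output saved to /tmp/vm.xml beforehand (placeholder path)
    virsh define /tmp/vm.xml     # libvirt validates the XML on define
    virsh start opnfv
    # the <serial type='file'> device writes the console to vm_log_file,
    # so early boot problems can be followed with e.g.:
    tail -f /var/log/vms/opnfv_console.log   # assumed location under node_logdir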
diff --git a/xci/playbooks/roles/create-nodes/vars/debian.yml b/xci/playbooks/roles/create-nodes/vars/debian.yml
new file mode 100644
index 00000000..bcfc47d5
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/vars/debian.yml
@@ -0,0 +1,13 @@
+---
+sgabios_dir: /usr/share/qemu/
+libvirt_service_name: libvirt-bin
+required_packages:
+ - libvirt-bin
+ - qemu-utils
+ - qemu-kvm
+ - qemu-system-x86
+ - sgabios
+ - pkg-config
+ - libvirt-dev
+ - python-lxml
+ - python-libvirt
diff --git a/xci/playbooks/roles/create-nodes/vars/redhat.yml b/xci/playbooks/roles/create-nodes/vars/redhat.yml
new file mode 100644
index 00000000..2b285110
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/vars/redhat.yml
@@ -0,0 +1,17 @@
+---
+sgabios_dir: /usr/share/sgabios/
+libvirt_service_name: libvirtd
+required_packages:
+ - qemu-img
+ - qemu-kvm-tools
+ - qemu-kvm
+ - qemu-kvm-common
+ - qemu-system-x86
+ - sgabios-bin
+ - libvirt
+ - libvirt-client
+ - libvirt-daemon
+ - pkgconfig
+ - libvirt-devel
+ - libvirt-python
+ - python-lxml
diff --git a/xci/playbooks/roles/create-nodes/vars/suse.yml b/xci/playbooks/roles/create-nodes/vars/suse.yml
new file mode 100644
index 00000000..7e4c41ef
--- /dev/null
+++ b/xci/playbooks/roles/create-nodes/vars/suse.yml
@@ -0,0 +1,15 @@
+---
+sgabios_dir: /usr/share/sgabios/
+libvirt_service_name: libvirtd
+required_packages:
+ - qemu-tools
+ - qemu-kvm
+ - qemu-x86
+ - qemu-sgabios
+ - libvirt
+ - libvirt-client
+ - libvirt-daemon
+ - pkg-config
+ - libvirt-devel
+ - python-lxml
+ - libvirt-python
diff --git a/xci/playbooks/roles/prepare-functest/defaults/main.yml b/xci/playbooks/roles/prepare-functest/defaults/main.yml
deleted file mode 100644
index a3638302..00000000
--- a/xci/playbooks/roles/prepare-functest/defaults/main.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-# Gateway parameters
-gateway_ip: "10.10.10.1"
-gateway_ip_mask: "10.10.10.1/24"
-broadcast_ip: "10.10.10.255"
-gateway_interface: "br-vlan"
-
-# Network parameters
-external_network: "ext-net"
-
-# Subnet parameters
-subnet_name: "ext-subnet"
-allocation_pool: "start=10.10.10.5,end=10.10.10.254"
-subnet_cidr: "10.10.10.0/24"
diff --git a/xci/playbooks/roles/prepare-functest/tasks/main.yml b/xci/playbooks/roles/prepare-functest/tasks/main.yml
deleted file mode 100644
index 9a380cd1..00000000
--- a/xci/playbooks/roles/prepare-functest/tasks/main.yml
+++ /dev/null
@@ -1,32 +0,0 @@
----
-# SPDX-license-identifier: Apache-2.0
-##############################################################################
-# Copyright (c) 2017 SUSE Linux GmbH
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-- name: check if the gateway was already set
- shell: "ip a | grep {{ gateway_ip }}"
- register: gateway_ip_result
- ignore_errors: True
-
-- name: add public network gateway
- command: "ip addr add {{ gateway_ip_mask }} brd {{ broadcast_ip }} dev {{ gateway_interface }}"
- when: gateway_ip_result|failed
-
-- name: prepare script to create networks for functest
- template:
- src: prepare-functest.sh.j2
- dest: /root/prepare-functest.sh
- mode: 0755
-
-- name: Create networks
- shell: "/root/prepare-functest.sh"
-
-- name: prepare environment file for functest
- template:
- src: env.j2
- dest: /root/env
- mode: 0755
diff --git a/xci/playbooks/roles/prepare-functest/templates/env.j2 b/xci/playbooks/roles/prepare-functest/templates/env.j2
deleted file mode 100644
index 87093325..00000000
--- a/xci/playbooks/roles/prepare-functest/templates/env.j2
+++ /dev/null
@@ -1,4 +0,0 @@
-INSTALLER_TYPE=osa
-INSTALLER_IP=192.168.122.2
-EXTERNAL_NETWORK={{ external_network }}
-DEPLOY_SCENARIO="os-nosdn-nofeature-noha"
diff --git a/xci/playbooks/roles/prepare-functest/templates/prepare-functest.sh.j2 b/xci/playbooks/roles/prepare-functest/templates/prepare-functest.sh.j2
deleted file mode 100644
index febe8369..00000000
--- a/xci/playbooks/roles/prepare-functest/templates/prepare-functest.sh.j2
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-
-source /root/openrc
-
-openstack --insecure network create --external \
- --provider-physical-network flat \
- --provider-network-type flat {{ external_network }}
-
-openstack --insecure subnet create --network {{ external_network }} \
- --allocation-pool {{ allocation_pool }} \
- --subnet-range {{ subnet_cidr }} --gateway {{ gateway_ip }} \
- --no-dhcp {{ subnet_name }}
diff --git a/xci/playbooks/roles/prepare-tests/defaults/main.yml b/xci/playbooks/roles/prepare-tests/defaults/main.yml
new file mode 100644
index 00000000..7002586c
--- /dev/null
+++ b/xci/playbooks/roles/prepare-tests/defaults/main.yml
@@ -0,0 +1,14 @@
+---
+# Gateway parameters
+gateway_ip: "192.168.122.1"
+gateway_ip_mask: "192.168.122.1/24"
+broadcast_ip: "192.168.122.255"
+gateway_interface: "br-vlan"
+
+# Network parameters
+external_network: "ext-net"
+
+# Subnet parameters
+subnet_name: "ext-subnet"
+allocation_pool: "start=192.168.122.100,end=192.168.122.254"
+subnet_cidr: "192.168.122.0/24"
diff --git a/xci/playbooks/roles/prepare-tests/tasks/main.yml b/xci/playbooks/roles/prepare-tests/tasks/main.yml
new file mode 100644
index 00000000..a543ac1f
--- /dev/null
+++ b/xci/playbooks/roles/prepare-tests/tasks/main.yml
@@ -0,0 +1,56 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2017 SUSE Linux GmbH
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+- name: install required packages
+ package:
+ name: "{{ required_packages[ansible_pkg_mgr] }}"
+ update_cache: "{{ (ansible_pkg_mgr in ['apt', 'zypper']) | ternary('yes', omit) }}"
+ state: present
+
+# Docker is needed for test frameworks
+- name: Ensure Docker service is started and enabled
+ service:
+ name: docker
+ state: started
+ enabled: yes
+
+- name: install required pip packages
+ pip:
+ name: "{{ required_pip }}"
+ state: present
+ extra_args: '-c https://raw.githubusercontent.com/openstack/requirements/{{ requirements_git_install_branch }}/upper-constraints.txt'
+
+# ODL scenarios require ODL variables to be added to the environment
+- include_tasks: process_neutron_conf.yml
+ when: "'-odl-' in deploy_scenario"
+
+- name: prepare environment file for tests
+ template:
+ src: env.j2
+ dest: /root/env
+ mode: 0755
+
+- name: create the script to prepare for testing
+ template:
+ src: prepare-tests.sh.j2
+ dest: /root/prepare-tests.sh
+ mode: 0755
+
+- name: create the script to run functest
+ template:
+ src: run-functest.sh.j2
+ dest: /root/run-functest.sh
+ mode: 0755
+
+- name: create the script to run yardstick
+ template:
+ src: run-yardstick.sh.j2
+ dest: /root/run-yardstick.sh
+ mode: 0755
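The pip task above pins transitive dependencies against OpenStack's upper-constraints file. Its shell equivalent is roughly the following, where the branch is whatever requirements_git_install_branch resolves to (stable/queens is only an example):

    pip install docker-py \
        -c https://raw.githubusercontent.com/openstack/requirements/stable/queens/upper-constraints.txt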
diff --git a/xci/playbooks/roles/prepare-tests/tasks/process_neutron_conf.yml b/xci/playbooks/roles/prepare-tests/tasks/process_neutron_conf.yml
new file mode 100644
index 00000000..45608df3
--- /dev/null
+++ b/xci/playbooks/roles/prepare-tests/tasks/process_neutron_conf.yml
@@ -0,0 +1,19 @@
+---
+- name: Collecting ODL variables
+ block:
+ - name: Fetch odl_password variable
+ shell: "grep password /tmp/ml2_conf.ini | cut -d ' ' -f3"
+ register: odl_password
+
+ - name: Fetch odl_username variable
+ shell: "grep username /tmp/ml2_conf.ini | cut -d ' ' -f3"
+ register: odl_username
+
+ - name: Fetch odl_port variable
+ shell: "grep url /tmp/ml2_conf.ini | cut -d ':' -f3 | cut -d '/' -f1"
+ register: odl_port
+
+ - name: Fetch odl_ip variable
+ shell: "grep url /tmp/ml2_conf.ini | cut -d ':' -f2 | cut -d '/' -f3"
+ register: odl_ip
+ when: "'-odl-' in deploy_scenario"
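These pipelines assume a specific layout of the fetched ml2_conf.ini. Given an [ml2_odl] section like the illustrative sample below, the extractions resolve as shown; note that they match the first password/username/url occurrence anywhere in the file, so the parsing is fragile if other sections carry the same keys:

    # illustrative /tmp/ml2_conf.ini fragment:
    #   [ml2_odl]
    #   username = admin
    #   password = admin
    #   url = http://172.29.236.222:8180/controller/nb/v2/neutron

    grep password /tmp/ml2_conf.ini | cut -d ' ' -f3               # -> admin
    grep url /tmp/ml2_conf.ini | cut -d ':' -f3 | cut -d '/' -f1   # -> 8180
    grep url /tmp/ml2_conf.ini | cut -d ':' -f2 | cut -d '/' -f3   # -> 172.29.236.222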
diff --git a/xci/playbooks/roles/prepare-tests/templates/env.j2 b/xci/playbooks/roles/prepare-tests/templates/env.j2
new file mode 100644
index 00000000..d4f8f86c
--- /dev/null
+++ b/xci/playbooks/roles/prepare-tests/templates/env.j2
@@ -0,0 +1,15 @@
+INSTALLER_IP=192.168.122.2
+TEST_DB_URL=http://testresults.opnfv.org/test/api/v1/results
+ENERGY_RECORDER_API_URL=http://energy.opnfv.fr/resources
+{# external network is only valid for OpenStack based scenarios #}
+{% if 'os-' in deploy_scenario %}
+EXTERNAL_NETWORK={{ external_network }}
+{% endif %}
+{% if '-odl-' in deploy_scenario %}
+SDN_CONTROLLER_IP={{ odl_ip.stdout }}
+SDN_CONTROLLER_USER={{ odl_username.stdout }}
+SDN_CONTROLLER_PASSWORD={{ odl_password.stdout }}
+SDN_CONTROLLER_RESTCONFPORT={{ odl_port.stdout }}
+SDN_CONTROLLER_WEBPORT={{ odl_port.stdout }}
+{% endif %}
+
diff --git a/xci/playbooks/roles/prepare-tests/templates/prepare-tests.sh.j2 b/xci/playbooks/roles/prepare-tests/templates/prepare-tests.sh.j2
new file mode 100644
index 00000000..1b779cb9
--- /dev/null
+++ b/xci/playbooks/roles/prepare-tests/templates/prepare-tests.sh.j2
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+# Variables that we need to pass from XCI to testing
+XCI_ENV=(INSTALLER_TYPE XCI_FLAVOR OPENSTACK_OSA_VERSION CI_LOOP BUILD_TAG NODE_NAME FUNCTEST_MODE FUNCTEST_SUITE_NAME FUNCTEST_VERSION)
+
+# Extract variables from xci.env file
+if [[ -e /root/xci.env ]]; then
+ for x in ${XCI_ENV[@]}; do
+ grep "^${x}=" /root/xci.env >> /root/env
+ done
+ # Combine XCI's DEPLOY_SCENARIO and XCI_FLAVOR variables into the
+ # functest container's DEPLOY_SCENARIO variable using the format
+ # <scenario>-<flavor>. Note that XCI's mini flavor is mapped
+ # to noha.
+ DEPLOY_SCENARIO=$(grep -Po '(?<=DEPLOY_SCENARIO=).*' /root/xci.env)
+ XCI_FLAVOR=$(grep -Po '(?<=XCI_FLAVOR=).*' /root/xci.env)
+ XCI_FLAVOR=${XCI_FLAVOR/mini/noha}
+ echo "DEPLOY_SCENARIO=$DEPLOY_SCENARIO-$XCI_FLAVOR" >> /root/env
+fi
+
+# we need to ensure the necessary environment variables are sourced
+source /root/env
+
+{% if 'os-' in deploy_scenario %}
+{# stuff needed for OpenStack based scenarios #}
+source /root/openrc
+
+openstack --insecure network create --external \
+ --provider-physical-network flat \
+ --provider-network-type flat {{ external_network }}
+
+openstack --insecure subnet create --network {{ external_network }} \
+ --allocation-pool {{ allocation_pool }} \
+ --subnet-range {{ subnet_cidr }} --gateway {{ gateway_ip }} \
+ --no-dhcp {{ subnet_name }}
+{% else %}
+{# stuff needed for Kubernetes based scenarios #}
+# Create k8s.creds file for testing
+KUBE_MASTER_URL=$(grep -r server ~/.kube/config | awk '{print $2}')
+KUBE_MASTER_IP=$(echo $KUBE_MASTER_URL | awk -F "[:/]" '{print $4}')
+cat << EOF > ~/k8s.creds
+KUBERNETES_PROVIDER=local
+KUBE_MASTER_URL=$KUBE_MASTER_URL
+KUBE_MASTER_IP=$KUBE_MASTER_IP
+EOF
+{% endif %}
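The kubeconfig parsing at the end relies on awk field splitting; a worked example with an illustrative server entry shows how the master URL and IP fall out:

    # given a ~/.kube/config line like:  server: https://192.168.122.3:6443
    echo '    server: https://192.168.122.3:6443' | awk '{print $2}'
    # -> https://192.168.122.3:6443
    echo 'https://192.168.122.3:6443' | awk -F '[:/]' '{print $4}'
    # -> 192.168.122.3   (splitting on ':' and '/', the host is field 4)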
diff --git a/xci/playbooks/roles/prepare-tests/templates/run-functest.sh.j2 b/xci/playbooks/roles/prepare-tests/templates/run-functest.sh.j2
new file mode 100644
index 00000000..b4cf46d7
--- /dev/null
+++ b/xci/playbooks/roles/prepare-tests/templates/run-functest.sh.j2
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+# Create directory to store functest logs
+mkdir -p /root/functest-results/
+
+# Dump the env file
+echo "------------------------------------------------------"
+echo "------------- functest environment file --------------"
+cat /root/env
+echo "------------------------------------------------------"
+
+# we need to ensure the necessary environment variables are sourced
+source /root/env
+
+{% if 'os-' in deploy_scenario %}
+{# stuff needed for OpenStack based scenarios #}
+# the required images differ between suites, so avoid downloading unnecessary ones
+echo "Downloading the images needed for functest-$FUNCTEST_SUITE_NAME"
+mkdir ~/images && cd ~/images
+if [[ "$FUNCTEST_SUITE_NAME" =~ "healthcheck" ]]; then
+ wget -q http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img
+elif [[ "$FUNCTEST_SUITE_NAME" =~ "smoke" ]]; then
+ wget -q http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img \
+ http://testresults.opnfv.org/functest/shaker-image.qcow2 \
+ https://cloud-images.ubuntu.com/releases/14.04/release/ubuntu-14.04-server-cloudimg-amd64-disk1.img
+else
+ echo "Unsupported test suite for functest"
+ exit 1
+fi
+echo "------------------------------------------------------"
+ls -al . && cd ~
+echo "------------------------------------------------------"
+
+# docker image to use will be different for healthcheck and smoke test
+DOCKER_IMAGE_NAME="opnfv/functest-${FUNCTEST_SUITE_NAME}:${FUNCTEST_VERSION}"
+
+sudo docker run --env-file env \
+ -v $(pwd)/openrc:/home/opnfv/functest/conf/env_file \
+ -v $(pwd)/images:/home/opnfv/functest/images \
+ -v $(pwd)/functest-results:/home/opnfv/functest/results \
+ ${DOCKER_IMAGE_NAME}
+{% else %}
+{# stuff needed for Kubernetes based scenarios #}
+# the docker image to use differs based on the test suite
+DOCKER_IMAGE_NAME="opnfv/functest-kubernetes-${FUNCTEST_SUITE_NAME}"
+
+sudo docker run --env-file env \
+ -v $(pwd)/k8s.creds:/home/opnfv/functest/conf/env_file \
+ -v $(pwd)/.kube/config:/root/.kube/config \
+ -v $(pwd)/functest-results:/home/opnfv/functest/results \
+ $DOCKER_IMAGE_NAME
+{% endif %}
diff --git a/xci/playbooks/roles/prepare-tests/templates/run-yardstick.sh.j2 b/xci/playbooks/roles/prepare-tests/templates/run-yardstick.sh.j2
new file mode 100644
index 00000000..6a7fd8be
--- /dev/null
+++ b/xci/playbooks/roles/prepare-tests/templates/run-yardstick.sh.j2
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+# Create directory to store yardstick logs
+mkdir -p /root/yardstick-results/
+
+# Dump the env file
+echo "------------------------------------------------------"
+echo "------------- yardstick environment file --------------"
+cat /root/env
+echo "------------------------------------------------------"
+
+# we need to ensure the necessary environment variables are sourced
+source /root/env
+
+{% if 'os-' in deploy_scenario %}
+{# stuff needed for OpenStack based scenarios #}
+rc_file_vol="-v /root/openrc:/etc/yardstick/openstack.creds"
+{% else %}
+{# k8 scenario name is hardcoded for the time being until we clarify #}
+{# which suite name we should use for the scenarios without yardstick suites #}
+DEPLOY_SCENARIO="k8-nosdn-nofeature-noha"
+rc_file_vol="-v /root/admin.conf:/etc/yardstick/admin.conf"
+{% endif %}
+
+OS_CACERT="/etc/ssl/certs/haproxy.cert"
+DOCKER_IMAGE_NAME="opnfv/yardstick"
+YARDSTICK_SCENARIO_SUITE_NAME="opnfv_${DEPLOY_SCENARIO}_daily.yaml"
+
+# add OS_CACERT to openrc
+echo "export OS_CACERT=/etc/yardstick/os_cacert" >> ~/openrc
+
+opts="--privileged=true --rm"
+envs="-e INSTALLER_TYPE=$INSTALLER_TYPE -e INSTALLER_IP=$INSTALLER_IP \
+ -e NODE_NAME=$NODE_NAME -e EXTERNAL_NETWORK=$EXTERNAL_NETWORK \
+ -e YARDSTICK_BRANCH=master -e BRANCH=master \
+ -e DEPLOY_SCENARIO=$DEPLOY_SCENARIO -e CI_DEBUG=true"
+cacert_file_vol="-v $OS_CACERT:/etc/yardstick/os_cacert"
+map_log_dir="-v /root/yardstick-results:/tmp/yardstick"
+sshkey="-v /root/.ssh/id_rsa:/root/.ssh/id_rsa"
+cmd="sudo docker run ${opts} ${envs} ${rc_file_vol} ${cacert_file_vol} \
+ ${map_log_dir} ${sshkey} ${DOCKER_IMAGE_NAME} \
+ exec_tests.sh ${YARDSTICK_SCENARIO_SUITE_NAME}"
+echo "Running yardstick with the command"
+echo "------------------------------------------------------"
+echo $cmd
+echo "------------------------------------------------------"
+$cmd
diff --git a/xci/playbooks/roles/prepare-tests/vars/main.yml b/xci/playbooks/roles/prepare-tests/vars/main.yml
new file mode 100644
index 00000000..83638466
--- /dev/null
+++ b/xci/playbooks/roles/prepare-tests/vars/main.yml
@@ -0,0 +1,17 @@
+---
+required_packages:
+ apt:
+ - docker.io
+ - wget
+ - xz-utils
+ zypper:
+ - docker
+ - wget
+ - xz
+ yum:
+ - docker
+ - wget
+ - xz
+
+required_pip:
+ - docker-py
diff --git a/xci/scenarios/README.rst b/xci/scenarios/README.rst
deleted file mode 100644
index 5d9bdf06..00000000
--- a/xci/scenarios/README.rst
+++ /dev/null
@@ -1 +0,0 @@
-This folder keeps the roles for the generic scenarios.
diff --git a/xci/scenarios/k8-nosdn-nofeature/.gitkeep b/xci/scenarios/k8-nosdn-nofeature/.gitkeep
deleted file mode 100644
index e69de29b..00000000
--- a/xci/scenarios/k8-nosdn-nofeature/.gitkeep
+++ /dev/null
diff --git a/xci/scenarios/os-nosdn-nofeature/README.rst b/xci/scenarios/os-nosdn-nofeature/README.rst
deleted file mode 100644
index dcdc83fc..00000000
--- a/xci/scenarios/os-nosdn-nofeature/README.rst
+++ /dev/null
@@ -1,2 +0,0 @@
-This scenario is currently incomplete. In order for it to be
-complete, changes for CEPH must be moved here, combining OVS + CEPH.
diff --git a/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/ha/openstack_user_config.yml b/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/ha/openstack_user_config.yml
deleted file mode 100644
index 1aaf84d8..00000000
--- a/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/ha/openstack_user_config.yml
+++ /dev/null
@@ -1,255 +0,0 @@
----
-cidr_networks:
- container: 172.29.236.0/22
- tunnel: 172.29.240.0/22
- storage: 172.29.244.0/22
-
-used_ips:
- - "172.29.236.1,172.29.236.50"
- - "172.29.240.1,172.29.240.50"
- - "172.29.244.1,172.29.244.50"
- - "172.29.248.1,172.29.248.50"
- - "172.29.236.222"
-
-global_overrides:
- internal_lb_vip_address: 172.29.236.222
- external_lb_vip_address: 192.168.122.220
- tunnel_bridge: "br-vxlan"
- management_bridge: "br-mgmt"
- provider_networks:
- - network:
- container_bridge: "br-mgmt"
- container_type: "veth"
- container_interface: "eth1"
- ip_from_q: "container"
- type: "raw"
- group_binds:
- - all_containers
- - hosts
- is_container_address: true
- is_ssh_address: true
- - network:
- container_bridge: "br-vxlan"
- container_type: "veth"
- container_interface: "eth10"
- ip_from_q: "tunnel"
- type: "vxlan"
- range: "1:1000"
- net_name: "vxlan"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth12"
- host_bind_override: "eth12"
- type: "flat"
- net_name: "flat"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth11"
- type: "vlan"
- range: "1:1"
- net_name: "vlan"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-storage"
- container_type: "veth"
- container_interface: "eth2"
- ip_from_q: "storage"
- type: "raw"
- group_binds:
- - glance_api
- - cinder_api
- - cinder_volume
- - nova_compute
-
-# ##
-# ## Infrastructure
-# ##
-
-# galera, memcache, rabbitmq, utility
-shared-infra_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# repository (apt cache, python packages, etc)
-repo-infra_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# load balancer
-# Ideally the load balancer should not use the Infrastructure hosts.
-# Dedicated hardware is best for improved performance and security.
-haproxy_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# rsyslog server
-# log_hosts:
-# log1:
-# ip: 172.29.236.14
-
-# ##
-# ## OpenStack
-# ##
-
-# keystone
-identity_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# cinder api services
-storage-infra_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# glance
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-image_hosts:
- controller00:
- ip: 172.29.236.11
- container_vars:
- limit_container_types: glance
- glance_nfs_client:
- - server: "172.29.244.14"
- remote_path: "/images"
- local_path: "/var/lib/glance/images"
- type: "nfs"
- options: "_netdev,auto"
- controller01:
- ip: 172.29.236.12
- container_vars:
- limit_container_types: glance
- glance_nfs_client:
- - server: "172.29.244.14"
- remote_path: "/images"
- local_path: "/var/lib/glance/images"
- type: "nfs"
- options: "_netdev,auto"
- controller02:
- ip: 172.29.236.13
- container_vars:
- limit_container_types: glance
- glance_nfs_client:
- - server: "172.29.244.14"
- remote_path: "/images"
- local_path: "/var/lib/glance/images"
- type: "nfs"
- options: "_netdev,auto"
-
-# nova api, conductor, etc services
-compute-infra_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# heat
-orchestration_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# horizon
-dashboard_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# neutron server, agents (L3, etc)
-network_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# nova hypervisors
-compute_hosts:
- compute00:
- ip: 172.29.236.14
- compute01:
- ip: 172.29.236.15
-
-# cinder volume hosts (NFS-backed)
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-storage_hosts:
- controller00:
- ip: 172.29.236.11
- container_vars:
- cinder_backends:
- limit_container_types: cinder_volume
- nfs_volume:
- volume_backend_name: NFS_VOLUME1
- volume_driver: cinder.volume.drivers.nfs.NfsDriver
- nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
- nfs_shares_config: /etc/cinder/nfs_shares
- shares:
- - ip: "172.29.244.14"
- share: "/volumes"
- controller01:
- ip: 172.29.236.12
- container_vars:
- cinder_backends:
- limit_container_types: cinder_volume
- nfs_volume:
- volume_backend_name: NFS_VOLUME1
- volume_driver: cinder.volume.drivers.nfs.NfsDriver
- nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
- nfs_shares_config: /etc/cinder/nfs_shares
- shares:
- - ip: "172.29.244.14"
- share: "/volumes"
- controller02:
- ip: 172.29.236.13
- container_vars:
- cinder_backends:
- limit_container_types: cinder_volume
- nfs_volume:
- volume_backend_name: NFS_VOLUME1
- volume_driver: cinder.volume.drivers.nfs.NfsDriver
- nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
- nfs_shares_config: /etc/cinder/nfs_shares
- shares:
- - ip: "172.29.244.14"
- share: "/volumes"
diff --git a/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/mini/openstack_user_config.yml b/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/mini/openstack_user_config.yml
deleted file mode 100644
index 86b87c15..00000000
--- a/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/mini/openstack_user_config.yml
+++ /dev/null
@@ -1,170 +0,0 @@
----
-cidr_networks:
- container: 172.29.236.0/22
- tunnel: 172.29.240.0/22
- storage: 172.29.244.0/22
-
-used_ips:
- - "172.29.236.1,172.29.236.50"
- - "172.29.240.1,172.29.240.50"
- - "172.29.244.1,172.29.244.50"
- - "172.29.248.1,172.29.248.50"
-
-global_overrides:
- internal_lb_vip_address: 172.29.236.11
- external_lb_vip_address: 192.168.122.3
- tunnel_bridge: "br-vxlan"
- management_bridge: "br-mgmt"
- provider_networks:
- - network:
- container_bridge: "br-mgmt"
- container_type: "veth"
- container_interface: "eth1"
- ip_from_q: "container"
- type: "raw"
- group_binds:
- - all_containers
- - hosts
- is_container_address: true
- is_ssh_address: true
- - network:
- container_bridge: "br-vxlan"
- container_type: "veth"
- container_interface: "eth10"
- ip_from_q: "tunnel"
- type: "vxlan"
- range: "1:1000"
- net_name: "vxlan"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth12"
- host_bind_override: "eth12"
- type: "flat"
- net_name: "flat"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth11"
- type: "vlan"
- range: "1:1"
- net_name: "vlan"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-storage"
- container_type: "veth"
- container_interface: "eth2"
- ip_from_q: "storage"
- type: "raw"
- group_binds:
- - glance_api
- - cinder_api
- - cinder_volume
- - nova_compute
-
-# ##
-# ## Infrastructure
-# ##
-
-# galera, memcache, rabbitmq, utility
-shared-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# repository (apt cache, python packages, etc)
-repo-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# load balancer
-# Ideally the load balancer should not use the Infrastructure hosts.
-# Dedicated hardware is best for improved performance and security.
-haproxy_hosts:
- controller00:
- ip: 172.29.236.11
-
-# rsyslog server
-# log_hosts:
-# log1:
-# ip: 172.29.236.14
-
-# ##
-# ## OpenStack
-# ##
-
-# keystone
-identity_hosts:
- controller00:
- ip: 172.29.236.11
-
-# cinder api services
-storage-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# glance
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-image_hosts:
- controller00:
- ip: 172.29.236.11
- container_vars:
- limit_container_types: glance
- glance_nfs_client:
- - server: "172.29.244.12"
- remote_path: "/images"
- local_path: "/var/lib/glance/images"
- type: "nfs"
- options: "_netdev,auto"
-
-# nova api, conductor, etc services
-compute-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# heat
-orchestration_hosts:
- controller00:
- ip: 172.29.236.11
-
-# horizon
-dashboard_hosts:
- controller00:
- ip: 172.29.236.11
-
-# neutron server, agents (L3, etc)
-network_hosts:
- controller00:
- ip: 172.29.236.11
-
-# nova hypervisors
-compute_hosts:
- compute00:
- ip: 172.29.236.12
-
-# cinder volume hosts (NFS-backed)
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-storage_hosts:
- controller00:
- ip: 172.29.236.11
- container_vars:
- cinder_backends:
- limit_container_types: cinder_volume
- nfs_volume:
- volume_backend_name: NFS_VOLUME1
- volume_driver: cinder.volume.drivers.nfs.NfsDriver
- nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
- nfs_shares_config: /etc/cinder/nfs_shares
- shares:
- - ip: "172.29.244.12"
- share: "/volumes"
diff --git a/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/noha/openstack_user_config.yml b/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/noha/openstack_user_config.yml
deleted file mode 100644
index 99b768c4..00000000
--- a/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/noha/openstack_user_config.yml
+++ /dev/null
@@ -1,172 +0,0 @@
----
-cidr_networks:
- container: 172.29.236.0/22
- tunnel: 172.29.240.0/22
- storage: 172.29.244.0/22
-
-used_ips:
- - "172.29.236.1,172.29.236.50"
- - "172.29.240.1,172.29.240.50"
- - "172.29.244.1,172.29.244.50"
- - "172.29.248.1,172.29.248.50"
-
-global_overrides:
- internal_lb_vip_address: 172.29.236.11
- external_lb_vip_address: 192.168.122.3
- tunnel_bridge: "br-vxlan"
- management_bridge: "br-mgmt"
- provider_networks:
- - network:
- container_bridge: "br-mgmt"
- container_type: "veth"
- container_interface: "eth1"
- ip_from_q: "container"
- type: "raw"
- group_binds:
- - all_containers
- - hosts
- is_container_address: true
- is_ssh_address: true
- - network:
- container_bridge: "br-vxlan"
- container_type: "veth"
- container_interface: "eth10"
- ip_from_q: "tunnel"
- type: "vxlan"
- range: "1:1000"
- net_name: "vxlan"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth12"
- host_bind_override: "eth12"
- type: "flat"
- net_name: "flat"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth11"
- type: "vlan"
- range: "1:1"
- net_name: "vlan"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-storage"
- container_type: "veth"
- container_interface: "eth2"
- ip_from_q: "storage"
- type: "raw"
- group_binds:
- - glance_api
- - cinder_api
- - cinder_volume
- - nova_compute
-
-# ##
-# ## Infrastructure
-# ##
-
-# galera, memcache, rabbitmq, utility
-shared-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# repository (apt cache, python packages, etc)
-repo-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# load balancer
-# Ideally the load balancer should not use the Infrastructure hosts.
-# Dedicated hardware is best for improved performance and security.
-haproxy_hosts:
- controller00:
- ip: 172.29.236.11
-
-# rsyslog server
-# log_hosts:
-# log1:
-# ip: 172.29.236.14
-
-# ##
-# ## OpenStack
-# ##
-
-# keystone
-identity_hosts:
- controller00:
- ip: 172.29.236.11
-
-# cinder api services
-storage-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# glance
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-image_hosts:
- controller00:
- ip: 172.29.236.11
- container_vars:
- limit_container_types: glance
- glance_nfs_client:
- - server: "172.29.244.12"
- remote_path: "/images"
- local_path: "/var/lib/glance/images"
- type: "nfs"
- options: "_netdev,auto"
-
-# nova api, conductor, etc services
-compute-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# heat
-orchestration_hosts:
- controller00:
- ip: 172.29.236.11
-
-# horizon
-dashboard_hosts:
- controller00:
- ip: 172.29.236.11
-
-# neutron server, agents (L3, etc)
-network_hosts:
- controller00:
- ip: 172.29.236.11
-
-# nova hypervisors
-compute_hosts:
- compute00:
- ip: 172.29.236.12
- compute01:
- ip: 172.29.236.13
-
-# cinder volume hosts (NFS-backed)
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-storage_hosts:
- controller00:
- ip: 172.29.236.11
- container_vars:
- cinder_backends:
- limit_container_types: cinder_volume
- nfs_volume:
- volume_backend_name: NFS_VOLUME1
- volume_driver: cinder.volume.drivers.nfs.NfsDriver
- nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
- nfs_shares_config: /etc/cinder/nfs_shares
- shares:
- - ip: "172.29.244.12"
- share: "/volumes"
diff --git a/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/user_variables_os-nosdn-nofeature.yml b/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/user_variables_os-nosdn-nofeature.yml
deleted file mode 100644
index 2f678544..00000000
--- a/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/user_variables_os-nosdn-nofeature.yml
+++ /dev/null
@@ -1,35 +0,0 @@
----
-# Copyright (c) 2017 Ericsson AB and others.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# ##
-# ## This file contains commonly used overrides for convenience. Please inspect
-# ## the defaults for each role to find additional override options.
-# ##
-
-# Ensure the openvswitch kernel module is loaded
-openstack_host_specific_kernel_modules:
- - name: "openvswitch"
- pattern: "CONFIG_OPENVSWITCH"
- group: "network_hosts"
-
-# neutron specific config
-neutron_plugin_type: ml2.ovs
-
-neutron_ml2_drivers_type: "flat,vlan,vxlan"
-
-neutron_provider_networks:
- network_flat_networks: "*"
- network_types: "vxlan"
- network_vxlan_ranges: "1:1000" \ No newline at end of file
diff --git a/xci/scenarios/os-odl-nofeature/.gitkeep b/xci/scenarios/os-odl-nofeature/.gitkeep
deleted file mode 100644
index e69de29b..00000000
--- a/xci/scenarios/os-odl-nofeature/.gitkeep
+++ /dev/null
diff --git a/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/ha/openstack_user_config.yml b/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/ha/openstack_user_config.yml
deleted file mode 100644
index 2ca5a987..00000000
--- a/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/ha/openstack_user_config.yml
+++ /dev/null
@@ -1,256 +0,0 @@
----
-cidr_networks:
- container: 172.29.236.0/22
- tunnel: 172.29.240.0/22
- storage: 172.29.244.0/22
-
-used_ips:
- - "172.29.236.1,172.29.236.50"
- - "172.29.240.1,172.29.240.50"
- - "172.29.244.1,172.29.244.50"
- - "172.29.248.1,172.29.248.50"
- - "172.29.236.222"
-
-global_overrides:
- internal_lb_vip_address: 172.29.236.222
- external_lb_vip_address: 192.168.122.220
- tunnel_bridge: "br-vxlan"
- management_bridge: "br-mgmt"
- provider_networks:
- - network:
- container_bridge: "br-mgmt"
- container_type: "veth"
- container_interface: "eth1"
- ip_from_q: "container"
- type: "raw"
- group_binds:
- - all_containers
- - hosts
- is_container_address: true
- is_ssh_address: true
- - network:
- container_bridge: "br-vxlan"
- container_type: "veth"
- container_interface: "eth10"
- ip_from_q: "tunnel"
- type: "vxlan"
- range: "1:1000"
- net_name: "vxlan"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth12"
- host_bind_override: "eth12"
- type: "flat"
- net_name: "flat"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth11"
- host_bind_override: "eth12"
- type: "vlan"
- range: "102:199"
- net_name: "physnet1"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-storage"
- container_type: "veth"
- container_interface: "eth2"
- ip_from_q: "storage"
- type: "raw"
- group_binds:
- - glance_api
- - cinder_api
- - cinder_volume
- - nova_compute
-
-# ##
-# ## Infrastructure
-# ##
-
-# galera, memcache, rabbitmq, utility
-shared-infra_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# repository (apt cache, python packages, etc)
-repo-infra_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# load balancer
-# Ideally the load balancer should not use the Infrastructure hosts.
-# Dedicated hardware is best for improved performance and security.
-haproxy_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# rsyslog server
-# log_hosts:
-# log1:
-# ip: 172.29.236.14
-
-# ##
-# ## OpenStack
-# ##
-
-# keystone
-identity_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# cinder api services
-storage-infra_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# glance
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-image_hosts:
- controller00:
- ip: 172.29.236.11
- container_vars:
- limit_container_types: glance
- glance_nfs_client:
- - server: "172.29.244.14"
- remote_path: "/images"
- local_path: "/var/lib/glance/images"
- type: "nfs"
- options: "_netdev,auto"
- controller01:
- ip: 172.29.236.12
- container_vars:
- limit_container_types: glance
- glance_nfs_client:
- - server: "172.29.244.14"
- remote_path: "/images"
- local_path: "/var/lib/glance/images"
- type: "nfs"
- options: "_netdev,auto"
- controller02:
- ip: 172.29.236.13
- container_vars:
- limit_container_types: glance
- glance_nfs_client:
- - server: "172.29.244.14"
- remote_path: "/images"
- local_path: "/var/lib/glance/images"
- type: "nfs"
- options: "_netdev,auto"
-
-# nova api, conductor, etc services
-compute-infra_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# heat
-orchestration_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# horizon
-dashboard_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# neutron server, agents (L3, etc)
-network_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# nova hypervisors
-compute_hosts:
- compute00:
- ip: 172.29.236.14
- compute01:
- ip: 172.29.236.15
-
-# cinder volume hosts (NFS-backed)
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-storage_hosts:
- controller00:
- ip: 172.29.236.11
- container_vars:
- cinder_backends:
- limit_container_types: cinder_volume
- nfs_volume:
- volume_backend_name: NFS_VOLUME1
- volume_driver: cinder.volume.drivers.nfs.NfsDriver
- nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
- nfs_shares_config: /etc/cinder/nfs_shares
- shares:
- - ip: "172.29.244.14"
- share: "/volumes"
- controller01:
- ip: 172.29.236.12
- container_vars:
- cinder_backends:
- limit_container_types: cinder_volume
- nfs_volume:
- volume_backend_name: NFS_VOLUME1
- volume_driver: cinder.volume.drivers.nfs.NfsDriver
- nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
- nfs_shares_config: /etc/cinder/nfs_shares
- shares:
- - ip: "172.29.244.14"
- share: "/volumes"
- controller02:
- ip: 172.29.236.13
- container_vars:
- cinder_backends:
- limit_container_types: cinder_volume
- nfs_volume:
- volume_backend_name: NFS_VOLUME1
- volume_driver: cinder.volume.drivers.nfs.NfsDriver
- nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
- nfs_shares_config: /etc/cinder/nfs_shares
- shares:
- - ip: "172.29.244.14"
- share: "/volumes"
diff --git a/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/mini/openstack_user_config.yml b/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/mini/openstack_user_config.yml
deleted file mode 100644
index 0f8ccd18..00000000
--- a/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/mini/openstack_user_config.yml
+++ /dev/null
@@ -1,171 +0,0 @@
----
-cidr_networks:
- container: 172.29.236.0/22
- tunnel: 172.29.240.0/22
- storage: 172.29.244.0/22
-
-used_ips:
- - "172.29.236.1,172.29.236.50"
- - "172.29.240.1,172.29.240.50"
- - "172.29.244.1,172.29.244.50"
- - "172.29.248.1,172.29.248.50"
-
-global_overrides:
- internal_lb_vip_address: 172.29.236.11
- external_lb_vip_address: 192.168.122.3
- tunnel_bridge: "br-vxlan"
- management_bridge: "br-mgmt"
- provider_networks:
- - network:
- container_bridge: "br-mgmt"
- container_type: "veth"
- container_interface: "eth1"
- ip_from_q: "container"
- type: "raw"
- group_binds:
- - all_containers
- - hosts
- is_container_address: true
- is_ssh_address: true
- - network:
- container_bridge: "br-vxlan"
- container_type: "veth"
- container_interface: "eth10"
- ip_from_q: "tunnel"
- type: "vxlan"
- range: "1:1000"
- net_name: "vxlan"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth12"
- host_bind_override: "eth12"
- type: "flat"
- net_name: "flat"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth11"
- host_bind_override: "eth12"
- type: "vlan"
- range: "102:199"
- net_name: "physnet1"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-storage"
- container_type: "veth"
- container_interface: "eth2"
- ip_from_q: "storage"
- type: "raw"
- group_binds:
- - glance_api
- - cinder_api
- - cinder_volume
- - nova_compute
-
-# ##
-# ## Infrastructure
-# ##
-
-# galera, memcache, rabbitmq, utility
-shared-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# repository (apt cache, python packages, etc)
-repo-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# load balancer
-# Ideally the load balancer should not use the Infrastructure hosts.
-# Dedicated hardware is best for improved performance and security.
-haproxy_hosts:
- controller00:
- ip: 172.29.236.11
-
-# rsyslog server
-# log_hosts:
-# log1:
-# ip: 172.29.236.14
-
-# ##
-# ## OpenStack
-# ##
-
-# keystone
-identity_hosts:
- controller00:
- ip: 172.29.236.11
-
-# cinder api services
-storage-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# glance
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-image_hosts:
- controller00:
- ip: 172.29.236.11
- container_vars:
- limit_container_types: glance
- glance_nfs_client:
- - server: "172.29.244.12"
- remote_path: "/images"
- local_path: "/var/lib/glance/images"
- type: "nfs"
- options: "_netdev,auto"
-
-# nova api, conductor, etc services
-compute-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# heat
-orchestration_hosts:
- controller00:
- ip: 172.29.236.11
-
-# horizon
-dashboard_hosts:
- controller00:
- ip: 172.29.236.11
-
-# neutron server, agents (L3, etc)
-network_hosts:
- controller00:
- ip: 172.29.236.11
-
-# nova hypervisors
-compute_hosts:
- compute00:
- ip: 172.29.236.12
-
-# cinder volume hosts (NFS-backed)
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-storage_hosts:
- controller00:
- ip: 172.29.236.11
- container_vars:
- cinder_backends:
- limit_container_types: cinder_volume
- nfs_volume:
- volume_backend_name: NFS_VOLUME1
- volume_driver: cinder.volume.drivers.nfs.NfsDriver
- nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
- nfs_shares_config: /etc/cinder/nfs_shares
- shares:
- - ip: "172.29.244.12"
- share: "/volumes"
diff --git a/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/noha/openstack_user_config.yml b/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/noha/openstack_user_config.yml
deleted file mode 100644
index 7ed9cd32..00000000
--- a/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/noha/openstack_user_config.yml
+++ /dev/null
@@ -1,173 +0,0 @@
----
-cidr_networks:
- container: 172.29.236.0/22
- tunnel: 172.29.240.0/22
- storage: 172.29.244.0/22
-
-used_ips:
- - "172.29.236.1,172.29.236.50"
- - "172.29.240.1,172.29.240.50"
- - "172.29.244.1,172.29.244.50"
- - "172.29.248.1,172.29.248.50"
-
-global_overrides:
- internal_lb_vip_address: 172.29.236.11
- external_lb_vip_address: 192.168.122.3
- tunnel_bridge: "br-vxlan"
- management_bridge: "br-mgmt"
- provider_networks:
- - network:
- container_bridge: "br-mgmt"
- container_type: "veth"
- container_interface: "eth1"
- ip_from_q: "container"
- type: "raw"
- group_binds:
- - all_containers
- - hosts
- is_container_address: true
- is_ssh_address: true
- - network:
- container_bridge: "br-vxlan"
- container_type: "veth"
- container_interface: "eth10"
- ip_from_q: "tunnel"
- type: "vxlan"
- range: "1:1000"
- net_name: "vxlan"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth12"
- host_bind_override: "eth12"
- type: "flat"
- net_name: "flat"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth11"
- host_bind_override: "eth12"
- type: "vlan"
- range: "102:199"
- net_name: "physnet1"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-storage"
- container_type: "veth"
- container_interface: "eth2"
- ip_from_q: "storage"
- type: "raw"
- group_binds:
- - glance_api
- - cinder_api
- - cinder_volume
- - nova_compute
-
-# ##
-# ## Infrastructure
-# ##
-
-# galera, memcache, rabbitmq, utility
-shared-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# repository (apt cache, python packages, etc)
-repo-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# load balancer
-# Ideally the load balancer should not use the Infrastructure hosts.
-# Dedicated hardware is best for improved performance and security.
-haproxy_hosts:
- controller00:
- ip: 172.29.236.11
-
-# rsyslog server
-# log_hosts:
-# log1:
-# ip: 172.29.236.14
-
-# ##
-# ## OpenStack
-# ##
-
-# keystone
-identity_hosts:
- controller00:
- ip: 172.29.236.11
-
-# cinder api services
-storage-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# glance
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-image_hosts:
- controller00:
- ip: 172.29.236.11
- container_vars:
- limit_container_types: glance
- glance_nfs_client:
- - server: "172.29.244.12"
- remote_path: "/images"
- local_path: "/var/lib/glance/images"
- type: "nfs"
- options: "_netdev,auto"
-
-# nova api, conductor, etc services
-compute-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# heat
-orchestration_hosts:
- controller00:
- ip: 172.29.236.11
-
-# horizon
-dashboard_hosts:
- controller00:
- ip: 172.29.236.11
-
-# neutron server, agents (L3, etc)
-network_hosts:
- controller00:
- ip: 172.29.236.11
-
-# nova hypervisors
-compute_hosts:
- compute00:
- ip: 172.29.236.12
- compute01:
- ip: 172.29.236.13
-
-# cinder volume hosts (NFS-backed)
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-storage_hosts:
- controller00:
- ip: 172.29.236.11
- container_vars:
- cinder_backends:
- limit_container_types: cinder_volume
- nfs_volume:
- volume_backend_name: NFS_VOLUME1
- volume_driver: cinder.volume.drivers.nfs.NfsDriver
- nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
- nfs_shares_config: /etc/cinder/nfs_shares
- shares:
- - ip: "172.29.244.12"
- share: "/volumes"
diff --git a/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/user_variables_os-odl-nofeature.yml b/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/user_variables_os-odl-nofeature.yml
deleted file mode 100644
index 403d372c..00000000
--- a/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/user_variables_os-odl-nofeature.yml
+++ /dev/null
@@ -1,39 +0,0 @@
----
-# Copyright (c) 2017 Ericsson AB and others.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# ##
-# ## This file contains commonly used overrides for convenience. Please inspect
-# ## the defaults for each role to find additional override options.
-# ##
-
-# Ensure the openvswitch kernel module is loaded
-openstack_host_specific_kernel_modules:
- - name: "openvswitch"
- pattern: "CONFIG_OPENVSWITCH"
- group: "network_hosts"
-
-# Use OpenDaylight SDN Controller
-neutron_plugin_type: "ml2.opendaylight"
-neutron_opendaylight_conf_ini_overrides:
- ml2_odl:
- username: "admin"
- password: "admin"
- port_binding_controller: "pseudo-agentdb-binding"
- url: "http://{{ internal_lb_vip_address }}:8180/controller/nb/v2/neutron"
-
-neutron_ml2_drivers_type: "flat,vlan,vxlan"
-
-neutron_plugin_base:
- - odl-router_v2 \ No newline at end of file
diff --git a/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/tasks/main.yml b/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/tasks/main.yml
deleted file mode 100644
index 61d31a7f..00000000
--- a/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/tasks/main.yml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-# SPDX-license-identifier: Apache-2.0
-##############################################################################
-# Copyright (c) 2017 Ericsson AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-- name: copy user_variables_os-odl-nofeature.yml
- copy:
- src: "user_variables_os-odl-nofeature.yml"
- dest: "{{OPENSTACK_OSA_ETC_PATH}}/user_variables_os-odl-nofeature.yml"
-
-- name: copy user_variables_os-odl-nofeature-ha.yml
- copy:
- src: "{{XCI_FLAVOR}}/user_variables_os-odl-nofeature-ha.yml"
- dest: "{{OPENSTACK_OSA_ETC_PATH}}/user_variables_os-odl-nofeature-ha.yml"
- when:
- - XCI_FLAVOR == "ha"
-
-- name: copy os-odl-nofeature scenario specific openstack_user_config.yml
- copy:
- src: "{{XCI_FLAVOR}}/openstack_user_config.yml"
- dest: "{{OPENSTACK_OSA_ETC_PATH}}/openstack_user_config.yml" \ No newline at end of file
diff --git a/xci/scenarios/os-odl-nofeature/xci_overrides b/xci/scenarios/os-odl-nofeature/xci_overrides
deleted file mode 100644
index 2c65df0d..00000000
--- a/xci/scenarios/os-odl-nofeature/xci_overrides
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/bash
-
-if [[ $DEPLOY_SCENARIO == "os-odl-nofeature" ]] && [[ $XCI_FLAVOR == "ha" ]]; then
- export VM_MEMORY_SIZE=20480
-elif [[ $DEPLOY_SCENARIO == "os-odl-nofeature" ]]; then
- export VM_MEMORY_SIZE=16384
-fi
diff --git a/xci/scripts/update-osa-version-files.sh b/xci/scripts/update-osa-version-files.sh
index a8b0cefc..bb0d82ab 100755
--- a/xci/scripts/update-osa-version-files.sh
+++ b/xci/scripts/update-osa-version-files.sh
@@ -35,6 +35,11 @@ printme() {
# Need one or two arguments
[[ $# -lt 1 || $# -gt 2 ]] && echo "Invalid number of arguments!" && usage
+ironic_git_url=https://github.com/openstack/ironic
+ironic_client_git_url=https://github.com/openstack/python-ironicclient
+ironic_inspector_git_url=https://github.com/openstack/ironic-inspector
+ironic_inspector_client_git_url=https://github.com/openstack/python-ironic-inspector-client
+
tempdir="$(mktemp -d)"
trap cleanup EXIT
@@ -71,12 +76,22 @@ cat $tempdir/openstack-ansible/ansible-role-requirements.yml >> $releng_xci_base
# Update the pinned OSA version
sed -i -e "/^export OPENSTACK_OSA_VERSION/s@:-\"[a-z0-9]*@:-\"${1}@" \
- -e "s/\(^# HEAD of osa.*of \).*/\1$(date +%d\.%m\.%Y)/" $releng_xci_base/config/pinned-versions
+ -e "s@\(^# HEAD of osa \).*@\1\"${OPENSTACK_OSA_VERSION:-master}\" as of $(date +%d\.%m\.%Y)@" $releng_xci_base/config/pinned-versions
# Update the pinned bifrost version
-[[ -n ${2:-} ]] && \
- sed -i -e "/^export OPENSTACK_BIFROST_VERSION/s@:-\"[a-z0-9]*@:-\"${2}@" \
- -e "s/\(^# HEAD of bifrost.*of \).*/\1$(date +%d\.%m\.%Y)/" $releng_xci_base/config/pinned-versions
+if [[ -n ${2:-} ]]; then
+ echo "Updating bifrost..."
+ sed -i -e "/^export OPENSTACK_BIFROST_VERSION/s@:-\"[a-z0-9]*@:-\"${2}@" \
+ -e "s/\(^# HEAD of bifrost \).*/\1\"${OPENSTACK_OSA_VERSION:-master}\" as of $(date +%d\.%m\.%Y)/" $releng_xci_base/config/pinned-versions
+ # Get ironic shas
+ for ironic in ironic_git_url ironic_client_git_url ironic_inspector_git_url ironic_inspector_client_git_url; do
+ ironic_sha=$(git ls-remote ${!ironic} | grep "${OPENSTACK_OSA_VERSION:-master}" | awk '{print $1}')
+ ironic=${ironic/_git*/}
+ echo "... updating ${ironic}"
+ sed -i -e "/^export BIFROST_${ironic^^}_VERSION/s@:-\"[a-z0-9]*@:-\"${ironic_sha}@" \
+ -e "s/\(^# HEAD of ${ironic/_/-} \).*/\1\"${OPENSTACK_OSA_VERSION:-master}\" as of $(date +%d\.%m\.%Y)/" $releng_xci_base/config/pinned-versions
+ done
+fi
cp $tempdir/openstack-ansible/playbooks/defaults/repo_packages/openstack_services.yml ${releng_xci_base}/installer/osa/files/.
cp $tempdir/openstack-ansible/global-requirement-pins.txt ${releng_xci_base}/installer/osa/files/.
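Note: the ironic pinning loop above resolves each branch name to a commit SHA with `git ls-remote`, using bash indirect expansion (${!ironic}) to dereference the URL variable and ${ironic^^} to upper-case it for the export name. A minimal sketch of the same resolution step, assuming the tracked branch is master:

    #!/bin/bash
    # Resolve the tip commit of a branch without cloning the repository.
    branch="${OPENSTACK_OSA_VERSION:-master}"
    ironic_git_url=https://github.com/openstack/ironic
    # Anchoring on refs/heads/ avoids accidentally matching tag refs,
    # which a bare grep on the branch name could do.
    ironic_sha=$(git ls-remote "${ironic_git_url}" "refs/heads/${branch}" | awk '{print $1}')
    echo "ironic pinned to ${ironic_sha}"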
diff --git a/xci/scripts/vm/start-new-vm.sh b/xci/scripts/vm/start-new-vm.sh
index 0af2a359..965cfe4c 100755
--- a/xci/scripts/vm/start-new-vm.sh
+++ b/xci/scripts/vm/start-new-vm.sh
@@ -26,13 +26,16 @@ export XCI_BUILD_CLEAN_VM_OS=${XCI_BUILD_CLEAN_VM_OS:-true}
# ones.
export XCI_UPDATE_CLEAN_VM_OS=${XCI_UPDATE_CLEAN_VM_OS:-false}
+# IP of the OPNFV VM so we can remove it from known_hosts
+OPNFV_VM_IP=192.168.122.2
+
grep -q -i ^Y$ /sys/module/kvm_intel/parameters/nested || { echo "Nested virtualization is not enabled but it's needed for XCI to work"; exit 1; }
destroy_vm_on_failures() {
local exit_err=${xci_error:-130}
if ! ${XCI_KEEP_CLEAN_VM_ON_FAILURES}; then
- sudo virsh destroy ${VM_NAME}_xci_vm
- sudo virsh undefine ${VM_NAME}_xci_vm
+ sudo virsh destroy ${VM_NAME}_xci_vm || true
+ sudo virsh undefine ${VM_NAME}_xci_vm || true
fi
exit $exit_err
}
@@ -53,9 +56,10 @@ update_clean_vm_files() {
local image_remote="${opnfv_url}/${OS}.qcow2"
get_new_vm_files() {
+ echo "Downloading new ${OS} images from ${opnfv_url}"
rm -rf ${vm_cache}/${OS}*
- wget --quiet ${image_remote}
- wget --quiet ${sha_remote}
+ curl -O -s --retry 10 ${image_remote}
+ curl -O -s --retry 10 ${sha_remote}
}
# There are 3 reasons why we want to fetch files from the GS storage
@@ -68,7 +72,7 @@ update_clean_vm_files() {
sha_local=$(awk '{print $1}' $shafile)
if $XCI_UPDATE_CLEAN_VM_OS; then
echo "Updating local copies of ${OS}..."
- ! curl -s ${sha_remote} | grep -q ${sha_local} && \
+ ! curl --retry 10 -s ${sha_remote} | grep -q ${sha_local} && \
get_new_vm_files
fi
echo "Verifying integrity of ${OS} files..."
@@ -83,7 +87,7 @@ update_clean_vm_files() {
[[ $# -ne 1 ]] && usage && exit 1
-declare -r CPU=${XCI_CPU_TYPE:-host-passthrough}
+declare -r CPU=${XCI_CPU_TYPE:-host-model}
declare -r NCPUS=${XCI_NCPUS:-24}
declare -r MEMORY=${XCI_MEMORY_SIZE:-65536}
declare -r DISK=${XCI_DISK_SIZE:-500}
@@ -100,7 +104,6 @@ sudo virsh destroy ${VM_NAME} || true
sudo virsh undefine ${VM_NAME} || true
source /etc/os-release
-echo "Installing host (${ID,,}) dependencies..."
# check we can run sudo
if ! sudo -n "true"; then
echo ""
@@ -112,31 +115,36 @@ if ! sudo -n "true"; then
exit 1
fi
-# Wait 30-120 seconds so we avoid running multiple instances of pkg manager. Of course
-# this will not work as it should if there is an external process running a package
-# manager instance. However, since this script is only being execute on CI nodes which
-# we have complete control it should be mostly fine.
-backoff_time=0
-while [[ ${backoff_time} -le 30 ]]; do
- backoff_time=$(( $RANDOM % 120 ))
-done
+COMMON_DISTRO_PKGS=(vim strace gdb htop dnsmasq docker iptables ebtables virt-manager qemu-kvm)
case ${ID,,} in
- *suse)
- pkg_mgr_cmd="sudo zypper -q -n install virt-manager qemu-kvm qemu-tools libvirt-daemon docker libvirt-client libvirt-daemon-driver-qemu iptables ebtables dnsmasq"
+ *suse*)
+ pkg_mgr_cmd="sudo zypper -q -n install ${COMMON_DISTRO_PKGS[@]} qemu-tools libvirt-daemon libvirt-client libvirt-daemon-driver-qemu > /dev/null"
;;
centos)
- pkg_mgr_cmd="sudo yum install -q -y epel-release && sudo yum install -q -y in virt-manager qemu-kvm qemu-kvm-tools qemu-img libvirt-daemon-kvm docker iptables ebtables dnsmasq"
+ pkg_mgr_cmd="sudo yum install -C -q -y epel-release > /dev/null"
+ pkg_mgr_cmd+=" && sudo yum install -C -q -y ${COMMON_DISTRO_PKGS[@]} qemu-kvm-tools qemu-img libvirt-daemon-kvm > /dev/null"
;;
ubuntu)
- pkg_mgr_cmd="sudo apt-get install -y -q=3 virt-manager qemu-kvm libvirt-bin qemu-utils docker.io docker iptables ebtables dnsmasq"
+ pkg_mgr_cmd="sudo apt-get install --no-upgrade -y -q=3 ${COMMON_DISTRO_PKGS[@]} libvirt-bin qemu-utils docker.io > /dev/null"
;;
esac
-if pgrep -fa "${pkg_mgr_cmd%*install*}" 2>&1; then
- sleep ${backoff_time}
-fi
+echo "Checking for running package manager instance..."
+while true; do
+ _pkg_mgr_proc=$(pgrep -f "${pkg_mgr_cmd%*install*}" | cat)
+ if [[ -n ${_pkg_mgr_proc} ]]; then
+ echo "Wainting for process ${_pkg_mgr_proc} to finish..."
+ sleep 60
+ else
+ break
+ fi
+done
+
+echo "Installing host (${ID,,}) dependencies..."
+set +e
eval ${pkg_mgr_cmd}
+set -e
echo "Ensuring libvirt and docker services are running..."
sudo systemctl -q start libvirtd
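Note: the loop added above waits for any in-flight package-manager process before installing, replacing the old randomized backoff with deterministic polling; piping pgrep through `cat` keeps its non-zero "no match" status from tripping errexit. A standalone sketch of the pattern, with a hypothetical manager name:

    #!/bin/bash
    set -o errexit
    pkg_mgr="zypper"
    # pgrep exits non-zero when nothing matches; '| cat' masks that
    # status so errexit does not abort the script on an empty result.
    while proc=$(pgrep -f "${pkg_mgr}" | cat); [[ -n ${proc} ]]; do
        echo "Waiting for process ${proc} to finish..."
        sleep 60
    done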
@@ -168,20 +176,23 @@ else
update_clean_vm_files
fi
+declare -r XCI_DEPLOYMENT_IMAGE="deployment_image.qcow2"
+
# Doesn't matter if we just built an image or got one from artifacts. In both
# cases there should be a copy in the cache so copy it over.
sudo rm -f ${BASE_PATH}/${OS}.qcow2
# Fix perms again...
sudo chmod 777 -R $XCI_CACHE_DIR/clean_vm/images/
sudo chown $uid:$gid -R $XCI_CACHE_DIR/clean_vm/images/
-cp ${XCI_CACHE_DIR}/clean_vm/images/${OS}.qcow2 ${BASE_PATH}/
+cp ${XCI_CACHE_DIR}/clean_vm/images/${OS}.qcow2* ${BASE_PATH}/
+cp ${XCI_CACHE_DIR}/clean_vm/images/${OS}.qcow2.sha256.txt ${BASE_PATH}/${XCI_DEPLOYMENT_IMAGE}.sha256.txt
+cp ${XCI_CACHE_DIR}/clean_vm/images/${OS}.qcow2 ${BASE_PATH}/${XCI_DEPLOYMENT_IMAGE}
+
+cd ${BASE_PATH}
declare -r OS_IMAGE_FILE=${OS}.qcow2
[[ ! -e ${OS_IMAGE_FILE} ]] && echo "${OS_IMAGE_FILE} not found! This should never happen!" && exit 1
-echo "Resizing disk image '${OS}' to ${DISK}G..."
-qemu-img resize ${OS_IMAGE_FILE} ${DISK}G
-
echo "Creating new network '${NETWORK}' if it does not exist already..."
if ! sudo virsh net-list --name --all | grep -q ${NETWORK}; then
cat > /tmp/${NETWORK}.xml <<EOF
@@ -206,9 +217,28 @@ fi
sudo virsh net-list --autostart | grep -q ${NETWORK} || sudo virsh net-autostart ${NETWORK}
sudo virsh net-list --inactive | grep -q ${NETWORK} && sudo virsh net-start ${NETWORK}
+echo "Determining backend storage device..."
+if sudo vgscan | grep -q xci-vm-vg; then
+ echo "Using LVM backend..."
+ lv_dev="/dev/xci-vm-vg/xci-vm-${OS}"
+ echo "Creating new xci-vm-${OS} LV if necessary..."
+ sudo lvscan | grep -q xci-vm-${OS} || {
+ sudo lvcreate -W y -l 33%FREE -n xci-vm-${OS} xci-vm-vg
+ sudo mkfs.ext4 -m 0 ${lv_dev}
+ }
+ echo "Flusing the ${OS_IMAGE_FILE} image to ${lv_dev}..."
+ sudo qemu-img convert -O raw ${OS_IMAGE_FILE} ${lv_dev}
+ disk_config="${lv_dev},cache=unsafe,io=threads,bus=virtio"
+else
+ echo "Using file backend..."
+ echo "Resizing disk image '${OS}' to ${DISK}G..."
+ qemu-img resize ${OS_IMAGE_FILE} ${DISK}G
+ disk_config="${OS_IMAGE_FILE},cache=unsafe,io=threads,bus=virtio"
+fi
+
echo "Installing virtual machine '${VM_NAME}'..."
sudo virt-install -n ${VM_NAME} --memory ${MEMORY} --vcpus ${NCPUS} --cpu ${CPU} \
- --import --disk=${OS_IMAGE_FILE},cache=none,bus=virtio --network network=${NETWORK},model=virtio \
+ --import --disk=${disk_config} --network network=${NETWORK},model=virtio \
--graphics none --hvm --noautoconsole
trap destroy_vm_on_failures EXIT
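Note: the storage logic above prefers a raw logical volume when an xci-vm-vg volume group is present and falls back to a resized qcow2 file otherwise. A condensed sketch of the decision, assuming the same VG naming:

    #!/bin/bash
    OS=${OS:-ubuntu}
    if sudo vgscan | grep -q xci-vm-vg; then
        # Raw LV backend: flatten the qcow2 image onto the block device.
        lv_dev="/dev/xci-vm-vg/xci-vm-${OS}"
        sudo qemu-img convert -O raw "${OS}.qcow2" "${lv_dev}"
        disk_config="${lv_dev},cache=unsafe,io=threads,bus=virtio"
    else
        # File backend: grow the image in place instead.
        qemu-img resize "${OS}.qcow2" "${DISK:-500}G"
        disk_config="${OS}.qcow2,cache=unsafe,io=threads,bus=virtio"
    fi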
@@ -230,8 +260,9 @@ done
# Fix up perms if needed to make ssh happy
chmod 600 ${BASE_PATH}/xci/scripts/vm/id_rsa_for_dib*
# Remove it from known_hosts
-ssh-keygen -R $_ip || true
-ssh-keygen -R ${VM_NAME} || true
+for item in $_ip ${VM_NAME} ${OPNFV_VM_IP} ${VM_NAME}_opnfv; do
+ ssh-keygen -R $item || true
+done
# Initial ssh command until we setup everything
vm_ssh="ssh -o StrictHostKeyChecking=no -i ${BASE_PATH}/xci/scripts/vm/id_rsa_for_dib -l devuser"
@@ -258,26 +289,30 @@ echo "Adding ${VM_NAME} entry to /etc/hosts"
sudo sed -i "/.*${VM_NAME}.*/d" /etc/hosts
sudo bash -c "echo '${_ip} ${VM_NAME}' >> /etc/hosts"
+# remove the old ssh xci-vm-config that used a non-distro-specific filename
+/bin/rm -f $HOME/.ssh/xci-vm-config
+
echo "Dropping a minimal .ssh/config file"
-cat > $HOME/.ssh/xci-vm-config<<EOF
+cat > $HOME/.ssh/${OS}-xci-vm-config<<EOF
Host *
StrictHostKeyChecking no
ServerAliveInterval 60
ServerAliveCountMax 5
IdentityFile ${BASE_PATH}/xci/scripts/vm/id_rsa_for_dib
-Host *_xci_vm
+Host ${OS}_xci_vm
User devuser
-Host *_xci_vm_opnfv
+Host ${OS}_xci_vm_opnfv
+Hostname 192.168.122.2
User root
TCPKeepAlive yes
StrictHostKeyChecking no
-ProxyCommand ssh -l devuser \$(echo %h | sed 's/_opnfv//') 'nc 192.168.122.2 %p'
+ProxyCommand ssh -l devuser -i ${BASE_PATH}/xci/scripts/vm/id_rsa_for_dib ${OS}_xci_vm -W %h:%p
EOF
# Final ssh command which will also test the configuration file
-declare -r vm_ssh="ssh -F $HOME/.ssh/xci-vm-config"
+declare -r vm_ssh="ssh -F $HOME/.ssh/${OS}-xci-vm-config"
echo "Preparing test environment..."
# *_xci_vm hostname is invalid. Let's just use the distro name
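Note: the rewritten ProxyCommand reaches the nested OPNFV VM with ssh's built-in `-W host:port` stdio forwarding instead of spawning `nc` on the intermediate VM. On a reasonably recent OpenSSH the same two-hop path can also be expressed ad hoc (hypothetical distro name):

    # One-off equivalent of the generated config entry.
    ssh -i ${BASE_PATH}/xci/scripts/vm/id_rsa_for_dib \
        -J devuser@ubuntu_xci_vm root@192.168.122.2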
@@ -296,12 +331,13 @@ EOF
# Need to copy releng-xci to the vm so we can execute stuff
do_copy() {
- rsync -a \
- --exclude "${VM_NAME}*" \
- --exclude "${OS}*" \
- --exclude "build.log" \
- --exclude "*.qcow2*" \
- -e "$vm_ssh" ${BASE_PATH}/ ${VM_NAME}:~/releng-xci/
+ echo "Copying releng-xci host folder to guest vm..."
+ rsync -a \
+ --exclude "${VM_NAME}*" \
+ --include "${XCI_DEPLOYMENT_IMAGE}*" \
+ --exclude "*qcow2*" \
+ --exclude "build.log" \
+ -e "$vm_ssh" ${BASE_PATH}/ ${VM_NAME}:~/releng-xci/
}
do_copy
@@ -311,6 +347,12 @@ rm ${BASE_PATH}/vm_hosts.txt
$vm_ssh ${VM_NAME} "cp --preserve=all ~/releng-xci/xci/scripts/vm/id_rsa_for_dib /home/devuser/.ssh/id_rsa"
$vm_ssh ${VM_NAME} "cp --preserve=all ~/releng-xci/xci/scripts/vm/id_rsa_for_dib.pub /home/devuser/.ssh/id_rsa.pub"
$vm_ssh ${VM_NAME} "sudo mv /home/devuser/releng-xci/vm_hosts.txt /etc/hosts"
+# Disable 3-level nested virtualization since it makes things terribly slow
+$vm_ssh ${VM_NAME} "sudo bash -c 'echo \"options kvm_intel nested=0\" > /etc/modprobe.d/qemu-system-x86.conf'"
+$vm_ssh ${VM_NAME} "sudo modprobe -r kvm_intel && sudo modprobe -a kvm_intel"
+$vm_ssh ${VM_NAME} "sudo bash -c 'mkdir -p /root/.ssh && cat /home/devuser/.ssh/id_rsa.pub > /root/.ssh/authorized_keys'"
+$vm_ssh ${VM_NAME} "sudo bash -c 'mkdir -p /var/lib/libvirt/images'"
+rsync -a -e "$vm_ssh" --include "${BASE_PATH}/${XCI_DEPLOYMENT_IMAGE}*" --exclude '*' root@${VM_NAME}:/var/lib/libvirt/images/
set +e
@@ -319,13 +361,15 @@ echo "Verifying test script exists..."
$vm_ssh ${VM_NAME} "bash -c 'stat ~/releng-xci/run_jenkins_test.sh'"
if [[ $? != 0 ]]; then
echo "Failed to find a 'run_jenkins_test.sh' script..."
- if ${DEFAULT_XCI_TEST}; then
+ if [[ ${DEFAULT_XCI_TEST} == true ]]; then
echo "Creating a default test case to run xci-deploy.sh"
cat > ${BASE_PATH}/run_jenkins_test.sh <<EOF
#!/bin/bash
+set -o pipefail
export XCI_FLAVOR=mini
+export BIFROST_CREATE_IMAGE_VIA_DIB=false
cd ~/releng-xci/xci
-./xci-deploy.sh
+./xci-deploy.sh | ts
EOF
# Copy again
do_copy
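Note: the default test case pipes the deployment through `ts` (from moreutils) to timestamp every log line, and `set -o pipefail` keeps the pipeline's exit status honest: without it a failing xci-deploy.sh would be masked by ts exiting 0. A minimal illustration:

    #!/bin/bash
    set -o pipefail
    # Without pipefail this pipeline would report status 0.
    false | ts
    echo "pipeline status: $?"   # non-zero with pipefail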
diff --git a/xci/var/Debian.yml b/xci/var/Debian.yml
index bd07473a..5ea7a885 100644
--- a/xci/var/Debian.yml
+++ b/xci/var/Debian.yml
@@ -7,8 +7,3 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# this is the interface the VM nodes are connected to libvirt network "default"
-interface: "ens3"
-python_crypto_package_name: python-crypto
-docker_package_name: docker.io
-docker_service_name: docker
diff --git a/xci/var/RedHat.yml b/xci/var/RedHat.yml
index 814d060e..5ea7a885 100644
--- a/xci/var/RedHat.yml
+++ b/xci/var/RedHat.yml
@@ -7,8 +7,3 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# this is placeholder and left blank intentionally to complete later on
-interface: "ens3"
-python_crypto_package_name: python-crypto
-docker_package_name: docker
-docker_service_name: docker
diff --git a/xci/var/Suse.yml b/xci/var/Suse.yml
index a041e18b..5ea7a885 100644
--- a/xci/var/Suse.yml
+++ b/xci/var/Suse.yml
@@ -7,8 +7,3 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# this is the interface the VM nodes are connected to libvirt network "default"
-interface: "eth0"
-python_crypto_package_name: python-pycrypto
-docker_package_name: docker
-docker_service_name: docker
diff --git a/xci/var/ericsson-pod2-idf.yml b/xci/var/ericsson-pod2-idf.yml
new file mode 100644
index 00000000..2839b120
--- /dev/null
+++ b/xci/var/ericsson-pod2-idf.yml
@@ -0,0 +1,187 @@
+##############################################################################
+# Copyright (c) 2018 Ericsson AB, Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+### ERICSSON POD 2 installer descriptor file ###
+idf:
+ version: 0.1
+ installer: ['apex', 'compass4nfv', 'daisy', 'osa', 'osh']
+ net_config: &net_config
+ admin:
+ interface: 2
+ network: 192.168.122.0 # Untagged, 'PXE/Admin' on wiki, different IP
+ mask: 24
+ mgmt:
+ interface: 1
+ network: 172.29.236.0
+ mask: 22
+ storage:
+ interface: 3
+ network: 172.29.240.0 # Tagged, not the same as 'storage' on wiki
+ mask: 22
+ private:
+ interface: 4
+ network: 172.29.242.0 # Tagged, not the same as 'private' on wiki
+ mask: 22
+ public:
+ interface: 2
+ network: 192.168.122.0 # Untagged, 'public' on wiki
+ mask: 24
+ gateway: 192.168.122.1
+ dns:
+ - 8.8.8.8
+ - 8.8.4.4
+ osa: &idf_osa
+ nodes_roles:
+ opnfv: [deployment]
+ node1: [controller]
+ node2: [compute, storage]
+ node3: [compute, storage]
+ node4: [controller]
+ node5: [controller]
+ groups:
+ openstack:
+ - controller
+ - compute
+ - storage
+ hostnames:
+ opnfv: opnfv
+ node1: controller00
+ node2: compute00
+ node3: compute01
+ node4: controller01
+ node5: controller02
+ network:
+ # network mapping
+ network_mapping:
+ # Management network used by installer components to communicate
+ net-mgmt: admin
+ # Storage Network
+ net-storage: storage
+ # Internal network for communication between VNF
+ net-internal: private
+ # Public network for VNF remote access (ext-net in OpenStack)
+ net-vnf: public
+ deployment_host_interfaces:
+ # Ordered-list, index should be in sync with interface index in PDF
+ - 'ens1f1' #should be eno49 but it is currently broken
+ - 'ens1f0'
+ - 'ens1f1'
+ - 'ens2f0'
+ - 'ens2f1'
+ osh: &idf_osh
+ nodes_roles:
+ opnfv: [opnfv]
+ node1: [kube-master, etcd, vault]
+ node2: [kube-node]
+ node3: [kube-node]
+ node4: [kube-master, etcd, vault]
+ node5: [kube-master, etcd, vault]
+ groups:
+ k8s-cluster:
+ - kube-node
+ - kube-master
+ hostnames:
+ opnfv: opnfv
+ node1: master1
+ node2: node1
+ node3: node2
+ node4: master2
+ node5: master3
+ network:
+ # network mapping
+ network_mapping:
+ # Management network used by installer components to communicate
+ net-mgmt: admin
+ # Storage Network
+ net-storage: storage
+ # Internal network for communication between VNF
+ net-internal: private
+ # Public network for VNF remote access (ext-net in OpenStack)
+ net-vnf: public
+ deployment_host_interfaces:
+ # Ordered-list, index should be in sync with interface index in PDF
+ - 'ens1f1' #should be eno49 but it is currently broken
+ - 'ens1f0'
+ - 'ens1f1'
+ - 'ens2f0'
+ - 'ens2f1'
+ kubespray: &idf_kubespray
+ nodes_roles:
+ opnfv: [opnfv]
+ node1: [kube-master, etcd, vault]
+ node2: [kube-node]
+ node3: [kube-node]
+ node4: [kube-master, etcd, vault]
+ node5: [kube-master, etcd, vault]
+ groups:
+ k8s-cluster:
+ - kube-node
+ - kube-master
+ hostnames:
+ opnfv: opnfv
+ node1: master1
+ node2: node1
+ node3: node2
+ node4: master2
+ node5: master3
+ network:
+ # network mapping
+ network_mapping:
+ # Management network used by installer components to communicate
+ net-mgmt: admin
+ # Storage Network
+ net-storage: storage
+ # Internal network for communication between VNF
+ net-internal: private
+ # Public network for VNF remote access (ext-net in OpenStack)
+ net-vnf: public
+ deployment_host_interfaces:
+ # Ordered-list, index should be in sync with interface index in PDF
+ - 'ens1f1' #should be eno49 but it is currently broken
+ - 'ens1f0'
+ - 'ens1f1'
+ - 'ens2f0'
+ - 'ens2f1'
+
+xci:
+ pod_name: pod1
+ net_config: *net_config
+ flavors:
+ mini:
+ - opnfv
+ - node1
+ - node2
+ noha:
+ - opnfv
+ - node1
+ - node2
+ - node3
+ ha:
+ - opnfv
+ - node1
+ - node2
+ - node3
+ - node4
+ - node5
+
+ # net_config network to be used by the PXE
+ pxe_network: public
+
+ # Since the MAC addresses of generated bridges are random, we use a list
+ # of local bridges to create the libvirt networks
+ jumphost_interfaces_bridges:
+ - name: virbr0
+ ip: 192.168.122.1
+
+ extra_addresses:
+ opnfv: 192.168.122.2
+
+ installers:
+ osa: *idf_osa
+ kubespray: *idf_kubespray
+ osh: *idf_osh
diff --git a/xci/var/ericsson-pod2-pdf.yml b/xci/var/ericsson-pod2-pdf.yml
new file mode 100644
index 00000000..4c7271ec
--- /dev/null
+++ b/xci/var/ericsson-pod2-pdf.yml
@@ -0,0 +1,269 @@
+---
+### POD descriptor file ###
+
+version: 1.0
+details:
+ pod_owner: Jose Lausuch
+ contact: jose.lausuch@ericsson.com
+ lab: Ericsson
+ location: Rosersberg, Sweden
+ type: production
+ link: https://wiki.opnfv.org/display/pharos/CI-ERICSSON-POD2
+##############################################################################
+jumphost:
+ name: CI-POD2-HOST
+ node: &nodeparams
+ type: baremetal
+ vendor: HP
+ model: ProLiant BL460c Gen9
+ arch: x86_64
+ cpus: 2
+ cpu_cflags: haswell
+ cores: 12
+ memory: 128G
+ disks: &disks
+ - name: 'disk1'
+ disk_capacity: 1200G
+ disk_type: hdd
+ disk_interface: scsi
+ disk_rotation: 15000
+ os: ubuntu-16.04
+ remote_params: &remoteparas
+ type: ipmi
+ versions:
+ - 1.0
+ - 2.0
+ user: opnfv
+ pass: Winter2017
+ remote_management:
+ <<: *remoteparas
+ address: 172.16.2.11
+ mac_address: "58:20:B1:01:8A:F2"
+ interfaces:
+ - name: 'nic0'
+ speed: 1gb
+ features: 'dpdk|sriov'
+ address: 172.16.2.11
+ mac_address: "ec:b1:d7:a1:a1:10"
+ vlan: native
+ - name: 'nic1'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ address: 172.29.236.10
+ mac_address: "5c:b9:01:8b:9f:e8"
+ vlan: native
+ - name: 'nic2'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ address: 192.168.122.2
+ mac_address: "5c:b9:01:8b:9f:e9"
+ vlan: native
+ - name: 'nic3'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ address: 172.29.240.10
+ mac_address: "5c:b9:01:8b:9f:ec"
+ vlan: 3010
+ - name: 'nic4'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ address: 172.29.242.10
+ mac_address: "5c:b9:01:8b:9f:ed"
+ vlan: 3010
+##############################################################################
+nodes:
+ - name: node1
+ node: *nodeparams
+ disks: *disks
+ remote_management:
+ <<: *remoteparas
+ address: 172.16.2.12
+ mac_address: "58:20:B1:01:8B:F0"
+ interfaces:
+ - name: 'nic0'
+ speed: 1gb
+ features: 'dpdk|sriov'
+ mac_address: "ec:b1:d7:a2:44:a0"
+ address: "192.168.122.3"
+ vlan: native
+ - name: 'nic1'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:a6:94"
+ address: "172.29.236.11"
+ vlan: native
+ - name: 'nic2'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:a6:95"
+ address: "192.168.122.3"
+ vlan: native
+ - name: 'nic3'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:a6:80"
+ address: "172.29.240.11"
+ vlan: 3010
+ - name: 'nic4'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:a6:81"
+ address: "172.29.242.11"
+ vlan: 3010
+ ############################################################################
+ - name: node2
+ node: *nodeparams
+ disks: *disks
+ remote_management:
+ <<: *remoteparas
+ address: 172.16.2.13
+ mac_address: "58:20:B1:01:8E:FC"
+ interfaces:
+ - name: 'nic0'
+ speed: 1gb
+ features: 'dpdk|sriov'
+ mac_address: "ec:b1:d7:a2:44:80"
+ address: "192.168.122.4"
+ vlan: native
+ - name: 'nic1'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:a6:30"
+ address: "172.29.236.12"
+ vlan: native
+ - name: 'nic2'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:a6:31"
+ address: "192.168.122.4"
+ vlan: native
+ - name: 'nic3'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:99:64"
+ address: "172.29.240.12"
+ vlan: 3010
+ - name: 'nic4'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:99:65"
+ address: "172.29.242.12"
+ vlan: 3010
+ ############################################################################
+ - name: node3
+ node: *nodeparams
+ disks: *disks
+ remote_management:
+ <<: *remoteparas
+ address: 172.16.2.14
+ mac_address: "58:20:B1:01:8D:32"
+ interfaces:
+ - name: 'nic0'
+ speed: 1gb
+ features: 'dpdk|sriov'
+ mac_address: "ec:b1:d7:a2:43:c0"
+ address: "192.168.122.5"
+ vlan: native
+ - name: 'nic1'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:9d:4c"
+ address: "172.29.236.13"
+ vlan: native
+ - name: 'nic2'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:9d:4d"
+ address: "192.168.122.5"
+ vlan: native
+ - name: 'nic3'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:9d:6c"
+ address: "172.29.240.13"
+ vlan: 3010
+ - name: 'nic4'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:9d:6d"
+ address: "172.29.242.13"
+ vlan: 3010
+ ############################################################################
+ - name: node4
+ node: *nodeparams
+ disks: *disks
+ remote_management:
+ <<: *remoteparas
+ address: 172.16.2.15
+ mac_address: "58:20:B1:01:8B:FC"
+ interfaces:
+ - name: 'nic0'
+ speed: 1gb
+ features: 'dpdk|sriov'
+ mac_address: "ec:b1:d7:a1:8b:d0"
+ address: "192.168.122.6"
+ vlan: native
+ - name: 'nic1'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:a5:fc"
+ address: "172.29.236.14"
+ vlan: native
+ - name: 'nic2'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:a5:fd"
+ address: "192.168.122.6"
+ vlan: native
+ - name: 'nic3'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:a6:08"
+ address: "172.29.240.14"
+ vlan: 3010
+ - name: 'nic4'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:a6:09"
+ address: "172.29.242.14"
+ vlan: 3010
+ ############################################################################
+ - name: node5
+ node: *nodeparams
+ disks: *disks
+ remote_management:
+ <<: *remoteparas
+ address: 172.16.2.16
+ mac_address: "58:20:B1:01:8F:EA"
+ interfaces:
+ - name: 'nic0'
+ speed: 1gb
+ features: 'dpdk|sriov'
+ mac_address: "ec:b1:d7:a1:bd:60"
+ address: "192.168.122.7"
+ vlan: native
+ - name: 'nic1'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:a6:e8"
+ address: "172.29.236.15"
+ vlan: native
+ - name: 'nic2'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:a6:e9"
+ address: "192.168.122.7"
+ vlan: native
+ - name: 'nic3'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:97:14"
+ address: "172.29.240.15"
+ vlan: 3010
+ - name: 'nic4'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "5c:b9:01:8b:97:15"
+ address: "172.29.242.15"
+ vlan: 3010
+
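Note: the PDF above relies on YAML anchors (&nodeparams, &disks) and merge keys (<<: *remoteparas) so the five identical blades share a single hardware definition. A quick way to confirm the aliases expand as intended, assuming PyYAML is available:

    # Print node1's resolved CPU count and IPMI user from the PDF.
    python3 -c '
    import yaml
    pdf = yaml.safe_load(open("xci/var/ericsson-pod2-pdf.yml"))
    node = pdf["nodes"][0]
    print(node["node"]["cpus"], node["remote_management"]["user"])
    '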
diff --git a/xci/var/idf.yml b/xci/var/idf.yml
new file mode 100644
index 00000000..8ed55f6f
--- /dev/null
+++ b/xci/var/idf.yml
@@ -0,0 +1,164 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 Orange and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+idf:
+ version: 0.1
+ net_config: &net_config
+ admin:
+ interface: 2
+ network: 192.168.122.0
+ mask: 22
+ mgmt:
+ interface: 0
+ network: 172.29.236.0
+ mask: 22
+ storage:
+ interface: 1
+ network: 172.29.240.0
+ mask: 22
+ public:
+ interface: 2
+ network: 192.168.122.0
+ mask: 24
+ gateway: 192.168.122.1
+ dns:
+ - 192.168.122.1
+ private:
+ interface: 3
+ network: 172.29.244.0
+ mask: 22
+ osa: &idf_osa
+ nodes_roles:
+ opnfv: [deployment]
+ node1: [controller]
+ node2: [compute, storage]
+ node3: [compute, storage]
+ node4: [controller]
+ node5: [controller]
+ groups:
+ openstack:
+ - controller
+ - compute
+ - storage
+ hostnames:
+ opnfv: opnfv
+ node1: controller00
+ node2: compute00
+ node3: compute01
+ node4: controller01
+ node5: controller02
+ network:
+ # network mapping
+ network_mapping:
+ # Management network used by installer components to communicate
+ net-mgmt: mgmt
+ # Storage Network
+ net-storage: storage
+ # Internal network for communication between VNF
+ net-internal: private
+ # Public network for VNF remote access (ext-net in OpenStack)
+ net-vnf: public
+ kubespray: &idf_kubespray
+ nodes_roles:
+ opnfv: [opnfv]
+ node1: [kube-master, etcd, vault]
+ node2: [kube-node]
+ node3: [kube-node]
+ node4: [kube-master, etcd, vault]
+ node5: [kube-master, etcd, vault]
+ groups:
+ k8s-cluster:
+ - kube-node
+ - kube-master
+ hostnames:
+ opnfv: opnfv
+ node1: master1
+ node2: node1
+ node3: node2
+ node4: master2
+ node5: master3
+ network:
+ # network mapping
+ network_mapping:
+ # Management network used by installer components to communicate
+ net-mgmt: mgmt
+ # Storage Network
+ net-storage: storage
+ # Internal network for communication between VNF
+ net-internal: private
+ # Public network for VNF remote access (ext-net in OpenStack)
+ net-vnf: public
+ osh: &idf_osh
+ nodes_roles:
+ opnfv: [opnfv]
+ node1: [kube-master, etcd, vault]
+ node2: [kube-node]
+ node3: [kube-node]
+ node4: [kube-master, etcd, vault]
+ node5: [kube-master, etcd, vault]
+ groups:
+ k8s-cluster:
+ - kube-node
+ - kube-master
+ hostnames:
+ opnfv: opnfv
+ node1: master1
+ node2: node1
+ node3: node2
+ node4: master2
+ node5: master3
+ network:
+ # network mapping
+ network_mapping:
+ # Management network used by installer components to communicate
+ net-mgmt: mgmt
+ # Storage Network
+ net-storage: storage
+ # Internal network for communication between VNF
+ net-internal: private
+ # Public network for VNF remote access (ext-net in OpenStack)
+ net-vnf: public
+xci:
+ pod_name: vpod1
+ net_config: *net_config
+ flavors:
+ mini:
+ - opnfv
+ - node1
+ - node2
+ noha:
+ - opnfv
+ - node1
+ - node2
+ - node3
+ ha:
+ - opnfv
+ - node1
+ - node2
+ - node3
+ - node4
+ - node5
+ # net_config network to be used by the PXE
+ pxe_network: public
+
+ # Since the MAC addresses of generated bridges are random, we use a list
+ # of local bridges to create the libvirt networks
+ jumphost_interfaces_bridges:
+ - name: virbr0
+ ip: 192.168.122.1
+
+ extra_addresses:
+ opnfv: 192.168.122.2
+
+ installers:
+ osa: *idf_osa
+ kubespray: *idf_kubespray
+ osh: *idf_osh
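Note: the xci.flavors map above binds each deployment flavor to the subset of nodes it consumes, so a provisioning wrapper only needs a lookup plus iteration. A hedged sketch of reading it, assuming PyYAML:

    #!/bin/bash
    # List the nodes a given XCI_FLAVOR would provision from the IDF.
    XCI_FLAVOR=${XCI_FLAVOR:-mini}
    python3 -c "
    import yaml
    idf = yaml.safe_load(open('xci/var/idf.yml'))
    print(' '.join(idf['xci']['flavors']['${XCI_FLAVOR}']))
    "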
diff --git a/xci/var/lf-pod4-idf.yml b/xci/var/lf-pod4-idf.yml
new file mode 100644
index 00000000..55ca6b63
--- /dev/null
+++ b/xci/var/lf-pod4-idf.yml
@@ -0,0 +1,222 @@
+##############################################################################
+# Copyright (c) 2018 Linux Foundation, Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+### LF POD 4 installer descriptor file ###
+
+idf:
+ version: 0.1
+ installer: ['apex', 'compass4nfv', 'daisy', 'fuel', 'osa', 'osh']
+ net_config: &net_config
+ oob:
+ interface: 0
+ ip-range: 172.30.8.83-172.30.8.88
+ vlan: 410
+ mask: 24
+ admin:
+ interface: 0
+ vlan: native
+ network: 192.168.122.0
+ gateway: 192.168.122.1
+ dns: 8.8.8.8
+ mask: 24
+ mgmt:
+ interface: 1
+ network: 172.29.236.0
+ mask: 22
+ storage:
+ interface: 3
+ network: 172.29.240.0
+ mask: 24
+ private:
+ interface: 2
+ network: 172.29.242.0
+ mask: 24
+ public:
+ interface: 4
+ network: 192.168.122.0
+ mask: 24
+ gateway: 192.168.122.1
+ dns:
+ - 8.8.8.8
+ - 8.8.4.4
+ osa: &idf_osa
+ nodes_roles:
+ opnfv: [deployment]
+ pod4-node1: [controller]
+ pod4-node2: [compute, storage]
+ pod4-node3: [compute, storage]
+ pod4-node4: [controller]
+ pod4-node5: [controller]
+ groups:
+ openstack:
+ - controller
+ - compute
+ - storage
+ hostnames:
+ opnfv: opnfv
+ pod4-node1: controller00
+ pod4-node2: compute00
+ pod4-node3: compute01
+ pod4-node4: controller01
+ pod4-node5: controller02
+ network:
+ # network mapping
+ network_mapping:
+ # Management network used by installer components to communicate
+ net-mgmt: admin
+ # Storage Network
+ net-storage: storage
+ # Internal network for communication between VNF
+ net-internal: private
+ # Public network for VNF remote access (ext-net in OpenStack)
+ net-vnf: public
+ deployment_host_interfaces:
+ # Ordered-list, index should be in sync with interface index in PDF
+ - 'eno1'
+ - 'eno3.450'
+ osh: &idf_osh
+ nodes_roles:
+ opnfv: [opnfv]
+ pod4-node1: [kube-master, etcd, vault]
+ pod4-node2: [kube-node]
+ pod4-node3: [kube-node]
+ pod4-node4: [kube-master, etcd, vault]
+ pod4-node5: [kube-master, etcd, vault]
+ groups:
+ k8s-cluster:
+ - kube-node
+ - kube-master
+ hostnames:
+ opnfv: opnfv
+ pod4-node1: master1
+ pod4-node2: node1
+ pod4-node3: node2
+ pod4-node4: master2
+ pod4-node5: master3
+ network:
+ # network mapping
+ network_mapping:
+ # Management network used by installer components to communicate
+ net-mgmt: admin
+ # Storage Network
+ net-storage: storage
+ # Internal network for communication between VNF
+ net-internal: private
+ # Public network for VNF remote access (ext-net in OpenStack)
+ net-vnf: public
+ deployment_host_interfaces:
+ # Ordered-list, index should be in sync with interface index in PDF
+ - 'eno1'
+ - 'eno3.450'
+ kubespray: &idf_kubespray
+ nodes_roles:
+ opnfv: [opnfv]
+ pod4-node1: [kube-master, etcd, vault]
+ pod4-node2: [kube-node]
+ pod4-node3: [kube-node]
+ pod4-node4: [kube-master, etcd, vault]
+ pod4-node5: [kube-master, etcd, vault]
+ groups:
+ k8s-cluster:
+ - kube-node
+ - kube-master
+ hostnames:
+ opnfv: opnfv
+ pod4-node1: master1
+ pod4-node2: node1
+ pod4-node3: node2
+ pod4-node4: master2
+ pod4-node5: master3
+ network:
+ # network mapping
+ network_mapping:
+ # Management network used by installer components to communicate
+ net-mgmt: admin
+ # Storage Network
+ net-storage: storage
+ # Internal network for communication between VNF
+ net-internal: private
+ # Public network for VNF remote access (ext-net in OpenStack)
+ net-vnf: public
+ deployment_host_interfaces:
+ # Ordered-list, index should be in sync with interface index in PDF
+ - 'eno1'
+ - 'eno3.450'
+ fuel:
+ jumphost:
+ bridges:
+ admin: 'pxebr'
+ mgmt: 'br-ctl'
+ private: ~
+ public: ~
+ network:
+ node:
+ # Ordered-list, index should be in sync with node index in PDF
+ - interfaces: &interfaces
+ # Ordered-list, index should be in sync with interface index in PDF
+ - 'eno1'
+ - 'eno3'
+ - 'eno4'
+ busaddr: &busaddr
+ # Bus-info reported by `ethtool -i ethX`
+ - '0000:04:00.0'
+ - '0000:02:00.0'
+ - '0000:02:00.1'
+ - interfaces: *interfaces
+ busaddr: *busaddr
+ - interfaces: *interfaces
+ busaddr: *busaddr
+ - interfaces: *interfaces
+ busaddr: *busaddr
+ - interfaces: *interfaces
+ busaddr: *busaddr
+xci:
+ pod_name: lf-pod4
+ net_config: *net_config
+ nodes_roles:
+ opnfv_host: [opnfv_host]
+ pod4-node1: [compute, storage]
+ pod4-node2: [compute, storage]
+ pod4-node3: [controller, storage]
+ pod4-node4: [controller, storage]
+ pod4-node5: [controller, storage]
+
+ # net_config network to be used by the PXE
+ pxe_network: admin
+
+ # Since the MAC addresses of generated bridges are random, we use a list
+ # of local bridges to create the libvirt networks
+ jumphost_interfaces_bridges:
+ - name: br_admin
+ ip:
+
+ extra_addresses:
+ opnfv_host: 192.168.12.2
+
+ flavors:
+ mini:
+ - opnfv
+ - pod4-node1
+ - pod4-node2
+ noha:
+ - opnfv
+ - pod4-node1
+ - pod4-node2
+ - pod4-node3
+ ha:
+ - opnfv
+ - pod4-node1
+ - pod4-node2
+ - pod4-node3
+ - pod4-node4
+ - pod4-node5
+
+ installers:
+ osa: *idf_osa
+ kubespray: *idf_kubespray
+ osh: *idf_osh
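Note: the fuel section above records each interface's PCI bus address as reported by `ethtool -i`; the value is the bus-info field, e.g.:

    # Query the PCI bus address for an interface (run on the node itself).
    ethtool -i eno1 | awk -F': ' '/bus-info/ {print $2}'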
diff --git a/xci/var/lf-pod4-pdf.yml b/xci/var/lf-pod4-pdf.yml
new file mode 100644
index 00000000..9607e4db
--- /dev/null
+++ b/xci/var/lf-pod4-pdf.yml
@@ -0,0 +1,198 @@
+##############################################################################
+# Copyright (c) 2018 Linux Foundation, Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+### LF POD 4 descriptor file ###
+
+version: 1.0
+details:
+ pod_owner: Trevor Bramwell
+ contact: tbramwell@linuxfoundation.org
+ lab: Linux Foundation
+ location: Portland, Oregon, USA
+ type: development
+ link: https://wiki.opnfv.org/display/pharos/LF+POD+4
+jumphost:
+ name: pod4-jump
+ node: &nodeparams
+ type: baremetal
+ vendor: Intel Corporation
+ model: S2600WT2R
+ arch: x86_64
+ cpus: 88
+ cpu_cflags: haswell
+ cores: 22
+ memory: 62G
+ disks: &disks
+ - name: 'disk1'
+ disk_capacity: 480G
+ disk_type: ssd
+ disk_interface: sata
+ disk_rotation: 0
+ os: centos-7
+ remote_params: &remote_params
+ type: ipmi
+ versions:
+ - 2.0
+ user: admin
+ pass: octopus
+ remote_management:
+ <<: *remote_params
+ address: 172.30.8.83
+ mac_address: "a4:bf:01:01:b0:bb"
+ interfaces:
+ - name: nic1
+ speed: 1gb
+ features: 'dpdk|sriov'
+ vlan: native
+ mac_address: "a4:bf:01:01:b0:b9"
+ address: 192.168.12.1
+ - name: nic2
+ speed: 10gb
+ features: 'dpdk|sriov'
+ vlan: 450
+ mac_address: "00:1e:67:fd:9a:04"
+ address: 192.168.0.2
+ - name: nic3
+ speed: 10gb
+ features: 'dpdk|sriov'
+ vlan: 452
+ mac_address: "00:1e:67:fd:9a:04"
+ address: 192.168.2.2
+ - name: nic4
+ speed: 10gb
+ features: 'dpdk|sriov'
+ vlan: 451
+ mac_address: "00:1e:67:fd:9a:05"
+ address: 192.168.1.2
+ - name: nic5
+ speed: 10gb
+ features: 'dpdk|sriov'
+ vlan: 414
+ mac_address: "00:1e:67:fd:9a:05"
+ address: 172.30.12.83
+##############################################################################
+nodes:
+ - name: pod4-node1
+ node: *nodeparams
+ disks: *disks
+ remote_management:
+ <<: *remote_params
+ address: 172.30.8.84
+ mac_address: "a4:bf:01:01:ab:b6"
+ interfaces:
+ - mac_address: "a4:bf:01:01:ab:b4"
+ address: 192.168.122.3
+ vlan: native
+ - mac_address: "00:1e:67:fd:9b:32"
+ address: 172.29.236.11
+ vlan: 450
+ - mac_address: "00:1e:67:fd:9b:32"
+ address: 192.168.122.3
+ vlan: 452
+ - mac_address: "00:1e:67:fd:9b:33"
+ address: 172.29.240.11
+ vlan: 451
+ - mac_address: "00:1e:67:fd:9b:33"
+ address: 172.29.242.11
+ vlan: 414
+ ############################################################################
+ - name: pod4-node2
+ node: *nodeparams
+ disks: *disks
+ remote_management:
+ <<: *remote_params
+ address: 172.30.8.85
+ mac_address: "a4:bf:01:01:b6:97"
+ interfaces:
+ - mac_address: "a4:bf:01:01:b6:95"
+ address: 192.168.122.4
+ vlan: native
+ - mac_address: "00:1e:67:fd:98:e2"
+ address: 172.29.236.12
+ vlan: 450
+ - mac_address: "00:1e:67:fd:98:e2"
+ address: 192.168.122.4
+ vlan: 452
+ - mac_address: "00:1e:67:fd:98:e3"
+ address: 172.29.240.12
+ vlan: 451
+ - mac_address: "00:1e:67:fd:98:e3"
+ address: 172.29.242.12
+ vlan: 414
+ ############################################################################
+ - name: pod4-node3
+ node: *nodeparams
+ disks: *disks
+ remote_management:
+ <<: *remote_params
+ address: 172.30.8.86
+ mac_address: "a4:bf:01:01:66:fe"
+ interfaces:
+ - mac_address: "a4:bf:01:01:66:fc"
+ address: 192.168.122.5
+ vlan: native
+ - mac_address: "00:1e:67:fd:9c:c8"
+ address: 172.29.236.13
+ vlan: 450
+ - mac_address: "00:1e:67:fd:9c:c8"
+ address: 192.168.122.5
+ vlan: 452
+ - mac_address: "00:1e:67:fd:9c:c9"
+ address: 172.29.240.13
+ vlan: 451
+ - mac_address: "00:1e:67:fd:9c:c9"
+ address: 172.29.242.13
+ vlan: 414
+ ############################################################################
+ - name: pod4-node4
+ node: *nodeparams
+ disks: *disks
+ remote_management:
+ <<: *remote_params
+ address: 172.30.8.87
+ mac_address: "a4:bf:01:01:b2:f5"
+ interfaces:
+ - mac_address: "a4:bf:01:01:b2:f3"
+ address: 192.168.122.6
+ vlan: native
+ - mac_address: "00:1e:67:fd:9b:38"
+ address: 172.29.236.14
+ vlan: 450
+ - mac_address: "00:1e:67:fd:9b:38"
+ address: 192.168.122.6
+ vlan: 452
+ - mac_address: "00:1e:67:fd:9b:39"
+ address: 172.29.240.14
+ vlan: 451
+ - mac_address: "00:1e:67:fd:9b:39"
+ address: 172.29.242.14
+ vlan: 414
+ ############################################################################
+ - name: pod4-node5
+ node: *nodeparams
+ disks: *disks
+ remote_management:
+ <<: *remote_params
+ address: 172.30.8.88
+ mac_address: "a4:bf:01:01:b5:11"
+ interfaces:
+ - mac_address: "a4:bf:01:01:b5:0f"
+ address: 192.168.122.7
+ vlan: native
+ - mac_address: "00:1e:67:fd:99:40"
+ address: 172.29.236.15
+ vlan: 450
+ - mac_address: "00:1e:67:fd:99:40"
+ address: 192.168.122.7
+ vlan: 452
+ - mac_address: "00:1e:67:fd:99:41"
+ address: 172.29.240.15
+ vlan: 451
+ - mac_address: "00:1e:67:fd:99:41"
+ address: 172.29.242.15
+ vlan: 414
diff --git a/xci/var/opnfv.yml b/xci/var/opnfv.yml
index 5638eba4..91b9ee38 100644
--- a/xci/var/opnfv.yml
+++ b/xci/var/opnfv.yml
@@ -7,39 +7,59 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-OPNFV_RELENG_GIT_URL: "{{ lookup('env','OPNFV_RELENG_GIT_URL') }}"
-OPNFV_RELENG_VERSION: "{{ lookup('env','OPNFV_RELENG_VERSION') }}"
-OPENSTACK_BIFROST_GIT_URL: "{{ lookup('env','OPENSTACK_BIFROST_GIT_URL') }}"
-OPENSTACK_BIFROST_DEV_PATH: "{{ lookup('env','OPENSTACK_BIFROST_DEV_PATH') }}"
-OPENSTACK_BIFROST_VERSION: "{{ lookup('env','OPENSTACK_BIFROST_VERSION') }}"
-OPENSTACK_OSA_GIT_URL: "{{ lookup('env','OPENSTACK_OSA_GIT_URL') }}"
-OPENSTACK_OSA_OPENRC_GIT_URL: "{{ lookup('env', 'OPENSTACK_OSA_OPENRC_GIT_URL') }}"
-OPENSTACK_OSA_PATH: "{{ lookup('env','OPENSTACK_OSA_PATH') }}"
-OPENSTACK_OSA_DEV_PATH: "{{ lookup('env','OPENSTACK_OSA_DEV_PATH') }}"
-OPENSTACK_OSA_VERSION: "{{ lookup('env','OPENSTACK_OSA_VERSION') }}"
-OPENSTACK_OSA_ETC_PATH: "{{ lookup('env','OPENSTACK_OSA_ETC_PATH') }}"
-XCI_ANSIBLE_PIP_VERSION: "{{ lookup('env','XCI_ANSIBLE_PIP_VERSION') }}"
-XCI_CACHE: "{{ lookup('env', 'XCI_CACHE') }}"
-XCI_FLAVOR: "{{ lookup('env','XCI_FLAVOR') }}"
-XCI_DISTRO: "{{ lookup('env', 'XCI_DISTRO') }}"
-XCI_FLAVOR_ANSIBLE_FILE_PATH: "{{ lookup('env','XCI_FLAVOR_ANSIBLE_FILE_PATH') }}"
-XCI_LOOP: "{{ lookup('env','XCI_LOOP') }}"
-XCI_PATH: "{{ lookup('env', 'XCI_PATH') }}"
-XCI_SCENARIOS_CACHE: "{{ lookup('env', 'XCI_SCENARIOS_CACHE') }}"
-LOG_PATH: "{{ lookup('env','LOG_PATH') }}"
-OPNFV_HOST_IP: "{{ lookup('env','OPNFV_HOST_IP') }}"
-OPNFV_SSH_HOST_KEYS_PATH: "{{ lookup('env', 'OPNFV_SSH_HOST_KEYS_PATH') }}"
-XCI_EXTRA_VARS_PATH: "{{ lookup('env', 'XCI_EXTRA_VARS_PATH') }}"
-XCI_SSL_SUBJECT: "{{ lookup('env', 'XCI_SSL_SUBJECT') }}"
-XCI_CEPH_ENABLED: "{{ lookup('env', 'XCI_CEPH_ENABLED') }}"
-RUN_TEMPEST: "{{ lookup('env', 'RUN_TEMPEST') }}"
-DEPLOY_SCENARIO: "{{ lookup('env','DEPLOY_SCENARIO') }}"
-XCI_INSTALLER: "{{ lookup('env','XCI_INSTALLER') }}"
+# This file reflects the environment variables set in various places that are
+# consumed by Ansible playbooks/roles. Only variables actually used within the
+# playbooks/roles should exist in this file.
-# install docker on opnfv host only if we are running as part of CI
-opnfv_required_packages:
- - "{{ docker_package_name }}"
+# openstack/bifrost variables
+openstack_bifrost_git_url: "{{ lookup('env','OPENSTACK_BIFROST_GIT_URL') }}"
+openstack_bifrost_version: "{{ lookup('env','OPENSTACK_BIFROST_VERSION') }}"
+openstack_bifrost_dev_path: "{{ lookup('env','OPENSTACK_BIFROST_DEV_PATH') }}"
-opnfv_required_pip:
- - python-openstackclient
- - docker-py
+# openstack/openstack-ansible variables
+openstack_osa_git_url: "{{ lookup('env','OPENSTACK_OSA_GIT_URL') }}"
+openstack_osa_version: "{{ lookup('env','OPENSTACK_OSA_VERSION') }}"
+openstack_osa_dev_path: "{{ lookup('env','OPENSTACK_OSA_DEV_PATH') }}"
+openstack_osa_path: "{{ lookup('env','OPENSTACK_OSA_PATH') }}"
+openstack_osa_etc_path: "{{ lookup('env','OPENSTACK_OSA_ETC_PATH') }}"
+openstack_osa_openrc_git_url: "{{ lookup('env', 'OPENSTACK_OSA_OPENRC_GIT_URL') }}"
+openstack_osa_haproxy_git_url: "{{ lookup('env','OPENSTACK_OSA_HAPROXY_GIT_URL') }}"
+
+# kubespray variables
+kubespray_git_url: "{{ lookup('env','KUBESPRAY_GIT_URL') }}"
+kubespray_version: "{{ lookup('env','KUBESPRAY_VERSION') }}"
+kubernetes_version: "{{ lookup('env','KUBERNETES_VERSION') }}"
+xci_kube_ansible_pip_version: "{{ lookup('env','XCI_KUBE_ANSIBLE_PIP_VERSION') }}"
+
+# openstack-helm variables
+osh_git_url: "{{ lookup('env','OSH_GIT_URL') }}"
+osh_version: "{{ lookup('env','OSH_VERSION') }}"
+osh_infra_git_url: "{{ lookup('env','OSH_INFRA_GIT_URL') }}"
+osh_infra_version: "{{ lookup('env','OSH_INFRA_VERSION') }}"
+osh_helm_binary_url: "{{ lookup('env','OSH_HELM_BINARY_URL') }}"
+osh_helm_binary_version: "{{ lookup('env','OSH_HELM_BINARY_VERSION') }}"
+openstack_osh_version: "{{ lookup('env','OPENSTACK_OSH_VERSION') }}"
+
+# variables for other components
+keepalived_git_url: "{{ lookup('env','KEEPALIVED_GIT_URL') }}"
+haproxy_version: "{{ lookup('env','HAPROXY_VERSION') }}"
+keepalived_version: "{{ lookup('env','KEEPALIVED_VERSION') }}"
+
+# xci variables
+xci_cache: "{{ lookup('env', 'XCI_CACHE') }}"
+xci_flavor: "{{ lookup('env','XCI_FLAVOR') }}"
+xci_flavor_ansible_file_path: "{{ lookup('env','XCI_FLAVOR_ANSIBLE_FILE_PATH') }}"
+xci_distro: "{{ lookup('env', 'XCI_DISTRO') }}"
+xci_scenarios_cache: "{{ lookup('env', 'XCI_SCENARIOS_CACHE') }}"
+xci_ssl_subject: "{{ lookup('env', 'XCI_SSL_SUBJECT') }}"
+xci_ceph_enabled: "{{ lookup('env', 'XCI_CEPH_ENABLED') }}"
+log_path: "{{ lookup('env','LOG_PATH') }}"
+opnfv_ssh_host_keys_path: "{{ lookup('env', 'OPNFV_SSH_HOST_KEYS_PATH') }}"
+run_tempest: "{{ lookup('env', 'RUN_TEMPEST') }}"
+core_openstack_install: "{{ lookup('env', 'CORE_OPENSTACK_INSTALL') }}"
+deploy_scenario: "{{ lookup('env','DEPLOY_SCENARIO') }}"
+installer_type: "{{ lookup('env','INSTALLER_TYPE') }}"
+osh_distro: "{{ lookup('env', 'OSH_DISTRO') }}"
+
+# baremetal variables
+baremetal: "{{ lookup('env','BAREMETAL') }}"
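Note: every entry in the rewritten opnfv.yml is a thin lookup('env', ...) shim, so the deployment is driven entirely by exported shell variables; an unset variable simply resolves to an empty string at playbook runtime. A hedged sketch of the flow, with hypothetical values and playbook name:

    # Exported by the config scripts before any playbook runs...
    export XCI_FLAVOR=mini
    export BAREMETAL=false
    # ...then resolved whenever a playbook loads the vars file:
    ansible-playbook -e @xci/var/opnfv.yml deploy.yml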
diff --git a/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/tasks/main.yml b/xci/var/opnfv_vm_idf.yml
index 3725fb58..fa647287 100644
--- a/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/tasks/main.yml
+++ b/xci/var/opnfv_vm_idf.yml
@@ -1,5 +1,4 @@
---
-# SPDX-license-identifier: Apache-2.0
##############################################################################
# Copyright (c) 2017 Ericsson AB and others.
# All rights reserved. This program and the accompanying materials
@@ -7,12 +6,14 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-
-- name: copy user_variables_os-nosdn-nofeature.yml
- copy:
- src: "user_variables_os-nosdn-nofeature.yml"
- dest: "{{OPENSTACK_OSA_ETC_PATH}}/user_variables_os-nosdn-nofeature.yml"
-- name: copy os-nosdn-nofeature scenario specific openstack_user_config.yml
- copy:
- src: "{{XCI_FLAVOR}}/openstack_user_config.yml"
- dest: "{{OPENSTACK_OSA_ETC_PATH}}/openstack_user_config.yml"
+opnfv_vm_idf:
+ version: 0.1
+ net_config: &net_config
+ admin:
+ interface: 0
+ network: 192.168.122.0
+ mask: 24
+ mgmt:
+ interface: 1
+ network: 172.29.236.0
+ mask: 22
diff --git a/xci/var/opnfv_vm_pdf.yml b/xci/var/opnfv_vm_pdf.yml
new file mode 100644
index 00000000..51371388
--- /dev/null
+++ b/xci/var/opnfv_vm_pdf.yml
@@ -0,0 +1,53 @@
+---
+##############################################################################
+# Copyright (c) 2017 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+opnfv_vm_pdf:
+ name: opnfv
+ node: &nodeparams
+ type: virtual
+ vendor: libvirt
+ model: pc
+ arch: x86_64
+ cpus: 6
+ cpu_cflags: host-model
+ cores: 6
+ memory: 12G
+ disks: &disks
+ - name: disk1
+ disk_capacity: 80G
+ disk_type: hdd
+ disk_interface: sata
+ disk_rotation:
+ remote_params: &remote_params
+ type:
+ - ipmi: [2.0]
+ user: admin
+ pass: password
+ remote_management:
+ <<: *remote_params
+ address: 192.168.122.1:625
+ mac_address: "52:54:00:fe:3b:01"
+ interface_common_nic1: &interface_common_nic1
+ name: nic1
+ speed:
+ features:
+ vlan: native
+ interface_common_nic2: &interface_common_nic2
+ name: nic2
+ speed:
+ features:
+ vlan: native
+ interfaces:
+ - mac_address: "52:54:00:33:82:d0"
+ address: 192.168.122.2
+ gateway: 192.168.122.1
+ <<: *interface_common_nic1
+ - mac_address: "52:54:00:33:82:d1"
+ address: 172.29.236.10
+ gateway: 172.29.236.1
+ <<: *interface_common_nic2
diff --git a/xci/var/pdf.yml b/xci/var/pdf.yml
new file mode 100644
index 00000000..bb9a5f55
--- /dev/null
+++ b/xci/var/pdf.yml
@@ -0,0 +1,168 @@
+---
+##############################################################################
+# Copyright (c) 2017 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+details:
+ pod_owner: OPNFV
+ contact: N/A
+ lab: OPNFV LaaS
+ location: N/A
+ type: production
+ link: http://wiki.opnfv.org/
+##############################################################################
+jumphost:
+ name: jumphost
+ node:
+ disks:
+ os:
+ remote_management:
+ interfaces:
+##############################################################################
+nodes:
+ - name: node1
+ node: &nodeparams
+ type: virtual
+ vendor: libvirt
+ model: pc
+ arch: x86_64
+ cpus: 6
+ cpu_cflags: host-model
+ cores: 6
+ memory: 12G
+ disks: &disks
+ - name: disk1
+ disk_capacity: 80G
+ disk_type: hdd
+ disk_interface: sata
+ disk_rotation:
+ remote_params: &remote_params
+ type:
+ - ipmi: [2.0]
+ user: admin
+ pass: password
+ remote_management:
+ <<: *remote_params
+ address: 192.168.122.1:625
+ mac_address: "52:54:00:fe:3b:01"
+ interface_common_nic1: &interface_common_nic1
+ name: nic1
+ speed:
+ features:
+ vlan: 10
+ interface_common_nic2: &interface_common_nic2
+ name: nic2
+ speed:
+ features:
+ vlan: 20
+ interface_common_nic3: &interface_common_nic3
+ name: nic3
+ speed:
+ features:
+ vlan: native
+ interface_common_nic4: &interface_common_nic4
+ name: nic4
+ speed:
+ features:
+ vlan: 30
+ interfaces:
+ - mac_address: "52:54:00:fe:3b:01"
+ address: 172.29.236.11
+ <<: *interface_common_nic1
+ - mac_address: "52:54:00:fe:3b:01"
+ address: 172.29.244.11
+ <<: *interface_common_nic2
+ - mac_address: "52:54:00:fe:3b:01"
+ address: 192.168.122.3
+ <<: *interface_common_nic3
+ - mac_address: "52:54:00:fe:3b:01"
+ address: 172.29.240.11
+ <<: *interface_common_nic4
+ ##############################################################################
+ - name: node2
+ node: *nodeparams
+ disks: *disks
+ remote_management:
+ <<: *remote_params
+ address: 192.168.122.1:626
+ mac_address: "52:54:00:b9:d4:87"
+ interfaces:
+ - mac_address: "52:54:00:b9:d4:87"
+ address: 172.29.236.12
+ <<: *interface_common_nic1
+ - mac_address: "52:54:00:b9:d4:87"
+ address: 172.29.244.12
+ <<: *interface_common_nic2
+ - mac_address: "52:54:00:b9:d4:87"
+ address: 192.168.122.4
+ <<: *interface_common_nic3
+ - mac_address: "52:54:00:b9:d4:87"
+ address: 172.29.240.12
+ <<: *interface_common_nic4
+ ##############################################################################
+ - name: node3
+ node: *nodeparams
+ disks: *disks
+ remote_management:
+ <<: *remote_params
+ address: 192.168.122.1:627
+ mac_address: "52:54:00:6d:0e:d1"
+ interfaces:
+ - mac_address: "52:54:00:6d:0e:d1"
+ address: 172.29.236.13
+ <<: *interface_common_nic1
+ - mac_address: "52:54:00:6d:0e:d1"
+ address: 172.29.244.13
+ <<: *interface_common_nic2
+ - mac_address: "52:54:00:6d:0e:d1"
+ address: 192.168.122.5
+ <<: *interface_common_nic3
+ - mac_address: "52:54:00:6d:0e:d1"
+ address: 172.29.240.13
+ <<: *interface_common_nic4
+ ##############################################################################
+ - name: node4
+ node: *nodeparams
+ disks: *disks
+ remote_management:
+ <<: *remote_params
+ address: 192.168.122.1:628
+ mac_address: "52:54:00:95:02:10"
+ interfaces:
+ - mac_address: "52:54:00:95:02:10"
+ address: 172.29.236.14
+ <<: *interface_common_nic1
+ - mac_address: "52:54:00:95:02:10"
+ address: 172.29.244.14
+ <<: *interface_common_nic2
+ - mac_address: "52:54:00:95:02:10"
+ address: 192.168.122.6
+ <<: *interface_common_nic3
+ - mac_address: "52:54:00:95:02:10"
+ address: 172.29.240.14
+ <<: *interface_common_nic4
+ ##############################################################################
+ - name: node5
+ node: *nodeparams
+ disks: *disks
+ remote_management:
+ <<: *remote_params
+ address: 192.168.122.1:629
+ mac_address: "52:54:00:84:fa:19"
+ interfaces:
+ - mac_address: "52:54:00:84:fa:19"
+ address: 172.29.236.15
+ <<: *interface_common_nic1
+ - mac_address: "52:54:00:84:fa:19"
+ address: 172.29.244.15
+ <<: *interface_common_nic2
+ - mac_address: "52:54:00:84:fa:19"
+ address: 192.168.122.7
+ <<: *interface_common_nic3
+ - mac_address: "52:54:00:84:fa:19"
+ address: 172.29.240.15
+ <<: *interface_common_nic4
diff --git a/xci/xci-deploy.sh b/xci/xci-deploy.sh
index 99053c7e..d9c41968 100755
--- a/xci/xci-deploy.sh
+++ b/xci/xci-deploy.sh
@@ -3,31 +3,6 @@ set -o errexit
set -o nounset
set -o pipefail
-submit_bug_report() {
- cd ${XCI_PATH}
- echo ""
- echo "-------------------------------------------------------------------------"
- echo "Oh nooooo! The XCI deployment failed miserably :-("
- echo ""
- echo "If you need help, please choose one of the following options"
- echo "* #opnfv-pharos @ freenode network"
- echo "* opnfv-tech-discuss mailing list (https://lists.opnfv.org/mailman/listinfo/opnfv-tech-discuss)"
- echo " - Please prefix the subject with [XCI]"
- echo "* https://jira.opnfv.org (Release Engineering project)"
- echo ""
- echo "Do not forget to submit the following information on your bug report:"
- echo ""
- git diff --quiet && echo "releng-xci tree status: clean" || echo "releng-xci tree status: local modifications"
- echo "opnfv/releng-xci version: $(git rev-parse HEAD)"
- echo "openstack/bifrost version: $OPENSTACK_BIFROST_VERSION"
- echo "openstack/openstack-ansible version: $OPENSTACK_OSA_VERSION"
- echo "xci flavor: $XCI_FLAVOR"
- echo "xci installer: $XCI_INSTALLER"
- echo "Environment variables:"
- env | grep --color=never '\(OPNFV\|XCI\|OPENSTACK\)'
- echo "-------------------------------------------------------------------------"
-}
-
#-------------------------------------------------------------------------------
# This script should not be run as root
#-------------------------------------------------------------------------------
@@ -53,16 +28,13 @@ fi
#-------------------------------------------------------------------------------
# find where are we
export XCI_PATH="$(git rev-parse --show-toplevel)"
-# source user vars
-source $XCI_PATH/xci/config/user-vars
-# source pinned versions
-source $XCI_PATH/xci/config/pinned-versions
-# source flavor configuration
-source "$XCI_PATH/xci/config/${XCI_FLAVOR}-vars"
-# source installer configuration
-source "$XCI_PATH/xci/installer/${XCI_INSTALLER}/env" &>/dev/null || true
-# source xci configuration
-source $XCI_PATH/xci/config/env-vars
+# source helpers library
+source ${XCI_PATH}/xci/files/xci-lib.sh
+
+# Make sure we pass XCI_PATH everywhere
+export XCI_ANSIBLE_PARAMS+=" -e xci_path=${XCI_PATH}"
+# Make sure everybody knows where our global roles are
+export ANSIBLE_ROLES_PATH="$HOME/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles:${XCI_PATH}/xci/playbooks/roles"
if [[ -z $(echo $PATH | grep "$HOME/.local/bin") ]]; then
export PATH="$HOME/.local/bin:$PATH"
@@ -77,21 +49,30 @@ for local_user_var in ${user_local_dev_vars[@]}; do
done
unset user_local_dev_vars local_user_var
+#
+# Parse command line options
+#
+parse_cmdline_opts $*
+
+#
+# Bootstrap environment for XCI Deployment
+#
+echo "Info: Preparing host environment for the XCI deployment"
+echo "-------------------------------------------------------------------------"
+bootstrap_xci_env
+
# register our handler
-trap submit_bug_report ERR
+trap exit_trap ERR
+
+# We are using sudo so we need to make sure that env_reset is not present
+sudo sed -i "s/^Defaults.*env_reset/#&/" /etc/sudoers
#-------------------------------------------------------------------------------
-# Log info to console
+# Clean up environment
#-------------------------------------------------------------------------------
-echo "Info: Starting XCI Deployment"
-echo "Info: Deployment parameters"
+echo "Info: Cleaning up previous XCI artifacts"
echo "-------------------------------------------------------------------------"
-echo "xci flavor: $XCI_FLAVOR"
-echo "xci installer: $XCI_INSTALLER"
-echo "opnfv/releng-xci version: $(git rev-parse HEAD)"
-echo "openstack/bifrost version: $OPENSTACK_BIFROST_VERSION"
-echo "openstack/openstack-ansible version: $OPENSTACK_OSA_VERSION"
-echo "OPNFV scenario: $DEPLOY_SCENARIO"
+sudo -E bash files/xci-destroy-env.sh
echo "-------------------------------------------------------------------------"
#-------------------------------------------------------------------------------
@@ -99,37 +80,9 @@ echo "-------------------------------------------------------------------------"
#-------------------------------------------------------------------------------
echo "Info: Installing Ansible from pip"
echo "-------------------------------------------------------------------------"
-bash files/install-ansible.sh
+install_ansible
echo "-------------------------------------------------------------------------"
-case ${XCI_DISTRO,,} in
- # These should ideally match the CI jobs
- ubuntu)
- export DIB_OS_RELEASE="${DIB_OS_RELEASE:-xenial}"
- export DIB_OS_ELEMENT="${DIB_OS_ELEMENT:-ubuntu-minimal}"
- export DIB_OS_PACKAGES="${DIB_OS_PACKAGES:-vlan,vim,less,bridge-utils,language-pack-en,iputils-ping,rsyslog,curl,iptables}"
- ;;
- centos)
- export DIB_OS_RELEASE="${DIB_OS_RELEASE:-7}"
- export DIB_OS_ELEMENT="${DIB_OS_ELEMENT:-centos-minimal}"
- export DIB_OS_PACKAGES="${DIB_OS_PACKAGES:-vim,less,bridge-utils,iputils,rsyslog,curl,iptables}"
- ;;
- opensuse)
- export DIB_OS_RELEASE="${DIB_OS_RELEASE:-42.3}"
- export DIB_OS_ELEMENT="${DIB_OS_ELEMENT:-opensuse-minimal}"
- export DIB_OS_PACKAGES="${DIB_OS_PACKAGES:-vim,less,bridge-utils,iputils,rsyslog,curl,iptables}"
- ;;
-esac
-
-# There is no CentOS support at all
-if [[ ${XCI_DISTRO,,} == centos ]]; then
- echo ""
- echo "Error: Sorry, only Ubuntu and SUSE hosts are supported for now!"
- echo "Error: CentOS 7 support is still work in progress."
- echo ""
- exit 1
-fi
-
# Clone OPNFV scenario repositories
#-------------------------------------------------------------------------------
# This playbook
@@ -139,43 +92,46 @@ fi
echo "Info: Cloning OPNFV scenario repositories"
echo "-------------------------------------------------------------------------"
cd $XCI_PATH/xci/playbooks
-ansible-playbook ${XCI_ANSIBLE_VERBOSITY} -i inventory get-opnfv-scenario-requirements.yml
+ansible-playbook ${XCI_ANSIBLE_PARAMS} -i "localhost," get-opnfv-scenario-requirements.yml
echo "-------------------------------------------------------------------------"
#-------------------------------------------------------------------------------
-# Get scenario variables overrides
+# Check playbooks using ansible-lint
#-------------------------------------------------------------------------------
-if [[ -f $XCI_SCENARIOS_CACHE/${DEPLOY_SCENARIO:-_no_scenario_}/xci_overrides ]]; then
- source $XCI_SCENARIOS_CACHE/$DEPLOY_SCENARIO/xci_overrides
-fi
+echo "Info: Verifying XCI playbooks using ansible-lint"
+echo "-------------------------------------------------------------------------"
+ansible_lint
+echo "-------------------------------------------------------------------------"
+# Get scenario variable overrides
#-------------------------------------------------------------------------------
-# Start provisioning VM nodes
+source $(find $XCI_SCENARIOS_CACHE/${DEPLOY_SCENARIO} -name xci_overrides) &>/dev/null &&
+    echo "Sourced ${DEPLOY_SCENARIO} overrides file successfully!" || :
+
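An xci_overrides file gives a scenario a hook to override defaults before provisioning starts; if no such file exists, find prints nothing and the output redirection plus '|| :' keep the failed source harmless. Its contents are scenario-specific; a purely illustrative example (variable names and values hypothetical):

    # Hypothetical xci_overrides content; each scenario ships its own.
    export VM_MEMORY_SIZE=16384
    export CORE_OPENSTACK_INSTALL=true
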
#-------------------------------------------------------------------------------
-# This playbook
-# - removes directories that were created by the previous xci run
-# - clones opnfv/releng-xci and openstack/bifrost repositories
-# - combines opnfv/releng-xci and openstack/bifrost scripts/playbooks
-# - destroys VMs, removes ironic db, leases, logs
-# - creates and provisions VMs for the chosen flavor
+# Log info to console
#-------------------------------------------------------------------------------
-echo "Info: Starting provisining VM nodes using openstack/bifrost"
-echo "-------------------------------------------------------------------------"
-# We are using sudo so we need to make sure that env_reset is not present
-sudo sed -i "s/^Defaults.*env_reset/#&/" /etc/sudoers
-cd $XCI_PATH/bifrost/
-sudo -E bash ./scripts/destroy-env.sh
-cd $XCI_PLAYBOOKS
-ansible-playbook ${XCI_ANSIBLE_VERBOSITY} -i inventory provision-vm-nodes.yml
-cd ${XCI_CACHE}/repos/bifrost
-bash ./scripts/bifrost-provision.sh
-echo "-----------------------------------------------------------------------"
-echo "Info: VM nodes are provisioned!"
-echo "-----------------------------------------------------------------------"
+log_xci_information
+
+# Deploy infrastructure based on the selected deployment method
+echo "Info: Deploying hardware using '${INFRA_DEPLOYMENT}'"
+echo "---------------------------------------------------"
+source ${XCI_PATH}/xci/infra/${INFRA_DEPLOYMENT}/infra-provision.sh
# Deploy OpenStack using the selected installer
-echo "Info: Deploying '${XCI_INSTALLER}' installer"
+echo "Info: Deploying '${INSTALLER_TYPE}' installer"
echo "-----------------------------------------------------------------------"
-source ${XCI_PATH}/xci/installer/${XCI_INSTALLER}/deploy.sh
+source ${XCI_PATH}/xci/installer/${INSTALLER_TYPE}/deploy.sh
+
+# Reset trap
+trap ERR
+
+# Deployment time
+xci_deploy_time=$SECONDS
+echo "-------------------------------------------------------------------------------------------------------------"
+echo "Info: xci_deploy.sh deployment took $(($xci_deploy_time / 60)) minutes and $(($xci_deploy_time % 60)) seconds"
+echo "-------------------------------------------------------------------------------------------------------------"
+
+collect_xci_logs
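
collect_xci_logs is the last xci-lib.sh helper invoked; it is not part of this diff, so the sketch below only conveys the intent, and the destination path and opnfv host address are assumptions:

    collect_xci_logs() {
        local dest="${XCI_PATH}/xci/logs"
        mkdir -p "$dest"
        # Pull deployment logs from the opnfv host for post-mortem analysis;
        # never let missing logs fail an otherwise successful run.
        scp -r "root@${OPNFV_HOST_IP:-192.168.122.2}:/var/log/ansible" "$dest/" || true
    }
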
# vim: set ts=4 sw=4 expandtab: