diff options
72 files changed, 2228 insertions, 265 deletions
@@ -30,28 +30,18 @@ meetings: time: '14:00 UTC' committers: - <<: *opnfv_releng_ptl - - name: 'Yolanda Robla Mota' - company: 'Red Hat' - email: 'yroblamo@redhat.com' - id: 'yrobla' - timezone: 'Europe/Barcelona' - name: 'Markos Chandras' company: 'SUSE' email: 'mchandras@suse.de' id: 'mchandras' timezone: 'Europe/London' - - name: 'Tianwei Wu' - company: 'Huawei' - email: 'wutianwei1@huawei.com' - id: 'hw_wutianwei' - timezone: 'Asia/Shanghai' - name: 'Manuel Buil' company: 'SUSE' email: 'mbuil@suse.com' - id: 'mbuild' + id: 'mbuil' timezone: 'Europe/Madrid' - - name: 'Periyasamy Palanisamy' - company: 'Ericsson' - email: 'periyasamy.palanisamy@ericsson.com' - id: 'epalper' - timezone: 'Europe/Aachen' + - name: 'Panagiotis Karalis' + company: 'Intracom Telecom' + email: 'panos.pkaralis@gmail.com' + id: 'pkaralis' + timezone: 'Europe/Athens' diff --git a/xci/README.rst b/xci/README.rst index d7555d46..a18d92ee 100644 --- a/xci/README.rst +++ b/xci/README.rst @@ -160,6 +160,37 @@ execute sandbox script ./xci-deploy.sh +Baremetal Usage +-------------- + +The previous deployments are based on VMs, i.e. controllers and computes are +VMs. 
It is also possible to deploy on baremetal and for that a pdf and idf file +which describes the hardware needs to be provided to the sandbox script: + +clone OPNFV releng-xci repository + + git clone https://gerrit.opnfv.org/gerrit/releng-xci.git + +change into directory where the sandbox script is located + + cd releng-xci/xci + +set the sandbox flavor + + export XCI_FLAVOR=noha + +set the version to use for openstack-ansible + + export OPENSTACK_OSA_VERSION=master + +set where the logs should be stored + + export LOG_PATH=/home/jenkins/xcilogs + +execute sandbox script + + ./xci-deploy.sh -i var/ericsson-pod2-idf.yml -p var/ericsson-pod2-pdf.yml + ============== User Variables ============== diff --git a/xci/config/env-vars b/xci/config/env-vars index 0458f311..a90e8533 100755 --- a/xci/config/env-vars +++ b/xci/config/env-vars @@ -8,10 +8,16 @@ export OPNFV_RELENG_GIT_URL=${OPNFV_RELENG_GIT_URL:-https://gerrit.opnfv.org/ger export OPENSTACK_BIFROST_GIT_URL=${OPENSTACK_BIFROST_GIT_URL:-https://git.openstack.org/openstack/bifrost} export OPENSTACK_OSA_GIT_URL=${OPENSTACK_OSA_GIT_URL:-https://git.openstack.org/openstack/openstack-ansible} export OPENSTACK_OSA_OPENRC_GIT_URL=${OPENSTACK_OSA_OPENRC_GIT_URL:-https://git.openstack.org/openstack/openstack-ansible-openstack_openrc} -export KUBESPRAY_GIT_URL=${KUBESPRAY_GIT_URL:-https://github.com/kubernetes-incubator/kubespray.git} +export KUBESPRAY_GIT_URL=${KUBESPRAY_GIT_URL:-https://github.com/kubernetes-sigs/kubespray.git} +export OSH_GIT_URL=${OSH_GIT_URL:-https://github.com/openstack/openstack-helm.git} +export OSH_INFRA_GIT_URL=${OSH_INFRA_GIT_URL:-https://github.com/openstack/openstack-helm-infra.git} export OPENSTACK_OSA_HAPROXY_GIT_URL=${OPENSTACK_OSA_HAPROXY_GIT_URL:-https://git.openstack.org/openstack/openstack-ansible-haproxy_server} export KEEPALIVED_GIT_URL=${KEEPALIVED_GIT_URL:-https://github.com/evrardjp/ansible-keepalived} +export 
OSH_HELM_BINARY_URL=${OSH_HELM_BINARY_URL:-https://storage.googleapis.com/kubernetes-helm} +export OSH_HELM_BINARY_VERSION=${OSH_HELM_BINARY_VERSION:-v2.13.1} + + # Configuration export OPENSTACK_OSA_ETC_PATH=/etc/openstack_deploy export OPNFV_HOST_IP=192.168.122.2 @@ -28,7 +34,8 @@ export XCI_PLAYBOOKS=${XCI_PATH}/xci/playbooks # Functest parameters export FUNCTEST_MODE=${FUNCTEST_MODE:-"tier"} export FUNCTEST_SUITE_NAME=${FUNCTEST_SUITE_NAME:-"healthcheck"} -export FUNCTEST_VERSION=${FUNCTEST_VERSION:-"latest"} +# TODO: Investigate and fix why the env var FUNCTEST_VERSION set by Jenkins job doesn't take effect +export FUNCTEST_VERSION=${FUNCTEST_VERSION:-"hunter"} # CI paremeters export CI_LOOP=${CI_LOOP:-"daily"} @@ -50,7 +57,7 @@ export LOG_PATH=${LOG_PATH:-${XCI_PATH}/xci/logs} # https://github.com/ansible/ansible/commit/67859c3476501d5d9839fd904aec55468d09593a # This was fixed in 2.5.6 so remove the pin when OSA updates to newer version. #export XCI_ANSIBLE_PIP_VERSION=${XCI_ANSIBLE_PIP_VERSION:-$(curl -s https://raw.githubusercontent.com/openstack/openstack-ansible/${OPENSTACK_OSA_VERSION}/scripts/bootstrap-ansible.sh | grep ansible== | sed -n "s/.*ansible==\([0-9.]*\).*/\1/p")} -export XCI_ANSIBLE_PIP_VERSION="2.5.8" +export XCI_ANSIBLE_PIP_VERSION="2.7.8" export ANSIBLE_HOST_KEY_CHECKING=False export DEPLOY_SCENARIO=${DEPLOY_SCENARIO:-"os-nosdn-nofeature"} diff --git a/xci/config/ha-vars b/xci/config/ha-vars index 3440a855..4c40fb33 100755 --- a/xci/config/ha-vars +++ b/xci/config/ha-vars @@ -12,6 +12,7 @@ export NUM_NODES=6 [[ "$INSTALLER_TYPE" == "osa" ]] && export NODE_NAMES="opnfv controller00 controller01 controller02 compute00 compute01" [[ "$INSTALLER_TYPE" == "kubespray" ]] && export NODE_NAMES="opnfv master1 master2 master3 node1 node2" +[[ "$INSTALLER_TYPE" == "osh" ]] && export NODE_NAMES="opnfv master1 master2 master3 node1 node2" export VM_DOMAIN_TYPE=${VM_DOMAIN_TYPE:-kvm} export VM_CPU=${VM_CPU:-6} export VM_DISK=${VM_DISK:-80} diff --git 
a/xci/config/mini-vars b/xci/config/mini-vars index 9e7e6180..aaa4cb88 100755 --- a/xci/config/mini-vars +++ b/xci/config/mini-vars @@ -12,6 +12,7 @@ export NUM_NODES=3 [[ "$INSTALLER_TYPE" == "osa" ]] && export NODE_NAMES="opnfv controller00 compute00" [[ "$INSTALLER_TYPE" == "kubespray" ]] && export NODE_NAMES="opnfv master1 node1" +[[ "$INSTALLER_TYPE" == "osh" ]] && export NODE_NAMES="opnfv master1 node1" export VM_DOMAIN_TYPE=${VM_DOMAIN_TYPE:-kvm} export VM_CPU=${VM_CPU:-6} export VM_DISK=${VM_DISK:-80} diff --git a/xci/config/noha-vars b/xci/config/noha-vars index 2f3db993..e887ddb8 100755 --- a/xci/config/noha-vars +++ b/xci/config/noha-vars @@ -12,6 +12,7 @@ export NUM_NODES=4 [[ "$INSTALLER_TYPE" == "osa" ]] && export NODE_NAMES="opnfv controller00 compute00 compute01" [[ "$INSTALLER_TYPE" == "kubespray" ]] && export NODE_NAMES="opnfv master1 node1 node2" +[[ "$INSTALLER_TYPE" == "osh" ]] && export NODE_NAMES="opnfv master1 node1 node2" export VM_DOMAIN_TYPE=${VM_DOMAIN_TYPE:-kvm} export VM_CPU=${VM_CPU:-6} export VM_DISK=${VM_DISK:-80} diff --git a/xci/config/pinned-versions b/xci/config/pinned-versions index 9b42fa8e..440972ae 100755 --- a/xci/config/pinned-versions +++ b/xci/config/pinned-versions @@ -25,24 +25,31 @@ #------------------------------------------------------------------------------- # use releng-xci from master until the development work with the sandbox is complete export OPNFV_RELENG_VERSION="master" -# HEAD of bifrost "master" as of 11.10.2018 -export OPENSTACK_BIFROST_VERSION=${OPENSTACK_BIFROST_VERSION:-"483e3a7a6ff2186b68b00c29b35213bee8074ebe"} -# HEAD of ironic "stable/rocky" as of 11.10.2018 -export BIFROST_IRONIC_VERSION=${BIFROST_IRONIC_VERSION:-"4763762ae98cd871c4a90c8eacbdea7ead4f6f8b"} -# HEAD of ironic-client "stable/rocky" as of 11.10.2018 -export BIFROST_IRONIC_CLIENT_VERSION=${BIFROST_IRONIC_CLIENT_VERSION:-"b29646d0e8db4c534ad7ca495a3fb73882f1ef43"} -# HEAD of ironic-inspector "stable/rocky" as of 11.10.2018 -export 
BIFROST_IRONIC_INSPECTOR_VERSION=${BIFROST_IRONIC_INSPECTOR_VERSION:-"e89450c44e41ec2ddada7909e63f1edc1aa1afdd"} -# HEAD of ironic-inspector-client "stable/rocky" as of 11.10.2018 -export BIFROST_IRONIC_INSPECTOR_CLIENT_VERSION=${BIFROST_IRONIC_INSPECTOR_CLIENT_VERSION:-"cb370b776aa274b0ecfacfb29fa5035670839308"} -# HEAD of osa "stable/rocky" as of 07.09.2018 -export OPENSTACK_OSA_VERSION=${OPENSTACK_OSA_VERSION:-"ff6161e7f08b8ffe9ff6e0552517ea3f10292cf8"} +# HEAD of bifrost "master" as of 02.07.2019 +export OPENSTACK_BIFROST_VERSION=${OPENSTACK_BIFROST_VERSION:-"cd559480c95867d272b8a32240e50c390646665b"} +# HEAD of ironic "master" as of 02.07.2019 +export BIFROST_IRONIC_VERSION=${BIFROST_IRONIC_VERSION:-"1beb8068f95f90a570c72b82f6e518110312b696"} +# HEAD of ironic-client "master" as of 02.07.2019 +export BIFROST_IRONIC_CLIENT_VERSION=${BIFROST_IRONIC_CLIENT_VERSION:-"eae60397bfcbed322b2121f77c35ac74d0c6b74c"} +# HEAD of ironic-inspector "master" as of 02.07.2019 +export BIFROST_IRONIC_INSPECTOR_VERSION=${BIFROST_IRONIC_INSPECTOR_VERSION:-"0b38536d1c9ab92952e6ecd069ea13facf012830"} +# HEAD of ironic-inspector-client "master" as of 02.07.2019 +export BIFROST_IRONIC_INSPECTOR_CLIENT_VERSION=${BIFROST_IRONIC_INSPECTOR_CLIENT_VERSION:-"81ae133bd570ea7359b4797ee5699d2d4233b445"} +# HEAD of osa "stable/rocky" as of 04.01.2019 +export OPENSTACK_OSA_VERSION=${OPENSTACK_OSA_VERSION:-"2087cd98f28b35f655ca398d25d2a6c71e38328e"} +export OPENSTACK_OSH_VERSION="rocky" +# HEAD of osh "master" as of 17.07.2019 +export OSH_VERSION=${OSH_VERSION:-"dadf9946e076df2b09556f4a18107dc487788cdd"} +# HEAD of osh-infra "master" as of 16.07.2019 +export OSH_INFRA_VERSION=${OSH_INFRA_VERSION:-"e96bdd9fb6235573acf5d4d1d019dca1e1446b7d"} export KEEPALIVED_VERSION=$(grep -E '.*name: keepalived' -A 3 \ ${XCI_PATH}/xci/installer/osa/files/ansible-role-requirements.yml \ | tail -n1 | sed -n 's/\(^.*: \)\([0-9a-z].*$\)/\2/p') export HAPROXY_VERSION=$(grep -E '.*name: haproxy_server' -A 3 \ 
${XCI_PATH}/xci/installer/osa/files/ansible-role-requirements.yml \ | tail -n1 | sed -n 's/\(^.*: \)\([0-9a-z].*$\)/\2/p') -# Kubespray release v2.6.0 dated 10.08.2018 -# didn't bump to the tip of master due to open issues with the k8s v1.11.2 -export KUBESPRAY_VERSION=${KUBESPRAY_VERSION:-"8b3ce6e418ccf48171eb5b3888ee1af84f8d71ba"} +# Kubespray release v2.11.0 dated 31.08.2019 +export KUBESPRAY_VERSION=${KUBESPRAY_VERSION:-"v2.11.0"} +# Kubernetes version supported by the pinned kubespray version +# this is needed for pulling in kubectl +export KUBERNETES_VERSION=${KUBERNETES_VERSION:-"v1.15.3"} diff --git a/xci/config/user-vars b/xci/config/user-vars index 2625de24..d3d7b2f1 100755 --- a/xci/config/user-vars +++ b/xci/config/user-vars @@ -34,6 +34,14 @@ export XCI_CEPH_ENABLED=${XCI_CEPH_ENABLED:-false} # export INSTALLER_TYPE="kubespray" export INSTALLER_TYPE=${INSTALLER_TYPE:-osa} +#Wait upstream in openstack-helm (OSH) to support opensuse +if [ "$XCI_DISTRO" == "opensuse" ] && [ "$INSTALLER_TYPE" == "osh" ]; then + export XCI_DISTRO=ubuntu-bionic + export OSH_DISTRO=opensuse +elif [ "$XCI_DISTRO" == "ubuntu" ] && [ "$INSTALLER_TYPE" == "osh" ]; then + export OSH_DISTRO=ubuntu +fi + #------------------------------------------------------------------------------- # Set DEPLOYMENT #------------------------------------------------------------------------------- @@ -53,6 +61,6 @@ export INFRA_DEPLOYMENT=${INFRA_DEPLOYMENT:-bifrost} export XCI_ANSIBLE_PARAMS=${XCI_ANSIBLE_PARAMS:-""} export RUN_TEMPEST=${RUN_TEMPEST:-false} export CORE_OPENSTACK_INSTALL=${CORE_OPENSTACK_INSTALL:-false} -export BIFROST_USE_PREBUILT_IMAGES=${BIFROST_USE_PREBUILT_IMAGES:-false} +export BIFROST_CREATE_IMAGE_VIA_DIB=${BIFROST_CREATE_IMAGE_VIA_DIB:-true} # Set this to to true to force XCI to re-create the target OS images export CLEAN_DIB_IMAGES=${CLEAN_DIB_IMAGES:-false} diff --git a/xci/files/xci-destroy-env.sh b/xci/files/xci-destroy-env.sh index 96148468..058d6569 100755 --- 
a/xci/files/xci-destroy-env.sh +++ b/xci/files/xci-destroy-env.sh @@ -21,6 +21,8 @@ rm -rf /opt/stack # HOME is normally set by sudo -H rm -rf ${HOME}/.config/openstack rm -rf ${HOME}/.ansible +# keepalived role fails ansible lint when cached +rm -rf ${HOME}/releng-xci/xci/playbooks/roles/keepalived # Wipe repos rm -rf ${XCI_CACHE}/repos diff --git a/xci/files/xci-lib.sh b/xci/files/xci-lib.sh index cb0751da..860153b9 100644 --- a/xci/files/xci-lib.sh +++ b/xci/files/xci-lib.sh @@ -52,7 +52,7 @@ function bootstrap_xci_env() { source "$XCI_PATH/xci/installer/${INSTALLER_TYPE}/env" &>/dev/null || true # source xci configuration source $XCI_PATH/xci/config/env-vars - # source the baremetal variable + # baremetal variable to true if the vendor in the pdf is not libvirt grep -o vendor.* ${PDF} | grep -q libvirt && export BAREMETAL=false || export BAREMETAL=true } @@ -135,7 +135,7 @@ function install_ansible() { [curl]=curl ) EXTRA_PKG_DEPS=( apt-utils ) - sudo apt-get update + sudo apt-get update -qq > /dev/null ;; rhel|fedora|centos) @@ -158,7 +158,7 @@ function install_ansible() { [wget]=wget [curl]=curl ) - sudo $PKG_MANAGER updateinfo + sudo $PKG_MANAGER updateinfo > /dev/null EXTRA_PKG_DEPS=( deltarpm ) ;; @@ -172,14 +172,7 @@ function install_ansible() { install_map+=(${EXTRA_PKG_DEPS[@]} ) - ${INSTALLER_CMD} ${install_map[@]} - - # Note(cinerama): If pip is linked to pip3, the rest of the install - # won't work. Remove the alternatives. This is due to ansible's - # python 2.x requirement. - if [[ $(readlink -f /etc/alternatives/pip) =~ "pip3" ]]; then - sudo -H update-alternatives --remove pip $(readlink -f /etc/alternatives/pip) - fi + ${INSTALLER_CMD} ${install_map[@]} > /dev/null # We need to prepare our virtualenv now virtualenv --quiet --no-site-packages ${XCI_VENV} @@ -189,7 +182,7 @@ function install_ansible() { # We are inside the virtualenv now so we should be good to use pip and python from it. 
pip -q install --upgrade pip==9.0.3 # We need a version which supports the '-c' parameter - pip -q install --upgrade -c $uc -c $osa_uc ara virtualenv pip setuptools shade ansible==$XCI_ANSIBLE_PIP_VERSION ansible-lint==3.4.21 + pip -q install --upgrade -c $uc -c $osa_uc ara==0.16.4 virtualenv pip setuptools shade ansible==$XCI_ANSIBLE_PIP_VERSION ansible-lint==3.4.21 ara_location=$(python -c "import os,ara; print(os.path.dirname(ara.__file__))") export ANSIBLE_CALLBACK_PLUGINS="/etc/ansible/roles/plugins/callback:${ara_location}/plugins/callbacks" @@ -197,9 +190,9 @@ function install_ansible() { ansible_lint() { set -eu - local playbooks_dir=(xci/playbooks xci/installer/osa/playbooks xci/installer/kubespray/playbooks) + local playbooks_dir=(xci/playbooks xci/installer/osa/playbooks xci/installer/kubespray/playbooks xci/installer/osh/playbooks) # Extract role from scenario information - local testing_role=$(sed -n "/^- scenario: ${DEPLOY_SCENARIO}/,/^$/p" ${XCI_PATH}/xci/opnfv-scenario-requirements.yml | grep role | rev | cut -d '/' -f -1 | rev) + local testing_role=$(sed -n "/^- scenario: ${DEPLOY_SCENARIO}$/,/^$/p" ${XCI_PATH}/xci/opnfv-scenario-requirements.yml | grep role | rev | cut -d '/' -f -1 | rev) # clear XCI_CACHE rm -rf ${XCI_CACHE}/repos/openstack-ansible-tests @@ -270,7 +263,7 @@ submit_bug_report() { echo "xci installer: $INSTALLER_TYPE" echo "xci scenario: $DEPLOY_SCENARIO" echo "Environment variables:" - env | grep --color=never '\(OPNFV\|XCI\|INSTALLER_TYPE\|OPENSTACK\|SCENARIO\|ANSIBLE\)' + env | grep --color=never '\(OPNFV\|XCI\|INSTALLER_TYPE\|OPENSTACK\|SCENARIO\|ANSIBLE\|BIFROST\|DIB\)' echo "-------------------------------------------------------------------------" } @@ -293,6 +286,7 @@ log_xci_information() { [[ "$INFRA_DEPLOYMENT" == "bifrost" ]] && echo "openstack/bifrost version: $OPENSTACK_BIFROST_VERSION" [[ "$INSTALLER_TYPE" == "osa" ]] && echo "openstack/openstack-ansible version: $OPENSTACK_OSA_VERSION" [[ "$INSTALLER_TYPE" == 
"kubespray" ]] && echo "kubespray version: $KUBESPRAY_VERSION" + [[ "$INSTALLER_TYPE" == "osh" ]] && echo "kubespray version: $KUBESPRAY_VERSION" echo "-------------------------------------------------------------------------" } diff --git a/xci/infra/bifrost/infra-provision.sh b/xci/infra/bifrost/infra-provision.sh index e60e9de0..b0617733 100644 --- a/xci/infra/bifrost/infra-provision.sh +++ b/xci/infra/bifrost/infra-provision.sh @@ -64,14 +64,11 @@ ansible-playbook ${XCI_ANSIBLE_PARAMS} \ -e ipv4_gateway=192.168.122.1 \ -e wait_timeout=3600 \ -e enable_keystone=false \ - -e ironicinspector_source_install=true \ -e ironicinspector_git_branch=${BIFROST_IRONIC_INSPECTOR_VERSION:-master} \ - -e ironicinspectorclient_source_install=true \ -e ironicinspectorclient_git_branch=${BIFROST_IRONIC_INSPECTOR_CLIENT_VERSION:-master} \ - -e ironicclient_source_install=true \ -e ironicclient_git_branch=${BIFROST_IRONIC_CLIENT_VERSION:-master} \ -e ironic_git_branch=${BIFROST_IRONIC_VERSION:-master} \ - -e use_prebuilt_images=${BIFROST_USE_PREBUILT_IMAGES:-false} \ + -e create_image_via_dib=${BIFROST_CREATE_IMAGE_VIA_DIB:-true} \ -e xci_distro=${XCI_DISTRO} \ -e ironic_url="http://192.168.122.2:6385/" \ ${BIFROST_ROOT_DIR}/playbooks/opnfv-virtual.yml diff --git a/xci/infra/bifrost/playbooks/opnfv-virtual.yml b/xci/infra/bifrost/playbooks/opnfv-virtual.yml index 313919ba..f97eae4b 100644 --- a/xci/infra/bifrost/playbooks/opnfv-virtual.yml +++ b/xci/infra/bifrost/playbooks/opnfv-virtual.yml @@ -52,7 +52,7 @@ mode: '0755' owner: 'root' group: 'root' - when: use_prebuilt_images | bool == true + when: create_image_via_dib | bool == false - name: Ensure /etc/hosts has good defaults lineinfile: create: yes @@ -101,7 +101,6 @@ when: - create_image_via_dib | bool == true - transform_boot_image | bool == false - - use_prebuilt_images | bool == false - role: bifrost-keystone-client-config clouds: bifrost: @@ -142,7 +141,7 @@ name: bifrost-configdrives-dynamic private: True vars: - 
ipv4_namesever: "{{ ipv4_nameserver | ((lookup('env','http_proxy') != '') | ternary('192.168.122.1', '8.8.8.8')) }}" + ipv4_nameserver: "{{ host_info[inventory_hostname]['public']['dns'] | list }}" delegate_to: opnfv - import_role: name: bifrost-deploy-nodes-dynamic diff --git a/xci/infra/bifrost/playbooks/wait-for-baremetal.yml b/xci/infra/bifrost/playbooks/wait-for-baremetal.yml index cb42cc49..96aab29c 100644 --- a/xci/infra/bifrost/playbooks/wait-for-baremetal.yml +++ b/xci/infra/bifrost/playbooks/wait-for-baremetal.yml @@ -9,9 +9,9 @@ gather_facts: False tasks: - name: "Wait for nodes to reboot." - wait_for: state=stopped port=22 host={{ ipv4_address }} timeout=240 + wait_for: state=stopped port=22 host={{ ipv4_address }} timeout=5000 delegate_to: opnfv - name: "Wait for nodes to become available." - wait_for: state=started port=22 host={{ ipv4_address }} timeout=3600 + wait_for: state=started port=22 host={{ ipv4_address }} timeout=5000 delegate_to: opnfv diff --git a/xci/installer/kubespray/deploy.sh b/xci/installer/kubespray/deploy.sh index 48ef7456..af80b38f 100755 --- a/xci/installer/kubespray/deploy.sh +++ b/xci/installer/kubespray/deploy.sh @@ -33,6 +33,20 @@ echo "-----------------------------------------------------------------------" echo "Info: Configured localhost for kubespray" #------------------------------------------------------------------------------- +# Configure installer +#------------------------------------------------------------------------------- +# TODO: summarize what this playbook does +#------------------------------------------------------------------------------- + +echo "Info: Configuring kubespray installer" +echo "-----------------------------------------------------------------------" +cd $K8_XCI_PLAYBOOKS +ansible-playbook ${XCI_ANSIBLE_PARAMS} \ + -i ${XCI_PLAYBOOKS}/dynamic_inventory.py configure-installer.yml +echo "-----------------------------------------------------------------------" +echo "Info: Configured 
kubespray installer" + +#------------------------------------------------------------------------------- # Configure deployment host, opnfv #------------------------------------------------------------------------------- # This playbook @@ -75,7 +89,7 @@ echo "-----------------------------------------------------------------------" ssh root@$OPNFV_HOST_IP "set -o pipefail; export XCI_FLAVOR=$XCI_FLAVOR; export INSTALLER_TYPE=$INSTALLER_TYPE; \ export IDF=/root/releng-xci/xci/var/idf.yml; export PDF=/root/releng-xci/xci/var/pdf.yml; \ cd releng-xci/.cache/repos/kubespray/; ansible-playbook \ - -i opnfv_inventory/dynamic_inventory.py cluster.yml -b | tee setup-kubernetes.log" + -i inventory/opnfv/dynamic_inventory.py cluster.yml -b | tee setup-kubernetes.log" scp root@$OPNFV_HOST_IP:~/releng-xci/.cache/repos/kubespray/setup-kubernetes.log \ $LOG_PATH/setup-kubernetes.log @@ -88,19 +102,15 @@ echo "-----------------------------------------------------------------------" echo "Info: Kubernetes installation is successfully completed!" echo "-----------------------------------------------------------------------" -# Configure the kubernetes authentication in opnfv host. In future releases -# kubectl is no longer an artifact so we should not fail if it's not available. -# This needs to be removed in the future -ssh root@$OPNFV_HOST_IP "mkdir -p ~/.kube/;\ - cp -f ~/admin.conf ~/.kube/config; \ - cp -f ~/kubectl /usr/local/bin || true" - #------------------------------------------------------------------------------- # Execute post-installation tasks #------------------------------------------------------------------------------- # Playbook post.yml is used in order to execute any post-deployment tasks that # are required for the scenario under test. 
#------------------------------------------------------------------------------- +# copy admin.conf +ssh root@$OPNFV_HOST_IP "mkdir -p ~/.kube/; \ + cp -f ~/admin.conf ~/.kube/config" echo "-----------------------------------------------------------------------" echo "Info: Running post-deployment scenario role" echo "-----------------------------------------------------------------------" @@ -119,16 +129,26 @@ echo "-----------------------------------------------------------------------" echo "Info: Kubernetes login details" echo "-----------------------------------------------------------------------" echo -# Get the dashborad URL -DASHBOARD_SERVICE=$(ssh root@$OPNFV_HOST_IP "kubectl get service -n kube-system |grep kubernetes-dashboard") +# Get the dashboard URL +if ssh-keygen -f "/home/opnfv/.ssh/known_hosts" -F $OPNFV_HOST_IP; +then +ssh-keygen -f "/home/opnfv/.ssh/known_hosts" -R $OPNFV_HOST_IP; +echo "known_hosts entry from opnfv host from previous deployment found and deleted" +fi +DASHBOARD_SERVICE=$(ssh -q -o StrictHostKeyChecking=no root@$OPNFV_HOST_IP "kubectl get service -n kube-system |grep kubernetes-dashboard") DASHBOARD_PORT=$(echo ${DASHBOARD_SERVICE} | awk '{print $5}' |awk -F "[:/]" '{print $2}') KUBER_SERVER_URL=$(ssh root@$OPNFV_HOST_IP "grep -r server ~/.kube/config") echo "Info: Kubernetes Dashboard URL:" echo $KUBER_SERVER_URL | awk '{print $2}'| sed -n "s#:[0-9]*\$#:$DASHBOARD_PORT#p" -# Get the dashborad user and password +# Get the dashboard user and password MASTER_IP=$(echo ${KUBER_SERVER_URL} | awk '{print $2}' |awk -F "[:/]" '{print $4}') -USER_CSV=$(ssh root@$MASTER_IP " cat /etc/kubernetes/users/known_users.csv") +if ssh-keygen -f "/home/opnfv/.ssh/known_hosts" -F $MASTER_IP; +then +ssh-keygen -f "/home/opnfv/.ssh/known_hosts" -R $MASTER_IP; +echo "Info: known_hosts entry for master host from previous deployment found and deleted" +fi +USER_CSV=$(ssh -q -o StrictHostKeyChecking=no root@$MASTER_IP " cat 
/etc/kubernetes/users/known_users.csv") USERNAME=$(echo $USER_CSV |awk -F ',' '{print $2}') PASSWORD=$(echo $USER_CSV |awk -F ',' '{print $1}') echo "Info: Dashboard username: ${USERNAME}" diff --git a/xci/installer/kubespray/playbooks/configure-installer.yml b/xci/installer/kubespray/playbooks/configure-installer.yml new file mode 100644 index 00000000..d88ee55c --- /dev/null +++ b/xci/installer/kubespray/playbooks/configure-installer.yml @@ -0,0 +1,50 @@ +--- +# SPDX-license-identifier: Apache-2.0 +############################################################################## +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +- hosts: localhost + connection: local + vars_files: + - "{{ xci_path }}/xci/var/opnfv.yml" + + tasks: + - name: delete existing kubespray/inventory/opnfv directory + file: + path: "{{ xci_path }}/.cache/repos/kubespray/inventory/opnfv" + state: absent + + - name: copy kubespray/inventory/sample as kubespray/inventory/opnfv + copy: + src: "{{ xci_path }}/.cache/repos/kubespray/inventory/sample/" + dest: "{{ xci_path }}/.cache/repos/kubespray/inventory/opnfv" + + - name: update kubespray k8s-cluster.yml for xci + lineinfile: + path: "{{ xci_path }}/.cache/repos/kubespray/inventory/opnfv/group_vars/k8s-cluster/k8s-cluster.yml" + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + with_items: + - { regexp: "kube_version:.*", line: "kube_version: {{ kubernetes_version }}" } + - { regexp: "kubeconfig_localhost:.*", line: "kubeconfig_localhost: true" } + - { regexp: "kube_basic_auth:.*", line: "kube_basic_auth: true" } + - { regexp: "dashboard_enabled:.*", line: "dashboard_enabled: true" } + +# NOTE(fdegir): the reason for this task to be separate from the task which uses 
lineinfile +# module is that escaping curly braces does not work with with_items. what happens is that +# ansible tries to resolve {{ ansible_env.HOME }} which we don't want since it should point +# to home folder of the user executing this task at runtime. + - name: update kubespray artifacts_dir + lineinfile: + path: "{{ xci_path }}/.cache/repos/kubespray/inventory/opnfv/group_vars/k8s-cluster/k8s-cluster.yml" + regexp: "artifacts_dir:.*" + line: "artifacts_dir: '{{ '{{' }} ansible_env.HOME {{ '}}' }}'" + + - name: change dashboard server type to NodePort + lineinfile: + path: "{{ xci_path }}/.cache/repos/kubespray/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2" + insertafter: 'targetPort' + line: " type: NodePort" diff --git a/xci/installer/kubespray/playbooks/configure-opnfvhost.yml b/xci/installer/kubespray/playbooks/configure-opnfvhost.yml index 82ece961..52e42b06 100644 --- a/xci/installer/kubespray/playbooks/configure-opnfvhost.yml +++ b/xci/installer/kubespray/playbooks/configure-opnfvhost.yml @@ -52,22 +52,20 @@ - "--recursive" - "--files-from={{ xci_cache }}/releng-xci.files" - - name: delete the opnfv_inventory directory - file: - path: "{{ remote_xci_path }}/.cache/repos/kubespray/opnfv_inventory" - state: absent - - - name: make sure kubespray/opnfv_inventory/group_vars/ exist - file: - path: "{{ remote_xci_path }}/.cache/repos/kubespray/opnfv_inventory/group_vars" - state: directory - - - name: copy kubespray inventory directory + - name: link xci dynamic inventory to kubespray/inventory/opnfv directory file: src: "{{ remote_xci_playbooks }}/dynamic_inventory.py" - path: "{{ remote_xci_path }}/.cache/repos/kubespray/opnfv_inventory/dynamic_inventory.py" + path: "{{ remote_xci_path }}/.cache/repos/kubespray/inventory/opnfv/dynamic_inventory.py" state: link + - name: Download kubectl and place it to /usr/local/bin + get_url: + url: "https://storage.googleapis.com/kubernetes-release/release/{{ kubernetes_version }}/bin/linux/amd64/kubectl" + 
dest: /usr/local/bin/kubectl + owner: root + group: root + mode: 0755 + - name: Reload XCI deployment host facts setup: filter: ansible_local @@ -82,15 +80,9 @@ package: name: "{{ (ansible_pkg_mgr == 'zypper') | ternary('dbus-1', 'dbus') }}" state: present - update_cache: "{{ (ansible_pkg_mgr == 'apt') | ternary('yes', omit) }}" + update_cache: "{{ (ansible_pkg_mgr in ['apt', 'zypper']) | ternary('yes', omit) }}" when: xci_flavor == 'aio' - - name: change dashboard server type to NodePort - lineinfile: - path: "{{ remote_xci_path }}/.cache/repos/kubespray/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2" - insertafter: 'targetPort' - line: " type: NodePort" - - name: pip install required packages pip: name: "{{ item.name }}" diff --git a/xci/installer/kubespray/playbooks/configure-targethosts.yml b/xci/installer/kubespray/playbooks/configure-targethosts.yml index 859460c6..2fde9877 100644 --- a/xci/installer/kubespray/playbooks/configure-targethosts.yml +++ b/xci/installer/kubespray/playbooks/configure-targethosts.yml @@ -22,7 +22,7 @@ package: name: "{{ (ansible_pkg_mgr == 'zypper') | ternary('dbus-1', 'dbus') }}" state: present - update_cache: "{{ (ansible_pkg_mgr == 'apt') | ternary('yes', omit) }}" + update_cache: "{{ (ansible_pkg_mgr in ['apt', 'zypper']) | ternary('yes', omit) }}" - hosts: kube-master remote_user: root diff --git a/xci/installer/osa/files/ansible-role-requirements.yml b/xci/installer/osa/files/ansible-role-requirements.yml index fdc466cc..e787aff5 100644 --- a/xci/installer/osa/files/ansible-role-requirements.yml +++ b/xci/installer/osa/files/ansible-role-requirements.yml @@ -7,12 +7,12 @@ # which accompanies this distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## -# these versions are based on the osa commit ff6161e7f08b8ffe9ff6e0552517ea3f10292cf8 on 2018-09-05 -# 
https://review.openstack.org/gitweb?p=openstack/openstack-ansible.git;a=commit;h=ff6161e7f08b8ffe9ff6e0552517ea3f10292cf8 +# these versions are based on the osa commit e41b0c40501ea8906fcbdcc7d37ff6ef0cd5cf02 on 2018-12-11 +# https://git.openstack.org/cgit/openstack/openstack-ansible/commit/?h=refs/heads/stable/rocky&id=e41b0c40501ea8906fcbdcc7d37ff6ef0cd5cf02 - name: ansible-hardening scm: git src: https://git.openstack.org/openstack/ansible-hardening - version: d6d2b22e48f9295522b06bbd6a30b4a7d9168020 + version: 14e6bb6a411b6b03bf258144be66845a5831705c - name: apt_package_pinning scm: git src: https://git.openstack.org/openstack/openstack-ansible-apt_package_pinning @@ -20,23 +20,23 @@ - name: pip_install scm: git src: https://git.openstack.org/openstack/openstack-ansible-pip_install - version: ac497c25107f7e16f8e6a64f16e114706e10eff0 + version: 671e7129ad3dcf20bdda942842f9f76203bf5a5e - name: galera_client scm: git src: https://git.openstack.org/openstack/openstack-ansible-galera_client - version: 680779b74368a0336987d4f956959118db051716 + version: 6dbac51e5b74ffdee429375f6c22739e7a5ef017 - name: galera_server scm: git src: https://git.openstack.org/openstack/openstack-ansible-galera_server - version: 256b7350fff2a0aec2a8921a867c63ecb067d230 + version: 7a7036f6d15ce3117a925217b66cba806034bb96 - name: ceph_client scm: git src: https://git.openstack.org/openstack/openstack-ansible-ceph_client - version: 54301d205b20ee2fb9615addfd6185b62388ab99 + version: 278aaca502533b33b9714393e47b536654055c58 - name: haproxy_server scm: git src: https://git.openstack.org/openstack/openstack-ansible-haproxy_server - version: 3dfda4ca5df51f80147730b1922f81942348f5f0 + version: 6bc259471283162b3cb8ec0c4bc736f81254d050 - name: keepalived scm: git src: https://github.com/evrardjp/ansible-keepalived @@ -44,139 +44,139 @@ - name: lxc_container_create scm: git src: https://git.openstack.org/openstack/openstack-ansible-lxc_container_create - version: 
747c2ecab825347700fb54b07d5324952066188c + version: 14a74f2fb60fa7865cf34f75e3196e802847b9d1 - name: lxc_hosts scm: git src: https://git.openstack.org/openstack/openstack-ansible-lxc_hosts - version: 1c9384fbe90a0d570cd9fc94606804ccc1e9a3d4 + version: 83e20af591b00fc796eba0e0e1c7650faaa20cd7 - name: memcached_server scm: git src: https://git.openstack.org/openstack/openstack-ansible-memcached_server - version: df909d23eac28746933526a806b4b9b6f198ca02 + version: e058c81a44859c7bcd3eeaac49a8f25b423e38a4 - name: openstack_hosts scm: git src: https://git.openstack.org/openstack/openstack-ansible-openstack_hosts - version: cc3b2539c1bf448bb9764c0d6e1775373684e458 + version: 0028cedcccc4913bd1c604404c84be16164d1fe5 - name: os_keystone scm: git src: https://git.openstack.org/openstack/openstack-ansible-os_keystone - version: 50108f68ecc1c7fcde894fdab1b0f682e6179b5a + version: 5a54cc6ba50875c4068e4cdfe3cb23ae1603e257 - name: openstack_openrc scm: git src: https://git.openstack.org/openstack/openstack-ansible-openstack_openrc - version: 6c589d69ea4893c35f2d561aa6a18a30486950e2 + version: 805ef5349db7d8af0132b546ff56a36ec80ea7db - name: os_aodh scm: git src: https://git.openstack.org/openstack/openstack-ansible-os_aodh - version: af239d4a609efd9d2765e6236b9f1c7c1e11acbd + version: 9b8d7483d69e60f4ae71ceb6a3336ff81f355c38 - name: os_barbican scm: git src: https://git.openstack.org/openstack/openstack-ansible-os_barbican - version: 0dbe12c8a4d5296c24edc1c85173fd1338db0cca + version: f9ce44edb809c92735fa093334fa1d79cc538126 - name: os_ceilometer scm: git src: https://git.openstack.org/openstack/openstack-ansible-os_ceilometer - version: 325c9741c42356a7275015b77fad1fa7136a55e9 + version: 221dcccfef3efa1a187678f71c59d81d7e930a92 - name: os_cinder scm: git src: https://git.openstack.org/openstack/openstack-ansible-os_cinder - version: fb50ae8d6b2c0f79f3b39515c3cb55cc8061de23 + version: a824d8d4dc6de6563f186449838e94c69a869e02 - name: os_congress scm: git src: 
https://git.openstack.org/openstack/openstack-ansible-os_congress - version: a182cf9cbfc8927d19499b49200671bf85396339 + version: 0e6ccb63dba466bb1b7a11e94db7a420c716c06d - name: os_designate scm: git src: https://git.openstack.org/openstack/openstack-ansible-os_designate - version: dbfcbaae7fbac66142398eb05b37210e48d9eed2 + version: 74c33e9788607f772d8402c4f5cfc79eb379278b - name: os_glance scm: git src: https://git.openstack.org/openstack/openstack-ansible-os_glance - version: f65fcba32a1fda29708f2c21b6df74bb74ef8726 + version: 7ec6a11b98715530e3cd5adbf682c2834e3122a8 - name: os_gnocchi scm: git src: https://git.openstack.org/openstack/openstack-ansible-os_gnocchi - version: 95e09c8617b12ff6378f51bb95efd43d3e7a174a + version: db881f143223723b38f5d197e8e4b6dd4e057c6f - name: os_heat scm: git src: https://git.openstack.org/openstack/openstack-ansible-os_heat - version: a549d4fc90087c81db02167eec28c6dd02bdd4b0 + version: 14b8927123aa9b0cf47f365c1ab9f82147ce4bdc - name: os_horizon scm: git src: https://git.openstack.org/openstack/openstack-ansible-os_horizon - version: b9ca21a603345eb4bfe26ad0266d6bcd4a601c00 + version: b088034eeaa73ac781fe271588ba03871c88118e - name: os_ironic scm: git src: https://git.openstack.org/openstack/openstack-ansible-os_ironic - version: 635b5cd8316e18b8481c06de513f5db7a02917b9 + version: 6ecf38f1296080a33366528ad40d513539138925 - name: os_magnum scm: git src: https://git.openstack.org/openstack/openstack-ansible-os_magnum - version: 9e08a9f84983344f9183e6602f19d133d63b98de + version: 316f22626d242e33ce56fad367ef3570e0d8ab8b - name: os_neutron scm: git src: https://git.openstack.org/openstack/openstack-ansible-os_neutron - version: 3db0b9ee91f6e8b99eb4ada99fa5ed75fd826e62 + version: 3032836715b4055041554583fa2ed685ab076c25 - name: os_nova scm: git src: https://git.openstack.org/openstack/openstack-ansible-os_nova - version: 2380b70a2eb88e06af3afbde137bcf6d25dee90a + version: 9db5bf5ab6f82c1947d05a1ec7cd6e3ef304760f - name: os_octavia scm: 
git src: https://git.openstack.org/openstack/openstack-ansible-os_octavia - version: 756f755b7141dbba1207635da95fbc66f0513fa4 + version: 508ea6d834153d0eb6da5bd32d10472f483c6dfa - name: os_rally scm: git src: https://git.openstack.org/openstack/openstack-ansible-os_rally - version: b362b5fd64ce44cde00e1527a29d505fcc2d5f54 + version: 8e98112b858ecffbb92c6ae342237af87416b7fa - name: os_sahara scm: git src: https://git.openstack.org/openstack/openstack-ansible-os_sahara - version: 5bf89e2bbd923a2c2854bbe9a7a7ac3e9ea4780b + version: ed7aa2d64a2ea3508c7d88a9e869524fdf0e9353 - name: os_swift scm: git src: https://git.openstack.org/openstack/openstack-ansible-os_swift - version: d4af3504e8dc85981eb748c63e4b4460a7d8cacc + version: a88edf84964819870ef990d25b3bfa514186249a - name: os_tacker scm: git src: https://git.openstack.org/openstack/openstack-ansible-os_tacker - version: 2abe2230c9419072dbea6af3eb879ec989fd5a32 + version: bbce8657c13d2545bf632eb81bb78329a5479798 - name: os_tempest scm: git src: https://git.openstack.org/openstack/openstack-ansible-os_tempest - version: 1d0fa847bfb49eaa259fa7b29f313adbab523371 + version: 08341f4a19b2ed2231b790496c9f7cf2b4eda2e6 - name: os_trove scm: git src: https://git.openstack.org/openstack/openstack-ansible-os_trove - version: a5fe568345187cae713c6911de64dd38c9b477dc + version: eaca0137de0d3d7bd57a68eecfecf52e3171f591 - name: plugins scm: git src: https://git.openstack.org/openstack/openstack-ansible-plugins - version: cda51825f822f3c5545b7789029e1d2a06b17ec8 + version: a84ae0d744047fe41a0c028213de8daa52f72aee - name: rabbitmq_server scm: git src: https://git.openstack.org/openstack/openstack-ansible-rabbitmq_server - version: cbe65114782e0c77bfb168a20043c2e082932059 + version: deccf93bdda1aa873b956418168368284509c99b - name: repo_build scm: git src: https://git.openstack.org/openstack/openstack-ansible-repo_build - version: 8a4f9db4b8524f3388995374dc34e1f206581bc8 + version: 630a6dfdcb46ba719ddb7fd7a4875259c5602b15 - name: 
repo_server scm: git src: https://git.openstack.org/openstack/openstack-ansible-repo_server - version: 3e9048136ba2eb88c6b14daef07960a93da53695 + version: dd143b381b2fb94a3ba435f951e8b9338353a48d - name: rsyslog_client scm: git src: https://git.openstack.org/openstack/openstack-ansible-rsyslog_client - version: 05ba6715240f1191a0d1f364726786ddf8aaaa96 + version: ed5e61c8bc2aabb905918bb2751ae985b1cfe229 - name: rsyslog_server scm: git src: https://git.openstack.org/openstack/openstack-ansible-rsyslog_server - version: e4e527e5a86f1e08599ab04b5ef4b20c076aac4e + version: 9318bafbe60fed5f026c1e216d693bce745b9f99 - name: sshd scm: git src: https://github.com/willshersystems/ansible-sshd - version: 9575d16dd453f4d25e304bb226f0388b3fb1032a + version: d2ba81107ade1cf53c8b93590465c21ad2bc4530 - name: bird scm: git src: https://github.com/logan2211/ansible-bird @@ -192,40 +192,40 @@ - name: resolvconf scm: git src: https://github.com/logan2211/ansible-resolvconf - version: a2ff5ba59b47f96ddddcb7a3a67de93687c317a6 + version: '1.4' - name: ceph-ansible scm: git src: https://github.com/ceph/ceph-ansible - version: bf8f589958450ce07ec19d01fb98176ab50ab71f + version: a5aca6ebbc341feb34b9ec0d73e16aeeedae63ac - name: opendaylight scm: git src: https://github.com/opendaylight/integration-packaging-ansible-opendaylight - version: 4e7d96224ad2a5d27eb7a3dcbc03a098bc45c809 + version: 0aebbc250b34ac5ac14b37bdf9b1a2e1cfaa5a76 - name: haproxy_endpoints scm: git src: https://github.com/logan2211/ansible-haproxy-endpoints version: 8e3a24a35beb16d717072dc83895c5a1f92689fb - name: nspawn_container_create - scm: git src: https://git.openstack.org/openstack/openstack-ansible-nspawn_container_create - version: 74ceba89a64a44080d198d74169fb547beb8a867 -- name: nspawn_hosts scm: git + version: 2bcf03f1cca550731789d5b53c7d0806ef5f5ff7 +- name: nspawn_hosts src: https://git.openstack.org/openstack/openstack-ansible-nspawn_hosts - version: b9183a5f1ee6463411636946feb155b5eb3271ce -- name: 
systemd_service scm: git + version: f69e101b9191682986272b766747f107b8a7a136 +- name: systemd_service src: https://git.openstack.org/openstack/ansible-role-systemd_service - version: fcd0651b9d811345bd4c3bffa059ed3c43b7d766 -- name: systemd_mount scm: git + version: a085a50c338b2eeaa87ed50eaaa22564d7c12968 +- name: systemd_mount src: https://git.openstack.org/openstack/ansible-role-systemd_mount - version: 51e06d3e77328c21f0779ee9c8c3203c31ef0b4f -- name: systemd_networkd scm: git + version: ee6263b3ce6502712ff4d6fb56474066df1773e4 +- name: systemd_networkd src: https://git.openstack.org/openstack/ansible-role-systemd_networkd - version: 3c1c4e43e36e588e4d6525e82066bcae74f9958f -- name: python_venv_build scm: git + version: b024d0a3d97caf06b962a1f19450511b108dc5eb +- name: python_venv_build src: https://git.openstack.org/openstack/ansible-role-python_venv_build - version: 9f58362c59c4ef45516be5c1d9341e5cbb36e1b2 + scm: git + version: 5fdd8e00633f28606fc531a449d741e8c772a9fc diff --git a/xci/installer/osa/files/global-requirement-pins.txt b/xci/installer/osa/files/global-requirement-pins.txt index 0056fa73..ec198a79 100644 --- a/xci/installer/osa/files/global-requirement-pins.txt +++ b/xci/installer/osa/files/global-requirement-pins.txt @@ -5,6 +5,11 @@ # # Use this file with caution! # + +### Pinned for gnocchi's dependency pycradox +# https://github.com/sileht/pycradox/commit/2209f89fd65ecf31bea8eac6405acce2543e7b84 +Cython<0.28 + ### ### These are pinned to ensure exactly the same behaviour forever! 
### ### These pins are updated through the sources-branch-updater script ### diff --git a/xci/installer/osa/files/ha/openstack_user_config.yml b/xci/installer/osa/files/ha/openstack_user_config.yml index 360aa5cb..dc2ec183 100644 --- a/xci/installer/osa/files/ha/openstack_user_config.yml +++ b/xci/installer/osa/files/ha/openstack_user_config.yml @@ -77,18 +77,18 @@ shared-infra_hosts: controller00: ip: 172.29.236.11 controller01: - ip: 172.29.236.12 + ip: 172.29.236.14 controller02: - ip: 172.29.236.13 + ip: 172.29.236.15 # repository (apt cache, python packages, etc) repo-infra_hosts: controller00: ip: 172.29.236.11 controller01: - ip: 172.29.236.12 + ip: 172.29.236.14 controller02: - ip: 172.29.236.13 + ip: 172.29.236.15 # load balancer # Ideally the load balancer should not use the Infrastructure hosts. @@ -97,9 +97,9 @@ haproxy_hosts: controller00: ip: 172.29.236.11 controller01: - ip: 172.29.236.12 + ip: 172.29.236.14 controller02: - ip: 172.29.236.13 + ip: 172.29.236.15 # rsyslog server # log_hosts: @@ -115,18 +115,18 @@ identity_hosts: controller00: ip: 172.29.236.11 controller01: - ip: 172.29.236.12 + ip: 172.29.236.14 controller02: - ip: 172.29.236.13 + ip: 172.29.236.15 # cinder api services storage-infra_hosts: controller00: ip: 172.29.236.11 controller01: - ip: 172.29.236.12 + ip: 172.29.236.14 controller02: - ip: 172.29.236.13 + ip: 172.29.236.15 # glance # The settings here are repeated for each infra host. 
@@ -139,27 +139,27 @@ image_hosts: container_vars: limit_container_types: glance glance_nfs_client: - - server: "172.29.244.14" + - server: "172.29.244.12" remote_path: "/images" local_path: "/var/lib/glance/images" type: "nfs" options: "_netdev,auto" controller01: - ip: 172.29.236.12 + ip: 172.29.236.14 container_vars: limit_container_types: glance glance_nfs_client: - - server: "172.29.244.14" + - server: "172.29.244.12" remote_path: "/images" local_path: "/var/lib/glance/images" type: "nfs" options: "_netdev,auto" controller02: - ip: 172.29.236.13 + ip: 172.29.236.15 container_vars: limit_container_types: glance glance_nfs_client: - - server: "172.29.244.14" + - server: "172.29.244.12" remote_path: "/images" local_path: "/var/lib/glance/images" type: "nfs" @@ -170,43 +170,43 @@ compute-infra_hosts: controller00: ip: 172.29.236.11 controller01: - ip: 172.29.236.12 + ip: 172.29.236.14 controller02: - ip: 172.29.236.13 + ip: 172.29.236.15 # heat orchestration_hosts: controller00: ip: 172.29.236.11 controller01: - ip: 172.29.236.12 + ip: 172.29.236.14 controller02: - ip: 172.29.236.13 + ip: 172.29.236.15 # horizon dashboard_hosts: controller00: ip: 172.29.236.11 controller01: - ip: 172.29.236.12 + ip: 172.29.236.14 controller02: - ip: 172.29.236.13 + ip: 172.29.236.15 # neutron server, agents (L3, etc) network_hosts: controller00: ip: 172.29.236.11 controller01: - ip: 172.29.236.12 + ip: 172.29.236.14 controller02: - ip: 172.29.236.13 + ip: 172.29.236.15 # nova hypervisors compute_hosts: compute00: - ip: 172.29.236.14 + ip: 172.29.236.12 compute01: - ip: 172.29.236.15 + ip: 172.29.236.13 # cinder volume hosts (NFS-backed) # The settings here are repeated for each infra host. 
@@ -225,10 +225,10 @@ storage_hosts: nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120" nfs_shares_config: /etc/cinder/nfs_shares shares: - - ip: "172.29.244.14" + - ip: "172.29.244.12" share: "/volumes" controller01: - ip: 172.29.236.12 + ip: 172.29.236.14 container_vars: cinder_backends: limit_container_types: cinder_volume @@ -238,10 +238,10 @@ storage_hosts: nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120" nfs_shares_config: /etc/cinder/nfs_shares shares: - - ip: "172.29.244.14" + - ip: "172.29.244.12" share: "/volumes" controller02: - ip: 172.29.236.13 + ip: 172.29.236.15 container_vars: cinder_backends: limit_container_types: cinder_volume @@ -251,5 +251,5 @@ storage_hosts: nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120" nfs_shares_config: /etc/cinder/nfs_shares shares: - - ip: "172.29.244.14" + - ip: "172.29.244.12" share: "/volumes" diff --git a/xci/installer/osa/files/openstack_services.yml b/xci/installer/osa/files/openstack_services.yml index 2c61cdc9..64718e33 100644 --- a/xci/installer/osa/files/openstack_services.yml +++ b/xci/installer/osa/files/openstack_services.yml @@ -31,48 +31,48 @@ ## Global Requirements requirements_git_repo: https://git.openstack.org/openstack/requirements -requirements_git_install_branch: 42aae93777c9d0216dad52995efcb623e5f05555 # HEAD of "stable/rocky" as of 17.08.2018 +requirements_git_install_branch: 32f8fa388d3b8367320a3308a350f28254a82d65 # HEAD of "stable/rocky" as of 11.12.2018 requirements_git_track_branch: stable/rocky ## Aodh service aodh_git_repo: https://git.openstack.org/openstack/aodh -aodh_git_install_branch: 86a19d58cc8003cf34549fb3a64fd3fd356b7ad2 # HEAD of "stable/rocky" as of 27.08.2018 +aodh_git_install_branch: ae5e710cd5ade867ebd0e6666bad95f82d130210 # HEAD of "stable/rocky" as of 11.12.2018 aodh_git_project_group: aodh_all aodh_git_track_branch: stable/rocky ## Barbican service barbican_git_repo: https://git.openstack.org/openstack/barbican 
-barbican_git_install_branch: 3f6cccae4982e5a8fe1ca5a3fb198e1481a5891c # HEAD of "stable/rocky" as of 27.08.2018 +barbican_git_install_branch: 0a1a9917e791d0c6fc8534a052700af5f5cbe9d0 # HEAD of "stable/rocky" as of 11.12.2018 barbican_git_project_group: barbican_all barbican_git_track_branch: stable/rocky ## Ceilometer service ceilometer_git_repo: https://git.openstack.org/openstack/ceilometer -ceilometer_git_install_branch: 786f90c314ff89f23ebc1227c800bedbeaec204b # HEAD of "stable/rocky" as of 27.08.2018 -ceilometer_git_project_group: ceilometer_all +ceilometer_git_install_branch: 018ff32fe0200a041297c386eb8b381f1bec0e71 # HEAD of "stable/rocky" as of 11.12.2018 +ceilometer_git_project_group: all ceilometer_git_track_branch: stable/rocky ## Cinder service cinder_git_repo: https://git.openstack.org/openstack/cinder -cinder_git_install_branch: 707b449c47a86dd40f8d264f012061549172389e # HEAD of "stable/rocky" as of 27.08.2018 +cinder_git_install_branch: 8dbf5d7882a6271514a3075a02cd080e44b709d5 # HEAD of "stable/rocky" as of 11.12.2018 cinder_git_project_group: cinder_all cinder_git_track_branch: stable/rocky ## Designate service designate_git_repo: https://git.openstack.org/openstack/designate -designate_git_install_branch: 3a33ca85eafc5310afe6578f7c0a1ed8235cd477 # HEAD of "stable/rocky" as of 27.08.2018 +designate_git_install_branch: af1bb8a36a704bb1a226fe5154f828e152ef23e1 # HEAD of "stable/rocky" as of 11.12.2018 designate_git_project_group: designate_all designate_git_track_branch: stable/rocky ## Horizon Designate dashboard plugin designate_dashboard_git_repo: https://git.openstack.org/openstack/designate-dashboard -designate_dashboard_git_install_branch: 630cc303cfd25458fc0a8bc68b0438e2b3721616 # HEAD of "stable/rocky" as of 27.08.2018 +designate_dashboard_git_install_branch: faa67c87ad3cd5563da722f13b3adaee5bfe350f # HEAD of "stable/rocky" as of 11.12.2018 designate_dashboard_git_project_group: horizon_all designate_dashboard_git_track_branch: stable/rocky 
@@ -87,14 +87,14 @@ dragonflow_git_track_branch: None ## Glance service glance_git_repo: https://git.openstack.org/openstack/glance -glance_git_install_branch: 488d2e4fccc399234fd62c127c8ece262111fe93 # HEAD of "stable/rocky" as of 27.08.2018 +glance_git_install_branch: 4982c24f0aeb64f9d20159e543a90e31fc325dce # HEAD of "stable/rocky" as of 11.12.2018 glance_git_project_group: glance_all glance_git_track_branch: stable/rocky ## Heat service heat_git_repo: https://git.openstack.org/openstack/heat -heat_git_install_branch: 207498dea1e9ca23485511cadbbdda85079e391f # HEAD of "stable/rocky" as of 27.08.2018 +heat_git_install_branch: 98eea44d5d91b74e1ab28c052e4fbc4b533d5f83 # HEAD of "stable/rocky" as of 11.12.2018 heat_git_project_group: heat_all heat_git_track_branch: stable/rocky @@ -107,89 +107,87 @@ heat_dashboard_git_track_branch: None ## Horizon service horizon_git_repo: https://git.openstack.org/openstack/horizon -horizon_git_install_branch: 9ced2fff0449d02bf1a5210c36b8635cc981d79e # HEAD of "stable/rocky" as of 27.08.2018 +horizon_git_install_branch: 0ccfce882749998f3a6a7f9bfc6fa74ea346ca53 # HEAD of "stable/rocky" as of 11.12.2018 horizon_git_project_group: horizon_all horizon_git_track_branch: stable/rocky ## Horizon Ironic dashboard plugin ironic_dashboard_git_repo: https://git.openstack.org/openstack/ironic-ui -ironic_dashboard_git_install_branch: 36840aaa0da8053d2e4032c060a6a45af224572b # HEAD of "stable/rocky" as of 27.08.2018 +ironic_dashboard_git_install_branch: c700f3a613f3d78875caf7588e7bdf42a5db83cb # HEAD of "stable/rocky" as of 11.12.2018 ironic_dashboard_git_project_group: horizon_all ironic_dashboard_git_track_branch: stable/rocky ## Horizon Magnum dashboard plugin magnum_dashboard_git_repo: https://git.openstack.org/openstack/magnum-ui -magnum_dashboard_git_install_branch: cfd93ba5bce986903af3ce113f3ce969390f9d37 # HEAD of "stable/rocky" as of 17.08.2018 +magnum_dashboard_git_install_branch: 2e9cb253eaee45a57f07369e432369dbff8fc173 # HEAD of 
"stable/rocky" as of 11.12.2018 magnum_dashboard_git_project_group: horizon_all magnum_dashboard_git_track_branch: stable/rocky ## Horizon LBaaS dashboard plugin neutron_lbaas_dashboard_git_repo: https://git.openstack.org/openstack/neutron-lbaas-dashboard -neutron_lbaas_dashboard_git_install_branch: 0d73ac97ac3fb19d13fc0d0981f005a94cfed224 # HEAD of "stable/rocky" as of 27.08.2018 +neutron_lbaas_dashboard_git_install_branch: 84fd20a474e8165ddbf5cf4bd14b7eb7da63ed41 # HEAD of "stable/rocky" as of 11.12.2018 neutron_lbaas_dashboard_git_project_group: horizon_all neutron_lbaas_dashboard_git_track_branch: stable/rocky ## Horizon FWaaS dashboard plugin neutron_fwaas_dashboard_git_repo: https://git.openstack.org//openstack/neutron-fwaas-dashboard -neutron_fwaas_dashboard_git_install_branch: e7d3433fe8b671dd6ede65e81e831a0abe1bc859 # HEAD of "stable/rocky" as of 27.08.2018 +neutron_fwaas_dashboard_git_install_branch: 4adf5599211ef90696da94b2fee3aac730f3b7bc # HEAD of "stable/rocky" as of 11.12.2018 neutron_fwaas_dashboard_git_project_group: horizon_all neutron_fwaas_dashboard_git_track_branch: stable/rocky ## Horizon Sahara dashboard plugin sahara_dashboard_git_repo: https://git.openstack.org/openstack/sahara-dashboard -sahara_dashboard_git_install_branch: db8283502a9fe1d3b540dbacf9e607255d81a110 # HEAD of "stable/rocky" as of 27.08.2018 +sahara_dashboard_git_install_branch: 6e3f7538ce7779612d8e82b069597c06c2225a77 # HEAD of "stable/rocky" as of 11.12.2018 sahara_dashboard_git_project_group: horizon_all sahara_dashboard_git_track_branch: stable/rocky ## Keystone service keystone_git_repo: https://git.openstack.org/openstack/keystone -keystone_git_install_branch: c767e254979c96858a81cc4bbe8d9773d4dd539b # HEAD of "stable/rocky" as of 27.08.2018 +keystone_git_install_branch: 295ccda8190b39a505c397d2f4d9e4896dc538cf # HEAD of "stable/rocky" as of 11.12.2018 keystone_git_project_group: keystone_all keystone_git_track_branch: stable/rocky ## Neutron service neutron_git_repo: 
https://git.openstack.org/openstack/neutron -neutron_git_install_branch: b6314ea6c3df11c6b27cd39844c53b65be0d9060 # HEAD of "stable/rocky" as of 27.08.2018 +neutron_git_install_branch: ae2ef681403d1f103170ea70df1010f006244752 # HEAD of "stable/rocky" as of 11.12.2018 neutron_git_project_group: neutron_all neutron_git_track_branch: stable/rocky neutron_lbaas_git_repo: https://git.openstack.org/openstack/neutron-lbaas -neutron_lbaas_git_install_branch: 594ccd122bb2e4750392d92427fe1b151c269403 # HEAD of "stable/rocky" as of 27.08.2018 +neutron_lbaas_git_install_branch: 1353bad713fd97418a9984016da49df8cfa8825b # HEAD of "stable/rocky" as of 11.12.2018 neutron_lbaas_git_project_group: neutron_all neutron_lbaas_git_track_branch: stable/rocky neutron_vpnaas_git_repo: https://git.openstack.org/openstack/neutron-vpnaas -neutron_vpnaas_git_install_branch: 1f7708beb71ad371b47a3ee8e552c4f22d96b272 # HEAD of "stable/rocky" as of 27.08.2018 +neutron_vpnaas_git_install_branch: 0876f4dfe7e2f57305110e035efa753bfb711a3f # HEAD of "stable/rocky" as of 11.12.2018 neutron_vpnaas_git_project_group: neutron_all neutron_vpnaas_git_track_branch: stable/rocky neutron_fwaas_git_repo: https://git.openstack.org/openstack/neutron-fwaas -neutron_fwaas_git_install_branch: 7567c42e99b298201b30593699d1e180e5bfa759 # HEAD of "stable/rocky" as of 27.08.2018 +neutron_fwaas_git_install_branch: 5ece265b65247ee81a9335d5a685fa9f0a68b0fc # HEAD of "stable/rocky" as of 11.12.2018 neutron_fwaas_git_project_group: neutron_all neutron_fwaas_git_track_branch: stable/rocky neutron_dynamic_routing_git_repo: https://git.openstack.org/openstack/neutron-dynamic-routing -neutron_dynamic_routing_git_install_branch: ac63f126c6bd0ab12d6cd80077023c3e5c264e98 # HEAD of "stable/rocky" as of 27.08.2018 +neutron_dynamic_routing_git_install_branch: ae3a01ca1fd6270fc27b3c6bae11afc0f17563d5 # HEAD of "stable/rocky" as of 11.12.2018 neutron_dynamic_routing_git_project_group: neutron_all neutron_dynamic_routing_git_track_branch: 
stable/rocky # Networking Calico is following master networking_calico_git_repo: https://git.openstack.org/openstack/networking-calico -networking_calico_git_install_branch: 501fc98c428b8dd6b9dd7bf6908dbcd7b94ead5c # HEAD of "master" as of 27.08.2018 +networking_calico_git_install_branch: 79c7e00360ddb5fd3c38e60e5bbb3399928d9172 # HEAD of "master" as of 11.12.2018 networking_calico_git_project_group: neutron_all networking_calico_git_track_branch: stable/rocky -# ODL is frozen until further notice due to -# https://github.com/openstack/networking-odl/commit/391c1d89ef2b8133d3aafbe7612c7908be106e73#diff-b4ef698db8ca845e5845c4618278f29a networking_odl_git_repo: https://git.openstack.org/openstack/networking-odl -networking_odl_git_install_branch: 53ff740b2a78626d5b077278997bdcec6b1b0892 # FROZEN HEAD of "stable/rocky" as of 31.03.2018 +networking_odl_git_install_branch: 1cef1f0939a405eea4cb87e712794e8fa26b5166 # HEAD of "stable/rocky" as of 11.12.2018 networking_odl_git_project_group: neutron_all -networking_odl_git_track_branch: None +networking_odl_git_track_branch: stable/rocky networking_ovn_git_repo: https://git.openstack.org/openstack/networking-ovn -networking_ovn_git_install_branch: 44dcb509be15a805e00684cce2fabfc4c5265ad7 # HEAD of "stable/rocky" as of 17.08.2018 +networking_ovn_git_install_branch: e077aa93b1dc244b59864236d7c673f852e4e3ba # HEAD of "stable/rocky" as of 11.12.2018 networking_ovn_git_project_group: neutron_all # BGPVPN is frozen until further notice due to @@ -200,21 +198,21 @@ networking_bgpvpn_git_project_group: neutron_all networking_bgpvpn_git_track_branch: None networking_sfc_git_repo: https://git.openstack.org/openstack/networking-sfc -networking_sfc_git_install_branch: 4c38303620c8a3f38d7261a64ce8532979bf7560 # HEAD of "stable/rocky" as of 27.08.2018 +networking_sfc_git_install_branch: f0eddef3d53bbad417038f9d32b196ace2ebd0b2 # HEAD of "stable/rocky" as of 11.12.2018 networking_sfc_git_project_group: neutron_all 
networking_sfc_git_track_branch: stable/rocky ## Nova service nova_git_repo: https://git.openstack.org/openstack/nova -nova_git_install_branch: 5bff489231c16e62f081267a5b433745dba12ded # HEAD of "stable/rocky" as of 27.08.2018 +nova_git_install_branch: 8066142a1e381536291232250b3237e5c01ed1f4 # HEAD of "stable/rocky" as of 11.12.2018 nova_git_project_group: nova_all nova_git_track_branch: stable/rocky ## PowerVM Virt Driver nova_powervm_git_repo: https://git.openstack.org/openstack/nova-powervm -nova_powervm_git_install_branch: 984b122668161703eee33918d570c61ae9c5b1ca # HEAD of "stable/rocky" as of 27.08.2018 +nova_powervm_git_install_branch: 984b122668161703eee33918d570c61ae9c5b1ca # HEAD of "stable/rocky" as of 11.12.2018 nova_powervm_git_project_group: nova_all nova_powervm_git_track_branch: stable/rocky @@ -229,14 +227,14 @@ nova_lxd_git_track_branch: None ## Sahara service sahara_git_repo: https://git.openstack.org/openstack/sahara -sahara_git_install_branch: 7d9483b21c1e45ef4834f3d3d9973d95d181df3c # HEAD of "stable/rocky" as of 27.08.2018 +sahara_git_install_branch: ddb518fd81b82308bdd01e58ebf6ed7a48c544ae # HEAD of "stable/rocky" as of 11.12.2018 sahara_git_project_group: sahara_all sahara_git_track_branch: stable/rocky ## Swift service swift_git_repo: https://git.openstack.org/openstack/swift -swift_git_install_branch: cfc4f30d633dd962cd8d38e0f208b867131082fc # HEAD of "stable/rocky" as of 17.08.2018 +swift_git_install_branch: 7fdf66ab70da705774a4ae9c328a3e762bb2f3b4 # HEAD of "stable/rocky" as of 11.12.2018 swift_git_project_group: swift_all swift_git_track_branch: stable/rocky @@ -251,52 +249,52 @@ swift_swift3_git_track_branch: None ## Ironic service ironic_git_repo: https://git.openstack.org/openstack/ironic -ironic_git_install_branch: 9ceb015a0a1aef961358953839a71dd3be57753e # HEAD of "stable/rocky" as of 27.08.2018 +ironic_git_install_branch: 6a6c0d882fe8ac299d18df75d2bbd111b170ad48 # HEAD of "stable/rocky" as of 11.12.2018 ironic_git_project_group: 
ironic_all ironic_git_track_branch: stable/rocky ## Magnum service magnum_git_repo: https://git.openstack.org/openstack/magnum -magnum_git_install_branch: 0dbe6806ed51fdc08e7b118ac10eedd53d365b5b # HEAD of "stable/rocky" as of 27.08.2018 +magnum_git_install_branch: 765e207a5d3a45b8523cb2c34e5d74541da481e6 # HEAD of "stable/rocky" as of 11.12.2018 magnum_git_project_group: magnum_all magnum_git_track_branch: stable/rocky ## Trove service trove_git_repo: https://git.openstack.org/openstack/trove -trove_git_install_branch: 50a27fafa16918d95eb49b0057b8eaf8593a6357 # HEAD of "stable/rocky" as of 27.08.2018 +trove_git_install_branch: 2953676e81fc22099e72ea7d0f27002a59aa779f # HEAD of "stable/rocky" as of 11.12.2018 trove_git_project_group: trove_all trove_git_track_branch: stable/rocky ## Horizon Trove dashboard plugin trove_dashboard_git_repo: https://git.openstack.org/openstack/trove-dashboard -trove_dashboard_git_install_branch: 68dda48e33d54346d529c67b4a3d039ab79a8c4a # HEAD of "stable/rocky" as of 27.08.2018 +trove_dashboard_git_install_branch: c6482d8f7ebeb980a99cc89593245be381675984 # HEAD of "stable/rocky" as of 11.12.2018 trove_dashboard_git_project_group: horizon_all trove_dashboard_git_track_branch: stable/rocky ## Octavia service octavia_git_repo: https://git.openstack.org/openstack/octavia -octavia_git_install_branch: 4ba1ee5ee017b06c58c8f92a7977adfb73304ef7 # HEAD of "stable/rocky" as of 27.08.2018 +octavia_git_install_branch: ec4c88e23ebeb786491158682f9a7dd42928f97a # HEAD of "stable/rocky" as of 12.14.2018 octavia_git_project_group: octavia_all octavia_git_track_branch: stable/rocky ## Tacker service tacker_git_repo: https://git.openstack.org/openstack/tacker -tacker_git_install_branch: 0779335868b03f328a267794daac6e1817f74136 # HEAD of "stable/rocky" as of 27.08.2018 +tacker_git_install_branch: 279b1a2840b9f28377476e0d11ca83ce2e88a0b2 # HEAD of "stable/rocky" as of 11.12.2018 tacker_git_project_group: tacker_all tacker_git_track_branch: stable/rocky ## 
Congress service congress_git_repo: https://git.openstack.org/openstack/congress -congress_git_install_branch: 9f2ae72854fbf1c83bb30a37d24303840340f95b # HEAD of "stable/rocky" as of 27.08.2018 +congress_git_install_branch: 6862ac9f356a5403e1e37050e12f032f661bae96 # HEAD of "stable/rocky" as of 11.12.2018 congress_git_project_group: congress_all congress_git_track_branch: stable/rocky ## Horizon Octavia dashboard plugin octavia_dashboard_git_repo: https://git.openstack.org/openstack/octavia-dashboard -octavia_dashboard_git_install_branch: 4e013a00b2ffa003bdaee019b3aac3df80fb13bf # HEAD of "stable/rocky" as of 27.08.2018 +octavia_dashboard_git_install_branch: 80766f9390492c24de38911d7240c5490c7ef562 # HEAD of "stable/rocky" as of 11.12.2018 octavia_dashboard_git_project_group: horizon_all octavia_dashboard_git_track_branch: stable/rocky diff --git a/xci/installer/osa/files/setup-openstack.yml b/xci/installer/osa/files/setup-openstack.yml index 544a9999..904215b7 100644 --- a/xci/installer/osa/files/setup-openstack.yml +++ b/xci/installer/osa/files/setup-openstack.yml @@ -19,11 +19,13 @@ - include: os-nova-install.yml - include: os-neutron-install.yml - include: os-heat-install.yml +- include: os-ceilometer-install.yml - include: os-horizon-install.yml when: not core_openstack | default(False) - include: os-swift-install.yml - include: os-ironic-install.yml when: not core_openstack | default(False) +- include: os-barbican-install.yml - include: os-tacker-install.yml - include: os-tempest-install.yml when: (tempest_install | default(False)) | bool or (tempest_run | default(False)) | bool diff --git a/xci/installer/osa/playbooks/configure-opnfvhost.yml b/xci/installer/osa/playbooks/configure-opnfvhost.yml index 4fc966a3..07ad683b 100644 --- a/xci/installer/osa/playbooks/configure-opnfvhost.yml +++ b/xci/installer/osa/playbooks/configure-opnfvhost.yml @@ -152,6 +152,7 @@ - name: Install ARA callback plugin in OSA virtualenv pip: name: ara + version: 0.16.4 state: 
present extra_args: '-c https://raw.githubusercontent.com/openstack/requirements/{{ requirements_git_install_branch }}/upper-constraints.txt' executable: '/opt/ansible-runtime/bin/pip' diff --git a/xci/installer/osa/playbooks/post-deployment.yml b/xci/installer/osa/playbooks/post-deployment.yml index 9f2fec38..36c052c9 100644 --- a/xci/installer/osa/playbooks/post-deployment.yml +++ b/xci/installer/osa/playbooks/post-deployment.yml @@ -12,6 +12,7 @@ vars_files: - "{{ xci_path }}/xci/var/opnfv.yml" - "{{ xci_path }}/xci/installer/osa/files/openstack_services.yml" + - "{{ xci_path }}/xci/installer/osa/files/{{ xci_flavor }}/user_variables.yml" environment: http_proxy: "{{ lookup('env','http_proxy') }}" diff --git a/xci/installer/osh/README b/xci/installer/osh/README new file mode 100644 index 00000000..902ac10e --- /dev/null +++ b/xci/installer/osh/README @@ -0,0 +1,50 @@ +Requirements: + 1. Performance of hosts + The performance settings are not required officially. I recommend the following: + - VM_CPU=6 + - VM_DISK=80GB + - VM_MEMORY_SIZE=16GB + + 2. Distributions + - Ubuntu 16.04 + + 3. Packages: + - Ansible v2.4 (or newer) and python-netaddr is installed on the machine that will run Ansible commands + - Jinja 2.9 (or newer) is required to run the Ansible Playbooks + + 4. Others: + - The target servers must have access to the Internet in order to pull docker images. + - The target servers are configured to allow IPv4 forwarding. + - Your ssh key must be copied to all the servers part of your inventory. + - The firewalls are not managed, you'll need to implement your own rules the way you used to. In order to avoid any issue during the deployment you should disable your firewall. + +Flavors: + 1. mini: One deployment host, 1 master host and 1 node host. + 2. noha: One deployment host, 1 master host and 2 node hosts. + +Components Installed: + 1. etcd + 2. network plugins: + - calico + 3. kubernetes + 4. 
docker + +How to use: + +Clone the OPNFV Releng repository + + git clone https://gerrit.opnfv.org/gerrit/releng-xci.git + +Change into the directory where the sandbox script is located + + cd releng-xci/xci + +Set the variable to run openstack-helm + + export INSTALLER_TYPE=osh + export DEPLOY_SCENARIO=k8-calico-nofeature + export XCI_FLAVOR=mini + +Execute sandbox script + + ./xci-deploy.sh diff --git a/xci/installer/osh/deploy.sh b/xci/installer/osh/deploy.sh new file mode 100755 index 00000000..e56845b8 --- /dev/null +++ b/xci/installer/osh/deploy.sh @@ -0,0 +1,170 @@ +#!/bin/bash +# SPDX-license-identifier: Apache-2.0 +############################################################################## +# Copyright (c) 2017 Huawei +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +set -o errexit +set -o nounset +set -o pipefail + +OSH_XCI_PLAYBOOKS="$(dirname $(realpath ${BASH_SOURCE[0]}))/playbooks" +export ANSIBLE_ROLES_PATH=$HOME/.ansible/roles:/etc/ansible/roles:${XCI_PATH}/xci/playbooks/roles + +#------------------------------------------------------------------------------- +# Configure localhost +#------------------------------------------------------------------------------- +# This playbook +# - removes directories that were created by the previous xci run +# - clones opnfv/releng-xci repository +# - clones kubernetes-incubator/kubespray repository +# - creates log directory +#------------------------------------------------------------------------------- + +echo "Info: Configuring localhost for kubespray" +echo "-----------------------------------------------------------------------" +cd $XCI_PLAYBOOKS +ansible-playbook ${XCI_ANSIBLE_PARAMS} -e XCI_PATH="${XCI_PATH}" \ + -i 
dynamic_inventory.py configure-localhost.yml +echo "-----------------------------------------------------------------------" +echo "Info: Configured localhost for kubespray" + +#------------------------------------------------------------------------------- +# Configure installer +#------------------------------------------------------------------------------- +# TODO: summarize what this playbook does +#------------------------------------------------------------------------------- + +echo "Info: Configuring kubespray installer" +echo "-----------------------------------------------------------------------" +cd $OSH_XCI_PLAYBOOKS +ansible-playbook ${XCI_ANSIBLE_PARAMS} \ + -i ${XCI_PLAYBOOKS}/dynamic_inventory.py configure-installer.yml +echo "-----------------------------------------------------------------------" +echo "Info: Configured kubespray installer" + +#------------------------------------------------------------------------------- +# Configure deployment host, opnfv +#------------------------------------------------------------------------------- +# This playbook +# - removes directories that were created by the previous xci run +# - synchronize opnfv/releng-xci and kubernetes-incubator/kubespray repositories +# - generates/prepares ssh keys +# - copies flavor files to be used by kubespray +# - install packages required by kubespray +#------------------------------------------------------------------------------- +echo "Info: Configuring opnfv deployment host for kubespray" +echo "-----------------------------------------------------------------------" +cd $OSH_XCI_PLAYBOOKS +ansible-playbook ${XCI_ANSIBLE_PARAMS} \ + -i ${XCI_PLAYBOOKS}/dynamic_inventory.py configure-opnfvhost.yml +echo "-----------------------------------------------------------------------" +echo "Info: Configured opnfv deployment host for kubespray" + +#------------------------------------------------------------------------------- +# Configure target hosts for kubespray 
+#------------------------------------------------------------------------------- +# This playbook is only run for the all flavors except aio since aio is configured by the configure-opnfvhost.yml +# This playbook +# - adds public keys to target hosts +# - install packages required by kubespray +# - configures haproxy service +#------------------------------------------------------------------------------- +if [ $XCI_FLAVOR != "aio" ]; then + echo "Info: Configuring target hosts for kubespray" + echo "-----------------------------------------------------------------------" + cd $OSH_XCI_PLAYBOOKS + ansible-playbook ${XCI_ANSIBLE_PARAMS} \ + -i ${XCI_PLAYBOOKS}/dynamic_inventory.py configure-targethosts.yml + echo "-----------------------------------------------------------------------" + echo "Info: Configured target hosts for kubespray" +fi + + +echo "Info: Using kubespray to deploy the kubernetes cluster" +echo "-----------------------------------------------------------------------" +ssh root@$OPNFV_HOST_IP "set -o pipefail; export XCI_FLAVOR=$XCI_FLAVOR; export INSTALLER_TYPE=$INSTALLER_TYPE; \ + export IDF=/root/releng-xci/xci/var/idf.yml; export PDF=/root/releng-xci/xci/var/pdf.yml; \ + cd releng-xci/.cache/repos/kubespray/; ansible-playbook \ + -i inventory/opnfv/dynamic_inventory.py cluster.yml -b | tee setup-kubernetes.log" +scp root@$OPNFV_HOST_IP:~/releng-xci/.cache/repos/kubespray/setup-kubernetes.log \ + $LOG_PATH/setup-kubernetes.log + + +cd $OSH_XCI_PLAYBOOKS +ansible-playbook ${XCI_ANSIBLE_PARAMS} \ + -i ${XCI_PLAYBOOKS}/dynamic_inventory.py configure-kubenet.yml +echo +echo "-----------------------------------------------------------------------" +echo "Info: Kubernetes installation is successfully completed!" 
+echo "-----------------------------------------------------------------------" + +#------------------------------------------------------------------------------- +# Execute post-installation tasks +#------------------------------------------------------------------------------- +# Playbook post.yml is used in order to execute any post-deployment tasks that +# are required for the scenario under test. +#------------------------------------------------------------------------------- +# copy admin.conf +ssh root@$OPNFV_HOST_IP "mkdir -p ~/.kube/;\ + cp -f ~/admin.conf ~/.kube/config;" + +echo "-----------------------------------------------------------------------" +echo "Info: Running post-deployment scenario role" +echo "-----------------------------------------------------------------------" +cd $OSH_XCI_PLAYBOOKS +ansible-playbook ${XCI_ANSIBLE_PARAMS} -i ${XCI_PLAYBOOKS}/dynamic_inventory.py \ + post-deployment.yml +echo "-----------------------------------------------------------------------" +echo "Info: Post-deployment scenario role execution done" +echo "-----------------------------------------------------------------------" +echo +echo "Login opnfv host ssh root@$OPNFV_HOST_IP +according to the user-guide to create a service +https://kubernetes.io/docs/user-guide/walkthrough/k8s201/" +echo +echo "-----------------------------------------------------------------------" +echo "Info: Kubernetes login details" +echo "-----------------------------------------------------------------------" +echo +# Get the dashboard URL +if ssh-keygen -f "/home/opnfv/.ssh/known_hosts" -F $OPNFV_HOST_IP; +then +ssh-keygen -f "/home/opnfv/.ssh/known_hosts" -R $OPNFV_HOST_IP; +echo "Info: known_hosts entry for opnfv host from previous deployment found and deleted" +fi +DASHBOARD_SERVICE=$(ssh -q -o StrictHostKeyChecking=no root@$OPNFV_HOST_IP "kubectl get service -n kube-system |grep kubernetes-dashboard") +DASHBOARD_PORT=$(echo ${DASHBOARD_SERVICE} | awk '{print $5}' |awk -F 
"[:/]" '{print $2}') +KUBER_SERVER_URL=$(ssh root@$OPNFV_HOST_IP "grep -r server ~/.kube/config") +echo "Info: Kubernetes Dashboard URL:" +echo $KUBER_SERVER_URL | awk '{print $2}'| sed -n "s#:[0-9]*\$#:$DASHBOARD_PORT#p" + +# Get the dashboard user and password +MASTER_IP=$(echo ${KUBER_SERVER_URL} | awk '{print $2}' |awk -F "[:/]" '{print $4}') +if ssh-keygen -f "/home/opnfv/.ssh/known_hosts" -F $MASTER_IP; +then +ssh-keygen -f "/home/opnfv/.ssh/known_hosts" -R $MASTER_IP; +echo "Info: known_hosts entry for master host from previous deployment found and deleted" +fi +USER_CSV=$(ssh -q -o StrictHostKeyChecking=no root@$MASTER_IP " cat /etc/kubernetes/users/known_users.csv") +USERNAME=$(echo $USER_CSV |awk -F ',' '{print $2}') +PASSWORD=$(echo $USER_CSV |awk -F ',' '{print $1}') +echo "Info: Dashboard username: ${USERNAME}" +echo "Info: Dashboard password: ${PASSWORD}" + +echo "-----------------------------------------------------------------------" +echo "Info: Continue with running the openstack-helm installation" +echo "-----------------------------------------------------------------------" +cd $OSH_XCI_PLAYBOOKS +ansible-playbook ${XCI_ANSIBLE_PARAMS} -v -i ${XCI_PLAYBOOKS}/dynamic_inventory.py \ + install-openstack-helm.yml +echo "-----------------------------------------------------------------------" +echo "Info: Openstack-helm installation execution done" +echo "-----------------------------------------------------------------------" +echo + + +# vim: set ts=4 sw=4 expandtab: diff --git a/xci/installer/osh/files/ha/inventory/group_vars/all.yml b/xci/installer/osh/files/ha/inventory/group_vars/all.yml new file mode 100644 index 00000000..d1b946a7 --- /dev/null +++ b/xci/installer/osh/files/ha/inventory/group_vars/all.yml @@ -0,0 +1,8 @@ +--- +loadbalancer_apiserver: + address: 192.168.122.222 + port: 8383 + +apiserver_loadbalancer_domain_name: 192.168.122.222 +supplementary_addresses_in_ssl_keys: + - 192.168.122.222 diff --git 
a/xci/installer/osh/playbooks/configure-installer.yml b/xci/installer/osh/playbooks/configure-installer.yml new file mode 100644 index 00000000..383f55fc --- /dev/null +++ b/xci/installer/osh/playbooks/configure-installer.yml @@ -0,0 +1,51 @@ +--- +# SPDX-license-identifier: Apache-2.0 +############################################################################## +# Copyright (c) 2019 Ericsson Software Technology and Others +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +- hosts: localhost + connection: local + vars_files: + - "{{ xci_path }}/xci/var/opnfv.yml" + + tasks: + - name: delete existing kubespray/inventory/opnfv directory + file: + path: "{{ xci_path }}/.cache/repos/kubespray/inventory/opnfv" + state: absent + + - name: copy kubespray/inventory/sample as kubespray/inventory/opnfv + copy: + src: "{{ xci_path }}/.cache/repos/kubespray/inventory/sample/" + dest: "{{ xci_path }}/.cache/repos/kubespray/inventory/opnfv" + + - name: update kubespray k8s-cluster.yml for xci + lineinfile: + path: "{{ xci_path }}/.cache/repos/kubespray/inventory/opnfv/group_vars/k8s-cluster/k8s-cluster.yml" + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + with_items: + - { regexp: "kube_version:.*", line: "kube_version: {{ kubernetes_version }}" } + - { regexp: "kubeconfig_localhost:.*", line: "kubeconfig_localhost: true" } + - { regexp: "kube_basic_auth:.*", line: "kube_basic_auth: true" } + - { regexp: "dashboard_enabled:.*", line: "dashboard_enabled: true" } + +# NOTE(fdegir): the reason for this task to be separate from the task which uses lineinfile +# module is that escaping curly braces does not work with with_items. 
what happens is that +# ansible tries to resolve {{ ansible_env.HOME }} which we don't want since it should point +# to home folder of the user executing this task at runtime. + - name: update kubespray artifacts_dir + lineinfile: + path: "{{ xci_path }}/.cache/repos/kubespray/inventory/opnfv/group_vars/k8s-cluster/k8s-cluster.yml" + regexp: "artifacts_dir:.*" + line: "artifacts_dir: '{{ '{{' }} ansible_env.HOME {{ '}}' }}'" + + - name: change dashboard server type to NodePort + lineinfile: + path: "{{ xci_path }}/.cache/repos/kubespray/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2" + insertafter: 'targetPort' + line: " type: NodePort" diff --git a/xci/installer/osh/playbooks/configure-kubenet.yml b/xci/installer/osh/playbooks/configure-kubenet.yml new file mode 100644 index 00000000..18a126c1 --- /dev/null +++ b/xci/installer/osh/playbooks/configure-kubenet.yml @@ -0,0 +1,51 @@ +--- +# SPDX-license-identifier: Apache-2.0 +############################################################################## +# Copyright (c) 2018 SUSE LINUX GmbH and others. +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +# NOTE(hwoarang) Kubenet expects networking to be prepared by the administrator so it's necessary +# to do that as part of the node configuration. All we need is to add static routes on every node +# so cbr0 interfaces can talk to each other. 
+- name: Prepare networking for kubenet + hosts: k8s-cluster + remote_user: root + gather_facts: True + become: yes + vars_files: + - "{{ xci_path }}/xci/var/opnfv.yml" + tasks: + - name: Configure static routes + block: + - name: Collect cbr0 information from the nodes + set_fact: + kubenet_xci_static_routes: |- + {% set static_routes = [] %} + {% for host in groups['k8s-cluster']|select("ne", inventory_hostname) %} + {%- set _ = static_routes.append( + {'network': (hostvars[host]['ansible_cbr0']['ipv4']['network']+'/'+ + hostvars[host]['ansible_cbr0']['ipv4']['netmask'])|ipaddr('net'), + 'gateway': hostvars[host]['ansible_default_ipv4']['address']}) -%} + {% endfor %} + {{ static_routes }} + + - name: Add static routes on each node + shell: "ip route show | grep -q {{ item.network }} || ip route add {{ item.network }} via {{ item.gateway }}" + with_items: "{{ kubenet_xci_static_routes }}" + loop_control: + label: "{{ item.network }}" + when: deploy_scenario.find('k8-nosdn-') != -1 + + - name: Ensure rp_filter is disabled on localhost + sysctl: + name: net.ipv4.conf.all.rp_filter + sysctl_set: yes + state: present + value: "{{ (kubenet_xci_static_routes is defined) | ternary(0, 1) }}" + reload: yes + delegate_to: localhost + run_once: True diff --git a/xci/installer/osh/playbooks/configure-opnfvhost.yml b/xci/installer/osh/playbooks/configure-opnfvhost.yml new file mode 100644 index 00000000..52e42b06 --- /dev/null +++ b/xci/installer/osh/playbooks/configure-opnfvhost.yml @@ -0,0 +1,101 @@ +--- +# SPDX-license-identifier: Apache-2.0 +############################################################################## +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +- hosts: opnfv + remote_user: root + vars_files: + - "{{ xci_path }}/xci/var/opnfv.yml" + + pre_tasks: + - name: Load distribution variables + include_vars: + file: "{{ item }}" + with_items: + - "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml" + - name: Set facts for remote deployment + set_fact: + remote_xci_path: "{{ ansible_env.HOME }}/releng-xci" + remote_xci_flavor_files: "{{ ansible_env.HOME }}/releng-xci/xci/installer/{{ installer_type }}/files/{{ xci_flavor }}" + remote_xci_playbooks: "{{ ansible_env.HOME }}/releng-xci/xci/playbooks" + + roles: + - role: bootstrap-host + configure_network: xci_flavor != 'aio' + + tasks: + - name: Create list of files to copy + shell: | + git ls-tree -r --name-only HEAD > {{ xci_cache }}/releng-xci.files + echo ".git/" >> {{ xci_cache }}/releng-xci.files + echo ".cache/repos/" >> {{ xci_cache }}/releng-xci.files + echo ".cache/xci.env" >> {{ xci_cache }}/releng-xci.files + args: + executable: /bin/bash + chdir: "{{ xci_path }}" + changed_when: False + delegate_to: 127.0.0.1 + tags: + - skip_ansible_lint + + - name: Copy releng-xci to remote host + synchronize: + archive: yes + src: "{{ xci_path }}/" + dest: "{{ remote_xci_path }}" + delete: yes + rsync_opts: + - "--recursive" + - "--files-from={{ xci_cache }}/releng-xci.files" + + - name: link xci dynamic inventory to kubespray/inventory/opnfv directory + file: + src: "{{ remote_xci_playbooks }}/dynamic_inventory.py" + path: "{{ remote_xci_path }}/.cache/repos/kubespray/inventory/opnfv/dynamic_inventory.py" + state: link + + - name: Download kubectl and place it to /usr/local/bin + get_url: + url: "https://storage.googleapis.com/kubernetes-release/release/{{ 
kubernetes_version }}/bin/linux/amd64/kubectl" + dest: /usr/local/bin/kubectl + owner: root + group: root + mode: 0755 + + - name: Reload XCI deployment host facts + setup: + filter: ansible_local + gather_subset: "!all" + delegate_to: 127.0.0.1 + + - name: Prepare everything to run the {{ deploy_scenario }} role + include_role: + name: "{{ hostvars['opnfv'].ansible_local.xci.scenarios.role }}" + + - name: Install required packages + package: + name: "{{ (ansible_pkg_mgr == 'zypper') | ternary('dbus-1', 'dbus') }}" + state: present + update_cache: "{{ (ansible_pkg_mgr in ['apt', 'zypper']) | ternary('yes', omit) }}" + when: xci_flavor == 'aio' + + - name: pip install required packages + pip: + name: "{{ item.name }}" + version: "{{ item.version | default(omit) }}" + with_items: + - { name: 'ansible', version: "{{ xci_kube_ansible_pip_version }}" } + - { name: 'netaddr' } + - { name: 'ansible-modules-hashivault' } + + - name: fetch xci environment + copy: + src: "{{ xci_path }}/.cache/xci.env" + dest: /root/xci.env + + - name: Manage SSH keys + include_tasks: "{{ xci_path }}/xci/playbooks/manage-ssh-keys.yml" diff --git a/xci/installer/osh/playbooks/configure-targethosts.yml b/xci/installer/osh/playbooks/configure-targethosts.yml new file mode 100644 index 00000000..2fde9877 --- /dev/null +++ b/xci/installer/osh/playbooks/configure-targethosts.yml @@ -0,0 +1,40 @@ +--- +- hosts: k8s-cluster + remote_user: root + vars_files: + - "{{ xci_path }}/xci/var/opnfv.yml" + + pre_tasks: + - name: Load distribution variables + include_vars: + file: "{{ item }}" + with_items: + - "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml" + + roles: + - role: bootstrap-host + + tasks: + - name: Manage SSH keys + include_tasks: "{{ xci_path }}/xci/playbooks/manage-ssh-keys.yml" + + - name: Install dbus + package: + name: "{{ (ansible_pkg_mgr == 'zypper') | ternary('dbus-1', 'dbus') }}" + state: present + update_cache: "{{ (ansible_pkg_mgr in ['apt', 'zypper']) | ternary('yes', omit) 
}}" + +- hosts: kube-master + remote_user: root + vars_files: + - "{{ xci_path }}/xci/var/opnfv.yml" + pre_tasks: + - name: Load distribution variables + include_vars: + file: "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml" + roles: + - role: "keepalived" + when: xci_flavor == 'ha' + - role: "haproxy_server" + haproxy_service_configs: "{{ haproxy_default_services}}" + when: xci_flavor == 'ha' diff --git a/xci/installer/osh/playbooks/group_vars/all.yml b/xci/installer/osh/playbooks/group_vars/all.yml new file mode 100644 index 00000000..7453bdab --- /dev/null +++ b/xci/installer/osh/playbooks/group_vars/all.yml @@ -0,0 +1,55 @@ +--- +keepalived_ubuntu_src: "uca" +keepalived_uca_apt_repo_url: "{{ uca_apt_repo_url | default('http://ubuntu-cloud.archive.canonical.com/ubuntu') }}" + +keepalived_sync_groups: + haproxy: + instances: + - external + +haproxy_keepalived_external_interface: "{{ ansible_default_ipv4.interface }}" +haproxy_keepalived_authentication_password: 'keepalived' +keepalived_instances: + external: + interface: "{{ haproxy_keepalived_external_interface }}" + state: "BACKUP" + virtual_router_id: "{{ haproxy_keepalived_external_virtual_router_id | default ('10') }}" + priority: "{{ ((ansible_play_hosts|length-ansible_play_hosts.index(inventory_hostname))*100)-((ansible_play_hosts|length-ansible_play_hosts.index(inventory_hostname))*50) }}" + authentication_password: "{{ haproxy_keepalived_authentication_password }}" + vips: + - "{{ haproxy_keepalived_external_vip_cidr | default('192.168.122.222/32') }} dev {{ haproxy_keepalived_external_interface }}" + +haproxy_default_services: + - service: + haproxy_service_name: proxy-apiserver + haproxy_backend_nodes: "{{ groups['kube-master'] | default([]) }}" + haproxy_port: 8383 + haproxy_backend_port: 6443 + haproxy_balance_type: tcp + +haproxy_bind_on_non_local: "True" +haproxy_use_keepalived: "True" +keepalived_selinux_compile_rules: + - keepalived_ping + - keepalived_haproxy_pid_file + +# Ensure that the 
package state matches the global setting +haproxy_package_state: "latest" + +haproxy_whitelist_networks: + - 192.168.0.0/16 + - 172.16.0.0/12 + - 10.0.0.0/8 + +haproxy_galera_whitelist_networks: "{{ haproxy_whitelist_networks }}" +haproxy_glance_registry_whitelist_networks: "{{ haproxy_whitelist_networks }}" +haproxy_keystone_admin_whitelist_networks: "{{ haproxy_whitelist_networks }}" +haproxy_nova_metadata_whitelist_networks: "{{ haproxy_whitelist_networks }}" +haproxy_rabbitmq_management_whitelist_networks: "{{ haproxy_whitelist_networks }}" +haproxy_repo_git_whitelist_networks: "{{ haproxy_whitelist_networks }}" +haproxy_repo_cache_whitelist_networks: "{{ haproxy_whitelist_networks }}" +haproxy_octavia_whitelist_networks: "{{ haproxy_whitelist_networks }}" +haproxy_ssl: false + +internal_lb_vip_address: "192.168.122.222" +external_lb_vip_address: "{{ internal_lb_vip_address }}" diff --git a/xci/installer/osh/playbooks/install-openstack-helm.yml b/xci/installer/osh/playbooks/install-openstack-helm.yml new file mode 100644 index 00000000..a16572a5 --- /dev/null +++ b/xci/installer/osh/playbooks/install-openstack-helm.yml @@ -0,0 +1,24 @@ +--- +- hosts: kube-node + remote_user: root + vars_files: + - "{{ xci_path }}/xci/var/opnfv.yml" + + roles: + - role: prepare-kube-nodes-osh + +- hosts: opnfv + remote_user: root + vars_files: + - "{{ xci_path }}/xci/var/opnfv.yml" + roles: + - role: prepare-opnfvhost-osh + - role: prepare-osh + - role: install-osh-mini + when: xci_flavor == 'mini' + environment: + - CONTAINER_DISTRO_NAME: "{{ container_distro_name }}" + - CONTAINER_DISTRO_VERSION: "{{ container_distro_version }}" + - OPENSTACK_RELEASE: "{{ openstack_osh_version }}" + - role: install-osh-noha + when: xci_flavor == 'noha' diff --git a/xci/installer/osh/playbooks/post-deployment.yml b/xci/installer/osh/playbooks/post-deployment.yml new file mode 100644 index 00000000..5c2f7f36 --- /dev/null +++ b/xci/installer/osh/playbooks/post-deployment.yml @@ -0,0 +1,42 @@ 
+--- +# SPDX-license-identifier: Apache-2.0 +############################################################################## +# Copyright (c) 2018 Ericsson AB and others. +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +- hosts: opnfv + remote_user: root + vars_files: + - "{{ xci_path }}/xci/var/opnfv.yml" + + pre_tasks: + - name: Load distribution variables + include_vars: + file: "{{ item }}" + with_items: + - "{{ xci_path }}/xci/var/{{ ansible_os_family }}.yml" + - name: Set facts for remote deployment + set_fact: + remote_xci_scenario_path: "{{ ansible_env.HOME }}/releng-xci/.cache/repos/scenarios/{{ deploy_scenario }}/scenarios/{{ deploy_scenario }}" + + tasks: + - name: Reload XCI deployment host facts + setup: + filter: ansible_local + gather_subset: "!all" + delegate_to: 127.0.0.1 + + - name: Check if any post-deployment task defined for {{ deploy_scenario }} role + stat: + path: "{{ remote_xci_scenario_path }}/role/{{ deploy_scenario }}/tasks/post-deployment.yml" + register: post_deployment_yml + + - name: Execute post-deployment tasks of {{ deploy_scenario }} role + include_role: + name: "{{ hostvars['opnfv'].ansible_local.xci.scenarios.role }}" + tasks_from: post-deployment + when: + - post_deployment_yml.stat.exists diff --git a/xci/installer/osh/playbooks/roles/install-osh-mini/tasks/main.yml b/xci/installer/osh/playbooks/roles/install-osh-mini/tasks/main.yml new file mode 100644 index 00000000..e5df54fa --- /dev/null +++ b/xci/installer/osh/playbooks/roles/install-osh-mini/tasks/main.yml @@ -0,0 +1,109 @@ +--- + +- name: Setup Clients + command: ./tools/deployment/common/setup-client.sh + changed_when: false + args: + chdir: /root/repos/openstack-helm + +- name: Deploy the 
ingress controller + command: ./tools/deployment/component/common/ingress.sh + changed_when: false + args: + chdir: /root/repos/openstack-helm + +- name: Deploy MariaDB + command: ./tools/deployment/component/common/mariadb.sh + changed_when: false + args: + chdir: /root/repos/openstack-helm + +- name: Deploy memcached + command: ./tools/deployment/component/common/memcached.sh + changed_when: false + args: + chdir: /root/repos/openstack-helm + +- name: Deploy RabbitMQ + command: ./tools/deployment/component/common/rabbitmq.sh + changed_when: false + args: + chdir: /root/repos/openstack-helm + +- name: Update nfs-provisioner helm-chart + shell: helm dependency update nfs-provisioner + args: + chdir: /root/repos/openstack-helm-infra + executable: /bin/bash + tags: + - skip_ansible_lint + +- name: Deploy nfs-provisioner + command: ./tools/deployment/component/nfs-provisioner/nfs-provisioner.sh + changed_when: false + args: + chdir: /root/repos/openstack-helm + +- name: Deploy Keystone + command: ./tools/deployment/component/keystone/keystone.sh + changed_when: false + args: + chdir: /root/repos/openstack-helm + +- name: Deploy Heat + command: ./tools/deployment/component/heat/heat.sh + changed_when: false + args: + chdir: /root/repos/openstack-helm + +- name: Deploy Glance + command: ./tools/deployment/component/glance/glance.sh + changed_when: false + args: + chdir: /root/repos/openstack-helm + +- name: Deploy OpenvSwitch + command: ./tools/deployment/component/compute-kit/openvswitch.sh + changed_when: false + args: + chdir: /root/repos/openstack-helm + +- name: Deploy Libvirt + command: ./tools/deployment/component/compute-kit/libvirt.sh + changed_when: false + args: + chdir: /root/repos/openstack-helm + +- name: Add br-vxlan as the tunnel interface + lineinfile: + path: /root/repos/openstack-helm/tools/deployment/component/compute-kit/compute-kit.sh + regexp: 'tunnel: docker0' + line: ' tunnel: br-vxlan' + +- name: Deploy Compute Kit (Nova and Neutron) + command: 
./tools/deployment/component/compute-kit/compute-kit.sh + changed_when: false + args: + chdir: /root/repos/openstack-helm + +- name: Copy script to the worker node + command: "scp -o \"StrictHostKeyChecking no\" tools/deployment/developer/ceph/170-setup-gateway.sh root@{{ hostvars.node1.ip }}:170-setup-gateway.sh" + changed_when: false + args: + chdir: /root/repos/openstack-helm + +- name: Setup the gateway to the public network at worker node + command: /root/170-setup-gateway.sh + changed_when: false + delegate_to: node1 + +- name: Add a route from opnfv to worker node for the public network + command: ip route add 172.24.4.0/24 via 192.168.122.4 + changed_when: false + +# Deployment validation +- name: Exercise the cloud + command: ./tools/deployment/developer/common/900-use-it.sh + changed_when: false + args: + chdir: /root/repos/openstack-helm diff --git a/xci/installer/osh/playbooks/roles/install-osh-mini/vars/main.yml b/xci/installer/osh/playbooks/roles/install-osh-mini/vars/main.yml new file mode 100644 index 00000000..03c02a83 --- /dev/null +++ b/xci/installer/osh/playbooks/roles/install-osh-mini/vars/main.yml @@ -0,0 +1,18 @@ +--- +# Copyright 2019, SUSE Linux GmbH +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +cacheable: yes +container_distro_name: "{{ (osh_distro=='opensuse') | ternary('opensuse', 'ubuntu') }}" +container_distro_version: "{{ (osh_distro=='opensuse') | ternary('15', 'xenial') }}" diff --git a/xci/installer/osh/playbooks/roles/install-osh-noha/tasks/main.yml b/xci/installer/osh/playbooks/roles/install-osh-noha/tasks/main.yml new file mode 100644 index 00000000..befdcfce --- /dev/null +++ b/xci/installer/osh/playbooks/roles/install-osh-noha/tasks/main.yml @@ -0,0 +1,130 @@ +--- +- name: Setup Clients + command: ./tools/deployment/multinode/010-setup-client.sh + changed_when: false + args: + chdir: /root/repos/openstack-helm + +- name: Deploy the ingress controller + command: ./tools/deployment/multinode/020-ingress.sh + changed_when: false + args: + chdir: /root/repos/openstack-helm + +- name: Deploy Ceph + command: ./tools/deployment/multinode/030-ceph.sh + changed_when: false + args: + chdir: /root/repos/openstack-helm + +- name: Activate the openstack namespace to be able to use Ceph + command: ./tools/deployment/multinode/040-ceph-ns-activate.sh + changed_when: false + args: + chdir: /root/repos/openstack-helm + +- name: Deploy MariaDB + command: ./tools/deployment/multinode/050-mariadb.sh + changed_when: false + args: + chdir: /root/repos/openstack-helm + +- name: Deploy RabbitMQ + command: ./tools/deployment/multinode/060-rabbitmq.sh + changed_when: false + args: + chdir: /root/repos/openstack-helm + +- name: Deploy memcached + command: ./tools/deployment/multinode/070-memcached.sh + changed_when: false + args: + chdir: /root/repos/openstack-helm + +- name: Deploy Keystone + command: ./tools/deployment/multinode/080-keystone.sh + changed_when: false + args: + chdir: /root/repos/openstack-helm + +- name: Deploy Horizon + command: ./tools/deployment/multinode/085-horizon.sh + changed_when: false + args: + chdir: /root/repos/openstack-helm + +- name: Deploy Rados Gateway for object store + command: 
./tools/deployment/multinode/090-ceph-radosgateway.sh + changed_when: false + args: + chdir: /root/repos/openstack-helm + +- name: Deploy Glance + command: ./tools/deployment/multinode/100-glance.sh + changed_when: false + args: + chdir: /root/repos/openstack-helm + +- name: Deploy Cinder + command: ./tools/deployment/multinode/110-cinder.sh + changed_when: false + args: + chdir: /root/repos/openstack-helm + +- name: Deploy OpenvSwitch + command: ./tools/deployment/multinode/120-openvswitch.sh + changed_when: false + args: + chdir: /root/repos/openstack-helm + +- name: Deploy Libvirt + command: ./tools/deployment/multinode/130-libvirt.sh + changed_when: false + args: + chdir: /root/repos/openstack-helm + +- name: Add br-vxlan as the tunnel interface + lineinfile: + path: /root/repos/openstack-helm/tools/deployment/multinode/140-compute-kit.sh + regexp: 'NETWORK_TUNNEL_DEV="$(network_tunnel_dev)"' + line: 'NETWORK_TUNNEL_DEV=br-vxlan' + +- name: Deploy Compute Kit (Nova and Neutron) + command: ./tools/deployment/multinode/140-compute-kit.sh + changed_when: false + args: + chdir: /root/repos/openstack-helm + +- name: Deploy Heat + command: ./tools/deployment/multinode/150-heat.sh + changed_when: false + args: + chdir: /root/repos/openstack-helm + +- name: Deploy Barbican + command: ./tools/deployment/multinode/160-barbican.sh + changed_when: false + args: + chdir: /root/repos/openstack-helm + +- name: Copy script to the worker node + command: "scp -o \"StrictHostKeyChecking no\" tools/deployment/developer/ceph/170-setup-gateway.sh root@{{ hostvars.node1.ip }}:170-setup-gateway.sh" + changed_when: false + args: + chdir: /root/repos/openstack-helm + +- name: Setup the gateway to the public network at worker node + command: /root/170-setup-gateway.sh + changed_when: false + delegate_to: node1 + +- name: Add a route from opnfv to worker node for the public network + command: ip route add 172.24.4.0/24 via 192.168.122.4 + changed_when: false + +# Deployment validation +- 
name: Exercise the cloud + command: ./tools/deployment/developer/common/900-use-it.sh + changed_when: false + args: + chdir: /root/repos/openstack-helm diff --git a/xci/installer/osh/playbooks/roles/prepare-kube-nodes-osh/tasks/main.yml b/xci/installer/osh/playbooks/roles/prepare-kube-nodes-osh/tasks/main.yml new file mode 100644 index 00000000..ff0aff60 --- /dev/null +++ b/xci/installer/osh/playbooks/roles/prepare-kube-nodes-osh/tasks/main.yml @@ -0,0 +1,12 @@ +--- +- name: Install packages in kubernetes nodes + package: + name: "{{ packages }}" + state: present + changed_when: false + vars: + packages: + - ceph-common + - rbd-nbd + - apparmor + - nfs-common diff --git a/xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/files/helm-serve.service b/xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/files/helm-serve.service new file mode 100644 index 00000000..c3988d6f --- /dev/null +++ b/xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/files/helm-serve.service @@ -0,0 +1,11 @@ +[Unit] +Description=Helm Server +After=network.target + +[Service] +User=root +Restart=always +ExecStart=/usr/bin/helm serve + +[Install] +WantedBy=multi-user.target diff --git a/xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/tasks/main.yml b/xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/tasks/main.yml new file mode 100644 index 00000000..72ae821f --- /dev/null +++ b/xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/tasks/main.yml @@ -0,0 +1,130 @@ +--- +- name: Set kubernetes service account permissions + command: "kubectl create clusterrolebinding add-on-cluster-admin --clusterrole=cluster-admin --serviceaccount=kube-system:default" + changed_when: false + +- name: Set kubernetes node labels + command: "kubectl label nodes {{ item }} {{ node_labels[item]|join(' ') }}" + changed_when: false + with_items: "{{ groups['kube-node'] }}" + +- name: Create directories + file: + path: /root/{{ item }} + state: directory + with_items: + ['repos','tmp', 
'.helm/repository/local'] + +- name: Rename bifrost clouds file to get it out of precedence + command: "mv .config/openstack/clouds.yaml .config/openstack/clouds.yaml.bifrost" + changed_when: false + +- name: Clone openstack-helm + git: + repo: "{{ osh_git_url }}" + dest: /root/repos/openstack-helm + version: "{{ osh_version }}" + update: true + force: true + register: git_clone + until: git_clone is success + retries: 2 + delay: 5 + +- name: Fix dns nameserver for openstack installation (mini flavor) + lineinfile: + path: /root/repos/openstack-helm/tools/gate/files/heat-public-net-deployment.yaml + regexp: '10\.96\.0\.10' + line: " - 10.233.0.3" + +- name: Fix dns nameserver for openstack installation (noha flavor) + lineinfile: + path: /root/repos/openstack-helm/tempest/values.yaml + regexp: 'dns_servers' + line: " dns_servers: 10.233.0.3" + +- name: Clone openstack-helm-infra + git: + repo: "{{ osh_infra_git_url }}" + dest: /root/repos/openstack-helm-infra + version: "{{ osh_infra_version }}" + update: true + force: true + register: git_clone + until: git_clone is success + retries: 2 + delay: 5 + +- name: Get helm + get_url: + url: "{{ osh_helm_binary_url }}/helm-{{ osh_helm_binary_version }}-linux-amd64.tar.gz" + dest: tmp + +- name: Uncompress helm package + command: "tar zxvf tmp/helm-{{ osh_helm_binary_version }}-linux-amd64.tar.gz --strip-components=1 -C tmp/" + changed_when: false + tags: + - skip_ansible_lint + +- name: Put helm in system binaries + copy: + src: tmp/helm + dest: /usr/bin/helm + remote_src: yes + mode: 0755 + +- name: Create helm-serve service file + copy: + src: helm-serve.service + dest: "/etc/systemd/system/helm-serve.service" + mode: 0640 + +- name: Start helm-serve service + service: + name: helm-serve + state: started + enabled: yes + +- name: Wait for helm-serve service to start + wait_for: + port: 8879 + host: 127.0.0.1 + +- name: Install pyhelm + pip: + name: pyhelm + +- name: Init helm + command: "helm init" + changed_when: 
false + +- name: Remove stable (external) service from helm + command: "helm repo remove stable" + changed_when: false + +- name: Add local repositories service to helm + command: "helm repo add local http://localhost:8879/charts" + changed_when: false + +- name: Make charts from infra + make: + chdir: /root/repos/openstack-helm-infra + target: "{{ item }}" + with_items: + - helm-toolkit + - ingress + - mariadb + - rabbitmq + - memcached + - ceph-mon + - ceph-osd + - ceph-client + - ceph-provisioners + - ceph-rgw + - openvswitch + - libvirt + +- name: Install packages + package: + name: "{{ required_packages }}" + state: present diff --git a/xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/vars/main.yml b/xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/vars/main.yml new file mode 100644 index 00000000..979c3329 --- /dev/null +++ b/xci/installer/osh/playbooks/roles/prepare-opnfvhost-osh/vars/main.yml @@ -0,0 +1,31 @@ +--- +required_packages: +- patch +- ipcalc +- jq +- nmap +- bc + +node_labels: + node1: + - openstack-control-plane=enabled + - openstack-compute-node={{ (xci_flavor == 'mini') | ternary('enabled', 'disable') }} + - openstack-helm-node-class=primary + - openvswitch=enabled + - linuxbridge=enabled + - ceph-mon=enabled + - ceph-osd=enabled + - ceph-mds=enabled + - ceph-mgr=enabled + - ceph-rgw=enabled + node2: + - openstack-control-plane={{ (xci_flavor == 'noha') | ternary('disable', 'enabled') }} + - openstack-compute-node=enabled + - openstack-helm-node-class=secondary + - openvswitch=enabled + - linuxbridge=enabled + - ceph-mon=enabled + - ceph-osd=enabled + - ceph-mds=enabled + - ceph-mgr=enabled + - ceph-rgw=enabled diff --git a/xci/installer/osh/playbooks/roles/prepare-osh/tasks/main.yml b/xci/installer/osh/playbooks/roles/prepare-osh/tasks/main.yml new file mode 100644 index 00000000..453a815c --- /dev/null +++ b/xci/installer/osh/playbooks/roles/prepare-osh/tasks/main.yml @@ -0,0 +1,33 @@ +--- +- name: Write new resolv.conf file + 
template: + src: resolv.conf.j2 + dest: /etc/resolv.conf + +- name: Make resolv.conf immutable + shell: "chattr +i /etc/resolv.conf" + changed_when: false + args: + executable: /bin/bash + tags: + - skip_ansible_lint + +#TODO Fetch the value from a file generated by k8s deployer +- name: Get kube service addresses + shell: "grep -r 'kube_service_addresses:' /root/releng-xci/.cache/repos/kubespray/inventory/opnfv/group_vars/k8s-cluster/k8s-cluster.yml | awk '{print $2}'" + changed_when: false + args: + executable: /bin/bash + register: kube_service_addresses + tags: + - skip_ansible_lint + +#This rule allows openstack client in OPNFV VM to reach openstack +- name: Update routing table with kube service addresses + shell: "ip route add {{ kube_service_addresses.stdout }} via 192.168.122.3 dev br-vlan onlink" + changed_when: false + args: + executable: /bin/bash + tags: + - skip_ansible_lint + diff --git a/xci/installer/osh/playbooks/roles/prepare-osh/templates/resolv.conf.j2 b/xci/installer/osh/playbooks/roles/prepare-osh/templates/resolv.conf.j2 new file mode 100644 index 00000000..ae706e02 --- /dev/null +++ b/xci/installer/osh/playbooks/roles/prepare-osh/templates/resolv.conf.j2 @@ -0,0 +1,4 @@ +{{ dns_var }} +{% for nameserver in external_dns_nameservers %} +nameserver {{ nameserver }} +{% endfor %} diff --git a/xci/installer/osh/playbooks/roles/prepare-osh/vars/main.yml b/xci/installer/osh/playbooks/roles/prepare-osh/vars/main.yml new file mode 100644 index 00000000..4d6f9cbb --- /dev/null +++ b/xci/installer/osh/playbooks/roles/prepare-osh/vars/main.yml @@ -0,0 +1,7 @@ +--- +kube_dns_ip: "10.233.0.3" +external_dns_nameservers: +- '{{kube_dns_ip}}' +- '192.168.122.1' +dns_var: "search svc.cluster.local cluster.local" + diff --git a/xci/opnfv-scenario-requirements.yml b/xci/opnfv-scenario-requirements.yml index f5fc62df..98abf528 100644 --- a/xci/opnfv-scenario-requirements.yml +++ b/xci/opnfv-scenario-requirements.yml @@ -28,6 +28,14 @@ - opensuse - ubuntu - 
centos + - installer: osh + flavors: + - mini + - noha + distros: + - ubuntu + - opensuse + - ubuntu-bionic - scenario: os-nosdn-osm scm: git @@ -71,6 +79,20 @@ - opensuse - ubuntu +- scenario: os-odl-sfc_osm + scm: git + src: https://gerrit.opnfv.org/gerrit/sfc + version: master + role: scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm + installers: + - installer: osa + flavors: + - ha + - mini + - noha + distros: + - ubuntu + - scenario: os-odl-bgpvpn scm: git src: https://gerrit.opnfv.org/gerrit/sdnvpn @@ -135,6 +157,14 @@ - ubuntu - centos - opensuse + - installer: osh + flavors: + - mini + - noha + distros: + - ubuntu + - opensuse + - ubuntu-bionic - scenario: k8-flannel-nofeature scm: git diff --git a/xci/playbooks/configure-localhost.yml b/xci/playbooks/configure-localhost.yml index 5b64c785..7aab18f3 100644 --- a/xci/playbooks/configure-localhost.yml +++ b/xci/playbooks/configure-localhost.yml @@ -46,21 +46,21 @@ repo: "{{ kubespray_git_url }}" dest: "{{ xci_cache }}/repos/kubespray" version: "{{ kubespray_version }}" - when: installer_type == "kubespray" + when: installer_type in ["kubespray", "osh"] - role: clone-repository project: "openstack/openstack-ansible-haproxy_server" repo: "{{ openstack_osa_haproxy_git_url }}" dest: roles/haproxy_server version: "{{ haproxy_version }}" when: - - installer_type == "kubespray" + - installer_type == "kubespray" or installer_type == "osh" - role: clone-repository project: "ansible-keepalived" repo: "{{ keepalived_git_url }}" dest: roles/keepalived version: "{{ keepalived_version }}" when: - - installer_type == "kubespray" + - installer_type == "kubespray" or installer_type == "osh" tasks: - name: create log directory {{log_path}} @@ -99,3 +99,18 @@ args: executable: /bin/bash creates: "{{ xci_path }}/.cache/xci.env" + + #TODO: Create an Ansible variable for + # kube_service_addresses(10.233.0.0/18) + - name: Update iptables + command: "iptables -t nat -I POSTROUTING 3 -s 192.168.122.0/24 -d 10.233.0.0/18 -j RETURN" + 
become: true + tags: + - skip_ansible_lint + + #Provide access to the external network (for tests) + - name: Update iptables + command: "iptables -t nat -I POSTROUTING 3 -s 192.168.122.0/24 -d 172.24.4.0/24 -j RETURN" + become: true + tags: + - skip_ansible_lint diff --git a/xci/playbooks/dynamic_inventory.py b/xci/playbooks/dynamic_inventory.py index 7f60f94e..ed63141c 100755 --- a/xci/playbooks/dynamic_inventory.py +++ b/xci/playbooks/dynamic_inventory.py @@ -21,6 +21,12 @@ import json class XCIInventory(object): + """ + + Generates the ansible inventory based on the idf and pdf files provided + when executing the deployment script + + """ def __init__(self): super(XCIInventory, self).__init__() self.inventory = {} @@ -47,7 +53,7 @@ class XCIInventory(object): self.opnfv_networks['opnfv']['public'] = {} self.opnfv_networks['opnfv']['public']['address'] = '192.168.122.2/24' self.opnfv_networks['opnfv']['public']['gateway'] = '192.168.122.1' - self.opnfv_networks['opnfv']['public']['dns'] = '192.168.122.1' + self.opnfv_networks['opnfv']['public']['dns'] = ['192.168.122.1'] self.opnfv_networks['opnfv']['private'] = {} self.opnfv_networks['opnfv']['private']['address'] = '172.29.240.10/22' self.opnfv_networks['opnfv']['storage'] = {} @@ -121,8 +127,8 @@ class XCIInventory(object): for role in idf['xci']['installers'][self.installer]['nodes_roles'][host]: self.add_to_group(role, hostname) - pdf_host_info = filter(lambda x: x['name'] == host, pdf['nodes'])[0] - native_vlan_if = filter(lambda x: x['vlan'] == 'native', pdf_host_info['interfaces']) + pdf_host_info = list(filter(lambda x: x['name'] == host, pdf['nodes']))[0] + native_vlan_if = list(filter(lambda x: x['vlan'] == 'native', pdf_host_info['interfaces'])) self.add_hostvar(hostname, 'ansible_host', native_vlan_if[0]['address']) self.add_hostvar(hostname, 'ip', native_vlan_if[0]['address']) host_networks[hostname] = {} @@ -134,7 +140,9 @@ class XCIInventory(object): if 'gateway' in ndata.keys(): 
host_networks[hostname][network]['gateway'] = str(ndata['gateway']) + "/" + str(ndata['mask']) if 'dns' in ndata.keys(): - host_networks[hostname][network]['dns'] = str(ndata['dns']) + host_networks[hostname][network]['dns'] = [] + for d in ndata['dns']: + host_networks[hostname][network]['dns'].append(str(d)) # Get also vlan and mac_address from pdf host_networks[hostname][network]['mac_address'] = str(pdf_host_info['interfaces'][int(network_interface_num)]['mac_address']) @@ -165,7 +173,8 @@ class XCIInventory(object): # Now add the additional groups for parent in idf['xci']['installers'][self.installer]['groups'].keys(): - map(lambda x: self.add_group(x, parent), idf['xci']['installers'][self.installer]['groups'][parent]) + for host in idf['xci']['installers'][self.installer]['groups'][parent]: + self.add_group(host, parent) # Read additional group variables self.read_additional_group_vars() diff --git a/xci/playbooks/roles/bootstrap-host/tasks/network_debian.yml b/xci/playbooks/roles/bootstrap-host/tasks/network_debian.yml index f2a138f8..176c7eb1 100644 --- a/xci/playbooks/roles/bootstrap-host/tasks/network_debian.yml +++ b/xci/playbooks/roles/bootstrap-host/tasks/network_debian.yml @@ -51,8 +51,48 @@ - { name: "br-storage", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}.20", network: "{{ host_info[inventory_hostname].storage }}" } loop_control: label: "{{ item.name }}" + when: baremetal | bool != true + + +- name: "Configure baremetal networking for blade: {{ inventory_hostname }}" + template: + src: "{{ installer_type }}/debian.interface.j2" + dest: "/etc/network/interfaces.d/{{ item.name }}.cfg" + with_items: + - { name: "{{ admin_interface }}", network: "{{ host_info[inventory_hostname].admin }}" } + - { name: "{{ mgmt_interface }}", vlan_id: "{{ (mgmt_vlan == 'native') | ternary(omit, mgmt_vlan) }}" } + - { name: "{{ storage_interface }}", vlan_id: "{{ (storage_vlan == 'native') | ternary(omit, storage_vlan) }}" } + - { name: "{{ 
public_interface }}", vlan_id: "{{ (public_vlan == 'native') | ternary(omit, public_vlan) }}" } + - { name: "{{ private_interface }}", vlan_id: "{{ (private_vlan == 'native') | ternary(omit, private_vlan) }}" } + - { name: "br-mgmt", bridge_ports: "{{ mgmt_interface }}", network: "{{ host_info[inventory_hostname].mgmt }}" } + - { name: "br-vxlan", bridge_ports: "{{ private_interface }}", network: "{{ host_info[inventory_hostname].private }}" } + - { name: "br-vlan", bridge_ports: "{{ public_interface }}", network: "{{ host_info[inventory_hostname].public }}" } + - { name: "br-storage", bridge_ports: "{{ storage_interface }}", network: "{{ host_info[inventory_hostname].storage }}" } + loop_control: + label: "{{ item.name }}" + when: + - baremetal | bool == true + - "'opnfv' not in inventory_hostname" + +- name: "Configure baremetal networking for VM: {{ inventory_hostname }}" + template: + src: "{{ installer_type }}/debian.interface.j2" + dest: "/etc/network/interfaces.d/{{ item.name }}.cfg" + with_items: + - { name: "{{ mgmt_interface }}", vlan_id: "{{ (mgmt_vlan == 'native') | ternary(omit, mgmt_vlan) }}" } + - { name: "{{ public_interface }}", vlan_id: "{{ (public_vlan == 'native') | ternary(omit, public_vlan) }}" } + - { name: "br-mgmt", bridge_ports: "{{ mgmt_interface }}", network: "{{ host_info[inventory_hostname].mgmt }}" } + - { name: "br-vlan", bridge_ports: "{{ public_interface }}", network: "{{ host_info[inventory_hostname].public }}" } + loop_control: + label: "{{ item.name }}" + when: + - baremetal | bool == true + - "'opnfv' in inventory_hostname" - name: restart network service - shell: "/sbin/ifconfig {{ ansible_local.xci.network.xci_interface }} 0 && /sbin/ifdown -a && /sbin/ifup -a" + shell: "/sbin/ip addr flush dev {{ item }}; /sbin/ifdown -a; /sbin/ifup -a" async: 15 poll: 0 + with_items: + - "{{ public_interface }}" + - "{{ mgmt_interface }}" diff --git a/xci/playbooks/roles/bootstrap-host/tasks/network_suse.yml 
b/xci/playbooks/roles/bootstrap-host/tasks/network_suse.yml index 569644bf..a8f1bf59 100644 --- a/xci/playbooks/roles/bootstrap-host/tasks/network_suse.yml +++ b/xci/playbooks/roles/bootstrap-host/tasks/network_suse.yml @@ -23,6 +23,46 @@ - { name: "br-storage", bridge_ports: "{{ ansible_local.xci.network.xci_interface }}.20", network: "{{ host_info[inventory_hostname].storage }}" } loop_control: label: "{{ item.name }}" + when: baremetal | bool != true + +- name: "Configure baremetal networking for blade: {{ inventory_hostname }}" + template: + src: "{{ installer_type }}/{{ ansible_os_family | lower }}.interface.j2" + dest: "/etc/sysconfig/network/ifcfg-{{ item.name }}" + with_items: + - { name: "{{ admin_interface }}", network: "{{ host_info[inventory_hostname].admin }}" } + - { name: "{{ mgmt_interface }}", vlan_id: "{{ (mgmt_vlan == 'native') | ternary(omit, mgmt_vlan) }}" } + - { name: "{{ storage_interface }}", vlan_id: "{{ (storage_vlan == 'native') | ternary(omit, storage_vlan) }}" } + - { name: "{{ public_interface }}", vlan_id: "{{ (public_vlan == 'native') | ternary(omit, public_vlan) }}" } + - { name: "{{ private_interface }}", vlan_id: "{{ (private_vlan == 'native') | ternary(omit, private_vlan) }}" } + - { name: "br-mgmt", bridge_ports: "{{ mgmt_interface }}", network: "{{ host_info[inventory_hostname].mgmt }}" } + - { name: "br-vxlan", bridge_ports: "{{ private_interface }}", network: "{{ host_info[inventory_hostname].private }}" } + - { name: "br-vlan", bridge_ports: "{{ public_interface }}", network: "{{ host_info[inventory_hostname].public }}" } + - { name: "br-storage", bridge_ports: "{{ storage_interface }}", network: "{{ host_info[inventory_hostname].storage }}" } + loop_control: + label: "{{ item.name }}" + when: + - baremetal | bool == true + - "'opnfv' not in inventory_hostname" + +- name: "Configure baremetal networking for VM: {{ inventory_hostname }}" + template: + src: "{{ installer_type }}/{{ ansible_os_family | lower }}.interface.j2" + 
dest: "/etc/sysconfig/network/ifcfg-{{ item.name }}" + with_items: + - { name: "{{ mgmt_interface }}", vlan_id: "{{ (mgmt_vlan == 'native') | ternary(omit, mgmt_vlan) }}" } + - { name: "{{ mgmt_interface }}.30", vlan_id: 30 } + - { name: "{{ mgmt_interface }}.20", vlan_id: 20 } + - { name: "{{ public_interface }}", vlan_id: "{{ (public_vlan == 'native') | ternary(omit, public_vlan) }}" } + - { name: "br-mgmt", bridge_ports: "{{ mgmt_interface }}", network: "{{ host_info[inventory_hostname].mgmt }}" } + - { name: "br-vlan", bridge_ports: "{{ public_interface }}", network: "{{ host_info[inventory_hostname].public }}" } + - { name: "br-vxlan", bridge_ports: "{{ mgmt_interface }}.30", network: "{{ host_info[inventory_hostname].private }}" } + - { name: "br-storage", bridge_ports: "{{ mgmt_interface }}.20", network: "{{ host_info[inventory_hostname].storage }}" } + loop_control: + label: "{{ item.name }}" + when: + - baremetal | bool == true + - "'opnfv' in inventory_hostname" - name: Add postup/postdown scripts on SUSE copy: @@ -33,7 +73,7 @@ - name: Configure static DNS on SUSE lineinfile: regexp: '^NETCONFIG_DNS_STATIC_SERVERS=.*' - line: "NETCONFIG_DNS_STATIC_SERVERS={{ host_info[inventory_hostname]['public']['dns'] }}" + line: "NETCONFIG_DNS_STATIC_SERVERS=\"{{ host_info[inventory_hostname]['public']['dns'] | join(' ') }}\"" path: "/etc/sysconfig/network/config" state: present when: host_info[inventory_hostname]['public']['dns'] is defined diff --git a/xci/playbooks/roles/bootstrap-host/templates/osa/debian.interface.j2 b/xci/playbooks/roles/bootstrap-host/templates/osa/debian.interface.j2 index f9e4d8df..2f976002 100644 --- a/xci/playbooks/roles/bootstrap-host/templates/osa/debian.interface.j2 +++ b/xci/playbooks/roles/bootstrap-host/templates/osa/debian.interface.j2 @@ -33,7 +33,7 @@ iface {{ item.name }} inet static gateway {{ item.network.gateway | ipaddr('address') }} {% endif %} {% if item.network is defined and item.network.dns is defined %} - 
dns-nameservers {{ item.network.dns }} + dns-nameservers {{ item.network.dns | join(' ') }} {% endif %} {% endif %} diff --git a/xci/playbooks/roles/bootstrap-host/templates/osa/redhat.interface.j2 b/xci/playbooks/roles/bootstrap-host/templates/osa/redhat.interface.j2 index 3a51eb86..525686d9 100644 --- a/xci/playbooks/roles/bootstrap-host/templates/osa/redhat.interface.j2 +++ b/xci/playbooks/roles/bootstrap-host/templates/osa/redhat.interface.j2 @@ -21,6 +21,6 @@ IPADDR={{ item.network.address }} GATEWAY="{{ host_info[inventory_hostname]['public']['gateway'] | ipaddr('address') }}" {% endif %} {% if item.network is defined and item.network.dns is defined %} -DNS="{{ host_info[inventory_hostname]['public']['dns'] }}" +DNS="{{ host_info[inventory_hostname]['public']['dns'] | join(' ') }}" {% endif %} {% endif %} diff --git a/xci/playbooks/roles/bootstrap-host/templates/osa/suse.interface.j2 b/xci/playbooks/roles/bootstrap-host/templates/osa/suse.interface.j2 index 70811a09..7c2929d6 100644 --- a/xci/playbooks/roles/bootstrap-host/templates/osa/suse.interface.j2 +++ b/xci/playbooks/roles/bootstrap-host/templates/osa/suse.interface.j2 @@ -1,8 +1,7 @@ STARTMODE='auto' BOOTPROTO='static' {% if item.vlan_id is defined %} -ETHERDEVICE={{ ansible_default_ipv4.interface }} -VLAN_ID={{ item.vlan_id }} +ETHERDEVICE={{ item.name.split('.')[0] }} {% endif %} {% if item.bridge_ports is defined %} BRIDGE='yes' diff --git a/xci/playbooks/roles/bootstrap-host/templates/osh b/xci/playbooks/roles/bootstrap-host/templates/osh new file mode 120000 index 00000000..f820fd11 --- /dev/null +++ b/xci/playbooks/roles/bootstrap-host/templates/osh @@ -0,0 +1 @@ +osa
\ No newline at end of file diff --git a/xci/playbooks/roles/bootstrap-host/vars/main.yml b/xci/playbooks/roles/bootstrap-host/vars/main.yml new file mode 100644 index 00000000..1730ad57 --- /dev/null +++ b/xci/playbooks/roles/bootstrap-host/vars/main.yml @@ -0,0 +1,70 @@ +--- +# admin network information +admin_mac: "{{ host_info[inventory_hostname].admin.mac_address }}" +admin_interface: >- + {% for x in (ansible_interfaces | map('regex_replace', '-', '_') | map('regex_replace', '^', 'ansible_') | map('extract', hostvars[inventory_hostname]) | selectattr('macaddress','defined')) -%} + {%- if x.macaddress == admin_mac -%} + {%- if admin_vlan == 'native' -%} + {{ x.device }} + {%- else -%} + {{ x.device }}.{{ admin_vlan }} + {%- endif -%} + {%- endif -%} + {%- endfor -%} +admin_vlan: "{{ host_info[inventory_hostname].admin.vlan }}" + +# mgmt network information +mgmt_mac: "{{ host_info[inventory_hostname].mgmt.mac_address }}" +mgmt_interface: >- + {% for x in (ansible_interfaces | map('regex_replace', '-', '_') | map('regex_replace', '^', 'ansible_') | map('extract', hostvars[inventory_hostname]) | selectattr('macaddress','defined')) -%} + {%- if x.macaddress == mgmt_mac -%} + {%- if mgmt_vlan == 'native' -%} + {{ x.device }} + {%- else -%} + {{ x.device }}.{{ mgmt_vlan }} + {%- endif -%} + {%- endif -%} + {%- endfor -%} +mgmt_vlan: "{{ host_info[inventory_hostname].mgmt.vlan }}" + +# storage network information +storage_mac: "{{ host_info[inventory_hostname].storage.mac_address }}" +storage_interface: >- + {%- for x in (ansible_interfaces | map('regex_replace', '-', '_') | map('regex_replace', '^', 'ansible_') | map('extract', hostvars[inventory_hostname]) | selectattr('macaddress','defined')) -%} + {%- if x.macaddress == storage_mac -%} + {%- if storage_vlan == 'native' -%} + {{ x.device }} + {%- else -%} + {{ x.device }}.{{ storage_vlan }} + {%- endif -%} + {%- endif -%} + {%- endfor -%} +storage_vlan: "{{ host_info[inventory_hostname].storage.vlan }}" + +# 
public vlan network information +public_mac: "{{ host_info[inventory_hostname].public.mac_address }}" +public_interface: >- + {%- for x in (ansible_interfaces | map('regex_replace', '-', '_') | map('regex_replace', '^', 'ansible_') | map('extract', hostvars[inventory_hostname]) | selectattr('macaddress','defined')) -%} + {%- if x.macaddress == public_mac -%} + {%- if public_vlan == 'native' -%} + {{ x.device }} + {%- else -%} + {{ x.device }}.{{ public_vlan }} + {%- endif -%} + {%- endif -%} + {%- endfor -%} +public_vlan: "{{ host_info[inventory_hostname].public.vlan }}" + +# private vxlan network information +private_mac: "{{ host_info[inventory_hostname].private.mac_address }}" +private_interface: >- + {%- for x in (ansible_interfaces | map('regex_replace', '-', '_') | map('regex_replace', '^', 'ansible_') | map('extract', hostvars[inventory_hostname]) | selectattr('macaddress','defined')) -%} + {%- if x.macaddress == private_mac -%} + {%- if private_vlan == 'native' -%} + {{ x.device }} + {%- else -%} + {{x.device}}.{{ private_vlan }} + {%- endif -%} + {%- endif -%} + {%- endfor -%} +private_vlan: "{{ host_info[inventory_hostname].private.vlan }}" diff --git a/xci/playbooks/roles/create-nodes/README.md b/xci/playbooks/roles/create-nodes/README.md index bf079b9e..bf190296 100644 --- a/xci/playbooks/roles/create-nodes/README.md +++ b/xci/playbooks/roles/create-nodes/README.md @@ -71,7 +71,7 @@ vm_disk_cache: Disk cache mode to use by VMs disk. if that is not set, to 'writeback'. node_names: Space-separated names for nodes to be created. - Defaults to shell variable 'NODE_NAMES'. + It is taken from the hostnames variable in idf. If not set, VM names will be autogenerated. Note that independent on the number of names in this list, at most 'test_vm_num_nodes' VMs will be created. 
diff --git a/xci/playbooks/roles/create-nodes/defaults/main.yml b/xci/playbooks/roles/create-nodes/defaults/main.yml index 02a429cf..889f9c10 100644 --- a/xci/playbooks/roles/create-nodes/defaults/main.yml +++ b/xci/playbooks/roles/create-nodes/defaults/main.yml @@ -5,7 +5,6 @@ baremetal_json_file: '/tmp/baremetal.json' # We collect these parameters from the pdf vm_nic: "virtio" vm_disk_cache: unsafe -node_names: "{{ lookup('env', 'NODE_NAMES').split() }}" node_groups: {} node_default_groups: "{{ lookup('env', 'DEFAULT_HOST_GROUPS').split() | default(['baremetal'], true) }}" diff --git a/xci/playbooks/roles/create-nodes/files/virtualbmc.conf b/xci/playbooks/roles/create-nodes/files/virtualbmc.conf new file mode 100644 index 00000000..f8351dc1 --- /dev/null +++ b/xci/playbooks/roles/create-nodes/files/virtualbmc.conf @@ -0,0 +1,3 @@ +[log] +logfile: /var/log/vbmc.log +debug: true diff --git a/xci/playbooks/roles/create-nodes/tasks/barematalhoststojson.yml b/xci/playbooks/roles/create-nodes/tasks/baremetalhoststojson.yml index 070221fd..ef6ec345 100644 --- a/xci/playbooks/roles/create-nodes/tasks/barematalhoststojson.yml +++ b/xci/playbooks/roles/create-nodes/tasks/baremetalhoststojson.yml @@ -29,11 +29,23 @@ block: - set_fact: + node_name: "{{ idf.kubespray.hostnames[item.name] }}" + when: installer_type == "kubespray" + + - set_fact: + node_name: "{{ idf.osa.hostnames[item.name] }}" + when: installer_type == "osa" + + - set_fact: + node_name: "{{ idf.osh.hostnames[item.name] }}" + when: installer_type == "osh" + + - set_fact: host_group: "{{ node_default_groups }}" - set_fact: - host_group: "{{ node_default_groups | union(node_groups[item.name]) }}" - when: node_groups[item.name] is defined + host_group: "{{ node_default_groups | union(node_groups[node_name]) }}" + when: node_groups[node_name] is defined - name: BAREMETAL - Fetch the ip set_fact: @@ -46,10 +58,10 @@ - name: BAREMETAL - set the json entry for baremetal nodes set_fact: node_data: - name: "{{ 
item.name }}" - uuid: "{{ item.name | to_uuid }}" - host_groups: "{{ vm_host_group }}" - driver: "{{ vm_node_driver|default('ipmi') }}" + name: "{{ node_name }}" + uuid: "{{ node_name | to_uuid }}" + host_groups: "{{ host_group }}" + driver: "ipmi" driver_info: power: ipmi_address: "{{ item.remote_management.address }}" @@ -74,6 +86,6 @@ - name: BAREMETAL - add created node info set_fact: - nodes_json_data: "{{ nodes_json_data | combine({item.name: node_data}) }}" + nodes_json_data: "{{ nodes_json_data | combine({node_name: node_data}) }}" when: (num_nodes | int) > (nodes_json_data | length | int) + 1 diff --git a/xci/playbooks/roles/create-nodes/tasks/create_vm.yml b/xci/playbooks/roles/create-nodes/tasks/create_vm.yml index 7e1a745a..ac55bf32 100644 --- a/xci/playbooks/roles/create-nodes/tasks/create_vm.yml +++ b/xci/playbooks/roles/create-nodes/tasks/create_vm.yml @@ -2,7 +2,16 @@ - name: "Creating VM" block: - set_fact: - vm_name: "{{ node_names[item.0 | int] }}" + vm_name: "{{ idf.kubespray.hostnames[item.1.name] }}" + when: installer_type == "kubespray" + + - set_fact: + vm_name: "{{ idf.osa.hostnames[item.1.name] }}" + when: installer_type == "osa" + + - set_fact: + vm_name: "{{ idf.osh.hostnames[item.1.name] }}" + when: installer_type == "osh" - set_fact: vm_log_file: "{{ node_logdir }}/{{ vm_name }}_console.log" @@ -159,7 +168,7 @@ name: "{{ vm_name }}" uuid: "{{ vm_name | to_uuid }}" host_groups: "{{ vm_host_group }}" - driver: "{{ vm_node_driver|default('ipmi') }}" + driver: "ipmi" driver_info: power: ipmi_address: "192.168.122.1" diff --git a/xci/playbooks/roles/create-nodes/tasks/main.yml b/xci/playbooks/roles/create-nodes/tasks/main.yml index 0e51b411..607ac494 100644 --- a/xci/playbooks/roles/create-nodes/tasks/main.yml +++ b/xci/playbooks/roles/create-nodes/tasks/main.yml @@ -8,6 +8,8 @@ - name: "Install required packages" package: name: "{{ required_packages }}" + update_cache: "{{ (ansible_pkg_mgr in ['apt', 'zypper']) | ternary('yes', omit) }}" + 
state: present - include_tasks: prepare_libvirt.yml with_items: "{{ libvirt_networks }}" @@ -22,7 +24,7 @@ - include_tasks: create_vm.yml with_indexed_items: "{{ vms_to_create }}" -- include_tasks: barematalhoststojson.yml +- include_tasks: baremetalhoststojson.yml with_items: "{{ baremetal_nodes }}" - name: Start the opnfv vm diff --git a/xci/playbooks/roles/create-nodes/tasks/prepare_libvirt.yml b/xci/playbooks/roles/create-nodes/tasks/prepare_libvirt.yml index faf19a6f..06afaec3 100644 --- a/xci/playbooks/roles/create-nodes/tasks/prepare_libvirt.yml +++ b/xci/playbooks/roles/create-nodes/tasks/prepare_libvirt.yml @@ -125,5 +125,15 @@ - name: install virtualbmc pip: name: virtualbmc - version: 1.3 # >1.3 needs zmq dependency. + version: 1.5 # >1.3 needs zmq dependency. virtualenv: "{{ lookup('env', 'XCI_VENV') }}" + +- name: Create directory for the config of vbmc + file: + path: /etc/virtualbmc + state: directory + +- name: Place the config for virtualbmc + copy: + src: virtualbmc.conf + dest: /etc/virtualbmc/virtualbmc.conf diff --git a/xci/playbooks/roles/create-nodes/templates/vm.xml.j2 b/xci/playbooks/roles/create-nodes/templates/vm.xml.j2 index 6061fc52..9fad42b8 100644 --- a/xci/playbooks/roles/create-nodes/templates/vm.xml.j2 +++ b/xci/playbooks/roles/create-nodes/templates/vm.xml.j2 @@ -59,19 +59,9 @@ </video> <serial type='file'> <source path='{{ vm_log_file }}'/> - <target port='0'/> - <alias name='serial0'/> - </serial> - <serial type='pty'> - <source path='/dev/pts/49'/> <target port='1'/> <alias name='serial1'/> </serial> - <console type='file'> - <source path='{{ vm_log_file }}'/> - <target type='serial' port='0'/> - <alias name='serial0'/> - </console> <memballoon model='virtio'> <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/> </memballoon> diff --git a/xci/playbooks/roles/prepare-tests/tasks/main.yml b/xci/playbooks/roles/prepare-tests/tasks/main.yml index 45a23a3a..a543ac1f 100644 --- 
a/xci/playbooks/roles/prepare-tests/tasks/main.yml +++ b/xci/playbooks/roles/prepare-tests/tasks/main.yml @@ -11,6 +11,7 @@ - name: install required packages package: name: "{{ required_packages[ansible_pkg_mgr] }}" + update_cache: "{{ (ansible_pkg_mgr in ['apt', 'zypper']) | ternary('yes', omit) }}" state: present # Docker is needed for test frameworks @@ -26,6 +27,10 @@ state: present extra_args: '-c https://raw.githubusercontent.com/openstack/requirements/{{ requirements_git_install_branch }}/upper-constraints.txt' +# odl scenarios require to add odl variables to env +- include_tasks: process_neutron_conf.yml + when: "'-odl-' in deploy_scenario" + - name: prepare environment file for tests template: src: env.j2 diff --git a/xci/playbooks/roles/prepare-tests/tasks/process_neutron_conf.yml b/xci/playbooks/roles/prepare-tests/tasks/process_neutron_conf.yml new file mode 100644 index 00000000..45608df3 --- /dev/null +++ b/xci/playbooks/roles/prepare-tests/tasks/process_neutron_conf.yml @@ -0,0 +1,19 @@ +--- +- name: Collecting ODL variables + block: + - name: Fetch odl_password variable + shell: "cat /tmp/ml2_conf.ini | grep password | cut -d ' ' -f3" + register: odl_password + + - name: Fetch odl_username variable + shell: "cat /tmp/ml2_conf.ini | grep username | cut -d ' ' -f3" + register: odl_username + + - name: Fetch odl_port variable + shell: "cat /tmp/ml2_conf.ini | grep url | cut -d ':' -f3 | cut -d '/' -f1" + register: odl_port + + - name: Fetch odl_ip variable + shell: "cat /tmp/ml2_conf.ini | grep url | cut -d ':' -f2 | cut -d '/' -f3" + register: odl_ip + when: "'-odl-' in deploy_scenario" diff --git a/xci/playbooks/roles/prepare-tests/templates/env.j2 b/xci/playbooks/roles/prepare-tests/templates/env.j2 index d9a3bf32..d4f8f86c 100644 --- a/xci/playbooks/roles/prepare-tests/templates/env.j2 +++ b/xci/playbooks/roles/prepare-tests/templates/env.j2 @@ -5,3 +5,11 @@ ENERGY_RECORDER_API_URL=http://energy.opnfv.fr/resources {% if 'os-' in deploy_scenario %} 
EXTERNAL_NETWORK={{ external_network }} {% endif %} +{% if '-odl-' in deploy_scenario %} +SDN_CONTROLLER_IP={{ odl_ip.stdout }} +SDN_CONTROLLER_USER={{ odl_username.stdout }} +SDN_CONTROLLER_PASSWORD={{ odl_password.stdout }} +SDN_CONTROLLER_RESTCONFPORT={{ odl_port.stdout }} +SDN_CONTROLLER_WEBPORT={{ odl_port.stdout }} +{% endif %} + diff --git a/xci/scripts/vm/start-new-vm.sh b/xci/scripts/vm/start-new-vm.sh index 9eab0e5a..965cfe4c 100755 --- a/xci/scripts/vm/start-new-vm.sh +++ b/xci/scripts/vm/start-new-vm.sh @@ -119,14 +119,14 @@ COMMON_DISTRO_PKGS=(vim strace gdb htop dnsmasq docker iptables ebtables virt-ma case ${ID,,} in *suse*) - pkg_mgr_cmd="sudo zypper -q -n install ${COMMON_DISTRO_PKGS[@]} qemu-tools libvirt-daemon libvirt-client libvirt-daemon-driver-qemu" + pkg_mgr_cmd="sudo zypper -q -n install ${COMMON_DISTRO_PKGS[@]} qemu-tools libvirt-daemon libvirt-client libvirt-daemon-driver-qemu > /dev/null" ;; centos) - pkg_mgr_cmd="sudo yum install -C -q -y epel-release" - pkg_mgr_cmd+=" && sudo yum install -C -q -y in ${COMMON_DISTRO_PKGS[@]} qemu-kvm-tools qemu-img libvirt-daemon-kvm" + pkg_mgr_cmd="sudo yum install -C -q -y epel-release > /dev/null" + pkg_mgr_cmd+=" && sudo yum install -C -q -y in ${COMMON_DISTRO_PKGS[@]} qemu-kvm-tools qemu-img libvirt-daemon-kvm > /dev/null" ;; ubuntu) - pkg_mgr_cmd="sudo apt-get install --no-upgrade -y -q=3 ${COMMON_DISTRO_PKGS[@]} libvirt-bin qemu-utils docker.io" + pkg_mgr_cmd="sudo apt-get install --no-upgrade -y -q=3 ${COMMON_DISTRO_PKGS[@]} libvirt-bin qemu-utils docker.io > /dev/null" ;; esac @@ -367,7 +367,7 @@ if [[ $? 
!= 0 ]]; then #!/bin/bash set -o pipefail export XCI_FLAVOR=mini -export BIFROST_USE_PREBUILT_IMAGES=true +export BIFROST_CREATE_IMAGE_VIA_DIB=false cd ~/releng-xci/xci ./xci-deploy.sh | ts EOF diff --git a/xci/var/ericsson-idf-pod2.yml b/xci/var/ericsson-pod2-idf.yml index 8d20ae34..2839b120 100644 --- a/xci/var/ericsson-idf-pod2.yml +++ b/xci/var/ericsson-pod2-idf.yml @@ -9,7 +9,7 @@ ### ERICSSON POD 2 installer descriptor file ### idf: version: 0.1 - installer: ['apex', 'compass4nfv', 'daisy', 'osa'] + installer: ['apex', 'compass4nfv', 'daisy', 'osa', 'osh'] net_config: &net_config admin: interface: 2 @@ -73,6 +73,43 @@ idf: - 'ens1f1' - 'ens2f0' - 'ens2f1' + osh: &idf_osh + nodes_roles: + opnvf: [opnfv] + node1: [kube-master, etcd, vault] + node2: [kube-node] + node3: [kube-node] + node4: [kube-master, etcd, vault] + node5: [kube-master, etcd, vault] + groups: + k8s-cluster: + - kube-node + - kube-master + hostnames: + opnfv: opnfv + node1: master1 + node2: node1 + node3: node2 + node4: master2 + node5: master3 + network: + # network mapping + network_mapping: + # Management network used by installer components to communicate + net-mgmt: admin + # Storage Network + net-storage: storage + # Internal network for communication between VNF + net-internal: private + # Public network for VNF remote acces (ext-net in Openstack) + net-vnf: public + deployment_host_interfaces: + # Ordered-list, index should be in sync with interface index in PDF + - 'ens1f1' #should be eno49 but it is currently broken + - 'ens1f0' + - 'ens1f1' + - 'ens2f0' + - 'ens2f1' kubespray: &idf_kubespray nodes_roles: opnvf: [opnfv] @@ -92,8 +129,25 @@ idf: node3: node2 node4: master2 node5: master3 - - + network: + # network mapping + network_mapping: + # Management network used by installer components to communicate + net-mgmt: admin + # Storage Network + net-storage: storage + # Internal network for communication between VNF + net-internal: private + # Public network for VNF remote acces 
(ext-net in Openstack) + net-vnf: public + deployment_host_interfaces: + # Ordered-list, index should be in sync with interface index in PDF + - 'ens1f1' #should be eno49 but it is currently broken + - 'ens1f0' + - 'ens1f1' + - 'ens2f0' + - 'ens2f1' + xci: pod_name: pod1 net_config: *net_config @@ -130,3 +184,4 @@ xci: installers: osa: *idf_osa kubespray: *idf_kubespray + osh: *idf_osh diff --git a/xci/var/ericsson-pdf-pod2.yml b/xci/var/ericsson-pod2-pdf.yml index 4c7271ec..4c7271ec 100644 --- a/xci/var/ericsson-pdf-pod2.yml +++ b/xci/var/ericsson-pod2-pdf.yml diff --git a/xci/var/idf.yml b/xci/var/idf.yml index 7f20c3bf..8ed55f6f 100644 --- a/xci/var/idf.yml +++ b/xci/var/idf.yml @@ -29,7 +29,8 @@ idf: network: 192.168.122.0 mask: 24 gateway: 192.168.122.1 - dns: 192.168.122.1 + dns: + - 192.168.122.1 private: interface: 3 network: 172.29.244.0 @@ -95,6 +96,36 @@ idf: net-internal: private # Public network for VNF remote acces (ext-net in Openstack) net-vnf: public + osh: &idf_osh + nodes_roles: + opnfv: [opnfv] + node1: [kube-master, etcd, vault] + node2: [kube-node] + node3: [kube-node] + node4: [kube-master, etcd, vault] + node5: [kube-master, etcd, vault] + groups: + k8s-cluster: + - kube-node + - kube-master + hostnames: + opnfv: opnfv + node1: master1 + node2: node1 + node3: node2 + node4: master2 + node5: master3 + network: + # network mapping + network_mapping: + # Management network used by installer components to communicate + net-mgmt: mgmt + # Storage Network + net-storage: storage + # Internal network for communication between VNF + net-internal: private + # Public network for VNF remote acces (ext-net in Openstack) + net-vnf: public xci: pod_name: vpod1 net_config: *net_config @@ -130,3 +161,4 @@ xci: installers: osa: *idf_osa kubespray: *idf_kubespray + osh: *idf_osh diff --git a/xci/var/lf-pod4-idf.yml b/xci/var/lf-pod4-idf.yml new file mode 100644 index 00000000..55ca6b63 --- /dev/null +++ b/xci/var/lf-pod4-idf.yml @@ -0,0 +1,222 @@ 
+############################################################################## +# Copyright (c) 2018 Linux Foundation, Enea AB and others. +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +### LF POD 4 installer descriptor file ### + +idf: + version: 0.1 + installer: ['apex', 'compass4nfv', 'daisy', 'fuel', 'osa', 'osh'] + net_config: &net_config + oob: + interface: 0 + ip-range: 172.30.8.83-172.30.8.88 + vlan: 410 + mask: 24 + admin: + interface: 0 + vlan: native + network: 192.168.122.0 + gateway: 192.168.122.1 + dns: 8.8.8.8 + mask: 24 + mgmt: + interface: 1 + network: 172.29.236.0 + mask: 22 + storage: + interface: 3 + network: 172.29.240.0 + mask: 24 + private: + interface: 2 + network: 172.29.242.0 + mask: 24 + public: + interface: 4 + network: 192.168.122.0 + mask: 24 + gateway: 192.168.122.1 + dns: + - 8.8.8.8 + - 8.8.4.4 + osa: &idf_osa + nodes_roles: + opnfv: [deployment] + pod4-node1: [controller] + pod4-node2: [compute, storage] + pod4-node3: [compute, storage] + pod4-node4: [controller] + pod4-node5: [controller] + groups: + openstack: + - controller + - compute + - storage + hostnames: + opnfv: opnfv + pod4-node1: controller00 + pod4-node2: compute00 + pod4-node3: compute01 + pod4-node4: controller01 + pod4-node5: controller02 + network: + # network mapping + network_mapping: + # Management network used by installer components to communicate + net-mgmt: admin + # Storage Network + net-storage: storage + # Internal network for communication between VNF + net-internal: private + # Public network for VNF remote acces (ext-net in Openstack) + net-vnf: public + deployment_host_interfaces: + # Ordered-list, index should be in sync with interface index in PDF + - 'eno1' + - 
'eno3.450' + osh: &idf_osh + nodes_roles: + opnvf: [opnfv] + pod4-node1: [kube-master, etcd, vault] + pod4-node2: [kube-node] + pod4-node3: [kube-node] + pod4-node4: [kube-master, etcd, vault] + pod4-node5: [kube-master, etcd, vault] + groups: + k8s-cluster: + - kube-node + - kube-master + hostnames: + opnfv: opnfv + pod4-node1: master1 + pod4-node2: node1 + pod4-node3: node2 + pod4-node4: master2 + pod4-node5: master3 + network: + # network mapping + network_mapping: + # Management network used by installer components to communicate + net-mgmt: admin + # Storage Network + net-storage: storage + # Internal network for communication between VNF + net-internal: private + # Public network for VNF remote acces (ext-net in Openstack) + net-vnf: public + deployment_host_interfaces: + # Ordered-list, index should be in sync with interface index in PDF + - 'eno1' + - 'eno3.450' + kubespray: &idf_kubespray + nodes_roles: + opnvf: [opnfv] + pod4-node1: [kube-master, etcd, vault] + pod4-node2: [kube-node] + pod4-node3: [kube-node] + pod4-node4: [kube-master, etcd, vault] + pod4-node5: [kube-master, etcd, vault] + groups: + k8s-cluster: + - kube-node + - kube-master + hostnames: + opnfv: opnfv + pod4-node1: master1 + pod4-node2: node1 + pod4-node3: node2 + pod4-node4: master2 + pod4-node5: master3 + network: + # network mapping + network_mapping: + # Management network used by installer components to communicate + net-mgmt: admin + # Storage Network + net-storage: storage + # Internal network for communication between VNF + net-internal: private + # Public network for VNF remote acces (ext-net in Openstack) + net-vnf: public + deployment_host_interfaces: + # Ordered-list, index should be in sync with interface index in PDF + - 'eno1' + - 'eno3.450' + fuel: + jumphost: + bridges: + admin: 'pxebr' + mgmt: 'br-ctl' + private: ~ + public: ~ + network: + node: + # Ordered-list, index should be in sync with node index in PDF + - interfaces: &interfaces + # Ordered-list, index should 
be in sync with interface index in PDF + - 'eno1' + - 'eno3' + - 'eno4' + busaddr: &busaddr + # Bus-info reported by `ethtool -i ethX` + - '0000:04:00.0' + - '0000:02:00.0' + - '0000:02:00.1' + - interfaces: *interfaces + busaddr: *busaddr + - interfaces: *interfaces + busaddr: *busaddr + - interfaces: *interfaces + busaddr: *busaddr + - interfaces: *interfaces + busaddr: *busaddr +xci: + pod_name: lf-pod4 + net_config: *net_config + nodes_roles: + opnfv_host: [opnfv_host] + pod4-node1: [compute, storage] + pod4-node2: [compute, storage] + pod4-node3: [controller, storage] + pod4-node4: [controller, storage] + pod4-node5: [controller, storage] + + # net_config network to be used by the PXE + pxe_network: admin + + # As the MAC of generated bridges are generated, we use a list of local + # bridges to create libvirt networks + jumphost_interfaces_bridges: + - name: br_admin + ip: + + extra_addresses: + opnfv_host: 192.168.12.2 + + flavors: + mini: + - opnfv + - pod4-node1 + - pod4-node2 + noha: + - opnfv + - pod4-node1 + - pod4-node2 + - pod4-node3 + ha: + - opnfv + - pod4-node1 + - pod4-node2 + - pod4-node3 + - pod4-node4 + - pod4-node5 + + installers: + osa: *idf_osa + kubespray: *idf_kubespray + osh: *idf_osh diff --git a/xci/var/lf-pod4-pdf.yml b/xci/var/lf-pod4-pdf.yml new file mode 100644 index 00000000..9607e4db --- /dev/null +++ b/xci/var/lf-pod4-pdf.yml @@ -0,0 +1,198 @@ +############################################################################## +# Copyright (c) 2018 Linux Foundation, Enea AB and others. +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +### LF POD 4 descriptor file ### + +version: 1.0 +details: + pod_owner: Trevor Bramwell + contact: tbramwell@linuxfoundation.org + lab: Linux Foundation + location: Portland, Oregon, USA + type: development + link: https://wiki.opnfv.org/display/pharos/LF+POD+4 +jumphost: + name: pod4-jump + node: &nodeparams + type: baremetal + vendor: Intel Corporation + model: S2600WT2R + arch: x86_64 + cpus: 88 + cpu_cflags: haswell + cores: 22 + memory: 62G + disks: &disks + - name: 'disk1' + disk_capacity: 480G + disk_type: ssd + disk_interface: sata + disk_rotation: 0 + os: centos-7 + remote_params: &remote_params + type: ipmi + versions: + - 2.0 + user: admin + pass: octopus + remote_management: + <<: *remote_params + address: 172.30.8.83 + mac_address: "a4:bf:01:01:b0:bb" + interfaces: + - name: nic1 + speed: 1gb + features: 'dpdk|sriov' + vlan: native + mac_address: "a4:bf:01:01:b0:b9" + address: 192.168.12.1 + - name: nic2 + speed: 10gb + features: 'dpdk|sriov' + vlan: 450 + mac_address: "00:1e:67:fd:9a:04" + address: 192.168.0.2 + - name: nic3 + speed: 10gb + features: 'dpdk|sriov' + vlan: 452 + mac_address: "00:1e:67:fd:9a:04" + address: 192.168.2.2 + - name: nic4 + speed: 10gb + features: 'dpdk|sriov' + vlan: 451 + mac_address: "00:1e:67:fd:9a:05" + address: 192.168.1.2 + - name: nic5 + speed: 10gb + features: 'dpdk|sriov' + vlan: 414 + mac_address: "00:1e:67:fd:9a:05" + address: 172.30.12.83 +############################################################################## +nodes: + - name: pod4-node1 + node: *nodeparams + disks: *disks + remote_management: + <<: *remote_params + address: 172.30.8.84 + mac_address: "a4:bf:01:01:ab:b6" + interfaces: + - mac_address: 
"a4:bf:01:01:ab:b4" + address: 192.168.122.3 + vlan: native + - mac_address: "00:1e:67:fd:9b:32" + address: 172.29.236.11 + vlan: 450 + - mac_address: "00:1e:67:fd:9b:32" + address: 192.168.122.3 + vlan: 452 + - mac_address: "00:1e:67:fd:9b:33" + address: 172.29.240.11 + vlan: 451 + - mac_address: "00:1e:67:fd:9b:33" + address: 172.29.242.11 + vlan: 414 + ############################################################################ + - name: pod4-node2 + node: *nodeparams + disks: *disks + remote_management: + <<: *remote_params + address: 172.30.8.85 + mac_address: "a4:bf:01:01:b6:97" + interfaces: + - mac_address: "a4:bf:01:01:b6:95" + address: 192.168.122.4 + vlan: native + - mac_address: "00:1e:67:fd:98:e2" + address: 172.29.236.12 + vlan: 450 + - mac_address: "00:1e:67:fd:98:e2" + address: 192.168.122.4 + vlan: 452 + - mac_address: "00:1e:67:fd:98:e3" + address: 172.29.240.12 + vlan: 451 + - mac_address: "00:1e:67:fd:98:e3" + address: 172.29.242.12 + vlan: 414 + ############################################################################ + - name: pod4-node3 + node: *nodeparams + disks: *disks + remote_management: + <<: *remote_params + address: 172.30.8.86 + mac_address: "a4:bf:01:01:66:fe" + interfaces: + - mac_address: "a4:bf:01:01:66:fc" + address: 192.168.122.5 + vlan: native + - mac_address: "00:1e:67:fd:9c:c8" + address: 172.29.236.13 + vlan: 450 + - mac_address: "00:1e:67:fd:9c:c8" + address: 192.168.122.5 + vlan: 452 + - mac_address: "00:1e:67:fd:9c:c9" + address: 172.29.240.13 + vlan: 451 + - mac_address: "00:1e:67:fd:9c:c9" + address: 172.29.242.13 + vlan: 414 + ############################################################################ + - name: pod4-node4 + node: *nodeparams + disks: *disks + remote_management: + <<: *remote_params + address: 172.30.8.87 + mac_address: "a4:bf:01:01:b2:f5" + interfaces: + - mac_address: "a4:bf:01:01:b2:f3" + address: 192.168.122.6 + vlan: native + - mac_address: "00:1e:67:fd:9b:38" + address: 172.29.236.14 + vlan: 
450 + - mac_address: "00:1e:67:fd:9b:38" + address: 192.168.122.6 + vlan: 452 + - mac_address: "00:1e:67:fd:9b:39" + address: 172.29.240.14 + vlan: 451 + - mac_address: "00:1e:67:fd:9b:39" + address: 172.29.242.14 + vlan: 414 + ############################################################################ + - name: pod4-node5 + node: *nodeparams + disks: *disks + remote_management: + <<: *remote_params + address: 172.30.8.88 + mac_address: "a4:bf:01:01:b5:11" + interfaces: + - mac_address: "a4:bf:01:01:b5:0f" + address: 192.168.122.7 + vlan: native + - mac_address: "00:1e:67:fd:99:40" + address: 172.29.236.15 + vlan: 450 + - mac_address: "00:1e:67:fd:99:40" + address: 192.168.122.7 + vlan: 452 + - mac_address: "00:1e:67:fd:99:41" + address: 172.29.240.15 + vlan: 451 + - mac_address: "00:1e:67:fd:99:41" + address: 172.29.242.14 + vlan: 414 diff --git a/xci/var/opnfv.yml b/xci/var/opnfv.yml index b24a6d98..91b9ee38 100644 --- a/xci/var/opnfv.yml +++ b/xci/var/opnfv.yml @@ -28,8 +28,18 @@ openstack_osa_haproxy_git_url: "{{ lookup('env','OPENSTACK_OSA_HAPROXY_GIT_URL') # kubespray variables kubespray_git_url: "{{ lookup('env','KUBESPRAY_GIT_URL') }}" kubespray_version: "{{ lookup('env','KUBESPRAY_VERSION') }}" +kubernetes_version: "{{ lookup('env','KUBERNETES_VERSION') }}" xci_kube_ansible_pip_version: "{{ lookup('env','XCI_KUBE_ANSIBLE_PIP_VERSION') }}" +# openstack-helm variables +osh_git_url: "{{ lookup('env','OSH_GIT_URL') }}" +osh_version: "{{ lookup('env','OSH_VERSION') }}" +osh_infra_git_url: "{{ lookup('env','OSH_INFRA_GIT_URL') }}" +osh_infra_version: "{{ lookup('env','OSH_INFRA_VERSION') }}" +osh_helm_binary_url: "{{ lookup('env','OSH_HELM_BINARY_URL') }}" +osh_helm_binary_version: "{{ lookup('env','OSH_HELM_BINARY_VERSION') }}" +openstack_osh_version: "{{ lookup('env','OPENSTACK_OSH_VERSION') }}" + # variables for other components keepalived_git_url: "{{ lookup('env','KEEPALIVED_GIT_URL') }}" haproxy_version: "{{ lookup('env','HAPROXY_VERSION') }}" @@ -49,6 
+59,7 @@ run_tempest: "{{ lookup('env', 'RUN_TEMPEST') }}" core_openstack_install: "{{ lookup('env', 'CORE_OPENSTACK_INSTALL') }}" deploy_scenario: "{{ lookup('env','DEPLOY_SCENARIO') }}" installer_type: "{{ lookup('env','INSTALLER_TYPE') }}" +osh_distro: "{{ lookup('env', 'OSH_DISTRO') }}" # baremetal variables baremetal: "{{ lookup('env','BAREMETAL') }}" |