summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rwxr-xr-xxci/config/env-vars5
-rwxr-xr-xxci/config/pinned-versions7
-rwxr-xr-xxci/config/user-vars9
-rw-r--r--xci/installer/kubespray/README64
-rwxr-xr-xxci/installer/kubespray/deploy.sh85
-rw-r--r--xci/installer/kubespray/files/aio/inventory/inventory.cfg20
-rw-r--r--xci/installer/kubespray/files/ha/inventory/group_vars/all.yml8
-rw-r--r--xci/installer/kubespray/files/ha/inventory/inventory.cfg32
-rw-r--r--xci/installer/kubespray/files/k8s-cluster.yml291
-rw-r--r--xci/installer/kubespray/files/mini/inventory/inventory.cfg22
-rw-r--r--xci/installer/kubespray/files/noha/inventory/inventory.cfg24
-rw-r--r--xci/installer/kubespray/playbooks/configure-opnfvhost.yml77
-rw-r--r--xci/installer/kubespray/playbooks/configure-targethosts.yml28
-rw-r--r--xci/installer/kubespray/playbooks/group_vars/all58
-rwxr-xr-xxci/installer/osa/deploy.sh2
-rw-r--r--xci/installer/osa/playbooks/configure-localhost.yml75
-rw-r--r--xci/opnfv-scenario-requirements.yml15
-rw-r--r--xci/playbooks/configure-localhost.yml100
-rw-r--r--xci/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature/tasks/.gitkeep (renamed from xci/scenarios/k8-nosdn-nofeature/.gitkeep)0
-rwxr-xr-xxci/scripts/vm/start-new-vm.sh8
-rw-r--r--xci/var/opnfv.yml7
21 files changed, 858 insertions, 79 deletions
diff --git a/xci/config/env-vars b/xci/config/env-vars
index c7744ca1..f188149f 100755
--- a/xci/config/env-vars
+++ b/xci/config/env-vars
@@ -7,6 +7,9 @@ export OPENSTACK_BIFROST_GIT_URL=https://git.openstack.org/openstack/bifrost
export OPENSTACK_OSA_GIT_URL=https://git.openstack.org/openstack/openstack-ansible
export OPENSTACK_OSA_OPENRC_GIT_URL=https://git.openstack.org/openstack/openstack-ansible-openstack_openrc
export OPENSTACK_OSA_ETC_PATH=/etc/openstack_deploy
+export KUBESPRAY_GIT_URL=https://github.com/kubernetes-incubator/kubespray.git
+export OPENSTACK_OSA_HAPROXY_GIT_URL=https://git.openstack.org/openstack/openstack-ansible-haproxy_server
+export KEEPALIVED_GIT_URL=https://github.com/evrardjp/ansible-keepalived
export OPNFV_HOST_IP=192.168.122.2
export XCI_FLAVOR_ANSIBLE_FILE_PATH=$XCI_PATH/xci/installer/$XCI_INSTALLER/files/$XCI_FLAVOR
export CI_LOOP=${CI_LOOP:-daily}
@@ -34,3 +37,5 @@ export ANSIBLE_HOST_KEY_CHECKING=False
# subject of the certificate
export XCI_SSL_SUBJECT=${XCI_SSL_SUBJECT:-"/C=US/ST=California/L=San Francisco/O=IT/CN=xci.releng.opnfv.org"}
export DEPLOY_SCENARIO=${DEPLOY_SCENARIO:-"os-nosdn-nofeature"}
+# Kubespray requires that ansible version is 2.4.0.0
+export XCI_KUBE_ANSIBLE_PIP_VERSION=2.4.0.0
diff --git a/xci/config/pinned-versions b/xci/config/pinned-versions
index 023c9999..89b03df1 100755
--- a/xci/config/pinned-versions
+++ b/xci/config/pinned-versions
@@ -29,3 +29,10 @@ export OPNFV_RELENG_VERSION="master"
export OPENSTACK_BIFROST_VERSION=${OPENSTACK_BIFROST_VERSION:-"8b4c956bf0ec6c1784e41af2a0598cb49c41461f"}
# HEAD of osa "master" as of 11.12.2017
export OPENSTACK_OSA_VERSION=${OPENSTACK_OSA_VERSION:-"7b3aac28a0a87e5966527829f6b0abcbc2303cc7"}
+export KEEPALIVED_VERSION=$(grep -E '.*name: keepalived' -A 3 \
+ ${XCI_PATH}/xci/installer/osa/files/ansible-role-requirements.yml \
+ | tail -n1 | sed -n 's/\(^.*: \)\([0-9a-z].*$\)/\2/p')
+export HAPROXY_VERSION=$(grep -E '.*name: haproxy_server' -A 3 \
+ ${XCI_PATH}/xci/installer/osa/files/ansible-role-requirements.yml \
+ | tail -n1 | sed -n 's/\(^.*: \)\([0-9a-z].*$\)/\2/p')
+export KUBESPRAY_VERSION=${KUBESPRAY_VERSION:-"master"}
diff --git a/xci/config/user-vars b/xci/config/user-vars
index 64f2882b..49124233 100755
--- a/xci/config/user-vars
+++ b/xci/config/user-vars
@@ -23,6 +23,15 @@
export XCI_FLAVOR=${XCI_FLAVOR:-aio}
export XCI_DISTRO=${XCI_DISTRO:-$(source /etc/os-release &>/dev/null || source /usr/lib/os-release &>/dev/null; echo ${ID,,})}
export XCI_CEPH_ENABLED=${XCI_CEPH_ENABLED:-false}
+
+#-------------------------------------------------------------------------------
+# Set INSTALLER
+#-------------------------------------------------------------------------------
+# Currently, there are two kinds of installers in XCI: osa and kubespray
+# Examples:
+# export XCI_INSTALLER="osa"
+# or
+# export XCI_INSTALLER="kubespray"
export XCI_INSTALLER=${XCI_INSTALLER:-osa}
#-------------------------------------------------------------------------------
diff --git a/xci/installer/kubespray/README b/xci/installer/kubespray/README
new file mode 100644
index 00000000..171091e8
--- /dev/null
+++ b/xci/installer/kubespray/README
@@ -0,0 +1,64 @@
+The xci/installer/kubespray/files/k8s-cluster.yml is obtained from kubespray.
+You can change the parameters according to your needs.
+When starting the deploy, it will be copied to the right directory and will be used by kubespray
+
+For example:
+ kube_network_plugin: calico
+ docker_storage_options: -s overlay2
+ kube_service_addresses: 10.233.0.0/18
+
+Requirements:
+ 1. Performance of hosts
+ The performance settings are not officially required. The following settings are recommended:
+ - VM_CPU=6
+ - VM_DISK=80GB
+ - VM_MEMORY_SIZE=16GB
+
+ 2. Distributions
+ - Ubuntu 16.04
+
+ 3. Packages:
+ - Ansible v2.4 (or newer) and python-netaddr is installed on the machine that will run Ansible commands
+ - Jinja 2.9 (or newer) is required to run the Ansible Playbooks
+
+ 4. Others:
+ - The target servers must have access to the Internet in order to pull docker images.
+ - The target servers are configured to allow IPv4 forwarding.
+ - Your ssh key must be copied to all the servers part of your inventory.
+ - The firewalls are not managed, you'll need to implement your own rules the way you used to. In order to avoid any issue during the deployment you should disable your firewall.
+
+Flavors:
+ 1. aio: Single host which acts as the deployment host, master and node.
+ 2. mini: One deployment host, 1 master host and 1 node host.
+ 3. noha: One deployment host, 1 master host and 2 node hosts.
+ 4. ha: One deployment host, 3 master hosts and 2 node hosts.
+
+Components Installed:
+ 1. etcd
+ 2. network plugins:(one of following, which you can choose. Default is calico)
+ - calico
+ - flannel
+ - contiv
+ - weave
+ 3. kubernetes
+ 4. docker
+
+How to use:
+
+Clone the OPNFV Releng repository
+
+ git clone https://gerrit.opnfv.org/gerrit/releng-xci.git
+
+Change into the directory where the sandbox script is located
+
+ cd releng-xci/xci
+
+Set the variable to run kubespray
+
+ export XCI_INSTALLER=kubespray
+ export DEPLOY_SCENARIO=k8-nosdn-nofeature
+ export XCI_FLAVOR=mini
+
+Execute sandbox script
+
+ ./xci-deploy.sh
diff --git a/xci/installer/kubespray/deploy.sh b/xci/installer/kubespray/deploy.sh
new file mode 100755
index 00000000..b04ed781
--- /dev/null
+++ b/xci/installer/kubespray/deploy.sh
@@ -0,0 +1,85 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2017 Huawei
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+K8_XCI_PLAYBOOKS="$(dirname $(realpath ${BASH_SOURCE[0]}))/playbooks"
+export ANSIBLE_ROLES_PATH=$HOME/.ansible/roles:/etc/ansible/roles:${XCI_PATH}/xci/playbooks/roles
+
+
+#-------------------------------------------------------------------------------
+# Configure localhost
+#-------------------------------------------------------------------------------
+# This playbook
+# - removes directories that were created by the previous xci run
+# - clones opnfv/releng-xci repository
+# - clones kubernetes-incubator/kubespray repository
+# - creates log directory
+#-------------------------------------------------------------------------------
+
+echo "Info: Configuring localhost for kubespray"
+echo "-----------------------------------------------------------------------"
+cd $XCI_PLAYBOOKS
+ansible-playbook ${XCI_ANSIBLE_VERBOSITY} -e XCI_PATH="${XCI_PATH}" \
+ -i ${XCI_FLAVOR_ANSIBLE_FILE_PATH}/inventory/inventory.cfg \
+ configure-localhost.yml
+echo "-----------------------------------------------------------------------"
+echo "Info: Configured localhost for kubespray"
+
+#-------------------------------------------------------------------------------
+# Configure deployment host, opnfv
+#-------------------------------------------------------------------------------
+# This playbook
+# - removes directories that were created by the previous xci run
+# - synchronize opnfv/releng-xci and kubernetes-incubator/kubespray repositories
+# - generates/prepares ssh keys
+# - copies flavor files to be used by kubespray
+# - install packages required by kubespray
+#-------------------------------------------------------------------------------
+echo "Info: Configuring opnfv deployment host for kubespray"
+echo "-----------------------------------------------------------------------"
+cd $K8_XCI_PLAYBOOKS
+ansible-playbook ${XCI_ANSIBLE_VERBOSITY} -e XCI_PATH="${XCI_PATH}" \
+ -i ${XCI_FLAVOR_ANSIBLE_FILE_PATH}/inventory/inventory.cfg \
+ configure-opnfvhost.yml
+echo "-----------------------------------------------------------------------"
+echo "Info: Configured opnfv deployment host for kubespray"
+
+#-------------------------------------------------------------------------------
+# Configure target hosts for kubespray
+#-------------------------------------------------------------------------------
+# This playbook is only run for all flavors except aio since aio is configured by configure-opnfvhost.yml
+# This playbook
+# - adds public keys to target hosts
+# - install packages required by kubespray
+# - configures haproxy service
+#-------------------------------------------------------------------------------
+if [ $XCI_FLAVOR != "aio" ]; then
+ echo "Info: Configuring target hosts for kubespray"
+ echo "-----------------------------------------------------------------------"
+ cd $K8_XCI_PLAYBOOKS
+ ansible-playbook ${XCI_ANSIBLE_VERBOSITY} -e XCI_PATH="${XCI_PATH}" \
+ -i ${XCI_FLAVOR_ANSIBLE_FILE_PATH}/inventory/inventory.cfg \
+ configure-targethosts.yml
+ echo "-----------------------------------------------------------------------"
+ echo "Info: Configured target hosts for kubespray"
+fi
+
+echo "Info: Using kubespray to deploy the kubernetes cluster"
+echo "-----------------------------------------------------------------------"
+ssh root@$OPNFV_HOST_IP "cd releng-xci/.cache/repos/kubespray;\
+ ansible-playbook ${XCI_ANSIBLE_VERBOSITY} \
+ -i opnfv_inventory/inventory.cfg cluster.yml -b | tee setup-kubernetes.log"
+scp root@$OPNFV_HOST_IP:~/releng-xci/.cache/repos/kubespray/setup-kubernetes.log \
+ $LOG_PATH/setup-kubernetes.log
+# check the log to see if we have any error
+if grep -q 'failed=1\|unreachable=1' $LOG_PATH/setup-kubernetes.log; then
+ echo "Error: Kubernetes cluster setup failed!"
+ exit 1
+fi
+echo "Info: Kubernetes installation is successfully completed!"
diff --git a/xci/installer/kubespray/files/aio/inventory/inventory.cfg b/xci/installer/kubespray/files/aio/inventory/inventory.cfg
new file mode 100644
index 00000000..a72d0fec
--- /dev/null
+++ b/xci/installer/kubespray/files/aio/inventory/inventory.cfg
@@ -0,0 +1,20 @@
+[all]
+opnfv ansible_host=192.168.122.2 ip=192.168.122.2
+
+[kube-master]
+opnfv
+
+[kube-node]
+opnfv
+
+[etcd]
+opnfv
+
+[k8s-cluster:children]
+kube-node
+kube-master
+
+[calico-rr]
+
+[vault]
+opnfv
diff --git a/xci/installer/kubespray/files/ha/inventory/group_vars/all.yml b/xci/installer/kubespray/files/ha/inventory/group_vars/all.yml
new file mode 100644
index 00000000..d1b946a7
--- /dev/null
+++ b/xci/installer/kubespray/files/ha/inventory/group_vars/all.yml
@@ -0,0 +1,8 @@
+---
+loadbalancer_apiserver:
+ address: 192.168.122.222
+ port: 8383
+
+apiserver_loadbalancer_domain_name: 192.168.122.222
+supplementary_addresses_in_ssl_keys:
+ - 192.168.122.222
diff --git a/xci/installer/kubespray/files/ha/inventory/inventory.cfg b/xci/installer/kubespray/files/ha/inventory/inventory.cfg
new file mode 100644
index 00000000..aae36329
--- /dev/null
+++ b/xci/installer/kubespray/files/ha/inventory/inventory.cfg
@@ -0,0 +1,32 @@
+[all]
+opnfv ansible_host=192.168.122.2 ip=192.168.122.2
+master1 ansible_host=192.168.122.3 ip=192.168.122.3
+master2 ansible_host=192.168.122.4 ip=192.168.122.4
+master3 ansible_host=192.168.122.5 ip=192.168.122.5
+node1 ansible_host=192.168.122.6 ip=192.168.122.6
+node2 ansible_host=192.168.122.7 ip=192.168.122.7
+
+[kube-master]
+master1
+master2
+master3
+
+[kube-node]
+node1
+node2
+
+[etcd]
+master1
+master2
+master3
+
+[k8s-cluster:children]
+kube-node
+kube-master
+
+[calico-rr]
+
+[vault]
+master1
+master2
+master3
diff --git a/xci/installer/kubespray/files/k8s-cluster.yml b/xci/installer/kubespray/files/k8s-cluster.yml
new file mode 100644
index 00000000..aeee573a
--- /dev/null
+++ b/xci/installer/kubespray/files/k8s-cluster.yml
@@ -0,0 +1,291 @@
+# Valid bootstrap options (required): ubuntu, coreos, centos, none
+bootstrap_os: none
+
+#Directory where etcd data stored
+etcd_data_dir: /var/lib/etcd
+
+# Directory where the binaries will be installed
+bin_dir: /usr/local/bin
+
+## The access_ip variable is used to define how other nodes should access
+## the node. This is used in flannel to allow other flannel nodes to see
+## this node for example. The access_ip is really useful AWS and Google
+## environments where the nodes are accessed remotely by the "public" ip,
+## but don't know about that address themselves.
+#access_ip: 1.1.1.1
+
+### LOADBALANCING AND ACCESS MODES
+## Enable multiaccess to configure etcd clients to access all of the etcd members directly
+## as the "http://hostX:port, http://hostY:port, ..." and ignore the proxy loadbalancers.
+## This may be the case if clients support and loadbalance multiple etcd servers natively.
+#etcd_multiaccess: true
+
+## Internal loadbalancers for apiservers
+#loadbalancer_apiserver_localhost: true
+
+## Local loadbalancer should use this port instead, if defined.
+## Defaults to kube_apiserver_port (6443)
+#nginx_kube_apiserver_port: 8443
+
+### OTHER OPTIONAL VARIABLES
+## For some things, kubelet needs to load kernel modules. For example, dynamic kernel services are needed
+## for mounting persistent volumes into containers. These may not be loaded by preinstall kubernetes
+## processes. For example, ceph and rbd backed volumes. Set to true to allow kubelet to load kernel
+## modules.
+# kubelet_load_modules: false
+
+## Internal network total size. This is the prefix of the
+## entire network. Must be unused in your environment.
+#kube_network_prefix: 18
+
+## With calico it is possible to distributed routes with border routers of the datacenter.
+## Warning : enabling router peering will disable calico's default behavior ('node mesh').
+## The subnets of each nodes will be distributed by the datacenter router
+#peer_with_router: false
+
+## Upstream dns servers used by dnsmasq
+#upstream_dns_servers:
+# - 8.8.8.8
+# - 8.8.4.4
+
+## There are some changes specific to the cloud providers
+## for instance we need to encapsulate packets with some network plugins
+## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', or 'external'
+## When openstack is used make sure to source in the openstack credentials
+## like you would do when using nova-client before starting the playbook.
+#cloud_provider:
+
+## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (https://github.com/kubernetes/kubernetes/issues/50461)
+#openstack_blockstorage_version: "v1/v2/auto (default)"
+## When OpenStack is used, if LBaaSv2 is available you can enable it with the following variables.
+#openstack_lbaas_enabled: True
+#openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
+#openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
+#openstack_lbaas_create_monitor: "yes"
+#openstack_lbaas_monitor_delay: "1m"
+#openstack_lbaas_monitor_timeout: "30s"
+#openstack_lbaas_monitor_max_retries: "3"
+
+## Uncomment to enable experimental kubeadm deployment mode
+#kubeadm_enabled: false
+#kubeadm_token_first: "{{ lookup('password', 'credentials/kubeadm_token_first length=6 chars=ascii_lowercase,digits') }}"
+#kubeadm_token_second: "{{ lookup('password', 'credentials/kubeadm_token_second length=16 chars=ascii_lowercase,digits') }}"
+#kubeadm_token: "{{ kubeadm_token_first }}.{{ kubeadm_token_second }}"
+#
+## Set these proxy values in order to update package manager and docker daemon to use proxies
+#http_proxy: ""
+#https_proxy: ""
+## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
+#no_proxy: ""
+
+## Uncomment this if you want to force overlay/overlay2 as docker storage driver
+## Please note that overlay2 is only supported on newer kernels
+#docker_storage_options: -s overlay2
+
+# Uncomment this if you have more than 3 nameservers, then we'll only use the first 3.
+#docker_dns_servers_strict: false
+
+## Default packages to install within the cluster, f.e:
+#kpm_packages:
+# - name: kube-system/grafana
+
+## Certificate Management
+## This setting determines whether certs are generated via scripts or whether a
+## cluster of Hashicorp's Vault is started to issue certificates (using etcd
+## as a backend). Options are "script" or "vault"
+#cert_management: script
+
+# Set to true to allow pre-checks to fail and continue deployment
+#ignore_assert_errors: false
+
+## Etcd auto compaction retention for mvcc key value store in hour
+#etcd_compaction_retention: 0
+
+## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics.
+#etcd_metrics: basic
+
+
+# Kubernetes configuration dirs and system namespace.
+# Those are where all the additional config stuff goes
+# kubernetes normally puts in /srv/kubernetes.
+# This puts them in a sane location and namespace.
+# Editing those values will almost surely break something.
+kube_config_dir: /etc/kubernetes
+kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
+kube_manifest_dir: "{{ kube_config_dir }}/manifests"
+system_namespace: kube-system
+
+# Logging directory (sysvinit systems)
+kube_log_dir: "/var/log/kubernetes"
+
+# This is where all the cert scripts and certs will be located
+kube_cert_dir: "{{ kube_config_dir }}/ssl"
+
+# This is where all of the bearer tokens will be stored
+kube_token_dir: "{{ kube_config_dir }}/tokens"
+
+# This is where to save basic auth file
+kube_users_dir: "{{ kube_config_dir }}/users"
+
+kube_api_anonymous_auth: false
+
+## Change this to use another Kubernetes version, e.g. a current beta release
+#kube_version: v1.9.0
+
+# Where the binaries will be downloaded.
+# Note: ensure that you've enough disk space (about 1G)
+local_release_dir: "/tmp/releases"
+# Random shifts for retrying failed ops like pushing/downloading
+retry_stagger: 5
+
+# This is the group that the cert creation scripts chgrp the
+# cert files to. Not really changeable...
+kube_cert_group: kube-cert
+
+# Cluster Loglevel configuration
+kube_log_level: 2
+
+# Users to create for basic auth in Kubernetes API via HTTP
+# Optionally add groups for user
+kube_api_pwd: "{{ lookup('password', 'credentials/kube_user length=15 chars=ascii_letters,digits') }}"
+kube_users:
+ kube:
+ pass: "{{kube_api_pwd}}"
+ role: admin
+ groups:
+ - system:masters
+
+## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth)
+#kube_oidc_auth: false
+#kube_basic_auth: false
+#kube_token_auth: false
+
+
+## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/
+## To use OpenID you have to deploy an additional OpenID Provider (e.g. Dex, Keycloak, ...)
+
+# kube_oidc_url: https:// ...
+# kube_oidc_client_id: kubernetes
+## Optional settings for OIDC
+# kube_oidc_ca_file: {{ kube_cert_dir }}/ca.pem
+# kube_oidc_username_claim: sub
+# kube_oidc_groups_claim: groups
+
+
+# Choose network plugin (calico, contiv, weave or flannel)
+# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
+kube_network_plugin: calico
+
+# weave's network password for encryption
+# if null then no network encryption
+# you can use --extra-vars to pass the password in command line
+weave_password: EnterPasswordHere
+
+# Weave uses consensus mode by default
+# Enabling seed mode allow to dynamically add or remove hosts
+# https://www.weave.works/docs/net/latest/ipam/
+weave_mode_seed: false
+
+# These two variables are automatically changed by the weave role; do not manually change these values
+# To reset values :
+# weave_seed: uninitialized
+# weave_peers: uninitialized
+weave_seed: uninitialized
+weave_peers: uninitialized
+
+# Enable kubernetes network policies
+enable_network_policy: false
+
+# Kubernetes internal network for services, unused block of space.
+kube_service_addresses: 10.233.0.0/18
+
+# internal network. When used, it will assign IP
+# addresses from this range to individual pods.
+# This network must be unused in your network infrastructure!
+kube_pods_subnet: 10.233.64.0/18
+
+# internal network node size allocation (optional). This is the size allocated
+# to each node on your network. With these defaults you should have
+# room for 4096 nodes with 254 pods per node.
+kube_network_node_prefix: 24
+
+# The port the API Server will be listening on.
+kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
+kube_apiserver_port: 6443 # (https)
+kube_apiserver_insecure_port: 8080 # (http)
+
+# DNS configuration.
+# Kubernetes cluster name, also will be used as DNS domain
+cluster_name: cluster.local
+# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
+ndots: 2
+# Can be dnsmasq_kubedns, kubedns or none
+dns_mode: kubedns
+# Can be docker_dns, host_resolvconf or none
+resolvconf_mode: docker_dns
+# Deploy netchecker app to verify DNS resolve as an HTTP service
+deploy_netchecker: false
+# Ip address of the kubernetes skydns service
+skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
+dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
+dns_domain: "{{ cluster_name }}"
+
+# Path used to store Docker data
+docker_daemon_graph: "/var/lib/docker"
+
+## A string of extra options to pass to the docker daemon.
+## This string should be exactly as you wish it to appear.
+## An obvious use case is allowing insecure-registry access
+## to self hosted registries like so:
+
+docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} {{ docker_log_opts }}"
+docker_bin_dir: "/usr/bin"
+
+# Settings for containerized control plane (etcd/kubelet/secrets)
+etcd_deployment_type: docker
+kubelet_deployment_type: host
+vault_deployment_type: docker
+helm_deployment_type: host
+
+# K8s image pull policy (imagePullPolicy)
+k8s_image_pull_policy: IfNotPresent
+
+# Kubernetes dashboard
+# RBAC required. see docs/getting-started.md for access details.
+dashboard_enabled: true
+
+# Monitoring apps for k8s
+efk_enabled: false
+
+# Helm deployment
+helm_enabled: false
+
+# Istio deployment
+istio_enabled: false
+
+# Local volume provisioner deployment
+local_volumes_enabled: false
+
+# Add Persistent Volumes Storage Class for corresponding cloud provider ( OpenStack is only supported now )
+persistent_volumes_enabled: false
+
+# Make a copy of kubeconfig on the host that runs Ansible in GITDIR/artifacts
+# kubeconfig_localhost: false
+# Download kubectl onto the host that runs Ansible in GITDIR/artifacts
+# kubectl_localhost: false
+
+# dnsmasq
+# dnsmasq_upstream_dns_servers:
+# - /resolvethiszone.with/10.0.4.250
+# - 8.8.8.8
+
+# Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created. (default true)
+# kubelet_cgroups_per_qos: true
+
+# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet.
+# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
+# kubelet_enforce_node_allocatable: pods
+
+## Supplementary addresses that can be added in kubernetes ssl keys.
+## That can be useful, for example, to set up a keepalived virtual IP
+# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3]
diff --git a/xci/installer/kubespray/files/mini/inventory/inventory.cfg b/xci/installer/kubespray/files/mini/inventory/inventory.cfg
new file mode 100644
index 00000000..bf8bf19b
--- /dev/null
+++ b/xci/installer/kubespray/files/mini/inventory/inventory.cfg
@@ -0,0 +1,22 @@
+[all]
+opnfv ansible_host=192.168.122.2 ip=192.168.122.2
+master1 ansible_host=192.168.122.3 ip=192.168.122.3
+node1 ansible_host=192.168.122.4 ip=192.168.122.4
+
+[kube-master]
+master1
+
+[kube-node]
+node1
+
+[etcd]
+master1
+
+[k8s-cluster:children]
+kube-node
+kube-master
+
+[calico-rr]
+
+[vault]
+master1
diff --git a/xci/installer/kubespray/files/noha/inventory/inventory.cfg b/xci/installer/kubespray/files/noha/inventory/inventory.cfg
new file mode 100644
index 00000000..73c1e0a1
--- /dev/null
+++ b/xci/installer/kubespray/files/noha/inventory/inventory.cfg
@@ -0,0 +1,24 @@
+[all]
+opnfv ansible_host=192.168.122.2 ip=192.168.122.2
+master1 ansible_host=192.168.122.3 ip=192.168.122.3
+node1 ansible_host=192.168.122.4 ip=192.168.122.4
+node2 ansible_host=192.168.122.5 ip=192.168.122.5
+
+[kube-master]
+master1
+
+[kube-node]
+node1
+node2
+
+[etcd]
+master1
+
+[k8s-cluster:children]
+kube-node
+kube-master
+
+[calico-rr]
+
+[vault]
+master1
diff --git a/xci/installer/kubespray/playbooks/configure-opnfvhost.yml b/xci/installer/kubespray/playbooks/configure-opnfvhost.yml
new file mode 100644
index 00000000..23f93852
--- /dev/null
+++ b/xci/installer/kubespray/playbooks/configure-opnfvhost.yml
@@ -0,0 +1,77 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- hosts: opnfv
+ remote_user: root
+ vars_files:
+ - "{{ XCI_PATH }}/xci/var/opnfv.yml"
+
+ tasks:
+ - name: Load distribution variables
+ include_vars:
+ file: "{{ item }}"
+ with_items:
+ - "{{ XCI_PATH }}/xci/var/{{ ansible_os_family }}.yml"
+ - name: Set facts for remote deployment
+ set_fact:
+ remote_xci_path: "{{ ansible_env.HOME }}/releng-xci"
+ remote_xci_flavor_files: "{{ ansible_env.HOME }}/releng-xci/xci/installer/{{ XCI_INSTALLER }}/files/{{ XCI_FLAVOR }}"
+ remote_xci_playbooks: "{{ ansible_env.HOME }}/releng-xci/xci/playbooks"
+
+ - name: Copy releng-xci to remote host
+ synchronize:
+ src: "{{ XCI_PATH }}/"
+ dest: "{{ remote_xci_path }}"
+ recursive: yes
+ delete: yes
+
+ - name: generate SSH keys
+ shell: ssh-keygen -b 2048 -t rsa -f /root/.ssh/id_rsa -q -N ""
+ args:
+ creates: /root/.ssh/id_rsa
+ - name: add id_rsa.pub to authorized_keys
+ shell: cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys
+ when: XCI_FLAVOR == 'aio'
+ - name: fetch public key
+ fetch:
+ src: "{{ ansible_env.HOME }}/.ssh/id_rsa.pub"
+ dest: "{{ XCI_PATH }}/xci/files/authorized_keys"
+ flat: yes
+ - name: delete the opnfv_inventory directory
+ file:
+ path: "{{ remote_xci_path }}/.cache/repos/kubespray/opnfv_inventory"
+ state: absent
+ - name: copy kubespray inventory directory
+ shell: "cp -rf {{ remote_xci_flavor_files }}/inventory \
+ {{ remote_xci_path }}/.cache/repos/kubespray/opnfv_inventory"
+ - name: make sure kubespray/opnfv_inventory/group_vars/ exist
+ file:
+ path: "{{ remote_xci_path }}/.cache/repos/kubespray/opnfv_inventory/group_vars"
+ state: directory
+ - name: copy k8s_cluster.yml
+ shell: "cp -rf {{ remote_xci_path }}/xci/installer/kubespray/files/k8s-cluster.yml \
+ {{ remote_xci_path }}/.cache/repos/kubespray/opnfv_inventory/group_vars"
+ - name: install dbus and python-netaddr
+ apt:
+ name: "{{item}}"
+ update_cache: yes
+ with_items: "{{ kube_require_packages }}"
+ when: XCI_FLAVOR == 'aio'
+ - name: pip install ansible
+ pip:
+ name: ansible
+ version: "{{ XCI_KUBE_ANSIBLE_PIP_VERSION }}"
+
+
+- hosts: localhost
+ remote_user: root
+ vars_files:
+ - "{{ XCI_PATH }}/xci/var/opnfv.yml"
+ tasks:
+ - name: Append public keys to authorized_keys
+ shell: "/bin/cat {{ ansible_env.HOME }}/.ssh/id_rsa.pub >> {{ XCI_PATH }}/xci/files/authorized_keys"
diff --git a/xci/installer/kubespray/playbooks/configure-targethosts.yml b/xci/installer/kubespray/playbooks/configure-targethosts.yml
new file mode 100644
index 00000000..7e29025b
--- /dev/null
+++ b/xci/installer/kubespray/playbooks/configure-targethosts.yml
@@ -0,0 +1,28 @@
+---
+- hosts: all
+ remote_user: root
+ tasks:
+ - name: add public key to host
+ copy:
+ src: "{{ XCI_PATH }}/xci/files/authorized_keys"
+ dest: /root/.ssh/authorized_keys
+ - name: install dbus and python-netaddr
+ apt:
+ name: "{{item}}"
+ update_cache: yes
+ with_items: "{{ kube_require_packages }}"
+
+- hosts: kube-master
+ remote_user: root
+ vars_files:
+ - "{{ XCI_PATH }}/xci/var/opnfv.yml"
+ pre_tasks:
+ - name: Load distribution variables
+ include_vars:
+ file: "{{ XCI_PATH }}/xci/var/{{ ansible_os_family }}.yml"
+ roles:
+ - role: "keepalived"
+ when: XCI_FLAVOR == 'ha'
+ - role: "haproxy_server"
+ haproxy_service_configs: "{{ haproxy_default_services}}"
+ when: XCI_FLAVOR == 'ha'
diff --git a/xci/installer/kubespray/playbooks/group_vars/all b/xci/installer/kubespray/playbooks/group_vars/all
new file mode 100644
index 00000000..06dccb68
--- /dev/null
+++ b/xci/installer/kubespray/playbooks/group_vars/all
@@ -0,0 +1,58 @@
+kube_require_packages:
+ - "python-netaddr"
+ - "dbus"
+
+keepalived_ubuntu_src: "uca"
+keepalived_uca_apt_repo_url: "{{ uca_apt_repo_url | default('http://ubuntu-cloud.archive.canonical.com/ubuntu') }}"
+
+keepalived_sync_groups:
+ haproxy:
+ instances:
+ - external
+
+haproxy_keepalived_external_interface: "{{ interface }}"
+haproxy_keepalived_authentication_password: 'keepalived'
+keepalived_instances:
+ external:
+ interface: "{{ haproxy_keepalived_external_interface }}"
+ state: "BACKUP"
+ virtual_router_id: "{{ haproxy_keepalived_external_virtual_router_id | default ('10') }}"
+ priority: "{{ ((ansible_play_hosts|length-ansible_play_hosts.index(inventory_hostname))*100)-((ansible_play_hosts|length-ansible_play_hosts.index(inventory_hostname))*50) }}"
+ authentication_password: "{{ haproxy_keepalived_authentication_password }}"
+ vips:
+ - "{{ haproxy_keepalived_external_vip_cidr | default('192.168.122.222/32') }} dev {{ haproxy_keepalived_external_interface }}"
+
+haproxy_default_services:
+ - service:
+ haproxy_service_name: proxy-apiserver
+ haproxy_backend_nodes: "{{ groups['kube-master'] | default([]) }}"
+ haproxy_port: 8383
+ haproxy_backend_port: 6443
+ haproxy_balance_type: tcp
+
+haproxy_bind_on_non_local: "True"
+haproxy_use_keepalived: "True"
+keepalived_selinux_compile_rules:
+ - keepalived_ping
+ - keepalived_haproxy_pid_file
+
+# Ensure that the package state matches the global setting
+haproxy_package_state: "latest"
+
+haproxy_whitelist_networks:
+ - 192.168.0.0/16
+ - 172.16.0.0/12
+ - 10.0.0.0/8
+
+haproxy_galera_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_glance_registry_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_keystone_admin_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_nova_metadata_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_rabbitmq_management_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_repo_git_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_repo_cache_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_octavia_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+haproxy_ssl: false
+
+internal_lb_vip_address: "192.168.122.222"
+external_lb_vip_address: "{{ internal_lb_vip_address }}"
diff --git a/xci/installer/osa/deploy.sh b/xci/installer/osa/deploy.sh
index b8637f22..1e442918 100755
--- a/xci/installer/osa/deploy.sh
+++ b/xci/installer/osa/deploy.sh
@@ -38,7 +38,7 @@ fi
echo "Info: Configuring localhost for openstack-ansible"
echo "-----------------------------------------------------------------------"
-cd $OSA_XCI_PLAYBOOKS
+cd $XCI_PLAYBOOKS
ansible-playbook ${XCI_ANSIBLE_VERBOSITY} -e XCI_PATH="${XCI_PATH}" -i inventory configure-localhost.yml
echo "-----------------------------------------------------------------------"
echo "Info: Configured localhost host for openstack-ansible"
diff --git a/xci/installer/osa/playbooks/configure-localhost.yml b/xci/installer/osa/playbooks/configure-localhost.yml
deleted file mode 100644
index caa5d673..00000000
--- a/xci/installer/osa/playbooks/configure-localhost.yml
+++ /dev/null
@@ -1,75 +0,0 @@
----
-# SPDX-license-identifier: Apache-2.0
-##############################################################################
-# Copyright (c) 2017 Ericsson AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-- hosts: localhost
- connection: local
-
- pre_tasks:
- - name: Load distribution variables
- include_vars:
- file: "{{ item }}"
- failed_when: false
- with_items:
- - "{{ XCI_PATH }}/xci/var/opnfv.yml"
- - "{{ XCI_PATH }}/xci/var/{{ ansible_os_family }}.yml"
-
- - name: cleanup leftovers of previous deployment
- file:
- path: "{{ item }}"
- state: absent
- recurse: no
- with_items:
- - "{{ XCI_CACHE }}/repos"
- - "{{ LOG_PATH }} "
- - "{{ OPNFV_SSH_HOST_KEYS_PATH }}"
-
- roles:
- - role: clone-repository
- project: "openstack/openstack-ansible-openstack_openrc"
- repo: "{{ OPENSTACK_OSA_OPENRC_GIT_URL }}"
- dest: roles/openstack-ansible-openstack_openrc
- version: "master"
- - role: clone-repository
- project: "openstack/openstack-ansible"
- repo: "{{ OPENSTACK_OSA_GIT_URL }}"
- dest: "{{ XCI_CACHE }}/repos/openstack-ansible"
- version: "{{ OPENSTACK_OSA_VERSION }}"
-
- tasks:
- - name: create log directory {{LOG_PATH}}
- file:
- path: "{{LOG_PATH}}"
- state: directory
- recurse: no
- - name: check if certificate directory /etc/ssl/certs exists already
- stat: path=/etc/ssl/certs
- register: check_etc_ssl_certs
- - name: create certificate directory /etc/ssl/certs
- become: true
- file:
- path: "/etc/ssl/certs"
- state: directory
- when: check_etc_ssl_certs.stat.exists == false
- - name: create key directory /etc/ssl/private
- become: true
- file:
- path: "/etc/ssl/private"
- state: directory
- - name: generate self signed certificate
- command: openssl req -new -nodes -x509 -subj "{{ XCI_SSL_SUBJECT }}" -days 3650 -keyout "/etc/ssl/private/xci.key" -out "/etc/ssl/certs/xci.crt" -extensions v3_ca
- become: true
- - name: Synchronize local development OSA repository to XCI paths
- # command module is much faster than the copy module
- synchronize:
- src: "{{ OPENSTACK_OSA_DEV_PATH }}"
- dest: "{{ XCI_CACHE }}/repos/openstack-ansible"
- recursive: yes
- delete: yes
- when:
- - OPENSTACK_OSA_DEV_PATH != ""
diff --git a/xci/opnfv-scenario-requirements.yml b/xci/opnfv-scenario-requirements.yml
index 3388b803..ece4caea 100644
--- a/xci/opnfv-scenario-requirements.yml
+++ b/xci/opnfv-scenario-requirements.yml
@@ -56,3 +56,18 @@
distros:
- opensuse
- ubuntu
+
+- scenario: k8-nosdn-nofeature
+ scm: git
+ src: https://git.opnfv.org/releng-xci
+ version: master
+ role: xci/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature
+ installers:
+ - installer: kubespray
+ flavors:
+ - aio
+ - ha
+ - mini
+ - noha
+ distros:
+ - ubuntu
diff --git a/xci/playbooks/configure-localhost.yml b/xci/playbooks/configure-localhost.yml
new file mode 100644
index 00000000..467ab312
--- /dev/null
+++ b/xci/playbooks/configure-localhost.yml
@@ -0,0 +1,100 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2017 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+- hosts: localhost
+ connection: local
+
+ pre_tasks:
+ - name: Load distribution variables
+ include_vars:
+ file: "{{ item }}"
+ failed_when: false
+ with_items:
+ - "{{ XCI_PATH }}/xci/var/opnfv.yml"
+ - "{{ XCI_PATH }}/xci/var/{{ ansible_os_family }}.yml"
+
+ - name: cleanup leftovers of previous deployment
+ file:
+ path: "{{ item }}"
+ state: absent
+ recurse: no
+ with_items:
+ - "{{ XCI_CACHE }}/repos"
+        - "{{ LOG_PATH }}"
+ - "{{ OPNFV_SSH_HOST_KEYS_PATH }}"
+
+ roles:
+ - role: clone-repository
+ project: "openstack/openstack-ansible-openstack_openrc"
+ repo: "{{ OPENSTACK_OSA_OPENRC_GIT_URL }}"
+ dest: roles/openstack-ansible-openstack_openrc
+ version: "master"
+ when: XCI_INSTALLER == "osa"
+ - role: clone-repository
+ project: "openstack/openstack-ansible"
+ repo: "{{ OPENSTACK_OSA_GIT_URL }}"
+ dest: "{{ XCI_CACHE }}/repos/openstack-ansible"
+ version: "{{ OPENSTACK_OSA_VERSION }}"
+ when: XCI_INSTALLER == "osa"
+ - role: clone-repository
+ project: "kubernetes-incubator/kubespray"
+ repo: "{{ KUBESPRAY_GIT_URL }}"
+ dest: "{{ XCI_CACHE }}/repos/kubespray"
+ version: "{{ KUBESPRAY_VERSION }}"
+ when: XCI_INSTALLER == "kubespray"
+ - role: clone-repository
+ project: "openstack/openstack-ansible-haproxy_server"
+ repo: "{{ OPENSTACK_OSA_HAPROXY_GIT_URL }}"
+ dest: roles/haproxy_server
+ version: "{{ HAPROXY_VERSION }}"
+ when:
+ - XCI_INSTALLER == "kubespray"
+ - role: clone-repository
+ project: "ansible-keepalived"
+ repo: "{{ KEEPALIVED_GIT_URL }}"
+ dest: roles/keepalived
+ version: "{{ KEEPALIVED_VERSION }}"
+ when:
+ - XCI_INSTALLER == "kubespray"
+
+ tasks:
+ - name: create log directory {{LOG_PATH}}
+ file:
+ path: "{{LOG_PATH}}"
+ state: directory
+ recurse: no
+ - block:
+ - name: check if certificate directory /etc/ssl/certs exists already
+ stat: path=/etc/ssl/certs
+ register: check_etc_ssl_certs
+ - name: create certificate directory /etc/ssl/certs
+ become: true
+ file:
+ path: "/etc/ssl/certs"
+ state: directory
+        when: not check_etc_ssl_certs.stat.exists
+ - name: create key directory /etc/ssl/private
+ become: true
+ file:
+ path: "/etc/ssl/private"
+ state: directory
+ - name: generate self signed certificate
+ command: openssl req -new -nodes -x509 -subj "{{ XCI_SSL_SUBJECT }}" -days 3650 -keyout "/etc/ssl/private/xci.key" -out "/etc/ssl/certs/xci.crt" -extensions v3_ca
+ become: true
+ - name: Synchronize local development OSA repository to XCI paths
+ # command module is much faster than the copy module
+ synchronize:
+ src: "{{ OPENSTACK_OSA_DEV_PATH }}"
+ dest: "{{ XCI_CACHE }}/repos/openstack-ansible"
+ recursive: yes
+ delete: yes
+ when:
+ - OPENSTACK_OSA_DEV_PATH != ""
+ when:
+ - XCI_INSTALLER == "osa"
diff --git a/xci/scenarios/k8-nosdn-nofeature/.gitkeep b/xci/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature/tasks/.gitkeep
index e69de29b..e69de29b 100644
--- a/xci/scenarios/k8-nosdn-nofeature/.gitkeep
+++ b/xci/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature/tasks/.gitkeep
diff --git a/xci/scripts/vm/start-new-vm.sh b/xci/scripts/vm/start-new-vm.sh
index 3048d1b9..e5a13b6b 100755
--- a/xci/scripts/vm/start-new-vm.sh
+++ b/xci/scripts/vm/start-new-vm.sh
@@ -112,15 +112,17 @@ if ! sudo -n "true"; then
exit 1
fi
+COMMON_DISTRO_PKGS=(vim strace gdb htop dnsmasq docker iptables ebtables virt-manager qemu-kvm)
+
case ${ID,,} in
*suse)
- pkg_mgr_cmd="sudo zypper -q -n install virt-manager qemu-kvm qemu-tools libvirt-daemon docker libvirt-client libvirt-daemon-driver-qemu iptables ebtables dnsmasq"
+ pkg_mgr_cmd="sudo zypper -q -n install ${COMMON_DISTRO_PKGS[@]} qemu-kvm qemu-tools libvirt-daemon libvirt-client libvirt-daemon-driver-qemu"
;;
centos)
- pkg_mgr_cmd="sudo yum install -q -y epel-release && sudo yum install -q -y in virt-manager qemu-kvm qemu-kvm-tools qemu-img libvirt-daemon-kvm docker iptables ebtables dnsmasq"
+        pkg_mgr_cmd="sudo yum install -q -y epel-release && sudo yum install -q -y ${COMMON_DISTRO_PKGS[@]} qemu-kvm-tools qemu-img libvirt-daemon-kvm"
;;
ubuntu)
- pkg_mgr_cmd="sudo apt-get install -y -q=3 virt-manager qemu-kvm libvirt-bin qemu-utils docker.io docker iptables ebtables dnsmasq"
+ pkg_mgr_cmd="sudo apt-get install -y -q=3 ${COMMON_DISTRO_PKGS[@]} libvirt-bin qemu-utils docker.io"
;;
esac
diff --git a/xci/var/opnfv.yml b/xci/var/opnfv.yml
index 5638eba4..158b1dfe 100644
--- a/xci/var/opnfv.yml
+++ b/xci/var/opnfv.yml
@@ -35,6 +35,13 @@ XCI_CEPH_ENABLED: "{{ lookup('env', 'XCI_CEPH_ENABLED') }}"
RUN_TEMPEST: "{{ lookup('env', 'RUN_TEMPEST') }}"
DEPLOY_SCENARIO: "{{ lookup('env','DEPLOY_SCENARIO') }}"
XCI_INSTALLER: "{{ lookup('env','XCI_INSTALLER') }}"
+KUBESPRAY_VERSION: "{{ lookup('env','KUBESPRAY_VERSION') }}"
+XCI_KUBE_ANSIBLE_PIP_VERSION: "{{ lookup('env','XCI_KUBE_ANSIBLE_PIP_VERSION') }}"
+KUBESPRAY_GIT_URL: "{{ lookup('env','KUBESPRAY_GIT_URL') }}"
+OPENSTACK_OSA_HAPROXY_GIT_URL: "{{ lookup('env','OPENSTACK_OSA_HAPROXY_GIT_URL') }}"
+HAPROXY_VERSION: "{{ lookup('env','HAPROXY_VERSION') }}"
+KEEPALIVED_GIT_URL: "{{ lookup('env','KEEPALIVED_GIT_URL') }}"
+KEEPALIVED_VERSION: "{{ lookup('env','KEEPALIVED_VERSION') }}"
# install docker on opnfv host only if we are running as part of CI
opnfv_required_packages: