Diffstat (limited to 'xci')
-rwxr-xr-x  xci/config/pinned-versions                                                       2
-rwxr-xr-x  xci/files/xci-destroy-env.sh                                                     8
-rwxr-xr-x  xci/installer/kubespray/deploy.sh                                                7
-rw-r--r--  xci/installer/kubespray/playbooks/configure-opnfvhost.yml                        1
-rw-r--r--  xci/opnfv-scenario-requirements.yml                                             38
-rw-r--r--  xci/playbooks/bootstrap-scenarios.yml                                            8
-rw-r--r--  xci/scenarios/k8-calico-nofeature/role/k8-calico-nofeature/files/k8s-cluster.yml  292
-rw-r--r--  xci/scenarios/k8-calico-nofeature/role/k8-calico-nofeature/tasks/main.yml        14
-rw-r--r--  xci/scenarios/k8-canal-nofeature/role/k8-canal-nofeature/files/k8s-cluster.yml   292
-rw-r--r--  xci/scenarios/k8-canal-nofeature/role/k8-canal-nofeature/tasks/main.yml          14
-rwxr-xr-x  xci/xci-deploy.sh                                                                3
11 files changed, 662 insertions, 17 deletions
diff --git a/xci/config/pinned-versions b/xci/config/pinned-versions
index 6b20374a..72a0ff61 100755
--- a/xci/config/pinned-versions
+++ b/xci/config/pinned-versions
@@ -45,4 +45,4 @@ export HAPROXY_VERSION=$(grep -E '.*name: haproxy_server' -A 3 \
| tail -n1 | sed -n 's/\(^.*: \)\([0-9a-z].*$\)/\2/p')
# HEAD of kubespray "master" as of 27.02.2018
# kubespray's bug Reference: https://github.com/kubernetes-incubator/kubespray/issues/2400
-export KUBESPRAY_VERSION=${KUBESPRAY_VERSION:-"810c10a0e9b65b0ef8ae8f7c302f7553a165631c"}
+export KUBESPRAY_VERSION=${KUBESPRAY_VERSION:-"5d9bb300d716880610c34dd680c167d2d728984d"}
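The pin is only a default: the ${KUBESPRAY_VERSION:-...} expansion above falls back to the quoted SHA only when the variable is unset or empty, so the version can still be overridden from the environment. A minimal usage sketch (the branch name is illustrative; run from the repository root):

    # Deploy against a different kubespray revision without editing pinned-versions
    export KUBESPRAY_VERSION=master    # any branch, tag, or SHA works here
    ./xci/xci-deploy.sh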
diff --git a/xci/files/xci-destroy-env.sh b/xci/files/xci-destroy-env.sh
index 2e183bd7..97b76c7c 100755
--- a/xci/files/xci-destroy-env.sh
+++ b/xci/files/xci-destroy-env.sh
@@ -27,8 +27,8 @@ if which vbmc &>/dev/null || { [[ -e ${XCI_VENV}/bifrost/bin/activate ]] && sour
# Delete all libvirt VMs and hosts from vbmc (look for a port number)
for vm in $(vbmc list | awk '/[0-9]/{{ print $2 }}'); do
if which virsh &>/dev/null; then
- virsh destroy $vm || true
- virsh undefine $vm || true
+ virsh destroy $vm &>/dev/null || true
+ virsh undefine $vm &>/dev/null || true
fi
vbmc delete $vm
done
@@ -40,8 +40,8 @@ for varfile in ${flavors[@]}; do
source ${XCI_PATH}/xci/config/${varfile}-vars
for vm in ${TEST_VM_NODE_NAMES}; do
if which virsh &>/dev/null; then
- virsh destroy $vm || true
- virsh undefine $vm || true
+ virsh destroy $vm &>/dev/null || true
+ virsh undefine $vm &>/dev/null || true
fi
done
done
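Redirecting both streams to /dev/null keeps virsh from printing errors for domains that are already gone, while the trailing || true keeps cleanup non-fatal even under set -o errexit. A generic sketch of the same idiom, with a hypothetical helper name:

    # quiet_try is a hypothetical helper illustrating the pattern above:
    # suppress stdout/stderr and never fail, so cleanup stays idempotent.
    quiet_try() {
        "$@" &>/dev/null || true
    }
    quiet_try virsh destroy "$vm"
    quiet_try virsh undefine "$vm"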
diff --git a/xci/installer/kubespray/deploy.sh b/xci/installer/kubespray/deploy.sh
index 5136f5a8..59d11055 100755
--- a/xci/installer/kubespray/deploy.sh
+++ b/xci/installer/kubespray/deploy.sh
@@ -14,13 +14,6 @@ set -o pipefail
K8_XCI_PLAYBOOKS="$(dirname $(realpath ${BASH_SOURCE[0]}))/playbooks"
export ANSIBLE_ROLES_PATH=$HOME/.ansible/roles:/etc/ansible/roles:${XCI_PATH}/xci/playbooks/roles
-# NOTE(hwoarang): This is a workaround for SUSE until upstream PR is accepted
-# https://github.com/kubernetes-incubator/kubespray/pull/2380
-if [[ ${XCI_DISTRO} == opensuse ]]; then
- export KUBESPRAY_GIT_URL=https://github.com/hwoarang/kubespray.git
- export KUBESPRAY_VERSION=add-opensuse-support
-fi
-
#-------------------------------------------------------------------------------
# Configure localhost
#-------------------------------------------------------------------------------
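The deleted SUSE block relied on nothing more than environment overrides, so the same mechanism remains available should a kubespray fork ever need testing again (URL and branch below are illustrative):

    # Point XCI at an unmerged kubespray branch; these are the same two
    # variables the removed workaround exported.
    export KUBESPRAY_GIT_URL=https://github.com/example/kubespray.git
    export KUBESPRAY_VERSION=my-feature-branch
    ./xci/xci-deploy.sh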
diff --git a/xci/installer/kubespray/playbooks/configure-opnfvhost.yml b/xci/installer/kubespray/playbooks/configure-opnfvhost.yml
index d9c5ae6d..0ac18b50 100644
--- a/xci/installer/kubespray/playbooks/configure-opnfvhost.yml
+++ b/xci/installer/kubespray/playbooks/configure-opnfvhost.yml
@@ -48,6 +48,7 @@
{{ remote_xci_path }}/.cache/repos/kubespray/opnfv_inventory/group_vars"
args:
creates: "{{ remote_xci_path }}/.cache/repos/kubespray/opnfv_inventory/group_vars/k8s-cluster.yml"
+ - include: "{{ xci_path }}/xci/playbooks/bootstrap-scenarios.yml"
- name: Install required packages
package:
name: "{{ kube_require_packages[ansible_pkg_mgr] }}"
diff --git a/xci/opnfv-scenario-requirements.yml b/xci/opnfv-scenario-requirements.yml
index a8361535..e240d55d 100644
--- a/xci/opnfv-scenario-requirements.yml
+++ b/xci/opnfv-scenario-requirements.yml
@@ -28,7 +28,7 @@
- scenario: os-nosdn-nofeature
scm: git
- src: https://git.opnfv.org/releng-xci
+ src: https://gerrit.opnfv.org/gerrit/releng-xci
version: master
role: xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature
installers:
@@ -44,7 +44,7 @@
- scenario: os-odl-nofeature
scm: git
- src: https://git.opnfv.org/releng-xci
+ src: https://gerrit.opnfv.org/gerrit/releng-xci
version: master
role: xci/scenarios/os-odl-nofeature/role/os-odl-nofeature
installers:
@@ -59,7 +59,7 @@
- scenario: k8-nosdn-nofeature
scm: git
- src: https://git.opnfv.org/releng-xci
+ src: https://gerrit.opnfv.org/gerrit/releng-xci
version: master
role: xci/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature
installers:
@@ -88,3 +88,35 @@
distros:
- ubuntu
- centos
+
+- scenario: k8-canal-nofeature
+ scm: git
+ src: https://gerrit.opnfv.org/gerrit/releng-xci
+ version: master
+ role: xci/scenarios/k8-canal-nofeature/role/k8-canal-nofeature
+ installers:
+ - installer: kubespray
+ flavors:
+ - aio
+ - ha
+ - mini
+ - noha
+ distros:
+ - ubuntu
+ - centos
+
+- scenario: k8-calico-nofeature
+ scm: git
+ src: https://gerrit.opnfv.org/gerrit/releng-xci
+ version: master
+ role: xci/scenarios/k8-calico-nofeature/role/k8-calico-nofeature
+ installers:
+ - installer: kubespray
+ flavors:
+ - aio
+ - ha
+ - mini
+ - noha
+ distros:
+ - ubuntu
+ - centos
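Each entry gives the scenario fetcher a git source, a version, and the role's subpath inside the clone. Roughly, an entry resolves to something like the following (destination path is illustrative, not the tooling's exact behavior):

    # Approximately what the tooling does with the k8-calico-nofeature entry
    git clone https://gerrit.opnfv.org/gerrit/releng-xci /tmp/releng-xci
    git -C /tmp/releng-xci checkout master
    cp -r /tmp/releng-xci/xci/scenarios/k8-calico-nofeature/role/k8-calico-nofeature \
          ~/.ansible/roles/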
diff --git a/xci/playbooks/bootstrap-scenarios.yml b/xci/playbooks/bootstrap-scenarios.yml
index 6546d5ce..5d810fa0 100644
--- a/xci/playbooks/bootstrap-scenarios.yml
+++ b/xci/playbooks/bootstrap-scenarios.yml
@@ -25,3 +25,11 @@
include_role:
name: "os-odl-bgpvpn"
when: deploy_scenario == 'os-odl-bgpvpn'
+- name: Prepare everything to run the k8-canal-nofeature scenario
+ include_role:
+ name: "k8-canal-nofeature"
+ when: deploy_scenario == 'k8-canal-nofeature'
+- name: Prepare everything to run the k8-calico-nofeature scenario
+ include_role:
+ name: "k8-calico-nofeature"
+ when: deploy_scenario == 'k8-calico-nofeature'
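The deploy_scenario fact these conditions test is derived from the DEPLOY_SCENARIO environment variable (name assumed from the usual XCI convention), so selecting one of the new scenarios looks like:

    export DEPLOY_SCENARIO=k8-calico-nofeature
    ./xci/xci-deploy.sh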
diff --git a/xci/scenarios/k8-calico-nofeature/role/k8-calico-nofeature/files/k8s-cluster.yml b/xci/scenarios/k8-calico-nofeature/role/k8-calico-nofeature/files/k8s-cluster.yml
new file mode 100644
index 00000000..20d3091d
--- /dev/null
+++ b/xci/scenarios/k8-calico-nofeature/role/k8-calico-nofeature/files/k8s-cluster.yml
@@ -0,0 +1,292 @@
+# Valid bootstrap options (required): ubuntu, coreos, centos, none
+bootstrap_os: none
+
+# Directory where etcd data is stored
+etcd_data_dir: /var/lib/etcd
+
+# Directory where the binaries will be installed
+bin_dir: /usr/local/bin
+
+## The access_ip variable is used to define how other nodes should access
+## the node. This is used in flannel to allow other flannel nodes to see
+## this node for example. The access_ip is really useful in AWS and Google
+## environments where the nodes are accessed remotely by the "public" ip,
+## but don't know about that address themselves.
+#access_ip: 1.1.1.1
+
+### LOADBALANCING AND ACCESS MODES
+## Enable multiaccess to configure etcd clients to access all of the etcd members directly
+## as the "http://hostX:port, http://hostY:port, ..." and ignore the proxy loadbalancers.
+## This may be the case if clients support and loadbalance multiple etcd servers natively.
+#etcd_multiaccess: true
+
+## Internal loadbalancers for apiservers
+#loadbalancer_apiserver_localhost: true
+
+## Local loadbalancer should use this port instead, if defined.
+## Defaults to kube_apiserver_port (6443)
+#nginx_kube_apiserver_port: 8443
+
+### OTHER OPTIONAL VARIABLES
+## For some things, kubelet needs to load kernel modules. For example, dynamic kernel services are needed
+## for mounting persistent volumes into containers. These may not be loaded by preinstall kubernetes
+## processes. For example, ceph and rbd backed volumes. Set to true to allow kubelet to load kernel
+## modules.
+# kubelet_load_modules: false
+
+## Internal network total size. This is the prefix of the
+## entire network. Must be unused in your environment.
+#kube_network_prefix: 18
+
+## With calico it is possible to distribute routes with border routers of the datacenter.
+## Warning: enabling router peering will disable calico's default behavior ('node mesh').
+## The subnets of each node will be distributed by the datacenter router
+#peer_with_router: false
+
+## Upstream dns servers used by dnsmasq
+#upstream_dns_servers:
+# - 8.8.8.8
+# - 8.8.4.4
+
+## There are some changes specific to the cloud providers
+## for instance we need to encapsulate packets with some network plugins
+## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', or 'external'
+## When openstack is used make sure to source in the openstack credentials
+## like you would do when using nova-client before starting the playbook.
+#cloud_provider:
+
+## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (https://github.com/kubernetes/kubernetes/issues/50461)
+#openstack_blockstorage_version: "v1/v2/auto (default)"
+## When OpenStack is used, if LBaaSv2 is available you can enable it with the following variables.
+#openstack_lbaas_enabled: True
+#openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
+#openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
+#openstack_lbaas_create_monitor: "yes"
+#openstack_lbaas_monitor_delay: "1m"
+#openstack_lbaas_monitor_timeout: "30s"
+#openstack_lbaas_monitor_max_retries: "3"
+
+## Uncomment to enable experimental kubeadm deployment mode
+#kubeadm_enabled: false
+#kubeadm_token_first: "{{ lookup('password', 'credentials/kubeadm_token_first length=6 chars=ascii_lowercase,digits') }}"
+#kubeadm_token_second: "{{ lookup('password', 'credentials/kubeadm_token_second length=16 chars=ascii_lowercase,digits') }}"
+#kubeadm_token: "{{ kubeadm_token_first }}.{{ kubeadm_token_second }}"
+#
+## Set these proxy values in order to update package manager and docker daemon to use proxies
+#http_proxy: ""
+#https_proxy: ""
+## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
+#no_proxy: ""
+
+## Uncomment this if you want to force overlay/overlay2 as docker storage driver
+## Please note that overlay2 is only supported on newer kernels
+#docker_storage_options: -s overlay2
+
+# Uncomment this if you have more than 3 nameservers, then we'll only use the first 3.
+#docker_dns_servers_strict: false
+
+## Default packages to install within the cluster, f.e:
+#kpm_packages:
+# - name: kube-system/grafana
+
+## Certificate Management
+## This setting determines whether certs are generated via scripts or whether a
+## cluster of Hashicorp's Vault is started to issue certificates (using etcd
+## as a backend). Options are "script" or "vault"
+#cert_management: script
+
+# Set to true to allow pre-checks to fail and continue deployment
+#ignore_assert_errors: false
+
+## Etcd auto compaction retention for mvcc key value store in hour
+#etcd_compaction_retention: 0
+
+## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics.
+#etcd_metrics: basic
+
+
+# Kubernetes configuration dirs and system namespace.
+# Those are where all the additional config stuff goes
+# kubernetes normally puts in /srv/kubernetes.
+# This puts them in a sane location and namespace.
+# Editing those values will almost surely break something.
+kube_config_dir: /etc/kubernetes
+kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
+kube_manifest_dir: "{{ kube_config_dir }}/manifests"
+system_namespace: kube-system
+
+# Logging directory (sysvinit systems)
+kube_log_dir: "/var/log/kubernetes"
+
+# This is where all the cert scripts and certs will be located
+kube_cert_dir: "{{ kube_config_dir }}/ssl"
+
+# This is where all of the bearer tokens will be stored
+kube_token_dir: "{{ kube_config_dir }}/tokens"
+
+# This is where to save basic auth file
+kube_users_dir: "{{ kube_config_dir }}/users"
+
+kube_api_anonymous_auth: false
+
+## Change this to use another Kubernetes version, e.g. a current beta release
+#kube_version: v1.9.0
+
+# Where the binaries will be downloaded.
+# Note: ensure that you've enough disk space (about 1G)
+local_release_dir: "/tmp/releases"
+# Random shifts for retrying failed ops like pushing/downloading
+retry_stagger: 5
+
+# This is the group that the cert creation scripts chgrp the
+# cert files to. Not really changeable...
+kube_cert_group: kube-cert
+
+# Cluster Loglevel configuration
+kube_log_level: 2
+
+# Users to create for basic auth in Kubernetes API via HTTP
+# Optionally add groups for user
+kube_api_pwd: "{{ lookup('password', 'credentials/kube_user length=15 chars=ascii_letters,digits') }}"
+kube_users:
+ kube:
+ pass: "{{kube_api_pwd}}"
+ role: admin
+ groups:
+ - system:masters
+
+## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth)
+#kube_oidc_auth: false
+kube_basic_auth: true
+#kube_token_auth: false
+
+
+## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/
+## To use OpenID you have to additionally deploy an OpenID Provider (e.g. Dex, Keycloak, ...)
+
+# kube_oidc_url: https:// ...
+# kube_oidc_client_id: kubernetes
+## Optional settings for OIDC
+# kube_oidc_ca_file: {{ kube_cert_dir }}/ca.pem
+# kube_oidc_username_claim: sub
+# kube_oidc_groups_claim: groups
+
+
+# Choose network plugin (calico, contiv, weave or flannel)
+# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
+kube_network_plugin: calico
+
+# weave's network password for encryption
+# if null then no network encryption
+# you can use --extra-vars to pass the password in command line
+weave_password: EnterPasswordHere
+
+# Weave uses consensus mode by default
+# Enabling seed mode allows hosts to be added or removed dynamically
+# https://www.weave.works/docs/net/latest/ipam/
+weave_mode_seed: false
+
+# These two variables are automatically changed by the weave role; do not manually change these values
+# To reset values :
+# weave_seed: uninitialized
+# weave_peers: uninitialized
+weave_seed: uninitialized
+weave_peers: uninitialized
+
+# Enable kubernetes network policies
+enable_network_policy: false
+
+# Kubernetes internal network for services, unused block of space.
+kube_service_addresses: 10.233.0.0/18
+
+# internal network. When used, it will assign IP
+# addresses from this range to individual pods.
+# This network must be unused in your network infrastructure!
+kube_pods_subnet: 10.233.64.0/18
+
+# internal network node size allocation (optional). This is the size allocated
+# to each node on your network. With these defaults you should have
+# room for 4096 nodes with 254 pods per node.
+kube_network_node_prefix: 24
+
+# The port the API Server will be listening on.
+kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
+kube_apiserver_port: 6443 # (https)
+kube_apiserver_insecure_port: 8080 # (http)
+
+# DNS configuration.
+# Kubernetes cluster name, also will be used as DNS domain
+cluster_name: cluster.local
+# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
+ndots: 2
+# Can be dnsmasq_kubedns, kubedns or none
+dns_mode: kubedns
+# Can be docker_dns, host_resolvconf or none
+resolvconf_mode: docker_dns
+# Deploy netchecker app to verify DNS resolve as an HTTP service
+deploy_netchecker: false
+# Ip address of the kubernetes skydns service
+skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
+dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
+dns_domain: "{{ cluster_name }}"
+
+# Path used to store Docker data
+docker_daemon_graph: "/var/lib/docker"
+
+## A string of extra options to pass to the docker daemon.
+## This string should be exactly as you wish it to appear.
+## An obvious use case is allowing insecure-registry access
+## to self hosted registries like so:
+
+docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} {{ docker_log_opts }}"
+docker_bin_dir: "/usr/bin"
+
+# Settings for containerized control plane (etcd/kubelet/secrets)
+etcd_deployment_type: docker
+kubelet_deployment_type: host
+vault_deployment_type: docker
+helm_deployment_type: host
+
+# K8s image pull policy (imagePullPolicy)
+k8s_image_pull_policy: IfNotPresent
+
+# Kubernetes dashboard
+# RBAC required. see docs/getting-started.md for access details.
+dashboard_enabled: true
+
+# Monitoring apps for k8s
+efk_enabled: false
+
+# Helm deployment
+helm_enabled: false
+
+# Istio deployment
+istio_enabled: false
+
+# Local volume provisioner deployment
+local_volumes_enabled: false
+
+# Add Persistent Volumes Storage Class for the corresponding cloud provider (currently only OpenStack is supported)
+persistent_volumes_enabled: false
+
+# Make a copy of kubeconfig on the host that runs Ansible in GITDIR/artifacts
+kubeconfig_localhost: true
+# Download kubectl onto the host that runs Ansible in GITDIR/artifacts
+kubectl_localhost: true
+artifacts_dir: "{{ ansible_env.HOME }}"
+
+# dnsmasq
+# dnsmasq_upstream_dns_servers:
+# - /resolvethiszone.with/10.0.4.250
+# - 8.8.8.8
+
+# Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created. (default true)
+# kubelet_cgroups_per_qos: true
+
+# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet.
+# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
+# kubelet_enforce_node_allocatable: pods
+
+## Supplementary addresses that can be added in kubernetes ssl keys.
+## That can be useful, for example, to set up a keepalived virtual IP
+# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3]
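The kube_apiserver_ip, dnsmasq_dns_server and skydns_server values above are all derived from kube_service_addresses via ipaddr() filter chains; ipaddr(N) picks the N-th address of the network. A quick sanity check of that arithmetic, sketched with Python's stdlib instead of the Jinja2 filter:

    python3 - <<'EOF'
    import ipaddress
    net = ipaddress.ip_network("10.233.0.0/18")
    print("kube_apiserver_ip: ", net[1])  # ipaddr(1) -> 10.233.0.1
    print("dnsmasq_dns_server:", net[2])  # ipaddr(2) -> 10.233.0.2
    print("skydns_server:     ", net[3])  # ipaddr(3) -> 10.233.0.3
    EOF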
diff --git a/xci/scenarios/k8-calico-nofeature/role/k8-calico-nofeature/tasks/main.yml b/xci/scenarios/k8-calico-nofeature/role/k8-calico-nofeature/tasks/main.yml
new file mode 100644
index 00000000..5b2939f1
--- /dev/null
+++ b/xci/scenarios/k8-calico-nofeature/role/k8-calico-nofeature/tasks/main.yml
@@ -0,0 +1,14 @@
+##############################################################################
+# Copyright (c) 2018 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+
+- name: copy k8s-cluster.yml
+ copy:
+ src: "k8s-cluster.yml"
+ dest: "{{ remote_xci_path }}/.cache/repos/kubespray/opnfv_inventory/group_vars/k8s-cluster.yml"
diff --git a/xci/scenarios/k8-canal-nofeature/role/k8-canal-nofeature/files/k8s-cluster.yml b/xci/scenarios/k8-canal-nofeature/role/k8-canal-nofeature/files/k8s-cluster.yml
new file mode 100644
index 00000000..7646aefa
--- /dev/null
+++ b/xci/scenarios/k8-canal-nofeature/role/k8-canal-nofeature/files/k8s-cluster.yml
@@ -0,0 +1,292 @@
+# Valid bootstrap options (required): ubuntu, coreos, centos, none
+bootstrap_os: none
+
+# Directory where etcd data is stored
+etcd_data_dir: /var/lib/etcd
+
+# Directory where the binaries will be installed
+bin_dir: /usr/local/bin
+
+## The access_ip variable is used to define how other nodes should access
+## the node. This is used in flannel to allow other flannel nodes to see
+## this node for example. The access_ip is really useful in AWS and Google
+## environments where the nodes are accessed remotely by the "public" ip,
+## but don't know about that address themselves.
+#access_ip: 1.1.1.1
+
+### LOADBALANCING AND ACCESS MODES
+## Enable multiaccess to configure etcd clients to access all of the etcd members directly
+## as the "http://hostX:port, http://hostY:port, ..." and ignore the proxy loadbalancers.
+## This may be the case if clients support and loadbalance multiple etcd servers natively.
+#etcd_multiaccess: true
+
+## Internal loadbalancers for apiservers
+#loadbalancer_apiserver_localhost: true
+
+## Local loadbalancer should use this port instead, if defined.
+## Defaults to kube_apiserver_port (6443)
+#nginx_kube_apiserver_port: 8443
+
+### OTHER OPTIONAL VARIABLES
+## For some things, kubelet needs to load kernel modules. For example, dynamic kernel services are needed
+## for mounting persistent volumes into containers. These may not be loaded by preinstall kubernetes
+## processes. For example, ceph and rbd backed volumes. Set to true to allow kubelet to load kernel
+## modules.
+# kubelet_load_modules: false
+
+## Internal network total size. This is the prefix of the
+## entire network. Must be unused in your environment.
+#kube_network_prefix: 18
+
+## With calico it is possible to distribute routes with border routers of the datacenter.
+## Warning: enabling router peering will disable calico's default behavior ('node mesh').
+## The subnets of each node will be distributed by the datacenter router
+#peer_with_router: false
+
+## Upstream dns servers used by dnsmasq
+#upstream_dns_servers:
+# - 8.8.8.8
+# - 8.8.4.4
+
+## There are some changes specific to the cloud providers
+## for instance we need to encapsulate packets with some network plugins
+## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', or 'external'
+## When openstack is used make sure to source in the openstack credentials
+## like you would do when using nova-client before starting the playbook.
+#cloud_provider:
+
+## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (https://github.com/kubernetes/kubernetes/issues/50461)
+#openstack_blockstorage_version: "v1/v2/auto (default)"
+## When OpenStack is used, if LBaaSv2 is available you can enable it with the following variables.
+#openstack_lbaas_enabled: True
+#openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
+#openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
+#openstack_lbaas_create_monitor: "yes"
+#openstack_lbaas_monitor_delay: "1m"
+#openstack_lbaas_monitor_timeout: "30s"
+#openstack_lbaas_monitor_max_retries: "3"
+
+## Uncomment to enable experimental kubeadm deployment mode
+#kubeadm_enabled: false
+#kubeadm_token_first: "{{ lookup('password', 'credentials/kubeadm_token_first length=6 chars=ascii_lowercase,digits') }}"
+#kubeadm_token_second: "{{ lookup('password', 'credentials/kubeadm_token_second length=16 chars=ascii_lowercase,digits') }}"
+#kubeadm_token: "{{ kubeadm_token_first }}.{{ kubeadm_token_second }}"
+#
+## Set these proxy values in order to update package manager and docker daemon to use proxies
+#http_proxy: ""
+#https_proxy: ""
+## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
+#no_proxy: ""
+
+## Uncomment this if you want to force overlay/overlay2 as docker storage driver
+## Please note that overlay2 is only supported on newer kernels
+#docker_storage_options: -s overlay2
+
+# Uncomment this if you have more than 3 nameservers, then we'll only use the first 3.
+#docker_dns_servers_strict: false
+
+## Default packages to install within the cluster, f.e:
+#kpm_packages:
+# - name: kube-system/grafana
+
+## Certificate Management
+## This setting determines whether certs are generated via scripts or whether a
+## cluster of Hashicorp's Vault is started to issue certificates (using etcd
+## as a backend). Options are "script" or "vault"
+#cert_management: script
+
+# Set to true to allow pre-checks to fail and continue deployment
+#ignore_assert_errors: false
+
+## Etcd auto compaction retention for mvcc key value store in hour
+#etcd_compaction_retention: 0
+
+## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics.
+#etcd_metrics: basic
+
+
+# Kubernetes configuration dirs and system namespace.
+# Those are where all the additional config stuff goes
+# kubernetes normally puts in /srv/kubernetes.
+# This puts them in a sane location and namespace.
+# Editing those values will almost surely break something.
+kube_config_dir: /etc/kubernetes
+kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
+kube_manifest_dir: "{{ kube_config_dir }}/manifests"
+system_namespace: kube-system
+
+# Logging directory (sysvinit systems)
+kube_log_dir: "/var/log/kubernetes"
+
+# This is where all the cert scripts and certs will be located
+kube_cert_dir: "{{ kube_config_dir }}/ssl"
+
+# This is where all of the bearer tokens will be stored
+kube_token_dir: "{{ kube_config_dir }}/tokens"
+
+# This is where to save basic auth file
+kube_users_dir: "{{ kube_config_dir }}/users"
+
+kube_api_anonymous_auth: false
+
+## Change this to use another Kubernetes version, e.g. a current beta release
+#kube_version: v1.9.0
+
+# Where the binaries will be downloaded.
+# Note: ensure that you've enough disk space (about 1G)
+local_release_dir: "/tmp/releases"
+# Random shifts for retrying failed ops like pushing/downloading
+retry_stagger: 5
+
+# This is the group that the cert creation scripts chgrp the
+# cert files to. Not really changeable...
+kube_cert_group: kube-cert
+
+# Cluster Loglevel configuration
+kube_log_level: 2
+
+# Users to create for basic auth in Kubernetes API via HTTP
+# Optionally add groups for user
+kube_api_pwd: "{{ lookup('password', 'credentials/kube_user length=15 chars=ascii_letters,digits') }}"
+kube_users:
+ kube:
+ pass: "{{kube_api_pwd}}"
+ role: admin
+ groups:
+ - system:masters
+
+## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth)
+#kube_oidc_auth: false
+kube_basic_auth: true
+#kube_token_auth: false
+
+
+## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/
+## To use OpenID you have to additionally deploy an OpenID Provider (e.g. Dex, Keycloak, ...)
+
+# kube_oidc_url: https:// ...
+# kube_oidc_client_id: kubernetes
+## Optional settings for OIDC
+# kube_oidc_ca_file: {{ kube_cert_dir }}/ca.pem
+# kube_oidc_username_claim: sub
+# kube_oidc_groups_claim: groups
+
+
+# Choose network plugin (calico, contiv, weave or flannel)
+# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
+kube_network_plugin: canal
+
+# weave's network password for encryption
+# if null then no network encryption
+# you can use --extra-vars to pass the password in command line
+weave_password: EnterPasswordHere
+
+# Weave uses consensus mode by default
+# Enabling seed mode allows hosts to be added or removed dynamically
+# https://www.weave.works/docs/net/latest/ipam/
+weave_mode_seed: false
+
+# These two variables are automatically changed by the weave role; do not manually change these values
+# To reset values :
+# weave_seed: uninitialized
+# weave_peers: uninitialized
+weave_seed: uninitialized
+weave_peers: uninitialized
+
+# Enable kubernetes network policies
+enable_network_policy: false
+
+# Kubernetes internal network for services, unused block of space.
+kube_service_addresses: 10.233.0.0/18
+
+# internal network. When used, it will assign IP
+# addresses from this range to individual pods.
+# This network must be unused in your network infrastructure!
+kube_pods_subnet: 10.233.64.0/18
+
+# internal network node size allocation (optional). This is the size allocated
+# to each node on your network. With these defaults you should have
+# room for 4096 nodes with 254 pods per node.
+kube_network_node_prefix: 24
+
+# The port the API Server will be listening on.
+kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
+kube_apiserver_port: 6443 # (https)
+kube_apiserver_insecure_port: 8080 # (http)
+
+# DNS configuration.
+# Kubernetes cluster name, also will be used as DNS domain
+cluster_name: cluster.local
+# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
+ndots: 2
+# Can be dnsmasq_kubedns, kubedns or none
+dns_mode: kubedns
+# Can be docker_dns, host_resolvconf or none
+resolvconf_mode: docker_dns
+# Deploy netchecker app to verify DNS resolve as an HTTP service
+deploy_netchecker: false
+# Ip address of the kubernetes skydns service
+skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
+dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
+dns_domain: "{{ cluster_name }}"
+
+# Path used to store Docker data
+docker_daemon_graph: "/var/lib/docker"
+
+## A string of extra options to pass to the docker daemon.
+## This string should be exactly as you wish it to appear.
+## An obvious use case is allowing insecure-registry access
+## to self hosted registries like so:
+
+docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} {{ docker_log_opts }}"
+docker_bin_dir: "/usr/bin"
+
+# Settings for containerized control plane (etcd/kubelet/secrets)
+etcd_deployment_type: docker
+kubelet_deployment_type: host
+vault_deployment_type: docker
+helm_deployment_type: host
+
+# K8s image pull policy (imagePullPolicy)
+k8s_image_pull_policy: IfNotPresent
+
+# Kubernetes dashboard
+# RBAC required. see docs/getting-started.md for access details.
+dashboard_enabled: true
+
+# Monitoring apps for k8s
+efk_enabled: false
+
+# Helm deployment
+helm_enabled: false
+
+# Istio deployment
+istio_enabled: false
+
+# Local volume provisioner deployment
+local_volumes_enabled: false
+
+# Add Persistent Volumes Storage Class for the corresponding cloud provider (currently only OpenStack is supported)
+persistent_volumes_enabled: false
+
+# Make a copy of kubeconfig on the host that runs Ansible in GITDIR/artifacts
+kubeconfig_localhost: true
+# Download kubectl onto the host that runs Ansible in GITDIR/artifacts
+kubectl_localhost: true
+artifacts_dir: "{{ ansible_env.HOME }}"
+
+# dnsmasq
+# dnsmasq_upstream_dns_servers:
+# - /resolvethiszone.with/10.0.4.250
+# - 8.8.8.8
+
+# Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created. (default true)
+# kubelet_cgroups_per_qos: true
+
+# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet.
+# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
+# kubelet_enforce_node_allocatable: pods
+
+## Supplementary addresses that can be added in kubernetes ssl keys.
+## That can be useful, for example, to set up a keepalived virtual IP
+# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3]
diff --git a/xci/scenarios/k8-canal-nofeature/role/k8-canal-nofeature/tasks/main.yml b/xci/scenarios/k8-canal-nofeature/role/k8-canal-nofeature/tasks/main.yml
new file mode 100644
index 00000000..5b2939f1
--- /dev/null
+++ b/xci/scenarios/k8-canal-nofeature/role/k8-canal-nofeature/tasks/main.yml
@@ -0,0 +1,14 @@
+##############################################################################
+# Copyright (c) 2018 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+
+- name: copy k8s-cluster.yml
+ copy:
+ src: "k8s-cluster.yml"
+ dest: "{{ remote_xci_path }}/.cache/repos/kubespray/opnfv_inventory/group_vars/k8s-cluster.yml"
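Both scenario roles ship a full copy of kubespray's k8s-cluster.yml and simply overwrite the generated group_vars file, so the functional delta between them should be the single kube_network_plugin line. A quick way to confirm that (run from the repository root):

    diff xci/scenarios/k8-calico-nofeature/role/k8-calico-nofeature/files/k8s-cluster.yml \
         xci/scenarios/k8-canal-nofeature/role/k8-canal-nofeature/files/k8s-cluster.yml
    # expected: one hunk, calico vs. canal on the kube_network_plugin line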
diff --git a/xci/xci-deploy.sh b/xci/xci-deploy.sh
index 20f67e61..a8b7adbd 100755
--- a/xci/xci-deploy.sh
+++ b/xci/xci-deploy.sh
@@ -109,10 +109,9 @@ echo "xci flavor: $XCI_FLAVOR"
echo "xci installer: $INSTALLER_TYPE"
echo "infra deployment: $INFRA_DEPLOYMENT"
echo "opnfv/releng-xci version: $(git rev-parse HEAD)"
-echo "openstack/bifrost version: $OPENSTACK_BIFROST_VERSION"
+[[ "$INFRA_DEPLOYMENT" == "bifrost" ]] && echo "openstack/bifrost version: $OPENSTACK_BIFROST_VERSION"
[[ "$INSTALLER_TYPE" == "osa" ]] && echo "openstack/openstack-ansible version: $OPENSTACK_OSA_VERSION"
[[ "$INSTALLER_TYPE" == "kubespray" ]] && echo "kubespray version: $KUBESPRAY_VERSION"
-[[ "$INFRA_DEPLOYMENT" == "bifrost" ]] && echo "bifrost version: $OPENSTACK_BIFROST_VERSION"
echo "-------------------------------------------------------------------------"
#-------------------------------------------------------------------------------