-rwxr-xr-x  xci/config/env-vars                                                             13
-rwxr-xr-x  xci/files/xci-destroy-env.sh                                                     8
-rw-r--r--  xci/installer/kubespray/playbooks/configure-opnfvhost.yml                        6
-rw-r--r--  xci/opnfv-scenario-requirements.yml                                             22
-rw-r--r--  xci/playbooks/bootstrap-scenarios.yml                                            4
-rw-r--r--  xci/playbooks/roles/prepare-functest/tasks/main.yml                             21
-rw-r--r--  xci/playbooks/roles/prepare-functest/templates/env.j2                            6
-rw-r--r--  xci/playbooks/roles/prepare-functest/templates/run-functest.sh.j2               61
-rw-r--r--  xci/scenarios/k8-canal-nofeature/role/k8-canal-nofeature/files/k8s-cluster.yml  292
-rw-r--r--  xci/scenarios/k8-canal-nofeature/role/k8-canal-nofeature/tasks/main.yml          14
10 files changed, 412 insertions, 35 deletions
diff --git a/xci/config/env-vars b/xci/config/env-vars
index f72a0240..dc9328ee 100755
--- a/xci/config/env-vars
+++ b/xci/config/env-vars
@@ -16,15 +16,24 @@ export KEEPALIVED_GIT_URL=${KEEPALIVED_GIT_URL:-https://github.com/evrardjp/ansi
export OPENSTACK_OSA_ETC_PATH=/etc/openstack_deploy
export OPNFV_HOST_IP=192.168.122.2
export XCI_FLAVOR_ANSIBLE_FILE_PATH=$XCI_PATH/xci/installer/$INSTALLER_TYPE/files/$XCI_FLAVOR
-export CI_LOOP=${CI_LOOP:-daily}
-export JOB_NAME=${JOB_NAME:-false}
+
# XCI_CACHE is a cache on localhost where repositories and scenarios are cloned.
export XCI_CACHE=${XCI_PATH}/.cache
+
# OPNFV_XCI_CACHE is similar to XCI_CACHE but refers to the remote OPNFV host.
export OPNFV_XCI_CACHE="/root/releng-xci/.cache"
export XCI_SCENARIOS_CACHE="${XCI_CACHE}/repos/scenarios"
export XCI_PLAYBOOKS=${XCI_PATH}/xci/playbooks
+# Functest parameters
+export FUNCTEST_MODE=${FUNCTEST_MODE:-"tier"}
+export FUNCTEST_SUITE_NAME=${FUNCTEST_SUITE_NAME:-"healthcheck"}
+
+# CI parameters
+export CI_LOOP=${CI_LOOP:-"daily"}
+export BUILD_TAG=${BUILD_TAG:-"notag"}
+export NODE_NAME=${NODE_NAME:-$(hostname)}
+
#-------------------------------------------------------------------------------
# Paths where git repositories of XCI Components will be cloned on the OPNFV host
#-------------------------------------------------------------------------------
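The new Functest and CI variables use the same ${VAR:-default} pattern as the rest of env-vars, so a caller (for example a CI job) can override them simply by exporting the variable before sourcing the file. A minimal sketch of that pattern, with illustrative values:

    unset FUNCTEST_SUITE_NAME
    echo "${FUNCTEST_SUITE_NAME:-healthcheck}"   # -> healthcheck (the default applies)
    export FUNCTEST_SUITE_NAME=smoke
    echo "${FUNCTEST_SUITE_NAME:-healthcheck}"   # -> smoke (caller override wins)
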
diff --git a/xci/files/xci-destroy-env.sh b/xci/files/xci-destroy-env.sh
index 2e183bd7..97b76c7c 100755
--- a/xci/files/xci-destroy-env.sh
+++ b/xci/files/xci-destroy-env.sh
@@ -27,8 +27,8 @@ if which vbmc &>/dev/null || { [[ -e ${XCI_VENV}/bifrost/bin/activate ]] && sour
# Delete all libvirt VMs and hosts from vbmc (look for a port number)
for vm in $(vbmc list | awk '/[0-9]/{{ print $2 }}'); do
if which virsh &>/dev/null; then
- virsh destroy $vm || true
- virsh undefine $vm || true
+ virsh destroy $vm &>/dev/null || true
+ virsh undefine $vm &>/dev/null || true
fi
vbmc delete $vm
done
@@ -40,8 +40,8 @@ for varfile in ${flavors[@]}; do
source ${XCI_PATH}/xci/config/${varfile}-vars
for vm in ${TEST_VM_NODE_NAMES}; do
if which virsh &>/dev/null; then
- virsh destroy $vm || true
- virsh undefine $vm || true
+ virsh destroy $vm &>/dev/null || true
+ virsh undefine $vm &>/dev/null || true
fi
done
done
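The only change here is silencing virsh: &>/dev/null discards both stdout and stderr, while the trailing || true forces a zero exit status so the cleanup loop keeps going when a VM is already gone. A small sketch of the behaviour (the VM name is a placeholder):

    virsh destroy no-such-vm &>/dev/null || true   # both streams silenced, failure ignored
    echo $?                                        # -> 0, so the surrounding loop continues
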
diff --git a/xci/installer/kubespray/playbooks/configure-opnfvhost.yml b/xci/installer/kubespray/playbooks/configure-opnfvhost.yml
index f4a0602d..0ac18b50 100644
--- a/xci/installer/kubespray/playbooks/configure-opnfvhost.yml
+++ b/xci/installer/kubespray/playbooks/configure-opnfvhost.yml
@@ -48,6 +48,7 @@
{{ remote_xci_path }}/.cache/repos/kubespray/opnfv_inventory/group_vars"
args:
creates: "{{ remote_xci_path }}/.cache/repos/kubespray/opnfv_inventory/group_vars/k8s-cluster.yml"
+ - include: "{{ xci_path }}/xci/playbooks/bootstrap-scenarios.yml"
- name: Install required packages
package:
name: "{{ kube_require_packages[ansible_pkg_mgr] }}"
@@ -69,5 +70,10 @@
- name: Configure SSL certificates
include_tasks: "{{ xci_path }}/xci/playbooks/manage-ssl-certs.yml"
+ - name: fetch xci environment
+ copy:
+ src: "{{ xci_path }}/.cache/xci.env"
+ dest: /root/xci.env
+
- name: Manage SSH keys
include_tasks: "{{ xci_path }}/xci/playbooks/manage-ssh-keys.yml"
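After this play runs, the OPNFV host carries a copy of the XCI environment at /root/xci.env, which the functest wrapper below reads through its XCI_ENV whitelist. A hedged way to spot-check the copy from the deployment host, using the OPNFV_HOST_IP defined in env-vars above:

    ssh root@192.168.122.2 'head /root/xci.env'
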
diff --git a/xci/opnfv-scenario-requirements.yml b/xci/opnfv-scenario-requirements.yml
index a8361535..e1c682ab 100644
--- a/xci/opnfv-scenario-requirements.yml
+++ b/xci/opnfv-scenario-requirements.yml
@@ -28,7 +28,7 @@
- scenario: os-nosdn-nofeature
scm: git
- src: https://git.opnfv.org/releng-xci
+ src: https://gerrit.opnfv.org/gerrit/releng-xci
version: master
role: xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature
installers:
@@ -44,7 +44,7 @@
- scenario: os-odl-nofeature
scm: git
- src: https://git.opnfv.org/releng-xci
+ src: https://gerrit.opnfv.org/gerrit/releng-xci
version: master
role: xci/scenarios/os-odl-nofeature/role/os-odl-nofeature
installers:
@@ -59,7 +59,7 @@
- scenario: k8-nosdn-nofeature
scm: git
- src: https://git.opnfv.org/releng-xci
+ src: https://gerrit.opnfv.org/gerrit/releng-xci
version: master
role: xci/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature
installers:
@@ -88,3 +88,19 @@
distros:
- ubuntu
- centos
+
+- scenario: k8-canal-nofeature
+ scm: git
+ src: https://git.opnfv.org/releng-xci
+ version: master
+ role: xci/scenarios/k8-canal-nofeature/role/k8-canal-nofeature
+ installers:
+ - installer: kubespray
+ flavors:
+ - aio
+ - ha
+ - mini
+ - noha
+ distros:
+ - ubuntu
+ - centos
diff --git a/xci/playbooks/bootstrap-scenarios.yml b/xci/playbooks/bootstrap-scenarios.yml
index 6546d5ce..788a71c6 100644
--- a/xci/playbooks/bootstrap-scenarios.yml
+++ b/xci/playbooks/bootstrap-scenarios.yml
@@ -25,3 +25,7 @@
include_role:
name: "os-odl-bgpvpn"
when: deploy_scenario == 'os-odl-bgpvpn'
+- name: Prepare everything to run the k8-canal-nofeature scenario
+ include_role:
+ name: "k8-canal-nofeature"
+ when: deploy_scenario == 'k8-canal-nofeature'
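Each scenario is wired in the same way: an include_role guarded by a when: deploy_scenario check, so only the role matching the requested scenario runs. As a hedged usage example (assuming the usual xci-deploy.sh entry point and DEPLOY_SCENARIO variable, neither of which is shown in this diff):

    DEPLOY_SCENARIO=k8-canal-nofeature INSTALLER_TYPE=kubespray ./xci-deploy.sh
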
diff --git a/xci/playbooks/roles/prepare-functest/tasks/main.yml b/xci/playbooks/roles/prepare-functest/tasks/main.yml
index ad578bfd..c29baca9 100644
--- a/xci/playbooks/roles/prepare-functest/tasks/main.yml
+++ b/xci/playbooks/roles/prepare-functest/tasks/main.yml
@@ -26,16 +26,19 @@
state: present
extra_args: '-c https://raw.githubusercontent.com/openstack/requirements/{{ requirements_git_install_branch }}/upper-constraints.txt'
-- name: check if the gateway was already set
- shell: "ip a | grep {{ gateway_ip }}"
- register: gateway_ip_result
- ignore_errors: True
- changed_when: False
+- name: create public network gateway for functest
+ block:
+ - name: check if the gateway was already set
+ shell: "ip a | grep {{ gateway_ip }}"
+ register: gateway_ip_result
+ ignore_errors: True
+ changed_when: False
-- name: add public network gateway
- command: "ip addr add {{ gateway_ip_mask }} brd {{ broadcast_ip }} dev {{ gateway_interface }}"
- changed_when: False
- when: gateway_ip_result|failed
+ - name: add public network gateway
+ command: "ip addr add {{ gateway_ip_mask }} brd {{ broadcast_ip }} dev {{ gateway_interface }}"
+ changed_when: False
+ when: gateway_ip_result|failed
+ when: deploy_scenario is match("os-.*")
- name: prepare environment file for functest
template:
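Wrapping the two gateway tasks in a block lets a single when: deploy_scenario is match("os-.*") condition skip them for Kubernetes scenarios, where no external network gateway is needed on the host. A hedged shell equivalent of what the block does on OpenStack scenarios (IP, prefix, broadcast and interface are placeholders; the real values come from the role's variables):

    if ! ip a | grep -q "192.168.122.1"; then
        ip addr add 192.168.122.1/24 brd 192.168.122.255 dev br-vlan
    fi
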
diff --git a/xci/playbooks/roles/prepare-functest/templates/env.j2 b/xci/playbooks/roles/prepare-functest/templates/env.j2
index af271ac7..d9a3bf32 100644
--- a/xci/playbooks/roles/prepare-functest/templates/env.j2
+++ b/xci/playbooks/roles/prepare-functest/templates/env.j2
@@ -1,5 +1,7 @@
INSTALLER_IP=192.168.122.2
-EXTERNAL_NETWORK={{ external_network }}
-CI_LOOP=daily
TEST_DB_URL=http://testresults.opnfv.org/test/api/v1/results
ENERGY_RECORDER_API_URL=http://energy.opnfv.fr/resources
+{# external network is only valid for OpenStack based scenarios #}
+{% if 'os-' in deploy_scenario %}
+EXTERNAL_NETWORK={{ external_network }}
+{% endif %}
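With the EXTERNAL_NETWORK line now inside a Jinja2 condition, the rendered /root/env differs per scenario family. A hedged example of what the template itself renders for a k8-* deployment (the EXTERNAL_NETWORK line is simply omitted; further variables may be appended by the run script at runtime):

    INSTALLER_IP=192.168.122.2
    TEST_DB_URL=http://testresults.opnfv.org/test/api/v1/results
    ENERGY_RECORDER_API_URL=http://energy.opnfv.fr/resources
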
diff --git a/xci/playbooks/roles/prepare-functest/templates/run-functest.sh.j2 b/xci/playbooks/roles/prepare-functest/templates/run-functest.sh.j2
index ee02fc65..0f97c3b4 100644
--- a/xci/playbooks/roles/prepare-functest/templates/run-functest.sh.j2
+++ b/xci/playbooks/roles/prepare-functest/templates/run-functest.sh.j2
@@ -1,21 +1,10 @@
#!/bin/bash
# Variables that we need to pass from XCI to functest
-XCI_ENV=(INSTALLER_TYPE XCI_FLAVOR OPENSTACK_OSA_VERSION)
+XCI_ENV=(INSTALLER_TYPE XCI_FLAVOR OPENSTACK_OSA_VERSION CI_LOOP BUILD_TAG NODE_NAME FUNCTEST_MODE FUNCTEST_SUITE_NAME)
-source /root/openrc
-
-openstack --insecure network create --external \
- --provider-physical-network flat \
- --provider-network-type flat {{ external_network }}
-
-openstack --insecure subnet create --network {{ external_network }} \
- --allocation-pool {{ allocation_pool }} \
- --subnet-range {{ subnet_cidr }} --gateway {{ gateway_ip }} \
- --no-dhcp {{ subnet_name }}
-
-mkdir ~/results/
-mkdir ~/images && cd ~/images && wget -q http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img && cd ~
+# Create directory to store functest logs
+mkdir -p ~/results/
# Extract variables from xci.env file
if [[ -e /root/xci.env ]]; then
@@ -38,8 +27,50 @@ echo "------------- functest environment file --------------"
cat /root/env
echo "------------------------------------------------------"
+# we need to ensure the necessary environment variables are sourced
+source /root/env
+
+{% if 'os-' in deploy_scenario %}
+{# stuff needed for OpenStack based scenarios #}
+source /root/openrc
+
+openstack --insecure network create --external \
+ --provider-physical-network flat \
+ --provider-network-type flat {{ external_network }}
+
+openstack --insecure subnet create --network {{ external_network }} \
+ --allocation-pool {{ allocation_pool }} \
+ --subnet-range {{ subnet_cidr }} --gateway {{ gateway_ip }} \
+ --no-dhcp {{ subnet_name }}
+
+mkdir ~/images && cd ~/images && wget -q http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img && cd ~
+
+# docker image to use will be different for healthcheck and smoke test
+DOCKER_IMAGE_NAME="opnfv/functest-${FUNCTEST_SUITE_NAME}"
+
sudo docker run --env-file env \
-v $(pwd)/openrc:/home/opnfv/functest/conf/env_file \
-v $(pwd)/images:/home/opnfv/functest/images \
-v $(pwd)/results:/home/opnfv/functest/results \
- opnfv/functest-healthcheck
+ $DOCKER_IMAGE_NAME
+{% else %}
+{# stuff needed for Kubernetes based scenarios #}
+# Create k8s.creds file for functest
+KUBE_MASTER_URL=$(grep -r server ~/.kube/config | awk '{print $2}')
+KUBE_MASTER_IP=$(echo $KUBE_MASTER_URL | awk -F "[:/]" '{print $4}')
+cat << EOF > ~/k8s.creds
+KUBERNETES_PROVIDER=local
+KUBE_MASTER_URL=$KUBE_MASTER_URL
+KUBE_MASTER_IP=$KUBE_MASTER_IP
+EOF
+
+# docker image to use will be different for healthcheck and smoke test
+DOCKER_IMAGE_NAME="opnfv/functest-kubernetes-${FUNCTEST_SUITE_NAME}"
+
+sudo docker run --env-file env \
+ -v $(pwd)/k8s.creds:/home/opnfv/functest/conf/env_file \
+ -v $(pwd)/.kube/config:/root/.kube/config \
+ -v $(pwd)/results:/home/opnfv/functest/results \
+ $DOCKER_IMAGE_NAME
+{% endif %}
+
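The Docker image is now derived from FUNCTEST_SUITE_NAME, so the same wrapper covers healthcheck and smoke runs for both scenario families. A quick illustration of how the name resolves on a Kubernetes scenario, using the default suite:

    FUNCTEST_SUITE_NAME=${FUNCTEST_SUITE_NAME:-healthcheck}
    DOCKER_IMAGE_NAME="opnfv/functest-kubernetes-${FUNCTEST_SUITE_NAME}"
    echo "$DOCKER_IMAGE_NAME"   # -> opnfv/functest-kubernetes-healthcheck
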
diff --git a/xci/scenarios/k8-canal-nofeature/role/k8-canal-nofeature/files/k8s-cluster.yml b/xci/scenarios/k8-canal-nofeature/role/k8-canal-nofeature/files/k8s-cluster.yml
new file mode 100644
index 00000000..7646aefa
--- /dev/null
+++ b/xci/scenarios/k8-canal-nofeature/role/k8-canal-nofeature/files/k8s-cluster.yml
@@ -0,0 +1,292 @@
+# Valid bootstrap options (required): ubuntu, coreos, centos, none
+bootstrap_os: none
+
+#Directory where etcd data stored
+etcd_data_dir: /var/lib/etcd
+
+# Directory where the binaries will be installed
+bin_dir: /usr/local/bin
+
+## The access_ip variable is used to define how other nodes should access
+## the node. This is used in flannel to allow other flannel nodes to see
+## this node for example. The access_ip is really useful in AWS and Google
+## environments where the nodes are accessed remotely by the "public" ip,
+## but don't know about that address themselves.
+#access_ip: 1.1.1.1
+
+### LOADBALANCING AND ACCESS MODES
+## Enable multiaccess to configure etcd clients to access all of the etcd members directly
+## as the "http://hostX:port, http://hostY:port, ..." and ignore the proxy loadbalancers.
+## This may be the case if clients support and loadbalance multiple etcd servers natively.
+#etcd_multiaccess: true
+
+## Internal loadbalancers for apiservers
+#loadbalancer_apiserver_localhost: true
+
+## Local loadbalancer should use this port instead, if defined.
+## Defaults to kube_apiserver_port (6443)
+#nginx_kube_apiserver_port: 8443
+
+### OTHER OPTIONAL VARIABLES
+## For some things, kubelet needs to load kernel modules. For example, dynamic kernel services are needed
+## for mounting persistent volumes into containers. These may not be loaded by preinstall kubernetes
+## processes. For example, ceph and rbd backed volumes. Set to true to allow kubelet to load kernel
+## modules.
+# kubelet_load_modules: false
+
+## Internal network total size. This is the prefix of the
+## entire network. Must be unused in your environment.
+#kube_network_prefix: 18
+
+## With calico it is possible to distribute routes with border routers of the datacenter.
+## Warning : enabling router peering will disable calico's default behavior ('node mesh').
+## The subnets of each node will be distributed by the datacenter router
+#peer_with_router: false
+
+## Upstream dns servers used by dnsmasq
+#upstream_dns_servers:
+# - 8.8.8.8
+# - 8.8.4.4
+
+## There are some changes specific to the cloud providers
+## for instance we need to encapsulate packets with some network plugins
+## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', or 'external'
+## When openstack is used make sure to source in the openstack credentials
+## like you would do when using nova-client before starting the playbook.
+#cloud_provider:
+
+## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (https://github.com/kubernetes/kubernetes/issues/50461)
+#openstack_blockstorage_version: "v1/v2/auto (default)"
+## When OpenStack is used, if LBaaSv2 is available you can enable it with the following variables.
+#openstack_lbaas_enabled: True
+#openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
+#openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
+#openstack_lbaas_create_monitor: "yes"
+#openstack_lbaas_monitor_delay: "1m"
+#openstack_lbaas_monitor_timeout: "30s"
+#openstack_lbaas_monitor_max_retries: "3"
+
+## Uncomment to enable experimental kubeadm deployment mode
+#kubeadm_enabled: false
+#kubeadm_token_first: "{{ lookup('password', 'credentials/kubeadm_token_first length=6 chars=ascii_lowercase,digits') }}"
+#kubeadm_token_second: "{{ lookup('password', 'credentials/kubeadm_token_second length=16 chars=ascii_lowercase,digits') }}"
+#kubeadm_token: "{{ kubeadm_token_first }}.{{ kubeadm_token_second }}"
+#
+## Set these proxy values in order to update package manager and docker daemon to use proxies
+#http_proxy: ""
+#https_proxy: ""
+## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
+#no_proxy: ""
+
+## Uncomment this if you want to force overlay/overlay2 as docker storage driver
+## Please note that overlay2 is only supported on newer kernels
+#docker_storage_options: -s overlay2
+
+# Uncomment this if you have more than 3 nameservers, then we'll only use the first 3.
+#docker_dns_servers_strict: false
+
+## Default packages to install within the cluster, e.g.:
+#kpm_packages:
+# - name: kube-system/grafana
+
+## Certificate Management
+## This setting determines whether certs are generated via scripts or whether a
+## cluster of Hashicorp's Vault is started to issue certificates (using etcd
+## as a backend). Options are "script" or "vault"
+#cert_management: script
+
+# Set to true to allow pre-checks to fail and continue deployment
+#ignore_assert_errors: false
+
+## Etcd auto compaction retention for mvcc key value store in hour
+#etcd_compaction_retention: 0
+
+## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics.
+#etcd_metrics: basic
+
+
+# Kubernetes configuration dirs and system namespace.
+# Those are where all the additional config stuff goes
+# kubernetes normally puts in /srv/kubernetes.
+# This puts them in a sane location and namespace.
+# Editing those values will almost surely break something.
+kube_config_dir: /etc/kubernetes
+kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
+kube_manifest_dir: "{{ kube_config_dir }}/manifests"
+system_namespace: kube-system
+
+# Logging directory (sysvinit systems)
+kube_log_dir: "/var/log/kubernetes"
+
+# This is where all the cert scripts and certs will be located
+kube_cert_dir: "{{ kube_config_dir }}/ssl"
+
+# This is where all of the bearer tokens will be stored
+kube_token_dir: "{{ kube_config_dir }}/tokens"
+
+# This is where to save basic auth file
+kube_users_dir: "{{ kube_config_dir }}/users"
+
+kube_api_anonymous_auth: false
+
+## Change this to use another Kubernetes version, e.g. a current beta release
+#kube_version: v1.9.0
+
+# Where the binaries will be downloaded.
+# Note: ensure that you've enough disk space (about 1G)
+local_release_dir: "/tmp/releases"
+# Random shifts for retrying failed ops like pushing/downloading
+retry_stagger: 5
+
+# This is the group that the cert creation scripts chgrp the
+# cert files to. Not really changeable...
+kube_cert_group: kube-cert
+
+# Cluster Loglevel configuration
+kube_log_level: 2
+
+# Users to create for basic auth in Kubernetes API via HTTP
+# Optionally add groups for user
+kube_api_pwd: "{{ lookup('password', 'credentials/kube_user length=15 chars=ascii_letters,digits') }}"
+kube_users:
+ kube:
+ pass: "{{kube_api_pwd}}"
+ role: admin
+ groups:
+ - system:masters
+
+## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth)
+#kube_oidc_auth: false
+kube_basic_auth: true
+#kube_token_auth: false
+
+
+## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/
+## To use OpenID you have to deploy an additional OpenID Provider (e.g. Dex, Keycloak, ...)
+
+# kube_oidc_url: https:// ...
+# kube_oidc_client_id: kubernetes
+## Optional settings for OIDC
+# kube_oidc_ca_file: {{ kube_cert_dir }}/ca.pem
+# kube_oidc_username_claim: sub
+# kube_oidc_groups_claim: groups
+
+
+# Choose network plugin (calico, contiv, weave or flannel)
+# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
+kube_network_plugin: canal
+
+# weave's network password for encryption
+# if null then no network encryption
+# you can use --extra-vars to pass the password in command line
+weave_password: EnterPasswordHere
+
+# Weave uses consensus mode by default
+# Enabling seed mode allow to dynamically add or remove hosts
+# https://www.weave.works/docs/net/latest/ipam/
+weave_mode_seed: false
+
+# These two variables are automatically changed by the weave role; do not manually change these values
+# To reset values :
+# weave_seed: uninitialized
+# weave_peers: uninitialized
+weave_seed: uninitialized
+weave_peers: uninitialized
+
+# Enable kubernetes network policies
+enable_network_policy: false
+
+# Kubernetes internal network for services, unused block of space.
+kube_service_addresses: 10.233.0.0/18
+
+# internal network. When used, it will assign IP
+# addresses from this range to individual pods.
+# This network must be unused in your network infrastructure!
+kube_pods_subnet: 10.233.64.0/18
+
+# internal network node size allocation (optional). This is the size allocated
+# to each node on your network. With these defaults you should have
+# room for 4096 nodes with 254 pods per node.
+kube_network_node_prefix: 24
+
+# The port the API Server will be listening on.
+kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
+kube_apiserver_port: 6443 # (https)
+kube_apiserver_insecure_port: 8080 # (http)
+
+# DNS configuration.
+# Kubernetes cluster name, also will be used as DNS domain
+cluster_name: cluster.local
+# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
+ndots: 2
+# Can be dnsmasq_kubedns, kubedns or none
+dns_mode: kubedns
+# Can be docker_dns, host_resolvconf or none
+resolvconf_mode: docker_dns
+# Deploy netchecker app to verify DNS resolve as an HTTP service
+deploy_netchecker: false
+# Ip address of the kubernetes skydns service
+skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
+dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
+dns_domain: "{{ cluster_name }}"
+
+# Path used to store Docker data
+docker_daemon_graph: "/var/lib/docker"
+
+## A string of extra options to pass to the docker daemon.
+## This string should be exactly as you wish it to appear.
+## An obvious use case is allowing insecure-registry access
+## to self hosted registries like so:
+
+docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} {{ docker_log_opts }}"
+docker_bin_dir: "/usr/bin"
+
+# Settings for containerized control plane (etcd/kubelet/secrets)
+etcd_deployment_type: docker
+kubelet_deployment_type: host
+vault_deployment_type: docker
+helm_deployment_type: host
+
+# K8s image pull policy (imagePullPolicy)
+k8s_image_pull_policy: IfNotPresent
+
+# Kubernetes dashboard
+# RBAC required. see docs/getting-started.md for access details.
+dashboard_enabled: true
+
+# Monitoring apps for k8s
+efk_enabled: false
+
+# Helm deployment
+helm_enabled: false
+
+# Istio deployment
+istio_enabled: false
+
+# Local volume provisioner deployment
+local_volumes_enabled: false
+
+# Add Persistent Volumes Storage Class for corresponding cloud provider ( OpenStack is only supported now )
+persistent_volumes_enabled: false
+
+# Make a copy of kubeconfig on the host that runs Ansible in GITDIR/artifacts
+kubeconfig_localhost: true
+# Download kubectl onto the host that runs Ansible in GITDIR/artifacts
+kubectl_localhost: true
+artifacts_dir: "{{ ansible_env.HOME }}"
+
+# dnsmasq
+# dnsmasq_upstream_dns_servers:
+# - /resolvethiszone.with/10.0.4.250
+# - 8.8.8.8
+
+# Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created. (default true)
+# kubelet_cgroups_per_qos: true
+
+# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet.
+# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
+# kubelet_enforce_node_allocatable: pods
+
+## Supplementary addresses that can be added in kubernetes ssl keys.
+## That can be useful, for example, to set up a keepalived virtual IP
+# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3]
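Most of this file mirrors the stock kubespray group_vars defaults; the scenario-defining setting is the CNI plugin selection. A quick way to confirm that from a checkout (path as added above):

    grep '^kube_network_plugin' xci/scenarios/k8-canal-nofeature/role/k8-canal-nofeature/files/k8s-cluster.yml
    # -> kube_network_plugin: canal
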
diff --git a/xci/scenarios/k8-canal-nofeature/role/k8-canal-nofeature/tasks/main.yml b/xci/scenarios/k8-canal-nofeature/role/k8-canal-nofeature/tasks/main.yml
new file mode 100644
index 00000000..5b2939f1
--- /dev/null
+++ b/xci/scenarios/k8-canal-nofeature/role/k8-canal-nofeature/tasks/main.yml
@@ -0,0 +1,14 @@
+##############################################################################
+# Copyright (c) 2018 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+
+- name: copy k8s-cluster.yml
+ copy:
+ src: "k8s-cluster.yml"
+ dest: "{{ remote_xci_path }}/.cache/repos/kubespray/opnfv_inventory/group_vars/k8s-cluster.yml"