author    Fatih Degirmenci <fdegir@gmail.com>    2018-05-09 09:42:16 +0200
committer Fatih Degirmenci <fdegir@gmail.com>    2018-05-09 09:43:37 +0200
commit    d013255d33d28ac70c6276a81a3129c83d441740 (patch)
tree      e080f84bda8d4c12f60353931749c453243f539a /scenarios
parent    1a936625eaa2bb7ca6844202138118907b5bcb91 (diff)
Move generic scenarios into scenarios folder for alignment (6.0.0)
All the external scenarios are developed and maintained in the corresponding project repos under a scenarios folder. In order to handle generic and external scenarios in the same way, we need to move the generic scenarios into the scenarios folder in this repo as well.

Change-Id: I30064d3e3105a88279e40384a8efb888cc3bcc68
Signed-off-by: Fatih Degirmenci <fdegir@gmail.com>
Diffstat (limited to 'scenarios')
-rw-r--r--  scenarios/k8-calico-nofeature/role/k8-calico-nofeature/files/k8s-cluster.yml | 292
-rw-r--r--  scenarios/k8-calico-nofeature/role/k8-calico-nofeature/tasks/main.yml | 14
-rw-r--r--  scenarios/k8-canal-nofeature/role/k8-canal-nofeature/files/k8s-cluster.yml | 292
-rw-r--r--  scenarios/k8-canal-nofeature/role/k8-canal-nofeature/tasks/main.yml | 14
-rw-r--r--  scenarios/k8-flannel-nofeature/role/k8-flannel-nofeature/files/k8-cluster.yml | 292
-rw-r--r--  scenarios/k8-flannel-nofeature/role/k8-flannel-nofeature/tasks/main.yml | 14
-rw-r--r--  scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature/files/k8s-cluster.yml | 292
-rw-r--r--  scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature/tasks/.gitkeep | 0
-rw-r--r--  scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature/tasks/main.yml | 14
-rw-r--r--  scenarios/os-nosdn-nofeature/README.rst | 2
-rw-r--r--  scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/ha/openstack_user_config.yml | 255
-rw-r--r--  scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/mini/openstack_user_config.yml | 170
-rw-r--r--  scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/noha/openstack_user_config.yml | 172
-rw-r--r--  scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/user_variables_os-nosdn-nofeature.yml | 35
-rw-r--r--  scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/tasks/main.yml | 18
-rw-r--r--  scenarios/os-odl-nofeature/.gitkeep | 0
-rw-r--r--  scenarios/os-odl-nofeature/role/os-odl-nofeature/files/ha/openstack_user_config.yml | 256
-rw-r--r--  scenarios/os-odl-nofeature/role/os-odl-nofeature/files/ha/user_variables_os-odl-nofeature-ha.yml | 22
-rw-r--r--  scenarios/os-odl-nofeature/role/os-odl-nofeature/files/mini/openstack_user_config.yml | 171
-rw-r--r--  scenarios/os-odl-nofeature/role/os-odl-nofeature/files/noha/openstack_user_config.yml | 173
-rw-r--r--  scenarios/os-odl-nofeature/role/os-odl-nofeature/tasks/main.yml | 26
-rw-r--r--  scenarios/os-odl-nofeature/role/os-odl-nofeature/templates/user_variables_os-odl-nofeature.yml.j2 | 45
-rw-r--r--  scenarios/os-odl-nofeature/vars/main.yml | 2
-rw-r--r--  scenarios/os-odl-nofeature/xci_overrides | 7
24 files changed, 2578 insertions(+), 0 deletions(-)
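Taken together, the new files give every generic scenario the same self-contained layout that the external scenarios already use in their own repos. Shown below for k8-calico-nofeature; the other Kubernetes scenarios are identical, and the OpenStack scenarios follow the same pattern with extra content (ha/mini/noha configs, templates, vars, xci_overrides) as listed in the diffstat above:

    scenarios/
      k8-calico-nofeature/
        role/
          k8-calico-nofeature/
            files/k8s-cluster.yml
            tasks/main.yml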
diff --git a/scenarios/k8-calico-nofeature/role/k8-calico-nofeature/files/k8s-cluster.yml b/scenarios/k8-calico-nofeature/role/k8-calico-nofeature/files/k8s-cluster.yml
new file mode 100644
index 0000000..20d3091
--- /dev/null
+++ b/scenarios/k8-calico-nofeature/role/k8-calico-nofeature/files/k8s-cluster.yml
@@ -0,0 +1,292 @@
+# Valid bootstrap options (required): ubuntu, coreos, centos, none
+bootstrap_os: none
+
+# Directory where etcd data is stored
+etcd_data_dir: /var/lib/etcd
+
+# Directory where the binaries will be installed
+bin_dir: /usr/local/bin
+
+## The access_ip variable is used to define how other nodes should access
+## the node. This is used in flannel to allow other flannel nodes to see
+## this node for example. The access_ip is really useful in AWS and Google
+## environments where the nodes are accessed remotely by the "public" ip,
+## but don't know about that address themselves.
+#access_ip: 1.1.1.1
+
+### LOADBALANCING AND ACCESS MODES
+## Enable multiaccess to configure etcd clients to access all of the etcd members directly
+## as the "http://hostX:port, http://hostY:port, ..." and ignore the proxy loadbalancers.
+## This may be the case if clients support and loadbalance multiple etcd servers natively.
+#etcd_multiaccess: true
+
+## Internal loadbalancers for apiservers
+#loadbalancer_apiserver_localhost: true
+
+## Local loadbalancer should use this port instead, if defined.
+## Defaults to kube_apiserver_port (6443)
+#nginx_kube_apiserver_port: 8443
+
+### OTHER OPTIONAL VARIABLES
+## For some things, kubelet needs to load kernel modules. For example, dynamic kernel services are needed
+## for mounting persistent volumes into containers. These may not be loaded by preinstall kubernetes
+## processes. For example, ceph and rbd backed volumes. Set to true to allow kubelet to load kernel
+## modules.
+# kubelet_load_modules: false
+
+## Internal network total size. This is the prefix of the
+## entire network. Must be unused in your environment.
+#kube_network_prefix: 18
+
+## With calico it is possible to distribute routes with border routers of the datacenter.
+## Warning : enabling router peering will disable calico's default behavior ('node mesh').
+## The subnets of each node will be distributed by the datacenter router
+#peer_with_router: false
+
+## Upstream dns servers used by dnsmasq
+#upstream_dns_servers:
+# - 8.8.8.8
+# - 8.8.4.4
+
+## There are some changes specific to the cloud providers
+## for instance we need to encapsulate packets with some network plugins
+## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', or 'external'
+## When openstack is used make sure to source in the openstack credentials
+## like you would do when using nova-client before starting the playbook.
+#cloud_provider:
+
+## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (https://github.com/kubernetes/kubernetes/issues/50461)
+#openstack_blockstorage_version: "v1/v2/auto (default)"
+## When OpenStack is used, if LBaaSv2 is available you can enable it with the following variables.
+#openstack_lbaas_enabled: True
+#openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
+#openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
+#openstack_lbaas_create_monitor: "yes"
+#openstack_lbaas_monitor_delay: "1m"
+#openstack_lbaas_monitor_timeout: "30s"
+#openstack_lbaas_monitor_max_retries: "3"
+
+## Uncomment to enable experimental kubeadm deployment mode
+#kubeadm_enabled: false
+#kubeadm_token_first: "{{ lookup('password', 'credentials/kubeadm_token_first length=6 chars=ascii_lowercase,digits') }}"
+#kubeadm_token_second: "{{ lookup('password', 'credentials/kubeadm_token_second length=16 chars=ascii_lowercase,digits') }}"
+#kubeadm_token: "{{ kubeadm_token_first }}.{{ kubeadm_token_second }}"
+#
+## Set these proxy values in order to update package manager and docker daemon to use proxies
+#http_proxy: ""
+#https_proxy: ""
+## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
+#no_proxy: ""
+
+## Uncomment this if you want to force overlay/overlay2 as docker storage driver
+## Please note that overlay2 is only supported on newer kernels
+#docker_storage_options: -s overlay2
+
+# Uncomment this if you have more than 3 nameservers, then we'll only use the first 3.
+#docker_dns_servers_strict: false
+
+## Default packages to install within the cluster, f.e:
+#kpm_packages:
+# - name: kube-system/grafana
+
+## Certificate Management
+## This setting determines whether certs are generated via scripts or whether a
+## cluster of Hashicorp's Vault is started to issue certificates (using etcd
+## as a backend). Options are "script" or "vault"
+#cert_management: script
+
+# Set to true to allow pre-checks to fail and continue deployment
+#ignore_assert_errors: false
+
+## Etcd auto compaction retention for mvcc key value store in hour
+#etcd_compaction_retention: 0
+
+## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics.
+#etcd_metrics: basic
+
+
+# Kubernetes configuration dirs and system namespace.
+# Those are where all the additional config stuff goes
+# kubernetes normally puts in /srv/kubernetes.
+# This puts them in a sane location and namespace.
+# Editing those values will almost surely break something.
+kube_config_dir: /etc/kubernetes
+kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
+kube_manifest_dir: "{{ kube_config_dir }}/manifests"
+system_namespace: kube-system
+
+# Logging directory (sysvinit systems)
+kube_log_dir: "/var/log/kubernetes"
+
+# This is where all the cert scripts and certs will be located
+kube_cert_dir: "{{ kube_config_dir }}/ssl"
+
+# This is where all of the bearer tokens will be stored
+kube_token_dir: "{{ kube_config_dir }}/tokens"
+
+# This is where to save basic auth file
+kube_users_dir: "{{ kube_config_dir }}/users"
+
+kube_api_anonymous_auth: false
+
+## Change this to use another Kubernetes version, e.g. a current beta release
+#kube_version: v1.9.0
+
+# Where the binaries will be downloaded.
+# Note: ensure that you've enough disk space (about 1G)
+local_release_dir: "/tmp/releases"
+# Random shifts for retrying failed ops like pushing/downloading
+retry_stagger: 5
+
+# This is the group that the cert creation scripts chgrp the
+# cert files to. Not really changeable...
+kube_cert_group: kube-cert
+
+# Cluster Loglevel configuration
+kube_log_level: 2
+
+# Users to create for basic auth in Kubernetes API via HTTP
+# Optionally add groups for user
+kube_api_pwd: "{{ lookup('password', 'credentials/kube_user length=15 chars=ascii_letters,digits') }}"
+kube_users:
+ kube:
+ pass: "{{kube_api_pwd}}"
+ role: admin
+ groups:
+ - system:masters
+
+## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth)
+#kube_oidc_auth: false
+kube_basic_auth: true
+#kube_token_auth: false
+
+
+## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/
+## To use OpenID you have to additionally deploy an OpenID Provider (e.g. Dex, Keycloak, ...)
+
+# kube_oidc_url: https:// ...
+# kube_oidc_client_id: kubernetes
+## Optional settings for OIDC
+# kube_oidc_ca_file: {{ kube_cert_dir }}/ca.pem
+# kube_oidc_username_claim: sub
+# kube_oidc_groups_claim: groups
+
+
+# Choose network plugin (calico, contiv, weave or flannel)
+# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
+kube_network_plugin: calico
+
+# weave's network password for encryption
+# if null then no network encryption
+# you can use --extra-vars to pass the password in command line
+weave_password: EnterPasswordHere
+
+# Weave uses consensus mode by default
+# Enabling seed mode allows you to dynamically add or remove hosts
+# https://www.weave.works/docs/net/latest/ipam/
+weave_mode_seed: false
+
+# These two variables are automatically changed by the weave role; do not manually change these values
+# To reset values :
+# weave_seed: uninitialized
+# weave_peers: uninitialized
+weave_seed: uninitialized
+weave_peers: uninitialized
+
+# Enable kubernetes network policies
+enable_network_policy: false
+
+# Kubernetes internal network for services, unused block of space.
+kube_service_addresses: 10.233.0.0/18
+
+# internal network. When used, it will assign IP
+# addresses from this range to individual pods.
+# This network must be unused in your network infrastructure!
+kube_pods_subnet: 10.233.64.0/18
+
+# internal network node size allocation (optional). This is the size allocated
+# to each node on your network. With these defaults you should have
+# room for 4096 nodes with 254 pods per node.
+kube_network_node_prefix: 24
+
+# The port the API Server will be listening on.
+kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
+kube_apiserver_port: 6443 # (https)
+kube_apiserver_insecure_port: 8080 # (http)
+
+# DNS configuration.
+# Kubernetes cluster name, also will be used as DNS domain
+cluster_name: cluster.local
+# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
+ndots: 2
+# Can be dnsmasq_kubedns, kubedns or none
+dns_mode: kubedns
+# Can be docker_dns, host_resolvconf or none
+resolvconf_mode: docker_dns
+# Deploy netchecker app to verify DNS resolve as an HTTP service
+deploy_netchecker: false
+# Ip address of the kubernetes skydns service
+skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
+dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
+dns_domain: "{{ cluster_name }}"
+
+# Path used to store Docker data
+docker_daemon_graph: "/var/lib/docker"
+
+## A string of extra options to pass to the docker daemon.
+## This string should be exactly as you wish it to appear.
+## An obvious use case is allowing insecure-registry access
+## to self hosted registries like so:
+
+docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} {{ docker_log_opts }}"
+docker_bin_dir: "/usr/bin"
+
+# Settings for containerized control plane (etcd/kubelet/secrets)
+etcd_deployment_type: docker
+kubelet_deployment_type: host
+vault_deployment_type: docker
+helm_deployment_type: host
+
+# K8s image pull policy (imagePullPolicy)
+k8s_image_pull_policy: IfNotPresent
+
+# Kubernetes dashboard
+# RBAC required. see docs/getting-started.md for access details.
+dashboard_enabled: true
+
+# Monitoring apps for k8s
+efk_enabled: false
+
+# Helm deployment
+helm_enabled: false
+
+# Istio deployment
+istio_enabled: false
+
+# Local volume provisioner deployment
+local_volumes_enabled: false
+
+# Add Persistent Volumes Storage Class for corresponding cloud provider ( OpenStack is only supported now )
+persistent_volumes_enabled: false
+
+# Make a copy of kubeconfig on the host that runs Ansible in GITDIR/artifacts
+kubeconfig_localhost: true
+# Download kubectl onto the host that runs Ansible in GITDIR/artifacts
+kubectl_localhost: true
+artifacts_dir: "{{ ansible_env.HOME }}"
+
+# dnsmasq
+# dnsmasq_upstream_dns_servers:
+# - /resolvethiszone.with/10.0.4.250
+# - 8.8.8.8
+
+# Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created. (default true)
+# kubelet_cgroups_per_qos: true
+
+# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet.
+# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
+# kubelet_enforce_node_allocatable: pods
+
+## Supplementary addresses that can be added in kubernetes ssl keys.
+## That can be useful, for example, to set up a keepalived virtual IP
+# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3]
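For reference only (not part of the commit): with the defaults above, the ipaddr filter chains used for kube_apiserver_ip, dnsmasq_dns_server and skydns_server resolve to the first three host addresses of the 10.233.0.0/18 service network. A minimal sketch of the effective values:

    # Effective addresses derived from kube_service_addresses: 10.233.0.0/18
    kube_apiserver_ip: 10.233.0.1    # ipaddr('net') | ipaddr(1) | ipaddr('address')
    dnsmasq_dns_server: 10.233.0.2   # ipaddr('net') | ipaddr(2) | ipaddr('address')
    skydns_server: 10.233.0.3        # ipaddr('net') | ipaddr(3) | ipaddr('address')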
diff --git a/scenarios/k8-calico-nofeature/role/k8-calico-nofeature/tasks/main.yml b/scenarios/k8-calico-nofeature/role/k8-calico-nofeature/tasks/main.yml
new file mode 100644
index 0000000..5b2939f
--- /dev/null
+++ b/scenarios/k8-calico-nofeature/role/k8-calico-nofeature/tasks/main.yml
@@ -0,0 +1,14 @@
+##############################################################################
+# Copyright (c) 2018 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+
+- name: copy k8s-cluster.yml
+ copy:
+ src: "k8s-cluster.yml"
+ dest: "{{ remote_xci_path }}/.cache/repos/kubespray/opnfv_inventory/group_vars/k8s-cluster.yml"
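The task above is the whole scenario role: it simply drops the k8s-cluster.yml overrides into kubespray's opnfv_inventory group_vars before the installer runs. A minimal sketch of a playbook that would apply the role (the play target and the remote_xci_path value are assumptions for illustration, not part of this commit):

    ---
    - hosts: opnfv                           # hypothetical deployment host
      vars:
        remote_xci_path: /root/releng-xci    # assumed location; set by the XCI framework in practice
      roles:
        - role: k8-calico-nofeature          # resolved from scenarios/k8-calico-nofeature/role/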
diff --git a/scenarios/k8-canal-nofeature/role/k8-canal-nofeature/files/k8s-cluster.yml b/scenarios/k8-canal-nofeature/role/k8-canal-nofeature/files/k8s-cluster.yml
new file mode 100644
index 0000000..7646aef
--- /dev/null
+++ b/scenarios/k8-canal-nofeature/role/k8-canal-nofeature/files/k8s-cluster.yml
@@ -0,0 +1,292 @@
+# Valid bootstrap options (required): ubuntu, coreos, centos, none
+bootstrap_os: none
+
+# Directory where etcd data is stored
+etcd_data_dir: /var/lib/etcd
+
+# Directory where the binaries will be installed
+bin_dir: /usr/local/bin
+
+## The access_ip variable is used to define how other nodes should access
+## the node. This is used in flannel to allow other flannel nodes to see
+## this node for example. The access_ip is really useful in AWS and Google
+## environments where the nodes are accessed remotely by the "public" ip,
+## but don't know about that address themselves.
+#access_ip: 1.1.1.1
+
+### LOADBALANCING AND ACCESS MODES
+## Enable multiaccess to configure etcd clients to access all of the etcd members directly
+## as the "http://hostX:port, http://hostY:port, ..." and ignore the proxy loadbalancers.
+## This may be the case if clients support and loadbalance multiple etcd servers natively.
+#etcd_multiaccess: true
+
+## Internal loadbalancers for apiservers
+#loadbalancer_apiserver_localhost: true
+
+## Local loadbalancer should use this port instead, if defined.
+## Defaults to kube_apiserver_port (6443)
+#nginx_kube_apiserver_port: 8443
+
+### OTHER OPTIONAL VARIABLES
+## For some things, kubelet needs to load kernel modules. For example, dynamic kernel services are needed
+## for mounting persistent volumes into containers. These may not be loaded by preinstall kubernetes
+## processes. For example, ceph and rbd backed volumes. Set to true to allow kubelet to load kernel
+## modules.
+# kubelet_load_modules: false
+
+## Internal network total size. This is the prefix of the
+## entire network. Must be unused in your environment.
+#kube_network_prefix: 18
+
+## With calico it is possible to distribute routes with border routers of the datacenter.
+## Warning : enabling router peering will disable calico's default behavior ('node mesh').
+## The subnets of each node will be distributed by the datacenter router
+#peer_with_router: false
+
+## Upstream dns servers used by dnsmasq
+#upstream_dns_servers:
+# - 8.8.8.8
+# - 8.8.4.4
+
+## There are some changes specific to the cloud providers
+## for instance we need to encapsulate packets with some network plugins
+## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', or 'external'
+## When openstack is used make sure to source in the openstack credentials
+## like you would do when using nova-client before starting the playbook.
+#cloud_provider:
+
+## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (https://github.com/kubernetes/kubernetes/issues/50461)
+#openstack_blockstorage_version: "v1/v2/auto (default)"
+## When OpenStack is used, if LBaaSv2 is available you can enable it with the following variables.
+#openstack_lbaas_enabled: True
+#openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
+#openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
+#openstack_lbaas_create_monitor: "yes"
+#openstack_lbaas_monitor_delay: "1m"
+#openstack_lbaas_monitor_timeout: "30s"
+#openstack_lbaas_monitor_max_retries: "3"
+
+## Uncomment to enable experimental kubeadm deployment mode
+#kubeadm_enabled: false
+#kubeadm_token_first: "{{ lookup('password', 'credentials/kubeadm_token_first length=6 chars=ascii_lowercase,digits') }}"
+#kubeadm_token_second: "{{ lookup('password', 'credentials/kubeadm_token_second length=16 chars=ascii_lowercase,digits') }}"
+#kubeadm_token: "{{ kubeadm_token_first }}.{{ kubeadm_token_second }}"
+#
+## Set these proxy values in order to update package manager and docker daemon to use proxies
+#http_proxy: ""
+#https_proxy: ""
+## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
+#no_proxy: ""
+
+## Uncomment this if you want to force overlay/overlay2 as docker storage driver
+## Please note that overlay2 is only supported on newer kernels
+#docker_storage_options: -s overlay2
+
+# Uncomment this if you have more than 3 nameservers, then we'll only use the first 3.
+#docker_dns_servers_strict: false
+
+## Default packages to install within the cluster, f.e:
+#kpm_packages:
+# - name: kube-system/grafana
+
+## Certificate Management
+## This setting determines whether certs are generated via scripts or whether a
+## cluster of Hashicorp's Vault is started to issue certificates (using etcd
+## as a backend). Options are "script" or "vault"
+#cert_management: script
+
+# Set to true to allow pre-checks to fail and continue deployment
+#ignore_assert_errors: false
+
+## Etcd auto compaction retention for mvcc key value store in hour
+#etcd_compaction_retention: 0
+
+## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics.
+#etcd_metrics: basic
+
+
+# Kubernetes configuration dirs and system namespace.
+# Those are where all the additional config stuff goes
+# kubernetes normally puts in /srv/kubernetes.
+# This puts them in a sane location and namespace.
+# Editing those values will almost surely break something.
+kube_config_dir: /etc/kubernetes
+kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
+kube_manifest_dir: "{{ kube_config_dir }}/manifests"
+system_namespace: kube-system
+
+# Logging directory (sysvinit systems)
+kube_log_dir: "/var/log/kubernetes"
+
+# This is where all the cert scripts and certs will be located
+kube_cert_dir: "{{ kube_config_dir }}/ssl"
+
+# This is where all of the bearer tokens will be stored
+kube_token_dir: "{{ kube_config_dir }}/tokens"
+
+# This is where to save basic auth file
+kube_users_dir: "{{ kube_config_dir }}/users"
+
+kube_api_anonymous_auth: false
+
+## Change this to use another Kubernetes version, e.g. a current beta release
+#kube_version: v1.9.0
+
+# Where the binaries will be downloaded.
+# Note: ensure that you've enough disk space (about 1G)
+local_release_dir: "/tmp/releases"
+# Random shifts for retrying failed ops like pushing/downloading
+retry_stagger: 5
+
+# This is the group that the cert creation scripts chgrp the
+# cert files to. Not really changeable...
+kube_cert_group: kube-cert
+
+# Cluster Loglevel configuration
+kube_log_level: 2
+
+# Users to create for basic auth in Kubernetes API via HTTP
+# Optionally add groups for user
+kube_api_pwd: "{{ lookup('password', 'credentials/kube_user length=15 chars=ascii_letters,digits') }}"
+kube_users:
+ kube:
+ pass: "{{kube_api_pwd}}"
+ role: admin
+ groups:
+ - system:masters
+
+## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth)
+#kube_oidc_auth: false
+kube_basic_auth: true
+#kube_token_auth: false
+
+
+## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/
+## To use OpenID you have to additionally deploy an OpenID Provider (e.g. Dex, Keycloak, ...)
+
+# kube_oidc_url: https:// ...
+# kube_oidc_client_id: kubernetes
+## Optional settings for OIDC
+# kube_oidc_ca_file: {{ kube_cert_dir }}/ca.pem
+# kube_oidc_username_claim: sub
+# kube_oidc_groups_claim: groups
+
+
+# Choose network plugin (calico, contiv, weave or flannel)
+# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
+kube_network_plugin: canal
+
+# weave's network password for encryption
+# if null then no network encryption
+# you can use --extra-vars to pass the password in command line
+weave_password: EnterPasswordHere
+
+# Weave uses consensus mode by default
+# Enabling seed mode allows you to dynamically add or remove hosts
+# https://www.weave.works/docs/net/latest/ipam/
+weave_mode_seed: false
+
+# These two variables are automatically changed by the weave role; do not manually change these values
+# To reset values :
+# weave_seed: uninitialized
+# weave_peers: uninitialized
+weave_seed: uninitialized
+weave_peers: uninitialized
+
+# Enable kubernetes network policies
+enable_network_policy: false
+
+# Kubernetes internal network for services, unused block of space.
+kube_service_addresses: 10.233.0.0/18
+
+# internal network. When used, it will assign IP
+# addresses from this range to individual pods.
+# This network must be unused in your network infrastructure!
+kube_pods_subnet: 10.233.64.0/18
+
+# internal network node size allocation (optional). This is the size allocated
+# to each node on your network. With these defaults you should have
+# room for 4096 nodes with 254 pods per node.
+kube_network_node_prefix: 24
+
+# The port the API Server will be listening on.
+kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
+kube_apiserver_port: 6443 # (https)
+kube_apiserver_insecure_port: 8080 # (http)
+
+# DNS configuration.
+# Kubernetes cluster name, also will be used as DNS domain
+cluster_name: cluster.local
+# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
+ndots: 2
+# Can be dnsmasq_kubedns, kubedns or none
+dns_mode: kubedns
+# Can be docker_dns, host_resolvconf or none
+resolvconf_mode: docker_dns
+# Deploy netchecker app to verify DNS resolve as an HTTP service
+deploy_netchecker: false
+# Ip address of the kubernetes skydns service
+skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
+dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
+dns_domain: "{{ cluster_name }}"
+
+# Path used to store Docker data
+docker_daemon_graph: "/var/lib/docker"
+
+## A string of extra options to pass to the docker daemon.
+## This string should be exactly as you wish it to appear.
+## An obvious use case is allowing insecure-registry access
+## to self hosted registries like so:
+
+docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} {{ docker_log_opts }}"
+docker_bin_dir: "/usr/bin"
+
+# Settings for containerized control plane (etcd/kubelet/secrets)
+etcd_deployment_type: docker
+kubelet_deployment_type: host
+vault_deployment_type: docker
+helm_deployment_type: host
+
+# K8s image pull policy (imagePullPolicy)
+k8s_image_pull_policy: IfNotPresent
+
+# Kubernetes dashboard
+# RBAC required. see docs/getting-started.md for access details.
+dashboard_enabled: true
+
+# Monitoring apps for k8s
+efk_enabled: false
+
+# Helm deployment
+helm_enabled: false
+
+# Istio deployment
+istio_enabled: false
+
+# Local volume provisioner deployment
+local_volumes_enabled: false
+
+# Add Persistent Volumes Storage Class for corresponding cloud provider ( OpenStack is only supported now )
+persistent_volumes_enabled: false
+
+# Make a copy of kubeconfig on the host that runs Ansible in GITDIR/artifacts
+kubeconfig_localhost: true
+# Download kubectl onto the host that runs Ansible in GITDIR/artifacts
+kubectl_localhost: true
+artifacts_dir: "{{ ansible_env.HOME }}"
+
+# dnsmasq
+# dnsmasq_upstream_dns_servers:
+# - /resolvethiszone.with/10.0.4.250
+# - 8.8.8.8
+
+# Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created. (default true)
+# kubelet_cgroups_per_qos: true
+
+# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet.
+# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
+# kubelet_enforce_node_allocatable: pods
+
+## Supplementary addresses that can be added in kubernetes ssl keys.
+## That can be useful, for example, to set up a keepalived virtual IP
+# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3]
diff --git a/scenarios/k8-canal-nofeature/role/k8-canal-nofeature/tasks/main.yml b/scenarios/k8-canal-nofeature/role/k8-canal-nofeature/tasks/main.yml
new file mode 100644
index 0000000..5b2939f
--- /dev/null
+++ b/scenarios/k8-canal-nofeature/role/k8-canal-nofeature/tasks/main.yml
@@ -0,0 +1,14 @@
+##############################################################################
+# Copyright (c) 2018 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+
+- name: copy k8s-cluster.yml
+ copy:
+ src: "k8s-cluster.yml"
+ dest: "{{ remote_xci_path }}/.cache/repos/kubespray/opnfv_inventory/group_vars/k8s-cluster.yml"
diff --git a/scenarios/k8-flannel-nofeature/role/k8-flannel-nofeature/files/k8-cluster.yml b/scenarios/k8-flannel-nofeature/role/k8-flannel-nofeature/files/k8-cluster.yml
new file mode 100644
index 0000000..3c3dc5d
--- /dev/null
+++ b/scenarios/k8-flannel-nofeature/role/k8-flannel-nofeature/files/k8-cluster.yml
@@ -0,0 +1,292 @@
+# Valid bootstrap options (required): ubuntu, coreos, centos, none
+bootstrap_os: none
+
+# Directory where etcd data is stored
+etcd_data_dir: /var/lib/etcd
+
+# Directory where the binaries will be installed
+bin_dir: /usr/local/bin
+
+## The access_ip variable is used to define how other nodes should access
+## the node. This is used in flannel to allow other flannel nodes to see
+## this node for example. The access_ip is really useful in AWS and Google
+## environments where the nodes are accessed remotely by the "public" ip,
+## but don't know about that address themselves.
+#access_ip: 1.1.1.1
+
+### LOADBALANCING AND ACCESS MODES
+## Enable multiaccess to configure etcd clients to access all of the etcd members directly
+## as the "http://hostX:port, http://hostY:port, ..." and ignore the proxy loadbalancers.
+## This may be the case if clients support and loadbalance multiple etcd servers natively.
+#etcd_multiaccess: true
+
+## Internal loadbalancers for apiservers
+#loadbalancer_apiserver_localhost: true
+
+## Local loadbalancer should use this port instead, if defined.
+## Defaults to kube_apiserver_port (6443)
+#nginx_kube_apiserver_port: 8443
+
+### OTHER OPTIONAL VARIABLES
+## For some things, kubelet needs to load kernel modules. For example, dynamic kernel services are needed
+## for mounting persistent volumes into containers. These may not be loaded by preinstall kubernetes
+## processes. For example, ceph and rbd backed volumes. Set to true to allow kubelet to load kernel
+## modules.
+# kubelet_load_modules: false
+
+## Internal network total size. This is the prefix of the
+## entire network. Must be unused in your environment.
+#kube_network_prefix: 18
+
+## With calico it is possible to distribute routes with border routers of the datacenter.
+## Warning : enabling router peering will disable calico's default behavior ('node mesh').
+## The subnets of each node will be distributed by the datacenter router
+#peer_with_router: false
+
+## Upstream dns servers used by dnsmasq
+#upstream_dns_servers:
+# - 8.8.8.8
+# - 8.8.4.4
+
+## There are some changes specific to the cloud providers
+## for instance we need to encapsulate packets with some network plugins
+## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', or 'external'
+## When openstack is used make sure to source in the openstack credentials
+## like you would do when using nova-client before starting the playbook.
+#cloud_provider:
+
+## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (https://github.com/kubernetes/kubernetes/issues/50461)
+#openstack_blockstorage_version: "v1/v2/auto (default)"
+## When OpenStack is used, if LBaaSv2 is available you can enable it with the following variables.
+#openstack_lbaas_enabled: True
+#openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
+#openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
+#openstack_lbaas_create_monitor: "yes"
+#openstack_lbaas_monitor_delay: "1m"
+#openstack_lbaas_monitor_timeout: "30s"
+#openstack_lbaas_monitor_max_retries: "3"
+
+## Uncomment to enable experimental kubeadm deployment mode
+#kubeadm_enabled: false
+#kubeadm_token_first: "{{ lookup('password', 'credentials/kubeadm_token_first length=6 chars=ascii_lowercase,digits') }}"
+#kubeadm_token_second: "{{ lookup('password', 'credentials/kubeadm_token_second length=16 chars=ascii_lowercase,digits') }}"
+#kubeadm_token: "{{ kubeadm_token_first }}.{{ kubeadm_token_second }}"
+#
+## Set these proxy values in order to update package manager and docker daemon to use proxies
+#http_proxy: ""
+#https_proxy: ""
+## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
+#no_proxy: ""
+
+## Uncomment this if you want to force overlay/overlay2 as docker storage driver
+## Please note that overlay2 is only supported on newer kernels
+#docker_storage_options: -s overlay2
+
+# Uncomment this if you have more than 3 nameservers, then we'll only use the first 3.
+#docker_dns_servers_strict: false
+
+## Default packages to install within the cluster, f.e:
+#kpm_packages:
+# - name: kube-system/grafana
+
+## Certificate Management
+## This setting determines whether certs are generated via scripts or whether a
+## cluster of Hashicorp's Vault is started to issue certificates (using etcd
+## as a backend). Options are "script" or "vault"
+#cert_management: script
+
+# Set to true to allow pre-checks to fail and continue deployment
+#ignore_assert_errors: false
+
+## Etcd auto compaction retention for mvcc key value store in hour
+#etcd_compaction_retention: 0
+
+## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics.
+#etcd_metrics: basic
+
+
+# Kubernetes configuration dirs and system namespace.
+# Those are where all the additional config stuff goes
+# kubernetes normally puts in /srv/kubernetes.
+# This puts them in a sane location and namespace.
+# Editing those values will almost surely break something.
+kube_config_dir: /etc/kubernetes
+kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
+kube_manifest_dir: "{{ kube_config_dir }}/manifests"
+system_namespace: kube-system
+
+# Logging directory (sysvinit systems)
+kube_log_dir: "/var/log/kubernetes"
+
+# This is where all the cert scripts and certs will be located
+kube_cert_dir: "{{ kube_config_dir }}/ssl"
+
+# This is where all of the bearer tokens will be stored
+kube_token_dir: "{{ kube_config_dir }}/tokens"
+
+# This is where to save basic auth file
+kube_users_dir: "{{ kube_config_dir }}/users"
+
+kube_api_anonymous_auth: false
+
+## Change this to use another Kubernetes version, e.g. a current beta release
+#kube_version: v1.9.0
+
+# Where the binaries will be downloaded.
+# Note: ensure that you've enough disk space (about 1G)
+local_release_dir: "/tmp/releases"
+# Random shifts for retrying failed ops like pushing/downloading
+retry_stagger: 5
+
+# This is the group that the cert creation scripts chgrp the
+# cert files to. Not really changeable...
+kube_cert_group: kube-cert
+
+# Cluster Loglevel configuration
+kube_log_level: 2
+
+# Users to create for basic auth in Kubernetes API via HTTP
+# Optionally add groups for user
+kube_api_pwd: "{{ lookup('password', 'credentials/kube_user length=15 chars=ascii_letters,digits') }}"
+kube_users:
+ kube:
+ pass: "{{kube_api_pwd}}"
+ role: admin
+ groups:
+ - system:masters
+
+## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth)
+#kube_oidc_auth: false
+kube_basic_auth: true
+#kube_token_auth: false
+
+
+## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/
+## To use OpenID you have to additionally deploy an OpenID Provider (e.g. Dex, Keycloak, ...)
+
+# kube_oidc_url: https:// ...
+# kube_oidc_client_id: kubernetes
+## Optional settings for OIDC
+# kube_oidc_ca_file: {{ kube_cert_dir }}/ca.pem
+# kube_oidc_username_claim: sub
+# kube_oidc_groups_claim: groups
+
+
+# Choose network plugin (calico, contiv, weave or flannel)
+# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
+kube_network_plugin: flannel
+
+# weave's network password for encryption
+# if null then no network encryption
+# you can use --extra-vars to pass the password in command line
+weave_password: EnterPasswordHere
+
+# Weave uses consensus mode by default
+# Enabling seed mode allows you to dynamically add or remove hosts
+# https://www.weave.works/docs/net/latest/ipam/
+weave_mode_seed: false
+
+# These two variables are automatically changed by the weave role; do not manually change these values
+# To reset values :
+# weave_seed: uninitialized
+# weave_peers: uninitialized
+weave_seed: uninitialized
+weave_peers: uninitialized
+
+# Enable kubernetes network policies
+enable_network_policy: false
+
+# Kubernetes internal network for services, unused block of space.
+kube_service_addresses: 10.233.0.0/18
+
+# internal network. When used, it will assign IP
+# addresses from this range to individual pods.
+# This network must be unused in your network infrastructure!
+kube_pods_subnet: 10.233.64.0/18
+
+# internal network node size allocation (optional). This is the size allocated
+# to each node on your network. With these defaults you should have
+# room for 4096 nodes with 254 pods per node.
+kube_network_node_prefix: 24
+
+# The port the API Server will be listening on.
+kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
+kube_apiserver_port: 6443 # (https)
+kube_apiserver_insecure_port: 8080 # (http)
+
+# DNS configuration.
+# Kubernetes cluster name, also will be used as DNS domain
+cluster_name: cluster.local
+# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
+ndots: 2
+# Can be dnsmasq_kubedns, kubedns or none
+dns_mode: kubedns
+# Can be docker_dns, host_resolvconf or none
+resolvconf_mode: docker_dns
+# Deploy netchecker app to verify DNS resolve as an HTTP service
+deploy_netchecker: false
+# Ip address of the kubernetes skydns service
+skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
+dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
+dns_domain: "{{ cluster_name }}"
+
+# Path used to store Docker data
+docker_daemon_graph: "/var/lib/docker"
+
+## A string of extra options to pass to the docker daemon.
+## This string should be exactly as you wish it to appear.
+## An obvious use case is allowing insecure-registry access
+## to self hosted registries like so:
+
+docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} {{ docker_log_opts }}"
+docker_bin_dir: "/usr/bin"
+
+# Settings for containerized control plane (etcd/kubelet/secrets)
+etcd_deployment_type: docker
+kubelet_deployment_type: host
+vault_deployment_type: docker
+helm_deployment_type: host
+
+# K8s image pull policy (imagePullPolicy)
+k8s_image_pull_policy: IfNotPresent
+
+# Kubernetes dashboard
+# RBAC required. see docs/getting-started.md for access details.
+dashboard_enabled: true
+
+# Monitoring apps for k8s
+efk_enabled: false
+
+# Helm deployment
+helm_enabled: false
+
+# Istio deployment
+istio_enabled: false
+
+# Local volume provisioner deployment
+local_volumes_enabled: false
+
+# Add Persistent Volumes Storage Class for corresponding cloud provider ( OpenStack is only supported now )
+persistent_volumes_enabled: false
+
+# Make a copy of kubeconfig on the host that runs Ansible in GITDIR/artifacts
+kubeconfig_localhost: true
+# Download kubectl onto the host that runs Ansible in GITDIR/artifacts
+kubectl_localhost: true
+artifacts_dir: "{{ ansible_env.HOME }}"
+
+# dnsmasq
+# dnsmasq_upstream_dns_servers:
+# - /resolvethiszone.with/10.0.4.250
+# - 8.8.8.8
+
+# Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created. (default true)
+# kubelet_cgroups_per_qos: true
+
+# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet.
+# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
+# kubelet_enforce_node_allocatable: pods
+
+## Supplementary addresses that can be added in kubernetes ssl keys.
+## That can be useful, for example, to set up a keepalived virtual IP
+# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3]
diff --git a/scenarios/k8-flannel-nofeature/role/k8-flannel-nofeature/tasks/main.yml b/scenarios/k8-flannel-nofeature/role/k8-flannel-nofeature/tasks/main.yml
new file mode 100644
index 0000000..5efd7c8
--- /dev/null
+++ b/scenarios/k8-flannel-nofeature/role/k8-flannel-nofeature/tasks/main.yml
@@ -0,0 +1,14 @@
+##############################################################################
+# Copyright (c) 2018 taseer94@gmail.com & others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+
+- name: copy the k8-cluster config file
+ copy:
+ src: k8-cluster.yml
+ dest: "{{ remote_xci_path }}/.cache/repos/kubespray/opnfv_inventory/group_vars/k8s-cluster.yml"
diff --git a/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature/files/k8s-cluster.yml b/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature/files/k8s-cluster.yml
new file mode 100644
index 0000000..614d784
--- /dev/null
+++ b/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature/files/k8s-cluster.yml
@@ -0,0 +1,292 @@
+# Valid bootstrap options (required): ubuntu, coreos, centos, none
+bootstrap_os: none
+
+# Directory where etcd data is stored
+etcd_data_dir: /var/lib/etcd
+
+# Directory where the binaries will be installed
+bin_dir: /usr/local/bin
+
+## The access_ip variable is used to define how other nodes should access
+## the node. This is used in flannel to allow other flannel nodes to see
+## this node for example. The access_ip is really useful in AWS and Google
+## environments where the nodes are accessed remotely by the "public" ip,
+## but don't know about that address themselves.
+#access_ip: 1.1.1.1
+
+### LOADBALANCING AND ACCESS MODES
+## Enable multiaccess to configure etcd clients to access all of the etcd members directly
+## as the "http://hostX:port, http://hostY:port, ..." and ignore the proxy loadbalancers.
+## This may be the case if clients support and loadbalance multiple etcd servers natively.
+#etcd_multiaccess: true
+
+## Internal loadbalancers for apiservers
+#loadbalancer_apiserver_localhost: true
+
+## Local loadbalancer should use this port instead, if defined.
+## Defaults to kube_apiserver_port (6443)
+#nginx_kube_apiserver_port: 8443
+
+### OTHER OPTIONAL VARIABLES
+## For some things, kubelet needs to load kernel modules. For example, dynamic kernel services are needed
+## for mounting persistent volumes into containers. These may not be loaded by preinstall kubernetes
+## processes. For example, ceph and rbd backed volumes. Set to true to allow kubelet to load kernel
+## modules.
+# kubelet_load_modules: false
+
+## Internal network total size. This is the prefix of the
+## entire network. Must be unused in your environment.
+#kube_network_prefix: 18
+
+## With calico it is possible to distribute routes with border routers of the datacenter.
+## Warning : enabling router peering will disable calico's default behavior ('node mesh').
+## The subnets of each node will be distributed by the datacenter router
+#peer_with_router: false
+
+## Upstream dns servers used by dnsmasq
+#upstream_dns_servers:
+# - 8.8.8.8
+# - 8.8.4.4
+
+## There are some changes specific to the cloud providers
+## for instance we need to encapsulate packets with some network plugins
+## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', or 'external'
+## When openstack is used make sure to source in the openstack credentials
+## like you would do when using nova-client before starting the playbook.
+#cloud_provider:
+
+## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (https://github.com/kubernetes/kubernetes/issues/50461)
+#openstack_blockstorage_version: "v1/v2/auto (default)"
+## When OpenStack is used, if LBaaSv2 is available you can enable it with the following variables.
+#openstack_lbaas_enabled: True
+#openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
+#openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
+#openstack_lbaas_create_monitor: "yes"
+#openstack_lbaas_monitor_delay: "1m"
+#openstack_lbaas_monitor_timeout: "30s"
+#openstack_lbaas_monitor_max_retries: "3"
+
+## Uncomment to enable experimental kubeadm deployment mode
+#kubeadm_enabled: false
+#kubeadm_token_first: "{{ lookup('password', 'credentials/kubeadm_token_first length=6 chars=ascii_lowercase,digits') }}"
+#kubeadm_token_second: "{{ lookup('password', 'credentials/kubeadm_token_second length=16 chars=ascii_lowercase,digits') }}"
+#kubeadm_token: "{{ kubeadm_token_first }}.{{ kubeadm_token_second }}"
+#
+## Set these proxy values in order to update package manager and docker daemon to use proxies
+#http_proxy: ""
+#https_proxy: ""
+## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
+#no_proxy: ""
+
+## Uncomment this if you want to force overlay/overlay2 as docker storage driver
+## Please note that overlay2 is only supported on newer kernels
+#docker_storage_options: -s overlay2
+
+# Uncomment this if you have more than 3 nameservers, then we'll only use the first 3.
+#docker_dns_servers_strict: false
+
+## Default packages to install within the cluster, f.e:
+#kpm_packages:
+# - name: kube-system/grafana
+
+## Certificate Management
+## This setting determines whether certs are generated via scripts or whether a
+## cluster of Hashicorp's Vault is started to issue certificates (using etcd
+## as a backend). Options are "script" or "vault"
+#cert_management: script
+
+# Set to true to allow pre-checks to fail and continue deployment
+#ignore_assert_errors: false
+
+## Etcd auto compaction retention for mvcc key value store in hour
+#etcd_compaction_retention: 0
+
+## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics.
+#etcd_metrics: basic
+
+
+# Kubernetes configuration dirs and system namespace.
+# Those are where all the additional config stuff goes
+# kubernetes normally puts in /srv/kubernetes.
+# This puts them in a sane location and namespace.
+# Editing those values will almost surely break something.
+kube_config_dir: /etc/kubernetes
+kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
+kube_manifest_dir: "{{ kube_config_dir }}/manifests"
+system_namespace: kube-system
+
+# Logging directory (sysvinit systems)
+kube_log_dir: "/var/log/kubernetes"
+
+# This is where all the cert scripts and certs will be located
+kube_cert_dir: "{{ kube_config_dir }}/ssl"
+
+# This is where all of the bearer tokens will be stored
+kube_token_dir: "{{ kube_config_dir }}/tokens"
+
+# This is where to save basic auth file
+kube_users_dir: "{{ kube_config_dir }}/users"
+
+kube_api_anonymous_auth: false
+
+## Change this to use another Kubernetes version, e.g. a current beta release
+#kube_version: v1.9.0
+
+# Where the binaries will be downloaded.
+# Note: ensure that you've enough disk space (about 1G)
+local_release_dir: "/tmp/releases"
+# Random shifts for retrying failed ops like pushing/downloading
+retry_stagger: 5
+
+# This is the group that the cert creation scripts chgrp the
+# cert files to. Not really changeable...
+kube_cert_group: kube-cert
+
+# Cluster Loglevel configuration
+kube_log_level: 2
+
+# Users to create for basic auth in Kubernetes API via HTTP
+# Optionally add groups for user
+kube_api_pwd: "{{ lookup('password', 'credentials/kube_user length=15 chars=ascii_letters,digits') }}"
+kube_users:
+ kube:
+ pass: "{{kube_api_pwd}}"
+ role: admin
+ groups:
+ - system:masters
+
+## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth)
+#kube_oidc_auth: false
+kube_basic_auth: true
+#kube_token_auth: false
+
+
+## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/
+## To use OpenID you have to additionally deploy an OpenID Provider (e.g. Dex, Keycloak, ...)
+
+# kube_oidc_url: https:// ...
+# kube_oidc_client_id: kubernetes
+## Optional settings for OIDC
+# kube_oidc_ca_file: {{ kube_cert_dir }}/ca.pem
+# kube_oidc_username_claim: sub
+# kube_oidc_groups_claim: groups
+
+
+# Choose network plugin (calico, contiv, weave or flannel)
+# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
+kube_network_plugin: cloud
+
+# weave's network password for encryption
+# if null then no network encryption
+# you can use --extra-vars to pass the password in command line
+weave_password: EnterPasswordHere
+
+# Weave uses consensus mode by default
+# Enabling seed mode allows you to dynamically add or remove hosts
+# https://www.weave.works/docs/net/latest/ipam/
+weave_mode_seed: false
+
+# These two variables are automatically changed by the weave role; do not manually change these values
+# To reset values :
+# weave_seed: uninitialized
+# weave_peers: uninitialized
+weave_seed: uninitialized
+weave_peers: uninitialized
+
+# Enable kubernetes network policies
+enable_network_policy: false
+
+# Kubernetes internal network for services, unused block of space.
+kube_service_addresses: 10.233.0.0/18
+
+# internal network. When used, it will assign IP
+# addresses from this range to individual pods.
+# This network must be unused in your network infrastructure!
+kube_pods_subnet: 10.233.64.0/18
+
+# internal network node size allocation (optional). This is the size allocated
+# to each node on your network. With these defaults you should have
+# room for 4096 nodes with 254 pods per node.
+kube_network_node_prefix: 24
+
+# The port the API Server will be listening on.
+kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
+kube_apiserver_port: 6443 # (https)
+kube_apiserver_insecure_port: 8080 # (http)
+
+# DNS configuration.
+# Kubernetes cluster name, also will be used as DNS domain
+cluster_name: cluster.local
+# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
+ndots: 2
+# Can be dnsmasq_kubedns, kubedns or none
+dns_mode: kubedns
+# Can be docker_dns, host_resolvconf or none
+resolvconf_mode: docker_dns
+# Deploy netchecker app to verify DNS resolve as an HTTP service
+deploy_netchecker: false
+# Ip address of the kubernetes skydns service
+skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
+dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
+dns_domain: "{{ cluster_name }}"
+
+# Path used to store Docker data
+docker_daemon_graph: "/var/lib/docker"
+
+## A string of extra options to pass to the docker daemon.
+## This string should be exactly as you wish it to appear.
+## An obvious use case is allowing insecure-registry access
+## to self hosted registries like so:
+
+docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} {{ docker_log_opts }}"
+docker_bin_dir: "/usr/bin"
+
+# Settings for containerized control plane (etcd/kubelet/secrets)
+etcd_deployment_type: docker
+kubelet_deployment_type: host
+vault_deployment_type: docker
+helm_deployment_type: host
+
+# K8s image pull policy (imagePullPolicy)
+k8s_image_pull_policy: IfNotPresent
+
+# Kubernetes dashboard
+# RBAC required. see docs/getting-started.md for access details.
+dashboard_enabled: true
+
+# Monitoring apps for k8s
+efk_enabled: false
+
+# Helm deployment
+helm_enabled: false
+
+# Istio deployment
+istio_enabled: false
+
+# Local volume provisioner deployment
+local_volumes_enabled: false
+
+# Add Persistent Volumes Storage Class for corresponding cloud provider ( OpenStack is only supported now )
+persistent_volumes_enabled: false
+
+# Make a copy of kubeconfig on the host that runs Ansible in GITDIR/artifacts
+kubeconfig_localhost: true
+# Download kubectl onto the host that runs Ansible in GITDIR/artifacts
+kubectl_localhost: true
+artifacts_dir: "{{ ansible_env.HOME }}"
+
+# dnsmasq
+# dnsmasq_upstream_dns_servers:
+# - /resolvethiszone.with/10.0.4.250
+# - 8.8.8.8
+
+# Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created. (default true)
+# kubelet_cgroups_per_qos: true
+
+# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet.
+# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
+# kubelet_enforce_node_allocatable: pods
+
+## Supplementary addresses that can be added in kubernetes ssl keys.
+## That can be useful, for example, to set up a keepalived virtual IP
+# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3]
diff --git a/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature/tasks/.gitkeep b/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature/tasks/.gitkeep
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature/tasks/.gitkeep
diff --git a/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature/tasks/main.yml b/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature/tasks/main.yml
new file mode 100644
index 0000000..5b2939f
--- /dev/null
+++ b/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature/tasks/main.yml
@@ -0,0 +1,14 @@
+##############################################################################
+# Copyright (c) 2018 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+
+- name: copy k8s-cluster.yml
+ copy:
+ src: "k8s-cluster.yml"
+ dest: "{{ remote_xci_path }}/.cache/repos/kubespray/opnfv_inventory/group_vars/k8s-cluster.yml"
diff --git a/scenarios/os-nosdn-nofeature/README.rst b/scenarios/os-nosdn-nofeature/README.rst
new file mode 100644
index 0000000..dcdc83f
--- /dev/null
+++ b/scenarios/os-nosdn-nofeature/README.rst
@@ -0,0 +1,2 @@
+This scenario is currently incomplete. In order for it to be
+complete, changes for CEPH must be moved here, combining OVS + CEPH.
diff --git a/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/ha/openstack_user_config.yml b/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/ha/openstack_user_config.yml
new file mode 100644
index 0000000..1aaf84d
--- /dev/null
+++ b/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/ha/openstack_user_config.yml
@@ -0,0 +1,255 @@
+---
+cidr_networks:
+ container: 172.29.236.0/22
+ tunnel: 172.29.240.0/22
+ storage: 172.29.244.0/22
+
+used_ips:
+ - "172.29.236.1,172.29.236.50"
+ - "172.29.240.1,172.29.240.50"
+ - "172.29.244.1,172.29.244.50"
+ - "172.29.248.1,172.29.248.50"
+ - "172.29.236.222"
+
+global_overrides:
+ internal_lb_vip_address: 172.29.236.222
+ external_lb_vip_address: 192.168.122.220
+ tunnel_bridge: "br-vxlan"
+ management_bridge: "br-mgmt"
+ provider_networks:
+ - network:
+ container_bridge: "br-mgmt"
+ container_type: "veth"
+ container_interface: "eth1"
+ ip_from_q: "container"
+ type: "raw"
+ group_binds:
+ - all_containers
+ - hosts
+ is_container_address: true
+ is_ssh_address: true
+ - network:
+ container_bridge: "br-vxlan"
+ container_type: "veth"
+ container_interface: "eth10"
+ ip_from_q: "tunnel"
+ type: "vxlan"
+ range: "1:1000"
+ net_name: "vxlan"
+ group_binds:
+ - neutron_openvswitch_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth12"
+ host_bind_override: "eth12"
+ type: "flat"
+ net_name: "flat"
+ group_binds:
+ - neutron_openvswitch_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth11"
+ type: "vlan"
+ range: "1:1"
+ net_name: "vlan"
+ group_binds:
+ - neutron_openvswitch_agent
+ - network:
+ container_bridge: "br-storage"
+ container_type: "veth"
+ container_interface: "eth2"
+ ip_from_q: "storage"
+ type: "raw"
+ group_binds:
+ - glance_api
+ - cinder_api
+ - cinder_volume
+ - nova_compute
+
+# ##
+# ## Infrastructure
+# ##
+
+# galera, memcache, rabbitmq, utility
+shared-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# repository (apt cache, python packages, etc)
+repo-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# load balancer
+# Ideally the load balancer should not use the Infrastructure hosts.
+# Dedicated hardware is best for improved performance and security.
+haproxy_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# rsyslog server
+# log_hosts:
+# log1:
+# ip: 172.29.236.14
+
+# ##
+# ## OpenStack
+# ##
+
+# keystone
+identity_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# cinder api services
+storage-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# glance
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+image_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ limit_container_types: glance
+ glance_nfs_client:
+ - server: "172.29.244.14"
+ remote_path: "/images"
+ local_path: "/var/lib/glance/images"
+ type: "nfs"
+ options: "_netdev,auto"
+ controller01:
+ ip: 172.29.236.12
+ container_vars:
+ limit_container_types: glance
+ glance_nfs_client:
+ - server: "172.29.244.14"
+ remote_path: "/images"
+ local_path: "/var/lib/glance/images"
+ type: "nfs"
+ options: "_netdev,auto"
+ controller02:
+ ip: 172.29.236.13
+ container_vars:
+ limit_container_types: glance
+ glance_nfs_client:
+ - server: "172.29.244.14"
+ remote_path: "/images"
+ local_path: "/var/lib/glance/images"
+ type: "nfs"
+ options: "_netdev,auto"
+
+# nova api, conductor, etc services
+compute-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# heat
+orchestration_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# horizon
+dashboard_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# neutron server, agents (L3, etc)
+network_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# nova hypervisors
+compute_hosts:
+ compute00:
+ ip: 172.29.236.14
+ compute01:
+ ip: 172.29.236.15
+
+# cinder volume hosts (NFS-backed)
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+storage_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ cinder_backends:
+ limit_container_types: cinder_volume
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.14"
+ share: "/volumes"
+ controller01:
+ ip: 172.29.236.12
+ container_vars:
+ cinder_backends:
+ limit_container_types: cinder_volume
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.14"
+ share: "/volumes"
+ controller02:
+ ip: 172.29.236.13
+ container_vars:
+ cinder_backends:
+ limit_container_types: cinder_volume
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.14"
+ share: "/volumes"
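The cinder_backends blocks above are consumed by the os_cinder role; with cinder.volume.drivers.nfs.NfsDriver, the file referenced by nfs_shares_config lists one export per line in host:/path form. A sketch of the resulting /etc/cinder/nfs_shares inside each cinder_volume container, assuming only the share defined above:

    # /etc/cinder/nfs_shares -- one NFS export per line
    172.29.244.14:/volumes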
diff --git a/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/mini/openstack_user_config.yml b/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/mini/openstack_user_config.yml
new file mode 100644
index 0000000..86b87c1
--- /dev/null
+++ b/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/mini/openstack_user_config.yml
@@ -0,0 +1,170 @@
+---
+cidr_networks:
+ container: 172.29.236.0/22
+ tunnel: 172.29.240.0/22
+ storage: 172.29.244.0/22
+
+used_ips:
+ - "172.29.236.1,172.29.236.50"
+ - "172.29.240.1,172.29.240.50"
+ - "172.29.244.1,172.29.244.50"
+ - "172.29.248.1,172.29.248.50"
+
+global_overrides:
+ internal_lb_vip_address: 172.29.236.11
+ external_lb_vip_address: 192.168.122.3
+ tunnel_bridge: "br-vxlan"
+ management_bridge: "br-mgmt"
+ provider_networks:
+ - network:
+ container_bridge: "br-mgmt"
+ container_type: "veth"
+ container_interface: "eth1"
+ ip_from_q: "container"
+ type: "raw"
+ group_binds:
+ - all_containers
+ - hosts
+ is_container_address: true
+ is_ssh_address: true
+ - network:
+ container_bridge: "br-vxlan"
+ container_type: "veth"
+ container_interface: "eth10"
+ ip_from_q: "tunnel"
+ type: "vxlan"
+ range: "1:1000"
+ net_name: "vxlan"
+ group_binds:
+ - neutron_openvswitch_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth12"
+ host_bind_override: "eth12"
+ type: "flat"
+ net_name: "flat"
+ group_binds:
+ - neutron_openvswitch_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth11"
+ type: "vlan"
+ range: "1:1"
+ net_name: "vlan"
+ group_binds:
+ - neutron_openvswitch_agent
+ - network:
+ container_bridge: "br-storage"
+ container_type: "veth"
+ container_interface: "eth2"
+ ip_from_q: "storage"
+ type: "raw"
+ group_binds:
+ - glance_api
+ - cinder_api
+ - cinder_volume
+ - nova_compute
+
+# ##
+# ## Infrastructure
+# ##
+
+# galera, memcache, rabbitmq, utility
+shared-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# repository (apt cache, python packages, etc)
+repo-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# load balancer
+# Ideally the load balancer should not use the Infrastructure hosts.
+# Dedicated hardware is best for improved performance and security.
+haproxy_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# rsyslog server
+# log_hosts:
+# log1:
+# ip: 172.29.236.14
+
+# ##
+# ## OpenStack
+# ##
+
+# keystone
+identity_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# cinder api services
+storage-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# glance
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+image_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ limit_container_types: glance
+ glance_nfs_client:
+ - server: "172.29.244.12"
+ remote_path: "/images"
+ local_path: "/var/lib/glance/images"
+ type: "nfs"
+ options: "_netdev,auto"
+
+# nova api, conductor, etc services
+compute-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# heat
+orchestration_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# horizon
+dashboard_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# neutron server, agents (L3, etc)
+network_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# nova hypervisors
+compute_hosts:
+ compute00:
+ ip: 172.29.236.12
+
+# cinder volume hosts (NFS-backed)
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+storage_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ cinder_backends:
+ limit_container_types: cinder_volume
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.12"
+ share: "/volumes"
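The glance_nfs_client entry above similarly turns into an NFS mount inside the glance container. A rough, fstab-style illustration of what the configured server, paths and options amount to (the actual mount handling is done by the os_glance role):

    # illustrative only -- the mount implied by glance_nfs_client above
    172.29.244.12:/images  /var/lib/glance/images  nfs  _netdev,auto  0 0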
diff --git a/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/noha/openstack_user_config.yml b/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/noha/openstack_user_config.yml
new file mode 100644
index 0000000..99b768c
--- /dev/null
+++ b/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/noha/openstack_user_config.yml
@@ -0,0 +1,172 @@
+---
+cidr_networks:
+ container: 172.29.236.0/22
+ tunnel: 172.29.240.0/22
+ storage: 172.29.244.0/22
+
+used_ips:
+ - "172.29.236.1,172.29.236.50"
+ - "172.29.240.1,172.29.240.50"
+ - "172.29.244.1,172.29.244.50"
+ - "172.29.248.1,172.29.248.50"
+
+global_overrides:
+ internal_lb_vip_address: 172.29.236.11
+ external_lb_vip_address: 192.168.122.3
+ tunnel_bridge: "br-vxlan"
+ management_bridge: "br-mgmt"
+ provider_networks:
+ - network:
+ container_bridge: "br-mgmt"
+ container_type: "veth"
+ container_interface: "eth1"
+ ip_from_q: "container"
+ type: "raw"
+ group_binds:
+ - all_containers
+ - hosts
+ is_container_address: true
+ is_ssh_address: true
+ - network:
+ container_bridge: "br-vxlan"
+ container_type: "veth"
+ container_interface: "eth10"
+ ip_from_q: "tunnel"
+ type: "vxlan"
+ range: "1:1000"
+ net_name: "vxlan"
+ group_binds:
+ - neutron_openvswitch_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth12"
+ host_bind_override: "eth12"
+ type: "flat"
+ net_name: "flat"
+ group_binds:
+ - neutron_openvswitch_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth11"
+ type: "vlan"
+ range: "1:1"
+ net_name: "vlan"
+ group_binds:
+ - neutron_openvswitch_agent
+ - network:
+ container_bridge: "br-storage"
+ container_type: "veth"
+ container_interface: "eth2"
+ ip_from_q: "storage"
+ type: "raw"
+ group_binds:
+ - glance_api
+ - cinder_api
+ - cinder_volume
+ - nova_compute
+
+# ##
+# ## Infrastructure
+# ##
+
+# galera, memcache, rabbitmq, utility
+shared-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# repository (apt cache, python packages, etc)
+repo-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# load balancer
+# Ideally the load balancer should not use the Infrastructure hosts.
+# Dedicated hardware is best for improved performance and security.
+haproxy_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# rsyslog server
+# log_hosts:
+# log1:
+# ip: 172.29.236.14
+
+# ##
+# ## OpenStack
+# ##
+
+# keystone
+identity_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# cinder api services
+storage-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# glance
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+image_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ limit_container_types: glance
+ glance_nfs_client:
+ - server: "172.29.244.12"
+ remote_path: "/images"
+ local_path: "/var/lib/glance/images"
+ type: "nfs"
+ options: "_netdev,auto"
+
+# nova api, conductor, etc services
+compute-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# heat
+orchestration_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# horizon
+dashboard_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# neutron server, agents (L3, etc)
+network_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# nova hypervisors
+compute_hosts:
+ compute00:
+ ip: 172.29.236.12
+ compute01:
+ ip: 172.29.236.13
+
+# cinder volume hosts (NFS-backed)
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+storage_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ cinder_backends:
+ limit_container_types: cinder_volume
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.12"
+ share: "/volumes"
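In the noha flavor the single controller also carries the internal load balancer VIP (172.29.236.11 above). A hedged smoke test once deployment finishes, assuming the standard OpenStack-Ansible Keystone endpoint on the internal VIP:

    # should return the Keystone v3 version document via the internal VIP
    curl -s http://172.29.236.11:5000/v3 | python -m json.tool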
diff --git a/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/user_variables_os-nosdn-nofeature.yml b/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/user_variables_os-nosdn-nofeature.yml
new file mode 100644
index 0000000..2f67854
--- /dev/null
+++ b/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/user_variables_os-nosdn-nofeature.yml
@@ -0,0 +1,35 @@
+---
+# Copyright (c) 2017 Ericsson AB and others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# ##
+# ## This file contains commonly used overrides for convenience. Please inspect
+# ## the defaults for each role to find additional override options.
+# ##
+
+# Ensure the openvswitch kernel module is loaded
+openstack_host_specific_kernel_modules:
+ - name: "openvswitch"
+ pattern: "CONFIG_OPENVSWITCH"
+ group: "network_hosts"
+
+# neutron specific config
+neutron_plugin_type: ml2.ovs
+
+neutron_ml2_drivers_type: "flat,vlan,vxlan"
+
+neutron_provider_networks:
+ network_flat_networks: "*"
+ network_types: "vxlan"
+  network_vxlan_ranges: "1:1000"
\ No newline at end of file
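The three neutron_* overrides above are picked up by the os_neutron role when it renders the ML2 plugin configuration. A rough, non-authoritative sketch of the ml2_conf.ini sections they correspond to (exact rendering and additional defaults come from the role itself):

    [ml2]
    type_drivers = flat,vlan,vxlan

    [ml2_type_flat]
    flat_networks = *

    [ml2_type_vxlan]
    vni_ranges = 1:1000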
diff --git a/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/tasks/main.yml b/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/tasks/main.yml
new file mode 100644
index 0000000..79aa3aa
--- /dev/null
+++ b/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/tasks/main.yml
@@ -0,0 +1,18 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2017 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+- name: copy user_variables_os-nosdn-nofeature.yml
+ copy:
+ src: "user_variables_os-nosdn-nofeature.yml"
+ dest: "{{openstack_osa_etc_path}}/user_variables_os-nosdn-nofeature.yml"
+- name: copy os-nosdn-nofeature scenario specific openstack_user_config.yml
+ copy:
+ src: "{{xci_flavor}}/openstack_user_config.yml"
+ dest: "{{openstack_osa_etc_path}}/openstack_user_config.yml"
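After this role runs, the OpenStack-Ansible configuration directory holds the two scenario files side by side. A sketch of the expected layout, assuming openstack_osa_etc_path points at the conventional /etc/openstack_deploy:

    /etc/openstack_deploy/
    ├── openstack_user_config.yml                 # copied from files/<xci_flavor>/
    └── user_variables_os-nosdn-nofeature.yml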
diff --git a/scenarios/os-odl-nofeature/.gitkeep b/scenarios/os-odl-nofeature/.gitkeep
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/scenarios/os-odl-nofeature/.gitkeep
diff --git a/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/ha/openstack_user_config.yml b/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/ha/openstack_user_config.yml
new file mode 100644
index 0000000..2ca5a98
--- /dev/null
+++ b/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/ha/openstack_user_config.yml
@@ -0,0 +1,256 @@
+---
+cidr_networks:
+ container: 172.29.236.0/22
+ tunnel: 172.29.240.0/22
+ storage: 172.29.244.0/22
+
+used_ips:
+ - "172.29.236.1,172.29.236.50"
+ - "172.29.240.1,172.29.240.50"
+ - "172.29.244.1,172.29.244.50"
+ - "172.29.248.1,172.29.248.50"
+ - "172.29.236.222"
+
+global_overrides:
+ internal_lb_vip_address: 172.29.236.222
+ external_lb_vip_address: 192.168.122.220
+ tunnel_bridge: "br-vxlan"
+ management_bridge: "br-mgmt"
+ provider_networks:
+ - network:
+ container_bridge: "br-mgmt"
+ container_type: "veth"
+ container_interface: "eth1"
+ ip_from_q: "container"
+ type: "raw"
+ group_binds:
+ - all_containers
+ - hosts
+ is_container_address: true
+ is_ssh_address: true
+ - network:
+ container_bridge: "br-vxlan"
+ container_type: "veth"
+ container_interface: "eth10"
+ ip_from_q: "tunnel"
+ type: "vxlan"
+ range: "1:1000"
+ net_name: "vxlan"
+ group_binds:
+ - neutron_openvswitch_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth12"
+ host_bind_override: "eth12"
+ type: "flat"
+ net_name: "flat"
+ group_binds:
+ - neutron_openvswitch_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth11"
+ host_bind_override: "eth12"
+ type: "vlan"
+ range: "102:199"
+ net_name: "physnet1"
+ group_binds:
+ - neutron_openvswitch_agent
+ - network:
+ container_bridge: "br-storage"
+ container_type: "veth"
+ container_interface: "eth2"
+ ip_from_q: "storage"
+ type: "raw"
+ group_binds:
+ - glance_api
+ - cinder_api
+ - cinder_volume
+ - nova_compute
+
+# ##
+# ## Infrastructure
+# ##
+
+# galera, memcache, rabbitmq, utility
+shared-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# repository (apt cache, python packages, etc)
+repo-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# load balancer
+# Ideally the load balancer should not use the Infrastructure hosts.
+# Dedicated hardware is best for improved performance and security.
+haproxy_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# rsyslog server
+# log_hosts:
+# log1:
+# ip: 172.29.236.14
+
+# ##
+# ## OpenStack
+# ##
+
+# keystone
+identity_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# cinder api services
+storage-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# glance
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+image_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ limit_container_types: glance
+ glance_nfs_client:
+ - server: "172.29.244.14"
+ remote_path: "/images"
+ local_path: "/var/lib/glance/images"
+ type: "nfs"
+ options: "_netdev,auto"
+ controller01:
+ ip: 172.29.236.12
+ container_vars:
+ limit_container_types: glance
+ glance_nfs_client:
+ - server: "172.29.244.14"
+ remote_path: "/images"
+ local_path: "/var/lib/glance/images"
+ type: "nfs"
+ options: "_netdev,auto"
+ controller02:
+ ip: 172.29.236.13
+ container_vars:
+ limit_container_types: glance
+ glance_nfs_client:
+ - server: "172.29.244.14"
+ remote_path: "/images"
+ local_path: "/var/lib/glance/images"
+ type: "nfs"
+ options: "_netdev,auto"
+
+# nova api, conductor, etc services
+compute-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# heat
+orchestration_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# horizon
+dashboard_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# neutron server, agents (L3, etc)
+network_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# nova hypervisors
+compute_hosts:
+ compute00:
+ ip: 172.29.236.14
+ compute01:
+ ip: 172.29.236.15
+
+# cinder volume hosts (NFS-backed)
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+storage_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ cinder_backends:
+ limit_container_types: cinder_volume
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.14"
+ share: "/volumes"
+ controller01:
+ ip: 172.29.236.12
+ container_vars:
+ cinder_backends:
+ limit_container_types: cinder_volume
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.14"
+ share: "/volumes"
+ controller02:
+ ip: 172.29.236.13
+ container_vars:
+ cinder_backends:
+ limit_container_types: cinder_volume
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.14"
+ share: "/volumes"
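Unlike the os-nosdn variant, the VLAN provider network here is named physnet1 and carries a 102:199 segmentation range. Once the cloud is up, that mapping can be exercised with a provider network along these lines (a sketch; segment 102 is simply the first value of the configured range):

    openstack network create provider-vlan-102 \
      --provider-network-type vlan \
      --provider-physical-network physnet1 \
      --provider-segment 102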
diff --git a/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/ha/user_variables_os-odl-nofeature-ha.yml b/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/ha/user_variables_os-odl-nofeature-ha.yml
new file mode 100644
index 0000000..25cd683
--- /dev/null
+++ b/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/ha/user_variables_os-odl-nofeature-ha.yml
@@ -0,0 +1,22 @@
+---
+# Copyright (c) 2017 Ericsson AB and others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# ##
+# ## This file contains commonly used overrides for convenience. Please inspect
+# ## the defaults for each role to find additional override options.
+# ##
+
+# Enable clustering for opendaylight
+cluster: true
\ No newline at end of file
diff --git a/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/mini/openstack_user_config.yml b/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/mini/openstack_user_config.yml
new file mode 100644
index 0000000..0f8ccd1
--- /dev/null
+++ b/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/mini/openstack_user_config.yml
@@ -0,0 +1,171 @@
+---
+cidr_networks:
+ container: 172.29.236.0/22
+ tunnel: 172.29.240.0/22
+ storage: 172.29.244.0/22
+
+used_ips:
+ - "172.29.236.1,172.29.236.50"
+ - "172.29.240.1,172.29.240.50"
+ - "172.29.244.1,172.29.244.50"
+ - "172.29.248.1,172.29.248.50"
+
+global_overrides:
+ internal_lb_vip_address: 172.29.236.11
+ external_lb_vip_address: 192.168.122.3
+ tunnel_bridge: "br-vxlan"
+ management_bridge: "br-mgmt"
+ provider_networks:
+ - network:
+ container_bridge: "br-mgmt"
+ container_type: "veth"
+ container_interface: "eth1"
+ ip_from_q: "container"
+ type: "raw"
+ group_binds:
+ - all_containers
+ - hosts
+ is_container_address: true
+ is_ssh_address: true
+ - network:
+ container_bridge: "br-vxlan"
+ container_type: "veth"
+ container_interface: "eth10"
+ ip_from_q: "tunnel"
+ type: "vxlan"
+ range: "1:1000"
+ net_name: "vxlan"
+ group_binds:
+ - neutron_openvswitch_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth12"
+ host_bind_override: "eth12"
+ type: "flat"
+ net_name: "flat"
+ group_binds:
+ - neutron_openvswitch_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth11"
+ host_bind_override: "eth12"
+ type: "vlan"
+ range: "102:199"
+ net_name: "physnet1"
+ group_binds:
+ - neutron_openvswitch_agent
+ - network:
+ container_bridge: "br-storage"
+ container_type: "veth"
+ container_interface: "eth2"
+ ip_from_q: "storage"
+ type: "raw"
+ group_binds:
+ - glance_api
+ - cinder_api
+ - cinder_volume
+ - nova_compute
+
+# ##
+# ## Infrastructure
+# ##
+
+# galera, memcache, rabbitmq, utility
+shared-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# repository (apt cache, python packages, etc)
+repo-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# load balancer
+# Ideally the load balancer should not use the Infrastructure hosts.
+# Dedicated hardware is best for improved performance and security.
+haproxy_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# rsyslog server
+# log_hosts:
+# log1:
+# ip: 172.29.236.14
+
+# ##
+# ## OpenStack
+# ##
+
+# keystone
+identity_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# cinder api services
+storage-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# glance
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+image_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ limit_container_types: glance
+ glance_nfs_client:
+ - server: "172.29.244.12"
+ remote_path: "/images"
+ local_path: "/var/lib/glance/images"
+ type: "nfs"
+ options: "_netdev,auto"
+
+# nova api, conductor, etc services
+compute-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# heat
+orchestration_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# horizon
+dashboard_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# neutron server, agents (L3, etc)
+network_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# nova hypervisors
+compute_hosts:
+ compute00:
+ ip: 172.29.236.12
+
+# cinder volume hosts (NFS-backed)
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+storage_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ cinder_backends:
+ limit_container_types: cinder_volume
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.12"
+ share: "/volumes"
diff --git a/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/noha/openstack_user_config.yml b/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/noha/openstack_user_config.yml
new file mode 100644
index 0000000..7ed9cd3
--- /dev/null
+++ b/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/noha/openstack_user_config.yml
@@ -0,0 +1,173 @@
+---
+cidr_networks:
+ container: 172.29.236.0/22
+ tunnel: 172.29.240.0/22
+ storage: 172.29.244.0/22
+
+used_ips:
+ - "172.29.236.1,172.29.236.50"
+ - "172.29.240.1,172.29.240.50"
+ - "172.29.244.1,172.29.244.50"
+ - "172.29.248.1,172.29.248.50"
+
+global_overrides:
+ internal_lb_vip_address: 172.29.236.11
+ external_lb_vip_address: 192.168.122.3
+ tunnel_bridge: "br-vxlan"
+ management_bridge: "br-mgmt"
+ provider_networks:
+ - network:
+ container_bridge: "br-mgmt"
+ container_type: "veth"
+ container_interface: "eth1"
+ ip_from_q: "container"
+ type: "raw"
+ group_binds:
+ - all_containers
+ - hosts
+ is_container_address: true
+ is_ssh_address: true
+ - network:
+ container_bridge: "br-vxlan"
+ container_type: "veth"
+ container_interface: "eth10"
+ ip_from_q: "tunnel"
+ type: "vxlan"
+ range: "1:1000"
+ net_name: "vxlan"
+ group_binds:
+ - neutron_openvswitch_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth12"
+ host_bind_override: "eth12"
+ type: "flat"
+ net_name: "flat"
+ group_binds:
+ - neutron_openvswitch_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth11"
+ host_bind_override: "eth12"
+ type: "vlan"
+ range: "102:199"
+ net_name: "physnet1"
+ group_binds:
+ - neutron_openvswitch_agent
+ - network:
+ container_bridge: "br-storage"
+ container_type: "veth"
+ container_interface: "eth2"
+ ip_from_q: "storage"
+ type: "raw"
+ group_binds:
+ - glance_api
+ - cinder_api
+ - cinder_volume
+ - nova_compute
+
+# ##
+# ## Infrastructure
+# ##
+
+# galera, memcache, rabbitmq, utility
+shared-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# repository (apt cache, python packages, etc)
+repo-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# load balancer
+# Ideally the load balancer should not use the Infrastructure hosts.
+# Dedicated hardware is best for improved performance and security.
+haproxy_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# rsyslog server
+# log_hosts:
+# log1:
+# ip: 172.29.236.14
+
+# ##
+# ## OpenStack
+# ##
+
+# keystone
+identity_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# cinder api services
+storage-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# glance
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+image_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ limit_container_types: glance
+ glance_nfs_client:
+ - server: "172.29.244.12"
+ remote_path: "/images"
+ local_path: "/var/lib/glance/images"
+ type: "nfs"
+ options: "_netdev,auto"
+
+# nova api, conductor, etc services
+compute-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# heat
+orchestration_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# horizon
+dashboard_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# neutron server, agents (L3, etc)
+network_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# nova hypervisors
+compute_hosts:
+ compute00:
+ ip: 172.29.236.12
+ compute01:
+ ip: 172.29.236.13
+
+# cinder volume hosts (NFS-backed)
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+storage_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ cinder_backends:
+ limit_container_types: cinder_volume
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.12"
+ share: "/volumes"
diff --git a/scenarios/os-odl-nofeature/role/os-odl-nofeature/tasks/main.yml b/scenarios/os-odl-nofeature/role/os-odl-nofeature/tasks/main.yml
new file mode 100644
index 0000000..7e87278
--- /dev/null
+++ b/scenarios/os-odl-nofeature/role/os-odl-nofeature/tasks/main.yml
@@ -0,0 +1,26 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2017 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+- name: copy user_variables_os-odl-nofeature.yml
+ template:
+ src: "user_variables_os-odl-nofeature.yml.j2"
+ dest: "{{openstack_osa_etc_path}}/user_variables_os-odl-nofeature.yml"
+
+- name: copy user_variables_os-odl-nofeature-ha.yml
+ copy:
+ src: "{{xci_flavor}}/user_variables_os-odl-nofeature-ha.yml"
+ dest: "{{openstack_osa_etc_path}}/user_variables_os-odl-nofeature-ha.yml"
+ when:
+ - xci_flavor == "ha"
+
+- name: copy os-odl-nofeature scenario specific openstack_user_config.yml
+ copy:
+ src: "{{xci_flavor}}/openstack_user_config.yml"
+ dest: "{{openstack_osa_etc_path}}/openstack_user_config.yml"
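The middle task is gated on xci_flavor, so only ha deployments receive the clustering overrides. A sketch of the resulting configuration directory per flavor, again assuming the conventional /etc/openstack_deploy location:

    # ha flavor
    /etc/openstack_deploy/openstack_user_config.yml
    /etc/openstack_deploy/user_variables_os-odl-nofeature.yml
    /etc/openstack_deploy/user_variables_os-odl-nofeature-ha.yml

    # mini and noha flavors
    /etc/openstack_deploy/openstack_user_config.yml
    /etc/openstack_deploy/user_variables_os-odl-nofeature.yml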
diff --git a/scenarios/os-odl-nofeature/role/os-odl-nofeature/templates/user_variables_os-odl-nofeature.yml.j2 b/scenarios/os-odl-nofeature/role/os-odl-nofeature/templates/user_variables_os-odl-nofeature.yml.j2
new file mode 100644
index 0000000..eb08adc
--- /dev/null
+++ b/scenarios/os-odl-nofeature/role/os-odl-nofeature/templates/user_variables_os-odl-nofeature.yml.j2
@@ -0,0 +1,45 @@
+---
+# Copyright (c) 2017 Ericsson AB and others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# ##
+# ## This file contains commonly used overrides for convenience. Please inspect
+# ## the defaults for each role to find additional override options.
+# ##
+
+{% raw %}
+# Ensure the openvswitch kernel module is loaded
+openstack_host_specific_kernel_modules:
+ - name: "openvswitch"
+ pattern: "CONFIG_OPENVSWITCH"
+ group: "network_hosts"
+
+# Use OpenDaylight SDN Controller
+neutron_plugin_type: "ml2.opendaylight"
+neutron_opendaylight_conf_ini_overrides:
+ ml2_odl:
+ username: "admin"
+ password: "admin"
+ port_binding_controller: "pseudo-agentdb-binding"
+ url: "http://{{ internal_lb_vip_address }}:8180/controller/nb/v2/neutron"
+
+neutron_ml2_drivers_type: "flat,vlan,vxlan"
+
+neutron_plugin_base:
+ - odl-router_v2
+{% endraw %}
+
+{% if odl_repo_version is defined %}
+odl_version: "{{ odl_repo_version }}"
+{% endif %}
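Everything inside the raw block is passed through untouched so that OpenStack-Ansible, not this role, expands internal_lb_vip_address at deploy time; only the optional version pin is templated here. When ODL_VERSION is exported in the deployer's environment (see vars/main.yml below), the rendered file gains one extra line, roughly as follows (the version string is purely illustrative):

    odl_version: "oxygen"

Without it, the pin is omitted and whatever default the consuming OpenDaylight role ships with applies.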
diff --git a/scenarios/os-odl-nofeature/vars/main.yml b/scenarios/os-odl-nofeature/vars/main.yml
new file mode 100644
index 0000000..629b50c
--- /dev/null
+++ b/scenarios/os-odl-nofeature/vars/main.yml
@@ -0,0 +1,2 @@
+---
+odl_repo_version: "{{ lookup('env','ODL_VERSION') }}"
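The lookup reads the deployer's shell environment, so pinning OpenDaylight is a matter of exporting the variable before starting the deployment; a minimal sketch, with the value left symbolic since it is deployment-specific:

    export ODL_VERSION=<desired-odl-release>
    # then launch the usual XCI deployment entry point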
diff --git a/scenarios/os-odl-nofeature/xci_overrides b/scenarios/os-odl-nofeature/xci_overrides
new file mode 100644
index 0000000..2c65df0
--- /dev/null
+++ b/scenarios/os-odl-nofeature/xci_overrides
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+if [[ $DEPLOY_SCENARIO == "os-odl-nofeature" ]] && [[ $XCI_FLAVOR == "ha" ]]; then
+ export VM_MEMORY_SIZE=20480
+elif [[ $DEPLOY_SCENARIO == "os-odl-nofeature" ]]; then
+ export VM_MEMORY_SIZE=16384
+fi
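The overrides file is plain shell and is meant to be sourced by the XCI deployment scripts before VM sizing is decided; the logic can be exercised by hand from the repository root (a sketch):

    export DEPLOY_SCENARIO=os-odl-nofeature
    export XCI_FLAVOR=ha
    source scenarios/os-odl-nofeature/xci_overrides
    echo "$VM_MEMORY_SIZE"   # 20480 for ha, 16384 for the other flavors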