32 files changed, 22 insertions(+), 2600 deletions(-)
diff --git a/xci/installer/kubespray/deploy.sh b/xci/installer/kubespray/deploy.sh
index 1a0b34bc..02a9d430 100755
--- a/xci/installer/kubespray/deploy.sh
+++ b/xci/installer/kubespray/deploy.sh
@@ -75,13 +75,13 @@ fi
 echo "Info: Using kubespray to deploy the kubernetes cluster"
 echo "-----------------------------------------------------------------------"
 ssh root@$OPNFV_HOST_IP "set -o pipefail; cd releng-xci/.cache/repos/kubespray;\
-        ansible-playbook ${XCI_ANSIBLE_PARAMS} \
+        ansible-playbook \
         -i opnfv_inventory/inventory.cfg cluster.yml -b | tee setup-kubernetes.log"
 scp root@$OPNFV_HOST_IP:~/releng-xci/.cache/repos/kubespray/setup-kubernetes.log \
         $LOG_PATH/setup-kubernetes.log
 cd $K8_XCI_PLAYBOOKS
-ansible-playbook ${XCI_ANSIBLE_PARAMS} -e XCI_PATH="${XCI_PATH}" \
+ansible-playbook ${XCI_ANSIBLE_PARAMS} \
     -i ${XCI_FLAVOR_ANSIBLE_FILE_PATH}/inventory/inventory.cfg \
     configure-kubenet.yml
 echo
diff --git a/xci/installer/kubespray/playbooks/configure-kubenet.yml b/xci/installer/kubespray/playbooks/configure-kubenet.yml
index 1c3740b2..3b1cb013 100644
--- a/xci/installer/kubespray/playbooks/configure-kubenet.yml
+++ b/xci/installer/kubespray/playbooks/configure-kubenet.yml
@@ -13,6 +13,7 @@
 # so cbr0 interfaces can talk to each other.
 - name: Prepare networking for kubenet
   hosts: k8s-cluster
+  remote_user: root
   gather_facts: True
   become: yes
   vars_files:
diff --git a/xci/installer/kubespray/playbooks/configure-opnfvhost.yml b/xci/installer/kubespray/playbooks/configure-opnfvhost.yml
index 7626b949..01904ba3 100644
--- a/xci/installer/kubespray/playbooks/configure-opnfvhost.yml
+++ b/xci/installer/kubespray/playbooks/configure-opnfvhost.yml
@@ -56,7 +56,7 @@
   - name: Install required packages
     package:
-      name: "{{ kube_require_packages[ansible_pkg_mgr] }}"
+      name: "{{ (ansible_pkg_mgr == 'zypper') | ternary('dbus-1', 'dbus') }}"
       state: present
       update_cache: "{{ (ansible_pkg_mgr == 'apt') | ternary('yes', omit) }}"
     when: xci_flavor == 'aio'
diff --git a/xci/installer/osa/deploy.sh b/xci/installer/osa/deploy.sh
index 6dada3f5..b356dda5 100755
--- a/xci/installer/osa/deploy.sh
+++ b/xci/installer/osa/deploy.sh
@@ -91,7 +91,7 @@ fi
 #-------------------------------------------------------------------------------
 echo "Info: Setting up target hosts for openstack-ansible"
 echo "-----------------------------------------------------------------------"
-ssh root@$OPNFV_HOST_IP "set -o pipefail; openstack-ansible ${XCI_ANSIBLE_PARAMS} \
+ssh root@$OPNFV_HOST_IP "set -o pipefail; openstack-ansible \
     releng-xci/.cache/repos/openstack-ansible/playbooks/setup-hosts.yml | tee setup-hosts.log "
 scp root@$OPNFV_HOST_IP:~/setup-hosts.log $LOG_PATH/setup-hosts.log
 echo "-----------------------------------------------------------------------"
@@ -113,7 +113,7 @@ echo "Info: Set up target hosts for openstack-ansible successfuly"
 echo "Info: Gathering facts"
 echo "-----------------------------------------------------------------------"
 ssh root@$OPNFV_HOST_IP "set -o pipefail; cd releng-xci/.cache/repos/openstack-ansible/playbooks; \
-    ansible ${XCI_ANSIBLE_PARAMS} -m setup -a gather_subset=network,hardware,virtual all"
+    ansible -m setup -a gather_subset=network,hardware,virtual all"
 echo "-----------------------------------------------------------------------"
 
 #-------------------------------------------------------------------------------
@@ -124,7 +124,7 @@ echo "-----------------------------------------------------------------------"
 echo "Info: Setting up infrastructure"
 echo "-----------------------------------------------------------------------"
 echo "xci: running ansible playbook setup-infrastructure.yml"
-ssh root@$OPNFV_HOST_IP "set -o pipefail; openstack-ansible ${XCI_ANSIBLE_PARAMS} \
+ssh root@$OPNFV_HOST_IP "set -o pipefail; openstack-ansible \
     releng-xci/.cache/repos/openstack-ansible/playbooks/setup-infrastructure.yml | tee setup-infrastructure.log"
 scp root@$OPNFV_HOST_IP:~/setup-infrastructure.log $LOG_PATH/setup-infrastructure.log
 echo "-----------------------------------------------------------------------"
@@ -153,7 +153,7 @@ echo "Info: Database cluster verification successful!"
 #-------------------------------------------------------------------------------
 echo "Info: Installing OpenStack on target hosts"
 echo "-----------------------------------------------------------------------"
-ssh root@$OPNFV_HOST_IP "set -o pipefail; openstack-ansible ${XCI_ANSIBLE_PARAMS} \
+ssh root@$OPNFV_HOST_IP "set -o pipefail; openstack-ansible \
     releng-xci/.cache/repos/openstack-ansible/playbooks/setup-openstack.yml | tee opnfv-setup-openstack.log"
 scp root@$OPNFV_HOST_IP:~/opnfv-setup-openstack.log $LOG_PATH/opnfv-setup-openstack.log
 echo "-----------------------------------------------------------------------"
diff --git a/xci/installer/osa/files/ansible-role-requirements.yml b/xci/installer/osa/files/ansible-role-requirements.yml
index 21f6840b..f631c839 100644
--- a/xci/installer/osa/files/ansible-role-requirements.yml
+++ b/xci/installer/osa/files/ansible-role-requirements.yml
@@ -200,7 +200,7 @@
 - name: opendaylight
   scm: git
   src: https://github.com/opendaylight/integration-packaging-ansible-opendaylight
-  version: 1f0f943499dcdd28a1b6971992c46bb4513ce8fb
+  version: 9d5951c39da7722c71632a10ec53e7ab93b8ac9b
 - name: haproxy_endpoints
   scm: git
   src: https://github.com/logan2211/ansible-haproxy-endpoints
diff --git a/xci/opnfv-scenario-requirements.yml b/xci/opnfv-scenario-requirements.yml
index 925789a9..f61bc021 100644
--- a/xci/opnfv-scenario-requirements.yml
+++ b/xci/opnfv-scenario-requirements.yml
@@ -28,9 +28,9 @@
 - scenario: os-nosdn-nofeature
   scm: git
-  src: https://gerrit.opnfv.org/gerrit/releng-xci
+  src: https://gerrit.opnfv.org/gerrit/releng-xci-scenarios
   version: master
-  role: xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature
+  role: scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature
   installers:
     - installer: osa
       flavors:
@@ -44,9 +44,9 @@
 - scenario: os-odl-nofeature
   scm: git
-  src: https://gerrit.opnfv.org/gerrit/releng-xci
+  src: https://gerrit.opnfv.org/gerrit/releng-xci-scenarios
   version: master
-  role: xci/scenarios/os-odl-nofeature/role/os-odl-nofeature
+  role: scenarios/os-odl-nofeature/role/os-odl-nofeature
   installers:
     - installer: osa
       flavors:
@@ -59,9 +59,9 @@
 - scenario: k8-nosdn-nofeature
   scm: git
-  src: https://gerrit.opnfv.org/gerrit/releng-xci
+  src: https://gerrit.opnfv.org/gerrit/releng-xci-scenarios
   version: master
-  role: xci/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature
+  role: scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature
   installers:
     - installer: kubespray
       flavors:
@@ -91,9 +91,9 @@
 - scenario: k8-canal-nofeature
   scm: git
-  src: https://gerrit.opnfv.org/gerrit/releng-xci
+  src: https://gerrit.opnfv.org/gerrit/releng-xci-scenarios
   version: master
-  role: xci/scenarios/k8-canal-nofeature/role/k8-canal-nofeature
+  role: scenarios/k8-canal-nofeature/role/k8-canal-nofeature
   installers:
     - installer: kubespray
       flavors:
@@ -108,9 +108,9 @@
 - scenario: k8-calico-nofeature
   scm: git
-  src: https://gerrit.opnfv.org/gerrit/releng-xci
+  src: https://gerrit.opnfv.org/gerrit/releng-xci-scenarios
   version: master
-  role: xci/scenarios/k8-calico-nofeature/role/k8-calico-nofeature
+  role: scenarios/k8-calico-nofeature/role/k8-calico-nofeature
   installers:
     - installer: kubespray
       flavors:
@@ -125,9 +125,9 @@
 - scenario: k8-flannel-nofeature
   scm: git
-  src: https://gerrit.opnfv.org/gerrit/releng-xci
+  src: https://gerrit.opnfv.org/gerrit/releng-xci-scenarios
   version: master
-  role: xci/scenarios/k8-flannel-nofeature/role/k8-flannel-nofeature
+  role: scenarios/k8-flannel-nofeature/role/k8-flannel-nofeature
   installers:
     - installer: kubespray
       flavors:
diff --git a/xci/scenarios/README.rst b/xci/scenarios/README.rst
deleted file mode 100644
index 5d9bdf06..00000000
--- a/xci/scenarios/README.rst
+++ /dev/null
@@ -1 +0,0 @@
-This folder keeps the roles for the generic scenarios.
diff --git a/xci/scenarios/k8-calico-nofeature/role/k8-calico-nofeature/files/k8s-cluster.yml b/xci/scenarios/k8-calico-nofeature/role/k8-calico-nofeature/files/k8s-cluster.yml
deleted file mode 100644
index 20d3091d..00000000
--- a/xci/scenarios/k8-calico-nofeature/role/k8-calico-nofeature/files/k8s-cluster.yml
+++ /dev/null
@@ -1,292 +0,0 @@
-# Valid bootstrap options (required): ubuntu, coreos, centos, none
-bootstrap_os: none
-
-#Directory where etcd data stored
-etcd_data_dir: /var/lib/etcd
-
-# Directory where the binaries will be installed
-bin_dir: /usr/local/bin
-
-## The access_ip variable is used to define how other nodes should access
-## the node. This is used in flannel to allow other flannel nodes to see
-## this node for example. The access_ip is really useful AWS and Google
-## environments where the nodes are accessed remotely by the "public" ip,
-## but don't know about that address themselves.
-#access_ip: 1.1.1.1
-
-### LOADBALANCING AND ACCESS MODES
-## Enable multiaccess to configure etcd clients to access all of the etcd members directly
-## as the "http://hostX:port, http://hostY:port, ..." and ignore the proxy loadbalancers.
-## This may be the case if clients support and loadbalance multiple etcd servers natively.
-#etcd_multiaccess: true
-
-## Internal loadbalancers for apiservers
-#loadbalancer_apiserver_localhost: true
-
-## Local loadbalancer should use this port instead, if defined.
-## Defaults to kube_apiserver_port (6443)
-#nginx_kube_apiserver_port: 8443
-
-### OTHER OPTIONAL VARIABLES
-## For some things, kubelet needs to load kernel modules. For example, dynamic kernel services are needed
-## for mounting persistent volumes into containers. These may not be loaded by preinstall kubernetes
-## processes. For example, ceph and rbd backed volumes. Set to true to allow kubelet to load kernel
-## modules.
-# kubelet_load_modules: false
-
-## Internal network total size. This is the prefix of the
-## entire network. Must be unused in your environment.
-#kube_network_prefix: 18
-
-## With calico it is possible to distributed routes with border routers of the datacenter.
-## Warning : enabling router peering will disable calico's default behavior ('node mesh').
-## The subnets of each nodes will be distributed by the datacenter router
-#peer_with_router: false
-
-## Upstream dns servers used by dnsmasq
-#upstream_dns_servers:
-# - 8.8.8.8
-# - 8.8.4.4
-
-## There are some changes specific to the cloud providers
-## for instance we need to encapsulate packets with some network plugins
-## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', or 'external'
-## When openstack is used make sure to source in the openstack credentials
-## like you would do when using nova-client before starting the playbook.
-#cloud_provider:
-
-## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (https://github.com/kubernetes/kubernetes/issues/50461)
-#openstack_blockstorage_version: "v1/v2/auto (default)"
-## When OpenStack is used, if LBaaSv2 is available you can enable it with the following variables.
-#openstack_lbaas_enabled: True
-#openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
-#openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
-#openstack_lbaas_create_monitor: "yes"
-#openstack_lbaas_monitor_delay: "1m"
-#openstack_lbaas_monitor_timeout: "30s"
-#openstack_lbaas_monitor_max_retries: "3"
-
-## Uncomment to enable experimental kubeadm deployment mode
-#kubeadm_enabled: false
-#kubeadm_token_first: "{{ lookup('password', 'credentials/kubeadm_token_first length=6 chars=ascii_lowercase,digits') }}"
-#kubeadm_token_second: "{{ lookup('password', 'credentials/kubeadm_token_second length=16 chars=ascii_lowercase,digits') }}"
-#kubeadm_token: "{{ kubeadm_token_first }}.{{ kubeadm_token_second }}"
-#
-## Set these proxy values in order to update package manager and docker daemon to use proxies
-#http_proxy: ""
-#https_proxy: ""
-## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
-#no_proxy: ""
-
-## Uncomment this if you want to force overlay/overlay2 as docker storage driver
-## Please note that overlay2 is only supported on newer kernels
-#docker_storage_options: -s overlay2
-
-# Uncomment this if you have more than 3 nameservers, then we'll only use the first 3.
-#docker_dns_servers_strict: false
-
-## Default packages to install within the cluster, f.e:
-#kpm_packages:
-# - name: kube-system/grafana
-
-## Certificate Management
-## This setting determines whether certs are generated via scripts or whether a
-## cluster of Hashicorp's Vault is started to issue certificates (using etcd
-## as a backend). Options are "script" or "vault"
-#cert_management: script
-
-# Set to true to allow pre-checks to fail and continue deployment
-#ignore_assert_errors: false
-
-## Etcd auto compaction retention for mvcc key value store in hour
-#etcd_compaction_retention: 0
-
-## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics.
-#etcd_metrics: basic
-
-
-# Kubernetes configuration dirs and system namespace.
-# Those are where all the additional config stuff goes
-# kubernetes normally puts in /srv/kubernetes.
-# This puts them in a sane location and namespace.
-# Editing those values will almost surely break something.
-kube_config_dir: /etc/kubernetes
-kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
-kube_manifest_dir: "{{ kube_config_dir }}/manifests"
-system_namespace: kube-system
-
-# Logging directory (sysvinit systems)
-kube_log_dir: "/var/log/kubernetes"
-
-# This is where all the cert scripts and certs will be located
-kube_cert_dir: "{{ kube_config_dir }}/ssl"
-
-# This is where all of the bearer tokens will be stored
-kube_token_dir: "{{ kube_config_dir }}/tokens"
-
-# This is where to save basic auth file
-kube_users_dir: "{{ kube_config_dir }}/users"
-
-kube_api_anonymous_auth: false
-
-## Change this to use another Kubernetes version, e.g. a current beta release
-#kube_version: v1.9.0
-
-# Where the binaries will be downloaded.
-# Note: ensure that you've enough disk space (about 1G)
-local_release_dir: "/tmp/releases"
-# Random shifts for retrying failed ops like pushing/downloading
-retry_stagger: 5
-
-# This is the group that the cert creation scripts chgrp the
-# cert files to. Not really changable...
-kube_cert_group: kube-cert
-
-# Cluster Loglevel configuration
-kube_log_level: 2
-
-# Users to create for basic auth in Kubernetes API via HTTP
-# Optionally add groups for user
-kube_api_pwd: "{{ lookup('password', 'credentials/kube_user length=15 chars=ascii_letters,digits') }}"
-kube_users:
-  kube:
-    pass: "{{kube_api_pwd}}"
-    role: admin
-    groups:
-      - system:masters
-
-## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth)
-#kube_oidc_auth: false
-kube_basic_auth: true
-#kube_token_auth: false
-
-
-## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/
-## To use OpenID you have to deploy additional an OpenID Provider (e.g Dex, Keycloak, ...)
-
-# kube_oidc_url: https:// ...
-# kube_oidc_client_id: kubernetes
-## Optional settings for OIDC
-# kube_oidc_ca_file: {{ kube_cert_dir }}/ca.pem
-# kube_oidc_username_claim: sub
-# kube_oidc_groups_claim: groups
-
-
-# Choose network plugin (calico, contiv, weave or flannel)
-# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
-kube_network_plugin: calico
-
-# weave's network password for encryption
-# if null then no network encryption
-# you can use --extra-vars to pass the password in command line
-weave_password: EnterPasswordHere
-
-# Weave uses consensus mode by default
-# Enabling seed mode allow to dynamically add or remove hosts
-# https://www.weave.works/docs/net/latest/ipam/
-weave_mode_seed: false
-
-# This two variable are automatically changed by the weave's role, do not manually change these values
-# To reset values :
-# weave_seed: uninitialized
-# weave_peers: uninitialized
-weave_seed: uninitialized
-weave_peers: uninitialized
-
-# Enable kubernetes network policies
-enable_network_policy: false
-
-# Kubernetes internal network for services, unused block of space.
-kube_service_addresses: 10.233.0.0/18
-
-# internal network. When used, it will assign IP
-# addresses from this range to individual pods.
-# This network must be unused in your network infrastructure!
-kube_pods_subnet: 10.233.64.0/18
-
-# internal network node size allocation (optional). This is the size allocated
-# to each node on your network. With these defaults you should have
-# room for 4096 nodes with 254 pods per node.
-kube_network_node_prefix: 24
-
-# The port the API Server will be listening on.
-kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
-kube_apiserver_port: 6443 # (https)
-kube_apiserver_insecure_port: 8080 # (http)
-
-# DNS configuration.
-# Kubernetes cluster name, also will be used as DNS domain
-cluster_name: cluster.local
-# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
-ndots: 2
-# Can be dnsmasq_kubedns, kubedns or none
-dns_mode: kubedns
-# Can be docker_dns, host_resolvconf or none
-resolvconf_mode: docker_dns
-# Deploy netchecker app to verify DNS resolve as an HTTP service
-deploy_netchecker: false
-# Ip address of the kubernetes skydns service
-skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
-dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
-dns_domain: "{{ cluster_name }}"
-
-# Path used to store Docker data
-docker_daemon_graph: "/var/lib/docker"
-
-## A string of extra options to pass to the docker daemon.
-## This string should be exactly as you wish it to appear.
-## An obvious use case is allowing insecure-registry access
-## to self hosted registries like so:
-
-docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} {{ docker_log_opts }}"
-docker_bin_dir: "/usr/bin"
-
-# Settings for containerized control plane (etcd/kubelet/secrets)
-etcd_deployment_type: docker
-kubelet_deployment_type: host
-vault_deployment_type: docker
-helm_deployment_type: host
-
-# K8s image pull policy (imagePullPolicy)
-k8s_image_pull_policy: IfNotPresent
-
-# Kubernetes dashboard
-# RBAC required. see docs/getting-started.md for access details.
-dashboard_enabled: true
-
-# Monitoring apps for k8s
-efk_enabled: false
-
-# Helm deployment
-helm_enabled: false
-
-# Istio deployment
-istio_enabled: false
-
-# Local volume provisioner deployment
-local_volumes_enabled: false
-
-# Add Persistent Volumes Storage Class for corresponding cloud provider ( OpenStack is only supported now )
-persistent_volumes_enabled: false
-
-# Make a copy of kubeconfig on the host that runs Ansible in GITDIR/artifacts
-kubeconfig_localhost: true
-# Download kubectl onto the host that runs Ansible in GITDIR/artifacts
-kubectl_localhost: true
-artifacts_dir: "{{ ansible_env.HOME }}"
-
-# dnsmasq
-# dnsmasq_upstream_dns_servers:
-# - /resolvethiszone.with/10.0.4.250
-# - 8.8.8.8
-
-# Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created. (default true)
-# kubelet_cgroups_per_qos: true
-
-# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet.
-# Acceptible options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
-# kubelet_enforce_node_allocatable: pods
-
-## Supplementary addresses that can be added in kubernetes ssl keys.
-## That can be usefull for example to setup a keepalived virtual IP
-# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3]
diff --git a/xci/scenarios/k8-calico-nofeature/role/k8-calico-nofeature/tasks/main.yml b/xci/scenarios/k8-calico-nofeature/role/k8-calico-nofeature/tasks/main.yml
deleted file mode 100644
index 5b2939f1..00000000
--- a/xci/scenarios/k8-calico-nofeature/role/k8-calico-nofeature/tasks/main.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-##############################################################################
-# Copyright (c) 2018 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-- name: copy k8s-cluster.yml
-  copy:
-    src: "k8s-cluster.yml"
-    dest: "{{ remote_xci_path }}/.cache/repos/kubespray/opnfv_inventory/group_vars/k8s-cluster.yml"
diff --git a/xci/scenarios/k8-canal-nofeature/role/k8-canal-nofeature/files/k8s-cluster.yml b/xci/scenarios/k8-canal-nofeature/role/k8-canal-nofeature/files/k8s-cluster.yml
deleted file mode 100644
index 7646aefa..00000000
--- a/xci/scenarios/k8-canal-nofeature/role/k8-canal-nofeature/files/k8s-cluster.yml
+++ /dev/null
@@ -1,292 +0,0 @@
-# Valid bootstrap options (required): ubuntu, coreos, centos, none
-bootstrap_os: none
-
-#Directory where etcd data stored
-etcd_data_dir: /var/lib/etcd
-
-# Directory where the binaries will be installed
-bin_dir: /usr/local/bin
-
-## The access_ip variable is used to define how other nodes should access
-## the node. This is used in flannel to allow other flannel nodes to see
-## this node for example. The access_ip is really useful AWS and Google
-## environments where the nodes are accessed remotely by the "public" ip,
-## but don't know about that address themselves.
-#access_ip: 1.1.1.1
-
-### LOADBALANCING AND ACCESS MODES
-## Enable multiaccess to configure etcd clients to access all of the etcd members directly
-## as the "http://hostX:port, http://hostY:port, ..." and ignore the proxy loadbalancers.
-## This may be the case if clients support and loadbalance multiple etcd servers natively.
-#etcd_multiaccess: true
-
-## Internal loadbalancers for apiservers
-#loadbalancer_apiserver_localhost: true
-
-## Local loadbalancer should use this port instead, if defined.
-## Defaults to kube_apiserver_port (6443)
-#nginx_kube_apiserver_port: 8443
-
-### OTHER OPTIONAL VARIABLES
-## For some things, kubelet needs to load kernel modules. For example, dynamic kernel services are needed
-## for mounting persistent volumes into containers. These may not be loaded by preinstall kubernetes
-## processes. For example, ceph and rbd backed volumes. Set to true to allow kubelet to load kernel
-## modules.
-# kubelet_load_modules: false
-
-## Internal network total size. This is the prefix of the
-## entire network. Must be unused in your environment.
-#kube_network_prefix: 18
-
-## With calico it is possible to distributed routes with border routers of the datacenter.
-## Warning : enabling router peering will disable calico's default behavior ('node mesh').
-## The subnets of each nodes will be distributed by the datacenter router
-#peer_with_router: false
-
-## Upstream dns servers used by dnsmasq
-#upstream_dns_servers:
-# - 8.8.8.8
-# - 8.8.4.4
-
-## There are some changes specific to the cloud providers
-## for instance we need to encapsulate packets with some network plugins
-## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', or 'external'
-## When openstack is used make sure to source in the openstack credentials
-## like you would do when using nova-client before starting the playbook.
-#cloud_provider:
-
-## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (https://github.com/kubernetes/kubernetes/issues/50461)
-#openstack_blockstorage_version: "v1/v2/auto (default)"
-## When OpenStack is used, if LBaaSv2 is available you can enable it with the following variables.
-#openstack_lbaas_enabled: True
-#openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
-#openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
-#openstack_lbaas_create_monitor: "yes"
-#openstack_lbaas_monitor_delay: "1m"
-#openstack_lbaas_monitor_timeout: "30s"
-#openstack_lbaas_monitor_max_retries: "3"
-
-## Uncomment to enable experimental kubeadm deployment mode
-#kubeadm_enabled: false
-#kubeadm_token_first: "{{ lookup('password', 'credentials/kubeadm_token_first length=6 chars=ascii_lowercase,digits') }}"
-#kubeadm_token_second: "{{ lookup('password', 'credentials/kubeadm_token_second length=16 chars=ascii_lowercase,digits') }}"
-#kubeadm_token: "{{ kubeadm_token_first }}.{{ kubeadm_token_second }}"
-#
-## Set these proxy values in order to update package manager and docker daemon to use proxies
-#http_proxy: ""
-#https_proxy: ""
-## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
-#no_proxy: ""
-
-## Uncomment this if you want to force overlay/overlay2 as docker storage driver
-## Please note that overlay2 is only supported on newer kernels
-#docker_storage_options: -s overlay2
-
-# Uncomment this if you have more than 3 nameservers, then we'll only use the first 3.
-#docker_dns_servers_strict: false
-
-## Default packages to install within the cluster, f.e:
-#kpm_packages:
-# - name: kube-system/grafana
-
-## Certificate Management
-## This setting determines whether certs are generated via scripts or whether a
-## cluster of Hashicorp's Vault is started to issue certificates (using etcd
-## as a backend). Options are "script" or "vault"
-#cert_management: script
-
-# Set to true to allow pre-checks to fail and continue deployment
-#ignore_assert_errors: false
-
-## Etcd auto compaction retention for mvcc key value store in hour
-#etcd_compaction_retention: 0
-
-## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics.
-#etcd_metrics: basic
-
-
-# Kubernetes configuration dirs and system namespace.
-# Those are where all the additional config stuff goes
-# kubernetes normally puts in /srv/kubernetes.
-# This puts them in a sane location and namespace.
-# Editing those values will almost surely break something.
-kube_config_dir: /etc/kubernetes
-kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
-kube_manifest_dir: "{{ kube_config_dir }}/manifests"
-system_namespace: kube-system
-
-# Logging directory (sysvinit systems)
-kube_log_dir: "/var/log/kubernetes"
-
-# This is where all the cert scripts and certs will be located
-kube_cert_dir: "{{ kube_config_dir }}/ssl"
-
-# This is where all of the bearer tokens will be stored
-kube_token_dir: "{{ kube_config_dir }}/tokens"
-
-# This is where to save basic auth file
-kube_users_dir: "{{ kube_config_dir }}/users"
-
-kube_api_anonymous_auth: false
-
-## Change this to use another Kubernetes version, e.g. a current beta release
-#kube_version: v1.9.0
-
-# Where the binaries will be downloaded.
-# Note: ensure that you've enough disk space (about 1G)
-local_release_dir: "/tmp/releases"
-# Random shifts for retrying failed ops like pushing/downloading
-retry_stagger: 5
-
-# This is the group that the cert creation scripts chgrp the
-# cert files to. Not really changable...
-kube_cert_group: kube-cert
-
-# Cluster Loglevel configuration
-kube_log_level: 2
-
-# Users to create for basic auth in Kubernetes API via HTTP
-# Optionally add groups for user
-kube_api_pwd: "{{ lookup('password', 'credentials/kube_user length=15 chars=ascii_letters,digits') }}"
-kube_users:
-  kube:
-    pass: "{{kube_api_pwd}}"
-    role: admin
-    groups:
-      - system:masters
-
-## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth)
-#kube_oidc_auth: false
-kube_basic_auth: true
-#kube_token_auth: false
-
-
-## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/
-## To use OpenID you have to deploy additional an OpenID Provider (e.g Dex, Keycloak, ...)
-
-# kube_oidc_url: https:// ...
-# kube_oidc_client_id: kubernetes
-## Optional settings for OIDC
-# kube_oidc_ca_file: {{ kube_cert_dir }}/ca.pem
-# kube_oidc_username_claim: sub
-# kube_oidc_groups_claim: groups
-
-
-# Choose network plugin (calico, contiv, weave or flannel)
-# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
-kube_network_plugin: canal
-
-# weave's network password for encryption
-# if null then no network encryption
-# you can use --extra-vars to pass the password in command line
-weave_password: EnterPasswordHere
-
-# Weave uses consensus mode by default
-# Enabling seed mode allow to dynamically add or remove hosts
-# https://www.weave.works/docs/net/latest/ipam/
-weave_mode_seed: false
-
-# This two variable are automatically changed by the weave's role, do not manually change these values
-# To reset values :
-# weave_seed: uninitialized
-# weave_peers: uninitialized
-weave_seed: uninitialized
-weave_peers: uninitialized
-
-# Enable kubernetes network policies
-enable_network_policy: false
-
-# Kubernetes internal network for services, unused block of space.
-kube_service_addresses: 10.233.0.0/18
-
-# internal network. When used, it will assign IP
-# addresses from this range to individual pods.
-# This network must be unused in your network infrastructure!
-kube_pods_subnet: 10.233.64.0/18
-
-# internal network node size allocation (optional). This is the size allocated
-# to each node on your network. With these defaults you should have
-# room for 4096 nodes with 254 pods per node.
-kube_network_node_prefix: 24
-
-# The port the API Server will be listening on.
-kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
-kube_apiserver_port: 6443 # (https)
-kube_apiserver_insecure_port: 8080 # (http)
-
-# DNS configuration.
-# Kubernetes cluster name, also will be used as DNS domain
-cluster_name: cluster.local
-# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
-ndots: 2
-# Can be dnsmasq_kubedns, kubedns or none
-dns_mode: kubedns
-# Can be docker_dns, host_resolvconf or none
-resolvconf_mode: docker_dns
-# Deploy netchecker app to verify DNS resolve as an HTTP service
-deploy_netchecker: false
-# Ip address of the kubernetes skydns service
-skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
-dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
-dns_domain: "{{ cluster_name }}"
-
-# Path used to store Docker data
-docker_daemon_graph: "/var/lib/docker"
-
-## A string of extra options to pass to the docker daemon.
-## This string should be exactly as you wish it to appear.
-## An obvious use case is allowing insecure-registry access
-## to self hosted registries like so:
-
-docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} {{ docker_log_opts }}"
-docker_bin_dir: "/usr/bin"
-
-# Settings for containerized control plane (etcd/kubelet/secrets)
-etcd_deployment_type: docker
-kubelet_deployment_type: host
-vault_deployment_type: docker
-helm_deployment_type: host
-
-# K8s image pull policy (imagePullPolicy)
-k8s_image_pull_policy: IfNotPresent
-
-# Kubernetes dashboard
-# RBAC required. see docs/getting-started.md for access details.
-dashboard_enabled: true
-
-# Monitoring apps for k8s
-efk_enabled: false
-
-# Helm deployment
-helm_enabled: false
-
-# Istio deployment
-istio_enabled: false
-
-# Local volume provisioner deployment
-local_volumes_enabled: false
-
-# Add Persistent Volumes Storage Class for corresponding cloud provider ( OpenStack is only supported now )
-persistent_volumes_enabled: false
-
-# Make a copy of kubeconfig on the host that runs Ansible in GITDIR/artifacts
-kubeconfig_localhost: true
-# Download kubectl onto the host that runs Ansible in GITDIR/artifacts
-kubectl_localhost: true
-artifacts_dir: "{{ ansible_env.HOME }}"
-
-# dnsmasq
-# dnsmasq_upstream_dns_servers:
-# - /resolvethiszone.with/10.0.4.250
-# - 8.8.8.8
-
-# Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created. (default true)
-# kubelet_cgroups_per_qos: true
-
-# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet.
-# Acceptible options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
-# kubelet_enforce_node_allocatable: pods
-
-## Supplementary addresses that can be added in kubernetes ssl keys.
-## That can be usefull for example to setup a keepalived virtual IP
-# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3]
diff --git a/xci/scenarios/k8-canal-nofeature/role/k8-canal-nofeature/tasks/main.yml b/xci/scenarios/k8-canal-nofeature/role/k8-canal-nofeature/tasks/main.yml
deleted file mode 100644
index 5b2939f1..00000000
--- a/xci/scenarios/k8-canal-nofeature/role/k8-canal-nofeature/tasks/main.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-##############################################################################
-# Copyright (c) 2018 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-- name: copy k8s-cluster.yml
-  copy:
-    src: "k8s-cluster.yml"
-    dest: "{{ remote_xci_path }}/.cache/repos/kubespray/opnfv_inventory/group_vars/k8s-cluster.yml"
diff --git a/xci/scenarios/k8-flannel-nofeature/role/k8-flannel-nofeature/files/k8-cluster.yml b/xci/scenarios/k8-flannel-nofeature/role/k8-flannel-nofeature/files/k8-cluster.yml
deleted file mode 100644
index 3c3dc5d9..00000000
--- a/xci/scenarios/k8-flannel-nofeature/role/k8-flannel-nofeature/files/k8-cluster.yml
+++ /dev/null
@@ -1,292 +0,0 @@
-# Valid bootstrap options (required): ubuntu, coreos, centos, none
-bootstrap_os: none
-
-#Directory where etcd data stored
-etcd_data_dir: /var/lib/etcd
-
-# Directory where the binaries will be installed
-bin_dir: /usr/local/bin
-
-## The access_ip variable is used to define how other nodes should access
-## the node. This is used in flannel to allow other flannel nodes to see
-## this node for example. The access_ip is really useful AWS and Google
-## environments where the nodes are accessed remotely by the "public" ip,
-## but don't know about that address themselves.
-#access_ip: 1.1.1.1
-
-### LOADBALANCING AND ACCESS MODES
-## Enable multiaccess to configure etcd clients to access all of the etcd members directly
-## as the "http://hostX:port, http://hostY:port, ..." and ignore the proxy loadbalancers.
-## This may be the case if clients support and loadbalance multiple etcd servers natively.
-#etcd_multiaccess: true
-
-## Internal loadbalancers for apiservers
-#loadbalancer_apiserver_localhost: true
-
-## Local loadbalancer should use this port instead, if defined.
-## Defaults to kube_apiserver_port (6443)
-#nginx_kube_apiserver_port: 8443
-
-### OTHER OPTIONAL VARIABLES
-## For some things, kubelet needs to load kernel modules. For example, dynamic kernel services are needed
-## for mounting persistent volumes into containers. These may not be loaded by preinstall kubernetes
-## processes. For example, ceph and rbd backed volumes. Set to true to allow kubelet to load kernel
-## modules.
-# kubelet_load_modules: false
-
-## Internal network total size. This is the prefix of the
-## entire network. Must be unused in your environment.
-#kube_network_prefix: 18
-
-## With calico it is possible to distributed routes with border routers of the datacenter.
-## Warning : enabling router peering will disable calico's default behavior ('node mesh').
-## The subnets of each nodes will be distributed by the datacenter router
-#peer_with_router: false
-
-## Upstream dns servers used by dnsmasq
-#upstream_dns_servers:
-# - 8.8.8.8
-# - 8.8.4.4
-
-## There are some changes specific to the cloud providers
-## for instance we need to encapsulate packets with some network plugins
-## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', or 'external'
-## When openstack is used make sure to source in the openstack credentials
-## like you would do when using nova-client before starting the playbook.
-#cloud_provider:
-
-## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (https://github.com/kubernetes/kubernetes/issues/50461)
-#openstack_blockstorage_version: "v1/v2/auto (default)"
-## When OpenStack is used, if LBaaSv2 is available you can enable it with the following variables.
-#openstack_lbaas_enabled: True
-#openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
-#openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
-#openstack_lbaas_create_monitor: "yes"
-#openstack_lbaas_monitor_delay: "1m"
-#openstack_lbaas_monitor_timeout: "30s"
-#openstack_lbaas_monitor_max_retries: "3"
-
-## Uncomment to enable experimental kubeadm deployment mode
-#kubeadm_enabled: false
-#kubeadm_token_first: "{{ lookup('password', 'credentials/kubeadm_token_first length=6 chars=ascii_lowercase,digits') }}"
-#kubeadm_token_second: "{{ lookup('password', 'credentials/kubeadm_token_second length=16 chars=ascii_lowercase,digits') }}"
-#kubeadm_token: "{{ kubeadm_token_first }}.{{ kubeadm_token_second }}"
-#
-## Set these proxy values in order to update package manager and docker daemon to use proxies
-#http_proxy: ""
-#https_proxy: ""
-## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
-#no_proxy: ""
-
-## Uncomment this if you want to force overlay/overlay2 as docker storage driver
-## Please note that overlay2 is only supported on newer kernels
-#docker_storage_options: -s overlay2
-
-# Uncomment this if you have more than 3 nameservers, then we'll only use the first 3.
-#docker_dns_servers_strict: false
-
-## Default packages to install within the cluster, f.e:
-#kpm_packages:
-# - name: kube-system/grafana
-
-## Certificate Management
-## This setting determines whether certs are generated via scripts or whether a
-## cluster of Hashicorp's Vault is started to issue certificates (using etcd
-## as a backend). Options are "script" or "vault"
-#cert_management: script
-
-# Set to true to allow pre-checks to fail and continue deployment
-#ignore_assert_errors: false
-
-## Etcd auto compaction retention for mvcc key value store in hour
-#etcd_compaction_retention: 0
-
-## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics.
-#etcd_metrics: basic
-
-
-# Kubernetes configuration dirs and system namespace.
-# Those are where all the additional config stuff goes
-# kubernetes normally puts in /srv/kubernetes.
-# This puts them in a sane location and namespace.
-# Editing those values will almost surely break something.
-kube_config_dir: /etc/kubernetes
-kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
-kube_manifest_dir: "{{ kube_config_dir }}/manifests"
-system_namespace: kube-system
-
-# Logging directory (sysvinit systems)
-kube_log_dir: "/var/log/kubernetes"
-
-# This is where all the cert scripts and certs will be located
-kube_cert_dir: "{{ kube_config_dir }}/ssl"
-
-# This is where all of the bearer tokens will be stored
-kube_token_dir: "{{ kube_config_dir }}/tokens"
-
-# This is where to save basic auth file
-kube_users_dir: "{{ kube_config_dir }}/users"
-
-kube_api_anonymous_auth: false
-
-## Change this to use another Kubernetes version, e.g. a current beta release
-#kube_version: v1.9.0
-
-# Where the binaries will be downloaded.
-# Note: ensure that you've enough disk space (about 1G)
-local_release_dir: "/tmp/releases"
-# Random shifts for retrying failed ops like pushing/downloading
-retry_stagger: 5
-
-# This is the group that the cert creation scripts chgrp the
-# cert files to. Not really changable...
-kube_cert_group: kube-cert
-
-# Cluster Loglevel configuration
-kube_log_level: 2
-
-# Users to create for basic auth in Kubernetes API via HTTP
-# Optionally add groups for user
-kube_api_pwd: "{{ lookup('password', 'credentials/kube_user length=15 chars=ascii_letters,digits') }}"
-kube_users:
-  kube:
-    pass: "{{kube_api_pwd}}"
-    role: admin
-    groups:
-      - system:masters
-
-## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth)
-#kube_oidc_auth: false
-kube_basic_auth: true
-#kube_token_auth: false
-
-
-## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/
-## To use OpenID you have to deploy additional an OpenID Provider (e.g Dex, Keycloak, ...)
-
-# kube_oidc_url: https:// ...
-# kube_oidc_client_id: kubernetes
-## Optional settings for OIDC
-# kube_oidc_ca_file: {{ kube_cert_dir }}/ca.pem
-# kube_oidc_username_claim: sub
-# kube_oidc_groups_claim: groups
-
-
-# Choose network plugin (calico, contiv, weave or flannel)
-# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
-kube_network_plugin: flannel
-
-# weave's network password for encryption
-# if null then no network encryption
-# you can use --extra-vars to pass the password in command line
-weave_password: EnterPasswordHere
-
-# Weave uses consensus mode by default
-# Enabling seed mode allow to dynamically add or remove hosts
-# https://www.weave.works/docs/net/latest/ipam/
-weave_mode_seed: false
-
-# This two variable are automatically changed by the weave's role, do not manually change these values
-# To reset values :
-# weave_seed: uninitialized
-# weave_peers: uninitialized
-weave_seed: uninitialized
-weave_peers: uninitialized
-
-# Enable kubernetes network policies
-enable_network_policy: false
-
-# Kubernetes internal network for services, unused block of space.
-kube_service_addresses: 10.233.0.0/18
-
-# internal network. When used, it will assign IP
-# addresses from this range to individual pods.
-# This network must be unused in your network infrastructure!
-kube_pods_subnet: 10.233.64.0/18
-
-# internal network node size allocation (optional). This is the size allocated
-# to each node on your network. With these defaults you should have
-# room for 4096 nodes with 254 pods per node.
-kube_network_node_prefix: 24
-
-# The port the API Server will be listening on.
-kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
-kube_apiserver_port: 6443 # (https)
-kube_apiserver_insecure_port: 8080 # (http)
-
-# DNS configuration.
-# Kubernetes cluster name, also will be used as DNS domain
-cluster_name: cluster.local
-# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
-ndots: 2
-# Can be dnsmasq_kubedns, kubedns or none
-dns_mode: kubedns
-# Can be docker_dns, host_resolvconf or none
-resolvconf_mode: docker_dns
-# Deploy netchecker app to verify DNS resolve as an HTTP service
-deploy_netchecker: false
-# Ip address of the kubernetes skydns service
-skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
-dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
-dns_domain: "{{ cluster_name }}"
-
-# Path used to store Docker data
-docker_daemon_graph: "/var/lib/docker"
-
-## A string of extra options to pass to the docker daemon.
-## This string should be exactly as you wish it to appear.
-## An obvious use case is allowing insecure-registry access
-## to self hosted registries like so:
-
-docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} {{ docker_log_opts }}"
-docker_bin_dir: "/usr/bin"
-
-# Settings for containerized control plane (etcd/kubelet/secrets)
-etcd_deployment_type: docker
-kubelet_deployment_type: host
-vault_deployment_type: docker
-helm_deployment_type: host
-
-# K8s image pull policy (imagePullPolicy)
-k8s_image_pull_policy: IfNotPresent
-
-# Kubernetes dashboard
-# RBAC required. see docs/getting-started.md for access details.
-dashboard_enabled: true
-
-# Monitoring apps for k8s
-efk_enabled: false
-
-# Helm deployment
-helm_enabled: false
-
-# Istio deployment
-istio_enabled: false
-
-# Local volume provisioner deployment
-local_volumes_enabled: false
-
-# Add Persistent Volumes Storage Class for corresponding cloud provider ( OpenStack is only supported now )
-persistent_volumes_enabled: false
-
-# Make a copy of kubeconfig on the host that runs Ansible in GITDIR/artifacts
-kubeconfig_localhost: true
-# Download kubectl onto the host that runs Ansible in GITDIR/artifacts
-kubectl_localhost: true
-artifacts_dir: "{{ ansible_env.HOME }}"
-
-# dnsmasq
-# dnsmasq_upstream_dns_servers:
-# - /resolvethiszone.with/10.0.4.250
-# - 8.8.8.8
-
-# Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created. (default true)
-# kubelet_cgroups_per_qos: true
-
-# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet.
-# Acceptible options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
-# kubelet_enforce_node_allocatable: pods
-
-## Supplementary addresses that can be added in kubernetes ssl keys.
-## That can be usefull for example to setup a keepalived virtual IP
-# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3]
diff --git a/xci/scenarios/k8-flannel-nofeature/role/k8-flannel-nofeature/tasks/main.yml b/xci/scenarios/k8-flannel-nofeature/role/k8-flannel-nofeature/tasks/main.yml
deleted file mode 100644
index 5efd7c83..00000000
--- a/xci/scenarios/k8-flannel-nofeature/role/k8-flannel-nofeature/tasks/main.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-##############################################################################
-# Copyright (c) 2018 taseer94@gmail.com & others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-- name: copy the k8-cluster config file
-  copy:
-    src: k8-cluster.yml
-    dest: "{{ remote_xci_path }}/.cache/repos/kubespray/opnfv_inventory/group_vars/k8s-cluster.yml"
diff --git a/xci/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature/files/k8s-cluster.yml b/xci/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature/files/k8s-cluster.yml
deleted file mode 100644
index 614d784e..00000000
--- a/xci/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature/files/k8s-cluster.yml
+++ /dev/null
@@ -1,292 +0,0 @@
-# Valid bootstrap options (required): ubuntu, coreos, centos, none
-bootstrap_os: none
-
-#Directory where etcd data stored
-etcd_data_dir: /var/lib/etcd
-
-# Directory where the binaries will be installed
-bin_dir: /usr/local/bin
-
-## The access_ip variable is used to define how other nodes should access
-## the node. This is used in flannel to allow other flannel nodes to see
-## this node for example. The access_ip is really useful AWS and Google
-## environments where the nodes are accessed remotely by the "public" ip,
-## but don't know about that address themselves.
-#access_ip: 1.1.1.1
-
-### LOADBALANCING AND ACCESS MODES
-## Enable multiaccess to configure etcd clients to access all of the etcd members directly
-## as the "http://hostX:port, http://hostY:port, ..." and ignore the proxy loadbalancers.
-## This may be the case if clients support and loadbalance multiple etcd servers natively.
-#etcd_multiaccess: true
-
-## Internal loadbalancers for apiservers
-#loadbalancer_apiserver_localhost: true
-
-## Local loadbalancer should use this port instead, if defined.
-## Defaults to kube_apiserver_port (6443)
-#nginx_kube_apiserver_port: 8443
-
-### OTHER OPTIONAL VARIABLES
-## For some things, kubelet needs to load kernel modules. For example, dynamic kernel services are needed
-## for mounting persistent volumes into containers. These may not be loaded by preinstall kubernetes
-## processes. For example, ceph and rbd backed volumes. Set to true to allow kubelet to load kernel
-## modules.
-# kubelet_load_modules: false
-
-## Internal network total size. This is the prefix of the
-## entire network. Must be unused in your environment.
-#kube_network_prefix: 18
-
-## With calico it is possible to distributed routes with border routers of the datacenter.
-## Warning : enabling router peering will disable calico's default behavior ('node mesh').
-## The subnets of each nodes will be distributed by the datacenter router
-#peer_with_router: false
-
-## Upstream dns servers used by dnsmasq
-#upstream_dns_servers:
-# - 8.8.8.8
-# - 8.8.4.4
-
-## There are some changes specific to the cloud providers
-## for instance we need to encapsulate packets with some network plugins
-## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', or 'external'
-## When openstack is used make sure to source in the openstack credentials
-## like you would do when using nova-client before starting the playbook.
-#cloud_provider:
-
-## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (https://github.com/kubernetes/kubernetes/issues/50461)
-#openstack_blockstorage_version: "v1/v2/auto (default)"
-## When OpenStack is used, if LBaaSv2 is available you can enable it with the following variables.
-#openstack_lbaas_enabled: True
-#openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
-#openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
-#openstack_lbaas_create_monitor: "yes"
-#openstack_lbaas_monitor_delay: "1m"
-#openstack_lbaas_monitor_timeout: "30s"
-#openstack_lbaas_monitor_max_retries: "3"
-
-## Uncomment to enable experimental kubeadm deployment mode
-#kubeadm_enabled: false
-#kubeadm_token_first: "{{ lookup('password', 'credentials/kubeadm_token_first length=6 chars=ascii_lowercase,digits') }}"
-#kubeadm_token_second: "{{ lookup('password', 'credentials/kubeadm_token_second length=16 chars=ascii_lowercase,digits') }}"
-#kubeadm_token: "{{ kubeadm_token_first }}.{{ kubeadm_token_second }}"
-#
-## Set these proxy values in order to update package manager and docker daemon to use proxies
-#http_proxy: ""
-#https_proxy: ""
-## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
-#no_proxy: ""
-
-## Uncomment this if you want to force overlay/overlay2 as docker storage driver
-## Please note that overlay2 is only supported on newer kernels
-#docker_storage_options: -s overlay2
-
-# Uncomment this if you have more than 3 nameservers, then we'll only use the first 3.
-#docker_dns_servers_strict: false
-
-## Default packages to install within the cluster, f.e:
-#kpm_packages:
-# - name: kube-system/grafana
-
-## Certificate Management
-## This setting determines whether certs are generated via scripts or whether a
-## cluster of Hashicorp's Vault is started to issue certificates (using etcd
-## as a backend). Options are "script" or "vault"
-#cert_management: script
-
-# Set to true to allow pre-checks to fail and continue deployment
-#ignore_assert_errors: false
-
-## Etcd auto compaction retention for mvcc key value store in hour
-#etcd_compaction_retention: 0
-
-## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics.
-#etcd_metrics: basic
-
-
-# Kubernetes configuration dirs and system namespace.
-# Those are where all the additional config stuff goes
-# kubernetes normally puts in /srv/kubernetes.
-# This puts them in a sane location and namespace.
-# Editing those values will almost surely break something.
-kube_config_dir: /etc/kubernetes
-kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
-kube_manifest_dir: "{{ kube_config_dir }}/manifests"
-system_namespace: kube-system
-
-# Logging directory (sysvinit systems)
-kube_log_dir: "/var/log/kubernetes"
-
-# This is where all the cert scripts and certs will be located
-kube_cert_dir: "{{ kube_config_dir }}/ssl"
-
-# This is where all of the bearer tokens will be stored
-kube_token_dir: "{{ kube_config_dir }}/tokens"
-
-# This is where to save basic auth file
-kube_users_dir: "{{ kube_config_dir }}/users"
-
-kube_api_anonymous_auth: false
-
-## Change this to use another Kubernetes version, e.g. a current beta release
-#kube_version: v1.9.0
-
-# Where the binaries will be downloaded.
-# Note: ensure that you've enough disk space (about 1G)
-local_release_dir: "/tmp/releases"
-# Random shifts for retrying failed ops like pushing/downloading
-retry_stagger: 5
-
-# This is the group that the cert creation scripts chgrp the
-# cert files to. Not really changable...
-kube_cert_group: kube-cert
-
-# Cluster Loglevel configuration
-kube_log_level: 2
-
-# Users to create for basic auth in Kubernetes API via HTTP
-# Optionally add groups for user
-kube_api_pwd: "{{ lookup('password', 'credentials/kube_user length=15 chars=ascii_letters,digits') }}"
-kube_users:
-  kube:
-    pass: "{{kube_api_pwd}}"
-    role: admin
-    groups:
-      - system:masters
-
-## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth)
-#kube_oidc_auth: false
-kube_basic_auth: true
-#kube_token_auth: false
-
-
-## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/
-## To use OpenID you have to deploy additional an OpenID Provider (e.g Dex, Keycloak, ...)
-
-# kube_oidc_url: https:// ...
-# kube_oidc_client_id: kubernetes
-## Optional settings for OIDC
-# kube_oidc_ca_file: {{ kube_cert_dir }}/ca.pem
-# kube_oidc_username_claim: sub
-# kube_oidc_groups_claim: groups
-
-
-# Choose network plugin (calico, contiv, weave or flannel)
-# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
-kube_network_plugin: cloud
-
-# weave's network password for encryption
-# if null then no network encryption
-# you can use --extra-vars to pass the password in command line
-weave_password: EnterPasswordHere
-
-# Weave uses consensus mode by default
-# Enabling seed mode allow to dynamically add or remove hosts
-# https://www.weave.works/docs/net/latest/ipam/
-weave_mode_seed: false
-
-# This two variable are automatically changed by the weave's role, do not manually change these values
-# To reset values :
-# weave_seed: uninitialized
-# weave_peers: uninitialized
-weave_seed: uninitialized
-weave_peers: uninitialized
-
-# Enable kubernetes network policies
-enable_network_policy: false
-
-# Kubernetes internal network for services, unused block of space.
-kube_service_addresses: 10.233.0.0/18
-
-# internal network. When used, it will assign IP
-# addresses from this range to individual pods.
-# This network must be unused in your network infrastructure!
-kube_pods_subnet: 10.233.64.0/18
-
-# internal network node size allocation (optional). This is the size allocated
-# to each node on your network. With these defaults you should have
-# room for 4096 nodes with 254 pods per node.
-kube_network_node_prefix: 24
-
-# The port the API Server will be listening on.
-kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
-kube_apiserver_port: 6443 # (https)
-kube_apiserver_insecure_port: 8080 # (http)
-
-# DNS configuration.
-# Kubernetes cluster name, also will be used as DNS domain
-cluster_name: cluster.local
-# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
-ndots: 2
-# Can be dnsmasq_kubedns, kubedns or none
-dns_mode: kubedns
-# Can be docker_dns, host_resolvconf or none
-resolvconf_mode: docker_dns
-# Deploy netchecker app to verify DNS resolve as an HTTP service
-deploy_netchecker: false
-# Ip address of the kubernetes skydns service
-skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
-dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
-dns_domain: "{{ cluster_name }}"
-
-# Path used to store Docker data
-docker_daemon_graph: "/var/lib/docker"
-
-## A string of extra options to pass to the docker daemon.
-## This string should be exactly as you wish it to appear.
-## An obvious use case is allowing insecure-registry access
-## to self hosted registries like so:
-
-docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} {{ docker_log_opts }}"
-docker_bin_dir: "/usr/bin"
-
-# Settings for containerized control plane (etcd/kubelet/secrets)
-etcd_deployment_type: docker
-kubelet_deployment_type: host
-vault_deployment_type: docker
-helm_deployment_type: host
-
-# K8s image pull policy (imagePullPolicy)
-k8s_image_pull_policy: IfNotPresent
-
-# Kubernetes dashboard
-# RBAC required. see docs/getting-started.md for access details.
-dashboard_enabled: true
-
-# Monitoring apps for k8s
-efk_enabled: false
-
-# Helm deployment
-helm_enabled: false
-
-# Istio deployment
-istio_enabled: false
-
-# Local volume provisioner deployment
-local_volumes_enabled: false
-
-# Add Persistent Volumes Storage Class for corresponding cloud provider ( OpenStack is only supported now )
-persistent_volumes_enabled: false
-
-# Make a copy of kubeconfig on the host that runs Ansible in GITDIR/artifacts
-kubeconfig_localhost: true
-# Download kubectl onto the host that runs Ansible in GITDIR/artifacts
-kubectl_localhost: true
-artifacts_dir: "{{ ansible_env.HOME }}"
-
-# dnsmasq
-# dnsmasq_upstream_dns_servers:
-# - /resolvethiszone.with/10.0.4.250
-# - 8.8.8.8
-
-# Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created. (default true)
-# kubelet_cgroups_per_qos: true
-
-# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet.
-# Acceptible options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
-# kubelet_enforce_node_allocatable: pods
-
-## Supplementary addresses that can be added in kubernetes ssl keys.
-## That can be useful, for example, to set up a keepalived virtual IP
-# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3]
diff --git a/xci/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature/tasks/.gitkeep b/xci/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature/tasks/.gitkeep
deleted file mode 100644
index e69de29b..00000000
--- a/xci/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature/tasks/.gitkeep
+++ /dev/null
diff --git a/xci/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature/tasks/main.yml b/xci/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature/tasks/main.yml
deleted file mode 100644
index 5b2939f1..00000000
--- a/xci/scenarios/k8-nosdn-nofeature/role/k8-nosdn-nofeature/tasks/main.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-##############################################################################
-# Copyright (c) 2018 HUAWEI TECHNOLOGIES CO.,LTD and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
-- name: copy k8s-cluster.yml
-  copy:
-    src: "k8s-cluster.yml"
-    dest: "{{ remote_xci_path }}/.cache/repos/kubespray/opnfv_inventory/group_vars/k8s-cluster.yml"
diff --git a/xci/scenarios/os-nosdn-nofeature/README.rst b/xci/scenarios/os-nosdn-nofeature/README.rst
deleted file mode 100644
index dcdc83fc..00000000
--- a/xci/scenarios/os-nosdn-nofeature/README.rst
+++ /dev/null
@@ -1,2 +0,0 @@
-This scenario is currently incomplete. In order for it to be
-complete, changes for CEPH must be moved here, combining OVS + CEPH.
diff --git a/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/ha/openstack_user_config.yml b/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/ha/openstack_user_config.yml
deleted file mode 100644
index 1aaf84d8..00000000
--- a/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/ha/openstack_user_config.yml
+++ /dev/null
@@ -1,255 +0,0 @@
----
-cidr_networks:
-  container: 172.29.236.0/22
-  tunnel: 172.29.240.0/22
-  storage: 172.29.244.0/22
-
-used_ips:
-  - "172.29.236.1,172.29.236.50"
-  - "172.29.240.1,172.29.240.50"
-  - "172.29.244.1,172.29.244.50"
-  - "172.29.248.1,172.29.248.50"
-  - "172.29.236.222"
-
-global_overrides:
-  internal_lb_vip_address: 172.29.236.222
-  external_lb_vip_address: 192.168.122.220
-  tunnel_bridge: "br-vxlan"
-  management_bridge: "br-mgmt"
-  provider_networks:
-    - network:
-        container_bridge: "br-mgmt"
-        container_type: "veth"
-        container_interface: "eth1"
-        ip_from_q: "container"
-        type: "raw"
-        group_binds:
-          - all_containers
-          - hosts
-        is_container_address: true
-        is_ssh_address: true
-    - network:
-        container_bridge: "br-vxlan"
-        container_type: "veth"
-        container_interface: "eth10"
-        ip_from_q: "tunnel"
-        type: "vxlan"
-        range: "1:1000"
-        net_name: "vxlan"
-        group_binds:
-          - neutron_openvswitch_agent
-    - network:
-        container_bridge: "br-vlan"
-        container_type: "veth"
-        container_interface: "eth12"
-        host_bind_override: "eth12"
-        type: "flat"
-        net_name: "flat"
-        group_binds:
-          - neutron_openvswitch_agent
-    - network:
-        container_bridge: "br-vlan"
-        container_type: "veth"
-        container_interface: "eth11"
-        type: "vlan"
-        range: "1:1"
-        net_name: "vlan"
-        group_binds:
-          - neutron_openvswitch_agent
-    - network:
-        container_bridge: "br-storage"
-        container_type: "veth"
-        container_interface: "eth2"
- ip_from_q: "storage" - type: "raw" - group_binds: - - glance_api - - cinder_api - - cinder_volume - - nova_compute - -# ## -# ## Infrastructure -# ## - -# galera, memcache, rabbitmq, utility -shared-infra_hosts: - controller00: - ip: 172.29.236.11 - controller01: - ip: 172.29.236.12 - controller02: - ip: 172.29.236.13 - -# repository (apt cache, python packages, etc) -repo-infra_hosts: - controller00: - ip: 172.29.236.11 - controller01: - ip: 172.29.236.12 - controller02: - ip: 172.29.236.13 - -# load balancer -# Ideally the load balancer should not use the Infrastructure hosts. -# Dedicated hardware is best for improved performance and security. -haproxy_hosts: - controller00: - ip: 172.29.236.11 - controller01: - ip: 172.29.236.12 - controller02: - ip: 172.29.236.13 - -# rsyslog server -# log_hosts: -# log1: -# ip: 172.29.236.14 - -# ## -# ## OpenStack -# ## - -# keystone -identity_hosts: - controller00: - ip: 172.29.236.11 - controller01: - ip: 172.29.236.12 - controller02: - ip: 172.29.236.13 - -# cinder api services -storage-infra_hosts: - controller00: - ip: 172.29.236.11 - controller01: - ip: 172.29.236.12 - controller02: - ip: 172.29.236.13 - -# glance -# The settings here are repeated for each infra host. -# They could instead be applied as global settings in -# user_variables, but are left here to illustrate that -# each container could have different storage targets. -image_hosts: - controller00: - ip: 172.29.236.11 - container_vars: - limit_container_types: glance - glance_nfs_client: - - server: "172.29.244.14" - remote_path: "/images" - local_path: "/var/lib/glance/images" - type: "nfs" - options: "_netdev,auto" - controller01: - ip: 172.29.236.12 - container_vars: - limit_container_types: glance - glance_nfs_client: - - server: "172.29.244.14" - remote_path: "/images" - local_path: "/var/lib/glance/images" - type: "nfs" - options: "_netdev,auto" - controller02: - ip: 172.29.236.13 - container_vars: - limit_container_types: glance - glance_nfs_client: - - server: "172.29.244.14" - remote_path: "/images" - local_path: "/var/lib/glance/images" - type: "nfs" - options: "_netdev,auto" - -# nova api, conductor, etc services -compute-infra_hosts: - controller00: - ip: 172.29.236.11 - controller01: - ip: 172.29.236.12 - controller02: - ip: 172.29.236.13 - -# heat -orchestration_hosts: - controller00: - ip: 172.29.236.11 - controller01: - ip: 172.29.236.12 - controller02: - ip: 172.29.236.13 - -# horizon -dashboard_hosts: - controller00: - ip: 172.29.236.11 - controller01: - ip: 172.29.236.12 - controller02: - ip: 172.29.236.13 - -# neutron server, agents (L3, etc) -network_hosts: - controller00: - ip: 172.29.236.11 - controller01: - ip: 172.29.236.12 - controller02: - ip: 172.29.236.13 - -# nova hypervisors -compute_hosts: - compute00: - ip: 172.29.236.14 - compute01: - ip: 172.29.236.15 - -# cinder volume hosts (NFS-backed) -# The settings here are repeated for each infra host. -# They could instead be applied as global settings in -# user_variables, but are left here to illustrate that -# each container could have different storage targets. 
-storage_hosts: - controller00: - ip: 172.29.236.11 - container_vars: - cinder_backends: - limit_container_types: cinder_volume - nfs_volume: - volume_backend_name: NFS_VOLUME1 - volume_driver: cinder.volume.drivers.nfs.NfsDriver - nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120" - nfs_shares_config: /etc/cinder/nfs_shares - shares: - - ip: "172.29.244.14" - share: "/volumes" - controller01: - ip: 172.29.236.12 - container_vars: - cinder_backends: - limit_container_types: cinder_volume - nfs_volume: - volume_backend_name: NFS_VOLUME1 - volume_driver: cinder.volume.drivers.nfs.NfsDriver - nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120" - nfs_shares_config: /etc/cinder/nfs_shares - shares: - - ip: "172.29.244.14" - share: "/volumes" - controller02: - ip: 172.29.236.13 - container_vars: - cinder_backends: - limit_container_types: cinder_volume - nfs_volume: - volume_backend_name: NFS_VOLUME1 - volume_driver: cinder.volume.drivers.nfs.NfsDriver - nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120" - nfs_shares_config: /etc/cinder/nfs_shares - shares: - - ip: "172.29.244.14" - share: "/volumes" diff --git a/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/mini/openstack_user_config.yml b/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/mini/openstack_user_config.yml deleted file mode 100644 index 86b87c15..00000000 --- a/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/mini/openstack_user_config.yml +++ /dev/null @@ -1,170 +0,0 @@ ---- -cidr_networks: - container: 172.29.236.0/22 - tunnel: 172.29.240.0/22 - storage: 172.29.244.0/22 - -used_ips: - - "172.29.236.1,172.29.236.50" - - "172.29.240.1,172.29.240.50" - - "172.29.244.1,172.29.244.50" - - "172.29.248.1,172.29.248.50" - -global_overrides: - internal_lb_vip_address: 172.29.236.11 - external_lb_vip_address: 192.168.122.3 - tunnel_bridge: "br-vxlan" - management_bridge: "br-mgmt" - provider_networks: - - network: - container_bridge: "br-mgmt" - container_type: "veth" - container_interface: "eth1" - ip_from_q: "container" - type: "raw" - group_binds: - - all_containers - - hosts - is_container_address: true - is_ssh_address: true - - network: - container_bridge: "br-vxlan" - container_type: "veth" - container_interface: "eth10" - ip_from_q: "tunnel" - type: "vxlan" - range: "1:1000" - net_name: "vxlan" - group_binds: - - neutron_openvswitch_agent - - network: - container_bridge: "br-vlan" - container_type: "veth" - container_interface: "eth12" - host_bind_override: "eth12" - type: "flat" - net_name: "flat" - group_binds: - - neutron_openvswitch_agent - - network: - container_bridge: "br-vlan" - container_type: "veth" - container_interface: "eth11" - type: "vlan" - range: "1:1" - net_name: "vlan" - group_binds: - - neutron_openvswitch_agent - - network: - container_bridge: "br-storage" - container_type: "veth" - container_interface: "eth2" - ip_from_q: "storage" - type: "raw" - group_binds: - - glance_api - - cinder_api - - cinder_volume - - nova_compute - -# ## -# ## Infrastructure -# ## - -# galera, memcache, rabbitmq, utility -shared-infra_hosts: - controller00: - ip: 172.29.236.11 - -# repository (apt cache, python packages, etc) -repo-infra_hosts: - controller00: - ip: 172.29.236.11 - -# load balancer -# Ideally the load balancer should not use the Infrastructure hosts. -# Dedicated hardware is best for improved performance and security. 
-haproxy_hosts: - controller00: - ip: 172.29.236.11 - -# rsyslog server -# log_hosts: -# log1: -# ip: 172.29.236.14 - -# ## -# ## OpenStack -# ## - -# keystone -identity_hosts: - controller00: - ip: 172.29.236.11 - -# cinder api services -storage-infra_hosts: - controller00: - ip: 172.29.236.11 - -# glance -# The settings here are repeated for each infra host. -# They could instead be applied as global settings in -# user_variables, but are left here to illustrate that -# each container could have different storage targets. -image_hosts: - controller00: - ip: 172.29.236.11 - container_vars: - limit_container_types: glance - glance_nfs_client: - - server: "172.29.244.12" - remote_path: "/images" - local_path: "/var/lib/glance/images" - type: "nfs" - options: "_netdev,auto" - -# nova api, conductor, etc services -compute-infra_hosts: - controller00: - ip: 172.29.236.11 - -# heat -orchestration_hosts: - controller00: - ip: 172.29.236.11 - -# horizon -dashboard_hosts: - controller00: - ip: 172.29.236.11 - -# neutron server, agents (L3, etc) -network_hosts: - controller00: - ip: 172.29.236.11 - -# nova hypervisors -compute_hosts: - compute00: - ip: 172.29.236.12 - -# cinder volume hosts (NFS-backed) -# The settings here are repeated for each infra host. -# They could instead be applied as global settings in -# user_variables, but are left here to illustrate that -# each container could have different storage targets. -storage_hosts: - controller00: - ip: 172.29.236.11 - container_vars: - cinder_backends: - limit_container_types: cinder_volume - nfs_volume: - volume_backend_name: NFS_VOLUME1 - volume_driver: cinder.volume.drivers.nfs.NfsDriver - nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120" - nfs_shares_config: /etc/cinder/nfs_shares - shares: - - ip: "172.29.244.12" - share: "/volumes" diff --git a/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/noha/openstack_user_config.yml b/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/noha/openstack_user_config.yml deleted file mode 100644 index 99b768c4..00000000 --- a/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/noha/openstack_user_config.yml +++ /dev/null @@ -1,172 +0,0 @@ ---- -cidr_networks: - container: 172.29.236.0/22 - tunnel: 172.29.240.0/22 - storage: 172.29.244.0/22 - -used_ips: - - "172.29.236.1,172.29.236.50" - - "172.29.240.1,172.29.240.50" - - "172.29.244.1,172.29.244.50" - - "172.29.248.1,172.29.248.50" - -global_overrides: - internal_lb_vip_address: 172.29.236.11 - external_lb_vip_address: 192.168.122.3 - tunnel_bridge: "br-vxlan" - management_bridge: "br-mgmt" - provider_networks: - - network: - container_bridge: "br-mgmt" - container_type: "veth" - container_interface: "eth1" - ip_from_q: "container" - type: "raw" - group_binds: - - all_containers - - hosts - is_container_address: true - is_ssh_address: true - - network: - container_bridge: "br-vxlan" - container_type: "veth" - container_interface: "eth10" - ip_from_q: "tunnel" - type: "vxlan" - range: "1:1000" - net_name: "vxlan" - group_binds: - - neutron_openvswitch_agent - - network: - container_bridge: "br-vlan" - container_type: "veth" - container_interface: "eth12" - host_bind_override: "eth12" - type: "flat" - net_name: "flat" - group_binds: - - neutron_openvswitch_agent - - network: - container_bridge: "br-vlan" - container_type: "veth" - container_interface: "eth11" - type: "vlan" - range: "1:1" - net_name: "vlan" - group_binds: - - neutron_openvswitch_agent - - network: - container_bridge: "br-storage" - 
container_type: "veth" - container_interface: "eth2" - ip_from_q: "storage" - type: "raw" - group_binds: - - glance_api - - cinder_api - - cinder_volume - - nova_compute - -# ## -# ## Infrastructure -# ## - -# galera, memcache, rabbitmq, utility -shared-infra_hosts: - controller00: - ip: 172.29.236.11 - -# repository (apt cache, python packages, etc) -repo-infra_hosts: - controller00: - ip: 172.29.236.11 - -# load balancer -# Ideally the load balancer should not use the Infrastructure hosts. -# Dedicated hardware is best for improved performance and security. -haproxy_hosts: - controller00: - ip: 172.29.236.11 - -# rsyslog server -# log_hosts: -# log1: -# ip: 172.29.236.14 - -# ## -# ## OpenStack -# ## - -# keystone -identity_hosts: - controller00: - ip: 172.29.236.11 - -# cinder api services -storage-infra_hosts: - controller00: - ip: 172.29.236.11 - -# glance -# The settings here are repeated for each infra host. -# They could instead be applied as global settings in -# user_variables, but are left here to illustrate that -# each container could have different storage targets. -image_hosts: - controller00: - ip: 172.29.236.11 - container_vars: - limit_container_types: glance - glance_nfs_client: - - server: "172.29.244.12" - remote_path: "/images" - local_path: "/var/lib/glance/images" - type: "nfs" - options: "_netdev,auto" - -# nova api, conductor, etc services -compute-infra_hosts: - controller00: - ip: 172.29.236.11 - -# heat -orchestration_hosts: - controller00: - ip: 172.29.236.11 - -# horizon -dashboard_hosts: - controller00: - ip: 172.29.236.11 - -# neutron server, agents (L3, etc) -network_hosts: - controller00: - ip: 172.29.236.11 - -# nova hypervisors -compute_hosts: - compute00: - ip: 172.29.236.12 - compute01: - ip: 172.29.236.13 - -# cinder volume hosts (NFS-backed) -# The settings here are repeated for each infra host. -# They could instead be applied as global settings in -# user_variables, but are left here to illustrate that -# each container could have different storage targets. -storage_hosts: - controller00: - ip: 172.29.236.11 - container_vars: - cinder_backends: - limit_container_types: cinder_volume - nfs_volume: - volume_backend_name: NFS_VOLUME1 - volume_driver: cinder.volume.drivers.nfs.NfsDriver - nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120" - nfs_shares_config: /etc/cinder/nfs_shares - shares: - - ip: "172.29.244.12" - share: "/volumes" diff --git a/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/user_variables_os-nosdn-nofeature.yml b/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/user_variables_os-nosdn-nofeature.yml deleted file mode 100644 index 2f678544..00000000 --- a/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/files/user_variables_os-nosdn-nofeature.yml +++ /dev/null @@ -1,35 +0,0 @@ ---- -# Copyright (c) 2017 Ericsson AB and others. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# ## -# ## This file contains commonly used overrides for convenience. 
Please inspect
-# ## the defaults for each role to find additional override options.
-# ##
-
-# Ensure the openvswitch kernel module is loaded
-openstack_host_specific_kernel_modules:
-  - name: "openvswitch"
-    pattern: "CONFIG_OPENVSWITCH"
-    group: "network_hosts"
-
-# Neutron-specific config
-neutron_plugin_type: ml2.ovs
-
-neutron_ml2_drivers_type: "flat,vlan,vxlan"
-
-neutron_provider_networks:
-  network_flat_networks: "*"
-  network_types: "vxlan"
-  network_vxlan_ranges: "1:1000"
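
As the comments in the openstack_user_config.yml files above point out, the per-host glance_nfs_client stanzas are repeated only to illustrate per-container storage targets; the same mount can be declared once as a global override. A hypothetical sketch, reusing the server address and paths from the ha file above (the scenario tasks in this diff already rely on OSA loading the user_*.yml files in its config directory):

# Hypothetical global override: mount the glance NFS share in every
# glance container instead of repeating it under each image_hosts entry.
cat >> /etc/openstack_deploy/user_variables.yml <<'EOF'
glance_nfs_client:
  - server: "172.29.244.14"
    remote_path: "/images"
    local_path: "/var/lib/glance/images"
    type: "nfs"
    options: "_netdev,auto"
EOF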
\ No newline at end of file diff --git a/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/tasks/main.yml b/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/tasks/main.yml deleted file mode 100644 index 79aa3aa1..00000000 --- a/xci/scenarios/os-nosdn-nofeature/role/os-nosdn-nofeature/tasks/main.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -# SPDX-license-identifier: Apache-2.0 -############################################################################## -# Copyright (c) 2017 Ericsson AB and others. -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -############################################################################## - -- name: copy user_variables_os-nosdn-nofeature.yml - copy: - src: "user_variables_os-nosdn-nofeature.yml" - dest: "{{openstack_osa_etc_path}}/user_variables_os-nosdn-nofeature.yml" -- name: copy os-nosdn-nofeature scenario specific openstack_user_config.yml - copy: - src: "{{xci_flavor}}/openstack_user_config.yml" - dest: "{{openstack_osa_etc_path}}/openstack_user_config.yml" diff --git a/xci/scenarios/os-odl-nofeature/.gitkeep b/xci/scenarios/os-odl-nofeature/.gitkeep deleted file mode 100644 index e69de29b..00000000 --- a/xci/scenarios/os-odl-nofeature/.gitkeep +++ /dev/null diff --git a/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/ha/openstack_user_config.yml b/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/ha/openstack_user_config.yml deleted file mode 100644 index 2ca5a987..00000000 --- a/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/ha/openstack_user_config.yml +++ /dev/null @@ -1,256 +0,0 @@ ---- -cidr_networks: - container: 172.29.236.0/22 - tunnel: 172.29.240.0/22 - storage: 172.29.244.0/22 - -used_ips: - - "172.29.236.1,172.29.236.50" - - "172.29.240.1,172.29.240.50" - - "172.29.244.1,172.29.244.50" - - "172.29.248.1,172.29.248.50" - - "172.29.236.222" - -global_overrides: - internal_lb_vip_address: 172.29.236.222 - external_lb_vip_address: 192.168.122.220 - tunnel_bridge: "br-vxlan" - management_bridge: "br-mgmt" - provider_networks: - - network: - container_bridge: "br-mgmt" - container_type: "veth" - container_interface: "eth1" - ip_from_q: "container" - type: "raw" - group_binds: - - all_containers - - hosts - is_container_address: true - is_ssh_address: true - - network: - container_bridge: "br-vxlan" - container_type: "veth" - container_interface: "eth10" - ip_from_q: "tunnel" - type: "vxlan" - range: "1:1000" - net_name: "vxlan" - group_binds: - - neutron_openvswitch_agent - - network: - container_bridge: "br-vlan" - container_type: "veth" - container_interface: "eth12" - host_bind_override: "eth12" - type: "flat" - net_name: "flat" - group_binds: - - neutron_openvswitch_agent - - network: - container_bridge: "br-vlan" - container_type: "veth" - container_interface: "eth11" - host_bind_override: "eth12" - type: "vlan" - range: "102:199" - net_name: "physnet1" - group_binds: - - neutron_openvswitch_agent - - network: - container_bridge: "br-storage" - container_type: "veth" - container_interface: "eth2" - ip_from_q: "storage" - type: "raw" - group_binds: - - glance_api - - cinder_api - - cinder_volume - - nova_compute - -# ## -# ## Infrastructure -# ## - -# galera, memcache, rabbitmq, utility -shared-infra_hosts: - controller00: - ip: 172.29.236.11 - controller01: - ip: 172.29.236.12 - controller02: - ip: 
172.29.236.13 - -# repository (apt cache, python packages, etc) -repo-infra_hosts: - controller00: - ip: 172.29.236.11 - controller01: - ip: 172.29.236.12 - controller02: - ip: 172.29.236.13 - -# load balancer -# Ideally the load balancer should not use the Infrastructure hosts. -# Dedicated hardware is best for improved performance and security. -haproxy_hosts: - controller00: - ip: 172.29.236.11 - controller01: - ip: 172.29.236.12 - controller02: - ip: 172.29.236.13 - -# rsyslog server -# log_hosts: -# log1: -# ip: 172.29.236.14 - -# ## -# ## OpenStack -# ## - -# keystone -identity_hosts: - controller00: - ip: 172.29.236.11 - controller01: - ip: 172.29.236.12 - controller02: - ip: 172.29.236.13 - -# cinder api services -storage-infra_hosts: - controller00: - ip: 172.29.236.11 - controller01: - ip: 172.29.236.12 - controller02: - ip: 172.29.236.13 - -# glance -# The settings here are repeated for each infra host. -# They could instead be applied as global settings in -# user_variables, but are left here to illustrate that -# each container could have different storage targets. -image_hosts: - controller00: - ip: 172.29.236.11 - container_vars: - limit_container_types: glance - glance_nfs_client: - - server: "172.29.244.14" - remote_path: "/images" - local_path: "/var/lib/glance/images" - type: "nfs" - options: "_netdev,auto" - controller01: - ip: 172.29.236.12 - container_vars: - limit_container_types: glance - glance_nfs_client: - - server: "172.29.244.14" - remote_path: "/images" - local_path: "/var/lib/glance/images" - type: "nfs" - options: "_netdev,auto" - controller02: - ip: 172.29.236.13 - container_vars: - limit_container_types: glance - glance_nfs_client: - - server: "172.29.244.14" - remote_path: "/images" - local_path: "/var/lib/glance/images" - type: "nfs" - options: "_netdev,auto" - -# nova api, conductor, etc services -compute-infra_hosts: - controller00: - ip: 172.29.236.11 - controller01: - ip: 172.29.236.12 - controller02: - ip: 172.29.236.13 - -# heat -orchestration_hosts: - controller00: - ip: 172.29.236.11 - controller01: - ip: 172.29.236.12 - controller02: - ip: 172.29.236.13 - -# horizon -dashboard_hosts: - controller00: - ip: 172.29.236.11 - controller01: - ip: 172.29.236.12 - controller02: - ip: 172.29.236.13 - -# neutron server, agents (L3, etc) -network_hosts: - controller00: - ip: 172.29.236.11 - controller01: - ip: 172.29.236.12 - controller02: - ip: 172.29.236.13 - -# nova hypervisors -compute_hosts: - compute00: - ip: 172.29.236.14 - compute01: - ip: 172.29.236.15 - -# cinder volume hosts (NFS-backed) -# The settings here are repeated for each infra host. -# They could instead be applied as global settings in -# user_variables, but are left here to illustrate that -# each container could have different storage targets. 
-storage_hosts: - controller00: - ip: 172.29.236.11 - container_vars: - cinder_backends: - limit_container_types: cinder_volume - nfs_volume: - volume_backend_name: NFS_VOLUME1 - volume_driver: cinder.volume.drivers.nfs.NfsDriver - nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120" - nfs_shares_config: /etc/cinder/nfs_shares - shares: - - ip: "172.29.244.14" - share: "/volumes" - controller01: - ip: 172.29.236.12 - container_vars: - cinder_backends: - limit_container_types: cinder_volume - nfs_volume: - volume_backend_name: NFS_VOLUME1 - volume_driver: cinder.volume.drivers.nfs.NfsDriver - nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120" - nfs_shares_config: /etc/cinder/nfs_shares - shares: - - ip: "172.29.244.14" - share: "/volumes" - controller02: - ip: 172.29.236.13 - container_vars: - cinder_backends: - limit_container_types: cinder_volume - nfs_volume: - volume_backend_name: NFS_VOLUME1 - volume_driver: cinder.volume.drivers.nfs.NfsDriver - nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120" - nfs_shares_config: /etc/cinder/nfs_shares - shares: - - ip: "172.29.244.14" - share: "/volumes" diff --git a/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/ha/user_variables_os-odl-nofeature-ha.yml b/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/ha/user_variables_os-odl-nofeature-ha.yml deleted file mode 100644 index 25cd6839..00000000 --- a/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/ha/user_variables_os-odl-nofeature-ha.yml +++ /dev/null @@ -1,22 +0,0 @@ ---- -# Copyright (c) 2017 Ericsson AB and others. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# ## -# ## This file contains commonly used overrides for convenience. Please inspect -# ## the defaults for each role to find additional override options. -# ## - -# Enable clustering for opendaylight -cluster: true
\ No newline at end of file diff --git a/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/mini/openstack_user_config.yml b/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/mini/openstack_user_config.yml deleted file mode 100644 index 0f8ccd18..00000000 --- a/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/mini/openstack_user_config.yml +++ /dev/null @@ -1,171 +0,0 @@ ---- -cidr_networks: - container: 172.29.236.0/22 - tunnel: 172.29.240.0/22 - storage: 172.29.244.0/22 - -used_ips: - - "172.29.236.1,172.29.236.50" - - "172.29.240.1,172.29.240.50" - - "172.29.244.1,172.29.244.50" - - "172.29.248.1,172.29.248.50" - -global_overrides: - internal_lb_vip_address: 172.29.236.11 - external_lb_vip_address: 192.168.122.3 - tunnel_bridge: "br-vxlan" - management_bridge: "br-mgmt" - provider_networks: - - network: - container_bridge: "br-mgmt" - container_type: "veth" - container_interface: "eth1" - ip_from_q: "container" - type: "raw" - group_binds: - - all_containers - - hosts - is_container_address: true - is_ssh_address: true - - network: - container_bridge: "br-vxlan" - container_type: "veth" - container_interface: "eth10" - ip_from_q: "tunnel" - type: "vxlan" - range: "1:1000" - net_name: "vxlan" - group_binds: - - neutron_openvswitch_agent - - network: - container_bridge: "br-vlan" - container_type: "veth" - container_interface: "eth12" - host_bind_override: "eth12" - type: "flat" - net_name: "flat" - group_binds: - - neutron_openvswitch_agent - - network: - container_bridge: "br-vlan" - container_type: "veth" - container_interface: "eth11" - host_bind_override: "eth12" - type: "vlan" - range: "102:199" - net_name: "physnet1" - group_binds: - - neutron_openvswitch_agent - - network: - container_bridge: "br-storage" - container_type: "veth" - container_interface: "eth2" - ip_from_q: "storage" - type: "raw" - group_binds: - - glance_api - - cinder_api - - cinder_volume - - nova_compute - -# ## -# ## Infrastructure -# ## - -# galera, memcache, rabbitmq, utility -shared-infra_hosts: - controller00: - ip: 172.29.236.11 - -# repository (apt cache, python packages, etc) -repo-infra_hosts: - controller00: - ip: 172.29.236.11 - -# load balancer -# Ideally the load balancer should not use the Infrastructure hosts. -# Dedicated hardware is best for improved performance and security. -haproxy_hosts: - controller00: - ip: 172.29.236.11 - -# rsyslog server -# log_hosts: -# log1: -# ip: 172.29.236.14 - -# ## -# ## OpenStack -# ## - -# keystone -identity_hosts: - controller00: - ip: 172.29.236.11 - -# cinder api services -storage-infra_hosts: - controller00: - ip: 172.29.236.11 - -# glance -# The settings here are repeated for each infra host. -# They could instead be applied as global settings in -# user_variables, but are left here to illustrate that -# each container could have different storage targets. 
-image_hosts: - controller00: - ip: 172.29.236.11 - container_vars: - limit_container_types: glance - glance_nfs_client: - - server: "172.29.244.12" - remote_path: "/images" - local_path: "/var/lib/glance/images" - type: "nfs" - options: "_netdev,auto" - -# nova api, conductor, etc services -compute-infra_hosts: - controller00: - ip: 172.29.236.11 - -# heat -orchestration_hosts: - controller00: - ip: 172.29.236.11 - -# horizon -dashboard_hosts: - controller00: - ip: 172.29.236.11 - -# neutron server, agents (L3, etc) -network_hosts: - controller00: - ip: 172.29.236.11 - -# nova hypervisors -compute_hosts: - compute00: - ip: 172.29.236.12 - -# cinder volume hosts (NFS-backed) -# The settings here are repeated for each infra host. -# They could instead be applied as global settings in -# user_variables, but are left here to illustrate that -# each container could have different storage targets. -storage_hosts: - controller00: - ip: 172.29.236.11 - container_vars: - cinder_backends: - limit_container_types: cinder_volume - nfs_volume: - volume_backend_name: NFS_VOLUME1 - volume_driver: cinder.volume.drivers.nfs.NfsDriver - nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120" - nfs_shares_config: /etc/cinder/nfs_shares - shares: - - ip: "172.29.244.12" - share: "/volumes" diff --git a/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/noha/openstack_user_config.yml b/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/noha/openstack_user_config.yml deleted file mode 100644 index 7ed9cd32..00000000 --- a/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/files/noha/openstack_user_config.yml +++ /dev/null @@ -1,173 +0,0 @@ ---- -cidr_networks: - container: 172.29.236.0/22 - tunnel: 172.29.240.0/22 - storage: 172.29.244.0/22 - -used_ips: - - "172.29.236.1,172.29.236.50" - - "172.29.240.1,172.29.240.50" - - "172.29.244.1,172.29.244.50" - - "172.29.248.1,172.29.248.50" - -global_overrides: - internal_lb_vip_address: 172.29.236.11 - external_lb_vip_address: 192.168.122.3 - tunnel_bridge: "br-vxlan" - management_bridge: "br-mgmt" - provider_networks: - - network: - container_bridge: "br-mgmt" - container_type: "veth" - container_interface: "eth1" - ip_from_q: "container" - type: "raw" - group_binds: - - all_containers - - hosts - is_container_address: true - is_ssh_address: true - - network: - container_bridge: "br-vxlan" - container_type: "veth" - container_interface: "eth10" - ip_from_q: "tunnel" - type: "vxlan" - range: "1:1000" - net_name: "vxlan" - group_binds: - - neutron_openvswitch_agent - - network: - container_bridge: "br-vlan" - container_type: "veth" - container_interface: "eth12" - host_bind_override: "eth12" - type: "flat" - net_name: "flat" - group_binds: - - neutron_openvswitch_agent - - network: - container_bridge: "br-vlan" - container_type: "veth" - container_interface: "eth11" - host_bind_override: "eth12" - type: "vlan" - range: "102:199" - net_name: "physnet1" - group_binds: - - neutron_openvswitch_agent - - network: - container_bridge: "br-storage" - container_type: "veth" - container_interface: "eth2" - ip_from_q: "storage" - type: "raw" - group_binds: - - glance_api - - cinder_api - - cinder_volume - - nova_compute - -# ## -# ## Infrastructure -# ## - -# galera, memcache, rabbitmq, utility -shared-infra_hosts: - controller00: - ip: 172.29.236.11 - -# repository (apt cache, python packages, etc) -repo-infra_hosts: - controller00: - ip: 172.29.236.11 - -# load balancer -# Ideally the load balancer should not use the Infrastructure hosts. 
-# Dedicated hardware is best for improved performance and security. -haproxy_hosts: - controller00: - ip: 172.29.236.11 - -# rsyslog server -# log_hosts: -# log1: -# ip: 172.29.236.14 - -# ## -# ## OpenStack -# ## - -# keystone -identity_hosts: - controller00: - ip: 172.29.236.11 - -# cinder api services -storage-infra_hosts: - controller00: - ip: 172.29.236.11 - -# glance -# The settings here are repeated for each infra host. -# They could instead be applied as global settings in -# user_variables, but are left here to illustrate that -# each container could have different storage targets. -image_hosts: - controller00: - ip: 172.29.236.11 - container_vars: - limit_container_types: glance - glance_nfs_client: - - server: "172.29.244.12" - remote_path: "/images" - local_path: "/var/lib/glance/images" - type: "nfs" - options: "_netdev,auto" - -# nova api, conductor, etc services -compute-infra_hosts: - controller00: - ip: 172.29.236.11 - -# heat -orchestration_hosts: - controller00: - ip: 172.29.236.11 - -# horizon -dashboard_hosts: - controller00: - ip: 172.29.236.11 - -# neutron server, agents (L3, etc) -network_hosts: - controller00: - ip: 172.29.236.11 - -# nova hypervisors -compute_hosts: - compute00: - ip: 172.29.236.12 - compute01: - ip: 172.29.236.13 - -# cinder volume hosts (NFS-backed) -# The settings here are repeated for each infra host. -# They could instead be applied as global settings in -# user_variables, but are left here to illustrate that -# each container could have different storage targets. -storage_hosts: - controller00: - ip: 172.29.236.11 - container_vars: - cinder_backends: - limit_container_types: cinder_volume - nfs_volume: - volume_backend_name: NFS_VOLUME1 - volume_driver: cinder.volume.drivers.nfs.NfsDriver - nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120" - nfs_shares_config: /etc/cinder/nfs_shares - shares: - - ip: "172.29.244.12" - share: "/volumes" diff --git a/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/tasks/main.yml b/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/tasks/main.yml deleted file mode 100644 index 7e872787..00000000 --- a/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/tasks/main.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -# SPDX-license-identifier: Apache-2.0 -############################################################################## -# Copyright (c) 2017 Ericsson AB and others. -# All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -############################################################################## - -- name: copy user_variables_os-odl-nofeature.yml - template: - src: "user_variables_os-odl-nofeature.yml.j2" - dest: "{{openstack_osa_etc_path}}/user_variables_os-odl-nofeature.yml" - -- name: copy user_variables_os-odl-nofeature-ha.yml - copy: - src: "{{xci_flavor}}/user_variables_os-odl-nofeature-ha.yml" - dest: "{{openstack_osa_etc_path}}/user_variables_os-odl-nofeature-ha.yml" - when: - - xci_flavor == "ha" - -- name: copy os-odl-nofeature scenario specific openstack_user_config.yml - copy: - src: "{{xci_flavor}}/openstack_user_config.yml" - dest: "{{openstack_osa_etc_path}}/openstack_user_config.yml" diff --git a/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/templates/user_variables_os-odl-nofeature.yml.j2 b/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/templates/user_variables_os-odl-nofeature.yml.j2 deleted file mode 100644 index eb08adc0..00000000 --- a/xci/scenarios/os-odl-nofeature/role/os-odl-nofeature/templates/user_variables_os-odl-nofeature.yml.j2 +++ /dev/null @@ -1,45 +0,0 @@ ---- -# Copyright (c) 2017 Ericsson AB and others. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# ## -# ## This file contains commonly used overrides for convenience. Please inspect -# ## the defaults for each role to find additional override options. 
-# ## - -{% raw %} -# Ensure the openvswitch kernel module is loaded -openstack_host_specific_kernel_modules: - - name: "openvswitch" - pattern: "CONFIG_OPENVSWITCH" - group: "network_hosts" - -# Use OpenDaylight SDN Controller -neutron_plugin_type: "ml2.opendaylight" -neutron_opendaylight_conf_ini_overrides: - ml2_odl: - username: "admin" - password: "admin" - port_binding_controller: "pseudo-agentdb-binding" - url: "http://{{ internal_lb_vip_address }}:8180/controller/nb/v2/neutron" - -neutron_ml2_drivers_type: "flat,vlan,vxlan" - -neutron_plugin_base: - - odl-router_v2 -{% endraw %} - -{% if odl_repo_version is defined %} -odl_version: "{{ odl_repo_version }}" -{% endif %} diff --git a/xci/scenarios/os-odl-nofeature/vars/main.yml b/xci/scenarios/os-odl-nofeature/vars/main.yml deleted file mode 100644 index 629b50c7..00000000 --- a/xci/scenarios/os-odl-nofeature/vars/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -odl_repo_version: "{{ lookup('env','ODL_VERSION') }}" diff --git a/xci/scenarios/os-odl-nofeature/xci_overrides b/xci/scenarios/os-odl-nofeature/xci_overrides deleted file mode 100644 index 2c65df0d..00000000 --- a/xci/scenarios/os-odl-nofeature/xci_overrides +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -if [[ $DEPLOY_SCENARIO == "os-odl-nofeature" ]] && [[ $XCI_FLAVOR == "ha" ]]; then - export VM_MEMORY_SIZE=20480 -elif [[ $DEPLOY_SCENARIO == "os-odl-nofeature" ]]; then - export VM_MEMORY_SIZE=16384 -fi diff --git a/xci/scripts/vm/start-new-vm.sh b/xci/scripts/vm/start-new-vm.sh index f266d64f..8f6effe0 100755 --- a/xci/scripts/vm/start-new-vm.sh +++ b/xci/scripts/vm/start-new-vm.sh @@ -118,7 +118,7 @@ fi COMMON_DISTRO_PKGS=(vim strace gdb htop dnsmasq docker iptables ebtables virt-manager qemu-kvm) case ${ID,,} in - *suse) + *suse*) pkg_mgr_cmd="sudo zypper -q -n ref" pkg_mgr_cmd+=" && sudo zypper -q -n install ${COMMON_DISTRO_PKGS[@]} qemu-tools libvirt-daemon libvirt-client libvirt-daemon-driver-qemu" ;; |
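
The final hunk widens the shell glob in start-new-vm.sh: /etc/os-release IDs such as "opensuse-leap" or "opensuse-tumbleweed" do not end in "suse", so the old pattern "*suse)" silently skipped them. A minimal, self-contained demonstration (the ID values are illustrative):

#!/bin/bash
# Compare the old and new case patterns against typical os-release IDs.
for id in opensuse opensuse-leap opensuse-tumbleweed ubuntu; do
    case ${id,,} in
        *suse) old=match ;;
        *) old=miss ;;
    esac
    case ${id,,} in
        *suse*) new=match ;;
        *) new=miss ;;
    esac
    printf '%-20s old(*suse)=%-6s new(*suse*)=%s\n' "${id}" "${old}" "${new}"
done
# opensuse             old(*suse)=match  new(*suse*)=match
# opensuse-leap        old(*suse)=miss   new(*suse*)=match
# opensuse-tumbleweed  old(*suse)=miss   new(*suse*)=match
# ubuntu               old(*suse)=miss   new(*suse*)=miss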