Diffstat (limited to 'scenarios/k8-flannel-nofeature')
-rw-r--r-- | scenarios/k8-flannel-nofeature/role/k8-flannel-nofeature/files/k8-cluster.yml | 292
-rw-r--r-- | scenarios/k8-flannel-nofeature/role/k8-flannel-nofeature/tasks/main.yml       |  17
2 files changed, 11 insertions, 298 deletions
diff --git a/scenarios/k8-flannel-nofeature/role/k8-flannel-nofeature/files/k8-cluster.yml b/scenarios/k8-flannel-nofeature/role/k8-flannel-nofeature/files/k8-cluster.yml
deleted file mode 100644
index 3c3dc5d..0000000
--- a/scenarios/k8-flannel-nofeature/role/k8-flannel-nofeature/files/k8-cluster.yml
+++ /dev/null
@@ -1,292 +0,0 @@
-# Valid bootstrap options (required): ubuntu, coreos, centos, none
-bootstrap_os: none
-
-#Directory where etcd data stored
-etcd_data_dir: /var/lib/etcd
-
-# Directory where the binaries will be installed
-bin_dir: /usr/local/bin
-
-## The access_ip variable is used to define how other nodes should access
-## the node. This is used in flannel to allow other flannel nodes to see
-## this node for example. The access_ip is really useful AWS and Google
-## environments where the nodes are accessed remotely by the "public" ip,
-## but don't know about that address themselves.
-#access_ip: 1.1.1.1
-
-### LOADBALANCING AND ACCESS MODES
-## Enable multiaccess to configure etcd clients to access all of the etcd members directly
-## as the "http://hostX:port, http://hostY:port, ..." and ignore the proxy loadbalancers.
-## This may be the case if clients support and loadbalance multiple etcd servers natively.
-#etcd_multiaccess: true
-
-## Internal loadbalancers for apiservers
-#loadbalancer_apiserver_localhost: true
-
-## Local loadbalancer should use this port instead, if defined.
-## Defaults to kube_apiserver_port (6443)
-#nginx_kube_apiserver_port: 8443
-
-### OTHER OPTIONAL VARIABLES
-## For some things, kubelet needs to load kernel modules. For example, dynamic kernel services are needed
-## for mounting persistent volumes into containers. These may not be loaded by preinstall kubernetes
-## processes. For example, ceph and rbd backed volumes. Set to true to allow kubelet to load kernel
-## modules.
-# kubelet_load_modules: false
-
-## Internal network total size. This is the prefix of the
-## entire network. Must be unused in your environment.
-#kube_network_prefix: 18
-
-## With calico it is possible to distributed routes with border routers of the datacenter.
-## Warning : enabling router peering will disable calico's default behavior ('node mesh').
-## The subnets of each nodes will be distributed by the datacenter router
-#peer_with_router: false
-
-## Upstream dns servers used by dnsmasq
-#upstream_dns_servers:
-# - 8.8.8.8
-# - 8.8.4.4
-
-## There are some changes specific to the cloud providers
-## for instance we need to encapsulate packets with some network plugins
-## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', or 'external'
-## When openstack is used make sure to source in the openstack credentials
-## like you would do when using nova-client before starting the playbook.
-#cloud_provider:
-
-## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (https://github.com/kubernetes/kubernetes/issues/50461)
-#openstack_blockstorage_version: "v1/v2/auto (default)"
-## When OpenStack is used, if LBaaSv2 is available you can enable it with the following variables.
-#openstack_lbaas_enabled: True
-#openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
-#openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
-#openstack_lbaas_create_monitor: "yes"
-#openstack_lbaas_monitor_delay: "1m"
-#openstack_lbaas_monitor_timeout: "30s"
-#openstack_lbaas_monitor_max_retries: "3"
-
-## Uncomment to enable experimental kubeadm deployment mode
-#kubeadm_enabled: false
-#kubeadm_token_first: "{{ lookup('password', 'credentials/kubeadm_token_first length=6 chars=ascii_lowercase,digits') }}"
-#kubeadm_token_second: "{{ lookup('password', 'credentials/kubeadm_token_second length=16 chars=ascii_lowercase,digits') }}"
-#kubeadm_token: "{{ kubeadm_token_first }}.{{ kubeadm_token_second }}"
-#
-## Set these proxy values in order to update package manager and docker daemon to use proxies
-#http_proxy: ""
-#https_proxy: ""
-## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
-#no_proxy: ""
-
-## Uncomment this if you want to force overlay/overlay2 as docker storage driver
-## Please note that overlay2 is only supported on newer kernels
-#docker_storage_options: -s overlay2
-
-# Uncomment this if you have more than 3 nameservers, then we'll only use the first 3.
-#docker_dns_servers_strict: false
-
-## Default packages to install within the cluster, f.e:
-#kpm_packages:
-# - name: kube-system/grafana
-
-## Certificate Management
-## This setting determines whether certs are generated via scripts or whether a
-## cluster of Hashicorp's Vault is started to issue certificates (using etcd
-## as a backend). Options are "script" or "vault"
-#cert_management: script
-
-# Set to true to allow pre-checks to fail and continue deployment
-#ignore_assert_errors: false
-
-## Etcd auto compaction retention for mvcc key value store in hour
-#etcd_compaction_retention: 0
-
-## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics.
-#etcd_metrics: basic
-
-
-# Kubernetes configuration dirs and system namespace.
-# Those are where all the additional config stuff goes
-# kubernetes normally puts in /srv/kubernetes.
-# This puts them in a sane location and namespace.
-# Editing those values will almost surely break something.
-kube_config_dir: /etc/kubernetes
-kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
-kube_manifest_dir: "{{ kube_config_dir }}/manifests"
-system_namespace: kube-system
-
-# Logging directory (sysvinit systems)
-kube_log_dir: "/var/log/kubernetes"
-
-# This is where all the cert scripts and certs will be located
-kube_cert_dir: "{{ kube_config_dir }}/ssl"
-
-# This is where all of the bearer tokens will be stored
-kube_token_dir: "{{ kube_config_dir }}/tokens"
-
-# This is where to save basic auth file
-kube_users_dir: "{{ kube_config_dir }}/users"
-
-kube_api_anonymous_auth: false
-
-## Change this to use another Kubernetes version, e.g. a current beta release
-#kube_version: v1.9.0
-
-# Where the binaries will be downloaded.
-# Note: ensure that you've enough disk space (about 1G)
-local_release_dir: "/tmp/releases"
-# Random shifts for retrying failed ops like pushing/downloading
-retry_stagger: 5
-
-# This is the group that the cert creation scripts chgrp the
-# cert files to. Not really changable...
-kube_cert_group: kube-cert
-
-# Cluster Loglevel configuration
-kube_log_level: 2
-
-# Users to create for basic auth in Kubernetes API via HTTP
-# Optionally add groups for user
-kube_api_pwd: "{{ lookup('password', 'credentials/kube_user length=15 chars=ascii_letters,digits') }}"
-kube_users:
-  kube:
-    pass: "{{kube_api_pwd}}"
-    role: admin
-    groups:
-      - system:masters
-
-## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth)
-#kube_oidc_auth: false
-kube_basic_auth: true
-#kube_token_auth: false
-
-
-## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/
-## To use OpenID you have to deploy additional an OpenID Provider (e.g Dex, Keycloak, ...)
-
-# kube_oidc_url: https:// ...
-# kube_oidc_client_id: kubernetes
-## Optional settings for OIDC
-# kube_oidc_ca_file: {{ kube_cert_dir }}/ca.pem
-# kube_oidc_username_claim: sub
-# kube_oidc_groups_claim: groups
-
-
-# Choose network plugin (calico, contiv, weave or flannel)
-# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
-kube_network_plugin: flannel
-
-# weave's network password for encryption
-# if null then no network encryption
-# you can use --extra-vars to pass the password in command line
-weave_password: EnterPasswordHere
-
-# Weave uses consensus mode by default
-# Enabling seed mode allow to dynamically add or remove hosts
-# https://www.weave.works/docs/net/latest/ipam/
-weave_mode_seed: false
-
-# This two variable are automatically changed by the weave's role, do not manually change these values
-# To reset values :
-# weave_seed: uninitialized
-# weave_peers: uninitialized
-weave_seed: uninitialized
-weave_peers: uninitialized
-
-# Enable kubernetes network policies
-enable_network_policy: false
-
-# Kubernetes internal network for services, unused block of space.
-kube_service_addresses: 10.233.0.0/18
-
-# internal network. When used, it will assign IP
-# addresses from this range to individual pods.
-# This network must be unused in your network infrastructure!
-kube_pods_subnet: 10.233.64.0/18
-
-# internal network node size allocation (optional). This is the size allocated
-# to each node on your network. With these defaults you should have
-# room for 4096 nodes with 254 pods per node.
-kube_network_node_prefix: 24
-
-# The port the API Server will be listening on.
-kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
-kube_apiserver_port: 6443 # (https)
-kube_apiserver_insecure_port: 8080 # (http)
-
-# DNS configuration.
-# Kubernetes cluster name, also will be used as DNS domain
-cluster_name: cluster.local
-# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
-ndots: 2
-# Can be dnsmasq_kubedns, kubedns or none
-dns_mode: kubedns
-# Can be docker_dns, host_resolvconf or none
-resolvconf_mode: docker_dns
-# Deploy netchecker app to verify DNS resolve as an HTTP service
-deploy_netchecker: false
-# Ip address of the kubernetes skydns service
-skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
-dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
-dns_domain: "{{ cluster_name }}"
-
-# Path used to store Docker data
-docker_daemon_graph: "/var/lib/docker"
-
-## A string of extra options to pass to the docker daemon.
-## This string should be exactly as you wish it to appear.
-## An obvious use case is allowing insecure-registry access
-## to self hosted registries like so:
-
-docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} {{ docker_log_opts }}"
-docker_bin_dir: "/usr/bin"
-
-# Settings for containerized control plane (etcd/kubelet/secrets)
-etcd_deployment_type: docker
-kubelet_deployment_type: host
-vault_deployment_type: docker
-helm_deployment_type: host
-
-# K8s image pull policy (imagePullPolicy)
-k8s_image_pull_policy: IfNotPresent
-
-# Kubernetes dashboard
-# RBAC required. see docs/getting-started.md for access details.
-dashboard_enabled: true
-
-# Monitoring apps for k8s
-efk_enabled: false
-
-# Helm deployment
-helm_enabled: false
-
-# Istio deployment
-istio_enabled: false
-
-# Local volume provisioner deployment
-local_volumes_enabled: false
-
-# Add Persistent Volumes Storage Class for corresponding cloud provider ( OpenStack is only supported now )
-persistent_volumes_enabled: false
-
-# Make a copy of kubeconfig on the host that runs Ansible in GITDIR/artifacts
-kubeconfig_localhost: true
-# Download kubectl onto the host that runs Ansible in GITDIR/artifacts
-kubectl_localhost: true
-artifacts_dir: "{{ ansible_env.HOME }}"
-
-# dnsmasq
-# dnsmasq_upstream_dns_servers:
-# - /resolvethiszone.with/10.0.4.250
-# - 8.8.8.8
-
-# Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created. (default true)
-# kubelet_cgroups_per_qos: true
-
-# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet.
-# Acceptible options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
-# kubelet_enforce_node_allocatable: pods
-
-## Supplementary addresses that can be added in kubernetes ssl keys.
-## That can be usefull for example to setup a keepalived virtual IP
-# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3]
diff --git a/scenarios/k8-flannel-nofeature/role/k8-flannel-nofeature/tasks/main.yml b/scenarios/k8-flannel-nofeature/role/k8-flannel-nofeature/tasks/main.yml
index 5efd7c8..fa63a9c 100644
--- a/scenarios/k8-flannel-nofeature/role/k8-flannel-nofeature/tasks/main.yml
+++ b/scenarios/k8-flannel-nofeature/role/k8-flannel-nofeature/tasks/main.yml
@@ -1,14 +1,19 @@
+---
 ##############################################################################
-# Copyright (c) 2018 taseer94@gmail.com & others.
+# Copyright (c) 2019 Ericsson Software Technology and others.
+# Copyright (c) 2018 HUAWEI TECHNOLOGIES CO.,LTD and others.
 #
 # All rights reserved. This program and the accompanying materials
 # are made available under the terms of the Apache License, Version 2.0
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
----
-- name: copy the k8-cluster config file
-  copy:
-    src: k8-cluster.yml
-    dest: "{{ remote_xci_path }}/.cache/repos/kubespray/opnfv_inventory/group_vars/k8s-cluster.yml"
+# set networking plugin to flannel
+- name: Set network plugin to flannel
+  lineinfile:
+    path: "{{ remote_xci_path }}/.cache/repos/kubespray/inventory/opnfv/group_vars/k8s-cluster/k8s-cluster.yml"
+    regexp: "^kube_network_plugin:.*"
+    line: "kube_network_plugin: flannel"
+
+# vim: set ts=2 sw=2 expandtab:
 
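For reference, the new lineinfile task can be exercised on its own with a minimal standalone playbook. This is a sketch only, not part of the change: the remote_xci_path value below is a hypothetical example, and it assumes an earlier step has already cloned kubespray into the XCI cache so that the target k8s-cluster.yml exists.

---
# sketch.yml -- applies the same edit the role makes, outside the role.
# Run with: ansible-playbook sketch.yml
- hosts: localhost
  connection: local
  gather_facts: false
  vars:
    # assumed location of the XCI work dir; adjust to your environment
    remote_xci_path: "/home/devuser/releng-xci/xci"
  tasks:
    # replace any existing kube_network_plugin line with the flannel setting
    - name: Set network plugin to flannel
      lineinfile:
        path: "{{ remote_xci_path }}/.cache/repos/kubespray/inventory/opnfv/group_vars/k8s-cluster/k8s-cluster.yml"
        regexp: "^kube_network_plugin:.*"
        line: "kube_network_plugin: flannel"

Because lineinfile only rewrites the matching line, the rest of kubespray's generated k8s-cluster.yml is left untouched, which is why the role no longer needs to ship the full 292-line k8-cluster.yml copy deleted above.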