author     Markos Chandras <mchandras@suse.de>  2018-04-17 10:39:04 +0000
committer  Gerrit Code Review <gerrit@opnfv.org>  2018-04-17 10:39:04 +0000
commit     c02dddca973bbd3dd160c41000d4036363e94db0 (patch)
tree       b4c7111a92a936d7b8099f0b9a9e57014c9eefde /xci/installer
parent     af5039cd7d74a5a7a8248c364bd5576946e5d451 (diff)
parent     a045105f5b1fa4d0e11ee283eb44d02d39766d66 (diff)
Merge "xci: Add k8-nosdn-nofeature role"
Diffstat (limited to 'xci/installer')
-rwxr-xr-x  xci/installer/kubespray/deploy.sh                           |   5 +
-rw-r--r--  xci/installer/kubespray/files/k8s-cluster.yml               | 292 -
-rw-r--r--  xci/installer/kubespray/playbooks/configure-kubenet.yml     |  50 +
-rw-r--r--  xci/installer/kubespray/playbooks/configure-opnfvhost.yml   |   5 -
4 files changed, 55 insertions(+), 297 deletions(-)
diff --git a/xci/installer/kubespray/deploy.sh b/xci/installer/kubespray/deploy.sh
index 59d11055..1a0b34bc 100755
--- a/xci/installer/kubespray/deploy.sh
+++ b/xci/installer/kubespray/deploy.sh
@@ -79,6 +79,11 @@ ssh root@$OPNFV_HOST_IP "set -o pipefail; cd releng-xci/.cache/repos/kubespray;\
-i opnfv_inventory/inventory.cfg cluster.yml -b | tee setup-kubernetes.log"
scp root@$OPNFV_HOST_IP:~/releng-xci/.cache/repos/kubespray/setup-kubernetes.log \
$LOG_PATH/setup-kubernetes.log
+
+cd $K8_XCI_PLAYBOOKS
+ansible-playbook ${XCI_ANSIBLE_PARAMS} -e XCI_PATH="${XCI_PATH}" \
+ -i ${XCI_FLAVOR_ANSIBLE_FILE_PATH}/inventory/inventory.cfg \
+ configure-kubenet.yml
echo
echo "-----------------------------------------------------------------------"
echo "Info: Kubernetes installation is successfully completed!"
diff --git a/xci/installer/kubespray/files/k8s-cluster.yml b/xci/installer/kubespray/files/k8s-cluster.yml
deleted file mode 100644
index 20d3091d..00000000
--- a/xci/installer/kubespray/files/k8s-cluster.yml
+++ /dev/null
@@ -1,292 +0,0 @@
-# Valid bootstrap options (required): ubuntu, coreos, centos, none
-bootstrap_os: none
-
-# Directory where etcd data is stored
-etcd_data_dir: /var/lib/etcd
-
-# Directory where the binaries will be installed
-bin_dir: /usr/local/bin
-
-## The access_ip variable is used to define how other nodes should access
-## the node. This is used in flannel, for example, to allow other flannel
-## nodes to see this node. The access_ip is really useful in AWS and Google
-## environments where the nodes are accessed remotely by the "public" IP
-## but don't know about that address themselves.
-#access_ip: 1.1.1.1
-
-### LOADBALANCING AND ACCESS MODES
-## Enable multiaccess to configure etcd clients to access all of the etcd members directly
-## at "http://hostX:port, http://hostY:port, ..." and ignore the proxy loadbalancers.
-## This may be the case if clients support and load-balance multiple etcd servers natively.
-#etcd_multiaccess: true
-
-## Internal loadbalancers for apiservers
-#loadbalancer_apiserver_localhost: true
-
-## Local loadbalancer should use this port instead, if defined.
-## Defaults to kube_apiserver_port (6443)
-#nginx_kube_apiserver_port: 8443
-
-### OTHER OPTIONAL VARIABLES
-## For some things, kubelet needs to load kernel modules. For example, dynamic kernel services
-## are needed for mounting persistent volumes into containers, and these may not be loaded by
-## the preinstall kubernetes processes; ceph- and rbd-backed volumes are examples. Set to true
-## to allow kubelet to load kernel modules.
-# kubelet_load_modules: false
-
-## Internal network total size. This is the prefix of the
-## entire network. Must be unused in your environment.
-#kube_network_prefix: 18
-
-## With calico it is possible to distribute routes with border routers of the datacenter.
-## Warning: enabling router peering will disable calico's default behavior ('node mesh').
-## The subnets of each node will be distributed by the datacenter router.
-#peer_with_router: false
-
-## Upstream dns servers used by dnsmasq
-#upstream_dns_servers:
-# - 8.8.8.8
-# - 8.8.4.4
-
-## There are some changes specific to the cloud providers;
-## for instance, we need to encapsulate packets with some network plugins.
-## If set, the possible values are 'gce', 'aws', 'azure', 'openstack', 'vsphere', or 'external'.
-## When openstack is used, make sure to source the openstack credentials,
-## like you would when using nova-client, before starting the playbook.
-#cloud_provider:
-
-## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (https://github.com/kubernetes/kubernetes/issues/50461)
-#openstack_blockstorage_version: "v1/v2/auto (default)"
-## When OpenStack is used, if LBaaSv2 is available you can enable it with the following variables.
-#openstack_lbaas_enabled: True
-#openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
-#openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
-#openstack_lbaas_create_monitor: "yes"
-#openstack_lbaas_monitor_delay: "1m"
-#openstack_lbaas_monitor_timeout: "30s"
-#openstack_lbaas_monitor_max_retries: "3"
-
-## Uncomment to enable experimental kubeadm deployment mode
-#kubeadm_enabled: false
-#kubeadm_token_first: "{{ lookup('password', 'credentials/kubeadm_token_first length=6 chars=ascii_lowercase,digits') }}"
-#kubeadm_token_second: "{{ lookup('password', 'credentials/kubeadm_token_second length=16 chars=ascii_lowercase,digits') }}"
-#kubeadm_token: "{{ kubeadm_token_first }}.{{ kubeadm_token_second }}"
-#
-## Set these proxy values in order to update package manager and docker daemon to use proxies
-#http_proxy: ""
-#https_proxy: ""
-## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
-#no_proxy: ""
-
-## Uncomment this if you want to force overlay/overlay2 as docker storage driver
-## Please note that overlay2 is only supported on newer kernels
-#docker_storage_options: -s overlay2
-
-# Uncomment this if you have more than 3 nameservers; then we'll only use the first 3.
-#docker_dns_servers_strict: false
-
-## Default packages to install within the cluster, e.g.:
-#kpm_packages:
-# - name: kube-system/grafana
-
-## Certificate Management
-## This setting determines whether certs are generated via scripts or whether a
-## cluster of Hashicorp's Vault is started to issue certificates (using etcd
-## as a backend). Options are "script" or "vault"
-#cert_management: script
-
-# Set to true to allow pre-checks to fail and continue deployment
-#ignore_assert_errors: false
-
-## Etcd auto compaction retention for mvcc key-value store, in hours
-#etcd_compaction_retention: 0
-
-## Set level of detail for etcd exported metrics; specify 'extensive' to include histogram metrics.
-#etcd_metrics: basic
-
-
-# Kubernetes configuration dirs and system namespace.
-# These are where all the additional config goes that
-# kubernetes normally puts in /srv/kubernetes.
-# This puts them in a sane location and namespace.
-# Editing these values will almost surely break something.
-kube_config_dir: /etc/kubernetes
-kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
-kube_manifest_dir: "{{ kube_config_dir }}/manifests"
-system_namespace: kube-system
-
-# Logging directory (sysvinit systems)
-kube_log_dir: "/var/log/kubernetes"
-
-# This is where all the cert scripts and certs will be located
-kube_cert_dir: "{{ kube_config_dir }}/ssl"
-
-# This is where all of the bearer tokens will be stored
-kube_token_dir: "{{ kube_config_dir }}/tokens"
-
-# This is where to save basic auth file
-kube_users_dir: "{{ kube_config_dir }}/users"
-
-kube_api_anonymous_auth: false
-
-## Change this to use another Kubernetes version, e.g. a current beta release
-#kube_version: v1.9.0
-
-# Where the binaries will be downloaded.
-# Note: ensure that you have enough disk space (about 1G)
-local_release_dir: "/tmp/releases"
-# Random shifts for retrying failed ops like pushing/downloading
-retry_stagger: 5
-
-# This is the group that the cert creation scripts chgrp the
-# cert files to. Not really changeable...
-kube_cert_group: kube-cert
-
-# Cluster Loglevel configuration
-kube_log_level: 2
-
-# Users to create for basic auth in Kubernetes API via HTTP
-# Optionally add groups for user
-kube_api_pwd: "{{ lookup('password', 'credentials/kube_user length=15 chars=ascii_letters,digits') }}"
-kube_users:
- kube:
- pass: "{{kube_api_pwd}}"
- role: admin
- groups:
- - system:masters
-
-## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth)
-#kube_oidc_auth: false
-kube_basic_auth: true
-#kube_token_auth: false
-
-
-## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/
-## To use OpenID you have to deploy an additional OpenID Provider (e.g. Dex, Keycloak, ...)
-
-# kube_oidc_url: https:// ...
-# kube_oidc_client_id: kubernetes
-## Optional settings for OIDC
-# kube_oidc_ca_file: {{ kube_cert_dir }}/ca.pem
-# kube_oidc_username_claim: sub
-# kube_oidc_groups_claim: groups
-
-
-# Choose network plugin (calico, contiv, weave or flannel)
-# Can also be set to 'cloud', which lets the cloud provider set up appropriate routing
-kube_network_plugin: calico
-
-# Weave's network password for encryption;
-# if null then no network encryption.
-# You can use --extra-vars to pass the password on the command line.
-weave_password: EnterPasswordHere
-
-# Weave uses consensus mode by default.
-# Enabling seed mode allows you to dynamically add or remove hosts:
-# https://www.weave.works/docs/net/latest/ipam/
-weave_mode_seed: false
-
-# These two variables are automatically changed by the weave role; do not manually change these values.
-# To reset values:
-# weave_seed: uninitialized
-# weave_peers: uninitialized
-weave_seed: uninitialized
-weave_peers: uninitialized
-
-# Enable kubernetes network policies
-enable_network_policy: false
-
-# Kubernetes internal network for services, unused block of space.
-kube_service_addresses: 10.233.0.0/18
-
-# Internal network. When used, it will assign IP
-# addresses from this range to individual pods.
-# This network must be unused in your network infrastructure!
-kube_pods_subnet: 10.233.64.0/18
-
-# Internal network node size allocation (optional). This is the size allocated
-# to each node on your network. With these defaults (a /18 pods subnet carved
-# into /24s) you have room for 64 nodes with 254 pods per node.
-kube_network_node_prefix: 24
-
-# The port the API Server will be listening on.
-kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
-kube_apiserver_port: 6443 # (https)
-kube_apiserver_insecure_port: 8080 # (http)
-
-# DNS configuration.
-# Kubernetes cluster name, also will be used as DNS domain
-cluster_name: cluster.local
-# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
-ndots: 2
-# Can be dnsmasq_kubedns, kubedns or none
-dns_mode: kubedns
-# Can be docker_dns, host_resolvconf or none
-resolvconf_mode: docker_dns
-# Deploy netchecker app to verify DNS resolve as an HTTP service
-deploy_netchecker: false
-# IP address of the kubernetes skydns service
-skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
-dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
-dns_domain: "{{ cluster_name }}"
-
-# Path used to store Docker data
-docker_daemon_graph: "/var/lib/docker"
-
-## A string of extra options to pass to the docker daemon.
-## This string should be exactly as you wish it to appear.
-## An obvious use case is allowing insecure-registry access
-## to self-hosted registries, like so:
-
-docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} {{ docker_log_opts }}"
-docker_bin_dir: "/usr/bin"
-
-# Settings for containerized control plane (etcd/kubelet/secrets)
-etcd_deployment_type: docker
-kubelet_deployment_type: host
-vault_deployment_type: docker
-helm_deployment_type: host
-
-# K8s image pull policy (imagePullPolicy)
-k8s_image_pull_policy: IfNotPresent
-
-# Kubernetes dashboard
-# RBAC required. see docs/getting-started.md for access details.
-dashboard_enabled: true
-
-# Monitoring apps for k8s
-efk_enabled: false
-
-# Helm deployment
-helm_enabled: false
-
-# Istio deployment
-istio_enabled: false
-
-# Local volume provisioner deployment
-local_volumes_enabled: false
-
-# Add Persistent Volumes Storage Class for corresponding cloud provider (only OpenStack is supported for now)
-persistent_volumes_enabled: false
-
-# Make a copy of kubeconfig on the host that runs Ansible in GITDIR/artifacts
-kubeconfig_localhost: true
-# Download kubectl onto the host that runs Ansible in GITDIR/artifacts
-kubectl_localhost: true
-artifacts_dir: "{{ ansible_env.HOME }}"
-
-# dnsmasq
-# dnsmasq_upstream_dns_servers:
-# - /resolvethiszone.with/10.0.4.250
-# - 8.8.8.8
-
-# Enable creation of QoS cgroup hierarchy; if true, top-level QoS and pod cgroups are created. (default true)
-# kubelet_cgroups_per_qos: true
-
-# A comma-separated list of levels of node allocatable enforcement to be enforced by kubelet.
-# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
-# kubelet_enforce_node_allocatable: pods
-
-## Supplementary addresses that can be added in kubernetes ssl keys.
-## That can be useful, for example, to set up a keepalived virtual IP
-# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3]
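
The file removed above derived several service IPs from kube_service_addresses using Ansible's ipaddr filter (which needs the netaddr Python library on the control host). As a worked illustration of how those expressions evaluate for the 10.233.0.0/18 default, here is a hypothetical debug play; it is not part of this change:

- name: Show how the removed ipaddr expressions evaluate (illustrative only)
  hosts: localhost
  gather_facts: False
  vars:
    kube_service_addresses: 10.233.0.0/18
  tasks:
    - debug:
        msg:
          # first usable host of the service network -> 10.233.0.1
          kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
          # second host -> 10.233.0.2
          dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
          # third host -> 10.233.0.3
          skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"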
diff --git a/xci/installer/kubespray/playbooks/configure-kubenet.yml b/xci/installer/kubespray/playbooks/configure-kubenet.yml
new file mode 100644
index 00000000..1c3740b2
--- /dev/null
+++ b/xci/installer/kubespray/playbooks/configure-kubenet.yml
@@ -0,0 +1,50 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 SUSE LINUX GmbH and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# NOTE(hwoarang) Kubenet expects networking to be prepared by the administrator, so it's necessary
+# to do that as part of the node configuration. All we need is to add static routes on every node
+# so cbr0 interfaces can talk to each other.
+- name: Prepare networking for kubenet
+ hosts: k8s-cluster
+ gather_facts: True
+ become: yes
+ vars_files:
+ - "{{ xci_path }}/xci/var/opnfv.yml"
+ tasks:
+ - name: Configure static routes
+ block:
+ - name: Collect cbr0 information from the nodes
+ set_fact:
+ kubenet_xci_static_routes: |-
+ {% set static_routes = [] %}
+ {% for host in groups['k8s-cluster']|select("ne", inventory_hostname) %}
+ {%- set _ = static_routes.append(
+ {'network': (hostvars[host]['ansible_cbr0']['ipv4']['network']+'/'+
+ hostvars[host]['ansible_cbr0']['ipv4']['netmask'])|ipaddr('net'),
+ 'gateway': hostvars[host]['ansible_default_ipv4']['address']}) -%}
+ {% endfor %}
+ {{ static_routes }}
+
+ - name: Add static routes on each node
+ shell: "ip route show | grep -q {{ item.network }} || ip route add {{ item.network }} via {{ item.gateway }}"
+ with_items: "{{ kubenet_xci_static_routes }}"
+ loop_control:
+ label: "{{ item.network }}"
+ when: deploy_scenario == 'k8-nosdn-nofeature'
+
+  - name: Ensure rp_filter is disabled on localhost
+    sysctl:
+      name: net.ipv4.conf.all.rp_filter
+      sysctl_set: yes
+      state: present
+      value: "{{ (deploy_scenario == 'k8-nosdn-nofeature') | ternary(0, 1) }}"
+      reload: yes
+ delegate_to: localhost
+ run_once: True
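
For context, the set_fact above builds one route entry per peer node, and the shell task then issues "ip route add <network> via <gateway>" for any route that is not already present. A sketch of what the rendered fact could look like on the first node of a hypothetical three-node cluster (the addresses are illustrative assumptions, not values from this change):

# Hypothetical rendered value of kubenet_xci_static_routes; the /24 networks
# are the peers' cbr0 subnets carved out of kube_pods_subnet, and each
# gateway is that peer's default IPv4 address.
kubenet_xci_static_routes:
  - network: 10.233.65.0/24
    gateway: 192.168.122.4
  - network: 10.233.66.0/24
    gateway: 192.168.122.5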
diff --git a/xci/installer/kubespray/playbooks/configure-opnfvhost.yml b/xci/installer/kubespray/playbooks/configure-opnfvhost.yml
index 0ac18b50..697d44c2 100644
--- a/xci/installer/kubespray/playbooks/configure-opnfvhost.yml
+++ b/xci/installer/kubespray/playbooks/configure-opnfvhost.yml
@@ -43,11 +43,6 @@
file:
path: "{{ remote_xci_path }}/.cache/repos/kubespray/opnfv_inventory/group_vars"
state: directory
- - name: copy k8s_cluster.yml
- command: "cp -rf {{ remote_xci_path }}/xci/installer/kubespray/files/k8s-cluster.yml \
- {{ remote_xci_path }}/.cache/repos/kubespray/opnfv_inventory/group_vars"
- args:
- creates: "{{ remote_xci_path }}/.cache/repos/kubespray/opnfv_inventory/group_vars/k8s-cluster.yml"
- include: "{{ xci_path }}/xci/playbooks/bootstrap-scenarios.yml"
- name: Install required packages
package:
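
With the copy task removed above, each scenario wired in through bootstrap-scenarios.yml is expected to supply its own k8s-cluster.yml. A minimal sketch of how a scenario role such as k8-nosdn-nofeature might do that (an assumed task and file layout, since the role itself is outside this diff):

# Hypothetical task inside the scenario role, assuming the role ships its
# own k8s-cluster.yml under files/:
- name: Copy scenario-specific kubespray group variables
  copy:
    src: k8s-cluster.yml
    dest: "{{ remote_xci_path }}/.cache/repos/kubespray/opnfv_inventory/group_vars/k8s-cluster.yml"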