path: root/playbooks/roles/bmra-config/templates/all.j2
##
##   Copyright (c) 2020-2021 Intel Corporation.
##
##   Licensed under the Apache License, Version 2.0 (the "License");
##   you may not use this file except in compliance with the License.
##   You may obtain a copy of the License at
##
##       http://www.apache.org/licenses/LICENSE-2.0
##
##   Unless required by applicable law or agreed to in writing, software
##   distributed under the License is distributed on an "AS IS" BASIS,
##   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
##   See the License for the specific language governing permissions and
##   limitations under the License.
##
---
## BMRA primary playbook variables ##

# Kubernetes version
kubernetes: true
#kube_version: v1.20.4
kube_version: v1.19.8
#kube_version: v1.18.16

# Run system-wide package update (apt dist-upgrade, yum update, ...)
# Note: enabling this may lead to unexpected results
# Tip: you can set this per host using host_vars
update_all_packages: false
update_kernel: true

# Node Feature Discovery
nfd_enabled: {{ bmra.features.nfd }}
nfd_build_image_locally: false
nfd_namespace: kube-system
nfd_sleep_interval: 60s
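# Illustrative only: NFD publishes node labels (e.g. "feature.node.kubernetes.io/network-sriov.capable")
# that workloads can target with a nodeSelector in their pod spec, for example:
#   nodeSelector:
#     feature.node.kubernetes.io/network-sriov.capable: "true"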

# Intel CPU Manager for Kubernetes (CMK)
cmk_enabled: {{ bmra.features.cmk.enable }}
cmk_namespace: kube-system
cmk_use_all_hosts: false # 'true' will deploy CMK on the controller nodes too
#cmk_hosts_list: node1,node2 # controls which nodes CMK runs on; leave this option commented out to deploy on all K8s nodes
cmk_shared_num_cores: {{ bmra.features.cmk.num_shared_cores }} # number of CPU cores to be assigned to the "shared" pool on each of the nodes
cmk_exclusive_num_cores: {{ bmra.features.cmk.num_exclusive_cores }} # number of CPU cores to be assigned to the "exclusive" pool on each of the nodes
# cmk_shared_mode: packed # choose between: packed, spread, default: packed
# cmk_exclusive_mode: packed # choose between: packed, spread, default: packed
autogenerate_isolcpus: {{ bmra.features.isolcpus.autogenerate }}
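# Illustrative host_vars counterpart (variable names may differ between BMRA releases):
# when autogeneration is disabled, the isolated CPU list is typically set per node, e.g.
#   isolcpus_enabled: true
#   isolcpus: "4-11"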

# Native CPU Manager (Kubernetes built-in)
# Note: Enabling CMK and built-in Native CPU Manager is NOT recommended.
# Setting this option to "true" enables the "static" policy; otherwise the default "none" policy is used.
# The reserved CPU core settings are per worker node and are therefore configured in the host_vars file
native_cpu_manager_enabled: false
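# Illustrative host_vars counterpart for the per-node reserved CPU settings mentioned
# above (variable names may differ between BMRA releases), e.g.
#   native_cpu_manager_system_reserved_cpus: 2000m
#   native_cpu_manager_kube_reserved_cpus: 1000m
#   native_cpu_manager_reserved_cpus: "0,1,2"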

# Enable Kubernetes built-in Topology Manager
topology_manager_enabled: {{ bmra.features.topology_manager.enable }}
# There are four supported policies: none, best-effort, restricted, single-numa-node.
topology_manager_policy: "{{ bmra.features.topology_manager.policy }}"

# Intel SRIOV Network Device Plugin
sriov_net_dp_enabled: {{ bmra.features.sriov_net_dp }}
sriov_net_dp_namespace: kube-system
# whether to build and store the image locally or use one from a public external registry
sriov_net_dp_build_image_locally: true
# SR-IOV network device plugin configuration.
# For more information on supported configuration refer to: https://github.com/intel/sriov-network-device-plugin#configurations
sriovdp_config_data: |
    {
        "resourceList": [{
                "resourceName": "intel_sriov_netdevice",
                "selectors": {
                    "vendors": ["8086"],
                    "devices": ["154c", "10ed", "1889"],
                    "drivers": ["iavf", "ixgbevf"]
                }
            },
            {
                "resourceName": "intel_sriov_dpdk_700_series",
                "selectors": {
                    "vendors": ["8086"],
                    "devices": ["154c", "10ed"],
                    "drivers": ["vfio-pci"]
                }
            },
            {
                "resourceName": "intel_sriov_dpdk_800_series",
                "selectors": {
                    "vendors": ["8086"],
                    "devices": ["1889"],
                    "drivers": ["vfio-pci"]
                }
            },
            {
                "resourceName": "intel_fpga",
                "deviceType": "accelerator",
                "selectors": {
                    "vendors": ["8086"],
                    "devices": ["0d90"]
                }
            }
        ]
    }
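# Illustrative pod spec fragment requesting one of the resources defined above; the
# SR-IOV network device plugin advertises them under the "intel.com/" prefix by default:
#   resources:
#     requests:
#       intel.com/intel_sriov_netdevice: '1'
#     limits:
#       intel.com/intel_sriov_netdevice: '1'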

# Intel QAT Device Plugin for Kubernetes
qat_dp_enabled: {{ bmra.features.qat.enable }}
qat_dp_namespace: kube-system
qat_dp_build_image_locally: true

# Intel GPU Device Plugin for Kubernetes
gpu_dp_enabled: false
gpu_dp_namespace: kube-system
gpu_dp_build_image_locally: true

# Intel SGX Device Plugin for Kubernetes
sgx_dp_enabled: false
sgx_dp_build_image_locally: true
# ProvisionLimit is the number of containers that can share
# the same SGX provision device.
sgx_dp_provision_limit: 20
# EnclaveLimit is the number of containers that can share the
# same SGX enclave device.
sgx_dp_enclave_limit: 20
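# Illustrative pod spec fragment once the SGX device plugin is deployed; the plugin
# advertises resources such as sgx.intel.com/epc, sgx.intel.com/enclave and
# sgx.intel.com/provision (values below are examples only):
#   resources:
#     limits:
#       sgx.intel.com/epc: "512Ki"
#       sgx.intel.com/enclave: 1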

# Intel Telemetry Aware Scheduling
tas_enabled: {{ bmra.features.tas.enable }}
tas_namespace: monitoring
# create and enable TAS demonstration policy: [true, false]
tas_enable_demo_policy: {{ bmra.features.tas.demo_policy }}

# Create reference net-attach-def objects
example_net_attach_defs:
  userspace_ovs_dpdk: false # Update to match host_vars CNI configuration
  userspace_vpp: false # Update to match host_vars CNI configuration
  sriov_net_dp: {{ bmra.features.sriov_net_dp }} # Update to match host_vars CNI configuration
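# Illustrative only: pods attach to one of the reference net-attach-defs through a
# Multus annotation; the object name below is an example and must match the
# net-attach-def created in your cluster:
#   metadata:
#     annotations:
#       k8s.v1.cni.cncf.io/networks: sriov-net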

## Proxy configuration ##
#http_proxy: "http://proxy.example.com:1080"
#https_proxy: "http://proxy.example.com:1080"
#additional_no_proxy: ".example.com,mirror_ip"

# (Ubuntu only) disable the DNS stub listener, which may cause issues on Ubuntu
dns_disable_stub_listener: false

# Kubernetes cluster name; also used as the DNS domain
cluster_name: cluster.local

## Kubespray variables ##

# default network plugins and kube-proxy configuration
kube_network_plugin_multus: true
multus_version: v3.4.2
kube_network_plugin: flannel
kube_pods_subnet: 10.244.0.0/16
kube_service_addresses: 10.233.0.0/18
kube_proxy_mode: iptables
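# Illustrative alternatives supported by Kubespray (the defaults above are left unchanged):
#kube_network_plugin: calico
#kube_proxy_mode: ipvs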

# comment this line out if you want to expose k8s services of type NodePort externally.
kube_proxy_nodeport_addresses_cidr: 127.0.0.0/8

# please leave this set to "true"; otherwise Intel BMRA features deployed as Helm charts won't be installed
helm_enabled: true

# local Docker Hub mirror, if it exists
#docker_registry_mirrors:
#  - http://mirror_ip:mirror_port

# A Docker registry running on the cluster allows storing images not available on Docker Hub, e.g. CMK
registry_local_address: "localhost:30500"

# Enable Pod Security Policy. This option enables the PSP admission controller and creates a minimal set of rules.
psp_enabled: {{ bmra.features.psp }}

# Set image pull policy to Always. Pulls images prior to starting containers. Valid credentials must be configured.
always_pull_enabled: true

# Telemetry configuration (collectd scrape interval, in seconds)
collectd_scrap_interval: 30