---
## BMRA master playbook variables ##
# Node Feature Discovery
nfd_enabled: false
nfd_build_image_locally: false
nfd_namespace: kube-system
nfd_sleep_interval: 30s
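# Note: NFD labels nodes with discovered hardware features, typically under the
# "feature.node.kubernetes.io/" label prefix, which pods can match in nodeSelector
# or affinity rules, e.g. (illustrative):
#   nodeSelector:
#     feature.node.kubernetes.io/cpu-pstate.turbo: "true"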
# Intel CPU Manager for Kubernetes
cmk_enabled: false
cmk_namespace: kube-system
cmk_use_all_hosts: false # 'true' will deploy CMK on the master nodes too
cmk_hosts_list: node1,node2 # controls which nodes CMK will run on; leave this option commented out to deploy on all K8s nodes
cmk_shared_num_cores: 2 # number of CPU cores to be assigned to the "shared" pool on each of the nodes
cmk_exclusive_num_cores: 2 # number of CPU cores to be assigned to the "exclusive" pool on each of the nodes
# cmk_shared_mode: packed # choose between: packed, spread, default: packed
# cmk_exclusive_mode: packed # choose between: packed, spread, default: packed
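# Note (assumption based on the CMK documentation): "packed" fills a pool's cores from
# as few sockets as possible, while "spread" distributes them evenly across sockets.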
# Intel SRIOV Network Device Plugin
sriov_net_dp_enabled: false
sriov_net_dp_namespace: kube-system
# whether to build and store the image locally or use one from a public external registry
sriov_net_dp_build_image_locally: false
# SR-IOV network device plugin configuration.
# For more information on supported configuration refer to: https://github.com/intel/sriov-network-device-plugin#configurations
sriovdp_config_data: |
    {
        "resourceList": [{
                "resourceName": "intel_sriov_netdevice",
                "selectors": {
                    "vendors": ["8086"],
                    "devices": ["154c", "10ed"],
                    "drivers": ["iavf", "i40evf", "ixgbevf"]
                }
            },
            {
                "resourceName": "intel_sriov_dpdk",
                "selectors": {
                    "vendors": ["8086"],
                    "devices": ["154c", "10ed"],
                    "drivers": ["vfio-pci"]
                }
            }
        ]
    }
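# Note: with this configuration the plugin typically advertises matching VFs as
# "intel.com/intel_sriov_netdevice" and "intel.com/intel_sriov_dpdk" ("intel.com" is the
# plugin's default resource prefix), which pods then request, e.g. (illustrative):
#   resources:
#     requests:
#       intel.com/intel_sriov_netdevice: '1'
#     limits:
#       intel.com/intel_sriov_netdevice: '1'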
# Intel Device Plugins for Kubernetes
qat_dp_enabled: false
qat_dp_namespace: kube-system
gpu_dp_enabled: false
gpu_dp_namespace: kube-system
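# Note (assumption based on the Intel device plugin docs): the QAT plugin typically
# advertises resources such as "qat.intel.com/generic", and the GPU plugin "gpu.intel.com/i915".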
# Intel Telemetry Aware Scheduling
tas_enabled: false
tas_namespace: default
# create default TAS policy: [true, false]
tas_create_policy: false
# Create reference net-attach-def objects
example_net_attach_defs:
  userspace_ovs_dpdk: false
  userspace_vpp: false
  sriov_net_dp: false
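# Note: these create NetworkAttachmentDefinition objects that pods attach to via the
# Multus "k8s.v1.cni.cncf.io/networks" annotation, e.g. (illustrative network name):
#   annotations:
#     k8s.v1.cni.cncf.io/networks: sriov-net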
## Proxy configuration ##
# http_proxy: "http://proxy.example.com:1080"
# https_proxy: "http://proxy.example.com:1080"
# additional_no_proxy: ".example.com"
# Topology Manager flags
kubelet_node_custom_flags:
- "--feature-gates=TopologyManager=true"
- "--topology-manager-policy=none"
# Kubernetes cluster name, also used as the DNS domain
cluster_name: cluster.local
## Kubespray variables ##
# default network plugins and kube-proxy configuration
kube_network_plugin_multus: true
multus_version: v3.3
kube_network_plugin: flannel
kube_pods_subnet: 10.244.0.0/16
kube_service_addresses: 10.233.0.0/18
kube_proxy_mode: iptables
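# Note: Kubespray also supports other CNI plugins (e.g. calico, weave, cilium) and the
# "ipvs" kube-proxy mode; the values above are the defaults used by this playbook.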
# please leave this set to "true"; otherwise Intel BMRA features deployed as Helm charts won't be installed
helm_enabled: true
# A Docker registry running on the cluster allows storing images not available on Docker Hub, e.g. CMK
registry_enabled: true
registry_storage_class: ""
registry_local_address: "localhost:5000"