Diffstat (limited to 'playbooks')
 playbooks/roles/bmra-config/templates/all.j2       |  54 +-
 playbooks/roles/bmra-config/templates/inventory.j2 |   4 +-
 playbooks/roles/bmra-config/templates/kube-node.j2 | 122 +-
 3 files changed, 148 insertions(+), 32 deletions(-)
diff --git a/playbooks/roles/bmra-config/templates/all.j2 b/playbooks/roles/bmra-config/templates/all.j2
index 6dc074e..1dbabe2 100644
--- a/playbooks/roles/bmra-config/templates/all.j2
+++ b/playbooks/roles/bmra-config/templates/all.j2
@@ -1,5 +1,5 @@
##
-## Copyright (c) 2020 Intel Corporation.
+## Copyright (c) 2020-2021 Intel Corporation.
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
@@ -18,14 +18,15 @@
# Kubernetes version
kubernetes: true
-kube_version: v1.18.8
-#kube_version: v1.17.11
-#kube_version: v1.16.14
+#kube_version: v1.20.4
+kube_version: v1.19.8
+#kube_version: v1.18.16
# Run system-wide package update (apt dist-upgrade, yum update, ...)
# Note: enabling this may lead to unexpected results
# Tip: you can set this per host using host_vars
update_all_packages: false
+update_kernel: true
# Node Feature Discovery
nfd_enabled: {{ bmra.features.nfd }}
@@ -33,7 +34,7 @@ nfd_build_image_locally: false
nfd_namespace: kube-system
nfd_sleep_interval: 60s
-# Intel CPU Manager for Kubernetes
+# Intel CPU Manager for Kubernetes (CMK)
cmk_enabled: {{ bmra.features.cmk.enable }}
cmk_namespace: kube-system
cmk_use_all_hosts: false # 'true' will deploy CMK on the controller nodes too
@@ -42,19 +43,13 @@ cmk_shared_num_cores: {{ bmra.features.cmk.num_shared_cores }} # number of CPU c
cmk_exclusive_num_cores: {{ bmra.features.cmk.num_exclusive_cores }} # number of CPU cores to be assigned to the "exclusive" pool on each of the nodes
# cmk_shared_mode: packed # choose between: packed, spread, default: packed
# cmk_exclusive_mode: packed # choose between: packed, spread, default: packed
+autogenerate_isolcpus: {{ bmra.features.isolcpus.autogenerate }}
# Native CPU Manager (Kubernetes built-in)
-# Note: Enabling CMK and built-in CPU Manager is not recommended.
+# Note: Enabling CMK and built-in Native CPU Manager is NOT recommended.
# Setting this option as "true" enables the "static" policy, otherwise the default "none" policy is used.
+# The reserved CPU core settings are individual to each worker node, and are therefore configured in the host_vars file
native_cpu_manager_enabled: false
-# Amount of CPU cores that will be reserved for the housekeeping (2000m = 2000 millicores = 2 cores)
-native_cpu_manager_system_reserved_cpus: 2000m
-# Amount of CPU cores that will be reserved for Kubelet
-native_cpu_manager_kube_reserved_cpus: 1000m
-# Explicit list of the CPUs reserved from pods scheduling.
-# Note: Supported only with kube_version 1.17 and newer, overrides native_cpu_manager_system_reserved_cpus and native_cpu_manager_kube_reserved_cpus.
-#native_cpu_manager_reserved_cpus: "0,1,2"
-# Note: All reamining unreserved CPU cores will be consumed by the workloads.
# Enable Kubernetes built-in Topology Manager
topology_manager_enabled: {{ bmra.features.topology_manager.enable }}
@@ -93,17 +88,37 @@ sriovdp_config_data: |
"devices": ["1889"],
"drivers": ["vfio-pci"]
}
+ },
+ {
+ "resourceName": "intel_fpga",
+ "deviceType": "accelerator",
+ "selectors": {
+ "vendors": ["8086"],
+ "devices": ["0d90"]
+ }
}
]
}
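The new "intel_fpga" entry above is served by the SR-IOV network device plugin, which advertises resources under the intel.com/ prefix by default. A minimal sketch of a pod consuming one of these devices (pod and image names are hypothetical):

apiVersion: v1
kind: Pod
metadata:
  name: fpga-demo                    # hypothetical name
spec:
  containers:
  - name: app
    image: busybox                   # hypothetical image
    resources:
      requests:
        intel.com/intel_fpga: '1'    # one device from the pool defined above
      limits:
        intel.com/intel_fpga: '1'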
# Intel QAT Device Plugin for Kubernetes
-qat_dp_enabled: false
+qat_dp_enabled: {{ bmra.features.qat.enable }}
qat_dp_namespace: kube-system
+qat_dp_build_image_locally: true
# Intel GPU Device Plugin for Kubernetes
gpu_dp_enabled: false
gpu_dp_namespace: kube-system
+gpu_dp_build_image_locally: true
+
+# Intel SGX Device Plugin for Kubernetes
+sgx_dp_enabled: false
+sgx_dp_build_image_locally: true
+# ProvisionLimit is the number of containers that can share
+# the same SGX provision device.
+sgx_dp_provision_limit: 20
+# EnclaveLimit is the number of containers that can share the
+# same SGX enclave device.
+sgx_dp_enclave_limit: 20
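For context, pods consume these SGX resources through ordinary resource limits; the resource names below follow the Intel device plugins for Kubernetes documentation (treat them as an assumption here) and the values are illustrative:

resources:
  limits:
    sgx.intel.com/epc: "512Ki"    # enclave page cache memory
    sgx.intel.com/enclave: 1      # counted against sgx_dp_enclave_limit
    sgx.intel.com/provision: 1    # counted against sgx_dp_provision_limit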
# Intel Telemetry Aware Scheduling
tas_enabled: {{ bmra.features.tas.enable }}
@@ -120,7 +135,7 @@ example_net_attach_defs:
## Proxy configuration ##
#http_proxy: "http://proxy.example.com:1080"
#https_proxy: "http://proxy.example.com:1080"
-#additional_no_proxy: ".example.com"
+#additional_no_proxy: ".example.com,mirror_ip"
# (Ubuntu only) disables DNS stub listener which may cause issues on Ubuntu
dns_disable_stub_listener: false
@@ -138,9 +153,16 @@ kube_pods_subnet: 10.244.0.0/16
kube_service_addresses: 10.233.0.0/18
kube_proxy_mode: iptables
+# Comment this line out if you want to expose k8s services of type NodePort externally.
+kube_proxy_nodeport_addresses_cidr: 127.0.0.0/8
+
# please leave it set to "true", otherwise Intel BMRA features deployed as Helm charts won't be installed
helm_enabled: true
+# local Docker Hub mirror, if it exists
+#docker_registry_mirrors:
+# - http://mirror_ip:mirror_port
+
# Docker registry running on the cluster allows us to store images not available on Docker Hub, e.g. CMK
registry_local_address: "localhost:30500"
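Taken together, all.j2 now reads only a small set of keys from the bmra descriptor. A minimal sketch of a fragment that satisfies the template above (values are illustrative, not defaults):

bmra:
  features:
    nfd: true
    cmk:
      enable: true
      num_shared_cores: 2
      num_exclusive_cores: 2
    isolcpus:
      autogenerate: true
    topology_manager:
      enable: true
    qat:
      enable: false
    tas:
      enable: true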
diff --git a/playbooks/roles/bmra-config/templates/inventory.j2 b/playbooks/roles/bmra-config/templates/inventory.j2
index 6008179..7f6cde0 100644
--- a/playbooks/roles/bmra-config/templates/inventory.j2
+++ b/playbooks/roles/bmra-config/templates/inventory.j2
@@ -2,7 +2,7 @@
{% for node in nodes %}
{{ idf.kubespray.hostnames[node.name] }} ansible_host={{ node.interfaces[idf.net_config[engine.pxe_network].interface].address }} ip={{ node.interfaces[idf.net_config[engine.pxe_network].interface].address }}
{% endfor %}
-localhost ansible_python_interpreter=/usr/bin/python2
+localhost ansible_connection=local ansible_python_interpreter=/usr/bin/python2
{% for role in ['kube-master', 'etcd', 'kube-node'] %}
[{{ role }}]
@@ -19,7 +19,7 @@ kube-node
[calico-rr]
-{% if deployment_type == 'k8s' %}
+{% if os_distro|lower != 'centos7' %}
[all:vars]
ansible_python_interpreter=/usr/bin/python3
{% endif %}
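Rendered against two hypothetical nodes, the template produces a standard Kubespray inventory; hostnames, addresses, and role membership below are illustrative:

node1 ansible_host=10.10.190.201 ip=10.10.190.201
node2 ansible_host=10.10.190.202 ip=10.10.190.202
localhost ansible_connection=local ansible_python_interpreter=/usr/bin/python2

[kube-master]
node1

[etcd]
node1

[kube-node]
node2

[calico-rr]

[all:vars]
ansible_python_interpreter=/usr/bin/python3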
diff --git a/playbooks/roles/bmra-config/templates/kube-node.j2 b/playbooks/roles/bmra-config/templates/kube-node.j2
index f32fbdb..09a6958 100644
--- a/playbooks/roles/bmra-config/templates/kube-node.j2
+++ b/playbooks/roles/bmra-config/templates/kube-node.j2
@@ -1,5 +1,5 @@
##
-## Copyright (c) 2020 Intel Corporation.
+## Copyright (c) 2020-2021 Intel Corporation.
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
@@ -20,33 +20,44 @@
iommu_enabled: {{ bmra.features.sriov.enable }}
# dataplane interface configuration list
+{% if bmra.network_roles.sriov is defined or bmra.network_roles.sriov_dpdk is defined %}
dataplane_interfaces:
{% for intf in bmra.network_roles.sriov | default([]) %}
- name: {{ intf.name }}
- bus_info: "{{ intf.bus_info }}"
- device_info: "{{ intf.device_info }}"
+ bus_info: "{{ intf.pci }}"
+ pf_driver: {{ intf.pf_driver }}
+ default_vf_driver: "{{ intf.vf_driver }}"
sriov_numvfs: {{ bmra.features.sriov.sriov_vfs_per_port }}
- vf_driver: {{ intf.driver }}
{% endfor %}
{% for intf in bmra.network_roles.sriov_dpdk | default([]) %}
- name: {{ intf.name }}
- bus_info: "{{ intf.bus_info }}"
- device_info: "{{ intf.device_info }}"
+ bus_info: "{{ intf.pci }}"
+ pf_driver: {{ intf.pf_driver }}
+ default_vf_driver: "{{ intf.vf_driver }}"
sriov_numvfs: {{ bmra.features.sriov.sriov_dpdk_vfs_per_port }}
- vf_driver: {{ intf.driver }}
{% endfor %}
+{% else %}
+dataplane_interfaces: []
+{% endif %}
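For one kernel-networking SR-IOV interface and one DPDK-bound interface, the block above renders roughly as follows (interface names, PCI addresses, drivers, and VF counts are illustrative):

dataplane_interfaces:
  - name: eno1
    bus_info: "0000:18:00.0"
    pf_driver: i40e
    default_vf_driver: "iavf"
    sriov_numvfs: 6
  - name: eno2
    bus_info: "0000:18:00.1"
    pf_driver: i40e
    default_vf_driver: "vfio-pci"
    sriov_numvfs: 4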
sriov_cni_enabled: {{ bmra.features.sriov_cni }}
# Bond CNI
-bond_cni_enabled: false
+bond_cni_enabled: {{ bmra.features.bond_cni }}
# Install DPDK (required for SR-IOV networking)
install_dpdk: true
+# DPDK version
+dpdk_version: "19.11.6"
+
+# Custom DPDK patches local path
+# dpdk_local_patches_dir: "/tmp/patches/dpdk-19.11.6"
+
# Userspace networking
userspace_cni_enabled: false
ovs_dpdk_enabled: false # Should be enabled with Userspace CNI, when VPP is set to "false"; 1G hugepages required
+ovs_version: "v2.13.0"
# CPU mask for OVS-DPDK PMD threads
ovs_dpdk_lcore_mask: 0x1
# Huge memory pages allocated by OVS-DPDK per NUMA node in megabytes
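ovs_dpdk_lcore_mask is a hexadecimal CPU bitmask in which bit n selects logical core n, so the default 0x1 pins to core 0. A hypothetical variant pinning to cores 1 and 2:

# binary 0110 -> cores 1 and 2 -> hex 0x6
ovs_dpdk_lcore_mask: 0x6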
@@ -69,9 +80,41 @@ firmware_update_nics: []
# install Intel x700 & x800 series NICs DDP packages
install_ddp_packages: false
-
-# set how many VFs per single QAT device PF should be created
-qat_sriov_numvfs: 16
+# set 'true' to load the custom DDP package after reboot (via a systemd service)
+enable_ice_systemd_service: false
+# Enabling this feature will install QAT drivers + services
+update_qat_drivers: {{ bmra.features.qat.update_drivers }}
+
+# qat interface configuration list
+{% if bmra.device_roles.qat is defined %}
+qat_devices:
+{% for dev in bmra.device_roles.qat %}
+ - qat_dev: {{ dev.name }}
+ qat_id: "{{ dev.pci }}"
+ qat_module_type: {{ dev.mod_type }}
+ qat_pci_type: {{ dev.pci_type }}
+ qat_sriov_numvfs: {{ dev.vfs }}
+{% endfor %}
+{% else %}
+qat_devices: []
+{% endif %}
+# - qat_dev: crypto01   # Device name; one entry per QAT symmetric crypto device over which qat_sriov_numvfs is distributed
+#   qat_id: "0000:ab:00.0" # QAT device PCI ID; VF devices are used via the DPDK-compatible vfio-pci kernel driver; replace as required
+#   qat_module_type: qat_c62x # QAT crypto poll-mode kernel module; supported: qat_dh895xcc, qat_c62x, qat_c3xxx, qat_200xx, qat_c4xxx, qat_d15xx
+#   qat_pci_type: c6xx # QAT crypto poll-mode PCI driver ID; supported: dh895xcc, c6xx, c3xxx, d15xx, 200xx, c4xxx
+#   qat_sriov_numvfs: 12 # Number of VFs to create on this qat_id; e.g. c6xx supports 32 VFs in total, distributed here as 12+10+10 across the three example devices; replace as required
+#   # Note: if you don't want to assign VFs to a device, leave qat_sriov_numvfs at 0
+# - qat_dev: crypto02
+# qat_id: "0000:xy:00.0"
+# qat_module_type: qat_c62x
+# qat_pci_type: c6xx
+# qat_sriov_numvfs: 10
+
+# - qat_dev: crypto03
+# qat_id: "0000:yz:00.0"
+# qat_module_type: qat_c62x
+# qat_pci_type: c6xx
+# qat_sriov_numvfs: 10
# Enables hugepages support
hugepages_enabled: {{ bmra.features.hugepages.enable }}
@@ -85,10 +128,19 @@ hugepages_2M: {{ bmra.features.hugepages.hugepages_2M }}
# CPU isolation from Linux scheduler
isolcpus_enabled: {{ bmra.features.isolcpus.enable }}
-isolcpus: "{{ bmra.features.isolcpus.cpus }}" # Update to match group_vars requested exclusive/shared cores
+# Disable CMK's autogenerate_isolcpus in group_vars to set a custom isolcpus range; otherwise the range is generated automatically
+# If defining the range manually while using CMK, you must account for the exclusive/shared cores requested in group_vars
+isolcpus: "{{ bmra.features.isolcpus.cpus }}"
-# Max number of processors to support (physical & logical cores)
-cpu_count: 144
+# Native CPU Manager (Kubernetes built-in)
+# Amount of CPU cores that will be reserved for the housekeeping (2000m = 2000 millicores = 2 cores)
+native_cpu_manager_system_reserved_cpus: 2000m
+# Amount of CPU cores that will be reserved for Kubelet
+native_cpu_manager_kube_reserved_cpus: 1000m
+# Explicit list of the CPUs reserved from pods scheduling.
+# Note: Supported only with kube_version 1.17 and newer, overrides native_cpu_manager_system_reserved_cpus and native_cpu_manager_kube_reserved_cpus.
+#native_cpu_manager_reserved_cpus: "0,1,2"
+# Note: All remaining unreserved CPU cores will be consumed by the workloads.
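These variables correspond to stock kubelet settings; a sketch of the equivalent KubeletConfiguration fragment, assuming Kubespray passes them through unchanged (the plumbing itself is not shown in this diff):

apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cpuManagerPolicy: static        # when native_cpu_manager_enabled: true
systemReserved:
  cpu: 2000m                    # native_cpu_manager_system_reserved_cpus
kubeReserved:
  cpu: 1000m                    # native_cpu_manager_kube_reserved_cpus
reservedSystemCPUs: "0,1,2"     # native_cpu_manager_reserved_cpus (1.17+), overrides the two values above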
# Enable/Disable Intel PState scaling driver
intel_pstate_enabled: true
@@ -109,9 +161,51 @@ sst_bf_configuration_enabled: false
# [r] Revert cores to min/Turbo (set min/max to 800/3900)
clx_sst_bf_mode: s
+# Intel Speed Select Base-Frequency configuration for Ice Lake (ICX) Platforms.
+# [true] Enable Intel Speed Select Base Frequency (SST-BF)
+# [false] Disable Intel Speed Select Base Frequency (SST-BF)
+# Requires `sst_bf_configuration_enabled` variable to be 'true'
+icx_sst_bf_enabled: false
+# Prioritize (SST-CP) power flow to high-frequency cores in case of CPU power constraints.
+icx_sst_bf_with_core_priority: false
+
+# SST CP config
+# Variables are only examples.
+# For more information, please visit:
+# https://www.kernel.org/doc/html/latest/admin-guide/pm/intel-speed-select.html#enable-clos-based-prioritization
+# Enabling this configuration overrides `icx_sst_bf_with_core_priority`.
+sst_cp_configuration_enabled: false
+sst_cp_priority_type: 0 # 0 - Proportional, 1 - Ordered
+sst_cp_clos_groups: # configure up to 4 CLOS groups
+ - id: 0
+ frequency_weight: 0 # used only with Proportional type
+ min_MHz: 0
+ max_MHz: 25500
+ - id: 1
+ frequency_weight: 0 # used only with Proportional type
+ min_MHz: 0
+ max_MHz: 25500
+ - id: 2
+ frequency_weight: 0 # used only with Proportional type
+ min_MHz: 0
+ max_MHz: 25500
+ - id: 3
+ frequency_weight: 0 # used only with Proportional type
+ min_MHz: 0
+ max_MHz: 25500
+sst_cp_cpu_clos:
+ - clos: 0
+ cpus: 1,2,4..6,8-10
+ - clos: 1
+ cpus: 3,7
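With sst_cp_priority_type: 0 (proportional), the linked kernel documentation divides the available power budget between CLOS groups according to their weights; a hypothetical prioritized variant of the defaults above:

sst_cp_clos_groups:
  - id: 0
    frequency_weight: 15   # higher weight -> larger share under power contention
    min_MHz: 0
    max_MHz: 25500
  - id: 1
    frequency_weight: 5
    min_MHz: 0
    max_MHz: 25500
sst_cp_cpu_clos:
  - clos: 0
    cpus: 0-7              # illustrative core lists
  - clos: 1
    cpus: 8-15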
+
+
# (CentOS 7 only) install real time kernel and related packages for flexran
install_real_time_package: false
+# Intel Software Guard Extensions (SGX)
+sgx_enabled: false
+
# Telemetry configuration
# intel_pmu plugin collects information provided by Linux perf interface.
enable_intel_pmu_plugin: false