author     Michael S. Pedersen <michaelx.pedersen@intel.com>    2021-09-22 13:35:35 +0200
committer  Rihab Banday <rihab.banday@ericsson.com>             2021-10-15 08:59:44 +0000
commit     cb5c652c37763043e695f123808a3ee9c512689d (patch)
tree       ffeea72f22d73c61123537e003b78061e2520b76 /playbooks/roles/bmra-config
parent     4c6fe2cf6e1bbb279dcf5698cff315740ea6d8e8 (diff)
Updates Kuberef to use BMRA v21.08
Updates BMRA tag to v21.08
Updates Kuberef templates for BMRA configuration
Updates idf.yml (hw_config) to match changes to BMRA
Adds new and updated BMRA patches
Fixes a few bugs in functions.sh

Signed-off-by: Michael S. Pedersen <michaelx.pedersen@intel.com>
Change-Id: Ifbd4d60af76dac45e49145f00cbeb90de0f7c719
Reviewed-on: https://gerrit.opnfv.org/gerrit/c/kuberef/+/72927
Tested-by: jenkins-ci <jenkins-opnfv-ci@opnfv.org>
Reviewed-by: Rihab Banday <rihab.banday@ericsson.com>
Diffstat (limited to 'playbooks/roles/bmra-config')
-rw-r--r--  playbooks/roles/bmra-config/templates/all.j2        | 42
-rw-r--r--  playbooks/roles/bmra-config/templates/kube-node.j2  | 49
2 files changed, 56 insertions(+), 35 deletions(-)
diff --git a/playbooks/roles/bmra-config/templates/all.j2 b/playbooks/roles/bmra-config/templates/all.j2
index 1eca556..440b373 100644
--- a/playbooks/roles/bmra-config/templates/all.j2
+++ b/playbooks/roles/bmra-config/templates/all.j2
@@ -9,9 +9,12 @@ SPDX-License-Identifier: Apache-2.0
# Kubernetes version
kubernetes: true
-#kube_version: v1.20.4
-kube_version: v1.19.8
-#kube_version: v1.18.16
+kube_version: v1.21.1
+#kube_version: v1.20.6
+#kube_version: v1.19.8
+
+# Kubernetes container runtime: docker, containerd
+container_runtime: {{ bmra.runtime }}
# Run system-wide package update (apt dist-upgrade, yum update, ...)
# Note: enabling this may lead to unexpected results
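The {{ bmra.runtime }} reference in this hunk is filled in from Kuberef's IDF when the template is rendered. A minimal sketch of input and output, assuming bmra.runtime is defined in idf.yml (the value below is illustrative, not from this commit):

# idf.yml (illustrative excerpt)
bmra:
  runtime: containerd

# rendered group_vars line
container_runtime: containerd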
@@ -47,6 +50,10 @@ topology_manager_enabled: {{ bmra.features.topology_manager.enable }}
# There are four supported policies: none, best-effort, restricted, single-numa-node.
topology_manager_policy: "{{ bmra.features.topology_manager.policy }}"
+# OpenShift SRIOV Network Operator
+sriov_network_operator_enabled: false
+sriov_network_operator_namespace: "sriov-network-operator"
+
# Intel SRIOV Network Device Plugin
sriov_net_dp_enabled: {{ bmra.features.sriov_net_dp }}
sriov_net_dp_namespace: kube-system
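The topology-manager and SR-IOV device-plugin settings are resolved the same way. A hedged sketch of the IDF subtree these template expressions imply, with illustrative values:

# idf.yml (illustrative excerpt)
bmra:
  features:
    topology_manager:
      enable: true
      policy: best-effort
    sriov_net_dp: true

# rendered output
topology_manager_enabled: true
topology_manager_policy: "best-effort"
sriov_net_dp_enabled: true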
@@ -91,19 +98,26 @@ sriovdp_config_data: |
]
}
+# Intel Device Plugin Operator
+intel_dp_namespace: kube-system # namespace applied to the SGX DP and GPU DP
+
# Intel QAT Device Plugin for Kubernetes
qat_dp_enabled: {{ bmra.features.qat.enable }}
qat_dp_namespace: kube-system
qat_dp_build_image_locally: true
+# This feature will enable OpenSSL*Engine
+openssl_engine_enabled: false # To activate OpenSSL*Engine, install_openssl & update_qat_drivers must be set to 'true' in host_vars
+
# Intel GPU Device Plugin for Kubernetes
gpu_dp_enabled: false
-gpu_dp_namespace: kube-system
+gpu_dp_kernel_version: "5.4.48+"
gpu_dp_build_image_locally: true
# Intel SGX Device Plugin for Kubernetes
sgx_dp_enabled: false
sgx_dp_build_image_locally: true
+sgx_aesmd_namespace: kube-system
# ProvisionLimit is a number of containers that can share
# the same SGX provision device.
sgx_dp_provision_limit: 20
@@ -111,6 +125,16 @@ sgx_dp_provision_limit: 20
# same SGX enclave device.
sgx_dp_enclave_limit: 20
+# KMRA (Key Management Reference Application)
+kmra_enabled: false
+# The PCCS uses this API key to request collaterals from Intel's Provisioning Certificate Service.
+# User needs to subscribe first to obtain an API key.
+# For how to subscribe to Intel Provisioning Certificate Service and receive an API key,
+# goto https://api.portal.trustedservices.intel.com/provisioning-certification and click on 'Subscribe'.
+kmra_pccs_api_key: "ffffffffffffffffffffffffffffffff"
+# deploy KMRA demo workload (NGINX server)
+kmra_deploy_demo_workload: true
+
# Intel Telemetry Aware Scheduling
tas_enabled: {{ bmra.features.tas.enable }}
tas_namespace: monitoring
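The QAT toggle here and the new openssl_install flag in kube-node.j2 (further down) read from the same IDF subtree. A sketch of that subtree, with key names taken from the template expressions and illustrative values:

# idf.yml (illustrative excerpt)
bmra:
  features:
    qat:
      enable: true          # -> qat_dp_enabled
      update_drivers: true  # -> openssl_install in kube-node.j2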
@@ -153,9 +177,17 @@ helm_enabled: true
# local Docker Hub mirror, if it exists
#docker_registry_mirrors:
# - http://mirror_ip:mirror_port
+#containerd_registries:
+# "docker.io":
+# - "https://registry-1.docker.io"
+# - "https://mirror_ip:mirror_port"
# Docker registry running on the cluster allows us to store images not available on Docker Hub, e.g. CMK
-registry_local_address: "localhost:30500"
+# The range of valid ports is 30000-32767
+registry_nodeport: 30500
+{% raw %}
+registry_local_address: "localhost:{{ registry_nodeport }}"
+{% endraw %}
# Enable Pod Security Policy. This option enables PSP admission controller and creates minimal set of rules.
psp_enabled: {{ bmra.features.psp }}
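The {% raw %} guard is what makes the new registry_nodeport indirection work: Kuberef's own Jinja2 pass would otherwise try to resolve {{ registry_nodeport }} itself and fail (or render it empty), while BMRA's Ansible run is the one meant to expand it. With the guard, the generated group_vars keep the reference intact:

# generated group_vars (after Kuberef templating)
registry_nodeport: 30500
registry_local_address: "localhost:{{ registry_nodeport }}"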
diff --git a/playbooks/roles/bmra-config/templates/kube-node.j2 b/playbooks/roles/bmra-config/templates/kube-node.j2
index 2f66c5a..51c4112 100644
--- a/playbooks/roles/bmra-config/templates/kube-node.j2
+++ b/playbooks/roles/bmra-config/templates/kube-node.j2
@@ -40,22 +40,22 @@ bond_cni_enabled: {{ bmra.features.bond_cni }}
install_dpdk: true
# DPDK version
-dpdk_version: "19.11.6"
+dpdk_version: "21.05"
# Custom DPDK patches local path
-# dpdk_local_patches_dir: "/tmp/patches/dpdk-19.11.6"
+# dpdk_local_patches_dir: "/tmp/patches/dpdk-20.11"
# Userspace networking
userspace_cni_enabled: false
-ovs_dpdk_enabled: false # Should be enabled with Userspace CNI, when VPP is set to "false"; 1G hugepages required
-ovs_version: "v2.13.0"
+ovs_dpdk_enabled: false # Should be enabled with Userspace CNI, when VPP is set to "false"; 1G default_hugepage_size required
+ovs_version: "v2.15.0"
# CPU mask for OVS-DPDK PMD threads
ovs_dpdk_lcore_mask: 0x1
# Huge memory pages allocated by OVS-DPDK per NUMA node in megabytes
# example 1: "256,512" will allocate 256MB from node 0 and 512MB from node 1
-# example 2: "1024" will allocate 1GB fron node 0 on a single socket board, e.g. in a VM
+# example 2: "1024" will allocate 1GB from node 0 on a single socket board, e.g. in a VM
ovs_dpdk_socket_mem: "256,0"
-vpp_enabled: false # Should be enabled with Userspace CNI, when ovs_dpdk is set to "false"; 2M hugepages required
+vpp_enabled: false # Should be enabled with Userspace CNI, when ovs_dpdk is set to "false"; 2M default_hugepage_size required
# Set to 'true' to update i40e, ice and iavf kernel modules
update_nic_drivers: false
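The PMD core mask above is a plain CPU bitmask. A short worked example (illustrative values, not from this commit):

# ovs_dpdk_lcore_mask: 0x1 -> binary 0001 -> PMD threads pinned to CPU 0
# ovs_dpdk_lcore_mask: 0x6 -> binary 0110 -> PMD threads pinned to CPUs 1 and 2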
@@ -82,30 +82,15 @@ qat_devices:
{% for dev in bmra.device_roles.qat %}
- qat_dev: {{ dev.name }}
qat_id: "{{ dev.pci }}"
- qat_module_type: {{ dev.mod_type }}
qat_pci_type: {{ dev.pci_type }}
qat_sriov_numvfs: {{ dev.vfs }}
{% endfor %}
{% else %}
qat_devices: []
{% endif %}
-# - qat_dev: crypto01 # Device name as separate QAT Symmetric Crypto devices on which qat_sriov_numvfs will be distributed
-# qat_id: "0000:ab:00.0" # Device QAT id one using DPDK compatible driver for VF devices to be used by vfio-pci kernel driver, replace as required
-# qat_module_type: qat_c62x # QAT Crypto Poll Mode Kernel Module supported are qat_dh895xcc,qat_c62x,qat_c3xxx,qat_200xx,qat_c4xxx,qat_d15xx
-# qat_pci_type: c6xx # QAT Crypto Poll Mode Pci Driver id supported are dh895xcc,c6xx,c3xxx,d15xx,200xx & c4xxx
-# qat_sriov_numvfs: 12 # set how many VFs per qat_id to be created such as c6xxvf support 32 so per device will be 10+10+12=32, replace as required
- # Note: If don't want to assign VFs to id leave it as 0 such as qat_sriov_numvfs: 0
-# - qat_dev: crypto02
-# qat_id: "0000:xy:00.0"
-# qat_module_type: qat_c62x
-# qat_pci_type: c6xx
-# qat_sriov_numvfs: 10
-
-# - qat_dev: crypto03
-# qat_id: "0000:yz:00.0"
-# qat_module_type: qat_c62x
-# qat_pci_type: c6xx
-# qat_sriov_numvfs: 10
+
+# Install and configure OpenSSL cryptography
+openssl_install: {{ bmra.features.qat.update_drivers }} # This requires update_qat_drivers set to 'true' in host vars
# Enables hugepages support
hugepages_enabled: {{ bmra.features.hugepages.enable }}
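The loop above emits one qat_devices entry per device in the IDF; note that this commit drops the qat_module_type field. Reusing the values from the removed example comments, a rendering sketch:

# idf.yml (illustrative excerpt)
bmra:
  device_roles:
    qat:
      - name: crypto01
        pci: "0000:ab:00.0"
        pci_type: c6xx
        vfs: 12

# rendered output
qat_devices:
  - qat_dev: crypto01
    qat_id: "0000:ab:00.0"
    qat_pci_type: c6xx
    qat_sriov_numvfs: 12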
@@ -113,9 +98,8 @@ hugepages_enabled: {{ bmra.features.hugepages.enable }}
# Hugepage sizes available: 2M, 1G
default_hugepage_size: {{ bmra.features.hugepages.default }}
-# Sets how many hugepages of each size should be created
-hugepages_1G: {{ bmra.features.hugepages.hugepages_1G }}
-hugepages_2M: {{ bmra.features.hugepages.hugepages_2M }}
+# Sets how many hugepages of 'default_hugepage_size' size should be created
+number_of_hugepages: {{ bmra.features.hugepages.amount }}
# CPU isolation from Linux scheduler
isolcpus_enabled: {{ bmra.features.isolcpus.enable }}
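The per-size counters are replaced by a single count of default_hugepage_size pages. A sketch of the IDF subtree implied by the new template variables (values illustrative):

# idf.yml (illustrative excerpt)
bmra:
  features:
    hugepages:
      enable: true
      default: 1G
      amount: 16

# rendered output
default_hugepage_size: 1G
number_of_hugepages: 16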
@@ -124,17 +108,18 @@ isolcpus_enabled: {{ bmra.features.isolcpus.enable }}
isolcpus: "{{ bmra.features.isolcpus.cpus }}"
# Native CPU Manager (Kubernetes built-in)
-
+# These settings are relevant only if 'native_cpu_manager_enabled: true' is set in group_vars
+# Amount of CPU cores that will be reserved for housekeeping (2000m = 2000 millicores = 2 cores)
native_cpu_manager_system_reserved_cpus: 2000m
# Amount of CPU cores that will be reserved for Kubelet
native_cpu_manager_kube_reserved_cpus: 1000m
# Explicit list of the CPUs reserved from pods scheduling.
# Note: Supported only with kube_version 1.17 and newer, overrides native_cpu_manager_system_reserved_cpus and native_cpu_manager_kube_reserved_cpus.
#native_cpu_manager_reserved_cpus: "0,1,2"
-# Note: All reamining unreserved CPU cores will be consumed by the workloads.
+# Note: All remaining unreserved CPU cores will be consumed by the workloads.
# Enable/Disable Intel PState scaling driver
-intel_pstate_enabled: true
+intel_pstate_enabled: false
# Config options for intel_pstate: disable, passive, force, no_hwp, hwp_only, support_acpi_ppc, per_cpu_perf_limits
intel_pstate: hwp_only
# Enable/Disable Intel Turbo Boost PState attribute
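The isolcpus values in this hunk come from the IDF as well. A minimal sketch, assuming bmra.features.isolcpus carries an enable flag and a CPU list (values illustrative):

# idf.yml (illustrative excerpt)
bmra:
  features:
    isolcpus:
      enable: true
      cpus: "4-11"

# rendered output
isolcpus_enabled: true
isolcpus: "4-11"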
@@ -190,6 +175,10 @@ sst_cp_cpu_clos:
- clos: 1
cpus: 3,7
+# Intel(R) SST-TF (feature turbo-freq) configuration for Ice Lake (ICX) platforms.
+# [true] Enable Intel Speed Select Turbo Frequency (SST-TF)
+# [false] Disable Intel Speed Select Turbo Frequency (SST-TF)
+sst_tf_configuration_enabled: false
# (CentOS 7 only) install real time kernel and related packages for flexran
install_real_time_package: false