author     Michael S. Pedersen <michaelx.pedersen@intel.com>   2020-11-26 11:19:08 +0000
committer  Rihab Banday <rihab.banday@ericsson.com>            2020-12-03 19:29:32 +0000
commit     7155286ed00a8bce09a948fb54fb4eeb85027e49 (patch)
tree       74c728ac707a2b016ed5dd47aa688909287249c3 /playbooks/roles/bmra-config/templates
parent     d3916f20f0b10da360cc6c38b61e1ee04d6278c5 (diff)
Update BMRA to v2.0
Also adds configuration of BMRA through the existing hw_config files (idf, pdf)
and better handling of dependencies.
Signed-off-by: Michael S. Pedersen <michaelx.pedersen@intel.com>
Change-Id: Iedaf249f01ea3e1e00c889e846e21644adff98c7
Reviewed-on: https://gerrit.opnfv.org/gerrit/c/kuberef/+/71504
Tested-by: jenkins-ci <jenkins-opnfv-ci@opnfv.org>
Reviewed-by: Rihab Banday <rihab.banday@ericsson.com>
Reviewed-by: Georg Kunz <georg.kunz@ericsson.com>
Diffstat (limited to 'playbooks/roles/bmra-config/templates')
-rw-r--r--   playbooks/roles/bmra-config/templates/all.j2                                                                 154
-rw-r--r--   playbooks/roles/bmra-config/templates/inventory.j2 (renamed from playbooks/roles/bmra-config/templates/inventory.ini)     4
-rw-r--r--   playbooks/roles/bmra-config/templates/kube-node.j2                                                           128
3 files changed, 286 insertions, 0 deletions
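
The templates in this diff consume a `bmra` dictionary that, per the commit message, is populated from the existing hw_config files (idf, pdf). The layout of those files is outside the scope of this change, so the snippet below is only an illustrative sketch of the keys the templates reference; the surrounding structure and all values are assumptions, not part of the commit.

bmra:                              # illustrative input only; values are placeholders
  features:
    nfd: true                      # rendered into nfd_enabled (all.j2)
    cmk:
      enable: true                 # cmk_enabled
      num_shared_cores: 3          # cmk_shared_num_cores
      num_exclusive_cores: 3       # cmk_exclusive_num_cores
    topology_manager:
      enable: true                 # topology_manager_enabled
      policy: best-effort          # topology_manager_policy
    sriov_net_dp: false            # sriov_net_dp_enabled, example_net_attach_defs.sriov_net_dp
    tas:
      enable: true                 # tas_enabled
      demo_policy: false           # tas_enable_demo_policy
    psp: true                      # psp_enabled
    sriov:
      enable: false                # sriov_enabled (kube-node.j2)
      sriov_vfs_per_port: 2        # sriov_numvfs for kernel-driver VFs
      sriov_dpdk_vfs_per_port: 4   # sriov_numvfs for DPDK-bound VFs
    sriov_cni: false               # sriov_cni_enabled
    hugepages:
      enable: true                 # hugepages_enabled
      default: 2M                  # default_hugepage_size
      hugepages_1G: 0
      hugepages_2M: 1024
    isolcpus:
      enable: false                # isolcpus_enabled
      cpus: "4-11"                 # isolcpus
  network_roles:
    sriov:                         # PFs whose VFs keep a kernel driver (sriov_nics)
      - name: eno2
        bus_info: "19:00.1"
        device_info: "8086:1572"
        driver: iavf
    sriov_dpdk: []                 # PFs whose VFs are bound to a DPDK driver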
diff --git a/playbooks/roles/bmra-config/templates/all.j2 b/playbooks/roles/bmra-config/templates/all.j2
new file mode 100644
index 0000000..1017331
--- /dev/null
+++ b/playbooks/roles/bmra-config/templates/all.j2
@@ -0,0 +1,154 @@
+##
+## Copyright (c) 2020 Intel Corporation.
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+##   http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+---
+## BMRA primary playbook variables ##
+
+# Kubernetes version
+kubernetes: true
+kube_version: v1.18.8
+#kube_version: v1.17.11
+#kube_version: v1.16.14
+
+# Run system-wide package update (apt dist-upgrade, yum update, ...)
+# Note: enabling this may lead to unexpected results
+# Tip: you can set this per host using host_vars
+update_all_packages: false
+
+# Node Feature Discovery
+nfd_enabled: {{ bmra.features.nfd }}
+nfd_build_image_locally: false
+nfd_namespace: kube-system
+nfd_sleep_interval: 60s
+
+# Intel CPU Manager for Kubernetes
+cmk_enabled: {{ bmra.features.cmk.enable }}
+cmk_namespace: kube-system
+cmk_use_all_hosts: false # 'true' will deploy CMK on the controller nodes too
+#cmk_hosts_list: node1,node2 # allows to control where CMK nodes will run, leave this option commented out to deploy on all K8s nodes
+cmk_shared_num_cores: {{ bmra.features.cmk.num_shared_cores }} # number of CPU cores to be assigned to the "shared" pool on each of the nodes
+cmk_exclusive_num_cores: {{ bmra.features.cmk.num_exclusive_cores }} # number of CPU cores to be assigned to the "exclusive" pool on each of the nodes
+# cmk_shared_mode: packed # choose between: packed, spread, default: packed
+# cmk_exclusive_mode: packed # choose between: packed, spread, default: packed
+
+# Native CPU Manager (Kubernetes built-in)
+# Note: Enabling CMK and built-in CPU Manager is not recommended.
+# Setting this option as "true" enables the "static" policy, otherwise the default "none" policy is used.
+native_cpu_manager_enabled: false
+# Amount of CPU cores that will be reserved for the housekeeping (2000m = 2000 millicores = 2 cores)
+native_cpu_manager_system_reserved_cpus: 2000m
+# Amount of CPU cores that will be reserved for Kubelet
+native_cpu_manager_kube_reserved_cpus: 1000m
+# Explicit list of the CPUs reserved from pods scheduling.
+# Note: Supported only with kube_version 1.17 and newer, overrides native_cpu_manager_system_reserved_cpus and native_cpu_manager_kube_reserved_cpus.
+#native_cpu_manager_reserved_cpus: "0,1,2"
+# Note: All remaining unreserved CPU cores will be consumed by the workloads.
+
+# Enable Kubernetes built-in Topology Manager
+topology_manager_enabled: {{ bmra.features.topology_manager.enable }}
+# There are four supported policies: none, best-effort, restricted, single-numa-node.
+topology_manager_policy: "{{ bmra.features.topology_manager.policy }}"
+
+# Intel SRIOV Network Device Plugin
+sriov_net_dp_enabled: {{ bmra.features.sriov_net_dp }}
+sriov_net_dp_namespace: kube-system
+# whether to build and store image locally or use one from public external registry
+sriov_net_dp_build_image_locally: false
+# SR-IOV network device plugin configuration.
+# For more information on supported configuration refer to: https://github.com/intel/sriov-network-device-plugin#configurations
+sriovdp_config_data: |
+  {
+    "resourceList": [{
+      "resourceName": "intel_sriov_netdevice",
+      "selectors": {
+        "vendors": ["8086"],
+        "devices": ["154c", "10ed", "1889"],
+        "drivers": ["iavf", "ixgbevf"]
+      }
+    },
+    {
+      "resourceName": "intel_sriov_dpdk_700_series",
+      "selectors": {
+        "vendors": ["8086"],
+        "devices": ["154c", "10ed"],
+        "drivers": ["vfio-pci"]
+      }
+    },
+    {
+      "resourceName": "intel_sriov_dpdk_800_series",
+      "selectors": {
+        "vendors": ["8086"],
+        "devices": ["1889"],
+        "drivers": ["vfio-pci"]
+      }
+    }
+    ]
+  }
+
+# Intel QAT Device Plugin for Kubernetes
+qat_dp_enabled: false
+qat_dp_namespace: kube-system
+
+# Intel GPU Device Plugin for Kubernetes
+gpu_dp_enabled: false
+gpu_dp_namespace: kube-system
+
+# Intel Telemetry Aware Scheduling
+tas_enabled: {{ bmra.features.tas.enable }}
+tas_namespace: monitoring
+# create and enable TAS demonstration policy: [true, false]
+tas_enable_demo_policy: {{ bmra.features.tas.demo_policy }}
+
+# Create reference net-attach-def objects
+example_net_attach_defs:
+  userspace_ovs_dpdk: false # Update to match host_vars CNI configuration
+  userspace_vpp: false # Update to match host_vars CNI configuration
+  sriov_net_dp: {{ bmra.features.sriov_net_dp }} # Update to match host_vars CNI configuration
+
+## Proxy configuration ##
+#http_proxy: "http://proxy.example.com:1080"
+#https_proxy: "http://proxy.example.com:1080"
+#additional_no_proxy: ".example.com"
+
+# (Ubuntu only) disables DNS stub listener which may cause issues on Ubuntu
+dns_disable_stub_listener: false
+
+# Kubernetes cluster name, also will be used as DNS domain
+cluster_name: cluster.local
+
+## Kubespray variables ##
+
+# default network plugins and kube-proxy configuration
+kube_network_plugin_multus: true
+multus_version: v3.4.2
+kube_network_plugin: flannel
+kube_pods_subnet: 10.244.0.0/16
+kube_service_addresses: 10.233.0.0/18
+kube_proxy_mode: iptables
+
+# please leave it set to "true", otherwise Intel BMRA features deployed as Helm charts won't be installed
+helm_enabled: true
+
+# Docker registry running on the cluster allows us to store images not available on Docker Hub, e.g. CMK
+registry_local_address: "localhost:30500"
+
+# Enable Pod Security Policy. This option enables PSP admission controller and creates minimal set of rules.
+psp_enabled: {{ bmra.features.psp }}
+
+# Set image pull policy to Always. Pulls images prior to starting containers. Valid credentials must be configured.
+always_pull_enabled: true
+
+# Telemetry configuration
+collectd_scrap_interval: 30
diff --git a/playbooks/roles/bmra-config/templates/inventory.ini b/playbooks/roles/bmra-config/templates/inventory.j2
index 7a9c6ab..48e953c 100644
--- a/playbooks/roles/bmra-config/templates/inventory.ini
+++ b/playbooks/roles/bmra-config/templates/inventory.j2
@@ -2,6 +2,7 @@
 {% for node in nodes %}
 {{ idf.kubespray.hostnames[node.name] }} ansible_host={{ node.interfaces[idf.net_config[engine.pxe_network].interface].address }} ip={{ node.interfaces[idf.net_config[engine.pxe_network].interface].address }}
 {% endfor %}
+localhost
 
 {% for role in ['kube-master', 'etcd', 'kube-node'] %}
 [{{ role }}]
@@ -17,3 +18,6 @@ kube-master
 kube-node
 [calico-rr]
+
+[all:vars]
+ansible_python_interpreter=/usr/bin/python3
diff --git a/playbooks/roles/bmra-config/templates/kube-node.j2 b/playbooks/roles/bmra-config/templates/kube-node.j2
new file mode 100644
index 0000000..302fa8f
--- /dev/null
+++ b/playbooks/roles/bmra-config/templates/kube-node.j2
@@ -0,0 +1,128 @@
+##
+## Copyright (c) 2020 Intel Corporation.
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+##   http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+---
+# Kubernetes node configuration
+
+# Enable SR-IOV networking related setup
+sriov_enabled: {{ bmra.features.sriov.enable }}
+
+# SR-IOV PF specific configuration list
+sriov_nics:
+{% for intf in bmra.network_roles.sriov %}
+  - name: {{ intf.name }}
+    bus_info: "{{ intf.bus_info }}"
+    device_info: "{{ intf.device_info }}"
+    sriov_numvfs: {{ bmra.features.sriov.sriov_vfs_per_port }}
+    vf_driver: {{ intf.driver }}
+{% endfor %}
+{% for intf in bmra.network_roles.sriov_dpdk %}
+  - name: {{ intf.name }}
+    bus_info: "{{ intf.bus_info }}"
+    device_info: "{{ intf.device_info }}"
+    sriov_numvfs: {{ bmra.features.sriov.sriov_dpdk_vfs_per_port }}
+    vf_driver: {{ intf.driver }}
+{% endfor %}
+
+sriov_cni_enabled: {{ bmra.features.sriov_cni }}
+
+# Bond CNI
+bond_cni_enabled: false
+
+# install DPDK
+install_dpdk: true # DPDK installation is required for sriov_enabled:true; default to false
+
+# Userspace networking
+userspace_cni_enabled: false
+ovs_dpdk_enabled: false # Should be enabled with Userspace CNI, when VPP is set to "false"; 1G hugepages required
+# CPU mask for OVS-DPDK PMD threads
+ovs_dpdk_lcore_mask: 0x1
+# Huge memory pages allocated by OVS-DPDK per NUMA node in megabytes
+# example 1: "256,512" will allocate 256MB from node 0 and 512MB from node 1
+# example 2: "1024" will allocate 1GB from node 0 on a single socket board, e.g. in a VM
+ovs_dpdk_socket_mem: "256,0"
+vpp_enabled: false # Should be enabled with Userspace CNI, when ovs_dpdk is set to "false"; 2M hugepages required
+
+# Set to 'true' to update i40e, ice and iavf kernel modules
+update_nic_drivers: false
+
+# Set 'true' to update NIC firmware
+update_nic_firmware: false
+
+# Additional list of NIC interfaces that the FW update will be executed on
+# NOTE: FW update will be also executed on all NICs listed in "sriov_nics[*].name"
+firmware_update_nics:
+  - intf1
+  - intf2
+
+# install Intel x700 & x800 series NICs DDP packages
+install_ddp_packages: false
+
+# set how many VFs per single QAT device PF should be created
+qat_sriov_numvfs: 16
+
+# Enables hugepages support
+hugepages_enabled: {{ bmra.features.hugepages.enable }}
+
+# Hugepage sizes available: 2M, 1G
+default_hugepage_size: {{ bmra.features.hugepages.default }}
+
+# Sets how many hugepages of each size should be created
+hugepages_1G: {{ bmra.features.hugepages.hugepages_1G }}
+hugepages_2M: {{ bmra.features.hugepages.hugepages_2M }}
+
+# CPU isolation from Linux scheduler
+isolcpus_enabled: {{ bmra.features.isolcpus.enable }}
+isolcpus: "{{ bmra.features.isolcpus.cpus }}" # Update to match group_vars requested exclusive/shared cores
+
+# Max number of processors to support (physical & logical cores)
+cpu_count: 144
+
+# Enable/Disable Intel PState scaling driver
+intel_pstate_enabled: true
+# Config options for intel_pstate: disable, passive, force, no_hwp, hwp_only, support_acpi_ppc, per_cpu_perf_limits
+intel_pstate: disable
+# Enable/Disable Intel Turbo Boost PState attribute
+turbo_boost_enabled: false
+
+# Intel Speed Select Base-Frequency configuration.
+sst_bf_configuration_enabled: false
+
+# Intel Speed Select Base-Frequency configuration for Cascade Lake (CLX) Platforms.
+# CLX support of SST-BF requires 'intel_pstate' to be 'enabled'
+# Option clx_sst_bf_mode requires sst_bf_configuration_enabled to be set to 'true'.
+# There are three configuration modes:
+# [s] Set SST-BF config (set min/max to 2700/2700 and 2100/2100)
+# [m] Set P1 on all cores (set min/max to 2300/2300)
+# [r] Revert cores to min/Turbo (set min/max to 800/3900)
+clx_sst_bf_mode: s
+
+# (CentOS 7 only) install real time kernel and related packages for flexran
+install_real_time_package: false
+
+# Telemetry configuration
+# intel_pmu plugin collects information provided by Linux perf interface.
+enable_intel_pmu_plugin: false
+# CPU Threads to be monitored by Intel PMU Plugin.
+# If the field is empty, all available cores will be monitored.
+# Please refer to https://collectd.org/wiki/index.php/Plugin:Intel_PMU for configuration details.
+intel_pmu_plugin_monitored_cores: ""
+# CPU Threads to be monitored by Intel RDT Plugin.
+# If the field is empty, all available cores will be monitored.
+# Please refer to https://collectd.org/wiki/index.php/Plugin:IntelRDT for configuration details.
+intel_rdt_plugin_monitored_cores: ""
+
+# Additional list of plugins that will be excluded from collectd deployment.
+exclude_collectd_plugins: []
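
The tasks that render these templates are not included here (the diff is limited to the templates directory), but with Ansible's built-in template module the rendering step could look roughly like the sketch below. The destination paths and the bmra_config_dir variable are illustrative assumptions, not taken from this change.

# Hypothetical bmra-config tasks (illustration only; paths and variable
# names are assumed, not part of this commit).
- name: Render BMRA cluster-wide variables
  template:
    src: all.j2
    dest: "{{ bmra_config_dir }}/group_vars/all.yml"

- name: Render BMRA worker node variables
  template:
    src: kube-node.j2
    dest: "{{ bmra_config_dir }}/group_vars/kube-node.yml"

- name: Render the BMRA/Kubespray inventory
  template:
    src: inventory.j2
    dest: "{{ bmra_config_dir }}/inventory.ini"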