author     Michael S. Pedersen <michaelx.pedersen@intel.com>   2020-11-26 11:19:08 +0000
committer  Rihab Banday <rihab.banday@ericsson.com>            2020-12-03 19:29:32 +0000
commit     7155286ed00a8bce09a948fb54fb4eeb85027e49 (patch)
tree       74c728ac707a2b016ed5dd47aa688909287249c3
parent     d3916f20f0b10da360cc6c38b61e1ee04d6278c5 (diff)
Update BMRA to v2.0
Also adds configuration of BMRA through the existing hw_config files (idf, pdf)
and improves the handling of dependencies.
Signed-off-by: Michael S. Pedersen <michaelx.pedersen@intel.com>
Change-Id: Iedaf249f01ea3e1e00c889e846e21644adff98c7
Reviewed-on: https://gerrit.opnfv.org/gerrit/c/kuberef/+/71504
Tested-by: jenkins-ci <jenkins-opnfv-ci@opnfv.org>
Reviewed-by: Rihab Banday <rihab.banday@ericsson.com>
Reviewed-by: Georg Kunz <georg.kunz@ericsson.com>
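
The new configuration flow starts in deploy.env, which now derives the BMRA profile from the vendor's IDF. A minimal sketch of that lookup, assuming yq v3 (the "yq r" syntax used by deploy.env) and the equinix-metal idf.yaml added in this patch:

    # Read bmra.profile from the vendor IDF; requires yq v3 ("yq r" syntax)
    export VENDOR=equinix-metal
    BMRA_PROFILE=$(yq r "$CURRENTPATH/hw_config/$VENDOR/idf.yaml" bmra.profile)
    echo "$BMRA_PROFILE"   # prints: full_nfv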
-rw-r--r--   deploy.env                                           |   1
-rwxr-xr-x   functions.sh                                         |  24
-rw-r--r--   hw_config/equinix-metal/idf.yaml                     |  41
-rw-r--r--   playbooks/pre-install.yaml                           |   4
-rw-r--r--   playbooks/roles/bmra-config/tasks/main.yaml          |  14
-rw-r--r--   playbooks/roles/bmra-config/templates/all.j2         | 154
-rw-r--r--   playbooks/roles/bmra-config/templates/inventory.j2   |   4   (renamed from playbooks/roles/bmra-config/templates/inventory.ini)
-rw-r--r--   playbooks/roles/bmra-config/templates/kube-node.j2   | 128
-rw-r--r--   playbooks/roles/pre-install/tasks/main.yml           |  26
-rw-r--r--   playbooks/roles/pre-install/vars/RedHat.yml          |   2
-rw-r--r--   sw_config/bmra/Dockerfile                            |   2
-rw-r--r--   sw_config/bmra/all.yml                               |  96
-rw-r--r--   sw_config/bmra/kube-node.yml                         |  63

13 files changed, 382 insertions(+), 177 deletions(-)
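
To see what the reworked provision_k8s() in the diff below actually runs inside the installer container, here is the expanded value of ansible_cmd after both string appends, assuming DEPLOYMENT=k8s and BMRA_PROFILE=full_nfv (the missing space after the second semicolon is carried over verbatim from the concatenation; it is still valid shell):

    /bin/bash -c 'pip install --upgrade pip==9.0.3; pip install ansible==2.9.6;ansible-playbook -i /bmra/inventory.ini /bmra/playbooks/k8s/patch_kubespray.yml; ansible-playbook -i /bmra/inventory.ini /bmra/playbooks/full_nfv.yml'

With DEPLOYMENT=full, the two pip installs are skipped and only the patch_kubespray and profile playbooks run.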
diff --git a/deploy.env b/deploy.env
--- a/deploy.env
+++ b/deploy.env
@@ -2,6 +2,7 @@ export VENDOR=${VENDOR:-ericsson-pod2}
 export INSTALLER=bmra
+export BMRA_PROFILE=$(yq r $CURRENTPATH/hw_config/$VENDOR/idf.yaml bmra.profile)
 
 # Deployment type. Supports "full" and "k8s"
 export DEPLOYMENT=${DEPLOYMENT:-full}
diff --git a/functions.sh b/functions.sh
index 8d206bf..88d6c09 100755
--- a/functions.sh
+++ b/functions.sh
@@ -189,19 +189,14 @@ EOF
 done
 }
 
-# Get IPs of target nodes (used for installing dependencies)
-get_target_ips() {
-    yq r "$CURRENTPATH"/hw_config/"$VENDOR"/pdf.yaml nodes[*].interfaces[*].address
-}
-
 # k8s Provisioning (currently BMRA)
 provision_k8s() {
-    ansible_cmd='/bin/bash -c "'
+    ansible_cmd="/bin/bash -c '"
     if [[ "$DEPLOYMENT" == "k8s" ]]; then
         ansible-playbook -i "$CURRENTPATH"/sw_config/bmra/inventory.ini "$CURRENTPATH"/playbooks/pre-install.yaml
-        ansible_cmd+='pip install --upgrade pip==9.0.3; yum -y remove python-netaddr; pip install netaddr==0.7.19; '
+        ansible_cmd+="pip install --upgrade pip==9.0.3; pip install ansible==2.9.6;"
     fi
-    ansible_cmd+='ansible-playbook -i /bmra/inventory.ini /bmra/playbooks/cluster.yml"'
+    ansible_cmd+="ansible-playbook -i /bmra/inventory.ini /bmra/playbooks/k8s/patch_kubespray.yml; ansible-playbook -i /bmra/inventory.ini /bmra/playbooks/${BMRA_PROFILE}.yml'"
 
     # shellcheck disable=SC2087
     ssh -o StrictHostKeyChecking=no -tT "$USERNAME"@"$(get_vm_ip)" << EOF
@@ -215,8 +210,8 @@ if ! command -v docker; then
 done
 fi
 if [ ! -d "${PROJECT_ROOT}/container-experience-kits" ]; then
-  git clone --recurse-submodules --depth 1 https://github.com/intel/container-experience-kits.git -b v1.4.1 ${PROJECT_ROOT}/container-experience-kits/
-  cp -r ${PROJECT_ROOT}/container-experience-kits/examples/group_vars ${PROJECT_ROOT}/container-experience-kits/
+  git clone --recurse-submodules --depth 1 https://github.com/intel/container-experience-kits.git -b v2.0.0 ${PROJECT_ROOT}/container-experience-kits/
+  cp -r ${PROJECT_ROOT}/container-experience-kits/examples/${BMRA_PROFILE}/group_vars ${PROJECT_ROOT}/container-experience-kits/
 #TODO Remove this once the reported issue is fixed in the next BMRA Release
 if [[ "$DEPLOYMENT" == "full" ]]; then
   sed -i '/\openshift/a \    extra_args: --ignore-installed PyYAML' \
@@ -229,6 +224,7 @@ cp ${PROJECT_ROOT}/${INSTALLER}/{all.yml,kube-node.yml} \
 ${PROJECT_ROOT}/container-experience-kits/group_vars/
 sudo docker run --rm \
 -e ANSIBLE_CONFIG=/bmra/ansible.cfg \
+-e PROFILE=${BMRA_PROFILE} \
 -v ${PROJECT_ROOT}/container-experience-kits:/bmra \
 -v ~/.ssh/:/root/.ssh/ rihabbanday/bmra-install:centos \
 ${ansible_cmd}
@@ -241,13 +237,15 @@ copy_k8s_config() {
     MASTER_IP=$(get_host_pxe_ip "nodes[0]")
     # shellcheck disable=SC2087
     ssh -o StrictHostKeyChecking=no -tT "$USERNAME"@"$(get_vm_ip)" << EOF
-scp -q root@$MASTER_IP:/root/.kube/config ${PROJECT_ROOT}/kubeconfig
+scp -o StrictHostKeyChecking=no -q root@$MASTER_IP:/root/.kube/config ${PROJECT_ROOT}/kubeconfig
 EOF
 
     # Copy kubeconfig from Jump VM to appropriate location in Jump Host
     # Direct scp to the specified location doesn't work due to permission/ssh-keys
-    scp "$USERNAME"@"$(get_vm_ip)":"${PROJECT_ROOT}"/kubeconfig kubeconfig
-    sudo cp kubeconfig /home/opnfv/functest-kubernetes/config
+    scp -o StrictHostKeyChecking=no "$USERNAME"@"$(get_vm_ip)":"${PROJECT_ROOT}"/kubeconfig kubeconfig
+    if [ -d "/home/opnfv/functest-kubernetes" ]; then
+        sudo cp kubeconfig /home/opnfv/functest-kubernetes/config
+    fi
 }
 
 # Executes a specific Ansible playbook
diff --git a/hw_config/equinix-metal/idf.yaml b/hw_config/equinix-metal/idf.yaml
index 2c61bb6..434ada2 100644
--- a/hw_config/equinix-metal/idf.yaml
+++ b/hw_config/equinix-metal/idf.yaml
@@ -38,3 +38,44 @@ engine:
   installers:
     kubespray: *idf_kubespray
+
+bmra:
+  profile: full_nfv
+  network_roles:
+    sriov:
+      - name: eno2
+        bus_info: "19:00.1"
+        device_info: "8086:1572:0200"
+        driver: iavf
+    sriov_dpdk:
+      - name: eno4
+        bus_info: "19:00.3"
+        device_info: "8086:1572:0200"
+        driver: vfio-pci
+  features:
+    sriov:
+      enable: true
+      sriov_vfs_per_port: 2
+      sriov_dpdk_vfs_per_port: 4
+    sriov_cni: true
+    sriov_net_dp: true
+    hugepages:
+      enable: true
+      default: 2M
+      hugepages_1G: 0
+      hugepages_2M: 10240
+    isolcpus:
+      enable: true
+      cpus: "8-27,36-55"
+    nfd: true
+    cmk:
+      enable: true
+      num_shared_cores: 3
+      num_exclusive_cores: 3
+    topology_manager:
+      enable: true
+      policy: "best-effort"
+    tas:
+      enable: true
+      demo_policy: true
+    psp: true
diff --git a/playbooks/pre-install.yaml b/playbooks/pre-install.yaml
index 5f07a63..31d2a1c 100644
--- a/playbooks/pre-install.yaml
+++ b/playbooks/pre-install.yaml
@@ -9,7 +9,7 @@
 ##############################################################################
 - name: Install requirements on nodes
-  hosts: all
-  gather_facts: true
+  hosts: all,!localhost
+  gather_facts: false
   roles:
     - role: pre-install
diff --git a/playbooks/roles/bmra-config/tasks/main.yaml b/playbooks/roles/bmra-config/tasks/main.yaml
index e7d8df9..e5a692f 100644
--- a/playbooks/roles/bmra-config/tasks/main.yaml
+++ b/playbooks/roles/bmra-config/tasks/main.yaml
@@ -1,5 +1,15 @@
 ---
-- name: write BMRA config
+- name: Write BMRA inventory config
   template:
-    src: "inventory.ini"
+    src: "inventory.j2"
     dest: "{{ kuberef_root }}/sw_config/bmra/inventory.ini"
+
+- name: Write BMRA kube-node config
+  template:
+    src: "kube-node.j2"
+    dest: "{{ kuberef_root }}/sw_config/bmra/kube-node.yml"
+
+- name: Write BMRA all config
+  template:
+    src: "all.j2"
+    dest: "{{ kuberef_root }}/sw_config/bmra/all.yml"
diff --git a/playbooks/roles/bmra-config/templates/all.j2 b/playbooks/roles/bmra-config/templates/all.j2
new file mode 100644
index 0000000..1017331
--- /dev/null
+++ b/playbooks/roles/bmra-config/templates/all.j2
@@ -0,0 +1,154 @@
+##
+## Copyright (c) 2020 Intel Corporation.
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+##   http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+---
+## BMRA primary playbook variables ##
+
+# Kubernetes version
+kubernetes: true
+kube_version: v1.18.8
+#kube_version: v1.17.11
+#kube_version: v1.16.14
+
+# Run system-wide package update (apt dist-upgrade, yum update, ...)
+# Note: enabling this may lead to unexpected results
+# Tip: you can set this per host using host_vars
+update_all_packages: false
+
+# Node Feature Discovery
+nfd_enabled: {{ bmra.features.nfd }}
+nfd_build_image_locally: false
+nfd_namespace: kube-system
+nfd_sleep_interval: 60s
+
+# Intel CPU Manager for Kubernetes
+cmk_enabled: {{ bmra.features.cmk.enable }}
+cmk_namespace: kube-system
+cmk_use_all_hosts: false # 'true' will deploy CMK on the controller nodes too
+#cmk_hosts_list: node1,node2 # allows to control where CMK nodes will run, leave this option commented out to deploy on all K8s nodes
+cmk_shared_num_cores: {{ bmra.features.cmk.num_shared_cores }} # number of CPU cores to be assigned to the "shared" pool on each of the nodes
+cmk_exclusive_num_cores: {{ bmra.features.cmk.num_exclusive_cores }} # number of CPU cores to be assigned to the "exclusive" pool on each of the nodes
+# cmk_shared_mode: packed # choose between: packed, spread, default: packed
+# cmk_exclusive_mode: packed # choose between: packed, spread, default: packed
+
+# Native CPU Manager (Kubernetes built-in)
+# Note: Enabling CMK and built-in CPU Manager is not recommended.
+# Setting this option as "true" enables the "static" policy, otherwise the default "none" policy is used.
+native_cpu_manager_enabled: false
+# Amount of CPU cores that will be reserved for the housekeeping (2000m = 2000 millicores = 2 cores)
+native_cpu_manager_system_reserved_cpus: 2000m
+# Amount of CPU cores that will be reserved for Kubelet
+native_cpu_manager_kube_reserved_cpus: 1000m
+# Explicit list of the CPUs reserved from pods scheduling.
+# Note: Supported only with kube_version 1.17 and newer, overrides native_cpu_manager_system_reserved_cpus and native_cpu_manager_kube_reserved_cpus.
+#native_cpu_manager_reserved_cpus: "0,1,2"
+# Note: All remaining unreserved CPU cores will be consumed by the workloads.
+
+# Enable Kubernetes built-in Topology Manager
+topology_manager_enabled: {{ bmra.features.topology_manager.enable }}
+# There are four supported policies: none, best-effort, restricted, single-numa-node.
+topology_manager_policy: "{{ bmra.features.topology_manager.policy }}"
+
+# Intel SRIOV Network Device Plugin
+sriov_net_dp_enabled: {{ bmra.features.sriov_net_dp }}
+sriov_net_dp_namespace: kube-system
+# whether to build and store image locally or use one from public external registry
+sriov_net_dp_build_image_locally: false
+# SR-IOV network device plugin configuration.
+# For more information on supported configuration refer to: https://github.com/intel/sriov-network-device-plugin#configurations
+sriovdp_config_data: |
+    {
+        "resourceList": [{
+                "resourceName": "intel_sriov_netdevice",
+                "selectors": {
+                    "vendors": ["8086"],
+                    "devices": ["154c", "10ed", "1889"],
+                    "drivers": ["iavf", "ixgbevf"]
+                }
+            },
+            {
+                "resourceName": "intel_sriov_dpdk_700_series",
+                "selectors": {
+                    "vendors": ["8086"],
+                    "devices": ["154c", "10ed"],
+                    "drivers": ["vfio-pci"]
+                }
+            },
+            {
+                "resourceName": "intel_sriov_dpdk_800_series",
+                "selectors": {
+                    "vendors": ["8086"],
+                    "devices": ["1889"],
+                    "drivers": ["vfio-pci"]
+                }
+            }
+        ]
+    }
+
+# Intel QAT Device Plugin for Kubernetes
+qat_dp_enabled: false
+qat_dp_namespace: kube-system
+
+# Intel GPU Device Plugin for Kubernetes
+gpu_dp_enabled: false
+gpu_dp_namespace: kube-system
+
+# Intel Telemetry Aware Scheduling
+tas_enabled: {{ bmra.features.tas.enable }}
+tas_namespace: monitoring
+# create and enable TAS demonstration policy: [true, false]
+tas_enable_demo_policy: {{ bmra.features.tas.demo_policy }}
+
+# Create reference net-attach-def objects
+example_net_attach_defs:
+  userspace_ovs_dpdk: false # Update to match host_vars CNI configuration
+  userspace_vpp: false # Update to match host_vars CNI configuration
+  sriov_net_dp: {{ bmra.features.sriov_net_dp }} # Update to match host_vars CNI configuration
+
+## Proxy configuration ##
+#http_proxy: "http://proxy.example.com:1080"
+#https_proxy: "http://proxy.example.com:1080"
+#additional_no_proxy: ".example.com"
+
+# (Ubuntu only) disables DNS stub listener which may cause issues on Ubuntu
+dns_disable_stub_listener: false
+
+# Kubernetes cluster name, also will be used as DNS domain
+cluster_name: cluster.local
+
+## Kubespray variables ##
+
+# default network plugins and kube-proxy configuration
+kube_network_plugin_multus: true
+multus_version: v3.4.2
+kube_network_plugin: flannel
+kube_pods_subnet: 10.244.0.0/16
+kube_service_addresses: 10.233.0.0/18
+kube_proxy_mode: iptables
+
+# please leave it set to "true", otherwise Intel BMRA features deployed as Helm charts won't be installed
+helm_enabled: true
+
+# Docker registry running on the cluster allows us to store images not available on Docker Hub, e.g. CMK
+registry_local_address: "localhost:30500"
+
+# Enable Pod Security Policy. This option enables PSP admission controller and creates minimal set of rules.
+psp_enabled: {{ bmra.features.psp }}
+
+# Set image pull policy to Always. Pulls images prior to starting containers. Valid credentials must be configured.
+always_pull_enabled: true
+
+# Telemetry configuration
+collectd_scrap_interval: 30
diff --git a/playbooks/roles/bmra-config/templates/inventory.ini b/playbooks/roles/bmra-config/templates/inventory.j2
index 7a9c6ab..48e953c 100644
--- a/playbooks/roles/bmra-config/templates/inventory.ini
+++ b/playbooks/roles/bmra-config/templates/inventory.j2
@@ -2,6 +2,7 @@
 {% for node in nodes %}
 {{ idf.kubespray.hostnames[node.name] }} ansible_host={{ node.interfaces[idf.net_config[engine.pxe_network].interface].address }} ip={{ node.interfaces[idf.net_config[engine.pxe_network].interface].address }}
 {% endfor %}
+localhost
 
 {% for role in ['kube-master', 'etcd', 'kube-node'] %}
 [{{ role }}]
@@ -17,3 +18,6 @@ kube-master
 kube-node
 
 [calico-rr]
+
+[all:vars]
+ansible_python_interpreter=/usr/bin/python3
diff --git a/playbooks/roles/bmra-config/templates/kube-node.j2 b/playbooks/roles/bmra-config/templates/kube-node.j2
new file mode 100644
index 0000000..302fa8f
--- /dev/null
+++ b/playbooks/roles/bmra-config/templates/kube-node.j2
@@ -0,0 +1,128 @@
+##
+## Copyright (c) 2020 Intel Corporation.
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+##   http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+---
+# Kubernetes node configuration
+
+# Enable SR-IOV networking related setup
+sriov_enabled: {{ bmra.features.sriov.enable }}
+
+# SR-IOV PF specific configuration list
+sriov_nics:
+{% for intf in bmra.network_roles.sriov %}
+  - name: {{ intf.name }}
+    bus_info: "{{ intf.bus_info }}"
+    device_info: "{{ intf.device_info }}"
+    sriov_numvfs: {{ bmra.features.sriov.sriov_vfs_per_port }}
+    vf_driver: {{ intf.driver }}
+{% endfor %}
+{% for intf in bmra.network_roles.sriov_dpdk %}
+  - name: {{ intf.name }}
+    bus_info: "{{ intf.bus_info }}"
+    device_info: "{{ intf.device_info }}"
+    sriov_numvfs: {{ bmra.features.sriov.sriov_dpdk_vfs_per_port }}
+    vf_driver: {{ intf.driver }}
+{% endfor %}
+
+sriov_cni_enabled: {{ bmra.features.sriov_cni }}
+
+# Bond CNI
+bond_cni_enabled: false
+
+# install DPDK
+install_dpdk: true # DPDK installation is required for sriov_enabled:true; default to false
+
+# Userspace networking
+userspace_cni_enabled: false
+ovs_dpdk_enabled: false # Should be enabled with Userspace CNI, when VPP is set to "false"; 1G hugepages required
+# CPU mask for OVS-DPDK PMD threads
+ovs_dpdk_lcore_mask: 0x1
+# Huge memory pages allocated by OVS-DPDK per NUMA node in megabytes
+# example 1: "256,512" will allocate 256MB from node 0 and 512MB from node 1
+# example 2: "1024" will allocate 1GB from node 0 on a single socket board, e.g. in a VM
+ovs_dpdk_socket_mem: "256,0"
+vpp_enabled: false # Should be enabled with Userspace CNI, when ovs_dpdk is set to "false"; 2M hugepages required
+
+# Set to 'true' to update i40e, ice and iavf kernel modules
+update_nic_drivers: false
+
+# Set 'true' to update NIC firmware
+update_nic_firmware: false
+
+# Additional list of NIC interfaces that the FW update will be executed on
+# NOTE: the FW update will also be executed on all NICs listed in "sriov_nics[*].name"
+firmware_update_nics:
+  - intf1
+  - intf2
+
+# install Intel x700 & x800 series NICs DDP packages
+install_ddp_packages: false
+
+# set how many VFs per single QAT device PF should be created
+qat_sriov_numvfs: 16
+
+# Enables hugepages support
+hugepages_enabled: {{ bmra.features.hugepages.enable }}
+
+# Hugepage sizes available: 2M, 1G
+default_hugepage_size: {{ bmra.features.hugepages.default }}
+
+# Sets how many hugepages of each size should be created
+hugepages_1G: {{ bmra.features.hugepages.hugepages_1G }}
+hugepages_2M: {{ bmra.features.hugepages.hugepages_2M }}
+
+# CPU isolation from Linux scheduler
+isolcpus_enabled: {{ bmra.features.isolcpus.enable }}
+isolcpus: "{{ bmra.features.isolcpus.cpus }}" # Update to match group_vars requested exclusive/shared cores
+
+# Max number of processors to support (physical & logical cores)
+cpu_count: 144
+
+# Enable/Disable Intel PState scaling driver
+intel_pstate_enabled: true
+# Config options for intel_pstate: disable, passive, force, no_hwp, hwp_only, support_acpi_ppc, per_cpu_perf_limits
+intel_pstate: disable
+# Enable/Disable Intel Turbo Boost PState attribute
+turbo_boost_enabled: false
+
+# Intel Speed Select Base-Frequency configuration.
+sst_bf_configuration_enabled: false
+
+# Intel Speed Select Base-Frequency configuration for Cascade Lake (CLX) Platforms.
+# CLX support of SST-BF requires 'intel_pstate' to be 'enabled'
+# Option clx_sst_bf_mode requires sst_bf_configuration_enabled to be set to 'true'.
+# There are three configuration modes:
+# [s] Set SST-BF config (set min/max to 2700/2700 and 2100/2100)
+# [m] Set P1 on all cores (set min/max to 2300/2300)
+# [r] Revert cores to min/Turbo (set min/max to 800/3900)
+clx_sst_bf_mode: s
+
+# (CentOS 7 only) install real time kernel and related packages for flexran
+install_real_time_package: false
+
+# Telemetry configuration
+# intel_pmu plugin collects information provided by Linux perf interface.
+enable_intel_pmu_plugin: false
+# CPU Threads to be monitored by Intel PMU Plugin.
+# If the field is empty, all available cores will be monitored.
+# Please refer to https://collectd.org/wiki/index.php/Plugin:Intel_PMU for configuration details.
+intel_pmu_plugin_monitored_cores: ""
+# CPU Threads to be monitored by Intel RDT Plugin.
+# If the field is empty, all available cores will be monitored.
+# Please refer to https://collectd.org/wiki/index.php/Plugin:IntelRDT for configuration details.
+intel_rdt_plugin_monitored_cores: ""
+
+# Additional list of plugins that will be excluded from collectd deployment.
+exclude_collectd_plugins: []
diff --git a/playbooks/roles/pre-install/tasks/main.yml b/playbooks/roles/pre-install/tasks/main.yml
index db6534d..9b725c7 100644
--- a/playbooks/roles/pre-install/tasks/main.yml
+++ b/playbooks/roles/pre-install/tasks/main.yml
@@ -8,6 +8,13 @@
 #   http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
+- name: Ensure Python3 is installed (Equinix Metal)
+  raw: yum install -y python3
+  when: lookup('env', 'VENDOR') == 'equinix-metal'
+
+- name: Gather facts
+  setup:
+
 - name: Load distro variables
   include_vars:
     file: "{{ ansible_os_family }}.yml"
@@ -18,3 +25,22 @@
     name: "{{ item }}"
     state: present
   with_items: "{{ bmra_pkgs }}"
+
+- name: Check if Python is present in PATH
+  shell: "which python"
+  register: pypath
+  failed_when: false
+
+- name: Check if /usr/bin/python exists
+  stat:
+    path: /usr/bin/python
+  register: pybin
+
+- name: Create symlink for Python
+  file:
+    src: /usr/bin/python3
+    dest: /usr/bin/python
+    state: link
+  when:
+    - not pybin.stat.exists
+    - pypath.rc != 0
diff --git a/playbooks/roles/pre-install/vars/RedHat.yml b/playbooks/roles/pre-install/vars/RedHat.yml
index 5adac50..cf20b89 100644
--- a/playbooks/roles/pre-install/vars/RedHat.yml
+++ b/playbooks/roles/pre-install/vars/RedHat.yml
@@ -12,3 +12,5 @@ bmra_pkgs:
   - lshw
   - pciutils
   - ethtool
+  - python3
+  - 'dnf-command(config-manager)'
diff --git a/sw_config/bmra/Dockerfile b/sw_config/bmra/Dockerfile
index d1c8b24..2402764 100644
--- a/sw_config/bmra/Dockerfile
+++ b/sw_config/bmra/Dockerfile
@@ -5,7 +5,7 @@ MAINTAINER "Rihab Banday <rihab.banday@ericsson.com>"
 RUN yum -y update && \
     yum -y install git epel-release python36 python-netaddr && \
     yum -y install python-pip && \
-    pip install ansible==2.7.16 jmespath && \
+    pip install ansible==2.9.6 jmespath && \
     pip install jinja2 --upgrade
 
 CMD ["bash"]
diff --git a/sw_config/bmra/all.yml b/sw_config/bmra/all.yml
deleted file mode 100644
index 45300e3..0000000
--- a/sw_config/bmra/all.yml
+++ /dev/null
@@ -1,96 +0,0 @@
----
-## BMRA master playbook variables ##
-
-# Node Feature Discovery
-nfd_enabled: false
-nfd_build_image_locally: false
-nfd_namespace: kube-system
-nfd_sleep_interval: 30s
-
-# Intel CPU Manager for Kubernetes
-cmk_enabled: false
-cmk_namespace: kube-system
-cmk_use_all_hosts: false # 'true' will deploy CMK on the master nodes too
-cmk_hosts_list: node1,node2 # allows to control where CMK nodes will run, leave this option commented out to deploy on all K8s nodes
-cmk_shared_num_cores: 2 # number of CPU cores to be assigned to the "shared" pool on each of the nodes
-cmk_exclusive_num_cores: 2 # number of CPU cores to be assigned to the "exclusive" pool on each of the nodes
-# cmk_shared_mode: packed # choose between: packed, spread, default: packed
-# cmk_exclusive_mode: packed # choose between: packed, spread, default: packed
-
-# Intel SRIOV Network Device Plugin
-sriov_net_dp_enabled: false
-sriov_net_dp_namespace: kube-system
-# whether to build and store image locally or use one from public external registry
-sriov_net_dp_build_image_locally: false
-# SR-IOV network device plugin configuration.
-# For more information on supported configuration refer to: https://github.com/intel/sriov-network-device-plugin#configurations
-sriovdp_config_data: |
-    {
-        "resourceList": [{
-                "resourceName": "intel_sriov_netdevice",
-                "selectors": {
-                    "vendors": ["8086"],
-                    "devices": ["154c", "10ed"],
-                    "drivers": ["iavf", "i40evf", "ixgbevf"]
-                }
-            },
-            {
-                "resourceName": "intel_sriov_dpdk",
-                "selectors": {
-                    "vendors": ["8086"],
-                    "devices": ["154c", "10ed"],
-                    "drivers": ["vfio-pci"]
-                }
-            }
-        ]
-    }
-
-
-# Intel Device Plugins for Kubernetes
-qat_dp_enabled: false
-qat_dp_namespace: kube-system
-gpu_dp_enabled: false
-gpu_dp_namespace: kube-system
-
-# Intel Telemetry Aware Scheduling
-tas_enabled: false
-tas_namespace: default
-# create default TAS policy: [true, false]
-tas_create_policy: false
-
-# Create reference net-attach-def objects
-example_net_attach_defs:
-  userspace_ovs_dpdk: false
-  userspace_vpp: false
-  sriov_net_dp: false
-
-## Proxy configuration ##
-# http_proxy: "http://proxy.example.com:1080"
-# https_proxy: "http://proxy.example.com:1080"
-# additional_no_proxy: ".example.com"
-
-# Topology Manager flags
-kubelet_node_custom_flags:
-  - "--feature-gates=TopologyManager=true"
-  - "--topology-manager-policy=none"
-
-# Kubernetes cluster name, also will be used as DNS domain
-cluster_name: cluster.local
-
-## Kubespray variables ##
-
-# default network plugins and kube-proxy configuration
-kube_network_plugin_multus: true
-multus_version: v3.3
-kube_network_plugin: flannel
-kube_pods_subnet: 10.244.0.0/16
-kube_service_addresses: 10.233.0.0/18
-kube_proxy_mode: iptables
-
-# please leave it set to "true", otherwise Intel BMRA features deployed as Helm charts won't be installed
-helm_enabled: true
-
-# Docker registry running on the cluster allows us to store images not available on Docker Hub, e.g. CMK
-registry_enabled: true
-registry_storage_class: ""
-registry_local_address: "localhost:5000"
diff --git a/sw_config/bmra/kube-node.yml b/sw_config/bmra/kube-node.yml
deleted file mode 100644
index e103673..0000000
--- a/sw_config/bmra/kube-node.yml
+++ /dev/null
@@ -1,63 +0,0 @@
----
-# Kubernetes node configuration
-
-# Enable SR-IOV networking related setup
-sriov_enabled: false
-
-# sriov_nics: SR-IOV PF specific configuration list
-sriov_nics:
-  - name: enp24s0f0         # PF interface names
-    sriov_numvfs: 2         # number of VFs to create for this PF(enp24s0f0)
-    vf_driver: vfio-pci     # VF driver to be attached for all VFs under this PF(enp24s0f0), "i40evf", "iavf", "vfio-pci", "igb_uio"
-    ddp_profile: "gtp.pkgo" # DDP package name to be loaded into the NIC
-  - name: enp24s0f1
-    sriov_numvfs: 4
-    vf_driver: iavf
-
-sriov_cni_enabled: false
-
-# install DPDK
-install_dpdk: false # DPDK installation is required for sriov_enabled:true; default to false
-
-userspace_cni_enabled: false
-
-# Intel Bond CNI Plugin
-bond_cni_enabled: false
-
-vpp_enabled: false
-ovs_dpdk_enabled: false
-# CPU mask for OVS-DPDK PMD threads
-ovs_dpdk_lcore_mask: 0x1
-# Huge memory pages allocated by OVS-DPDK per NUMA node in megabytes
-# example 1: "256,512" will allocate 256MB from node 0 and 512MB from node 1
-# example 2: "1024" will allocate 1GB from node 0 on a single socket board, e.g. in a VM
-ovs_dpdk_socket_mem: "256,0"
-
-# Set to 'true' to update i40e and i40evf kernel modules
-force_nic_drivers_update: false
-
-# install Intel x700 & x800 series NICs DDP packages
-install_ddp_packages: false
-
-# Enables hugepages support
-hugepages_enabled: false
-
-# Hugepage sizes available: 2M, 1G
-default_hugepage_size: 1G
-
-# Sets how many hugepages of each size should be created
-hugepages_1G: 4
-hugepages_2M: 0
-
-# CPU isolation from Linux scheduler
-isolcpus_enabled: false
-isolcpus: "4-7"
-
-# Intel CommsPowerManagement
-sst_bf_configuration_enabled: false
-# Option sst_bf_mode requires sst_bf_configuration_enabled to be set to 'true'.
-# There are three configuration modes:
-# [s] Set SST-BF config (set min/max to 2700/2700 and 2100/2100)
-# [m] Set P1 on all cores (set min/max to 2300/2300)
-# [r] Revert cores to min/Turbo (set min/max to 800/3900)
-sst_bf_mode: s
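
For reference, feeding the equinix-metal idf.yaml values from this patch through the new kube-node.j2 template would render a kube-node.yml along these lines (an illustrative excerpt only; Jinja2 renders the YAML booleans it receives from Ansible in Python style, i.e. True/False):

    sriov_enabled: True
    sriov_nics:
      - name: eno2
        bus_info: "19:00.1"
        device_info: "8086:1572:0200"
        sriov_numvfs: 2
        vf_driver: iavf
      - name: eno4
        bus_info: "19:00.3"
        device_info: "8086:1572:0200"
        sriov_numvfs: 4
        vf_driver: vfio-pci
    sriov_cni_enabled: True
    # ... static settings from the template omitted ...
    hugepages_enabled: True
    default_hugepage_size: 2M
    hugepages_1G: 0
    hugepages_2M: 10240
    isolcpus_enabled: True
    isolcpus: "8-27,36-55"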