Diffstat (limited to 'sw_config')
-rw-r--r--  sw_config/bmra/all.yml         96
-rw-r--r--  sw_config/bmra/inventory.ini   18
-rw-r--r--  sw_config/bmra/node1.yml       63
3 files changed, 177 insertions, 0 deletions
diff --git a/sw_config/bmra/all.yml b/sw_config/bmra/all.yml
new file mode 100644
index 0000000..a1140d3
--- /dev/null
+++ b/sw_config/bmra/all.yml
@@ -0,0 +1,96 @@
+---
+## BMRA master playbook variables ##
+
+# Node Feature Discovery
+nfd_enabled: false
+nfd_build_image_locally: false
+nfd_namespace: kube-system
+nfd_sleep_interval: 30s
+
+# Intel CPU Manager for Kubernetes
+cmk_enabled: false
+cmk_namespace: kube-system
+cmk_use_all_hosts: false # 'true' will deploy CMK on the master nodes too
+cmk_hosts_list: node1,node2 # controls where CMK will run; leave this option commented out to deploy on all K8s nodes
+cmk_shared_num_cores: 2 # number of CPU cores to be assigned to the "shared" pool on each of the nodes
+cmk_exclusive_num_cores: 2 # number of CPU cores to be assigned to the "exclusive" pool on each of the nodes
+#cmk_shared_mode: packed # choose between: packed, spread; default: packed
+#cmk_exclusive_mode: packed # choose between: packed, spread; default: packed
+
+# Intel SR-IOV Network Device Plugin
+sriov_net_dp_enabled: false
+sriov_net_dp_namespace: kube-system
+# whether to build and store the image locally or use one from a public external registry
+sriov_net_dp_build_image_locally: false
+# SR-IOV network device plugin configuration.
+# For more information on supported configurations, refer to: https://github.com/intel/sriov-network-device-plugin#configurations
+sriovdp_config_data: |
+    {
+        "resourceList": [{
+                "resourceName": "intel_sriov_netdevice",
+                "selectors": {
+                    "vendors": ["8086"],
+                    "devices": ["154c", "10ed"],
+                    "drivers": ["iavf", "i40evf", "ixgbevf"]
+                }
+            },
+            {
+                "resourceName": "intel_sriov_dpdk",
+                "selectors": {
+                    "vendors": ["8086"],
+                    "devices": ["154c", "10ed"],
+                    "drivers": ["vfio-pci"]
+                }
+            }
+        ]
+    }
+
+
+# Intel Device Plugins for Kubernetes
+qat_dp_enabled: false
+qat_dp_namespace: kube-system
+gpu_dp_enabled: false
+gpu_dp_namespace: kube-system
+
+# Intel Telemetry Aware Scheduling
+tas_enabled: false
+tas_namespace: default
+# create default TAS policy: [true, false]
+tas_create_policy: false
+
+# Create reference net-attach-def objects
+example_net_attach_defs:
+  userspace_ovs_dpdk: false
+  userspace_vpp: false
+  sriov_net_dp: false
+
+## Proxy configuration ##
+#http_proxy: "http://proxy.example.com:1080"
+#https_proxy: "http://proxy.example.com:1080"
+#additional_no_proxy: ".example.com"
+
+# Topology Manager flags
+kubelet_node_custom_flags:
+  - "--feature-gates=TopologyManager=true"
+  - "--topology-manager-policy=none"
+
+# Kubernetes cluster name; also used as the DNS domain
+cluster_name: cluster.local
+
+## Kubespray variables ##
+
+# default network plugins and kube-proxy configuration
+kube_network_plugin_multus: true
+multus_version: v3.3
+kube_network_plugin: flannel
+kube_pods_subnet: 10.244.0.0/16
+kube_service_addresses: 10.233.0.0/18
+kube_proxy_mode: iptables
+
+# please leave this set to "true"; otherwise, Intel BMRA features deployed as Helm charts won't be installed
+helm_enabled: true
+
+# A Docker registry running on the cluster allows storing images not available on Docker Hub, e.g. CMK
+registry_enabled: true
+registry_storage_class: ""
+registry_local_address: "localhost:5000"
diff --git a/sw_config/bmra/inventory.ini b/sw_config/bmra/inventory.ini
new file mode 100644
index 0000000..9fb9f41
--- /dev/null
+++ b/sw_config/bmra/inventory.ini
@@ -0,0 +1,18 @@
+[all]
+master1 ansible_host=10.10.190.202 ip=10.10.190.202
+node1 ansible_host=10.10.190.203 ip=10.10.190.203
+
+[kube-master]
+master1
+
+[etcd]
+master1
+
+[kube-node]
+node1
+
+[k8s-cluster:children]
+kube-master
+kube-node
+
+[calico-rr]
diff --git a/sw_config/bmra/node1.yml b/sw_config/bmra/node1.yml
new file mode 100644
index 0000000..3e4f634
--- /dev/null
+++ b/sw_config/bmra/node1.yml
@@ -0,0 +1,63 @@
+---
+# Kubernetes node configuration
+
+# Enable SR-IOV networking related setup
+sriov_enabled: false
+
+# sriov_nics: SR-IOV PF specific configuration list
+sriov_nics:
+  - name: enp24s0f0        # PF interface name
+    sriov_numvfs: 2        # number of VFs to create for this PF (enp24s0f0)
+    vf_driver: vfio-pci    # VF driver to attach to all VFs under this PF (enp24s0f0): "i40evf", "iavf", "vfio-pci" or "igb_uio"
+    ddp_profile: "gtp.pkgo" # DDP package name to be loaded into the NIC
+  - name: enp24s0f1
+    sriov_numvfs: 4
+    vf_driver: iavf
+
+sriov_cni_enabled: false
+
+# install DPDK
+install_dpdk: false # DPDK installation is required when sriov_enabled is true; defaults to false
+
+userspace_cni_enabled: false
+
+# Intel Bond CNI Plugin
+bond_cni_enabled: false
+
+vpp_enabled: false
+ovs_dpdk_enabled: false
+# CPU mask for OVS-DPDK PMD threads
+ovs_dpdk_lcore_mask: 0x1
+# Huge memory pages allocated by OVS-DPDK per NUMA node in megabytes
+# example 1: "256,512" will allocate 256MB from node 0 and 512MB from node 1
+# example 2: "1024" will allocate 1GB from node 0 on a single-socket board, e.g. in a VM
+ovs_dpdk_socket_mem: "256,0"
+
+# Set to 'true' to update i40e and i40evf kernel modules
+force_nic_drivers_update: false
+
+# install DDP packages for Intel X700 & X800 series NICs
+install_ddp_packages: false
+
+# Enable hugepages support
+hugepages_enabled: false
+
+# Hugepage sizes available: 2M, 1G
+default_hugepage_size: 1G
+
+# Sets how many hugepages of each size should be created
+hugepages_1G: 4
+hugepages_2M: 0
+
+# CPU isolation from the Linux scheduler
+isolcpus_enabled: false
+isolcpus: "4-7"
+
+# Intel CommsPowerManagement
+sst_bf_configuration_enabled: false
+# Option sst_bf_mode requires sst_bf_configuration_enabled to be set to 'true'.
+# There are three configuration modes:
+# [s] Set SST-BF config (set min/max to 2700/2700 and 2100/2100)
+# [m] Set P1 on all cores (set min/max to 2300/2300)
+# [r] Revert cores to min/Turbo (set min/max to 800/3900)
+sst_bf_mode: s
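Note: when sriov_net_dp_enabled and example_net_attach_defs.sriov_net_dp are both set to true, the pools declared in sriovdp_config_data get exposed to pods through net-attach-def objects. The following is a minimal sketch of such an object, assuming the SR-IOV network device plugin's default "intel.com" resource prefix; the object name and the IPAM block are illustrative, not necessarily what BMRA generates:

apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
  name: sriov-net   # illustrative name
  annotations:
    # ties this network to the pool advertised by the device plugin
    k8s.v1.cni.cncf.io/resourceName: intel.com/intel_sriov_netdevice
spec:
  config: '{
      "type": "sriov",
      "cniVersion": "0.3.1",
      "name": "sriov-network",
      "ipam": {
        "type": "host-local",
        "subnet": "10.56.217.0/24",
        "gateway": "10.56.217.1"
      }
    }'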
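A workload then consumes a VF by referencing both the net-attach-def (via the Multus networks annotation) and the extended resource advertised by the device plugin. A minimal sketch, reusing the illustrative sriov-net name from above:

apiVersion: v1
kind: Pod
metadata:
  name: sriov-test-pod   # illustrative name
  annotations:
    # attach a secondary interface backed by the sriov-net net-attach-def
    k8s.v1.cni.cncf.io/networks: sriov-net
spec:
  containers:
  - name: app
    image: alpine
    command: ["sleep", "infinity"]
    resources:
      requests:
        intel.com/intel_sriov_netdevice: '1'   # one VF from the netdevice pool
      limits:
        intel.com/intel_sriov_netdevice: '1'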
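Similarly, the hugepages settings in node1.yml only reserve pages on the host; pods request them through the standard Kubernetes hugepages-<size> resources, where limits must equal requests and a regular memory request must accompany them. A minimal sketch consuming the 1G pages configured above (pod name and sizes are illustrative, and must fit within the 4 x 1G pages provisioned per node):

apiVersion: v1
kind: Pod
metadata:
  name: hugepage-test-pod   # illustrative name
spec:
  containers:
  - name: app
    image: alpine
    command: ["sleep", "infinity"]
    volumeMounts:
    - mountPath: /hugepages
      name: hugepage
    resources:
      requests:
        memory: 256Mi
        hugepages-1Gi: 2Gi   # 2 of the 4 pages created via hugepages_1G
      limits:
        memory: 256Mi
        hugepages-1Gi: 2Gi
  volumes:
  - name: hugepage
    emptyDir:
      medium: HugePages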