author    Parth Inamdar <parth.inamdar1@gmail.com>  2021-11-29 22:01:38 -0500
committer Parth Inamdar <parth.inamdar1@gmail.com>  2021-11-30 05:25:24 +0000
commit    52ba79c07aa517160698ee7e04797447448ebf3c (patch)
tree      5a27ed50d5f75d21eaf789ae027ac7e899cb254d /sdv/docker/sdvstate/internal/validator/kuberef/policy_checks.py
parent    bfd37762bdf91a7f89d4ebc259454ddb2f5e7b3d (diff)
Added Security, Policy, Observability & Plugin Checks
Security Checks: checking the security configuration of the cluster, consisting of capability, privilege, host network, host path and connectivity checks.
Policy Checks: validating the CPU Manager and Topology Manager policies against the settings from the PDF.
Observability Checks: checking the existence and health of the prometheus, node-exporter and collectd pods.
Plugin Checks: checking for the existence of a multi-interface pod (multus) and validating the list of CNIs against the PDF.

Also added usage information and PDF field information to the userguide.rst file in the docs section. For reference, a sample PDF.json has been added in the sdv/docker/sdvstate/settings section to show the configuration required for the kuberef validation.

Signed-off-by: Parth V Inamdar <parth.inamdar1@gmail.com>
Change-Id: I28dc8e687c14cba099230f2226b4add79a55a7ad
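A minimal sketch of the PDF fields these policy checks read, inferred from the settings.getValue('pdf_file') lookups in the diff below; the values shown are illustrative assumptions, not the contents of the bundled PDF.json:

    pdf_file = {
        "vim_functional": {
            "cpu_manager_policy": {
                "type": "static",            # compared with kubelet cpuManagerPolicy
                "reconcile_period": "10s",   # checked only for the static policy
                "full_pcpus": "true"         # compared with full-pcpus-only
            }
        },
        "undercloud_ook": {
            "topo_manager_policy": {
                "type": "best-effort",       # compared with topologyManagerPolicy
                "scope": "container"         # compared with topologyManagerScope
            }
        }
    }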
Diffstat (limited to 'sdv/docker/sdvstate/internal/validator/kuberef/policy_checks.py')
-rw-r--r--  sdv/docker/sdvstate/internal/validator/kuberef/policy_checks.py | 123
1 file changed, 123 insertions(+), 0 deletions(-)
diff --git a/sdv/docker/sdvstate/internal/validator/kuberef/policy_checks.py b/sdv/docker/sdvstate/internal/validator/kuberef/policy_checks.py
new file mode 100644
index 0000000..6993fd7
--- /dev/null
+++ b/sdv/docker/sdvstate/internal/validator/kuberef/policy_checks.py
@@ -0,0 +1,123 @@
+"""
+Policy Checks
+Checks if the policies are properly configured
+"""
+
+
+import ast
+import logging
+from tools.kube_utils import kube_api
+from tools.conf import settings
+from internal.store_result import store_result
+
+def cpu_manager_policy_check():
+ """
+ Checks cpu manager settings
+ """
+ api = kube_api()
+ logger = logging.getLogger(__name__)
+ node_list = api.list_node()
+ nodes = []
+
+ for node in node_list:
+ nodes.append(node.metadata.name)
+
+    result = {
+        'category': 'compute',
+        'case_name': 'cpu_manager_policy_check',
+        'criteria': 'pass',
+        'details': []
+    }
+
+ for node in nodes:
+        # The configz endpoint returns the kubelet config as a string
+        # representation of a dict; parse it before inspecting it.
+        configz = api.connect_get_node_proxy_with_path(node, "configz")
+        configz = ast.literal_eval(configz)
+ res = {
+ 'node': node,
+ 'criteria': 'pass',
+ 'config': []
+ }
+
+        cpu_manager = settings.getValue('pdf_file')['vim_functional']['cpu_manager_policy']
+        kubelet_config = configz['kubeletconfig']
+
+        # A node passes when the policy type matches the PDF; for the static
+        # policy, the reconcile period and the full-pcpus option must match too.
+        flag = cpu_manager['type'] == kubelet_config['cpuManagerPolicy']
+        if flag and cpu_manager['type'] == 'static':
+            flag = (cpu_manager['reconcile_period'] == kubelet_config['cpuManagerReconcilePeriod']
+                    and cpu_manager['full_pcpus'] == kubelet_config['full-pcpus-only'])
+
+        if not flag:
+            res['criteria'] = 'fail'
+            # Any failing node fails the overall check.
+            result['criteria'] = 'fail'
+
+        res['config'] = [cpu_manager]
+        result['details'].append(res)
+
+    store_result(logger, result)
+    return result
+
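+# Note: both checks parse the node's "configz" proxy response. Assuming a
+# typical kubelet, it looks roughly like the following (values illustrative;
+# only the keys accessed by these checks are shown):
+#
+#   {'kubeletconfig': {'cpuManagerPolicy': 'static',
+#                      'cpuManagerReconcilePeriod': '10s',
+#                      'full-pcpus-only': 'true',
+#                      'topologyManagerPolicy': 'best-effort',
+#                      'topologyManagerScope': 'container'}}
+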
+def topology_manager_policy_check():
+ """
+ Checks topology manager settings
+ """
+ api = kube_api()
+ logger = logging.getLogger(__name__)
+ node_list = api.list_node()
+ nodes = []
+
+ for node in node_list:
+ nodes.append(node.metadata.name)
+
+ result = {
+ 'category': 'compute',
+ 'case_name': 'topology_manager_policy_check',
+ 'criteria': 'pass',
+ 'details': []
+ }
+
+ for node in nodes:
+ configz = api.connect_get_node_proxy_with_path(node, "configz")
+ configz = ast.literal_eval(configz)
+ res = {
+ 'node': node,
+ 'criteria': 'pass',
+ 'config': []
+ }
+
+        topology_manager = settings.getValue('pdf_file')['undercloud_ook']['topo_manager_policy']
+        kubelet_config = configz['kubeletconfig']
+
+        # Both the policy type and the scope must match the PDF settings.
+        flag = (topology_manager['type'] == kubelet_config['topologyManagerPolicy']
+                and topology_manager['scope'] == kubelet_config['topologyManagerScope'])
+
+        if not flag:
+            res['criteria'] = 'fail'
+            # Any failing node fails the overall check.
+            result['criteria'] = 'fail'
+
+        res['config'] = [topology_manager]
+        result['details'].append(res)
+
+    store_result(logger, result)
+    return result
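As a usage sketch, assuming the sdvstate settings (including the kube config and the pdf_file value) have already been loaded by the surrounding framework; the import path below mirrors this file's location and is an assumption:

    from internal.validator.kuberef.policy_checks import (
        cpu_manager_policy_check,
        topology_manager_policy_check,
    )

    # Each check returns its result dict and also persists it via store_result().
    cpu_result = cpu_manager_policy_check()
    topo_result = topology_manager_policy_check()
    print(cpu_result['criteria'], topo_result['criteria'])  # 'pass' or 'fail'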