author    Parth Inamdar <parth.inamdar1@gmail.com>  2021-11-29 22:01:38 -0500
committer Parth Inamdar <parth.inamdar1@gmail.com>  2021-11-30 05:25:24 +0000
commit    52ba79c07aa517160698ee7e04797447448ebf3c (patch)
tree      5a27ed50d5f75d21eaf789ae027ac7e899cb254d
parent    bfd37762bdf91a7f89d4ebc259454ddb2f5e7b3d (diff)
Added Security, Policy, Observability & Plugin Checks
Security Checks: checking the security configuration of the cluster, consisting of capability, privilege, host network, host path and connectivity checks.

Policy Checks: validating CPU Manager and Topology Manager policies against the settings from the PDF.

Observability Checks: checking the existence and health of prometheus, node-exporter and collectd pods.

Plugin Checks: checking for the existence of a multi-interface CNI (multus) and validating the list of CNIs against the PDF.

Also added usage information and PDF field information to the userguide.rst file in the docs section. For reference, a PDF.json has been added in sdv/docker/sdvstate/settings showing the configuration required for the kuberef validation.

Signed-off-by: Parth V Inamdar <parth.inamdar1@gmail.com>
Change-Id: I28dc8e687c14cba099230f2226b4add79a55a7ad
-rw-r--r--docs/state/user/userguide.rst20
-rw-r--r--sdv/docker/sdvstate/internal/validator/airship/probe_check.py13
-rw-r--r--sdv/docker/sdvstate/internal/validator/kuberef/helm_check.py35
-rw-r--r--sdv/docker/sdvstate/internal/validator/kuberef/kuberef.py29
-rw-r--r--sdv/docker/sdvstate/internal/validator/kuberef/kubevirt_health_check.py44
-rw-r--r--sdv/docker/sdvstate/internal/validator/kuberef/monitoring_agent_checker.py102
-rw-r--r--sdv/docker/sdvstate/internal/validator/kuberef/node_exporter_checker.py65
-rw-r--r--sdv/docker/sdvstate/internal/validator/kuberef/plugin_check.py152
-rw-r--r--sdv/docker/sdvstate/internal/validator/kuberef/policy_checks.py123
-rw-r--r--sdv/docker/sdvstate/internal/validator/kuberef/security_check.py272
-rw-r--r--sdv/docker/sdvstate/settings/PDF.json650
-rw-r--r--sdv/docker/sdvstate/settings/state.yml20
12 files changed, 1515 insertions, 10 deletions
diff --git a/docs/state/user/userguide.rst b/docs/state/user/userguide.rst
index ef95584..17470d8 100644
--- a/docs/state/user/userguide.rst
+++ b/docs/state/user/userguide.rst
@@ -2,11 +2,12 @@
SDVState User Guide
====================
-Currently, SDVState supports validation of Airship 1.7. Before running checks you need two files:
- - kubeconfig file which gives access to clusterAPI of Airship cluster.
- - PDF(Pod Descriptor File) of the current Airship deployment.
+Currently, SDVState supports validation of Airship 1.7 and Kuberef, a reference implementation based on the CNTT RA-2. Before running checks you need two files:
+ - kubeconfig file which gives access to the cluster API of the deployment.
+ - PDF (Pod Descriptor File) of the current deployment.
-Create a config file of SDVState using the above files as values. Look at example conf-file at sdv/docker/sdvstate/example/state.yml
+To choose between Airship and Kuberef, specify the installer in the "installer_used" field of the PDF of your deployment; it can be either "airship" or "kuberef".
+You also need to create a config file for SDVState using the above files as values. Look at the example conf-file at sdv/docker/sdvstate/settings/state.yml
To run checks use command:
@@ -19,4 +20,13 @@ After running checks, you can find all results at ``/tmp`` directory by default.
SDVState uses default settings stored at sdv/docker/sdvstate/settings. We can override default settings by adding those in our conf-file.
To view help and all available options with the SDVState tool check help command:
- ``./state --help`` \ No newline at end of file
+ ``./state --help``
+
+ To run validation properly on Kuberef, some additions to the PDF file are needed. Take a look at the example PDF file at sdv/docker/sdvstate/settings/PDF.json
+
+ Add the following entries to the "vim_functional" field of the PDF for the validation to work properly (see the example below):
+
+ - cpu_manager_policy: details of the CPU manager policy; required for the policy checks
+ - topo_manager_policy: details of the topology manager policy; required for the policy checks
+ - cnis_supported: the list of CNIs supported by the cluster; required for the cni_plugin_check
+
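+ For example, the relevant part of "vim_functional" could look like the following excerpt (the values shown are only illustrative; see PDF.json for the full structure):
+
+ .. code-block:: json
+
+    "cpu_manager_policy": {
+        "type": "static",
+        "reconcile_period": "10s",
+        "full_pcpus_only": "true"
+    },
+    "topo_manager_policy": {
+        "scope": "container",
+        "type": "best-effort"
+    },
+    "cnis_supported": ["multus", "flannel", "sriov"]
+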
diff --git a/sdv/docker/sdvstate/internal/validator/airship/probe_check.py b/sdv/docker/sdvstate/internal/validator/airship/probe_check.py
index 670bd9a..96842c1 100644
--- a/sdv/docker/sdvstate/internal/validator/airship/probe_check.py
+++ b/sdv/docker/sdvstate/internal/validator/airship/probe_check.py
@@ -19,11 +19,11 @@ Probe Checks
2. Liveness
3. Startup
"""
-
+import logging
from tools.kube_utils import kube_api
from tools.conf import settings
-from .store_result import store_result
+from internal.store_result import store_result
def readiness_probe_check():
@@ -31,6 +31,7 @@ def readiness_probe_check():
Checks whether the readiness probe is configured for all overcloud
components deployed as pods on undercloud Kubernetes.
"""
+ logger = logging.getLogger(__name__)
api = kube_api()
namespace_list = settings.getValue('airship_namespace_list')
@@ -61,7 +62,7 @@ def readiness_probe_check():
pod_stats['containers'].append(container_stats)
result['details'].append(pod_stats)
- store_result(result)
+ store_result(logger, result)
return result
def liveness_probe_check():
@@ -69,6 +70,7 @@ def liveness_probe_check():
Checks whether the liveness probe is configured for all overcloud
components deployed as pods on undercloud Kubernetes.
"""
+ logger = logging.getLogger(__name__)
api = kube_api()
namespace_list = settings.getValue('airship_namespace_list')
@@ -99,7 +101,7 @@ def liveness_probe_check():
pod_stats['containers'].append(container_stats)
result['details'].append(pod_stats)
- store_result(result)
+ store_result(logger, result)
return result
def startup_probe_check():
@@ -107,6 +109,7 @@ def startup_probe_check():
Checks whether the startup probe is configured for all overcloud
components deployed as pods on undercloud Kubernetes.
"""
+ logger = logging.getLogger(__name__)
api = kube_api()
namespace_list = settings.getValue('airship_namespace_list')
@@ -137,5 +140,5 @@ def startup_probe_check():
pod_stats['containers'].append(container_stats)
result['details'].append(pod_stats)
- store_result(result)
+ store_result(logger, result)
return result
diff --git a/sdv/docker/sdvstate/internal/validator/kuberef/helm_check.py b/sdv/docker/sdvstate/internal/validator/kuberef/helm_check.py
new file mode 100644
index 0000000..55f4052
--- /dev/null
+++ b/sdv/docker/sdvstate/internal/validator/kuberef/helm_check.py
@@ -0,0 +1,35 @@
+"""
+Helm 2 disabled check
+
+Checks whether Helm v2 (Tiller) is supported in the cluster
+"""
+
+import logging
+from tools.kube_utils import kube_api
+from tools.conf import settings
+from internal.store_result import store_result
+
+def helmv2_disabled_check():
+ """
+ Checks for helm v2 support
+ """
+ result = {'category': 'platform',
+ 'case_name': 'helmv2_disabled_check',
+ 'criteria': 'pass',
+ 'details': []
+ }
+ kube = kube_api()
+ logger = logging.getLogger(__name__)
+ res = False
+ pod_details = kube.list_pod_for_all_namespaces()
+ pods = pod_details.items
+ version_support = settings.getValue('pdf_file')['vim_functional']['legacy_helm_support']
+ if 'YES' in version_support:
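+ # Helm v2 relies on the in-cluster Tiller component, so a running tiller pod indicates legacy Helm support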
+ for pod in pods:
+ if 'tiller' in pod.metadata.name:
+ res = True
+ result['details'].append(pod)
+ if res is False:
+ result['criteria'] = 'fail'
+ store_result(logger, result)
+ return result
diff --git a/sdv/docker/sdvstate/internal/validator/kuberef/kuberef.py b/sdv/docker/sdvstate/internal/validator/kuberef/kuberef.py
index 4768e81..f42c723 100644
--- a/sdv/docker/sdvstate/internal/validator/kuberef/kuberef.py
+++ b/sdv/docker/sdvstate/internal/validator/kuberef/kuberef.py
@@ -22,6 +22,14 @@ from datetime import datetime as dt
from internal import store_result
from internal.validator.validator import Validator
+from internal.validator.kuberef.policy_checks import topology_manager_policy_check, cpu_manager_policy_check
+from internal.validator.kuberef.security_check import capability_check, privilege_check, host_network_check
+from internal.validator.kuberef.security_check import host_path_vol_check, k8s_api_conn_check
+from internal.validator.kuberef.monitoring_agent_checker import collectd_check, monitoring_agent_check
+from internal.validator.kuberef.node_exporter_checker import node_exporter_check
+from internal.validator.kuberef.plugin_check import cni_plugin_check, multi_interface_cni_check
+from internal.validator.kuberef.helm_check import helmv2_disabled_check
+from internal.validator.kuberef.kubevirt_health_check import kubevirt_check
from tools.conf import settings
from tools.kube_utils import load_kube_api
@@ -82,8 +90,29 @@ class KuberefValidator(Validator):
# PLATFORM CHECKS
self.update_report(pod_health_check())
+ self.update_report(kubevirt_check())
+ self.update_report(helmv2_disabled_check())
+ self.update_report(capability_check())
+ self.update_report(privilege_check())
+ self.update_report(host_network_check())
+ self.update_report(host_path_vol_check())
+ self.update_report(k8s_api_conn_check())
+
+
+ # MONITORING & LOGGING AGENT CHECKS
+ self.update_report(monitoring_agent_check())
+ self.update_report(collectd_check())
+ self.update_report(node_exporter_check())
# COMPUTE CHECKS
+ self.update_report(cpu_manager_policy_check())
+ self.update_report(topology_manager_policy_check())
+
+
+ # NETWORK CHECKS
+ self.update_report(cni_plugin_check())
+ self.update_report(multi_interface_cni_check())
+
def get_report(self):
diff --git a/sdv/docker/sdvstate/internal/validator/kuberef/kubevirt_health_check.py b/sdv/docker/sdvstate/internal/validator/kuberef/kubevirt_health_check.py
new file mode 100644
index 0000000..08bb3c7
--- /dev/null
+++ b/sdv/docker/sdvstate/internal/validator/kuberef/kubevirt_health_check.py
@@ -0,0 +1,44 @@
+"""
+Kubevirt Check
+Checks the existence and health of kubevirt
+"""
+
+import logging
+from tools.kube_utils import kube_api
+from internal.checks.pod_health_check import pod_status, get_logs
+from internal.store_result import store_result
+
+def kubevirt_check():
+ """
+ Checks for the existence of the kubevirt namespace and the health of the pods within it
+
+ """
+ k8s_api = kube_api()
+ namespaces = k8s_api.list_namespace()
+ ns_names = []
+ for nspace in namespaces.items:
+ ns_names.append(nspace.metadata.name)
+
+ result = {'category': 'platform',
+ 'case_name': 'kubevirt_check',
+ 'criteria': 'pass',
+ 'details': []
+ }
+
+ logger = logging.getLogger(__name__)
+
+ if 'kubevirt' in ns_names:
+ result['criteria'] = 'pass'
+ result['details'].append(ns_names)
+ pod_list = k8s_api.list_namespaced_pod('kubevirt')
+ for pod in pod_list.items:
+ pod_stats = pod_status(logger, pod)
+ if pod_stats['criteria'] == 'fail':
+ pod_stats['logs'] = get_logs(k8s_api, pod)
+ result['criteria'] = 'fail'
+ result['details'].append(pod_stats)
+ else:
+ result['criteria'] = 'fail'
+
+ store_result(logger, result)
+ return result
diff --git a/sdv/docker/sdvstate/internal/validator/kuberef/monitoring_agent_checker.py b/sdv/docker/sdvstate/internal/validator/kuberef/monitoring_agent_checker.py
new file mode 100644
index 0000000..bc94c33
--- /dev/null
+++ b/sdv/docker/sdvstate/internal/validator/kuberef/monitoring_agent_checker.py
@@ -0,0 +1,102 @@
+"""
+Monitoring agent checks
+Checks for prometheus and collectd existence and health
+"""
+
+import logging
+from tools.kube_utils import kube_api
+from internal.store_result import store_result
+from internal.checks.pod_health_check import pod_status, get_logs
+
+def health_checker(pod, api_instance, logger, result):
+ """
+ Checks the health of a pod
+ """
+ status = []
+ pod_stats = pod_status(logger, pod)
+
+ if pod_stats['criteria'] == 'fail':
+ pod_stats['logs'] = get_logs(api_instance, pod)
+ result['criteria'] = 'fail'
+
+ status.append(pod.metadata.name)
+ status.append(pod_stats)
+ return status
+
+def monitoring_agent_check():
+ """
+ Checks existence & health of prometheus pods
+ """
+ api_instance = kube_api()
+ namespaces = api_instance.list_namespace()
+ ns_names = []
+
+ for nspace in namespaces.items:
+ ns_names.append(nspace.metadata.name)
+
+ result = {'category': 'observability',
+ 'case_name': 'prometheus_check',
+ 'criteria': 'pass',
+ 'details': []
+ }
+
+ status = []
+ flag = False
+ logger = logging.getLogger(__name__)
+ if 'monitoring' in ns_names:
+ pod_details = api_instance.list_namespaced_pod('monitoring', watch=False)
+ pods = pod_details.items
+ for pod in pods:
+ if 'prometheus' in pod.metadata.name:
+ stats = health_checker(pod, api_instance, logger, result)
+ status.append(stats)
+ flag = True
+ else:
+ for name in ns_names:
+ pod_details = api_instance.list_namespaced_pod(name, watch=False)
+ pods = pod_details.items
+ for pod in pods:
+ if 'prometheus' in pod.metadata.name:
+ stats = health_checker(pod, api_instance, logger, result)
+ status.append(stats)
+ flag = True
+
+ if flag is False:
+ result['criteria'] = 'fail'
+
+ result['details'].append(status)
+ store_result(logger, result)
+ return result
+
+
+def collectd_check():
+ """
+ Checks that collectd pods are present and healthy
+ """
+ api_instance = kube_api()
+ pod_details = api_instance.list_pod_for_all_namespaces()
+ pods = pod_details.items
+
+ result = {'category': 'observability',
+ 'case_name': 'collectd_check',
+ 'criteria': 'pass',
+ 'details': []
+ }
+
+ logger = logging.getLogger(__name__)
+
+ status = []
+
+ flag = False
+ for pod in pods:
+ if 'collectd' in pod.metadata.name:
+ stats = health_checker(pod, api_instance, logger, result)
+ status.append(stats)
+ flag = True
+
+ if flag is False:
+ result['criteria'] = 'fail'
+
+ result['details'].append(status)
+ store_result(logger, result)
+ return result
diff --git a/sdv/docker/sdvstate/internal/validator/kuberef/node_exporter_checker.py b/sdv/docker/sdvstate/internal/validator/kuberef/node_exporter_checker.py
new file mode 100644
index 0000000..7262fb1
--- /dev/null
+++ b/sdv/docker/sdvstate/internal/validator/kuberef/node_exporter_checker.py
@@ -0,0 +1,65 @@
+"""
+Node Exporter Check
+"""
+
+import logging
+from tools.kube_utils import kube_api
+from internal.checks.pod_health_check import pod_status, get_logs
+from internal.store_result import store_result
+
+
+def node_exporter_check():
+ """
+ Checks existence & health of node exporter pods
+ """
+ kube = kube_api()
+ namespaces = kube.list_namespace()
+ ns_names = []
+ for nspace in namespaces.items:
+ ns_names.append(nspace.metadata.name)
+
+ result = {'category': 'observability',
+ 'case_name': 'node_exporter_check',
+ 'criteria': 'pass',
+ 'details': []
+ }
+
+ status = []
+
+ flag = False
+
+ logger = logging.getLogger(__name__)
+
+ if 'monitoring' in ns_names:
+ pod_list = kube.list_namespaced_pod('monitoring', watch=False)
+ pods = pod_list.items
+ for pod in pods:
+ if 'node-exporter' in pod.metadata.name:
+ pod_stats = pod_status(logger, pod)
+ if pod_stats['criteria'] == 'fail':
+ pod_stats['logs'] = get_logs(kube, pod)
+ result['criteria'] = 'fail'
+ status.append(pod.metadata.name)
+ status.append(pod_stats)
+ flag = True
+ else:
+ for nspace in namespaces.items:
+ pod_list = kube.list_namespaced_pod(nspace.metadata.name, watch=False)
+ pods = pod_list.items
+ for pod in pods:
+ if 'node-exporter' in pod.metadata.name:
+ pod_stats = pod_status(logger, pod)
+ if pod_stats['criteria'] == 'fail':
+ pod_stats['logs'] = get_logs(kube, pod)
+ result['criteria'] = 'fail'
+ status.append(pod.metadata.name)
+ status.append(pod_stats)
+ flag = True
+
+ if flag is False:
+ result['criteria'] = 'fail'
+
+ result['details'].append(status)
+
+ store_result(logger, result)
+ return result
diff --git a/sdv/docker/sdvstate/internal/validator/kuberef/plugin_check.py b/sdv/docker/sdvstate/internal/validator/kuberef/plugin_check.py
new file mode 100644
index 0000000..e964707
--- /dev/null
+++ b/sdv/docker/sdvstate/internal/validator/kuberef/plugin_check.py
@@ -0,0 +1,152 @@
+"""
+CNI Plugin Check
+Multi-interface CNI Check
+"""
+
+import time
+import logging
+from kubernetes import client
+from tools.kube_utils import kube_api, kube_exec
+from tools.conf import settings
+from internal.store_result import store_result
+
+def create_daemonset(apps_instance):
+ """
+ Creates daemonset for the checks
+ """
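+ # The DaemonSet mounts /etc/cni and /opt/cni/bin from the host so that the CNI
+ # configuration and installed plugin binaries can be inspected on every node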
+ manifest = {
+ 'apiVersion': 'apps/v1',
+ 'kind': 'DaemonSet',
+ 'metadata': {
+ 'name': 'plugin-check-test-set',
+ 'namespace': 'default'
+ },
+ 'spec': {
+ 'selector': {
+ 'matchLabels': {
+ 'name': 'alpine'
+ }
+ },
+ 'template': {
+ 'metadata': {
+ 'labels': {
+ 'name': 'alpine'
+ }
+ },
+ 'spec': {
+ 'containers': [{
+ 'name': 'alpine',
+ 'image': 'alpine:3.2',
+ 'command': ["sh", "-c", "echo \"Hello K8s\" && sleep 3600"],
+ 'volumeMounts': [{
+ 'name': 'etccni',
+ 'mountPath': '/etc/cni'
+ }, {
+ 'name': 'optcnibin',
+ 'mountPath': '/opt/cni/bin',
+ 'readOnly': True
+ }]
+ }],
+ 'volumes': [{
+ 'name': 'etccni',
+ 'hostPath': {
+ 'path': '/etc/cni'
+ }
+ }, {
+ 'name': 'optcnibin',
+ 'hostPath': {
+ 'path': '/opt/cni/bin'
+ }
+ }],
+ 'tolerations': [{
+ 'effect': 'NoSchedule',
+ 'key': 'node-role.kubernetes.io/master',
+ 'operator': 'Exists'
+ }]
+ }
+ }
+ }
+ }
+ apps_instance.create_namespaced_daemon_set('default', manifest)
+ time.sleep(6)
+
+
+def multi_interface_cni_check():
+ """
+ Checks if a multi-interface CNI (such as multus) is enabled
+ """
+ apps_instance = client.AppsV1Api()
+ api_instance = kube_api()
+ logger = logging.getLogger(__name__)
+
+ result = {'category': 'network',
+ 'case_name': 'multi_interface_cni_check',
+ 'criteria': 'pass',
+ 'details': []
+ }
+
+ create_daemonset(apps_instance)
+ pod_details = api_instance.list_namespaced_pod('default', watch=False)
+ pods = pod_details.items
+ status = []
+ cmd = ['ls', '/etc/cni/net.d']
+
+ for pod in pods:
+ if 'plugin-check-test-set' in pod.metadata.name:
+ list_of_plugin_conf = kube_exec(pod, cmd)
+ list_of_plugin_conf = list_of_plugin_conf.split("\n")
+
+ cmd3 = ['cat', '/etc/cni/net.d/' + list_of_plugin_conf[0]]
+ multi_interface_conf = kube_exec(pod, cmd3)
+
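+ # Multus is a CNI meta-plugin; it typically appears in the first config file under /etc/cni/net.d when multi-interface networking is enabled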
+ if 'multus' not in multi_interface_conf:
+ result['criteria'] = 'fail'
+
+ status.append(list_of_plugin_conf)
+ status.append(multi_interface_conf)
+
+ apps_instance.delete_namespaced_daemon_set('plugin-check-test-set', 'default')
+ result['details'].append(status)
+ store_result(logger, result)
+ return result
+
+def cni_plugin_check():
+ """
+ Checks for CNI plugins and validates them against the PDF
+ """
+ apps_instance = client.AppsV1Api()
+ api_instance = kube_api()
+
+ result = {'category': 'network',
+ 'case_name': 'cni_plugin_check',
+ 'criteria': 'pass',
+ 'details': []
+ }
+
+ logger = logging.getLogger(__name__)
+ create_daemonset(apps_instance)
+ pod_details = api_instance.list_namespaced_pod('default', watch=False)
+ pods = pod_details.items
+ daemon_pods = []
+ status = []
+ cmd = ['ls', '/opt/cni/bin']
+ cni_plugins = settings.getValue('pdf_file')['vim_functional']['cnis_supported']
+
+
+ for pod in pods:
+ if 'plugin-check-test-set' in pod.metadata.name:
+ list_of_cni_from_dir = kube_exec(pod, cmd)
+
+ for plugin in cni_plugins:
+ if plugin not in list_of_cni_from_dir:
+ result['criteria'] = 'fail'
+
+ status.append(list_of_cni_from_dir)
+ daemon_pods.append(pod.metadata.name)
+
+ apps_instance.delete_namespaced_daemon_set('plugin-check-test-set', 'default')
+
+ result['details'].append(daemon_pods)
+ result['details'].append(status)
+ store_result(logger, result)
+ return result
diff --git a/sdv/docker/sdvstate/internal/validator/kuberef/policy_checks.py b/sdv/docker/sdvstate/internal/validator/kuberef/policy_checks.py
new file mode 100644
index 0000000..6993fd7
--- /dev/null
+++ b/sdv/docker/sdvstate/internal/validator/kuberef/policy_checks.py
@@ -0,0 +1,123 @@
+"""
+Policy Checks
+Checks if the policies are properly configured
+"""
+
+
+import ast
+import logging
+from tools.kube_utils import kube_api
+from tools.conf import settings
+from internal.store_result import store_result
+
+def cpu_manager_policy_check():
+ """
+ Checks cpu manager settings
+ """
+ api = kube_api()
+ logger = logging.getLogger(__name__)
+ node_list = api.list_node()
+ nodes = []
+
+ for node in node_list.items:
+ nodes.append(node.metadata.name)
+
+ result = {'category': 'compute',
+ 'case_name': 'cpu_manager_policy_check',
+ 'criteria': 'pass',
+ 'details': []
+ }
+
+ for node in nodes:
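+ # fetch the live kubelet configuration through the API server's node proxy (/api/v1/nodes/<node>/proxy/configz)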
+ configz = api.connect_get_node_proxy_with_path(node, "configz")
+ configz = ast.literal_eval(configz)
+ res = {
+ 'node': node,
+ 'criteria': 'pass',
+ 'config': []
+ }
+
+ status = []
+
+ flag = True
+
+ cpu_manager = settings.getValue('pdf_file')['vim_functional']['cpu_manager_policy']
+
+ if cpu_manager['type'] == configz['kubeletconfig']['cpuManagerPolicy']:
+ if cpu_manager['type'] == 'static':
+ if cpu_manager['reconcile_period'] == configz['kubeletconfig']['cpuManagerReconcilePeriod']:
+ if cpu_manager['full_pcpus_only'] == configz['kubeletconfig']['full-pcpus-only']:
+ flag = flag and True
+ else:
+ flag = flag and False
+ else:
+ flag = flag and True
+ else:
+ flag = flag and False
+
+ if flag is False:
+ res['criteria'] = 'fail'
+
+ status.append(cpu_manager)
+ res['config'] = status
+ result['details'].append(res)
+
+
+ if flag is False:
+ result['criteria'] = 'fail'
+
+ store_result(logger, result)
+ return result
+
+def topology_manager_policy_check():
+ """
+ Checks topology manager settings
+ """
+ api = kube_api()
+ logger = logging.getLogger(__name__)
+ node_list = api.list_node()
+ nodes = []
+
+ for node in node_list.items:
+ nodes.append(node.metadata.name)
+
+
+ result = {
+ 'category': 'compute',
+ 'case_name': 'topology_manager_policy_check',
+ 'criteria': 'pass',
+ 'details': []
+ }
+
+ for node in nodes:
+ configz = api.connect_get_node_proxy_with_path(node, "configz")
+ configz = ast.literal_eval(configz)
+ res = {
+ 'node': node,
+ 'criteria': 'pass',
+ 'config': []
+ }
+
+ status = []
+
+ flag = True
+
+ topology_manager = settings.getValue('pdf_file')['vim_functional']['topo_manager_policy']
+
+ if topology_manager['type'] == configz['kubeletconfig']['topologyManagerPolicy']:
+ if topology_manager['scope'] == configz['kubeletconfig']['topologyManagerScope']:
+ flag = flag and True
+ else:
+ flag = flag and False
+ if flag is False:
+ res['criteria'] = 'fail'
+
+ status.append(topology_manager)
+ res['config'] = status
+ result['details'].append(res)
+
+ if flag is False:
+ result['criteria'] = 'fail'
+
+ store_result(logger, result)
+ return result
diff --git a/sdv/docker/sdvstate/internal/validator/kuberef/security_check.py b/sdv/docker/sdvstate/internal/validator/kuberef/security_check.py
new file mode 100644
index 0000000..f49048c
--- /dev/null
+++ b/sdv/docker/sdvstate/internal/validator/kuberef/security_check.py
@@ -0,0 +1,272 @@
+"""
+Security Checks
+"""
+
+import time
+import logging
+from tools.kube_utils import kube_api, kube_curl
+from tools.kube_utils import kube_exec
+from internal.store_result import store_result
+
+# capability check
+def capability_check():
+ """
+ Checks if creation of pods with particular capabilities is possible
+ """
+ kube = kube_api()
+ logger = logging.getLogger(__name__)
+ pod_manifest = {
+ 'apiVersion': 'v1',
+ 'kind': 'Pod',
+ 'metadata': {
+ 'name': 'security-capability-demo',
+ },
+ 'spec': {
+ 'containers': [{
+ 'image': 'alpine:3.2',
+ 'name': 'security-capability-demo',
+ 'command': ["/bin/sh", "-c", "sleep 60m"],
+ 'securityContext': {
+ 'capabilities': {
+ 'drop': [
+ "ALL"
+ ],
+ 'add': [
+ 'NET_ADMIN', 'NET_RAW'
+ ]
+ }
+ }
+ }]
+ }
+ }
+ result = {'category': 'platform',
+ 'case_name': 'capability_check',
+ 'criteria': 'pass',
+ 'details': []
+ }
+ status = []
+ try:
+ pod_cap = kube.create_namespaced_pod(body=pod_manifest, namespace='default')
+ time.sleep(6)
+ cmd = ['cat', '/proc/1/status']
+
+ response = kube_exec(pod_cap, cmd)
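+ # In /proc/1/status, a CapEff bitmask of 0000000000003000 corresponds to CAP_NET_ADMIN (bit 12)
+ # and CAP_NET_RAW (bit 13); seeing it means the added capabilities took effect, so the check fails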
+ if "0000000000003000" in response:
+ result['criteria'] = 'fail'
+ status.append(pod_cap)
+ kube.delete_namespaced_pod(name=pod_cap.metadata.name, namespace='default')
+
+ except KeyError as error:
+ status.append(error)
+
+ except RuntimeError as error:
+ status.append(error)
+
+ result['details'].append(status)
+ store_result(logger, result)
+ return result
+
+# privileges check
+def privilege_check():
+ """
+ Checks if privileged pods can be created
+ """
+ kube = kube_api()
+ logger = logging.getLogger(__name__)
+
+ pod_manifest = {
+ 'apiVersion': 'v1',
+ 'kind': 'Pod',
+ 'metadata': {
+ 'name': 'security-privileges-demo',
+ },
+ 'spec': {
+ 'containers': [{
+ 'image': 'alpine:3.2',
+ 'name': 'security-privileges-demo',
+ 'command': ["/bin/sh", "-c", "sleep 60m"],
+ 'securityContext': {
+ 'privileged': True
+ }
+ }]
+ }
+ }
+ result = {'category': 'platform',
+ 'case_name': 'privilege_check',
+ 'criteria': 'pass',
+ 'details': []
+ }
+
+ status = []
+
+ try:
+ pod_priv = kube.create_namespaced_pod(body=pod_manifest, namespace='default')
+ time.sleep(5)
+ cmd = ['ps', 'aux']
+
+ response = kube_exec(pod_priv, cmd)
+
+ if "root" in response:
+ result['criteria'] = 'fail'
+ status.append(response)
+
+ kube.delete_namespaced_pod(name=pod_priv.metadata.name, namespace='default')
+
+ except KeyError as error:
+ status.append(error)
+
+ except RuntimeError as error:
+ status.append(error)
+
+ result['details'].append(status)
+
+ store_result(logger, result)
+ return result
+
+# host network check
+def host_network_check():
+ """
+ Checks if the pods can share the network with their host
+ """
+ kube = kube_api()
+ logger = logging.getLogger(__name__)
+
+ pod_manifest = {
+ 'apiVersion': 'v1',
+ 'kind': 'Pod',
+ 'metadata': {
+ 'name': 'security-host-network-demo',
+ },
+ 'spec': {
+ 'hostNetwork': True,
+ 'containers': [{
+ 'image': 'k8s.gcr.io/pause',
+ 'name': 'security-host-network-demo',
+ 'command': ["/bin/sh", "-c", "sleep 60m"],
+ }],
+ 'restartPolicy': 'Always'
+ }
+ }
+ result = {'category': 'platform',
+ 'case_name': 'host_network_check',
+ 'criteria': 'pass',
+ 'details': []
+ }
+
+ status = []
+
+ try:
+ pod_nw = kube.create_namespaced_pod(body=pod_manifest, namespace='default')
+ time.sleep(5)
+
+ kube.delete_namespaced_pod(name=pod_nw.metadata.name, namespace='default')
+ result['criteria'] = 'fail'
+
+ except KeyError as error:
+ status.append(error)
+
+ except RuntimeError as error:
+ status.append(error)
+
+ result['details'].append(status)
+
+ store_result(logger, result)
+ return result
+
+# host directory as a volume check
+def host_path_vol_check():
+ """
+ Checks if a host directory can be mounted into a pod as a volume
+ """
+ kube = kube_api()
+ logger = logging.getLogger(__name__)
+
+ pod_manifest = {
+ 'apiVersion': 'v1',
+ 'kind': 'Pod',
+ 'metadata': {
+ 'name': 'security-host-path-volume-demo',
+ },
+ 'spec': {
+ 'hostNetwork': True,
+ 'containers': [{
+ 'image': 'k8s.gcr.io/pause',
+ 'name': 'security-host-path-volume-demo',
+ 'command': ["/bin/sh", "-c", "sleep 60m"],
+ }],
+ 'volumes': [
+ {
+ 'name': 'test-vol',
+ 'hostPath': {
+ 'path': '/home',
+ 'type': 'Directory'
+ }
+ }
+ ]
+ }
+ }
+ result = {'category': 'platform',
+ 'case_name': 'host_path_dir_vol_check',
+ 'criteria': 'pass',
+ 'details': []
+ }
+
+ status = []
+
+ try:
+ pod_vol = kube.create_namespaced_pod(body=pod_manifest, namespace='default')
+
+ time.sleep(5)
+
+ kube.delete_namespaced_pod(name=pod_vol.metadata.name, namespace='default')
+ result['criteria'] = 'fail'
+
+ except KeyError as error:
+ status.append(error)
+
+ except RuntimeError as error:
+ status.append(error)
+
+ result['details'].append(status)
+
+ store_result(logger, result)
+ return result
+
+# kubernetes api connectivity check
+def k8s_api_conn_check():
+ """
+ Checks Kubernetes API connectivity from within a pod
+ """
+
+ result = {'category': 'platform',
+ 'case_name': 'connectivity_check',
+ 'criteria': 'pass',
+ 'details': []
+ }
+
+ status = []
+ logger = logging.getLogger(__name__)
+
+ try:
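+ # the service-account CA certificate and token are mounted into every pod by default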
+ ca_crt = '/var/run/secrets/kubernetes.io/serviceaccount/ca.crt'
+ auth_tkn = '"Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)"'
+ url = 'https://kubernetes.default.svc'
+ response = kube_curl('-v', '--cacert', ca_crt, '-H', auth_tkn, url)
+
+ if "Connected to kubernetes" in response:
+ result['criteria'] = 'pass'
+ else:
+ result['criteria'] = 'fail'
+
+ status.append(response)
+
+ except ConnectionError as error:
+ status.append(error)
+
+ except RuntimeError as error:
+ status.append(error)
+
+ result['details'].append(status)
+
+ store_result(logger, result)
+ return result
diff --git a/sdv/docker/sdvstate/settings/PDF.json b/sdv/docker/sdvstate/settings/PDF.json
new file mode 100644
index 0000000..c587956
--- /dev/null
+++ b/sdv/docker/sdvstate/settings/PDF.json
@@ -0,0 +1,650 @@
+{
+ "management_info": {
+ "owner": "",
+ "area_name": "",
+ "area_center_name": "",
+ "room_id": "",
+ "city": "",
+ "timezone": "",
+ "resource_pool_id": "",
+ "resource_pool_name": "",
+ "resource_pool_type": ""
+ },
+ "user_info": [
+ {
+ "_comment_users1": " Access Type: PIM, VIM, etc.",
+ "_comment_users2": " Endpoint: Server, Switch, VIM, etc.",
+ "access_type": "",
+ "endpoint": "",
+ "auth_type": "",
+ "username": "",
+ "password": "",
+ "pub_key": "",
+ "passphrase": "",
+ "tls_ca_cert": "",
+ "tls_cert": "",
+ "tls_key": "",
+ "email": ""
+ }
+ ],
+ "ntp_info": {
+ "primary_ip": "",
+ "primary_zone": "",
+ "secondary_ip": "",
+ "secondary_zone": ""
+ },
+ "syslog_info": {
+ "server_ip": "",
+ "transport": ""
+ },
+ "dns_info": [
+ {
+ "name": "",
+ "domain": "",
+ "servers": [
+ {
+ "ip": ""
+ }
+ ]
+ }
+ ],
+ "proxy_info": {
+ "address": "",
+ "port": "",
+ "user": "",
+ "password": ""
+ },
+ "ldap_info": {
+ "base_url": "",
+ "url": "",
+ "auth_path": "",
+ "common_name": "",
+ "subdomain": "",
+ "domain": ""
+ },
+ "vim_info": {
+ "vim_name": " ",
+ "vim_id": " ",
+ "vendor": " ",
+ "version": " ",
+ "installer": "",
+ "deployment_style": "",
+ "container_orchestrator": "",
+ "storage_type": ""
+ },
+ "_comment_deployment": " Type can be OOK or NOOK, block storage method is rbd or iscsi",
+ "deployment_info": {
+ "high_availability": "",
+ "introspection": "",
+ "deployment_type": "",
+ "installer_used": "KUBEREF",
+ "workload_vnf": "",
+ "workload_cnf": "",
+ "sdn_controller": "",
+ "sdn_controller_version": "",
+ "sdn_controller_nbapps": "",
+ "vnfm": "",
+ "vnfm_version": "",
+ "data_plane_used": "",
+ "ironic_deploy_interface": "",
+ "ip_type": "",
+ "external_storage_cluster": "",
+ "blk_str_connect_method": ""
+ },
+ "vim_functional": {
+ "kubevirt_support":"",
+ "ingress_approaches": "",
+ "egress_approaches": "",
+ "prev_mod_containers": "",
+ "hostdir_as_vols": "",
+ "host_ns_use": "",
+ "net_raw_admin_cap": "",
+ "_comment_cpu_manager": "Type could be none or static",
+ "cpu_manager_policy": {
+ "type": "",
+ "kube_reserved": "",
+ "system_reserved": "",
+ "reserved_cpus": "",
+ "full_pcpus_only": "",
+ "reconcile_period": ""
+ },
+ "_comment_topo_manager": "Type could be none, best-effort, restricted, single-numa-node",
+ "topo_manager_policy": {
+ "scope": "",
+ "type": ""
+ },
+ "use_of_service_mesh": "",
+ "k8s_api_access_from_pod": "",
+ "liveliness_probe": "",
+ "readiness_probe": "",
+ "cnis_supported": [
+ "bandwidth",
+ "dhcp",
+ "flannel",
+ "host-local",
+ "host-device",
+ "ipvlan",
+ "loopback",
+ "multus",
+ "ptp",
+ "sriov",
+ "tuning",
+ "vrf",
+ "bridge",
+ "firewall",
+ "macvlan",
+ "portmap",
+ "sbr",
+ "static",
+ "vlan"
+ ],
+ "device_plugins_supported":"",
+ "scheduler_filters": "",
+ "cpu_allocation_ratio": "",
+ "legacy_helm_support":"YES"
+ },
+ "jumphost_info": {
+ "ip": "",
+ "name": ""
+ },
+ "rack_info": [
+ {
+ "rack_id": "",
+ "rack_details": {
+ "rack_name": "",
+ "rack_description": "",
+ "rack_az": "",
+ "rack_room": "",
+ "rack_raw": "",
+ "rack_number": ""
+ }
+ }
+ ],
+ "storage_cluster_info": {
+ "name": "",
+ "cluster_type": "",
+ "vendor": "",
+ "version": "",
+ "cluster_id": "",
+ "auth_type": "",
+ "username": "",
+ "password": "",
+ "certificate_location": "",
+ "client_key": "",
+ "mon_host_ips": [
+ {
+ "ip": ""
+ }
+ ],
+ "public_cidr": "",
+ "cluster_cidr": "",
+ "nodes": [
+ {
+ "name": "",
+ "id": ""
+ }
+ ],
+ "pools": [
+ {
+ "key": "",
+ "value": ""
+ }
+ ],
+ "max_quota_capacity": "",
+ "az_name": "",
+ "backup_policy": "",
+ "networks": [
+ {
+ "name": ""
+ }
+ ]
+ },
+ "_comment_info2": "End of Information - Except Software-INFO",
+ "bios_profiles": [
+ {
+ "profile_name": "",
+ "bios_info": {
+ "bios_version": "",
+ "bios_mode": "",
+ "bootstrap_proto": "",
+ "hyperthreading_enabled": "",
+ "_comment": "C4_C6_MLC-STR_MLC-SPA_DCU_DCA_RAS_TURBO",
+ "bios_setting": ""
+ }
+ }
+ ],
+ "bmc_profiles": [
+ {
+ "profile_name": "",
+ "bmc_info": {
+ "version": ""
+ }
+ }
+ ],
+ "processor_profiles": [
+ {
+ "profile_name": "",
+ "profile_info": {
+ "manufacturer": "",
+ "generation": "",
+ "speed": "",
+ "model": "",
+ "architecture": "",
+ "cpu_cflags": "",
+ "cache_size": "",
+ "numas": [
+ {
+ "node_id": "",
+ "cpu_set": ""
+ }
+ ]
+ }
+ }
+ ],
+ "disks_profiles": [
+ {
+ "profile_name": "",
+ "profile_info": [
+ {
+ "alias": "",
+ "vendor": "",
+ "address": "",
+ "size": "",
+ "model": "",
+ "dev_type": "",
+ "rotation": "",
+ "bus": "",
+ "logical_name": ""
+ }
+ ]
+ }
+ ],
+ "nic_profiles": [
+ {
+ "profile_name": "",
+ "profile_info": [
+ {
+ "alias": "",
+ "name": "",
+ "address": "",
+ "dev_type": "",
+ "bus": "",
+ "sriov_capable": "",
+ "numa_id": ""
+ }
+ ]
+ }
+ ],
+ "hardware_profiles": [
+ {
+ "profile_name": "",
+ "profile_id": "",
+ "profile_info": {
+ "manufacturer": "",
+ "sku": "",
+ "model": "",
+ "generation": "",
+ "bios_profile": "",
+ "bmc_profile": "",
+ "processor_profile": "",
+ "memory": "",
+ "disks_profile": "",
+ "nics_profile": ""
+ }
+ }
+ ],
+ "switch_profiles": [
+ {
+ "profile_name": "",
+ "profile_id": "",
+ "profile_info": {
+ "manufacturer": "",
+ "sku": "",
+ "model": "",
+ "generation": "",
+ "bios_profile": "",
+ "bmc_profile": "",
+ "nics_profile": ""
+ }
+ }
+ ],
+ "_comment_hw": " Hardware Information is complete",
+ "storage_profile": [
+ {
+ "name": "",
+ "bootdrive": "",
+ "bd_partitions": [
+ {
+ "name": "",
+ "size": "",
+ "bootable": "",
+ "filesystem": {
+ "mountpoint": "",
+ "fstype": "",
+ "mount_options": ""
+ }
+ }
+ ],
+ "data_devices": [
+ {
+ "name": "",
+ "partitions": [
+ {
+ "name": "ceph",
+ "size": "available",
+ "filesystem": {
+ "mountpoint": "/var/lib/ceph",
+ "fstype": "ext4",
+ "mount_options": "defaults"
+ }
+ }
+ ]
+ }
+ ],
+ "journal_devices": [
+ {
+ "name": ""
+ }
+ ]
+ }
+ ],
+ "_comment_nw1": "Network Info, Please include IPMI & Physnets info too",
+ "networks": [
+ {
+ "name": "",
+ "vips": [
+ {
+ "name": "",
+ "ip": ""
+ }
+ ],
+ "tunnel_type": "",
+ "tunnel_id": "",
+ "tunnel_id_range": "",
+ "mtu": "",
+ "routedomain": "",
+ "cidr": "",
+ "dns": "",
+ "routes": [
+ {
+ "subnet": "",
+ "gateway": "",
+ "metric": "",
+ "routedomain": ""
+ }
+ ],
+ "allocation_pools": [
+ {
+ "type": "",
+ "start": "",
+ "end": ""
+ }
+ ],
+ "v6_cidr": "",
+ "v6_allocation_pools": [
+ {
+ "type": "",
+ "start": "",
+ "end": ""
+ }
+ ]
+ }
+ ],
+ "_comment_nw_2": "These are specific to Infrastructure manager",
+ "physical_networks": [
+ {
+ "name": "external",
+ "cidr": "",
+ "type": "flat"
+ }
+ ],
+ "_comment_nw3": " type: trunk (airship), bond, interface, bridge",
+ "network_link": [
+ {
+ "name": "",
+ "type": "",
+ "bonding_mode": "",
+ "mtu": "",
+ "linkspeed": "auto",
+ "trunking_mode": "",
+ "trunking_default_nw": "",
+ "metadata": [
+ {
+ "key": "",
+ "value": ""
+ }
+ ],
+ "members": [
+ {
+ "name": "",
+ "type": ""
+ }
+ ],
+ "vid": "",
+ "vf_count": ""
+ }
+ ],
+ "_comment_nw4": "The link_name could be i/f, bond, bridges",
+ "_comment_nw5": "These profiles are mapped to roles",
+ "link_nw_mapping_profiles": [
+ {
+ "profile_name": "",
+ "profile_data": [
+ {
+ "link_name": "",
+ "link_type": "",
+ "networks": [
+ {
+ "name": ""
+ }
+ ],
+ "use_dhcp": ""
+ }
+ ]
+ }
+ ],
+ "platform_profiles": [
+ {
+ "profile_name": "",
+ "os": "",
+ "rt_kvm": "",
+ "kernel_version": "",
+ "kernel_parameters": "",
+ "isolated_cpus": "",
+ "vnf_cores": "",
+ "os_reserved_cores": " ",
+ "hugepage_count": "",
+ "hugepages": [
+ {
+ "hugepage_count": "",
+ "hugepage_size": ""
+ }
+ ],
+ "iommu": "",
+ "vswitch_daemon_cores": " ",
+ "vswitch_type": "",
+ "vswitch_uio_driver": "",
+ "vswitch_mem_channels": "",
+ "vswitch_socket_memory": "",
+ "vswitch_pmd_cores": "",
+ "vswitch_dpdk_lcores": "",
+ "vswitch_dpdk_rxqs": "",
+ "vswitch_options": ""
+ }
+ ],
+ "undercloud_ook": {
+ "dns": {
+ "cluster_domain": "",
+ "service_ip": ""
+ },
+ "etcd": {
+ "service_ip": "",
+ "container_port": "",
+ "haproxy_port": ""
+ },
+ "masters": [
+ {
+ "hostname": ""
+ }
+ ],
+ "networking": {
+ "type": "",
+ "interface_used": "",
+ "api_service_ip": "",
+ "etcd_service_ip": "",
+ "pod_cidr": "",
+ "service_cidr": "",
+ "apiserver_port": "",
+ "haproxy_port": "",
+ "servicenode_port_range": ""
+ },
+ "kvps": [
+ {
+ "key": "",
+ "value": ""
+ }
+ ]
+
+ },
+ "undercloud_ooo": {
+ "host_name": "",
+ "local_ip": "",
+ "public_host": "",
+ "admin_host": "",
+ "local_interface": "",
+ "inspection_interface": "",
+ "networking": {
+ "ctrlplane_cidr": "",
+ "ctrlplane_ip_start": "",
+ "ctrlplane_ip_end": "",
+ "inspection_range": "",
+ "gateway": ""
+ }
+ },
+ "switch_topology": [
+ {
+ "top_id": "",
+ "top_name": "",
+ "vertices": [
+ {
+ "vertex_id": "",
+ "node1_id": "",
+ "node1_port": "",
+ "node2_id": "",
+ "node2_port": "",
+ "vertex_type": ""
+ }
+ ]
+ }
+ ],
+ "_comment_sw1": "Software Begins",
+ "undercloud_sw_profiles": [
+ {
+ "profile_name": "",
+ "sw_list": [
+ {
+ "name": "",
+ "version": ""
+ }
+ ]
+ }
+ ],
+ "openstack_sw_profiles": [
+ {
+ "profile_name": "",
+ "sw_list": [
+ {
+ "name": "",
+ "version": ""
+ }
+ ]
+ }
+ ],
+ "infra_sw_profiles": [
+ {
+ "profile_name": "",
+ "sw_list": [
+ {
+ "name": "",
+ "version": ""
+ }
+ ]
+ }
+ ],
+ "software_set": [
+ {
+ "set_name": "",
+ "undercloud_profile": "",
+ "infrasw_profile": "",
+ "openstack_profile": ""
+ }
+ ],
+ "_comment_role1": "User has to configure this - What profile to use for a role",
+ "_comment_role2": "Based on this server_info will be autogenerated for all servers",
+ "roles": [
+ {
+ "name": "",
+ "hostname_prefix": "",
+ "hostname_suffix": "",
+ "hostname_number_start": "",
+ "count": "",
+ "hardware_profile": "",
+ "interface_mapping": "",
+ "storage_mapping": "",
+ "platform_profile": "",
+ "sw_set_name": "",
+ "metadata": [
+ {
+ "on_count_condition": "",
+ "count": "",
+ "key": "",
+ "value": ""
+ }
+ ]
+ }
+ ],
+ "_comment_ex1": "C:City, A:Area, R:Room, N:Unique Number",
+ "_comment_ex2": "All are 2 characters. City-Capitals, Area-Small",
+ "extrapolation_info": {
+ "ilo_password": "CID-AID-RID-NID",
+ "ilo_user": "owner",
+ "ip_increment": ""
+ },
+ "host_aggregates": [
+ {
+ "aggregate_name": "",
+ "properties": [
+ {
+ "key": "",
+ "value": ""
+ }
+ ],
+ "servers": [
+ {
+ "identifier": "",
+ "ilo_ip": "",
+ "hostname": ""
+ }
+ ]
+ }
+ ],
+ "_comment_servers1": "This will be auto generated",
+ "_comment_servers2": "This describes the entire cloud.",
+ "servers": [
+ {
+ "role_name": "",
+ "device_name": "",
+ "az_name": " ",
+ "ha_name": " ",
+ "rack": "",
+ "ilo_info": {
+ "ip": "",
+ "user": "",
+ "password": ""
+ },
+ "service_info": {
+ "service_ip": ""
+ }
+ }
+ ]
+ } \ No newline at end of file
diff --git a/sdv/docker/sdvstate/settings/state.yml b/sdv/docker/sdvstate/settings/state.yml
new file mode 100644
index 0000000..8a031d1
--- /dev/null
+++ b/sdv/docker/sdvstate/settings/state.yml
@@ -0,0 +1,20 @@
+# This is a comment
+
+# values are stored in key:value format
+## keys are case-insensitive
+## values can be int, float, string, dict, list, bool
+
+
+## Path to PDF file
+PDF_FILE: PDF_FILE.json
+
+#############
+# Airship arguments
+#############
+
+# Path to kube-config file
+KUBE_CONFIG : config
+
+
+MASTER_ROLE_NAME : masters
+WORKER_ROLE_NAME : workers