aboutsummaryrefslogtreecommitdiffstats
path: root/sdv/docker/sdvstate/validator/airship
diff options
context:
space:
mode:
authorParth Yadav <parthyadav3105@gmail.com>2021-07-19 19:26:06 +0530
committerParth Yadav <parthyadav3105@gmail.com>2021-07-19 19:53:45 +0530
commit27cf386ac6a133b5c75d2dbe7864ec7166d74b09 (patch)
tree5f1338e8a360fef901d4c0d3c8612c06c99102c0 /sdv/docker/sdvstate/validator/airship
parent3a0d5aa439389a42d1fc0cdcefdae2fe597c3e56 (diff)
Initialize k8s test suite for Airship
This patch updates sdv framework to support multiple test suites. Current test suites: * default * k8s Current Cloud Installers: * Airship The patch also adds makefile for sdv framework to ease development work. Some useful makefile target: * make sandbox * make run * make lint * make bash Run `make help` for complete usage guide. The patch restructures the codebases directory structure. Signed-off-by: Parth Yadav<parthyadav3105@gmail.com> Change-Id: I109d13f84334ec1cfa4f9c17b74d38a979272ea5
Diffstat (limited to 'sdv/docker/sdvstate/validator/airship')
-rw-r--r--sdv/docker/sdvstate/validator/airship/__init__.py49
-rw-r--r--sdv/docker/sdvstate/validator/airship/airship.py129
-rw-r--r--sdv/docker/sdvstate/validator/airship/ceph_check.py51
-rw-r--r--sdv/docker/sdvstate/validator/airship/compute_check.py661
-rw-r--r--sdv/docker/sdvstate/validator/airship/monitoring_logging_agent_check.py243
-rw-r--r--sdv/docker/sdvstate/validator/airship/network_check.py114
-rw-r--r--sdv/docker/sdvstate/validator/airship/pod_health_check.py111
-rw-r--r--sdv/docker/sdvstate/validator/airship/store_result.py28
8 files changed, 0 insertions, 1386 deletions
diff --git a/sdv/docker/sdvstate/validator/airship/__init__.py b/sdv/docker/sdvstate/validator/airship/__init__.py
deleted file mode 100644
index 78e42c4..0000000
--- a/sdv/docker/sdvstate/validator/airship/__init__.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright 2020 University Of Delhi.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""
-Package for Airship
-"""
-
-
-### Pod Health Checks
-from .pod_health_check import pod_health_check
-
-### Ceph Health Checks
-from .ceph_check import ceph_health_check
-
-### Monitoring & Logging Agents Checks
-from .monitoring_logging_agent_check import prometheus_check
-from .monitoring_logging_agent_check import grafana_check
-# from .monitoring_logging_agent_check import prometheus_alert_manager_check
-from .monitoring_logging_agent_check import elasticsearch_check
-from .monitoring_logging_agent_check import kibana_check
-from .monitoring_logging_agent_check import nagios_check
-from .monitoring_logging_agent_check import elasticsearch_exporter_check
-from .monitoring_logging_agent_check import fluentd_exporter_check
-
-### Network Checks
-from .network_check import physical_network_check
-
-### Compute Related Checks
-from .compute_check import reserved_vnf_cores_check
-from .compute_check import isolated_cores_check
-from .compute_check import vswitch_pmd_cores_check
-from .compute_check import vswitch_dpdk_lcores_check
-from .compute_check import os_reserved_cores_check
-from .compute_check import nova_scheduler_filters_check
-from .compute_check import cpu_allocation_ratio_check
-
-from .store_result import store_result
diff --git a/sdv/docker/sdvstate/validator/airship/airship.py b/sdv/docker/sdvstate/validator/airship/airship.py
deleted file mode 100644
index f2bdebd..0000000
--- a/sdv/docker/sdvstate/validator/airship/airship.py
+++ /dev/null
@@ -1,129 +0,0 @@
-# Copyright 2020 University Of Delhi.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""
-Airship Validator
-"""
-
-import logging
-from datetime import datetime as dt
-
-from tools.conf import settings
-from tools.kube_utils import load_kube_api, delete_kube_curl_pod
-from validator.validator import Validator
-
-from . import *
-
-
-
-
-
class AirshipValidator(Validator):
    """Validator that checks an Airship deployment against its PDF."""

    def __init__(self):
        """Set up logging, the empty report skeleton and the kube client."""
        super().__init__()
        self._logger = logging.getLogger(__name__)

        # Aggregate report; overall 'criteria' flips to 'fail' as soon as
        # any individual check fails.
        self._report = {"installer": "Airship",
                        "criteria": "pass",
                        "details": {"total_checks": 0,
                                    "pass": [],
                                    "fail": [],
                                    "metadata": {}
                                    }
                        }

        load_kube_api()

    def validate(self):
        """Run every check in order, timestamping the run."""
        self._report['scenario'] = 'none'
        self._report['case_name'] = 'ook_airship'
        self._report['start_date'] = dt.now().strftime('%Y-%m-%d %H:%M:%S')

        checks = (
            # platform
            pod_health_check,
            # storage
            ceph_health_check,
            # monitoring & logging agents
            prometheus_check,
            grafana_check,
            # prometheus_alert_manager_check is skipped: the deployed
            # AlertManager version does not support it.
            elasticsearch_check,
            kibana_check,
            nagios_check,
            elasticsearch_exporter_check,
            fluentd_exporter_check,
            # network
            physical_network_check,
            # compute
            reserved_vnf_cores_check,
            isolated_cores_check,
            vswitch_pmd_cores_check,
            vswitch_dpdk_lcores_check,
            os_reserved_cores_check,
            nova_scheduler_filters_check,
            cpu_allocation_ratio_check,
        )
        for check in checks:
            self.update_report(check())

        delete_kube_curl_pod()

        self._report['stop_date'] = dt.now().strftime('%Y-%m-%d %H:%M:%S')

    def update_report(self, result):
        """Merge one check result dict into the aggregate report."""
        case_name = result['case_name']
        verdict = result['criteria']

        details = self._report['details']
        details['total_checks'] += 1
        if verdict == 'pass':
            details['pass'].append(case_name)
        elif verdict == 'fail':
            details['fail'].append(case_name)
            self._report['criteria'] = 'fail'

    def get_report(self):
        """Finalise the report, persist it, and return it as a dict."""
        self._report["project_name"] = settings.getValue("project_name")
        self._report["version"] = settings.getValue("project_version")
        self._report["build_tag"] = "none"

        pdf = settings.getValue('pdf_file')
        self._report["pod_name"] = pdf['management_info']['resource_pool_name']

        store_result(self._report)

        return self._report
diff --git a/sdv/docker/sdvstate/validator/airship/ceph_check.py b/sdv/docker/sdvstate/validator/airship/ceph_check.py
deleted file mode 100644
index b33e876..0000000
--- a/sdv/docker/sdvstate/validator/airship/ceph_check.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# Copyright 2020 University Of Delhi.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-Ceph Related Checks
-"""
-
-import ast
-
-from tools.kube_utils import get_pod_with_labels, kube_exec
-from .store_result import store_result
-
-
-
-
def ceph_health_check():
    """
    Check health of Ceph.

    Runs `ceph health -f json` inside a ceph-mon pod and passes only when
    the reported status is HEALTH_OK.

    :return: result dict with category/case_name/criteria/details keys

    Fix: the command emits JSON, so the reply is parsed with json.loads.
    The original used ast.literal_eval, which cannot parse JSON literals
    such as true/false/null and would raise on any reply containing them.
    """
    import json

    pod = get_pod_with_labels('application=ceph,component=mon')

    cmd = ['ceph', 'health', '-f', 'json']
    response = kube_exec(pod, cmd)

    response = json.loads(response)

    result = {'category': 'storage',
              'case_name': 'ceph_health_check',
              'details': []
              }

    if response['status'] == 'HEALTH_OK':
        result['criteria'] = 'pass'
        result['details'] = 'HEALTH_OK'
    else:
        result['criteria'] = 'fail'
        result['details'] = response

    store_result(result)
    return result
diff --git a/sdv/docker/sdvstate/validator/airship/compute_check.py b/sdv/docker/sdvstate/validator/airship/compute_check.py
deleted file mode 100644
index a602471..0000000
--- a/sdv/docker/sdvstate/validator/airship/compute_check.py
+++ /dev/null
@@ -1,661 +0,0 @@
-# Copyright 2020 University Of Delhi.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-Compute Related Checks
-"""
-
-import configparser
-import json
-import re
-
-from tools.kube_utils import kube_exec, get_pod_with_labels
-from tools.conf import settings
-from .store_result import store_result
-
-
-###########
-# Checks
-###########
-
def isolated_cores_check():
    """Compare traced isolated cores against the PDF-required value."""
    traced = trace_isolated_cores()
    required = required_isolated_cores()

    result = {
        'category': 'compute',
        'case_name': 'isolated_cores_check',
        'criteria': 'pass' if is_ranges_equals(traced, required) else 'fail',
        'details': {'traced_cores': traced, 'required_cores': required},
    }

    store_result(result)
    return result
-
-
-
def reserved_vnf_cores_check():
    """Compare traced reserved VNF cores against the PDF-required value."""
    traced = trace_reserved_vnf_cores()
    required = required_reserved_vnf_cores()

    result = {
        'category': 'compute',
        'case_name': 'reserved_vnf_cores_check',
        'criteria': 'pass' if is_ranges_equals(traced, required) else 'fail',
        'details': {'traced_cores': traced, 'required_cores': required},
    }

    store_result(result)
    return result
-
-
-
def vswitch_pmd_cores_check():
    """Compare traced vswitch PMD cores against the PDF-required value."""
    traced = trace_vswitch_pmd_cores()
    required = required_vswitch_pmd_cores()

    result = {
        'category': 'compute',
        'case_name': 'vswitch_pmd_cores_check',
        'criteria': 'pass' if is_ranges_equals(traced, required) else 'fail',
        'details': {'traced_cores': traced, 'required_cores': required},
    }

    store_result(result)
    return result
-
-
-
def vswitch_dpdk_lcores_check():
    """Compare traced vswitch DPDK lcores against the PDF-required value."""
    traced = trace_vswitch_dpdk_lcores()
    required = required_vswitch_dpdk_lcores()

    result = {
        'category': 'compute',
        'case_name': 'vswitch_dpdk_lcores_check',
        'criteria': 'pass' if is_ranges_equals(traced, required) else 'fail',
        'details': {'traced_cores': traced, 'required_cores': required},
    }

    store_result(result)
    return result
-
-
-
def os_reserved_cores_check():
    """Compare traced OS-reserved cores against the PDF-required value."""
    traced = trace_os_reserved_cores()
    required = required_os_reserved_cores()

    result = {
        'category': 'compute',
        'case_name': 'os_reserved_cores_check',
        'criteria': 'pass' if is_ranges_equals(traced, required) else 'fail',
        'details': {'traced_cores': traced, 'required_cores': required},
    }

    store_result(result)
    return result
-
-
-
def nova_scheduler_filters_check():
    """Compare traced nova scheduler filters against the PDF-required set."""
    traced = trace_nova_scheduler_filters()
    required = required_nova_scheduler_filters()

    result = {
        'category': 'compute',
        'case_name': 'nova_scheduler_filters_check',
        'criteria': 'pass' if are_lists_equal(traced, required) else 'fail',
        'details': {'traced_filters': traced, 'required_filters': required},
    }

    store_result(result)
    return result
-
-
-
def cpu_allocation_ratio_check():
    """Compare traced cpu_allocation_ratio against the PDF-required value."""
    traced = trace_cpu_allocation_ratio()
    required = required_cpu_allocation_ratio()

    result = {
        'category': 'compute',
        'case_name': 'cpu_allocation_ratio_check',
        'criteria': 'pass' if traced == required else 'fail',
        'details': {'traced_ratio': traced, 'required_ratio': required},
    }

    store_result(result)
    return result
-
-
-
-
-
-
-
-
-###############
-# helper functions
-###############
-
-
-
def trace_isolated_cores():
    """
    Trace isolated_cores from the Airship deployment.

    :return: value of the `isolcpus` option in `/proc/cmdline` of a
             nova-compute pod, or '' when the option is absent

    Fix: default the result to '' — the original left `isolcpus_value`
    unbound (UnboundLocalError) when no isolcpus option was present on
    the kernel command line.
    """
    pod = get_pod_with_labels('application=nova,component=compute')

    cmd = ['cat', '/proc/cmdline']
    proc_cmd = kube_exec(pod, cmd)

    isolcpus_value = ''
    for option in proc_cmd.split():
        if 'isolcpus' in option:
            _, isolcpus_value = split_key_value(option)
            break

    return isolcpus_value
-
-
def required_isolated_cores():
    """
    Return `isolated_cpus` from the platform profile attached to the
    worker-node Role in the PDF.
    """
    role_name = settings.getValue('WORKER_ROLE_NAME')
    return get_platform_profile_by_role(role_name)['isolated_cpus']
-
-
-
-
-
-
def trace_reserved_vnf_cores():
    """
    Trace vnf_reserved_cores from the Airship deployment.

    :return: `vcpu_pin_set` from the deployed nova.conf, '' when absent
    """
    value = ''
    try:
        value = get_nova_conf().get('DEFAULT', 'vcpu_pin_set')
    except (configparser.NoOptionError, configparser.MissingSectionHeaderError):
        pass
    return value
-
-
def required_reserved_vnf_cores():
    """
    Return `vnf_cores` from the platform profile attached to the
    worker-node Role in the PDF.
    """
    role_name = settings.getValue('WORKER_ROLE_NAME')
    return get_platform_profile_by_role(role_name)['vnf_cores']
-
-
-
-
-
-
def trace_vswitch_pmd_cores():
    """
    Trace vswitch_pmd_cores from the Airship deployment.

    :return: cores decoded from `other_config:pmd-cpu-mask` in the
             openvswitch DB (via ovs-vsctl), '' when the key is absent
    """
    ovs_pod = get_pod_with_labels('application=openvswitch,component=openvswitch-vswitchd')

    raw = kube_exec(ovs_pod, ['ovs-vsctl', '-t', '5', 'get', 'Open_vSwitch', '.', 'other_config'])

    # Quote keys and values so the ovs map literal becomes parseable JSON.
    for key in re.findall("[a-zA-Z0-9-]+=", raw):
        raw = raw.replace(key, '"' + key[:-1] + '":')
    for val in re.findall(":[a-zA-Z0-9-]+", raw):
        raw = raw.replace(val[1:], '"' + val[1:] + '"')

    other_config = json.loads(raw)

    if 'pmd-cpu-mask' in other_config:
        return hex_to_comma_list(other_config['pmd-cpu-mask'])
    return ''
-
-
def required_vswitch_pmd_cores():
    """
    Return `vswitch_pmd_cores` from the platform profile attached to the
    worker-node Role in the PDF.
    """
    role_name = settings.getValue('WORKER_ROLE_NAME')
    return get_platform_profile_by_role(role_name)['vswitch_pmd_cores']
-
-
-
-
-
-
def trace_vswitch_dpdk_lcores():
    """
    Trace vswitch_dpdk_lcores from the Airship deployment.

    :return: cores decoded from `other_config:dpdk-lcore-mask` in the
             openvswitch DB (via ovs-vsctl), '' when the key is absent
    """
    ovs_pod = get_pod_with_labels('application=openvswitch,component=openvswitch-vswitchd')

    raw = kube_exec(ovs_pod, ['ovs-vsctl', '-t', '5', 'get', 'Open_vSwitch', '.', 'other_config'])

    # Quote keys and values so the ovs map literal becomes parseable JSON.
    for key in re.findall("[a-zA-Z0-9-]+=", raw):
        raw = raw.replace(key, '"' + key[:-1] + '":')
    for val in re.findall(":[a-zA-Z0-9-]+", raw):
        raw = raw.replace(val[1:], '"' + val[1:] + '"')

    other_config = json.loads(raw)

    if 'dpdk-lcore-mask' in other_config:
        return hex_to_comma_list(other_config['dpdk-lcore-mask'])
    return ''
-
-
def required_vswitch_dpdk_lcores():
    """
    Return `vswitch_dpdk_lcores` from the platform profile attached to the
    worker-node Role in the PDF.
    """
    role_name = settings.getValue('WORKER_ROLE_NAME')
    return get_platform_profile_by_role(role_name)['vswitch_dpdk_lcores']
-
-
-
-
-
-
def trace_os_reserved_cores():
    """
    Trace os_reserved_cores from the Airship deployment.

    os_reserved_cores = all_cores - (reserved_vnf_cores +
                                     vswitch_pmd_cores +
                                     vswitch_dpdk_lcores)

    :return: comma separated core list, sorted ascending

    Fix: the cores are sorted before joining — the original iterated a
    raw set, whose ordering is unspecified, so the traced string was
    nondeterministic. Consumers compare via is_ranges_equals (set-based),
    so sorting is safe.
    """
    worker_role = settings.getValue('WORKER_ROLE_NAME')
    all_cores = get_cores_by_role(worker_role)

    non_os_cores = []
    for traced in (trace_reserved_vnf_cores(),
                   trace_vswitch_pmd_cores(),
                   trace_vswitch_dpdk_lcores()):
        non_os_cores.extend(convert_range_to_list(traced))

    os_reserved_cores = sorted(set(all_cores).difference(non_os_cores))

    return ','.join(map(str, os_reserved_cores))
-
-
def required_os_reserved_cores():
    """
    Return `os_reserved_cores` from the platform profile attached to the
    worker-node Role in the PDF.
    """
    role_name = settings.getValue('WORKER_ROLE_NAME')
    return get_platform_profile_by_role(role_name)['os_reserved_cores']
-
-
-
-
-
def trace_nova_scheduler_filters():
    """
    Trace scheduler filters from the Airship deployment.

    :return: list of filter names from `enabled_filters` in the deployed
             nova.conf (stripped of surrounding whitespace)

    Fix: whitespace is now actually stripped — the original called
    `map(str.strip, filters)` and discarded the lazy map object, so the
    strip never took effect.
    """
    try:
        config = get_nova_conf()
        filters = config.get('filter_scheduler', 'enabled_filters')
    except (configparser.NoOptionError, configparser.MissingSectionHeaderError):
        filters = ''

    return [name.strip() for name in filters.split(',')]
-
def required_nova_scheduler_filters():
    """
    Return the nova scheduler filters required by the PDF.

    :return: list of filter names (stripped of surrounding whitespace)

    Fix: whitespace is now actually stripped — the original called
    `map(str.strip, filters)` and discarded the lazy map object, so the
    strip never took effect.
    """
    pdf = settings.getValue('pdf_file')
    filters = pdf['vim_functional']['scheduler_filters']

    return [name.strip() for name in filters.split(',')]
-
-
-
-
-
-
-
def trace_cpu_allocation_ratio():
    """
    Trace cpu_allocation_ratio from the Airship deployment.

    :return: float value of `cpu_allocation_ratio` in the deployed
             nova.conf, 0.0 when the option is absent

    Fix: the original called float('') when the option was missing,
    raising ValueError instead of reporting a traced value; a missing
    option now yields 0.0 so the check can fail cleanly.
    """
    try:
        config = get_nova_conf()
        cpu_allocation_ratio = config.get('DEFAULT', 'cpu_allocation_ratio')
    except (configparser.NoOptionError, configparser.MissingSectionHeaderError):
        cpu_allocation_ratio = ''

    return float(cpu_allocation_ratio) if cpu_allocation_ratio else 0.0
-
def required_cpu_allocation_ratio():
    """Return the cpu_allocation_ratio required by the PDF, as a float."""
    pdf = settings.getValue('pdf_file')
    return float(pdf['vim_functional']['cpu_allocation_ratio'])
-
-
-
-
-
-
-
def get_role(role_name):
    """
    Search and return the PDF role named `role_name`.

    :raises KeyError: when no role matches — the original left
        `role_details` unbound in that case and died with
        UnboundLocalError; an explicit error is clearer. (It also
        returned the *last* match; duplicates are not expected, and the
        first match is returned now.)
    """
    roles = settings.getValue('pdf_file')['roles']

    for role in roles:
        if role['name'] == role_name:
            return role

    raise KeyError('no role named %r in PDF' % role_name)
-
-
def get_platform_profile(profile_name):
    """
    Search and return the PDF platform_profile named `profile_name`.

    :raises KeyError: when no profile matches — the original left
        `profile_details` unbound in that case (UnboundLocalError).
    """
    platform_profiles = settings.getValue('pdf_file')['platform_profiles']

    for profile in platform_profiles:
        if profile['profile_name'] == profile_name:
            return profile

    raise KeyError('no platform_profile named %r in PDF' % profile_name)
-
def get_processor_profile(profile_name):
    """
    Search and return the PDF processor_profile named `profile_name`.

    :raises KeyError: when no profile matches — the original left
        `profile_details` unbound in that case (UnboundLocalError).
    """
    processor_profiles = settings.getValue('pdf_file')['processor_profiles']

    for profile in processor_profiles:
        if profile['profile_name'] == profile_name:
            return profile

    raise KeyError('no processor_profile named %r in PDF' % profile_name)
-
def get_platform_profile_by_role(role_name):
    """Return the platform profile attached to the named role."""
    return get_platform_profile(get_role(role_name)['platform_profile'])
-
-
def get_hardware_profile_by_role(role_name):
    """
    Return the hardware profile attached to the named role.

    :raises KeyError: when no hardware profile matches — the original
        left `profile_details` unbound in that case (UnboundLocalError).
    """
    role = get_role(role_name)

    hardware_profiles = settings.getValue('pdf_file')['hardware_profiles']

    for profile in hardware_profiles:
        if profile['profile_name'] == role['hardware_profile']:
            return profile

    raise KeyError('no hardware_profile named %r in PDF'
                   % role['hardware_profile'])
-
-
def get_cores_by_role(role_name):
    """Return the list of cpu core ids of the server hardware used by a role."""
    hardware = get_hardware_profile_by_role(role_name)
    processor = get_processor_profile(
        hardware['profile_info']['processor_profile'])

    cores = []
    for numa in processor['profile_info']['numas']:
        cores.extend(convert_range_to_list(numa['cpu_set']))

    return cores
-
-
-
-
-
-
-
def get_nova_conf():
    """Fetch /etc/nova/nova.conf from a nova-compute pod and parse it."""
    pod = get_pod_with_labels('application=nova,component=compute')

    raw = kube_exec(pod, ['cat', '/etc/nova/nova.conf'])

    parser = configparser.ConfigParser()
    parser.read_string(raw)
    return parser
-
-
-### cpu cores related helper function
-
def convert_range_to_list(x):
    """
    Expand a cpu-range string into a list of ints.

    e.g.: convert_range_to_list('3-5') gives [3, 4, 5]. Duplicates are
    dropped while keeping first-seen order; empty parts are ignored.
    """
    # pylint: disable=C0103
    # dict keys preserve insertion order and dedup in one pass
    seen = {}
    for part in x.split(','):
        if '-' in part:
            lo, hi = part.split('-')
            for n in range(int(lo), int(hi) + 1):
                seen[n] = None
        elif part != '':
            seen[int(part)] = None
    return list(seen)
-
-
def is_ranges_equals(range1, range2):
    """
    Return True when two range strings cover the same cores.

    e.g.: is_ranges_equals('2-5', '2-4,5') is True
    """
    return set(convert_range_to_list(range1)) == set(convert_range_to_list(range2))
-
def are_lists_equal(list1, list2):
    """Return True when both lists hold the same elements (order/dups ignored)."""
    return set(list1) == set(list2)
-
-
-
def hex_to_comma_list(hex_mask):
    """Convert a hex CPU mask to a comma separated list of set-bit indices."""
    # LSB-first scan: bit i set means core i is in the mask
    bits = bin(int(hex_mask, 16))[2:][::-1]
    return ','.join(str(i) for i, bit in enumerate(bits) if bit == '1')
-
-
def comma_list_to_hex(cpus):
    """
    Convert a comma separated list of cpu cores to the corresponding
    hex cpu-mask (zero-padded to at least two digits).

    Fix: blank entries — including an entirely empty input string — are
    skipped; the original passed '' to int() and raised ValueError.
    """
    binary_mask = 0
    for cpu in cpus.split(","):
        if cpu.strip():
            binary_mask |= 1 << int(cpu)
    return format(binary_mask, '02x')
-
-
-
def split_key_value(key_value_str, delimiter='='):
    """
    Split a string into a (key, value) pair on the FIRST delimiter,
    stripping whitespace from both parts.

    :param key_value_str: example string `someKey=somevalue`
    :param delimiter: default delimiter is `=`
    :return: (key, value)

    Fix: split with maxsplit=1 — the original unpacked an unbounded
    split and raised ValueError whenever the value itself contained the
    delimiter (e.g. `opt=a=b`).
    """
    key, value = key_value_str.split(delimiter, 1)
    return key.strip(), value.strip()
diff --git a/sdv/docker/sdvstate/validator/airship/monitoring_logging_agent_check.py b/sdv/docker/sdvstate/validator/airship/monitoring_logging_agent_check.py
deleted file mode 100644
index 3754299..0000000
--- a/sdv/docker/sdvstate/validator/airship/monitoring_logging_agent_check.py
+++ /dev/null
@@ -1,243 +0,0 @@
-# Copyright 2020 University Of Delhi.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-Monitoring & Logging Agents Related Checks
-"""
-
-import ast
-
-from tools.kube_utils import kube_curl
-from tools.result_api import rfile
-from .store_result import store_result
-
-
def prometheus_check():
    """Probe the Prometheus liveness and readiness endpoints."""
    username = "prometheus"
    password = "password123"
    service = "prom-metrics"
    namespace = "osh-infra"

    health_res = kube_curl("-sL", "-m", "3", "-u", f'{username}:{password}', f'{service}.{namespace}/-/healthy')
    health = "pass" if 'Prometheus is Healthy' in health_res else "fail"

    ready_res = kube_curl("-sL", "-m", "3", "-u", f'{username}:{password}', f'{service}.{namespace}/-/ready')
    readiness = "pass" if 'Prometheus is Ready' in ready_res else "fail"

    state = "pass" if health == readiness == "pass" else "fail"

    result = {'category': 'platform',
              'case_name': 'prometheus_check',
              'criteria': state,
              'details': {'health': health, 'readiness': readiness}}

    store_result(result)
    return result
-
-
-
def grafana_check():
    """Check Grafana health via the HTTP status of /api/health."""
    username = "grafana"
    password = "password123"
    service = "grafana-dashboard"
    namespace = "osh-infra"

    code = kube_curl("-sL", "-m", "3", "-w", "%{http_code}",
                     "-o", "/dev/null", "-u",
                     f'{username}:{password}',
                     f'{service}.{namespace}:3000/api/health')

    result = {'category': 'platform',
              'case_name': 'grafana_check',
              'criteria': 'pass' if code == '200' else 'fail'}

    store_result(result)
    return result
-
-
def prometheus_alert_manager_check():
    """Probe the AlertManager liveness and readiness endpoints."""
    service = "alerts-engine"
    namespace = "osh-infra"

    # NOTE(review): both probes match 'Prometheus is Healthy/Ready', which
    # looks wrong for AlertManager endpoints — confirm before re-enabling
    # this check (the validator currently skips it).
    health_res = kube_curl("-sL", "-m", "3", f'{service}.{namespace}:9093/-/healthy')
    health = "pass" if 'Prometheus is Healthy' in health_res else "fail"

    ready_res = kube_curl("-sL", "-m", "3", f'{service}.{namespace}:9093/-/ready')
    readiness = "pass" if 'Prometheus is Ready' in ready_res else "fail"

    state = "pass" if health == readiness == "pass" else "fail"

    result = {'category': 'platform',
              'case_name': 'prometheus_alert_manager_check',
              'criteria': state,
              'details': {'health': health, 'readiness': readiness}}

    store_result(result)
    return result
-
-
def elasticsearch_check():
    """
    Check health of the Elasticsearch cluster.

    Queries /_cluster/health and passes only when cluster status is
    'green'; an empty reply is reported as unreachable.

    Fix: the endpoint returns JSON, so the body is parsed with
    json.loads — the original used ast.literal_eval, which fails on JSON
    literals such as true/false/null that this endpoint emits.
    """
    import json

    username = "elasticsearch"
    password = "password123"
    service = "elasticsearch"
    namespace = "osh-infra"

    state = "fail"  # default
    res = kube_curl("-sL", "-m", "3", "-u", f'{username}:{password}', f'{service}.{namespace}/_cluster/health')

    if res == '':
        res = 'Elasticsearch not reachable'
    else:
        res = json.loads(res)
        if res['status'] == 'green':
            state = "pass"

    result = {'category': 'platform',
              'case_name': 'elasticsearch_check',
              'criteria': state,
              'details': res}

    store_result(result)
    return result
-
-
def kibana_check():
    """
    Check health of Kibana.

    Queries /api/status and passes only when the overall state is
    'green'; an empty reply is reported as unreachable.

    Fix: the endpoint returns JSON, so the body is parsed with
    json.loads — the original used ast.literal_eval, which fails on JSON
    literals such as true/false/null that this endpoint emits.
    """
    import json

    username = "elasticsearch"
    password = "password123"
    service = "kibana-dash"
    namespace = "osh-infra"

    state = "fail"  # default
    res = kube_curl("-sL", "-m", "3", "-u", f'{username}:{password}', f'{service}.{namespace}/api/status')

    if res == '':
        res = 'kibana not reachable'
    else:
        res = json.loads(res)
        if res['status']['overall']['state'] == 'green':
            state = "pass"

    result = {'category': 'platform',
              'case_name': 'kibana_check',
              'criteria': state,
              'details': rfile(str(res))}

    store_result(result)
    return result
-
-
def nagios_check():
    """Check Nagios availability via its HTTP status code."""
    username = "nagios"
    password = "password123"
    service = "nagios-metrics"
    namespace = "osh-infra"

    code = kube_curl("-sL", "-m", "3", "-w", "%{http_code}",
                     "-o", "/dev/null", "-u",
                     f'{username}:{password}',
                     f'{service}.{namespace}')

    result = {'category': 'platform',
              'case_name': 'nagios_check',
              'criteria': 'pass' if code == '200' else 'fail'}

    store_result(result)
    return result
-
-
def elasticsearch_exporter_check():
    """Check that the Elasticsearch exporter serves its /metrics endpoint."""
    service = "elasticsearch-exporter"
    namespace = "osh-infra"

    code = kube_curl("-sL", "-m", "3", "-w", "%{http_code}", "-o", "/dev/null", f'{service}.{namespace}:9108/metrics')

    result = {'category': 'platform',
              'case_name': 'elasticsearch_exporter_check',
              'criteria': 'pass' if code == '200' else 'fail'}

    store_result(result)
    return result
-
-
def fluentd_exporter_check():
    """Check that the Fluentd exporter serves its /metrics endpoint."""
    service = "fluentd-exporter"
    namespace = "osh-infra"

    code = kube_curl("-sL", "-m", "3", "-w", "%{http_code}", "-o", "/dev/null", f'{service}.{namespace}:9309/metrics')

    result = {'category': 'platform',
              'case_name': 'fluentd_exporter_check',
              'criteria': 'pass' if code == '200' else 'fail'}

    store_result(result)
    return result
diff --git a/sdv/docker/sdvstate/validator/airship/network_check.py b/sdv/docker/sdvstate/validator/airship/network_check.py
deleted file mode 100644
index bddf579..0000000
--- a/sdv/docker/sdvstate/validator/airship/network_check.py
+++ /dev/null
@@ -1,114 +0,0 @@
-# Copyright 2020 University Of Delhi.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-Network Related Checks
-"""
-
-
-import configparser
-
-from tools.conf import settings
-from tools.kube_utils import kube_exec, get_pod_with_labels
-
-from .store_result import store_result
-
-
def physical_network_check():
    """
    Validate the PDF-declared physical networks against neutron's ml2 config.

    Each physical network must use a type driver enabled in ml2 and, for
    flat/vlan types, its name must appear in the corresponding network list.
    """
    ml2_config = neutron_ml2_config()
    physical_networks = settings.getValue('pdf_file')['physical_networks']

    type_drivers = ml2_config.get('ml2', 'type_drivers').split(',')
    flat_networks = ml2_config.get('ml2_type_flat', 'flat_networks').split(',')
    # network_vlan_ranges entries look like "physnet:min:max"; keep the name.
    vlan_networks = [entry.split(':')[0] for entry in
                     ml2_config.get('ml2_type_vlan', 'network_vlan_ranges').split(',')]

    result = {
        'category': 'network',
        'case_name': 'physical_network_check',
        'criteria': 'pass',
        'details': [],
    }

    for physnet in physical_networks:
        res = {
            'network_name': physnet['name'],
            'type': physnet['type'],
            'criteria': 'fail',
        }

        if physnet['type'] not in type_drivers:
            res['details'] = 'physical network type not found'
        else:
            if physnet['type'] == 'flat':
                # '*' in flat_networks means "any flat network allowed".
                if physnet['name'] in flat_networks or '*' in flat_networks:
                    res['criteria'] = 'pass'
                else:
                    res['details'] = 'physical network name not found'
            if physnet['type'] == 'vlan':
                if physnet['name'] in vlan_networks:
                    res['criteria'] = 'pass'
                else:
                    res['details'] = 'physical network name not found'

        result['details'].append(res)
        if res['criteria'] == 'fail':
            result['criteria'] = 'fail'

    store_result(result)
    return result
-
-
-
def neutron_ml2_config():
    """
    Build a single ConfigParser from the ml2 configs of the neutron
    OVS-agent and SR-IOV-agent pods (OVS configs are read first, so
    later SR-IOV values override on key collisions).
    """
    pods = [
        get_pod_with_labels("application=neutron,component=neutron-ovs-agent"),
        get_pod_with_labels("application=neutron,component=neutron-sriov-agent"),
    ]

    config = configparser.ConfigParser()
    for pod in pods:
        for conf in get_neutron_ml2_conf_from_pod(pod):
            config.read_string(conf)

    return config
-
-
-
-
def get_neutron_ml2_conf_from_pod(pod):
    """
    Return the contents of every file under /etc/neutron/plugins/ml2/
    inside ``pod`` as a list of strings, in directory-listing order.
    """
    ml2_dir = '/etc/neutron/plugins/ml2/'

    listing = kube_exec(pod, ['ls', ml2_dir])
    filenames = listing.rstrip("\n").split()

    return [kube_exec(pod, ['cat', ml2_dir + name]) for name in filenames]
diff --git a/sdv/docker/sdvstate/validator/airship/pod_health_check.py b/sdv/docker/sdvstate/validator/airship/pod_health_check.py
deleted file mode 100644
index 0093ffc..0000000
--- a/sdv/docker/sdvstate/validator/airship/pod_health_check.py
+++ /dev/null
@@ -1,111 +0,0 @@
-# Copyright 2020 University Of Delhi.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""
-Pod Health Checks
-"""
-
-
-
-import logging
-
-from tools.kube_utils import kube_api
-from tools.conf import settings
-from tools.result_api import rfile
-
-from .store_result import store_result
-
-
-
def pod_health_check():
    """
    Inspect every pod in the configured Airship namespaces.

    Any unhealthy pod fails the whole check and has its container logs
    attached to its per-pod entry in the result details.
    """
    api = kube_api()

    result = {
        'category': 'platform',
        'case_name': 'pod_health_check',
        'criteria': 'pass',
        'details': [],
    }

    for namespace in settings.getValue('airship_namespace_list'):
        for pod in api.list_namespaced_pod(namespace).items:
            stats = pod_status(pod)
            if stats['criteria'] == 'fail':
                stats['logs'] = get_logs(pod)
                result['criteria'] = 'fail'
            result['details'].append(stats)

    store_result(result)
    return result
-
-
-
def pod_status(pod):
    """
    Report the health of a single pod.

    A pod passes when every container is Running or has stopped with a
    benign reason ('Completed'); otherwise the full pod manifest is
    attached under 'pod_details' for debugging.

    :param pod: a pod object from the kubernetes client (V1Pod-shaped)
    :return: dict with 'criteria' ('pass'/'fail'), 'name', 'namespace',
             'node' and, on failure, 'pod_details'
    """
    result = {'criteria': 'pass',
              'name': pod.metadata.name,
              'namespace': pod.metadata.namespace,
              'node': pod.spec.node_name}

    if pod.status.container_statuses is None:
        # No container information at all: the pod never got scheduled/started.
        result['criteria'] = 'fail'
        result['pod_details'] = rfile(str(pod))
    else:
        for container in pod.status.container_statuses:
            # Exactly one of running/terminated/waiting is normally set.
            # Default to 'Unknown' so a container with none of them set is
            # treated as unhealthy instead of raising NameError (bug fix:
            # the original left ``status`` unbound in that case, and could
            # also leak the previous container's status between iterations).
            status = 'Unknown'
            if container.state.running is not None:
                status = 'Running'
            if container.state.terminated is not None:
                status = container.state.terminated.reason
            if container.state.waiting is not None:
                status = container.state.waiting.reason

            if status not in ('Running', 'Completed'):
                result['criteria'] = 'fail'
                result['pod_details'] = rfile(str(pod))

    info = f'[Health: {result["criteria"]}] Name: {result["name"]}, '
    info = info + f'Namespace: {result["namespace"]}, Node: {result["node"]}'

    logger = logging.getLogger(__name__)
    logger.debug(info)
    return result
-
-
def get_logs(pod):
    """
    Fetch the logs of every container in ``pod``.

    Containers still in PodInitializing have no logs yet, so a
    placeholder message is stored for them instead.
    """
    api = kube_api()
    logs = []

    containers = pod.status.container_statuses
    if containers is None:
        return logs

    for container in containers:
        waiting = container.state.waiting
        if waiting is not None and waiting.reason == 'PodInitializing':
            log = 'Not found, status: waiting, reason: PodInitializing'
        else:
            log = api.read_namespaced_pod_log(name=pod.metadata.name,
                                              namespace=pod.metadata.namespace,
                                              container=container.name)
        logs.append({'container': container.name, 'log': rfile(log)})

    return logs
diff --git a/sdv/docker/sdvstate/validator/airship/store_result.py b/sdv/docker/sdvstate/validator/airship/store_result.py
deleted file mode 100644
index 52f4e10..0000000
--- a/sdv/docker/sdvstate/validator/airship/store_result.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# Copyright 2020 University Of Delhi.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-store_result function to log and store result
-"""
-import logging
-from tools.result_api import result_api
-
def store_result(result):
    """
    Log a one-line summary of ``result`` and persist it via result_api.

    :param result: dict with at least 'criteria' and 'case_name' keys
    """
    logger = logging.getLogger(__name__)
    # Lazy %-style args: the message is only formatted if INFO is enabled
    # (idiom fix: the original eagerly built an f-string).
    logger.info('[State: %s] %s', result["criteria"], result["case_name"])

    result_api.store(result)