From 27cf386ac6a133b5c75d2dbe7864ec7166d74b09 Mon Sep 17 00:00:00 2001
From: Parth Yadav <parthyadav3105@gmail.com>
Date: Mon, 19 Jul 2021 19:26:06 +0530
Subject: Initialize k8s test suite for Airship

This patch updates sdv framework to support multiple test suites.
Current test suites:
    * default
    * k8s
Current Cloud Installers:
    * Airship

The patch also adds makefile for sdv framework to ease development work.
Some useful makefile targets:
    * make sandbox
    * make run
    * make lint
    * make bash
Run `make help` for complete usage guide.

The patch restructures the codebase's directory structure.

Signed-off-by: Parth Yadav <parthyadav3105@gmail.com>
Change-Id: I109d13f84334ec1cfa4f9c17b74d38a979272ea5
---
 sdv/docker/sdvstate/Dockerfile                     |  15 +-
 sdv/docker/sdvstate/Makefile                       | 133 +++++
 sdv/docker/sdvstate/core/__init__.py               |  22 -
 sdv/docker/sdvstate/core/display_report.py         |  57 --
 sdv/docker/sdvstate/core/load_pdf.py               |  54 --
 sdv/docker/sdvstate/example/state.yml              |  20 -
 sdv/docker/sdvstate/internal/__init__.py           |  22 +
 sdv/docker/sdvstate/internal/display_report.py     |  57 ++
 sdv/docker/sdvstate/internal/load_pdf.py           |  56 ++
 sdv/docker/sdvstate/internal/validator/__init__.py |  21 +
 .../internal/validator/airship/__init__.py         |  49 ++
 .../sdvstate/internal/validator/airship/airship.py | 148 +++++
 .../internal/validator/airship/ceph_check.py       |  51 ++
 .../internal/validator/airship/compute_check.py    | 661 +++++++++++++++++++++
 .../airship/monitoring_logging_agent_check.py      | 243 ++++++++
 .../internal/validator/airship/network_check.py    | 114 ++++
 .../internal/validator/airship/pod_health_check.py | 111 ++++
 .../internal/validator/airship/store_result.py     |  28 +
 .../sdvstate/internal/validator/validator.py       |  27 +
 sdv/docker/sdvstate/settings/testsuits.yaml        |  15 +
 sdv/docker/sdvstate/state                          |   7 +-
 sdv/docker/sdvstate/tools/__init__.py              |  18 +
 sdv/docker/sdvstate/validator/__init__.py          |  21 -
 sdv/docker/sdvstate/validator/airship/__init__.py  |  49 --
 sdv/docker/sdvstate/validator/airship/airship.py   | 129 ----
 .../sdvstate/validator/airship/ceph_check.py       |  51 --
 .../sdvstate/validator/airship/compute_check.py    | 661 ---------------------
 .../airship/monitoring_logging_agent_check.py      | 243 --------
 .../sdvstate/validator/airship/network_check.py    | 114 ----
 .../sdvstate/validator/airship/pod_health_check.py | 111 ----
 .../sdvstate/validator/airship/store_result.py     |  28 -
 sdv/docker/sdvstate/validator/validator.py         |  27 -
 32 files changed, 1765 insertions(+), 1598 deletions(-)
 create mode 100644 sdv/docker/sdvstate/Makefile
 delete mode 100644 sdv/docker/sdvstate/core/__init__.py
 delete mode 100644 sdv/docker/sdvstate/core/display_report.py
 delete mode 100644 sdv/docker/sdvstate/core/load_pdf.py
 delete mode 100644 sdv/docker/sdvstate/example/state.yml
 create mode 100644 sdv/docker/sdvstate/internal/__init__.py
 create mode 100644 sdv/docker/sdvstate/internal/display_report.py
 create mode 100644 sdv/docker/sdvstate/internal/load_pdf.py
 create mode 100644 sdv/docker/sdvstate/internal/validator/__init__.py
 create mode 100644 sdv/docker/sdvstate/internal/validator/airship/__init__.py
 create mode 100644 sdv/docker/sdvstate/internal/validator/airship/airship.py
 create mode 100644 sdv/docker/sdvstate/internal/validator/airship/ceph_check.py
 create mode 100644 sdv/docker/sdvstate/internal/validator/airship/compute_check.py
 create mode 100644 sdv/docker/sdvstate/internal/validator/airship/monitoring_logging_agent_check.py
 create mode 100644 sdv/docker/sdvstate/internal/validator/airship/network_check.py
 create mode 100644 sdv/docker/sdvstate/internal/validator/airship/pod_health_check.py
 create mode 100644 sdv/docker/sdvstate/internal/validator/airship/store_result.py
 create mode 100644 sdv/docker/sdvstate/internal/validator/validator.py
 create mode 100644 sdv/docker/sdvstate/settings/testsuits.yaml
 create mode 100644 sdv/docker/sdvstate/tools/__init__.py
 delete mode 100644 sdv/docker/sdvstate/validator/__init__.py
 delete mode 100644 sdv/docker/sdvstate/validator/airship/__init__.py
 delete mode 100644 sdv/docker/sdvstate/validator/airship/airship.py
 delete mode 100644 sdv/docker/sdvstate/validator/airship/ceph_check.py
 delete mode 100644 sdv/docker/sdvstate/validator/airship/compute_check.py
 delete mode 100644 sdv/docker/sdvstate/validator/airship/monitoring_logging_agent_check.py
 delete mode 100644 sdv/docker/sdvstate/validator/airship/network_check.py
 delete mode 100644 sdv/docker/sdvstate/validator/airship/pod_health_check.py
 delete mode 100644 sdv/docker/sdvstate/validator/airship/store_result.py
 delete mode 100644 sdv/docker/sdvstate/validator/validator.py

(limited to 'sdv/docker')

diff --git a/sdv/docker/sdvstate/Dockerfile b/sdv/docker/sdvstate/Dockerfile
index 7d18e7a..10bc678 100644
--- a/sdv/docker/sdvstate/Dockerfile
+++ b/sdv/docker/sdvstate/Dockerfile
@@ -2,18 +2,17 @@ FROM python:3.8-slim-buster
 
 MAINTAINER Parth Yadav <parthyadav3105@gmail.com>
 
-WORKDIR /state/
+WORKDIR /sdv/
 
-COPY requirements.txt /state/requirements.txt
+COPY requirements.txt requirements.txt
 RUN pip install -r requirements.txt
 RUN rm requirements.txt
 
-COPY core/ /state/core/
-COPY settings/ /state/settings/
-COPY tools/ /state/tools/
-COPY validator/ /state/validator/
-COPY state /state/
-COPY server /state/
+COPY internal/ internal/
+COPY settings/ settings/
+COPY tools/ tools/
+COPY state .
+COPY server .
 
 ENV RESULTS_PATH /tmp/
 ENV RESULTS_FILENAME results.json
diff --git a/sdv/docker/sdvstate/Makefile b/sdv/docker/sdvstate/Makefile
new file mode 100644
index 0000000..1cd9b78
--- /dev/null
+++ b/sdv/docker/sdvstate/Makefile
@@ -0,0 +1,133 @@
+# Set the shell to bash always
+SHELL := /bin/bash
+
+
+#############
+# Options
+############
+
+IMG_TAG="latest"
+WORKDIR="workdir"
+
+###################
+
+# helper for printing yellow-bold formatted messages in makefile.
+# Usage: $S some text message $E
+S=printf "\n\e[1;33m[makefile]:
+E=\e[0m\n"
+
+
+define HELP_MSG
+
+Usage: make [target] arguments=value
+_________________________________________________________________
+Targets:
+
+make sandbox
+	creates new sandbox container.
+
+make sandbox-clean
+	deletes sandbox container.
+
+make build
+	builds container image for sdv.
+
+make bash
+	opens a bash to sandbox container
+
+make run
+	runs sdvstate checks inside sandbox container.
+
+make lint
+	lints sdvstate code and generates report.
+
+make setup-dev
+	creates workdir for local development files.
+
+_________________________________________________________________
+Optional arguments:
+
+IMG_TAG
+	Currently set to '$(IMG_TAG)'
+
+endef
+export HELP_MSG
+help:
+	@echo "$$HELP_MSG"
+
+
+# The following target enables all commands to run in same shell. This enables
+# exit to kill the program
+.ONESHELL:
+
+
+define CONF
+## Path to PDF file
+PDF_FILE: github.com/nfvid/sites/blob/master/intel-pod10.json
+
+#############
+# Airship arguments
+#############
+
+# Path to kube-config file
+KUBE_CONFIG : /sdv/workdir/example/config
+
+MASTER_ROLE_NAME : masters
+WORKER_ROLE_NAME : workers
+endef
+export CONF
+setup-dev:
+	@if [ ! -d "$(WORKDIR)" ]; then \
+		mkdir $(WORKDIR); \
+		mkdir "$(WORKDIR)/example"; \
+		echo "$$CONF" >> "$(WORKDIR)/example/conf.yaml"; \
+		$S: Created local workdir. $E; \
+	fi
+
+
+build:
+	$S: building sdv image... $E
+	@docker build . -t sdv:$(IMG_TAG) -f Dockerfile
+
+
+sandbox-clean:
+	@if [[ "$(shell docker container ls --format "{{.Names}}" | grep sdvsandbox)" == "sdvsandbox" ]]; then \
+	  docker container stop sdvsandbox; \
+	fi
+
+sandbox: build sandbox-clean
+	docker container run --rm -d --name sdvsandbox \
+	-v $(PWD):/sdv/ \
+	-v $(PWD)/workdir/result:/tmp/state \
+	sdv:latest /bin/bash -c "while true; do sleep 10; done;";
+
+
+bash:
+	docker container exec -it sdvsandbox /bin/bash
+
+define sandbox_bash
+	if [[ "$(shell docker container ls --format "{{.Names}}" | grep sdvsandbox)" == "" ]]; then \
+		echo "Sandbox container does not exists. Creating Sandbox..."; \
+		make sandbox; \
+	fi
+	docker container exec -it sdvsandbox bash -c $(1);
+endef
+
+run:
+	@$(call sandbox_bash, "/sdv/state --conf-file /sdv/workdir/example/conf.yaml")
+
+lint:
+	@cp ../../../pylintrc $(WORKDIR)/pylintrc
+	@$(call sandbox_bash, \
+	"pylint --rcfile=/sdv/workdir/pylintrc /sdv/state /sdv/server /sdv/internal /sdv/tools")
+
+
+test:
+	@echo "Todo"
+
+all: help
+
+
+.PHONY: help setup-dev build sandbox-clean sandbox bash lint test run all 
+
+
diff --git a/sdv/docker/sdvstate/core/__init__.py b/sdv/docker/sdvstate/core/__init__.py
deleted file mode 100644
index 47830c5..0000000
--- a/sdv/docker/sdvstate/core/__init__.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Copyright 2020 University Of Delhi.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""
-Core package
-contains all program specific dependencies
-"""
-
-from .load_pdf import load_pdf
-from .display_report import display_report
diff --git a/sdv/docker/sdvstate/core/display_report.py b/sdv/docker/sdvstate/core/display_report.py
deleted file mode 100644
index 97ccb55..0000000
--- a/sdv/docker/sdvstate/core/display_report.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# Copyright 2020 University Of Delhi.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""Display Report
-"""
-
-import logging
-from datetime import datetime as dt
-
-
-
-def display_report(report):
-    """
-    Logs the final report
-    """
-    installer = report['installer']
-    result = report['criteria']
-    start_time = dt.strptime(report['start_date'], '%Y-%m-%d %H:%M:%S')
-    stop_time = dt.strptime(report['stop_date'], '%Y-%m-%d %H:%M:%S')
-    duration = (stop_time - start_time).total_seconds()
-
-    logger = logging.getLogger(__name__)
-    logger.info('')
-    logger.info('')
-    logger.info('========================================')
-    logger.info('')
-    logger.info(f'  Installer: {installer}')
-    logger.info(f'  Duration: {duration}')
-    logger.info(f'  Result: {result}')
-    logger.info('')
-    logger.info('')
-    logger.info(f'  CHECKS PASSED:')
-    logger.info('  =============')
-    for case_name in report['details']['pass']:
-        logger.info(f'  {case_name}')
-    logger.info('')
-    logger.info('')
-    logger.info(f'  CHECKS FAILED:')
-    logger.info('  =============')
-    for case_name in report['details']['fail']:
-        logger.info(f'  {case_name}')
-    logger.info('')
-    logger.info('========================================')
-    logger.info('')
-    logger.info('')
diff --git a/sdv/docker/sdvstate/core/load_pdf.py b/sdv/docker/sdvstate/core/load_pdf.py
deleted file mode 100644
index fa2bf7e..0000000
--- a/sdv/docker/sdvstate/core/load_pdf.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# Copyright 2020 University Of Delhi.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""Loads PDF file into settings
-"""
-
-import json
-import yaml
-import requests
-import os
-
-from tools.conf import settings
-
-def load_pdf():
-    """
-    Updates settings with PDF data
-    """
-    path = settings.getValue('pdf_file')
-    data=""
-    if os.path.exists(path):
-        with open(path) as handle:
-            data = handle.read()
-    else:
-        if (path.find("github.com") != -1):
-            path = path.replace("github.com", "raw.githubusercontent.com")
-            path = path.replace("/blob", "")
-        try:
-            resp = requests.get(path)
-            if resp.status_code == requests.codes.ok:
-                data = resp.text
-        except:
-            raise Exception(f"Invalid path: {path}")
-
-    try:
-        pdf = json.loads(data)
-    except json.decoder.JSONDecodeError:
-        try:
-            pdf = yaml.safe_load(data)
-        except yaml.parser.ParserError:
-            raise Exception(f"Invalid PDF file: {path}")
-
-    settings.setValue('pdf_file', pdf)
diff --git a/sdv/docker/sdvstate/example/state.yml b/sdv/docker/sdvstate/example/state.yml
deleted file mode 100644
index 89dc548..0000000
--- a/sdv/docker/sdvstate/example/state.yml
+++ /dev/null
@@ -1,20 +0,0 @@
-# This is a comment
-
-# values are stored in key:value format
-## keys are case-insensitive
-## values can be int, float, string, dict, list, bool
-
-
-## Path to PDF file
-PDF_FILE: example/intel-pod10.json
-
-#############
-# Airship arguments
-#############
-
-# Path to kube-config file
-KUBE_CONFIG : example/kubepod10
-
-MASTER_ROLE_NAME : masters
-WORKER_ROLE_NAME : workers
-
diff --git a/sdv/docker/sdvstate/internal/__init__.py b/sdv/docker/sdvstate/internal/__init__.py
new file mode 100644
index 0000000..47830c5
--- /dev/null
+++ b/sdv/docker/sdvstate/internal/__init__.py
@@ -0,0 +1,22 @@
+# Copyright 2020 University Of Delhi.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""
+Core package
+contains all program specific dependencies
+"""
+
+from .load_pdf import load_pdf
+from .display_report import display_report
diff --git a/sdv/docker/sdvstate/internal/display_report.py b/sdv/docker/sdvstate/internal/display_report.py
new file mode 100644
index 0000000..97ccb55
--- /dev/null
+++ b/sdv/docker/sdvstate/internal/display_report.py
@@ -0,0 +1,57 @@
+# Copyright 2020 University Of Delhi.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""Display Report
+"""
+
+import logging
+from datetime import datetime as dt
+
+
+
+def display_report(report):
+    """
+    Logs the final report
+    """
+    installer = report['installer']
+    result = report['criteria']
+    start_time = dt.strptime(report['start_date'], '%Y-%m-%d %H:%M:%S')
+    stop_time = dt.strptime(report['stop_date'], '%Y-%m-%d %H:%M:%S')
+    duration = (stop_time - start_time).total_seconds()
+
+    logger = logging.getLogger(__name__)
+    logger.info('')
+    logger.info('')
+    logger.info('========================================')
+    logger.info('')
+    logger.info(f'  Installer: {installer}')
+    logger.info(f'  Duration: {duration}')
+    logger.info(f'  Result: {result}')
+    logger.info('')
+    logger.info('')
+    logger.info(f'  CHECKS PASSED:')
+    logger.info('  =============')
+    for case_name in report['details']['pass']:
+        logger.info(f'  {case_name}')
+    logger.info('')
+    logger.info('')
+    logger.info(f'  CHECKS FAILED:')
+    logger.info('  =============')
+    for case_name in report['details']['fail']:
+        logger.info(f'  {case_name}')
+    logger.info('')
+    logger.info('========================================')
+    logger.info('')
+    logger.info('')
diff --git a/sdv/docker/sdvstate/internal/load_pdf.py b/sdv/docker/sdvstate/internal/load_pdf.py
new file mode 100644
index 0000000..fff2d67
--- /dev/null
+++ b/sdv/docker/sdvstate/internal/load_pdf.py
@@ -0,0 +1,56 @@
+# Copyright 2020 University Of Delhi.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""Loads PDF file into settings
+"""
+
+import json
+import os
+import requests
+import yaml
+
+from tools.conf import settings
+
+def load_pdf():
+    """
+    Updates settings with PDF data
+    """
+    path = settings.getValue('pdf_file')
+    data = ""
+    if os.path.exists(path):
+        with open(path) as handle:
+            data = handle.read()
+    else:
+        if path.find("github.com") != -1:
+            path = path.replace("github.com", "raw.githubusercontent.com")
+            path = path.replace("/blob", "")
+            if path[:8] != "https://":
+                path = "https://" + path
+        try:
+            resp = requests.get(path)
+            if resp.status_code == requests.codes.ok:
+                data = resp.text
+        except:
+            raise Exception(f"Invalid path: {path}")
+
+    try:
+        pdf = json.loads(data)
+    except json.decoder.JSONDecodeError:
+        try:
+            pdf = yaml.safe_load(data)
+        except yaml.parser.ParserError:
+            raise Exception(f"Invalid PDF file: {path}")
+
+    settings.setValue('pdf_file', pdf)
diff --git a/sdv/docker/sdvstate/internal/validator/__init__.py b/sdv/docker/sdvstate/internal/validator/__init__.py
new file mode 100644
index 0000000..0e1fb38
--- /dev/null
+++ b/sdv/docker/sdvstate/internal/validator/__init__.py
@@ -0,0 +1,21 @@
+# Copyright 2020 University Of Delhi.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""
+Package for Validators
+"""
+
+from .validator import Validator
+from .airship.airship import AirshipValidator
diff --git a/sdv/docker/sdvstate/internal/validator/airship/__init__.py b/sdv/docker/sdvstate/internal/validator/airship/__init__.py
new file mode 100644
index 0000000..78e42c4
--- /dev/null
+++ b/sdv/docker/sdvstate/internal/validator/airship/__init__.py
@@ -0,0 +1,49 @@
+# Copyright 2020 University Of Delhi.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""
+Package for Airship
+"""
+
+
+### Pod Health Checks
+from .pod_health_check import pod_health_check
+
+### Ceph Health Checks
+from .ceph_check import ceph_health_check
+
+### Monitoring & Logging Agents Checks
+from .monitoring_logging_agent_check import prometheus_check
+from .monitoring_logging_agent_check import grafana_check
+# from .monitoring_logging_agent_check import prometheus_alert_manager_check
+from .monitoring_logging_agent_check import elasticsearch_check
+from .monitoring_logging_agent_check import kibana_check
+from .monitoring_logging_agent_check import nagios_check
+from .monitoring_logging_agent_check import elasticsearch_exporter_check
+from .monitoring_logging_agent_check import fluentd_exporter_check
+
+### Network Checks
+from .network_check import physical_network_check
+
+### Compute Related Checks
+from .compute_check import reserved_vnf_cores_check
+from .compute_check import isolated_cores_check
+from .compute_check import vswitch_pmd_cores_check
+from .compute_check import vswitch_dpdk_lcores_check
+from .compute_check import os_reserved_cores_check
+from .compute_check import nova_scheduler_filters_check
+from .compute_check import cpu_allocation_ratio_check
+
+from .store_result import store_result
diff --git a/sdv/docker/sdvstate/internal/validator/airship/airship.py b/sdv/docker/sdvstate/internal/validator/airship/airship.py
new file mode 100644
index 0000000..6f1109d
--- /dev/null
+++ b/sdv/docker/sdvstate/internal/validator/airship/airship.py
@@ -0,0 +1,148 @@
+# Copyright 2020 University Of Delhi.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""
+Airship Validator
+"""
+
+import logging
+from datetime import datetime as dt
+
+from tools.conf import settings
+from tools.kube_utils import load_kube_api, delete_kube_curl_pod
+from internal.validator.validator import Validator
+
+from . import *
+
+
+
+
+
+class AirshipValidator(Validator):
+    """Class for Airship Validation
+    """
+
+    def __init__(self):
+        """
+        Initialisation function.
+        """
+        super(AirshipValidator, self).__init__()
+        self._logger = logging.getLogger(__name__)
+
+        self._report = {"installer": "Airship",
+                        "criteria": "pass",
+                        "details": {"total_checks": 0,
+                                    "pass": [],
+                                    "fail": [],
+                                    "metadata": {}
+                                   }
+                        }
+
+        load_kube_api()
+
+
+    def validate(self):
+        """
+        Validation method
+        """
+
+        self._report['scenario'] = 'none'
+        self._report['start_date'] = dt.now().strftime('%Y-%m-%d %H:%M:%S')
+
+        test_suite = settings.getValue("test_suite")
+
+        if test_suite == "default":
+            self._report['case_name'] = 'ook_airship'
+            self.default_suite()
+
+        if test_suite == "k8s":
+            self._report['case_name'] = 'k8s_airship'
+            self.k8s_suite()
+
+        delete_kube_curl_pod()
+
+        self._report['stop_date'] = dt.now().strftime('%Y-%m-%d %H:%M:%S')
+
+
+    def default_suite(self):
+        """
+        Default Test Suite
+        """
+
+        # PLATFORM CHECKS
+        self.update_report(pod_health_check())
+
+        # STORAGE CHECKS
+        self.update_report(ceph_health_check())
+
+        # MONITORING & LOGGING AGENTS CHECKS
+        self.update_report(prometheus_check())
+        self.update_report(grafana_check())
+        ## current version of AlertManager doesn't support this
+        # prometheus_alert_manager_check()
+        self.update_report(elasticsearch_check())
+        self.update_report(kibana_check())
+        self.update_report(nagios_check())
+        self.update_report(elasticsearch_exporter_check())
+        self.update_report(fluentd_exporter_check())
+
+        # NETWORK CHECKS
+        self.update_report(physical_network_check())
+
+        # COMPUTE CHECKS
+        self.update_report(reserved_vnf_cores_check())
+        self.update_report(isolated_cores_check())
+        self.update_report(vswitch_pmd_cores_check())
+        self.update_report(vswitch_dpdk_lcores_check())
+        self.update_report(os_reserved_cores_check())
+        self.update_report(nova_scheduler_filters_check())
+        self.update_report(cpu_allocation_ratio_check())
+
+    def k8s_suite(self):
+        """
+        Kubernetes Platform Test Suite
+        """
+
+
+    def update_report(self, result):
+        """
+        Updates report with new results
+        """
+        case_name = result['case_name']
+        criteria = result['criteria']
+
+        self._report['details']['total_checks'] += 1
+        if criteria == 'pass':
+            self._report['details']['pass'].append(case_name)
+        elif criteria == 'fail':
+            self._report['details']['fail'].append(case_name)
+            self._report['criteria'] = 'fail'
+
+
+
+    def get_report(self):
+        """
+        Return final report as dict
+        """
+        self._report["project_name"] = settings.getValue("project_name")
+        self._report["version"] = settings.getValue("project_version")
+        self._report["build_tag"] = "none"
+
+        pdf = settings.getValue('pdf_file')
+        self._report["pod_name"] = pdf['management_info']['resource_pool_name']
+
+        store_result(self._report)
+
+        return self._report
diff --git a/sdv/docker/sdvstate/internal/validator/airship/ceph_check.py b/sdv/docker/sdvstate/internal/validator/airship/ceph_check.py
new file mode 100644
index 0000000..b33e876
--- /dev/null
+++ b/sdv/docker/sdvstate/internal/validator/airship/ceph_check.py
@@ -0,0 +1,51 @@
+# Copyright 2020 University Of Delhi.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Ceph Related Checks
+"""
+
+import ast
+
+from tools.kube_utils import get_pod_with_labels, kube_exec
+from .store_result import store_result
+
+
+
+
+def ceph_health_check():
+    """
+    Check health of Ceph
+    """
+    pod = get_pod_with_labels('application=ceph,component=mon')
+
+    cmd = ['ceph', 'health', '-f', 'json']
+    response = kube_exec(pod, cmd)
+
+    response = ast.literal_eval(response)
+
+    result = {'category':  'storage',
+              'case_name': 'ceph_health_check',
+              'details': []
+             }
+
+    if response['status'] == 'HEALTH_OK':
+        result['criteria'] = 'pass'
+        result['details'] = 'HEALTH_OK'
+    else:
+        result['criteria'] = 'fail'
+        result['details'] = response
+
+    store_result(result)
+    return result
diff --git a/sdv/docker/sdvstate/internal/validator/airship/compute_check.py b/sdv/docker/sdvstate/internal/validator/airship/compute_check.py
new file mode 100644
index 0000000..a602471
--- /dev/null
+++ b/sdv/docker/sdvstate/internal/validator/airship/compute_check.py
@@ -0,0 +1,661 @@
+# Copyright 2020 University Of Delhi.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Compute Related Checks
+"""
+
+import configparser
+import json
+import re
+
+from tools.kube_utils import kube_exec, get_pod_with_labels
+from tools.conf import settings
+from .store_result import store_result
+
+
+###########
+# Checks
+###########
+
def isolated_cores_check():
    """
    Verify that the isolated cores traced from the deployment match the
    cores required by the PDF.

    :return: result dict with category/case_name/criteria/details
    """
    traced = trace_isolated_cores()
    required = required_isolated_cores()

    result = {'category':  'compute',
              'case_name': 'isolated_cores_check',
              'details': {'traced_cores': traced,
                          'required_cores': required}}

    # ranges are compared as sets of cores, so '2-5' equals '2-4,5'
    result['criteria'] = 'pass' if is_ranges_equals(traced, required) else 'fail'

    store_result(result)
    return result
+
+
+
def reserved_vnf_cores_check():
    """
    Verify that the VNF-reserved cores traced from the deployment match
    the cores required by the PDF.

    :return: result dict with category/case_name/criteria/details
    """
    traced = trace_reserved_vnf_cores()
    required = required_reserved_vnf_cores()

    result = {'category':  'compute',
              'case_name': 'reserved_vnf_cores_check',
              'details': {'traced_cores': traced,
                          'required_cores': required}}

    # ranges are compared as sets of cores, so '2-5' equals '2-4,5'
    result['criteria'] = 'pass' if is_ranges_equals(traced, required) else 'fail'

    store_result(result)
    return result
+
+
+
def vswitch_pmd_cores_check():
    """
    Verify that the vswitch PMD cores traced from the deployment match
    the cores required by the PDF.

    :return: result dict with category/case_name/criteria/details
    """
    traced = trace_vswitch_pmd_cores()
    required = required_vswitch_pmd_cores()

    result = {'category':  'compute',
              'case_name': 'vswitch_pmd_cores_check',
              'details': {'traced_cores': traced,
                          'required_cores': required}}

    # ranges are compared as sets of cores, so '2-5' equals '2-4,5'
    result['criteria'] = 'pass' if is_ranges_equals(traced, required) else 'fail'

    store_result(result)
    return result
+
+
+
def vswitch_dpdk_lcores_check():
    """
    Verify that the vswitch DPDK lcores traced from the deployment match
    the cores required by the PDF.

    :return: result dict with category/case_name/criteria/details
    """
    traced = trace_vswitch_dpdk_lcores()
    required = required_vswitch_dpdk_lcores()

    result = {'category':  'compute',
              'case_name': 'vswitch_dpdk_lcores_check',
              'details': {'traced_cores': traced,
                          'required_cores': required}}

    # ranges are compared as sets of cores, so '2-5' equals '2-4,5'
    result['criteria'] = 'pass' if is_ranges_equals(traced, required) else 'fail'

    store_result(result)
    return result
+
+
+
def os_reserved_cores_check():
    """
    Verify that the OS-reserved cores traced from the deployment match
    the cores required by the PDF.

    :return: result dict with category/case_name/criteria/details
    """
    traced = trace_os_reserved_cores()
    required = required_os_reserved_cores()

    result = {'category':  'compute',
              'case_name': 'os_reserved_cores_check',
              'details': {'traced_cores': traced,
                          'required_cores': required}}

    # ranges are compared as sets of cores, so '2-5' equals '2-4,5'
    result['criteria'] = 'pass' if is_ranges_equals(traced, required) else 'fail'

    store_result(result)
    return result
+
+
+
def nova_scheduler_filters_check():
    """
    Verify that the nova scheduler filters traced from the deployment
    match the filters required by the PDF.

    :return: result dict with category/case_name/criteria/details
    """
    traced = trace_nova_scheduler_filters()
    required = required_nova_scheduler_filters()

    result = {'category':  'compute',
              'case_name': 'nova_scheduler_filters_check',
              'details': {'traced_filters': traced,
                          'required_filters': required}}

    # order-insensitive comparison of the two filter lists
    result['criteria'] = 'pass' if are_lists_equal(traced, required) else 'fail'

    store_result(result)
    return result
+
+
+
def cpu_allocation_ratio_check():
    """
    Verify that the cpu_allocation_ratio traced from the deployment
    matches the ratio required by the PDF.

    :return: result dict with category/case_name/criteria/details
    """
    traced = trace_cpu_allocation_ratio()
    required = required_cpu_allocation_ratio()

    result = {'category':  'compute',
              'case_name': 'cpu_allocation_ratio_check',
              'details': {'traced_ratio': traced,
                          'required_ratio': required}}

    result['criteria'] = 'pass' if traced == required else 'fail'

    store_result(result)
    return result
+
+
+
+
+
+
+
+
+###############
+# helper functions
+###############
+
+
+
def trace_isolated_cores():
    """
    Trace isolated_cores from Airship deployment

    :return: value traced from `isolcpus` key in `/proc/cmdline`,
             '' when the kernel was booted without the option
    """
    pod = get_pod_with_labels('application=nova,component=compute')

    cmd = ['cat', '/proc/cmdline']
    proc_cmd = kube_exec(pod, cmd)

    # default to empty; the original left isolcpus_value unbound
    # (NameError) when no `isolcpus` option was present
    isolcpus_value = ''
    for option in proc_cmd.split():
        if 'isolcpus' in option:
            _, isolcpus_value = split_key_value(option)
            break

    return isolcpus_value
+
+
def required_isolated_cores():
    """
    Return `isolated_cpus` from the platform profile used by the
    worker-node Role in the PDF.

    :return: isolated_cores value expected by the PDF
    """
    profile = get_platform_profile_by_role(settings.getValue('WORKER_ROLE_NAME'))
    return profile['isolated_cpus']
+
+
+
+
+
+
def trace_reserved_vnf_cores():
    """
    Trace vnf_reserved_cores from the Airship deployment.

    :return: `vcpu_pin_set` value from the deployed nova.conf,
             '' when the option or section is missing
    """
    try:
        return get_nova_conf().get('DEFAULT', 'vcpu_pin_set')
    except (configparser.NoOptionError, configparser.MissingSectionHeaderError):
        return ''
+
+
def required_reserved_vnf_cores():
    """
    Return `vnf_cores` from the platform profile used by the
    worker-node Role in the PDF.

    :return: vnf_reserved_cores value expected by the PDF
    """
    profile = get_platform_profile_by_role(settings.getValue('WORKER_ROLE_NAME'))
    return profile['vnf_cores']
+
+
+
+
+
+
def trace_vswitch_pmd_cores():
    """
    Trace vswitch_pmd_cores from Airship deployment

    :return: value traced from `other_config:pmd-cpu-mask` in
    openvswitchdb using ovs-vsctl
    """
    ovs_pod = get_pod_with_labels('application=openvswitch,component=openvswitch-vswitchd')

    cmd = ['ovs-vsctl', '-t', '5', 'get', 'Open_vSwitch', '.', 'other_config']
    response = kube_exec(ovs_pod, cmd)

    # convert config str to json str
    # NOTE(review): this rewrites `key=` tokens to `"key":` and bare values
    # to quoted strings via *global* str.replace; if one key or value is a
    # substring of another the replacement can corrupt the string — confirm
    # against real `ovs-vsctl get ... other_config` output
    match = re.findall("[a-zA-Z0-9-]+=", response)
    for key in match:
        response = response.replace(key, '"' + key[:-1] + '":')
    match = re.findall(":[a-zA-Z0-9-]+", response)
    for key in match:
        response = response.replace(key[1:], '"' + key[1:] + '"')

    config = json.loads(response)

    # pmd-cpu-mask is a hex CPU mask, e.g. 0x6 -> cores "1,2"
    if 'pmd-cpu-mask' in config:
        pmd_cores = hex_to_comma_list(config['pmd-cpu-mask'])
    else:
        pmd_cores = ''

    return pmd_cores
+
+
def required_vswitch_pmd_cores():
    """
    Return `vswitch_pmd_cores` from the platform profile used by the
    worker-node Role in the PDF.

    :return: vswitch_pmd_cores value expected by the PDF
    """
    profile = get_platform_profile_by_role(settings.getValue('WORKER_ROLE_NAME'))
    return profile['vswitch_pmd_cores']
+
+
+
+
+
+
def trace_vswitch_dpdk_lcores():
    """
    Trace vswitch_dpdk_lcores from Airship deployment

    :return: value traced from `other_config:dpdk-lcore-mask` in
    openvswitchdb using ovs-vsctl
    """
    ovs_pod = get_pod_with_labels('application=openvswitch,component=openvswitch-vswitchd')

    cmd = ['ovs-vsctl', '-t', '5', 'get', 'Open_vSwitch', '.', 'other_config']
    response = kube_exec(ovs_pod, cmd)

    # convert config str to json str
    # NOTE(review): same fragile global str.replace scheme as
    # trace_vswitch_pmd_cores — can corrupt the string when one key or
    # value is a substring of another; verify against real ovs output
    match = re.findall("[a-zA-Z0-9-]+=", response)
    for key in match:
        response = response.replace(key, '"' + key[:-1] + '":')
    match = re.findall(":[a-zA-Z0-9-]+", response)
    for key in match:
        response = response.replace(key[1:], '"' + key[1:] + '"')

    config = json.loads(response)

    # dpdk-lcore-mask is a hex CPU mask, e.g. 0x6 -> cores "1,2"
    if 'dpdk-lcore-mask' in config:
        pmd_cores = hex_to_comma_list(config['dpdk-lcore-mask'])
    else:
        pmd_cores = ''

    return pmd_cores
+
+
def required_vswitch_dpdk_lcores():
    """
    Return `vswitch_dpdk_lcores` from the platform profile used by the
    worker-node Role in the PDF.

    :return: vswitch_dpdk_lcores value expected by the PDF
    """
    profile = get_platform_profile_by_role(settings.getValue('WORKER_ROLE_NAME'))
    return profile['vswitch_dpdk_lcores']
+
+
+
+
+
+
def trace_os_reserved_cores():
    """
    Trace os_reserved_cores from the Airship deployment.

    os_reserved_cores = all_cores - (reserved_vnf_cores +
                                     vswitch_pmd_cores +
                                     vswitch_dpdk_lcores)

    :return: comma separated string of OS-reserved core numbers
    """
    all_cores = get_cores_by_role(settings.getValue('WORKER_ROLE_NAME'))

    # every core claimed by VNFs or the vswitch is not an OS core
    non_os_cores = set()
    for traced_range in (trace_reserved_vnf_cores(),
                         trace_vswitch_pmd_cores(),
                         trace_vswitch_dpdk_lcores()):
        non_os_cores.update(convert_range_to_list(traced_range))

    os_reserved_cores = set(all_cores) - non_os_cores

    # return as string with comma separated values
    return ','.join(str(core) for core in os_reserved_cores)
+
+
def required_os_reserved_cores():
    """
    Return `os_reserved_cores` from the platform profile used by the
    worker-node Role in the PDF.

    :return: os_reserved_cores value expected by the PDF
    """
    profile = get_platform_profile_by_role(settings.getValue('WORKER_ROLE_NAME'))
    return profile['os_reserved_cores']
+
+
+
+
+
def trace_nova_scheduler_filters():
    """
    Trace scheduler_filters from Airship deployment

    :return: list of filter names from the `enabled_filters` key in
             nova.conf of the actual deployment, whitespace-stripped
    """
    try:
        config = get_nova_conf()
        filters = config.get('filter_scheduler', 'enabled_filters')
    except (configparser.NoOptionError, configparser.MissingSectionHeaderError):
        filters = ''

    # strip each name explicitly: the original called map(str.strip, ...)
    # and discarded the lazy result, so stripping never happened
    return [name.strip() for name in filters.split(',')]
+
def required_nova_scheduler_filters():
    """
    Required nova scheduler_filters by the PDF

    :return: list of filter names, whitespace-stripped
    """
    pdf = settings.getValue('pdf_file')
    filters = pdf['vim_functional']['scheduler_filters']

    # strip each name explicitly: the original called map(str.strip, ...)
    # and discarded the lazy result, so stripping never happened
    return [name.strip() for name in filters.split(',')]
+
+
+
+
+
+
+
def trace_cpu_allocation_ratio():
    """
    Trace cpu_allocation_ratio from Airship deployment

    :return: `cpu_allocation_ratio` from nova.conf as float,
             0.0 when the option is not set
    """
    try:
        config = get_nova_conf()
        cpu_allocation_ratio = config.get('DEFAULT', 'cpu_allocation_ratio')
    except (configparser.NoOptionError, configparser.MissingSectionHeaderError):
        # the original fell through to float('') here, which raises
        # ValueError; report 0.0 so the check fails cleanly instead
        cpu_allocation_ratio = '0'

    return float(cpu_allocation_ratio)
+
def required_cpu_allocation_ratio():
    """
    Return the cpu_allocation_ratio required by the PDF.

    :return: required ratio as float
    """
    ratio = settings.getValue('pdf_file')['vim_functional']['cpu_allocation_ratio']
    return float(ratio)
+
+
+
+
+
+
+
def get_role(role_name):
    """
    Search and return the role named ``role_name`` from the PDF.

    :param role_name: name of the role to look up
    :return: role dict from the PDF
    :raises KeyError: when no such role exists (the original code raised
                      an obscure UnboundLocalError instead)
    """
    roles = settings.getValue('pdf_file')['roles']

    for role in roles:
        if role['name'] == role_name:
            return role

    raise KeyError(f'role "{role_name}" not found in PDF')
+
+
def get_platform_profile(profile_name):
    """
    Search and return the platform_profile named ``profile_name``.

    :param profile_name: name of the platform profile to look up
    :return: platform profile dict from the PDF
    :raises KeyError: when no such profile exists (the original code
                      raised an obscure UnboundLocalError instead)
    """
    platform_profiles = settings.getValue('pdf_file')['platform_profiles']

    for profile in platform_profiles:
        if profile['profile_name'] == profile_name:
            return profile

    raise KeyError(f'platform profile "{profile_name}" not found in PDF')
+
def get_processor_profile(profile_name):
    """
    Search and return the processor_profile named ``profile_name``.

    :param profile_name: name of the processor profile to look up
    :return: processor profile dict from the PDF
    :raises KeyError: when no such profile exists (the original code
                      raised an obscure UnboundLocalError instead)
    """
    processor_profiles = settings.getValue('pdf_file')['processor_profiles']

    for profile in processor_profiles:
        if profile['profile_name'] == profile_name:
            return profile

    raise KeyError(f'processor profile "{profile_name}" not found in PDF')
+
def get_platform_profile_by_role(role_name):
    """
    Return platform profile details of a role.

    :param role_name: name of the role in the PDF
    :return: platform profile dict referenced by that role
    """
    return get_platform_profile(get_role(role_name)['platform_profile'])
+
+
def get_hardware_profile_by_role(role_name):
    """
    Return hardware profile details of a role.

    :param role_name: name of the role in the PDF
    :return: hardware profile dict referenced by that role
    :raises KeyError: when the referenced hardware profile is missing
                      (the original code raised UnboundLocalError instead)
    """
    role = get_role(role_name)

    hardware_profiles = settings.getValue('pdf_file')['hardware_profiles']

    for profile in hardware_profiles:
        if profile['profile_name'] == role['hardware_profile']:
            return profile

    raise KeyError(f'hardware profile "{role["hardware_profile"]}" not found in PDF')
+
+
def get_cores_by_role(role_name):
    """
    Return the list of cpu cores of the server hardware used in the role.

    :param role_name: name of the role in the PDF
    :return: list of core numbers across all NUMA nodes
    """
    hardware_profile = get_hardware_profile_by_role(role_name)
    processor_name = hardware_profile['profile_info']['processor_profile']
    processor = get_processor_profile(processor_name)

    cores = []
    for numa in processor['profile_info']['numas']:
        cores.extend(convert_range_to_list(numa['cpu_set']))
    return cores
+
+
+
+
+
+
+
def get_nova_conf():
    """
    Fetch and parse nova.conf from a running nova-compute pod.

    :return: ConfigParser instance loaded with the pod's nova.conf
    """
    compute_pod = get_pod_with_labels('application=nova,component=compute')
    raw_conf = kube_exec(compute_pod, ['cat', '/etc/nova/nova.conf'])

    parser = configparser.ConfigParser()
    parser.read_string(raw_conf)
    return parser
+
+
+### cpu cores related helper function
+
def convert_range_to_list(x):
    """
    Return the list of numbers described by a range string.

    e.g.: convert_range_to_list('3-5') gives [3, 4, 5]

    :param x: comma separated numbers and `a-b` ranges, may be ''
    :return: de-duplicated list of ints in first-seen order
    """
    # pylint: disable=C0103
    cores = []
    for chunk in x.split(','):
        if '-' in chunk:
            low, high = (int(bound) for bound in chunk.split('-'))
            cores.extend(range(low, high + 1))
        elif chunk != '':
            cores.append(int(chunk))
    # dict.fromkeys removes duplicates while keeping insertion order
    return list(dict.fromkeys(cores))
+
+
def is_ranges_equals(range1, range2):
    """
    Check whether two core-range strings cover the same set of cores.

    e.g.: is_ranges_equals('2-5', '2-4,5') returns True
    """
    cores1 = set(convert_range_to_list(range1))
    cores2 = set(convert_range_to_list(range2))
    # equal iff the symmetric difference is empty
    return not cores1 ^ cores2
+
def are_lists_equal(list1, list2):
    """
    Check whether two lists contain the same elements,
    ignoring order and duplicates.
    """
    # equal iff the symmetric difference of the two sets is empty
    return not set(list1).symmetric_difference(set(list2))
+
+
+
def hex_to_comma_list(hex_mask):
    """
    Convert a CPU mask given in hex to a comma separated list of cores.

    :param hex_mask: hex string, e.g. 'a' or '0xa'
    :return: e.g. '1,3' for mask 0xa; '' for mask 0
    """
    mask = int(hex_mask, 16)
    # every set bit i in the mask marks core i as selected
    cores = [str(i) for i in range(mask.bit_length()) if mask >> i & 1]
    return ','.join(cores)
+
+
def comma_list_to_hex(cpus):
    """
    Convert a comma separated list of cpu cores to the corresponding
    hex cpu-mask string.

    :param cpus: e.g. '1,3'
    :return: zero-padded lowercase hex string, e.g. '0a'
    """
    mask = 0
    for core in cpus.split(","):
        mask |= 1 << int(core)
    return format(mask, '02x')
+
+
+
def split_key_value(key_value_str, delimiter='='):
    """
    splits given string into key and value based on delimiter

    :param key_value_str: example string `someKey=somevalue`
    :param delimiter: default delimiter is `=`
    :return: (key, value), both stripped of surrounding whitespace
    """
    # split only on the FIRST delimiter so values may themselves contain
    # it (e.g. kernel options like `foo=a=b`); the original unpacking
    # raised ValueError in that case
    key, value = key_value_str.split(delimiter, 1)
    return key.strip(), value.strip()
diff --git a/sdv/docker/sdvstate/internal/validator/airship/monitoring_logging_agent_check.py b/sdv/docker/sdvstate/internal/validator/airship/monitoring_logging_agent_check.py
new file mode 100644
index 0000000..3754299
--- /dev/null
+++ b/sdv/docker/sdvstate/internal/validator/airship/monitoring_logging_agent_check.py
@@ -0,0 +1,243 @@
+# Copyright 2020 University Of Delhi.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Monitoring & Logging Agents Related Checks
+"""
+
+import ast
+
+from tools.kube_utils import kube_curl
+from tools.result_api import rfile
+from .store_result import store_result
+
+
def prometheus_check():
    """
    Check health and readiness of the Prometheus service.
    """
    username = "prometheus"
    password = "password123"
    service = "prom-metrics"
    namespace = "osh-infra"

    creds = f'{username}:{password}'
    base_url = f'{service}.{namespace}'

    # both probes must succeed for an overall pass
    res = kube_curl("-sL", "-m", "3", "-u", creds, f'{base_url}/-/healthy')
    health = "pass" if 'Prometheus is Healthy' in res else "fail"

    res = kube_curl("-sL", "-m", "3", "-u", creds, f'{base_url}/-/ready')
    readiness = "pass" if 'Prometheus is Ready' in res else "fail"

    state = "pass" if (health, readiness) == ("pass", "pass") else "fail"

    result = {'category':  'platform',
              'case_name': 'prometheus_check',
              'criteria':  state,
              'details': {'health': health, 'readiness': readiness}}

    store_result(result)
    return result
+
+
+
def grafana_check():
    """
    Check health of Grafana by probing its HTTP health endpoint.
    """
    username = "grafana"
    password = "password123"
    service = "grafana-dashboard"
    namespace = "osh-infra"

    # only the HTTP status code is inspected; the body is discarded
    http_code = kube_curl("-sL", "-m", "3", "-w", "%{http_code}",
                          "-o", "/dev/null", "-u",
                          f'{username}:{password}',
                          f'{service}.{namespace}:3000/api/health')

    result = {'category':  'platform',
              'case_name': 'grafana_check',
              'criteria':  "pass" if http_code == '200' else "fail"}

    store_result(result)
    return result
+
+
def prometheus_alert_manager_check():
    """
    Check health of Alert Manager
    """
    service = "alerts-engine"
    namespace = "osh-infra"

    health = "fail" #default
    res = kube_curl("-sL", "-m", "3", f'{service}.{namespace}:9093/-/healthy')
    # NOTE(review): Alertmanager's /-/healthy endpoint typically answers
    # "OK", not "Prometheus is Healthy" — confirm against the deployed
    # Alertmanager version, otherwise this check may always report fail
    if 'Prometheus is Healthy' in res:
        health = "pass"

    readiness = "fail" #default
    res = kube_curl("-sL", "-m", "3", f'{service}.{namespace}:9093/-/ready')
    # NOTE(review): same concern as above for the /-/ready response text
    if 'Prometheus is Ready' in res:
        readiness = "pass"

    # overall verdict: both probes must pass
    if health == "pass" and readiness == "pass":
        state = "pass"
    else:
        state = "fail"

    result = {'category':  'platform',
              'case_name': 'prometheus_alert_manager_check',
              'criteria':  state,
              'details': {'health': health, 'readiness': readiness}
             }


    store_result(result)
    return result
+
+
def elasticsearch_check():
    """
    Check health of the Elasticsearch cluster.

    Passes when the cluster health endpoint reports status ``green``.
    """
    import json

    username = "elasticsearch"
    password = "password123"
    service = "elasticsearch"
    namespace = "osh-infra"

    state = "fail" #default
    res = kube_curl("-sL", "-m", "3", "-u", f'{username}:{password}',
                    f'{service}.{namespace}/_cluster/health')

    if res == '':
        res = 'Elasticsearch not reachable'
    else:
        # the endpoint returns JSON (with literals like `"timed_out": false`)
        # which ast.literal_eval cannot parse — use json.loads instead
        try:
            res = json.loads(res)
            if res['status'] == 'green':
                state = "pass"
        except (ValueError, KeyError, TypeError):
            # unparsable/unexpected response: keep raw value in details,
            # state stays "fail"
            pass

    result = {'category':  'platform',
              'case_name': 'elasticsearch_check',
              'criteria':  state,
              'details': res
             }

    store_result(result)
    return result
+
+
def kibana_check():
    """
    Check health of Kibana.

    Passes when the status API reports the overall state as ``green``.
    """
    import json

    username = "elasticsearch"
    password = "password123"
    service = "kibana-dash"
    namespace = "osh-infra"

    state = "fail" #default
    res = kube_curl("-sL", "-m", "3", "-u", f'{username}:{password}',
                    f'{service}.{namespace}/api/status')

    if res == '':
        res = 'kibana not reachable'
    else:
        # /api/status returns JSON containing true/false literals which
        # ast.literal_eval cannot parse — use json.loads instead
        try:
            res = json.loads(res)
            if res['status']['overall']['state'] == 'green':
                state = "pass"
        except (ValueError, KeyError, TypeError):
            # unparsable/unexpected response: keep raw value, state stays fail
            pass

    result = {'category':  'platform',
              'case_name': 'kibana_check',
              'criteria':  state,
              'details': rfile(str(res))
             }

    store_result(result)
    return result
+
+
def nagios_check():
    """
    Check health of Nagios via an authenticated HTTP probe.
    """
    username = "nagios"
    password = "password123"
    service = "nagios-metrics"
    namespace = "osh-infra"

    # only the HTTP status code is inspected; the body is discarded
    http_code = kube_curl("-sL", "-m", "3", "-w", "%{http_code}",
                          "-o", "/dev/null", "-u",
                          f'{username}:{password}',
                          f'{service}.{namespace}')

    result = {'category':  'platform',
              'case_name': 'nagios_check',
              'criteria':  "pass" if http_code == '200' else "fail"}

    store_result(result)
    return result
+
+
def elasticsearch_exporter_check():
    """
    Check health of the Elasticsearch exporter's metrics endpoint.
    """
    service = "elasticsearch-exporter"
    namespace = "osh-infra"

    # only the HTTP status code is inspected; the body is discarded
    http_code = kube_curl("-sL", "-m", "3", "-w", "%{http_code}", "-o",
                          "/dev/null", f'{service}.{namespace}:9108/metrics')

    result = {'category':  'platform',
              'case_name': 'elasticsearch_exporter_check',
              'criteria':  "pass" if http_code == '200' else "fail"}

    store_result(result)
    return result
+
+
def fluentd_exporter_check():
    """
    Check health of the Fluentd exporter's metrics endpoint.
    """
    service = "fluentd-exporter"
    namespace = "osh-infra"

    # only the HTTP status code is inspected; the body is discarded
    http_code = kube_curl("-sL", "-m", "3", "-w", "%{http_code}", "-o",
                          "/dev/null", f'{service}.{namespace}:9309/metrics')

    result = {'category':  'platform',
              'case_name': 'fluentd_exporter_check',
              'criteria':  "pass" if http_code == '200' else "fail"}

    store_result(result)
    return result
diff --git a/sdv/docker/sdvstate/internal/validator/airship/network_check.py b/sdv/docker/sdvstate/internal/validator/airship/network_check.py
new file mode 100644
index 0000000..bddf579
--- /dev/null
+++ b/sdv/docker/sdvstate/internal/validator/airship/network_check.py
@@ -0,0 +1,114 @@
+# Copyright 2020 University Of Delhi.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Network Related Checks
+"""
+
+
+import configparser
+
+from tools.conf import settings
+from tools.kube_utils import kube_exec, get_pod_with_labels
+
+from .store_result import store_result
+
+
def physical_network_check():
    """
    Verify each physical network declared in the PDF against neutron's
    ml2 configuration.

    A physical network passes when its type is an enabled ml2 type driver
    AND its name appears in the matching driver's network list. The
    overall check fails if any single network fails.
    """
    ml2_config = neutron_ml2_config()

    physical_networks = settings.getValue('pdf_file')['physical_networks']

    # enabled ml2 type drivers, e.g. ['flat', 'vlan', ...]
    type_drivers = ml2_config.get('ml2', 'type_drivers').split(',')

    # names allowed for flat networks ('*' means any name)
    flat_networks = ml2_config.get('ml2_type_flat', 'flat_networks').split(',')

    # vlan entries look like `physnet:min:max`; keep only the name part
    vlan_networks = []
    network_vlan_ranges = ml2_config.get('ml2_type_vlan', 'network_vlan_ranges').split(',')
    for network in network_vlan_ranges:
        vlan_networks.append(network.split(':')[0])

    result = {'category': 'network',
              'case_name': 'physical_network_check',
              'criteria': 'pass',
              'details': []
             }

    for physnet in physical_networks:

        # per-network verdict, defaults to fail until proven otherwise
        res = {'network_name': physnet['name'],
               'type': physnet['type'],
               'criteria': 'fail'
               }

        # NOTE(review): only 'flat' and 'vlan' types are name-checked here;
        # any other enabled type driver leaves the per-network result 'fail'
        # with no details — confirm that is the intended behavior
        if physnet['type'] in type_drivers:
            if physnet['type'] == 'flat':
                if physnet['name'] in flat_networks or '*' in flat_networks:
                    res['criteria'] = 'pass'
                else:
                    res['details'] = 'physical network name not found'
            if physnet['type'] == 'vlan':
                if physnet['name'] in vlan_networks:
                    res['criteria'] = 'pass'
                else:
                    res['details'] = 'physical network name not found'
        else:
            res['details'] = 'physical network type not found'

        result['details'].append(res)
        # one failing network fails the whole check
        if res['criteria'] == 'fail':
            result['criteria'] = 'fail'

    store_result(result)
    return result
+
+
+
def neutron_ml2_config():
    """
    Build one parsed ml2 configuration from the neutron ovs and sriov
    agent pods.

    :return: ConfigParser loaded with every ml2 config file found
    """
    agent_pods = [
        get_pod_with_labels("application=neutron,component=neutron-ovs-agent"),
        get_pod_with_labels("application=neutron,component=neutron-sriov-agent"),
    ]

    parser = configparser.ConfigParser()
    # later files override earlier ones, ovs configs are read first
    for pod in agent_pods:
        for conf in get_neutron_ml2_conf_from_pod(pod):
            parser.read_string(conf)

    return parser
+
+
+
+
def get_neutron_ml2_conf_from_pod(pod):
    """
    Read every ml2 plugin config file present in ``pod``.

    :param pod: neutron agent pod to read from
    :return: list of config-file contents as strings
    """
    ml2_dir = '/etc/neutron/plugins/ml2/'

    listing = kube_exec(pod, ['ls', ml2_dir])
    filenames = listing.rstrip("\n").split()

    return [kube_exec(pod, ['cat', ml2_dir + name]) for name in filenames]
diff --git a/sdv/docker/sdvstate/internal/validator/airship/pod_health_check.py b/sdv/docker/sdvstate/internal/validator/airship/pod_health_check.py
new file mode 100644
index 0000000..0093ffc
--- /dev/null
+++ b/sdv/docker/sdvstate/internal/validator/airship/pod_health_check.py
@@ -0,0 +1,111 @@
+# Copyright 2020 University Of Delhi.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""
+Pod Health Checks
+"""
+
+
+
+import logging
+
+from tools.kube_utils import kube_api
+from tools.conf import settings
+from tools.result_api import rfile
+
+from .store_result import store_result
+
+
+
def pod_health_check():
    """
    Check health of all pods in the configured Airship namespaces and
    collect logs of every failed pod.
    """
    api = kube_api()

    result = {'category':  'platform',
              'case_name': 'pod_health_check',
              'criteria':  'pass',
              'details': []}

    for namespace in settings.getValue('airship_namespace_list'):
        for pod in api.list_namespaced_pod(namespace).items:
            stats = pod_status(pod)
            if stats['criteria'] == 'fail':
                # attach container logs only for failing pods
                stats['logs'] = get_logs(pod)
                result['criteria'] = 'fail'
            result['details'].append(stats)

    store_result(result)
    return result
+
+
+
def pod_status(pod):
    """
    Check health of a pod and return its status as a result dict.

    A pod passes when every container is either Running or terminated
    with reason Completed; anything else (including missing status info)
    fails and attaches the full pod description.

    :param pod: kubernetes pod object
    :return: dict with criteria/name/namespace/node (+pod_details on fail)
    """
    result = {'criteria': 'pass',
              'name': pod.metadata.name,
              'namespace': pod.metadata.namespace,
              'node': pod.spec.node_name}

    if pod.status.container_statuses is None:
        result['criteria'] = 'fail'
        result['pod_details'] = rfile(str(pod))
    else:
        for container in pod.status.container_statuses:
            # reset per container: the original let `status` leak from the
            # previous iteration (and was unbound when all states were None)
            status = 'Unknown'
            if container.state.running is not None:
                status = 'Running'
            if container.state.terminated is not None:
                status = container.state.terminated.reason
            if container.state.waiting is not None:
                status = container.state.waiting.reason

            if status not in ('Running', 'Completed'):
                result['criteria'] = 'fail'
                result['pod_details'] = rfile(str(pod))

    info = f'[Health: {result["criteria"]}] Name: {result["name"]}, '
    info = info + f'Namespace: {result["namespace"]}, Node: {result["node"]}'

    logger = logging.getLogger(__name__)
    logger.debug(info)
    return result
+
+
def get_logs(pod):
    """
    Collect logs of all containers in ``pod``.

    :param pod: kubernetes pod object
    :return: list of {'container': name, 'log': rfile} dicts
    """
    api = kube_api()
    logs = []
    containers = pod.status.container_statuses or []
    for container in containers:
        waiting = container.state.waiting
        if waiting is not None and waiting.reason == 'PodInitializing':
            # no log exists yet for a container stuck in PodInitializing
            log = 'Not found, status: waiting, reason: PodInitializing'
        else:
            log = api.read_namespaced_pod_log(name=pod.metadata.name,
                                              namespace=pod.metadata.namespace,
                                              container=container.name)
        logs.append({'container': container.name, 'log': rfile(log)})
    return logs
diff --git a/sdv/docker/sdvstate/internal/validator/airship/store_result.py b/sdv/docker/sdvstate/internal/validator/airship/store_result.py
new file mode 100644
index 0000000..52f4e10
--- /dev/null
+++ b/sdv/docker/sdvstate/internal/validator/airship/store_result.py
@@ -0,0 +1,28 @@
+# Copyright 2020 University Of Delhi.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+store_result function to log and store result
+"""
+import logging
+from tools.result_api import result_api
+
+def store_result(result):
+    """
+    Log ``result['criteria']``/``result['case_name']`` at INFO, then persist ``result`` via result_api.
+    """
+    logger = logging.getLogger(__name__)
+    logger.info(f'[State: {result["criteria"]}] {result["case_name"]}')
+
+    result_api.store(result)
diff --git a/sdv/docker/sdvstate/internal/validator/validator.py b/sdv/docker/sdvstate/internal/validator/validator.py
new file mode 100644
index 0000000..4f36008
--- /dev/null
+++ b/sdv/docker/sdvstate/internal/validator/validator.py
@@ -0,0 +1,27 @@
+# Copyright 2020 University Of Delhi.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""
+Interface for Software Validators
+"""
+
+class Validator():
+    """
+    Base interface that concrete software validators subclass.
+    """
+    def __init__(self):
+        """Initialisation function.
+        """
+
\ No newline at end of file
diff --git a/sdv/docker/sdvstate/settings/testsuits.yaml b/sdv/docker/sdvstate/settings/testsuits.yaml
new file mode 100644
index 0000000..dd154b9
--- /dev/null
+++ b/sdv/docker/sdvstate/settings/testsuits.yaml
@@ -0,0 +1,15 @@
+# Copyright 2020 University Of Delhi.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+test_suite: default
\ No newline at end of file
diff --git a/sdv/docker/sdvstate/state b/sdv/docker/sdvstate/state
index 353df71..0df9592 100755
--- a/sdv/docker/sdvstate/state
+++ b/sdv/docker/sdvstate/state
@@ -31,9 +31,9 @@ import requests
 
 from tools.conf import settings
 from tools.result_api import result_api, Local
-from core import load_pdf
-from core import display_report
-from validator import AirshipValidator
+from internal import load_pdf
+from internal import display_report
+from internal.validator import AirshipValidator
 
 
 VERBOSITY_LEVELS = {
@@ -146,6 +146,7 @@ def parse_arguments():
                        ' or a list of csv lists of conf-file parameters: key=val; e.g. '
                        '[\'KUBE_CONFIG=/path/to/kubeconfig/file\','
                        '\'PDF_FILE=path/to/pdf/file\']')
+    group.add_argument('--test-suite', help='set of checks to perform. values: default, k8s')
 
     group = parser.add_argument_group('override conf-file options')
     group.add_argument('--pdf-file', help='Path to PDF file')
diff --git a/sdv/docker/sdvstate/tools/__init__.py b/sdv/docker/sdvstate/tools/__init__.py
new file mode 100644
index 0000000..b8cd56d
--- /dev/null
+++ b/sdv/docker/sdvstate/tools/__init__.py
@@ -0,0 +1,18 @@
+# Copyright 2020 University Of Delhi.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""
+Tools package
+"""
diff --git a/sdv/docker/sdvstate/validator/__init__.py b/sdv/docker/sdvstate/validator/__init__.py
deleted file mode 100644
index 0e1fb38..0000000
--- a/sdv/docker/sdvstate/validator/__init__.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright 2020 University Of Delhi.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""
-Package for Validators
-"""
-
-from .validator import Validator
-from .airship.airship import AirshipValidator
diff --git a/sdv/docker/sdvstate/validator/airship/__init__.py b/sdv/docker/sdvstate/validator/airship/__init__.py
deleted file mode 100644
index 78e42c4..0000000
--- a/sdv/docker/sdvstate/validator/airship/__init__.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright 2020 University Of Delhi.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""
-Package for Airship
-"""
-
-
-### Pod Health Checks
-from .pod_health_check import pod_health_check
-
-### Ceph Health Checks
-from .ceph_check import ceph_health_check
-
-### Monitoring & Logging Agents Checks
-from .monitoring_logging_agent_check import prometheus_check
-from .monitoring_logging_agent_check import grafana_check
-# from .monitoring_logging_agent_check import prometheus_alert_manager_check
-from .monitoring_logging_agent_check import elasticsearch_check
-from .monitoring_logging_agent_check import kibana_check
-from .monitoring_logging_agent_check import nagios_check
-from .monitoring_logging_agent_check import elasticsearch_exporter_check
-from .monitoring_logging_agent_check import fluentd_exporter_check
-
-### Network Checks
-from .network_check import physical_network_check
-
-### Compute Related Checks
-from .compute_check import reserved_vnf_cores_check
-from .compute_check import isolated_cores_check
-from .compute_check import vswitch_pmd_cores_check
-from .compute_check import vswitch_dpdk_lcores_check
-from .compute_check import os_reserved_cores_check
-from .compute_check import nova_scheduler_filters_check
-from .compute_check import cpu_allocation_ratio_check
-
-from .store_result import store_result
diff --git a/sdv/docker/sdvstate/validator/airship/airship.py b/sdv/docker/sdvstate/validator/airship/airship.py
deleted file mode 100644
index f2bdebd..0000000
--- a/sdv/docker/sdvstate/validator/airship/airship.py
+++ /dev/null
@@ -1,129 +0,0 @@
-# Copyright 2020 University Of Delhi.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""
-Airship Validator
-"""
-
-import logging
-from datetime import datetime as dt
-
-from tools.conf import settings
-from tools.kube_utils import load_kube_api, delete_kube_curl_pod
-from validator.validator import Validator
-
-from . import *
-
-
-
-
-
-class AirshipValidator(Validator):
-    """Class for Airship Validation
-    """
-
-    def __init__(self):
-        """
-        Initialisation function.
-        """
-        super(AirshipValidator, self).__init__()
-        self._logger = logging.getLogger(__name__)
-
-        self._report = {"installer": "Airship",
-                        "criteria": "pass",
-                        "details": {"total_checks": 0,
-                                    "pass": [],
-                                    "fail": [],
-                                    "metadata": {}
-                                   }
-                        }
-
-        load_kube_api()
-
-
-    def validate(self):
-        """
-        Validation method
-        """
-
-        self._report['scenario'] = 'none'
-        self._report['case_name'] = 'ook_airship'
-        self._report['start_date'] = dt.now().strftime('%Y-%m-%d %H:%M:%S')
-
-
-        # PLATFORM CHECKS
-        self.update_report(pod_health_check())
-
-        # STORAGE CHECKS
-        self.update_report(ceph_health_check())
-
-        # MONITORING & LOGGING AGENTS CHECKS
-        self.update_report(prometheus_check())
-        self.update_report(grafana_check())
-        ## current version of AlertManager doesn't support this
-        # prometheus_alert_manager_check()
-        self.update_report(elasticsearch_check())
-        self.update_report(kibana_check())
-        self.update_report(nagios_check())
-        self.update_report(elasticsearch_exporter_check())
-        self.update_report(fluentd_exporter_check())
-
-        # NETWORK CHECKS
-        self.update_report(physical_network_check())
-
-        # COMPUTE CHECKS
-        self.update_report(reserved_vnf_cores_check())
-        self.update_report(isolated_cores_check())
-        self.update_report(vswitch_pmd_cores_check())
-        self.update_report(vswitch_dpdk_lcores_check())
-        self.update_report(os_reserved_cores_check())
-        self.update_report(nova_scheduler_filters_check())
-        self.update_report(cpu_allocation_ratio_check())
-
-        delete_kube_curl_pod()
-
-        self._report['stop_date'] = dt.now().strftime('%Y-%m-%d %H:%M:%S')
-
-
-    def update_report(self, result):
-        """
-        Updates report with new results
-        """
-        case_name = result['case_name']
-        criteria = result['criteria']
-
-        self._report['details']['total_checks'] += 1
-        if criteria == 'pass':
-            self._report['details']['pass'].append(case_name)
-        elif criteria == 'fail':
-            self._report['details']['fail'].append(case_name)
-            self._report['criteria'] = 'fail'
-
-
-
-    def get_report(self):
-        """
-        Return final report as dict
-        """
-        self._report["project_name"] = settings.getValue("project_name")
-        self._report["version"] = settings.getValue("project_version")
-        self._report["build_tag"] = "none"
-
-        pdf = settings.getValue('pdf_file')
-        self._report["pod_name"] = pdf['management_info']['resource_pool_name']
-
-        store_result(self._report)
-
-        return self._report
diff --git a/sdv/docker/sdvstate/validator/airship/ceph_check.py b/sdv/docker/sdvstate/validator/airship/ceph_check.py
deleted file mode 100644
index b33e876..0000000
--- a/sdv/docker/sdvstate/validator/airship/ceph_check.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# Copyright 2020 University Of Delhi.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-Ceph Related Checks
-"""
-
-import ast
-
-from tools.kube_utils import get_pod_with_labels, kube_exec
-from .store_result import store_result
-
-
-
-
-def ceph_health_check():
-    """
-    Check health of Ceph
-    """
-    pod = get_pod_with_labels('application=ceph,component=mon')
-
-    cmd = ['ceph', 'health', '-f', 'json']
-    response = kube_exec(pod, cmd)
-
-    response = ast.literal_eval(response)
-
-    result = {'category':  'storage',
-              'case_name': 'ceph_health_check',
-              'details': []
-             }
-
-    if response['status'] == 'HEALTH_OK':
-        result['criteria'] = 'pass'
-        result['details'] = 'HEALTH_OK'
-    else:
-        result['criteria'] = 'fail'
-        result['details'] = response
-
-    store_result(result)
-    return result
diff --git a/sdv/docker/sdvstate/validator/airship/compute_check.py b/sdv/docker/sdvstate/validator/airship/compute_check.py
deleted file mode 100644
index a602471..0000000
--- a/sdv/docker/sdvstate/validator/airship/compute_check.py
+++ /dev/null
@@ -1,661 +0,0 @@
-# Copyright 2020 University Of Delhi.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-Compute Related Checks
-"""
-
-import configparser
-import json
-import re
-
-from tools.kube_utils import kube_exec, get_pod_with_labels
-from tools.conf import settings
-from .store_result import store_result
-
-
-###########
-# Checks
-###########
-
-def isolated_cores_check():
-    """
-    isolated_cores_check
-    """
-    traced_value = trace_isolated_cores()
-    required_value = required_isolated_cores()
-
-    result = {'category':  'compute',
-              'case_name': 'isolated_cores_check',
-              'details': {'traced_cores': traced_value,
-                          'required_cores': required_value
-                         }
-             }
-
-    if is_ranges_equals(traced_value, required_value):
-        result['criteria'] = 'pass'
-    else:
-        result['criteria'] = 'fail'
-
-
-    store_result(result)
-    return result
-
-
-
-def reserved_vnf_cores_check():
-    """
-    reserved_vnf_cores_check
-    """
-    traced_value = trace_reserved_vnf_cores()
-    required_value = required_reserved_vnf_cores()
-
-    result = {'category':  'compute',
-              'case_name': 'reserved_vnf_cores_check',
-              'details': {'traced_cores': traced_value,
-                          'required_cores': required_value
-                         }
-             }
-
-    if is_ranges_equals(traced_value, required_value):
-        result['criteria'] = 'pass'
-    else:
-        result['criteria'] = 'fail'
-
-
-    store_result(result)
-    return result
-
-
-
-def vswitch_pmd_cores_check():
-    """
-    vswitch_pmd_cores_check
-    """
-    traced_value = trace_vswitch_pmd_cores()
-    required_value = required_vswitch_pmd_cores()
-
-    result = {'category':  'compute',
-              'case_name': 'vswitch_pmd_cores_check',
-              'details': {'traced_cores': traced_value,
-                          'required_cores': required_value
-                         }
-             }
-
-    if is_ranges_equals(traced_value, required_value):
-        result['criteria'] = 'pass'
-    else:
-        result['criteria'] = 'fail'
-
-
-    store_result(result)
-    return result
-
-
-
-def vswitch_dpdk_lcores_check():
-    """
-    vswitch_dpdk_lcores_check
-    """
-    traced_value = trace_vswitch_dpdk_lcores()
-    required_value = required_vswitch_dpdk_lcores()
-
-    result = {'category':  'compute',
-              'case_name': 'vswitch_dpdk_lcores_check',
-              'details': {'traced_cores': traced_value,
-                          'required_cores': required_value
-                         }
-             }
-
-    if is_ranges_equals(traced_value, required_value):
-        result['criteria'] = 'pass'
-    else:
-        result['criteria'] = 'fail'
-
-
-    store_result(result)
-    return result
-
-
-
-def os_reserved_cores_check():
-    """
-    os_reserved_cores_check
-    """
-    traced_value = trace_os_reserved_cores()
-    required_value = required_os_reserved_cores()
-
-    result = {'category':  'compute',
-              'case_name': 'os_reserved_cores_check',
-              'details': {'traced_cores': traced_value,
-                          'required_cores': required_value
-                         }
-             }
-
-    if is_ranges_equals(traced_value, required_value):
-        result['criteria'] = 'pass'
-    else:
-        result['criteria'] = 'fail'
-
-
-    store_result(result)
-    return result
-
-
-
-def nova_scheduler_filters_check():
-    """
-    nova_scheduler_filters_check
-    """
-    traced_value = trace_nova_scheduler_filters()
-    required_value = required_nova_scheduler_filters()
-
-    result = {'category':  'compute',
-              'case_name': 'nova_scheduler_filters_check',
-              'details': {'traced_filters': traced_value,
-                          'required_filters': required_value
-                         }
-             }
-
-    if are_lists_equal(traced_value, required_value):
-        result['criteria'] = 'pass'
-    else:
-        result['criteria'] = 'fail'
-
-    store_result(result)
-    return result
-
-
-
-def cpu_allocation_ratio_check():
-    """
-    cpu_allocation_ratio_check
-    """
-    traced_value = trace_cpu_allocation_ratio()
-    required_value = required_cpu_allocation_ratio()
-
-    result = {'category':  'compute',
-              'case_name': 'cpu_allocation_ratio_check',
-              'details': {'traced_ratio': traced_value,
-                          'required_ratio': required_value
-                         }
-             }
-
-    if traced_value == required_value:
-        result['criteria'] = 'pass'
-    else:
-        result['criteria'] = 'fail'
-
-    store_result(result)
-    return result
-
-
-
-
-
-
-
-
-###############
-# helper functions
-###############
-
-
-
-def trace_isolated_cores():
-    """
-    Trace isolated_cores from Airship deployment
-
-    :return: value traced from `isolcpus` key in `/proc/cmdline`
-    """
-    pod = get_pod_with_labels('application=nova,component=compute')
-
-    cmd = ['cat', '/proc/cmdline']
-    proc_cmd = kube_exec(pod, cmd)
-
-    for option in proc_cmd.split():
-        if 'isolcpus' in option:
-            _, isolcpus_value = split_key_value(option)
-            break
-
-    return isolcpus_value
-
-
-def required_isolated_cores():
-    """
-    Returns value of `isolated_cpus` from platform_profile used by
-    Role for worker nodes in PDF
-
-    :return: isolated_cores value expected by the PDF
-    """
-    worker_role = settings.getValue('WORKER_ROLE_NAME')
-    profile = get_platform_profile_by_role(worker_role)
-    return profile['isolated_cpus']
-
-
-
-
-
-
-def trace_reserved_vnf_cores():
-    """
-    Trace vnf_reserved_cores from Airship deployment
-
-    :return: value traced from `vcpu_pin_set` key in nova.conf
-    of actual deployment
-    """
-    try:
-        config = get_nova_conf()
-        vcpu_pin_set = config.get('DEFAULT', 'vcpu_pin_set')
-    except (configparser.NoOptionError, configparser.MissingSectionHeaderError):
-        vcpu_pin_set = ''
-
-    return vcpu_pin_set
-
-
-def required_reserved_vnf_cores():
-    """
-    Returns value of vnf_cores from platform_profile used by
-    Role for worker nodes in PDF
-
-    :return: vnf_reserverd_core value expected by the PDF
-    """
-    worker_role = settings.getValue('WORKER_ROLE_NAME')
-    profile = get_platform_profile_by_role(worker_role)
-    return profile['vnf_cores']
-
-
-
-
-
-
-def trace_vswitch_pmd_cores():
-    """
-    Trace vswitch_pmd_cores from Airship deployment
-
-    :return: value traced from `other_config:pmd-cpu-mask` in
-    openvswitchdb using ovs-vsctl
-    """
-    ovs_pod = get_pod_with_labels('application=openvswitch,component=openvswitch-vswitchd')
-
-    cmd = ['ovs-vsctl', '-t', '5', 'get', 'Open_vSwitch', '.', 'other_config']
-    response = kube_exec(ovs_pod, cmd)
-
-    # convert config str to json str
-    match = re.findall("[a-zA-Z0-9-]+=", response)
-    for key in match:
-        response = response.replace(key, '"' + key[:-1] + '":')
-    match = re.findall(":[a-zA-Z0-9-]+", response)
-    for key in match:
-        response = response.replace(key[1:], '"' + key[1:] + '"')
-
-    config = json.loads(response)
-
-    if 'pmd-cpu-mask' in config:
-        pmd_cores = hex_to_comma_list(config['pmd-cpu-mask'])
-    else:
-        pmd_cores = ''
-
-    return pmd_cores
-
-
-def required_vswitch_pmd_cores():
-    """
-    Returns value of vswitch_pmd_cores from platform_profile used by
-    Role for worker nodes in PDF
-
-    :return: vswitch_pmd_cores value expected by the PDF
-    """
-    worker_role = settings.getValue('WORKER_ROLE_NAME')
-    profile = get_platform_profile_by_role(worker_role)
-    return profile['vswitch_pmd_cores']
-
-
-
-
-
-
-def trace_vswitch_dpdk_lcores():
-    """
-    Trace vswitch_dpdk_lcores from Airship deployment
-
-    :return: value traced from `other_config:dpdk-lcore-mask` in
-    openvswitchdb using ovs-vsctl
-    """
-    ovs_pod = get_pod_with_labels('application=openvswitch,component=openvswitch-vswitchd')
-
-    cmd = ['ovs-vsctl', '-t', '5', 'get', 'Open_vSwitch', '.', 'other_config']
-    response = kube_exec(ovs_pod, cmd)
-
-    # convert config str to json str
-    match = re.findall("[a-zA-Z0-9-]+=", response)
-    for key in match:
-        response = response.replace(key, '"' + key[:-1] + '":')
-    match = re.findall(":[a-zA-Z0-9-]+", response)
-    for key in match:
-        response = response.replace(key[1:], '"' + key[1:] + '"')
-
-    config = json.loads(response)
-
-    if 'dpdk-lcore-mask' in config:
-        pmd_cores = hex_to_comma_list(config['dpdk-lcore-mask'])
-    else:
-        pmd_cores = ''
-
-    return pmd_cores
-
-
-def required_vswitch_dpdk_lcores():
-    """
-    Returns value of vswitch_dpdk_lcores from platform_profile used by
-    Role for worker nodes in PDF
-
-    :return: vswitch_dpdk_lcores value expected by the PDF
-    """
-    worker_role = settings.getValue('WORKER_ROLE_NAME')
-    profile = get_platform_profile_by_role(worker_role)
-    return profile['vswitch_dpdk_lcores']
-
-
-
-
-
-
-def trace_os_reserved_cores():
-    """
-    Trace os_reserved_cores from Airship deployment
-
-    os_reserved_cores = all_cores - (reserved_vnf_cores +
-                                     vswitch_pmd_cores +
-                                     vswitch_dpdk_lcores)
-    """
-    worker_role = settings.getValue('WORKER_ROLE_NAME')
-    all_cores = get_cores_by_role(worker_role)
-
-    reserved_vnf_cores = trace_reserved_vnf_cores()
-    vswitch_pmd_cores = trace_vswitch_pmd_cores()
-    vswitch_dpdk_lcores = trace_vswitch_dpdk_lcores()
-
-    non_os_cores = []
-    non_os_cores.extend(convert_range_to_list(reserved_vnf_cores))
-    non_os_cores.extend(convert_range_to_list(vswitch_pmd_cores))
-    non_os_cores.extend(convert_range_to_list(vswitch_dpdk_lcores))
-
-    os_reserved_cores = set(all_cores).difference(set(non_os_cores))
-
-    # return as string with comma separated value
-    return ','.join(map(str, list(os_reserved_cores)))
-
-
-def required_os_reserved_cores():
-    """
-    Returns value of os_reserved_cores from platform_profile used by
-    Role for worker nodes in PDF
-
-    :return: os_reserved_cores value expected by the PDF
-    """
-    worker_role = settings.getValue('WORKER_ROLE_NAME')
-    profile = get_platform_profile_by_role(worker_role)
-    return profile['os_reserved_cores']
-
-
-
-
-
-def trace_nova_scheduler_filters():
-    """
-    Trace scheduler_filters from Airship deployment
-
-    :return: value traced from `enabled_filters` key in nova.conf
-    of actual deployment
-    """
-    try:
-        config = get_nova_conf()
-        filters = config.get('filter_scheduler', 'enabled_filters')
-    except (configparser.NoOptionError, configparser.MissingSectionHeaderError):
-        filters = ''
-
-    filters = filters.split(',')
-    map(str.strip, filters)
-
-    return filters
-
-def required_nova_scheduler_filters():
-    """
-    Required nova scheduler_filters by the PDF
-    """
-    pdf = settings.getValue('pdf_file')
-    filters = pdf['vim_functional']['scheduler_filters']
-
-    filters = filters.split(',')
-    map(str.strip, filters)
-
-    return filters
-
-
-
-
-
-
-
-def trace_cpu_allocation_ratio():
-    """
-    Trace cpu_allocation_ratio from Airship deployment
-
-    :return: value traced from `cpu_allocation_ratio` key in nova.conf
-    of actual deployment
-    """
-    try:
-        config = get_nova_conf()
-        cpu_allocation_ratio = config.get('DEFAULT', 'cpu_allocation_ratio')
-    except (configparser.NoOptionError, configparser.MissingSectionHeaderError):
-        cpu_allocation_ratio = ''
-
-    return float(cpu_allocation_ratio)
-
-def required_cpu_allocation_ratio():
-    """
-    Required cpu_allocation_ratio by the PDF
-    """
-    pdf = settings.getValue('pdf_file')
-    cpu_allocation_ratio = pdf['vim_functional']['cpu_allocation_ratio']
-
-    return float(cpu_allocation_ratio)
-
-
-
-
-
-
-
-def get_role(role_name):
-    """
-    Searches and returns role with `role_name`
-    """
-    roles = settings.getValue('pdf_file')['roles']
-
-    for role in roles:
-        if role['name'] == role_name:
-            role_details = role
-
-    return role_details
-
-
-def get_platform_profile(profile_name):
-    """
-    Searches and returns platform_profile with `profile_name`
-    """
-    platform_profiles = settings.getValue('pdf_file')['platform_profiles']
-
-    for profile in platform_profiles:
-        if profile['profile_name'] == profile_name:
-            profile_details = profile
-
-    return profile_details
-
-def get_processor_profile(profile_name):
-    """
-    Searches and returns processor_profile with `profile_name`
-    """
-    processor_profiles = settings.getValue('pdf_file')['processor_profiles']
-
-    for profile in processor_profiles:
-        if profile['profile_name'] == profile_name:
-            profile_details = profile
-
-    return profile_details
-
-def get_platform_profile_by_role(role_name):
-    """
-    Returns platform profile details of a role
-    """
-    role = get_role(role_name)
-    profile = get_platform_profile(role['platform_profile'])
-    return profile
-
-
-def get_hardware_profile_by_role(role_name):
-    """
-    Returns hardware profile details of a role
-    """
-    role = get_role(role_name)
-
-    hardware_profiles = settings.getValue('pdf_file')['hardware_profiles']
-
-    for profile in hardware_profiles:
-        if profile['profile_name'] == role['hardware_profile']:
-            profile_details = profile
-
-    return profile_details
-
-
-def get_cores_by_role(role_name):
-    """
-    Returns cpu cores list of server hardware used in the role
-    """
-    hardware_profile = get_hardware_profile_by_role(role_name)
-    processor_profile = hardware_profile['profile_info']['processor_profile']
-    profile = get_processor_profile(processor_profile)
-
-    cpus = []
-
-    for numa in profile['profile_info']['numas']:
-        cpus.extend(convert_range_to_list(numa['cpu_set']))
-
-    return cpus
-
-
-
-
-
-
-
-def get_nova_conf():
-    """
-    Returns parsed nova.conf
-    """
-    pod = get_pod_with_labels('application=nova,component=compute')
-
-    cmd = ['cat', '/etc/nova/nova.conf']
-    response = kube_exec(pod, cmd)
-
-    config = configparser.ConfigParser()
-    config.read_string(response)
-
-    return config
-
-
-### cpu cores related helper function
-
-def convert_range_to_list(x):
-    """
-    Returns list of numbers from given range as string
-
-    e.g.: convert_range_to_list('3-5') will give [3, 4, 5]
-    """
-    # pylint: disable=C0103
-    result = []
-    for part in x.split(','):
-        if '-' in part:
-            a, b = part.split('-')
-            a, b = int(a), int(b)
-            result.extend(range(a, b + 1))
-        elif part != '':
-            a = int(part)
-            result.append(a)
-    # remove duplicates
-    result = list(dict.fromkeys(result))
-    return result
-
-
-def is_ranges_equals(range1, range2):
-    """
-    Checks whether two ranges passed as string are equal
-
-    e.g.: is_ranges_equals('2-5', '2-4,5') returns true
-    """
-    set1 = set(convert_range_to_list(range1))
-    set2 = set(convert_range_to_list(range2))
-    return set1 == set2
-
-def are_lists_equal(list1, list2):
-    """
-    Checks whether two list are identicals
-    """
-    set1 = set(list1)
-    set2 = set(list2)
-    return set1 == set2
-
-
-
-def hex_to_comma_list(hex_mask):
-    """
-    Converts CPU mask given in hex to list of cores
-    """
-    binary = bin(int(hex_mask, 16))[2:]
-    reversed_binary = binary[::-1]
-    i = 0
-    output = ""
-    for bit in reversed_binary:
-        if bit == '1':
-            output = output + str(i) + ','
-        i = i + 1
-    return output[:-1]
-
-
-def comma_list_to_hex(cpus):
-    """
-    Converts a list of cpu cores in corresponding hex value
-    of cpu-mask
-    """
-    cpu_arr = cpus.split(",")
-    binary_mask = 0
-    for cpu in cpu_arr:
-        binary_mask = binary_mask | (1 << int(cpu))
-    return format(binary_mask, '02x')
-
-
-
-def split_key_value(key_value_str, delimiter='='):
-    """
-    splits given string into key and value based on delimiter
-
-    :param key_value_str: example string `someKey=somevalue`
-    :param delimiter: default delimiter is `=`
-    :return: [ key, value]
-    """
-    key, value = key_value_str.split(delimiter)
-    key = key.strip()
-    value = value.strip()
-    return key, value
diff --git a/sdv/docker/sdvstate/validator/airship/monitoring_logging_agent_check.py b/sdv/docker/sdvstate/validator/airship/monitoring_logging_agent_check.py
deleted file mode 100644
index 3754299..0000000
--- a/sdv/docker/sdvstate/validator/airship/monitoring_logging_agent_check.py
+++ /dev/null
@@ -1,243 +0,0 @@
-# Copyright 2020 University Of Delhi.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-Monitoring & Logging Agents Related Checks
-"""
-
-import ast
-
-from tools.kube_utils import kube_curl
-from tools.result_api import rfile
-from .store_result import store_result
-
-
def prometheus_check():
    """
    Check health of Prometheus.

    Probes the ``/-/healthy`` and ``/-/ready`` endpoints of the
    prom-metrics service; both must answer for the case to pass.
    """
    username = "prometheus"
    password = "password123"
    service = "prom-metrics"
    namespace = "osh-infra"

    creds = f'{username}:{password}'
    endpoint = f'{service}.{namespace}'

    health_res = kube_curl("-sL", "-m", "3", "-u", creds, f'{endpoint}/-/healthy')
    health = "pass" if 'Prometheus is Healthy' in health_res else "fail"

    ready_res = kube_curl("-sL", "-m", "3", "-u", creds, f'{endpoint}/-/ready')
    readiness = "pass" if 'Prometheus is Ready' in ready_res else "fail"

    state = "pass" if (health, readiness) == ("pass", "pass") else "fail"

    result = {'category':  'platform',
              'case_name': 'prometheus_check',
              'criteria':  state,
              'details': {'health': health, 'readiness': readiness}
             }

    store_result(result)
    return result
-
-
-
def grafana_check():
    """
    Check health of Grafana.

    Passes when the Grafana ``/api/health`` endpoint returns HTTP 200.
    """
    username = "grafana"
    password = "password123"
    service = "grafana-dashboard"
    namespace = "osh-infra"

    # -w %{http_code} with -o /dev/null yields only the status code
    code = kube_curl("-sL", "-m", "3", "-w", "%{http_code}",
                     "-o", "/dev/null", "-u",
                     f'{username}:{password}',
                     f'{service}.{namespace}:3000/api/health')
    state = "pass" if code == '200' else "fail"

    result = {'category':  'platform',
              'case_name': 'grafana_check',
              'criteria':  state
             }

    store_result(result)
    return result
-
-
def prometheus_alert_manager_check():
    """
    Check health of Prometheus Alertmanager.

    Probes the Alertmanager ``/-/healthy`` and ``/-/ready`` endpoints.

    Bug fix: the previous version searched the responses for
    'Prometheus is Healthy' / 'Prometheus is Ready' — a copy-paste from
    the Prometheus check. Alertmanager answers these probes with its own
    text (plain 'OK' on current releases), so the check could never
    pass. We now accept a plain 'OK' body or any '... is Healthy' /
    '... is Ready' style message.
    NOTE(review): exact response text varies by Alertmanager version —
    confirm against the deployed release.
    """
    service = "alerts-engine"
    namespace = "osh-infra"

    health = "fail" #default
    res = kube_curl("-sL", "-m", "3", f'{service}.{namespace}:9093/-/healthy')
    if res.strip() == 'OK' or 'is Healthy' in res:
        health = "pass"

    readiness = "fail" #default
    res = kube_curl("-sL", "-m", "3", f'{service}.{namespace}:9093/-/ready')
    if res.strip() == 'OK' or 'is Ready' in res:
        readiness = "pass"

    if health == "pass" and readiness == "pass":
        state = "pass"
    else:
        state = "fail"

    result = {'category':  'platform',
              'case_name': 'prometheus_alert_manager_check',
              'criteria':  state,
              'details': {'health': health, 'readiness': readiness}
             }

    store_result(result)
    return result
-
-
def elasticsearch_check():
    """
    Check health of Elasticsearch cluster.

    Queries ``/_cluster/health`` and passes when the reported cluster
    status is 'green'.

    Bug fix: the response is JSON, which ``ast.literal_eval`` cannot
    parse (it rejects JSON's ``true``/``false``/``null`` tokens, present
    in fields such as ``timed_out``). Use ``json.loads`` instead.
    """
    import json

    username = "elasticsearch"
    password = "password123"
    service = "elasticsearch"
    namespace = "osh-infra"

    state = "fail" #default
    res = kube_curl("-sL", "-m", "3", "-u", f'{username}:{password}',
                    f'{service}.{namespace}/_cluster/health')

    if res == '':
        res = 'Elasticsearch not reachable'
    else:
        res = json.loads(res)
        if res['status'] == 'green':
            state = "pass"

    result = {'category':  'platform',
              'case_name': 'elasticsearch_check',
              'criteria':  state,
              'details': res
             }

    store_result(result)
    return result
-
-
def kibana_check():
    """
    Check health of Kibana.

    Queries ``/api/status`` and passes when the overall state is 'green'.

    Bug fix: the response is JSON, which ``ast.literal_eval`` cannot
    parse (it rejects JSON's ``true``/``false``/``null`` tokens). Use
    ``json.loads`` instead.
    """
    import json

    username = "elasticsearch"
    password = "password123"
    service = "kibana-dash"
    namespace = "osh-infra"

    state = "fail" #default
    res = kube_curl("-sL", "-m", "3", "-u", f'{username}:{password}',
                    f'{service}.{namespace}/api/status')

    if res == '':
        res = 'kibana not reachable'
    else:
        res = json.loads(res)
        # NOTE(review): newer Kibana reports overall state differently —
        # confirm this key path against the deployed Kibana version.
        if res['status']['overall']['state'] == 'green':
            state = "pass"

    result = {'category':  'platform',
              'case_name': 'kibana_check',
              'criteria':  state,
              'details': rfile(str(res))
             }

    store_result(result)
    return result
-
-
def nagios_check():
    """
    Check health of Nagios.

    Passes when the nagios-metrics service answers with HTTP 200.
    """
    username = "nagios"
    password = "password123"
    service = "nagios-metrics"
    namespace = "osh-infra"

    # Discard the body and capture only the HTTP status code.
    code = kube_curl("-sL", "-m", "3", "-w", "%{http_code}",
                     "-o", "/dev/null", "-u",
                     f'{username}:{password}',
                     f'{service}.{namespace}')
    state = "pass" if code == '200' else "fail"

    result = {'category':  'platform',
              'case_name': 'nagios_check',
              'criteria':  state
             }

    store_result(result)
    return result
-
-
def elasticsearch_exporter_check():
    """
    Check health of Elasticsearch Exporter.

    Passes when the exporter's ``/metrics`` endpoint answers HTTP 200.
    """
    service = "elasticsearch-exporter"
    namespace = "osh-infra"

    code = kube_curl("-sL", "-m", "3", "-w", "%{http_code}",
                     "-o", "/dev/null",
                     f'{service}.{namespace}:9108/metrics')
    state = "pass" if code == '200' else "fail"

    result = {'category':  'platform',
              'case_name': 'elasticsearch_exporter_check',
              'criteria':  state
             }

    store_result(result)
    return result
-
-
def fluentd_exporter_check():
    """
    Check health of Fluentd Exporter.

    Passes when the exporter's ``/metrics`` endpoint answers HTTP 200.
    """
    service = "fluentd-exporter"
    namespace = "osh-infra"

    code = kube_curl("-sL", "-m", "3", "-w", "%{http_code}",
                     "-o", "/dev/null",
                     f'{service}.{namespace}:9309/metrics')
    state = "pass" if code == '200' else "fail"

    result = {'category':  'platform',
              'case_name': 'fluentd_exporter_check',
              'criteria':  state
             }

    store_result(result)
    return result
diff --git a/sdv/docker/sdvstate/validator/airship/network_check.py b/sdv/docker/sdvstate/validator/airship/network_check.py
deleted file mode 100644
index bddf579..0000000
--- a/sdv/docker/sdvstate/validator/airship/network_check.py
+++ /dev/null
@@ -1,114 +0,0 @@
-# Copyright 2020 University Of Delhi.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-Network Related Checks
-"""
-
-
-import configparser
-
-from tools.conf import settings
-from tools.kube_utils import kube_exec, get_pod_with_labels
-
-from .store_result import store_result
-
-
def physical_network_check():
    """
    Validates every physical network declared in the PDF against the
    neutron ml2 plugin configuration.

    A network passes when its type is an enabled ml2 type driver AND its
    name appears in the matching driver's network list ('*' acts as a
    wildcard for flat networks). The overall case fails if any single
    network fails.
    """
    ml2_config = neutron_ml2_config()
    physical_networks = settings.getValue('pdf_file')['physical_networks']

    type_drivers = ml2_config.get('ml2', 'type_drivers').split(',')
    flat_networks = ml2_config.get('ml2_type_flat', 'flat_networks').split(',')
    vlan_networks = [entry.split(':')[0]
                     for entry in ml2_config.get('ml2_type_vlan',
                                                 'network_vlan_ranges').split(',')]

    result = {'category': 'network',
              'case_name': 'physical_network_check',
              'criteria': 'pass',
              'details': []
             }

    for physnet in physical_networks:
        res = {'network_name': physnet['name'],
               'type': physnet['type'],
               'criteria': 'fail'
               }

        if physnet['type'] not in type_drivers:
            res['details'] = 'physical network type not found'
        elif physnet['type'] == 'flat':
            if physnet['name'] in flat_networks or '*' in flat_networks:
                res['criteria'] = 'pass'
            else:
                res['details'] = 'physical network name not found'
        elif physnet['type'] == 'vlan':
            if physnet['name'] in vlan_networks:
                res['criteria'] = 'pass'
            else:
                res['details'] = 'physical network name not found'

        result['details'].append(res)
        if res['criteria'] == 'fail':
            result['criteria'] = 'fail'

    store_result(result)
    return result
-
-
-
def neutron_ml2_config():
    """
    Returns parsed ml2 config collected from the neutron agent pods.

    Config files from the ovs agent are read first, then those from the
    sriov agent; later files override overlapping options.
    """
    config = configparser.ConfigParser()
    for labels in ("application=neutron,component=neutron-ovs-agent",
                   "application=neutron,component=neutron-sriov-agent"):
        pod = get_pod_with_labels(labels)
        for conf in get_neutron_ml2_conf_from_pod(pod):
            config.read_string(conf)

    return config
-
-
-
-
def get_neutron_ml2_conf_from_pod(pod):
    """
    Reads ml2 config from a neutron pod.

    Lists every file under the ml2 plugin directory and returns their
    contents as a list of strings.
    """
    ml2_dir = '/etc/neutron/plugins/ml2/'
    listing = kube_exec(pod, ['ls', ml2_dir])
    filenames = listing.rstrip("\n").split()

    return [kube_exec(pod, ['cat', ml2_dir + filename])
            for filename in filenames]
diff --git a/sdv/docker/sdvstate/validator/airship/pod_health_check.py b/sdv/docker/sdvstate/validator/airship/pod_health_check.py
deleted file mode 100644
index 0093ffc..0000000
--- a/sdv/docker/sdvstate/validator/airship/pod_health_check.py
+++ /dev/null
@@ -1,111 +0,0 @@
-# Copyright 2020 University Of Delhi.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""
-Pod Health Checks
-"""
-
-
-
-import logging
-
-from tools.kube_utils import kube_api
-from tools.conf import settings
-from tools.result_api import rfile
-
-from .store_result import store_result
-
-
-
def pod_health_check():
    """
    Check health of all pods and collect logs of failed pods.

    Iterates every pod in the configured Airship namespaces; any failed
    pod marks the whole case as failed and has its container logs
    attached to the per-pod detail entry.
    """
    api = kube_api()

    result = {'category':  'platform',
              'case_name': 'pod_health_check',
              'criteria':  'pass',
              'details': []
             }

    for namespace in settings.getValue('airship_namespace_list'):
        for pod in api.list_namespaced_pod(namespace).items:
            stats = pod_status(pod)
            if stats['criteria'] == 'fail':
                stats['logs'] = get_logs(pod)
                result['criteria'] = 'fail'
            result['details'].append(stats)

    store_result(result)
    return result
-
-
-
def pod_status(pod):
    """
    Check health of a pod and return its status as a result dict.

    A pod passes when every container is 'Running' or terminated with
    reason 'Completed'; otherwise the full pod spec is attached under
    'pod_details'.

    Bug fix: ``status`` was previously assigned only inside the three
    ``if`` branches, so a container with none of running/terminated/
    waiting set raised NameError on the first container and silently
    reused the previous container's status on later ones. It is now
    reset to 'Unknown' for every container.
    """
    result = {'criteria': 'pass',
              'name': pod.metadata.name,
              'namespace': pod.metadata.namespace,
              'node': pod.spec.node_name}

    if pod.status.container_statuses is None:
        result['criteria'] = 'fail'
        result['pod_details'] = rfile(str(pod))
    else:
        for container in pod.status.container_statuses:
            status = 'Unknown'  # default when no state field is populated
            if container.state.running is not None:
                status = 'Running'
            if container.state.terminated is not None:
                status = container.state.terminated.reason
            if container.state.waiting is not None:
                status = container.state.waiting.reason

            if status not in ('Running', 'Completed'):
                result['criteria'] = 'fail'
                result['pod_details'] = rfile(str(pod))

    info = f'[Health: {result["criteria"]}] Name: {result["name"]}, '
    info = info + f'Namespace: {result["namespace"]}, Node: {result["node"]}'

    logger = logging.getLogger(__name__)
    logger.debug(info)
    return result
-
-
def get_logs(pod):
    """
    Collects logs of all containers in ``pod``.

    Containers still waiting in PodInitializing get a placeholder
    message instead of a log fetch.
    """
    api = kube_api()
    logs = []
    statuses = pod.status.container_statuses
    if statuses is None:
        return logs

    for container in statuses:
        waiting = container.state.waiting
        if waiting is not None and waiting.reason == 'PodInitializing':
            log = 'Not found, status: waiting, reason: PodInitializing'
        else:
            log = api.read_namespaced_pod_log(name=pod.metadata.name,
                                              namespace=pod.metadata.namespace,
                                              container=container.name)
        logs.append({'container': container.name, 'log': rfile(log)})

    return logs
diff --git a/sdv/docker/sdvstate/validator/airship/store_result.py b/sdv/docker/sdvstate/validator/airship/store_result.py
deleted file mode 100644
index 52f4e10..0000000
--- a/sdv/docker/sdvstate/validator/airship/store_result.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# Copyright 2020 University Of Delhi.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-store_result function to log and store result
-"""
-import logging
-from tools.result_api import result_api
-
def store_result(result):
    """
    Logs and stores a test-case result.

    :param result: dict with at least 'criteria' and 'case_name' keys
    """
    logger = logging.getLogger(__name__)
    # Lazy %-style args: the message is only built if INFO is enabled.
    logger.info('[State: %s] %s', result['criteria'], result['case_name'])

    result_api.store(result)
diff --git a/sdv/docker/sdvstate/validator/validator.py b/sdv/docker/sdvstate/validator/validator.py
deleted file mode 100644
index 4f36008..0000000
--- a/sdv/docker/sdvstate/validator/validator.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# Copyright 2020 University Of Delhi.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""
-Interface for Software Validators
-"""
-
class Validator():
    """
    Interface for Software to Validate
    """

    def __init__(self):
        """Initialisation function."""
\ No newline at end of file
-- 
cgit 1.2.3-korg