author     Sridhar Rao <sridhar.rao@spirent.com>    2020-08-31 04:28:20 +0000
committer  Gerrit Code Review <gerrit@opnfv.org>    2020-08-31 04:28:20 +0000
commit     27fff834bee372c53e1b5d222c398543ac3f34aa (patch)
tree       2124fd712e414fce0d18e2f80689cb426cc9047f
parent     3c25e018d2169e982cf5a292dd20cbee2a117336 (diff)
parent     9fa1356bf9aabd50c4adcec082eedf1410f9a7a7 (diff)
Merge "Add support for TestAPI, Reporting and new checks"
-rw-r--r--               sdv/docker/sdvstate/core/__init__.py                                       1
-rw-r--r--               sdv/docker/sdvstate/core/display_report.py                                 57
-rw-r--r--               sdv/docker/sdvstate/example/kubepod10                                      20
-rw-r--r--               sdv/docker/sdvstate/example/kubepod15                                      20
-rw-r--r--               sdv/docker/sdvstate/example/state.yml                                       3
-rw-r--r-- [-rwxr-xr-x]  sdv/docker/sdvstate/server                                                  0
-rw-r--r--               sdv/docker/sdvstate/settings/common.yml                                    10
-rwxr-xr-x               sdv/docker/sdvstate/state                                                  15
-rw-r--r--               sdv/docker/sdvstate/validator/airship/__init__.py                          49
-rw-r--r--               sdv/docker/sdvstate/validator/airship/airship.py                           92
-rw-r--r--               sdv/docker/sdvstate/validator/airship/ceph_check.py                        51
-rw-r--r--               sdv/docker/sdvstate/validator/airship/compute_check.py                    646
-rw-r--r--               sdv/docker/sdvstate/validator/airship/monitoring_logging_agent_check.py   243
-rw-r--r--               sdv/docker/sdvstate/validator/airship/network_check.py                    114
-rw-r--r--               sdv/docker/sdvstate/validator/airship/pod_health_check.py                  41
-rw-r--r--               sdv/docker/sdvstate/validator/airship/store_result.py                      28
16 files changed, 1328 insertions(+), 62 deletions(-)
diff --git a/sdv/docker/sdvstate/core/__init__.py b/sdv/docker/sdvstate/core/__init__.py
index ed33752..47830c5 100644
--- a/sdv/docker/sdvstate/core/__init__.py
+++ b/sdv/docker/sdvstate/core/__init__.py
@@ -19,3 +19,4 @@ contains all program specific dependencies
"""
from .load_pdf import load_pdf
+from .display_report import display_report
diff --git a/sdv/docker/sdvstate/core/display_report.py b/sdv/docker/sdvstate/core/display_report.py
new file mode 100644
index 0000000..97ccb55
--- /dev/null
+++ b/sdv/docker/sdvstate/core/display_report.py
@@ -0,0 +1,57 @@
+# Copyright 2020 University Of Delhi.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""Display Report
+"""
+
+import logging
+from datetime import datetime as dt
+
+
+
+def display_report(report):
+ """
+ Logs the final report
+ """
+ installer = report['installer']
+ result = report['criteria']
+ start_time = dt.strptime(report['start_date'], '%Y-%m-%d %H:%M:%S')
+ stop_time = dt.strptime(report['stop_date'], '%Y-%m-%d %H:%M:%S')
+ duration = (stop_time - start_time).total_seconds()
+
+ logger = logging.getLogger(__name__)
+ logger.info('')
+ logger.info('')
+ logger.info('========================================')
+ logger.info('')
+ logger.info(f' Installer: {installer}')
+ logger.info(f' Duration: {duration}')
+ logger.info(f' Result: {result}')
+ logger.info('')
+ logger.info('')
+ logger.info(' CHECKS PASSED:')
+ logger.info(' =============')
+ for case_name in report['details']['pass']:
+ logger.info(f' {case_name}')
+ logger.info('')
+ logger.info('')
+ logger.info(' CHECKS FAILED:')
+ logger.info(' =============')
+ for case_name in report['details']['fail']:
+ logger.info(f' {case_name}')
+ logger.info('')
+ logger.info('========================================')
+ logger.info('')
+ logger.info('')
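
For reference, a minimal sketch of the report dict that display_report() consumes, limited to the keys the function actually reads (installer, criteria, start_date, stop_date and the details pass/fail lists); the field values here are purely illustrative:

    # Illustrative report; key names follow display_report() above, values are hypothetical.
    sample_report = {
        'installer': 'Airship',
        'criteria': 'fail',
        'start_date': '2020-08-31 04:00:00',
        'stop_date': '2020-08-31 04:05:30',
        'details': {'pass': ['pod_health_check', 'ceph_health_check'],
                    'fail': ['physical_network_check']},
    }
    display_report(sample_report)   # logs the summary block with Duration: 330.0
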
diff --git a/sdv/docker/sdvstate/example/kubepod10 b/sdv/docker/sdvstate/example/kubepod10
deleted file mode 100644
index 2717fc6..0000000
--- a/sdv/docker/sdvstate/example/kubepod10
+++ /dev/null
@@ -1,20 +0,0 @@
----
-apiVersion: v1
-clusters:
-- cluster:
- server: https://10.10.100.21:6553
- insecure-skip-tls-verify: true
- name: kubernetes
-contexts:
-- context:
- cluster: kubernetes
- user: admin
- name: admin@kubernetes
-current-context: admin@kubernetes
-kind: Config
-preferences: {}
-users:
-- name: admin
- user:
- client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURZRENDQWtpZ0F3SUJBZ0lVQmN1akh5bmUzMFBnMUw5MnNJZERmWEtlVm5Vd0RRWUpLb1pJaHZjTkFRRUwKQlFBd0tqRVRNQkVHQTFVRUNoTUtTM1ZpWlhKdVpYUmxjekVUTUJFR0ExVUVBeE1LYTNWaVpYSnVaWFJsY3pBZQpGdzB4T1RFd01UY3hOakUzTURCYUZ3MHlNREV3TVRZeE5qRTNNREJhTUNreEZ6QVZCZ05WQkFvVERuTjVjM1JsCmJUcHRZWE4wWlhKek1RNHdEQVlEVlFRREV3VmhaRzFwYmpDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVAKQURDQ0FRb0NnZ0VCQUxRVnpUeG1UTUZHRkdiWks1OGwyUXZkT2hUZVZ2dzVKTWJyVE8wY2hhd1BtdmJXeXczSApMeUFpNTJsZkU5VGdONXBBVzVrVzJmS2tkREMwRnNXZXF2VDV4SFVvbVFGa3RRM2RWMEJnMXRXYVNIdnVHMXQwCndac2hIQWN6RTl0ZS93dFR6ajhkdFl0ZXdIbXpzd1J1bk9sRnFaUVZZT1hReENPYkEvZ2Z1V0o5RUFKNlduZDcKcUhZdEJvbzR0RkhVTmFocDRwUXNNS1VlbDZPUnA4NEM0WnNIenYyZm9Jb2pYd1V2TmJMNUE1VlZjallrK0taZwpCc3IyMWowT0c4N1F3Q0ZuOThMelJqUU92L01FTFRPOEoxemIrK3pvbkg0ZkpDckc1Q2RKNUFQbU81UnBEMGluCmJKNnFOR2QyY0kxaGdVWWx2aWI4QURXc21VelRkWU5wa0JrQ0F3RUFBYU4vTUgwd0RnWURWUjBQQVFIL0JBUUQKQWdXZ01CMEdBMVVkSlFRV01CUUdDQ3NHQVFVRkJ3TUJCZ2dyQmdFRkJRY0RBakFNQmdOVkhSTUJBZjhFQWpBQQpNQjBHQTFVZERnUVdCQlRKSXRERnJwcGR3RExOTWFWY2IzQ1JiNVhBZURBZkJnTlZIU01FR0RBV2dCUjcwU1Z4Ck8wVlpzNXVRQkpaOEZ0cmluT25EaERBTkJna3Foa2lHOXcwQkFRc0ZBQU9DQVFFQUFBRUQrQXNaaEFsWnRucTkKR0w5a1dvWDd5d0ZmR2FRdEgvSnFaT0VGYkx0bFdLVmlHc3gyVWlSRTN2U3l6VlFpMVBhNGR3cXF1MXc4bVNIVworc1REVlN1aGE5Q2NlbzcvT3F4dnl3ME43c0t2L0NPeml6YWF5djlXTmlBYXhFNjRMSk1sTWlrS2wrZG1zSlVMCktVUXJLVzhvcnlhZk4zSzZnd3NLS2FtU2Mzb1J4TG9wWHVveEo5a2lyVG5DOWpMVGdWSU1EM0I5aEtleEtLQ3YKb1hKVkUyMWViVnNiOExiSUcyaldRcWlnVktxWEFRN3gwcEt6RFcvN1dIc1JyRFRkbFpYU0ZUZS9IQUpZd2tuVwp1cmd2blJkZ1BYUHl6cHJhWU9iTCtTV3dvejRTS216OGV5TWpQcFd0TkFZQTdIYm5XT3RqU2NXNFJKWnpaQ1V3CldicStNZz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
- client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb2dJQkFBS0NBUUVBdEJYTlBHWk13VVlVWnRrcm55WFpDOTA2Rk41Vy9Ea2t4dXRNN1J5RnJBK2E5dGJMCkRjY3ZJQ0xuYVY4VDFPQTNta0JibVJiWjhxUjBNTFFXeFo2cTlQbkVkU2laQVdTMURkMVhRR0RXMVpwSWUrNGIKVzNUQm15RWNCek1UMjE3L0MxUE9QeDIxaTE3QWViT3pCRzZjNlVXcGxCVmc1ZERFSTVzRCtCKzVZbjBRQW5wYQpkM3VvZGkwR2lqaTBVZFExcUduaWxDd3dwUjZYbzVHbnpnTGhtd2ZPL1orZ2lpTmZCUzgxc3ZrRGxWVnlOaVQ0CnBtQUd5dmJXUFE0Ynp0REFJV2Yzd3ZOR05BNi84d1F0TTd3blhOdjc3T2ljZmg4a0tzYmtKMG5rQStZN2xHa1AKU0tkc25xbzBaM1p3aldHQlJpVytKdndBTmF5WlROTjFnMm1RR1FJREFRQUJBb0lCQUYxcnlmS0JaMlFFUk9VdAoyQkZSZ3cxQ2tMVHV1dFZSbDZDUnhIQmxFWHMzQlQ3MElwWmRORGRKcEI3bnNkUUhGUkV5UGRKbkpsMVhydWJ0Ckpic1RHc0RIS1lGVnREb2kwa0lGQnhSZ3FGSmJIU3NkVkpmWE0vQ1Q5b1JOblFsNmVIaVoyeTZtN04wR0pIZCsKSDJvM0w3TmI3aUxpREVoc1NyUGw0T05CSWR6VEFQYy91b3hQbVQxQ2ZiQ3hVU051d1EzOS9mWHJVNzJTOFU4ZAoybXd2dDZpczQ2c09IWkNkNG0xNGJENE11Y2VsUG83V2ptT0hRZlUzd0g1NTE5Q0FtV0hDRFA0ZndNY3dYWlJUClZWUHcyU1VZRW9lMS9DM083cVVqMlRTMldJdysveHZOQml6WFpqajZTdmd5ME1FREtzamhsbWM2OE81MVAvajgKcmh3dFp1RUNnWUVBeTE1c2NuRFVidWFob1dJNzlCVzAxdWlyY0V1Vnp2TXhmc3ptbEJhZnp3dDZPL0FNQ3l1NwpKS2ZNR1JFQmxXR1RGMUhiUlREbGZzK3lTNFpjKzRQUDlmZVNVNFI1NWJkczlZRU44K3liSjJvbzVPcFlGOVFkCmtoL2JQRUZkN3pTbVQ0R2l2V2lxNklqVlFrTGNOVWFlczN4WlZ2d2NqUXd5cENtTU1aRlJtN1VDZ1lFQTRyREsKSTZITUdDcTR4eWZuaUhhT0ZkNlVEYjdmS3ZJNTJZYnU5R0IyR0JXdmkrcEhOZ1V1dU44SG9GT0hQS2NuYnkweQoyS0FwRjVaYTFSZUNHNGdSTE8vMjMybU42VnVCMERGWlNnUEFITTJKd1BtOFUrYjlWZFZaZEI1ZWJrTDhxNXlkCmZqM3F3S2NRVTZMR09wVy83ZEhsMUVUWE9kMUYzTi8wNFdzcGlWVUNnWUJtb0UrNXNKYURJSCtRSVRLQUNqUW4KLzJJRVdTQlFQd2xMSTd0NEg3S2xtUFVtS2d6cDFqZXFWOEwzSTAzWlJGUW1BSGpXZ2NaT0tDR2hXenl3NytPUwpERTBiT0U4TFRYVCtyeEdMZG1zVmlNejZPQWdjZmo0dDcwV0RNcmxrYlAxQVFmc04ralBGQk1nWm1BUG9IcXNYCmlEak5YSXhMNFV2czY4cURlUUhsd1FLQmdGTGs3UFg4cTJKRzlReTJsZDc3NDFjeDdoZmNyVVRLRU1kdnBSK3QKeW1GaVJMQTRPbFFScnhVaFVXdWFQOEM1S3gxbmZNbGtQOEtGVTYvS2llUkJiRzV2VFdwQzhnYmNWR3JxTU1sMAo5NkpRc3NmalNxK3Zyd0hkSTNubnhRWXk3cXhlZCtUN0JVWHZrWFBUK1FMaFVhN0lhMitrd01OREc5SDUvMVVTCjE3eUZBb0dBZjBubW53RjJRMTZVYXhhanI2M3hjUFlQS09FY1pHTFcxalhoMHVpNFJnK3lscEdSZ25xdVJzMk8KL3RDYTlUYm1JcG9KZHA3aWFPOTIzenI1MWphcnlBOCtuWWhoZ2dRQ29IdWNIY0ZBR213Ryt6R2NyMlBZYklseAo5TkVsUEFZM2pndFNWTW4yUkhMak0wUWVuTUQ1aG1HcHQvWVJOd3hPNkNBdXhaNUhzOTQ9Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==
diff --git a/sdv/docker/sdvstate/example/kubepod15 b/sdv/docker/sdvstate/example/kubepod15
deleted file mode 100644
index 7710cbc..0000000
--- a/sdv/docker/sdvstate/example/kubepod15
+++ /dev/null
@@ -1,20 +0,0 @@
----
-apiVersion: v1
-clusters:
-- cluster:
- server: https://10.10.150.21:6553
- insecure-skip-tls-verify: true
- name: kubernetes
-contexts:
-- context:
- cluster: kubernetes
- user: admin
- name: admin@kubernetes
-current-context: admin@kubernetes
-kind: Config
-preferences: {}
-users:
-- name: admin
- user:
- client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURZRENDQWtpZ0F3SUJBZ0lVRFZ1T2IvczEyKzR1dGNTRnBuOEhQbFlIVWFBd0RRWUpLb1pJaHZjTkFRRUwKQlFBd0tqRVRNQkVHQTFVRUNoTUtTM1ZpWlhKdVpYUmxjekVUTUJFR0ExVUVBeE1LYTNWaVpYSnVaWFJsY3pBZQpGdzB4T1RFeU1UVXdNelExTURCYUZ3MHlNREV5TVRRd016UTFNREJhTUNreEZ6QVZCZ05WQkFvVERuTjVjM1JsCmJUcHRZWE4wWlhKek1RNHdEQVlEVlFRREV3VmhaRzFwYmpDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVAKQURDQ0FRb0NnZ0VCQUo0Z3R2ZUMyVktnV1BwRHRMTWE3d2k0aE1hZlBEZUVveUg2LzBrWUdKWEF6TTRuVE55NApZdXVldCtBZUdDNnJ6cHNDRG1GcVBrVkRRM1ZkMEsrd05VSXZmOGpZVXlMbWVXUEZxNStqV25SaHpSbUVyT2VBCk9UK0lMa0pFMUN3T2hPbEtjMlB0TjhPUzdFbVR1NmxkZHQ4OXM5Z1M5aXNmbm5JQmY2YkhNdGdqWWJrZEEzbEQKb0VLL282VS9LdkpydTN2L01IRXl6VUwwbjB4UHpHK3ZPVDRpRVZRV3A3M3o0d2gzalN4SENvQmJ4RU9hTk5mSgpoQjNFMUZhSTZMY3U4VHdWdnZ3WStHc3Z3NURXblJ5VXczL0REUXpNMGNQZkc2WUNmeWhjQkVJSUJ5ZEtwUTdYCi9NZ0p4MWV1QmRHdVFheHNaNHhvS0taZW4vQWhCbWZDTUVjQ0F3RUFBYU4vTUgwd0RnWURWUjBQQVFIL0JBUUQKQWdXZ01CMEdBMVVkSlFRV01CUUdDQ3NHQVFVRkJ3TUJCZ2dyQmdFRkJRY0RBakFNQmdOVkhSTUJBZjhFQWpBQQpNQjBHQTFVZERnUVdCQlJMYUVVWXRRaGxMQnFCQUtJdTRrUDRwWWhRTlRBZkJnTlZIU01FR0RBV2dCU2g3bE54CmJXZ1pUSjZKRkUwdHhTdGdIS3hqd3pBTkJna3Foa2lHOXcwQkFRc0ZBQU9DQVFFQXBpWXFVaUMyNFVLM2YxUW8KMnp1YTlTKzVaeWZXOTgvcG9zWDVFZ0x2c25uYmJoaXRFeXptc1RQaG1yNTZ2WmNkVlVjb3B4NFc2M0luaDRUYQpHQlBUMjVwdGZIVEE1bTNURDIrb1dFQXlKMHhBbjR3M0VpdzRhYmY0aCs1Q0JlTm9ldXJlOXhMYlIzNnZZSG9aCnQ0aVk1Q3BraHhud3VLV0FZTnE4a2lsQTlvUzV2bm5ndUMxYVJEckQ5bTJLZlk1aWtiRndGWWUzRzRLTXAyaUgKWVpiMUxhZ3BlZHRjbTJSNnhNZ0RVSktKbkN5WFpIcXp1WHMzT1h1TTFRVzZlMVl2VU1aQUdMV25NYkJ2S3MzNQpyMUdsdFY5OUh0WHBoTnBqeFd6a1RNS0s2K0wwQ0xxNXducVZjVzNUK1Y1V05HbkhWMThBMkhEM0NUc3NRWmxBCm5pbGVXdz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
- client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb2dJQkFBS0NBUUVBbmlDMjk0TFpVcUJZK2tPMHN4cnZDTGlFeHA4OE40U2pJZnIvU1JnWWxjRE16aWRNCjNMaGk2NTYzNEI0WUxxdk9td0lPWVdvK1JVTkRkVjNRcjdBMVFpOS95TmhUSXVaNVk4V3JuNk5hZEdITkdZU3MKNTRBNVA0Z3VRa1RVTEE2RTZVcHpZKzAzdzVMc1NaTzdxVjEyM3oyejJCTDJLeCtlY2dGL3BzY3kyQ05odVIwRAplVU9nUXIranBUOHE4bXU3ZS84d2NUTE5RdlNmVEUvTWI2ODVQaUlSVkJhbnZmUGpDSGVOTEVjS2dGdkVRNW8wCjE4bUVIY1RVVm9qb3R5N3hQQlcrL0JqNGF5L0RrTmFkSEpURGY4TU5ETXpSdzk4YnBnSi9LRndFUWdnSEowcWwKRHRmOHlBbkhWNjRGMGE1QnJHeG5qR2dvcGw2ZjhDRUdaOEl3UndJREFRQUJBb0lCQUJ5cFkyQ3p0LzZSRCsrMAo3QUQyNWRMWDEwRkZSWjN1amI4d0JxdlNFVXE3bXFQWFhjZzRKNzM3aytxc3FjZHozc3diOEUxWis1V0VYcXJjCmFXSWU5MWhhMGJldTlrckNLY2lhNE1QYjBSNTlSN2JUWkovRmp4cmo3VGFYMFRsM0hFSkkrMmRtYlJBbkJtdEQKdXVVMUNzSG1KajRKR2RPeE5JQUhvNEt3WXBmb2NPME9acFhVZFlOQktwSUhseFhOWjJ4RkJiVzh6a1FRekZ6MAplWmQ3YzZNUmlaZFRPd0pqNWl4c0FWSytBTXVGRkFSamNmc1FBZktlS2J2YUdDTFBvQmFSak5US0Z6MEVhWmlZClNTM2NYMDRCTnk0NWNPVWlHK3RsSlgxeEhGT04weittMlArZWQ1dCtHSHB3UUg4ak1ZQzhkZlJCUVdSeGNCTjcKemd3NWp5RUNnWUVBeEM4REVPWm9QNXhOcFZwbXB6OVAvY2NiVE9XekdUTlZpZzFhcmdSMC90RWRRK0lKQlpDdgpqMzlWVk9FeDUrSlJHVmhmSG9NczlXZVZuSmJSZTFyN1QrWU1WOVFCOVYvbDVhdkJoc0Vhc1NpaThsVEpzT3dXCmJRTDQyMDd1QVhGemFoMHhPS253T2gwN3p0TTBFMXQwWFVrR1BFSzE2bldPbFc4K3IyOEEvSmNDZ1lFQXpsZEgKSlo5RTUxT0dmZU96Y2VQdWV0ZU9PYnNvWVU4eHhOaUg0MitKWVpKNFVqNVY2RGw4OHdEaS91NVNVOVBtUWM1dAozOFpncXdRRjRFWklTbk0rbElKZnluUmhmWU9YZXB4bEJnVFBVb2dUeGY1bm5jZjNOWGZrVVJxUWViZURqTEdjClBrbU1LbE9kK21jRnYxdGI4eXhzbVEwbjhFVWplVkdLT3JKVjc5RUNnWUJ1NC9QanRaanZlN1lYNVFWcE84eEgKTWlnb1N4MzAvS001S1ZzOFNhQ24rQ09HbjFsaUgrcGNQaWxKbFJEVWRZUkp3ejNnelZ5NFNoaXpManl5Y1RiawpickJEWkw3R3A3SVhKQUo3M09MdGlINnlZMkt0OG9TcWthZUFyeGl4RUNPZ3MyZURFK3VKcmNTRW43VXJ5K0gyCmFMUnhrM09vVjFLRS9TQjlvVXo1ZVFLQmdFZ1g4b0hRbmhCOC9IYXJ3aHkrMktvTyttQnRaZlJwNlNldngvck4KRTZFRnZnaHVRekc2TkUvck5XU0EvRDdSd0plcGVuWS9KN05ZMm55NzBiSkJoZEg1bzJKbk8xRFJVM0hCaHdLTgpWNnFzWk13KzBSRXR0cy8xcmM0d2k5NGJJbGxjRFEwdVFVemdua2ZKQ3hjSzRwdWFIKzl4eTB5RnU1azl4aUF3CkF4cWhBb0dBUlc3Qno1UjlSOWZKVUp3ZEhvMGpRVmhrL3lzSWlmQWRxQ3d6blpOcVM4cU9KMXpsSExhWkozMXcKbVdjNzA3UUN6Q3BOMk1YV2lnMzc2VVJpdXFtcEJTZW14bzFRendhQWJhK0Yvd1I1VzlncndzTmZ2RDR6TkVHbgp2dFllSS9taXlJOVFaay9PVkcrblRLL1ZIZExha3FOVFNKQUl6WSttZ2Y4SWphUTUrVW89Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg== \ No newline at end of file
diff --git a/sdv/docker/sdvstate/example/state.yml b/sdv/docker/sdvstate/example/state.yml
index 1ca61e1..89dc548 100644
--- a/sdv/docker/sdvstate/example/state.yml
+++ b/sdv/docker/sdvstate/example/state.yml
@@ -15,3 +15,6 @@ PDF_FILE: example/intel-pod10.json
# Path to kube-config file
KUBE_CONFIG : example/kubepod10
+MASTER_ROLE_NAME : masters
+WORKER_ROLE_NAME : workers
+
diff --git a/sdv/docker/sdvstate/server b/sdv/docker/sdvstate/server
index ca37eca..ca37eca 100755..100644
--- a/sdv/docker/sdvstate/server
+++ b/sdv/docker/sdvstate/server
diff --git a/sdv/docker/sdvstate/settings/common.yml b/sdv/docker/sdvstate/settings/common.yml
index 65f131c..f25f861 100644
--- a/sdv/docker/sdvstate/settings/common.yml
+++ b/sdv/docker/sdvstate/settings/common.yml
@@ -12,7 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
+project_name: cirv-sdv
+project_version: 1.0
##################################
# Program behavior configurations
@@ -24,4 +25,9 @@ log_verbosity: info
# Results
results_dir: /tmp/state/
-save_results_locally: True
\ No newline at end of file
+save_results_locally: True
+
+# Test API
+enable_testapi: True
+testapi_url: http://testresults.opnfv.org/test/api/v1
+
diff --git a/sdv/docker/sdvstate/state b/sdv/docker/sdvstate/state
index 41d17a4..353df71 100755
--- a/sdv/docker/sdvstate/state
+++ b/sdv/docker/sdvstate/state
@@ -27,10 +27,12 @@ import re
import ast
import sys
from datetime import datetime
+import requests
from tools.conf import settings
from tools.result_api import result_api, Local
from core import load_pdf
+from core import display_report
from validator import AirshipValidator
@@ -229,6 +231,19 @@ def main():
if installer == 'airship':
airship = AirshipValidator()
airship.validate()
+ report = airship.get_report()
+
+
+ # Displaying Report
+ display_report(report)
+
+ if settings.getValue('enable_testapi'):
+ logger = logging.getLogger(__name__)
+ logger.info('Publishing results to TestAPI')
+ url = settings.getValue('testapi_url')
+ url += "/results/"
+ response = requests.post(url, json=report)
+ logger.info(response)
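
The publishing block above POSTs the report dict returned by get_report() to <testapi_url>/results/ as JSON. A hedged sketch of that call in isolation, with made-up field values standing in for the dict AirshipValidator builds:

    # Sketch only: endpoint path and key names mirror the code above; values are hypothetical.
    import requests

    report = {'project_name': 'cirv-sdv', 'version': '1.0', 'build_tag': 'none',
              'case_name': 'ook_airship', 'installer': 'Airship', 'scenario': 'none',
              'pod_name': 'intel-pod10', 'criteria': 'pass',
              'start_date': '2020-08-31 04:00:00', 'stop_date': '2020-08-31 04:05:30',
              'details': {'total_checks': 17, 'pass': [], 'fail': [], 'metadata': {}}}

    url = 'http://testresults.opnfv.org/test/api/v1' + '/results/'
    response = requests.post(url, json=report)
    print(response.status_code)
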
diff --git a/sdv/docker/sdvstate/validator/airship/__init__.py b/sdv/docker/sdvstate/validator/airship/__init__.py
new file mode 100644
index 0000000..78e42c4
--- /dev/null
+++ b/sdv/docker/sdvstate/validator/airship/__init__.py
@@ -0,0 +1,49 @@
+# Copyright 2020 University Of Delhi.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""
+Package for Airship
+"""
+
+
+### Pod Health Checks
+from .pod_health_check import pod_health_check
+
+### Ceph Health Checks
+from .ceph_check import ceph_health_check
+
+### Monitoring & Logging Agents Checks
+from .monitoring_logging_agent_check import prometheus_check
+from .monitoring_logging_agent_check import grafana_check
+# from .monitoring_logging_agent_check import prometheus_alert_manager_check
+from .monitoring_logging_agent_check import elasticsearch_check
+from .monitoring_logging_agent_check import kibana_check
+from .monitoring_logging_agent_check import nagios_check
+from .monitoring_logging_agent_check import elasticsearch_exporter_check
+from .monitoring_logging_agent_check import fluentd_exporter_check
+
+### Network Checks
+from .network_check import physical_network_check
+
+### Compute Related Checks
+from .compute_check import reserved_vnf_cores_check
+from .compute_check import isolated_cores_check
+from .compute_check import vswitch_pmd_cores_check
+from .compute_check import vswitch_dpdk_lcores_check
+from .compute_check import os_reserved_cores_check
+from .compute_check import nova_scheduler_filters_check
+from .compute_check import cpu_allocation_ratio_check
+
+from .store_result import store_result
diff --git a/sdv/docker/sdvstate/validator/airship/airship.py b/sdv/docker/sdvstate/validator/airship/airship.py
index e77f06f..18de66d 100644
--- a/sdv/docker/sdvstate/validator/airship/airship.py
+++ b/sdv/docker/sdvstate/validator/airship/airship.py
@@ -18,16 +18,15 @@ Airship Validator
"""
import logging
-import ast
-import json
+from datetime import datetime as dt
from tools.conf import settings
-from tools.result_api import result_api, rfile
-from tools.kube_utils import *
+from tools.kube_utils import load_kube_api
from validator.validator import Validator
-## Checks
-from .pod_health_check import pod_health_check
+from . import *
+
+
@@ -42,10 +41,87 @@ class AirshipValidator(Validator):
super(AirshipValidator, self).__init__()
self._logger = logging.getLogger(__name__)
+ self._report = {"installer": "Airship",
+ "criteria": "pass",
+ "details": {"total_checks": 0,
+ "pass": [],
+ "fail": [],
+ "metadata": {}
+ }
+ }
+
load_kube_api()
-
+
def validate(self):
"""
+ Validation method
"""
- pod_health_check()
+
+ self._report['scenario'] = 'none'
+ self._report['case_name'] = 'ook_airship'
+ self._report['start_date'] = dt.now().strftime('%Y-%m-%d %H:%M:%S')
+
+
+ # PLATFORM CHECKS
+ self.update_report(pod_health_check())
+
+ # STORAGE CHECKS
+ self.update_report(ceph_health_check())
+
+ # MONITORING & LOGGING AGENTS CHECKS
+ self.update_report(prometheus_check())
+ self.update_report(grafana_check())
+ ## current version of AlertManager doesn't support this
+ # prometheus_alert_manager_check()
+ self.update_report(elasticsearch_check())
+ self.update_report(kibana_check())
+ self.update_report(nagios_check())
+ self.update_report(elasticsearch_exporter_check())
+ self.update_report(fluentd_exporter_check())
+
+ # NETWORK CHECKS
+ self.update_report(physical_network_check())
+
+ # COMPUTE CHECKS
+ self.update_report(reserved_vnf_cores_check())
+ self.update_report(isolated_cores_check())
+ self.update_report(vswitch_pmd_cores_check())
+ self.update_report(vswitch_dpdk_lcores_check())
+ self.update_report(os_reserved_cores_check())
+ self.update_report(nova_scheduler_filters_check())
+ self.update_report(cpu_allocation_ratio_check())
+
+ self._report['stop_date'] = dt.now().strftime('%Y-%m-%d %H:%M:%S')
+
+
+ def update_report(self, result):
+ """
+ Updates report with new results
+ """
+ case_name = result['case_name']
+ criteria = result['criteria']
+
+ self._report['details']['total_checks'] += 1
+ if criteria == 'pass':
+ self._report['details']['pass'].append(case_name)
+ elif criteria == 'fail':
+ self._report['details']['fail'].append(case_name)
+ self._report['criteria'] = 'fail'
+
+
+
+ def get_report(self):
+ """
+ Return final report as dict
+ """
+ self._report["project_name"] = settings.getValue("project_name")
+ self._report["version"] = settings.getValue("project_version")
+ self._report["build_tag"] = "none"
+
+ pdf = settings.getValue('pdf_file')
+ self._report["pod_name"] = pdf['management_info']['resource_pool_name']
+
+ store_result(self._report)
+
+ return self._report
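
update_report() only looks at the case_name and criteria keys of each check result; the loop below reproduces its bookkeeping on a bare report skeleton (with hypothetical check results) to show how a single failing check flips the overall criteria:

    # Standalone illustration of the update_report() bookkeeping above.
    report = {'criteria': 'pass',
              'details': {'total_checks': 0, 'pass': [], 'fail': [], 'metadata': {}}}

    for res in ({'case_name': 'ceph_health_check', 'criteria': 'pass'},
                {'case_name': 'nagios_check', 'criteria': 'fail'}):
        report['details']['total_checks'] += 1
        if res['criteria'] == 'pass':
            report['details']['pass'].append(res['case_name'])
        elif res['criteria'] == 'fail':
            report['details']['fail'].append(res['case_name'])
            report['criteria'] = 'fail'

    # report['criteria'] is now 'fail'; total_checks == 2,
    # pass == ['ceph_health_check'], fail == ['nagios_check']
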
diff --git a/sdv/docker/sdvstate/validator/airship/ceph_check.py b/sdv/docker/sdvstate/validator/airship/ceph_check.py
new file mode 100644
index 0000000..b33e876
--- /dev/null
+++ b/sdv/docker/sdvstate/validator/airship/ceph_check.py
@@ -0,0 +1,51 @@
+# Copyright 2020 University Of Delhi.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Ceph Related Checks
+"""
+
+import json
+
+from tools.kube_utils import get_pod_with_labels, kube_exec
+from .store_result import store_result
+
+
+
+
+def ceph_health_check():
+ """
+ Check health of Ceph
+ """
+ pod = get_pod_with_labels('application=ceph,component=mon')
+
+ cmd = ['ceph', 'health', '-f', 'json']
+ response = kube_exec(pod, cmd)
+
+ response = json.loads(response)
+
+ result = {'category': 'storage',
+ 'case_name': 'ceph_health_check',
+ 'details': []
+ }
+
+ if response['status'] == 'HEALTH_OK':
+ result['criteria'] = 'pass'
+ result['details'] = 'HEALTH_OK'
+ else:
+ result['criteria'] = 'fail'
+ result['details'] = response
+
+ store_result(result)
+ return result
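
ceph_health_check() only inspects the 'status' field of the parsed `ceph health -f json` output; a hedged sketch of the two outcomes it distinguishes (surrounding fields vary by Ceph release and are hypothetical here):

    # Only 'status' is examined; anything other than HEALTH_OK fails the check.
    healthy = {'status': 'HEALTH_OK', 'checks': {}}
    degraded = {'status': 'HEALTH_WARN',
                'checks': {'OSD_DOWN': {'severity': 'HEALTH_WARN',
                                        'summary': {'message': '1 osd down'}}}}
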
diff --git a/sdv/docker/sdvstate/validator/airship/compute_check.py b/sdv/docker/sdvstate/validator/airship/compute_check.py
new file mode 100644
index 0000000..ff6f6db
--- /dev/null
+++ b/sdv/docker/sdvstate/validator/airship/compute_check.py
@@ -0,0 +1,646 @@
+# Copyright 2020 University Of Delhi.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Compute Related Checks
+"""
+
+import configparser
+import json
+
+from tools.kube_utils import kube_exec, get_pod_with_labels
+from tools.conf import settings
+from .store_result import store_result
+
+
+###########
+# Checks
+###########
+
+def isolated_cores_check():
+ """
+ isolated_cores_check
+ """
+ traced_value = trace_isolated_cores()
+ required_value = required_isolated_cores()
+
+ result = {'category': 'compute',
+ 'case_name': 'isolated_cores_check',
+ 'details': {'traced_cores': traced_value,
+ 'required_cores': required_value
+ }
+ }
+
+ if is_ranges_equals(traced_value, required_value):
+ result['criteria'] = 'pass'
+ else:
+ result['criteria'] = 'fail'
+
+
+ store_result(result)
+ return result
+
+
+
+def reserved_vnf_cores_check():
+ """
+ reserved_vnf_cores_check
+ """
+ traced_value = trace_reserved_vnf_cores()
+ required_value = required_reserved_vnf_cores()
+
+ result = {'category': 'compute',
+ 'case_name': 'reserved_vnf_cores_check',
+ 'details': {'traced_cores': traced_value,
+ 'required_cores': required_value
+ }
+ }
+
+ if is_ranges_equals(traced_value, required_value):
+ result['criteria'] = 'pass'
+ else:
+ result['criteria'] = 'fail'
+
+
+ store_result(result)
+ return result
+
+
+
+def vswitch_pmd_cores_check():
+ """
+ vswitch_pmd_cores_check
+ """
+ traced_value = trace_vswitch_pmd_cores()
+ required_value = required_vswitch_pmd_cores()
+
+ result = {'category': 'compute',
+ 'case_name': 'vswitch_pmd_cores_check',
+ 'details': {'traced_cores': traced_value,
+ 'required_cores': required_value
+ }
+ }
+
+ if is_ranges_equals(traced_value, required_value):
+ result['criteria'] = 'pass'
+ else:
+ result['criteria'] = 'fail'
+
+
+ store_result(result)
+ return result
+
+
+
+def vswitch_dpdk_lcores_check():
+ """
+ vswitch_dpdk_lcores_check
+ """
+ traced_value = trace_vswitch_dpdk_lcores()
+ required_value = required_vswitch_dpdk_lcores()
+
+ result = {'category': 'compute',
+ 'case_name': 'vswitch_dpdk_lcores_check',
+ 'details': {'traced_cores': traced_value,
+ 'required_cores': required_value
+ }
+ }
+
+ if is_ranges_equals(traced_value, required_value):
+ result['criteria'] = 'pass'
+ else:
+ result['criteria'] = 'fail'
+
+
+ store_result(result)
+ return result
+
+
+
+def os_reserved_cores_check():
+ """
+ os_reserved_cores_check
+ """
+ traced_value = trace_os_reserved_cores()
+ required_value = required_os_reserved_cores()
+
+ result = {'category': 'compute',
+ 'case_name': 'os_reserved_cores_check',
+ 'details': {'traced_cores': traced_value,
+ 'required_cores': required_value
+ }
+ }
+
+ if is_ranges_equals(traced_value, required_value):
+ result['criteria'] = 'pass'
+ else:
+ result['criteria'] = 'fail'
+
+
+ store_result(result)
+ return result
+
+
+
+def nova_scheduler_filters_check():
+ """
+ nova_scheduler_filters_check
+ """
+ traced_value = trace_nova_scheduler_filters()
+ required_value = required_nova_scheduler_filters()
+
+ result = {'category': 'compute',
+ 'case_name': 'nova_scheduler_filters_check',
+ 'details': {'traced_filters': traced_value,
+ 'required_filters': required_value
+ }
+ }
+
+ if are_lists_equal(traced_value, required_value):
+ result['criteria'] = 'pass'
+ else:
+ result['criteria'] = 'fail'
+
+ store_result(result)
+ return result
+
+
+
+def cpu_allocation_ratio_check():
+ """
+ cpu_allocation_ratio_check
+ """
+ traced_value = trace_cpu_allocation_ratio()
+ required_value = required_cpu_allocation_ratio()
+
+ result = {'category': 'compute',
+ 'case_name': 'cpu_allocation_ratio_check',
+ 'details': {'traced_ratio': traced_value,
+ 'required_ratio': required_value
+ }
+ }
+
+ if traced_value == required_value:
+ result['criteria'] = 'pass'
+ else:
+ result['criteria'] = 'fail'
+
+ store_result(result)
+ return result
+
+
+
+
+
+
+
+
+###############
+# helper functions
+###############
+
+
+
+def trace_isolated_cores():
+ """
+ Trace isolated_cores from Airship deployment
+
+ :return: value traced from `isolcpus` key in `/proc/cmdline`
+ """
+ pod = get_pod_with_labels('application=nova,component=compute')
+
+ cmd = ['cat', '/proc/cmdline']
+ proc_cmd = kube_exec(pod, cmd)
+
+ for option in proc_cmd.split():
+ if 'isolcpus' in option:
+ _, isolcpus_value = split_key_value(option)
+ break
+
+ return isolcpus_value
+
+
+def required_isolated_cores():
+ """
+ Returns value of `isolated_cpus` from platform_profile used by
+ Role for worker nodes in PDF
+
+ :return: isolated_cores value expected by the PDF
+ """
+ worker_role = settings.getValue('WORKER_ROLE_NAME')
+ profile = get_platform_profile_by_role(worker_role)
+ return profile['isolated_cpus']
+
+
+
+
+
+
+def trace_reserved_vnf_cores():
+ """
+ Trace vnf_reserved_cores from Airship deployment
+
+ :return: value traced from `vcpu_pin_set` key in nova.conf
+ of actual deployment
+ """
+ try:
+ config = get_nova_conf()
+ vcpu_pin_set = config.get('DEFAULT', 'vcpu_pin_set')
+ except (configparser.NoOptionError, configparser.MissingSectionHeaderError):
+ vcpu_pin_set = ''
+
+ return vcpu_pin_set
+
+
+def required_reserved_vnf_cores():
+ """
+ Returns value of vnf_cores from platform_profile used by
+ Role for worker nodes in PDF
+
+ :return: vnf_reserved_cores value expected by the PDF
+ """
+ worker_role = settings.getValue('WORKER_ROLE_NAME')
+ profile = get_platform_profile_by_role(worker_role)
+ return profile['vnf_cores']
+
+
+
+
+
+
+def trace_vswitch_pmd_cores():
+ """
+ Trace vswitch_pmd_cores from Airship deployment
+
+ :return: value traced from `other_config:pmd-cpu-mask` in
+ openvswitchdb using ovs-vsctl
+ """
+ ovs_pod = get_pod_with_labels('application=openvswitch,component=openvswitch-vswitchd')
+
+ cmd = ['ovs-vsctl', '-t', '5', 'get', 'Open_vSwitch', '.', 'other_config']
+ response = kube_exec(ovs_pod, cmd)
+
+ response = response.replace('=', ':')
+ config = json.loads(response)
+
+ if 'pmd-cpu-mask' in config:
+ pmd_cores = hex_to_comma_list(config['pmd-cpu-mask'])
+ else:
+ pmd_cores = ''
+
+ return pmd_cores
+
+
+def required_vswitch_pmd_cores():
+ """
+ Returns value of vswitch_pmd_cores from platform_profile used by
+ Role for worker nodes in PDF
+
+ :return: vswitch_pmd_cores value expected by the PDF
+ """
+ worker_role = settings.getValue('WORKER_ROLE_NAME')
+ profile = get_platform_profile_by_role(worker_role)
+ return profile['vswitch_pmd_cores']
+
+
+
+
+
+
+def trace_vswitch_dpdk_lcores():
+ """
+ Trace vswitch_dpdk_lcores from Airship deployment
+
+ :return: value traced from `other_config:dpdk-lcore-mask` in
+ openvswitchdb using ovs-vsctl
+ """
+ ovs_pod = get_pod_with_labels('application=openvswitch,component=openvswitch-vswitchd')
+
+ cmd = ['ovs-vsctl', '-t', '5', 'get', 'Open_vSwitch', '.', 'other_config']
+ response = kube_exec(ovs_pod, cmd)
+
+ response = response.replace('=', ':')
+ config = json.loads(response)
+
+ if 'dpdk-lcore-mask' in config:
+ pmd_cores = hex_to_comma_list(config['dpdk-lcore-mask'])
+ else:
+ pmd_cores = ''
+
+ return pmd_cores
+
+
+def required_vswitch_dpdk_lcores():
+ """
+ Returns value of vswitch_dpdk_lcores from platform_profile used by
+ Role for worker nodes in PDF
+
+ :return: vswitch_dpdk_lcores value expected by the PDF
+ """
+ worker_role = settings.getValue('WORKER_ROLE_NAME')
+ profile = get_platform_profile_by_role(worker_role)
+ return profile['vswitch_dpdk_lcores']
+
+
+
+
+
+
+def trace_os_reserved_cores():
+ """
+ Trace os_reserved_cores from Airship deployment
+
+ os_reserved_cores = all_cores - (reserved_vnf_cores +
+ vswitch_pmd_cores +
+ vswitch_dpdk_lcores)
+ """
+ worker_role = settings.getValue('WORKER_ROLE_NAME')
+ all_cores = get_cores_by_role(worker_role)
+
+ reserved_vnf_cores = trace_reserved_vnf_cores()
+ vswitch_pmd_cores = trace_vswitch_pmd_cores()
+ vswitch_dpdk_lcores = trace_vswitch_dpdk_lcores()
+
+ non_os_cores = []
+ non_os_cores.extend(convert_range_to_list(reserved_vnf_cores))
+ non_os_cores.extend(convert_range_to_list(vswitch_pmd_cores))
+ non_os_cores.extend(convert_range_to_list(vswitch_dpdk_lcores))
+
+ os_reserved_cores = set(all_cores).difference(set(non_os_cores))
+
+ # return as string with comma separated value
+ return ','.join(map(str, list(os_reserved_cores)))
+
+
+def required_os_reserved_cores():
+ """
+ Returns value of os_reserved_cores from platform_profile used by
+ Role for worker nodes in PDF
+
+ :return: os_reserved_cores value expected by the PDF
+ """
+ worker_role = settings.getValue('WORKER_ROLE_NAME')
+ profile = get_platform_profile_by_role(worker_role)
+ return profile['os_reserved_cores']
+
+
+
+
+
+def trace_nova_scheduler_filters():
+ """
+ Trace scheduler_filters from Airship deployment
+
+ :return: value traced from `enabled_filters` key in nova.conf
+ of actual deployment
+ """
+ try:
+ config = get_nova_conf()
+ filters = config.get('filter_scheduler', 'enabled_filters')
+ except (configparser.NoOptionError, configparser.MissingSectionHeaderError):
+ filters = ''
+
+ filters = filters.split(',')
+ filters = list(map(str.strip, filters))
+
+ return filters
+
+def required_nova_scheduler_filters():
+ """
+ Required nova scheduler_filters by the PDF
+ """
+ pdf = settings.getValue('pdf_file')
+ filters = pdf['vim_functional']['scheduler_filters']
+
+ filters = filters.split(',')
+ filters = list(map(str.strip, filters))
+
+ return filters
+
+
+
+
+
+
+
+def trace_cpu_allocation_ratio():
+ """
+ Trace cpu_allocation_ratio from Airship deployment
+
+ :return: value traced from `cpu_allocation_ratio` key in nova.conf
+ of actual deployment
+ """
+ try:
+ config = get_nova_conf()
+ cpu_allocation_ratio = config.get('DEFAULT', 'cpu_allocation_ratio')
+ except (configparser.NoOptionError, configparser.MissingSectionHeaderError):
+ cpu_allocation_ratio = ''
+
+ return float(cpu_allocation_ratio)
+
+def required_cpu_allocation_ratio():
+ """
+ Required cpu_allocation_ratio by the PDF
+ """
+ pdf = settings.getValue('pdf_file')
+ cpu_allocation_ratio = pdf['vim_functional']['cpu_allocation_ratio']
+
+ return float(cpu_allocation_ratio)
+
+
+
+
+
+
+
+def get_role(role_name):
+ """
+ Searches and returns role with `role_name`
+ """
+ roles = settings.getValue('pdf_file')['roles']
+
+ for role in roles:
+ if role['name'] == role_name:
+ role_details = role
+
+ return role_details
+
+
+def get_platform_profile(profile_name):
+ """
+ Searches and returns platform_profile with `profile_name`
+ """
+ platform_profiles = settings.getValue('pdf_file')['platform_profiles']
+
+ for profile in platform_profiles:
+ if profile['profile_name'] == profile_name:
+ profile_details = profile
+
+ return profile_details
+
+def get_processor_profile(profile_name):
+ """
+ Searches and returns processor_profile with `profile_name`
+ """
+ processor_profiles = settings.getValue('pdf_file')['processor_profiles']
+
+ for profile in processor_profiles:
+ if profile['profile_name'] == profile_name:
+ profile_details = profile
+
+ return profile_details
+
+def get_platform_profile_by_role(role_name):
+ """
+ Returns platform profile details of a role
+ """
+ role = get_role(role_name)
+ profile = get_platform_profile(role['platform_profile'])
+ return profile
+
+
+def get_hardware_profile_by_role(role_name):
+ """
+ Returns hardware profile details of a role
+ """
+ role = get_role(role_name)
+
+ hardware_profiles = settings.getValue('pdf_file')['hardware_profiles']
+
+ for profile in hardware_profiles:
+ if profile['profile_name'] == role['hardware_profile']:
+ profile_details = profile
+
+ return profile_details
+
+
+def get_cores_by_role(role_name):
+ """
+ Returns cpu cores list of server hardware used in the role
+ """
+ hardware_profile = get_hardware_profile_by_role(role_name)
+ processor_profile = hardware_profile['profile_info']['processor_profile']
+ profile = get_processor_profile(processor_profile)
+
+ cpus = []
+
+ for numa in profile['profile_info']['numas']:
+ cpus.extend(convert_range_to_list(numa['cpu_set']))
+
+ return cpus
+
+
+
+
+
+
+
+def get_nova_conf():
+ """
+ Returns parsed nova.conf
+ """
+ pod = get_pod_with_labels('application=nova,component=compute')
+
+ cmd = ['cat', '/etc/nova/nova.conf']
+ response = kube_exec(pod, cmd)
+
+ config = configparser.ConfigParser()
+ config.read_string(response)
+
+ return config
+
+
+### cpu cores related helper function
+
+def convert_range_to_list(x):
+ """
+ Returns a list of numbers from a range given as a string
+
+ e.g.: convert_range_to_list('3-5') will give [3, 4, 5]
+ """
+ # pylint: disable=C0103
+ result = []
+ for part in x.split(','):
+ if '-' in part:
+ a, b = part.split('-')
+ a, b = int(a), int(b)
+ result.extend(range(a, b + 1))
+ elif part != '':
+ a = int(part)
+ result.append(a)
+ # remove duplicates
+ result = list(dict.fromkeys(result))
+ return result
+
+
+def is_ranges_equals(range1, range2):
+ """
+ Checks whether two ranges passed as string are equal
+
+ e.g.: is_ranges_equals('2-5', '2-4,5') returns true
+ """
+ set1 = set(convert_range_to_list(range1))
+ set2 = set(convert_range_to_list(range2))
+ return set1 == set2
+
+def are_lists_equal(list1, list2):
+ """
+ Checks whether two lists are identical
+ """
+ set1 = set(list1)
+ set2 = set(list2)
+ return set1 == set2
+
+
+
+def hex_to_comma_list(hex_mask):
+ """
+ Converts CPU mask given in hex to list of cores
+ """
+ binary = bin(int(hex_mask, 16))[2:]
+ reversed_binary = binary[::-1]
+ i = 0
+ output = ""
+ for bit in reversed_binary:
+ if bit == '1':
+ output = output + str(i) + ','
+ i = i + 1
+ return output[:-1]
+
+
+def comma_list_to_hex(cpus):
+ """
+ Converts a list of cpu cores into the corresponding hex value
+ of cpu-mask
+ """
+ cpu_arr = cpus.split(",")
+ binary_mask = 0
+ for cpu in cpu_arr:
+ binary_mask = binary_mask | (1 << int(cpu))
+ return format(binary_mask, '02x')
+
+
+
+def split_key_value(key_value_str, delimiter='='):
+ """
+ splits given string into key and value based on delimiter
+
+ :param key_value_str: example string `someKey=somevalue`
+ :param delimiter: default delimiter is `=`
+ :return: (key, value)
+ """
+ key, value = key_value_str.split(delimiter)
+ key = key.strip()
+ value = value.strip()
+ return key, value
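
The range/mask helpers at the end of compute_check.py are pure functions; a few worked examples of their behaviour, which follow directly from the code above:

    convert_range_to_list('3-5')          # [3, 4, 5]
    convert_range_to_list('0-1,16-17')    # [0, 1, 16, 17]
    is_ranges_equals('2-5', '2-4,5')      # True  (both expand to {2, 3, 4, 5})
    hex_to_comma_list('0x30')             # '4,5'  (bits 4 and 5 set)
    comma_list_to_hex('4,5')              # '30'
    split_key_value('isolcpus=2-19')      # ('isolcpus', '2-19')
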
diff --git a/sdv/docker/sdvstate/validator/airship/monitoring_logging_agent_check.py b/sdv/docker/sdvstate/validator/airship/monitoring_logging_agent_check.py
new file mode 100644
index 0000000..3754299
--- /dev/null
+++ b/sdv/docker/sdvstate/validator/airship/monitoring_logging_agent_check.py
@@ -0,0 +1,243 @@
+# Copyright 2020 University Of Delhi.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Monitoring & Logging Agents Related Checks
+"""
+
+import json
+
+from tools.kube_utils import kube_curl
+from tools.result_api import rfile
+from .store_result import store_result
+
+
+def prometheus_check():
+ """
+ Check health of Prometheus
+ """
+ username = "prometheus"
+ password = "password123"
+ service = "prom-metrics"
+ namespace = "osh-infra"
+
+ health = "fail" #default
+ res = kube_curl("-sL", "-m", "3", "-u", f'{username}:{password}', f'{service}.{namespace}/-/healthy')
+ if 'Prometheus is Healthy' in res:
+ health = "pass"
+
+ readiness = "fail" #default
+ res = kube_curl("-sL", "-m", "3", "-u", f'{username}:{password}', f'{service}.{namespace}/-/ready')
+ if 'Prometheus is Ready' in res:
+ readiness = "pass"
+
+ if health == "pass" and readiness == "pass":
+ state = "pass"
+ else:
+ state = "fail"
+
+ result = {'category': 'platform',
+ 'case_name': 'prometheus_check',
+ 'criteria': state,
+ 'details': {'health': health, 'readiness': readiness}
+ }
+
+ store_result(result)
+ return result
+
+
+
+def grafana_check():
+ """
+ Check health of Grafana
+ """
+ username = "grafana"
+ password = "password123"
+ service = "grafana-dashboard"
+ namespace = "osh-infra"
+
+ state = "fail" #default
+ res = kube_curl("-sL", "-m", "3", "-w", "%{http_code}",\
+ "-o", "/dev/null", "-u", \
+ f'{username}:{password}', \
+ f'{service}.{namespace}:3000/api/health')
+ if res == '200':
+ state = "pass"
+
+ result = {'category': 'platform',
+ 'case_name': 'grafana_check',
+ 'criteria': state
+ }
+
+ store_result(result)
+ return result
+
+
+def prometheus_alert_manager_check():
+ """
+ Check health of Alert Manager
+ """
+ service = "alerts-engine"
+ namespace = "osh-infra"
+
+ health = "fail" #default
+ res = kube_curl("-sL", "-m", "3", f'{service}.{namespace}:9093/-/healthy')
+ if 'Prometheus is Healthy' in res:
+ health = "pass"
+
+ readiness = "fail" #default
+ res = kube_curl("-sL", "-m", "3", f'{service}.{namespace}:9093/-/ready')
+ if 'Prometheus is Ready' in res:
+ readiness = "pass"
+
+ if health == "pass" and readiness == "pass":
+ state = "pass"
+ else:
+ state = "fail"
+
+ result = {'category': 'platform',
+ 'case_name': 'prometheus_alert_manager_check',
+ 'criteria': state,
+ 'details': {'health': health, 'readiness': readiness}
+ }
+
+
+ store_result(result)
+ return result
+
+
+def elasticsearch_check():
+ """
+ Check health of Elasticsearch cluster
+ """
+ username = "elasticsearch"
+ password = "password123"
+ service = "elasticsearch"
+ namespace = "osh-infra"
+
+ state = "fail" #default
+ res = kube_curl("-sL", "-m", "3", "-u", f'{username}:{password}', f'{service}.{namespace}/_cluster/health')
+
+ if res == '':
+ res = 'Elasticsearch not reachable'
+ else:
+ res = json.loads(res)
+ if res['status'] == 'green':
+ state = "pass"
+
+ result = {'category': 'platform',
+ 'case_name': 'elasticsearch_check',
+ 'criteria': state,
+ 'details': res
+ }
+
+ store_result(result)
+ return result
+
+
+def kibana_check():
+ """
+ Check health of Kibana
+ """
+ username = "elasticsearch"
+ password = "password123"
+ service = "kibana-dash"
+ namespace = "osh-infra"
+
+ state = "fail" #default
+ res = kube_curl("-sL", "-m", "3", "-u", f'{username}:{password}', f'{service}.{namespace}/api/status')
+
+ if res == '':
+ res = 'kibana not reachable'
+ else:
+ res = json.loads(res)
+ if res['status']['overall']['state'] == 'green':
+ state = "pass"
+
+ result = {'category': 'platform',
+ 'case_name': 'kibana_check',
+ 'criteria': state,
+ 'details': rfile(str(res))
+ }
+
+ store_result(result)
+ return result
+
+
+def nagios_check():
+ """
+ Check health of Nagios
+ """
+ username = "nagios"
+ password = "password123"
+ service = "nagios-metrics"
+ namespace = "osh-infra"
+
+ state = "fail" #default
+ res = kube_curl("-sL", "-m", "3", "-w", "%{http_code}",\
+ "-o", "/dev/null", "-u", \
+ f'{username}:{password}', \
+ f'{service}.{namespace}')
+ if res == '200':
+ state = "pass"
+
+ result = {'category': 'platform',
+ 'case_name': 'nagios_check',
+ 'criteria': state
+ }
+
+ store_result(result)
+ return result
+
+
+def elasticsearch_exporter_check():
+ """
+ Check health of Elasticsearch Exporter
+ """
+ service = "elasticsearch-exporter"
+ namespace = "osh-infra"
+
+ state = "fail" #default
+ res = kube_curl("-sL", "-m", "3", "-w", "%{http_code}", "-o", "/dev/null", f'{service}.{namespace}:9108/metrics')
+ if res == '200':
+ state = "pass"
+
+ result = {'category': 'platform',
+ 'case_name': 'elasticsearch_exporter_check',
+ 'criteria': state
+ }
+
+ store_result(result)
+ return result
+
+
+def fluentd_exporter_check():
+ """
+ Check health of Fluentd Exporter
+ """
+ service = "fluentd-exporter"
+ namespace = "osh-infra"
+
+ state = "fail" #default
+ res = kube_curl("-sL", "-m", "3", "-w", "%{http_code}", "-o", "/dev/null", f'{service}.{namespace}:9309/metrics')
+ if res == '200':
+ state = "pass"
+
+ result = {'category': 'platform',
+ 'case_name': 'fluentd_exporter_check',
+ 'criteria': state
+ }
+
+ store_result(result)
+ return result
diff --git a/sdv/docker/sdvstate/validator/airship/network_check.py b/sdv/docker/sdvstate/validator/airship/network_check.py
new file mode 100644
index 0000000..bddf579
--- /dev/null
+++ b/sdv/docker/sdvstate/validator/airship/network_check.py
@@ -0,0 +1,114 @@
+# Copyright 2020 University Of Delhi.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Network Related Checks
+"""
+
+
+import configparser
+
+from tools.conf import settings
+from tools.kube_utils import kube_exec, get_pod_with_labels
+
+from .store_result import store_result
+
+
+def physical_network_check():
+ """
+ physical_network_check
+ """
+ ml2_config = neutron_ml2_config()
+
+ physical_networks = settings.getValue('pdf_file')['physical_networks']
+
+ type_drivers = ml2_config.get('ml2', 'type_drivers').split(',')
+
+ flat_networks = ml2_config.get('ml2_type_flat', 'flat_networks').split(',')
+
+ vlan_networks = []
+ network_vlan_ranges = ml2_config.get('ml2_type_vlan', 'network_vlan_ranges').split(',')
+ for network in network_vlan_ranges:
+ vlan_networks.append(network.split(':')[0])
+
+ result = {'category': 'network',
+ 'case_name': 'physical_network_check',
+ 'criteria': 'pass',
+ 'details': []
+ }
+
+ for physnet in physical_networks:
+
+ res = {'network_name': physnet['name'],
+ 'type': physnet['type'],
+ 'criteria': 'fail'
+ }
+
+ if physnet['type'] in type_drivers:
+ if physnet['type'] == 'flat':
+ if physnet['name'] in flat_networks or '*' in flat_networks:
+ res['criteria'] = 'pass'
+ else:
+ res['details'] = 'physical network name not found'
+ if physnet['type'] == 'vlan':
+ if physnet['name'] in vlan_networks:
+ res['criteria'] = 'pass'
+ else:
+ res['details'] = 'physical network name not found'
+ else:
+ res['details'] = 'physical network type not found'
+
+ result['details'].append(res)
+ if res['criteria'] == 'fail':
+ result['criteria'] = 'fail'
+
+ store_result(result)
+ return result
+
+
+
+def neutron_ml2_config():
+ """
+ Returns parsed ml2 config from neutron
+ """
+ ovs = get_pod_with_labels("application=neutron,component=neutron-ovs-agent")
+ sriov = get_pod_with_labels("application=neutron,component=neutron-sriov-agent")
+
+ confs = get_neutron_ml2_conf_from_pod(ovs)
+ confs.extend(get_neutron_ml2_conf_from_pod(sriov))
+
+ config = configparser.ConfigParser()
+ for conf in confs:
+ config.read_string(conf)
+
+ return config
+
+
+
+
+def get_neutron_ml2_conf_from_pod(pod):
+ """
+ Reads ml2 config from neutron pod
+ """
+ cmd = ['ls', '/etc/neutron/plugins/ml2/']
+ response = kube_exec(pod, cmd)
+ files = response.rstrip("\n").split()
+
+ response = []
+ for filename in files:
+ cmd = ['cat', '/etc/neutron/plugins/ml2/' + filename]
+ conf = kube_exec(pod, cmd)
+ response.append(conf)
+
+ return response
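
physical_network_check() reads exactly three ml2 options (type_drivers, flat_networks, network_vlan_ranges); a minimal, self-contained sketch of the kind of config it expects the neutron pods to carry, with hypothetical values:

    # Hypothetical ml2 fragment, parsed the same way neutron_ml2_config() does.
    import configparser

    ML2_SAMPLE = (
        "[ml2]\n"
        "type_drivers = flat,vlan,vxlan\n"
        "[ml2_type_flat]\n"
        "flat_networks = *\n"
        "[ml2_type_vlan]\n"
        "network_vlan_ranges = physnet1:100:200,physnet2\n"
    )
    ml2 = configparser.ConfigParser()
    ml2.read_string(ML2_SAMPLE)
    ml2.get('ml2', 'type_drivers').split(',')                   # ['flat', 'vlan', 'vxlan']
    ml2.get('ml2_type_flat', 'flat_networks').split(',')        # ['*']
    ml2.get('ml2_type_vlan', 'network_vlan_ranges').split(',')  # ['physnet1:100:200', 'physnet2']
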
diff --git a/sdv/docker/sdvstate/validator/airship/pod_health_check.py b/sdv/docker/sdvstate/validator/airship/pod_health_check.py
index 34a6747..0093ffc 100644
--- a/sdv/docker/sdvstate/validator/airship/pod_health_check.py
+++ b/sdv/docker/sdvstate/validator/airship/pod_health_check.py
@@ -13,13 +13,19 @@
# limitations under the License.
+"""
+Pod Health Checks
+"""
+
+
import logging
-from kubernetes import client, config
from tools.kube_utils import kube_api
from tools.conf import settings
-from tools.result_api import result_api, rfile
+from tools.result_api import rfile
+
+from .store_result import store_result
@@ -29,13 +35,25 @@ def pod_health_check():
"""
api = kube_api()
namespace_list = settings.getValue('airship_namespace_list')
+
+ result = {'category': 'platform',
+ 'case_name': 'pod_health_check',
+ 'criteria': 'pass',
+ 'details': []
+ }
+
for namespace in namespace_list:
pod_list = api.list_namespaced_pod(namespace)
for pod in pod_list.items:
- result = pod_status(pod)
- if result['state'] == 'fail':
- result['logs'] = get_logs(pod)
- result_api.store(result)
+ pod_stats = pod_status(pod)
+ if pod_stats['criteria'] == 'fail':
+ pod_stats['logs'] = get_logs(pod)
+ result['criteria'] = 'fail'
+ result['details'].append(pod_stats)
+
+
+ store_result(result)
+ return result
@@ -43,14 +61,13 @@ def pod_status(pod):
"""
Checks health of a pod and returns its status as result
"""
- result = {'state': 'ok',
- 'kind': 'pod',
+ result = {'criteria': 'pass',
'name': pod.metadata.name,
'namespace': pod.metadata.namespace,
'node': pod.spec.node_name}
if pod.status.container_statuses is None:
- result['state'] = 'fail'
+ result['criteria'] = 'fail'
result['pod_details'] = rfile(str(pod))
else:
for container in pod.status.container_statuses:
@@ -62,14 +79,14 @@ def pod_status(pod):
status = container.state.waiting.reason
if status not in ('Running', 'Completed'):
- result['state'] = 'fail'
+ result['criteria'] = 'fail'
result['pod_details'] = rfile(str(pod))
- info = f'[Health: {result["state"]}] Name: {result["name"]}, '
+ info = f'[Health: {result["criteria"]}] Name: {result["name"]}, '
info = info + f'Namespace: {result["namespace"]}, Node: {result["node"]}'
logger = logging.getLogger(__name__)
- logger.info(info)
+ logger.debug(info)
return result
diff --git a/sdv/docker/sdvstate/validator/airship/store_result.py b/sdv/docker/sdvstate/validator/airship/store_result.py
new file mode 100644
index 0000000..52f4e10
--- /dev/null
+++ b/sdv/docker/sdvstate/validator/airship/store_result.py
@@ -0,0 +1,28 @@
+# Copyright 2020 University Of Delhi.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+store_result function to log and store result
+"""
+import logging
+from tools.result_api import result_api
+
+def store_result(result):
+ """
+ Logs and stores result
+ """
+ logger = logging.getLogger(__name__)
+ logger.info(f'[State: {result["criteria"]}] {result["case_name"]}')
+
+ result_api.store(result)
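
store_result() only requires the criteria and case_name keys in the dict it receives; a minimal call with a hypothetical result:

    # Hypothetical check result; logs "[State: pass] example_check" and stores the dict.
    store_result({'category': 'platform',
                  'case_name': 'example_check',
                  'criteria': 'pass',
                  'details': []})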