Diffstat (limited to 'sdv/docker')
35 files changed, 3046 insertions, 62 deletions
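The bulk of this commit is a new `sdvmodel` container: a small Tornado service whose `/validate` handler (added in the server script below) reads a form field named `config` carrying a JSON document with `vnf_profiles` and `hardware_profile` sections and responds with a rendered HTML modelling report. As a rough illustration only — the host, port, and profile values here are placeholders lifted from the web form defaults, not part of this commit — a request could be driven like this minimal Python sketch:

    # Sketch of exercising the /validate endpoint; host/port are assumptions
    # (the server listens on port 80 inside the container by default).
    import json
    import requests

    config = {
        "vnf_profiles": [{
            "profile_name": "userplane_app",
            "vcpus": "36",
            "num_of_vnfs": "2",
            "sriov_support": "yes",
            "availability_zone": "dataplane_zone",
        }],
        "hardware_profile": {
            "vcpus": "80",
            "numas": "2",
            "numa0_cpus_4vnfs": "36",
            "numa1_cpus_4vnfs": "36",
            "cpu_isol_set": "0-44",
            "nics": [{"name": "ens785f0", "type": "sriov", "speed": "25", "numa": "0"}],
        },
    }

    # The handler expects an application/x-www-form-urlencoded body with a
    # 'config' field holding the JSON document, and returns the HTML report.
    resp = requests.post("http://localhost:8080/validate",
                         data={"config": json.dumps(config)})
    print(resp.text)

Building the new Dockerfile and running it with the container port mapped (for example `-p 8080:80`) should make the sketch above return the rendered resource-modelling report.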
diff --git a/sdv/docker/sdvmodel/Dockerfile b/sdv/docker/sdvmodel/Dockerfile new file mode 100644 index 0000000..a71575e --- /dev/null +++ b/sdv/docker/sdvmodel/Dockerfile @@ -0,0 +1,15 @@ +FROM python:3.8-slim-buster + +MAINTAINER Parth Yadav <parthyadav3105@gmail.com> + +WORKDIR /server/ + +COPY resource-estimation/requirements.txt /server/requirements.txt +RUN pip install -r requirements.txt + +COPY website/ /website/ +COPY resource-estimation/ /server/ + +RUN rm requirements.txt + +CMD [ "python", "/server/server" ] diff --git a/sdv/docker/sdvmodel/resource-estimation/requirements.txt b/sdv/docker/sdvmodel/resource-estimation/requirements.txt new file mode 100644 index 0000000..8fd63d9 --- /dev/null +++ b/sdv/docker/sdvmodel/resource-estimation/requirements.txt @@ -0,0 +1,2 @@ +tornado == 6.0.4 +Jinja2 == 2.11.2 diff --git a/sdv/docker/sdvmodel/resource-estimation/server b/sdv/docker/sdvmodel/resource-estimation/server new file mode 100755 index 0000000..bae9781 --- /dev/null +++ b/sdv/docker/sdvmodel/resource-estimation/server @@ -0,0 +1,340 @@ +#!/usr/bin/env python3 + +# Copyright 2020 Spirent Communications, University Of Delhi. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +""" +Server +""" + + +import logging +import os +import sys +import copy +import json +from tornado.web import Application +from tornado.ioloop import IOLoop +import tornado.concurrent +import tornado.httpserver +import tornado.ioloop +import tornado.options +import tornado.web +import tornado.log +import jinja2 + +# SOF: 11124247: Massey101 and Corey Klein +class StreamToLogger(): + """ + file-like stream object that redirects writes to a logger instance. + """ + def __init__(self, logger, log_level=logging.INFO): + self.logger = logger + self.log_level = log_level + self.linebuf = '' + + def write(self, buf): + """ + write logs + """ + temp_linebuf = self.linebuf + buf + self.linebuf = '' + for line in temp_linebuf.splitlines(True): + # From the io.TextIOWrapper docs: + # On output, if newline is None, any '\n' characters written + # are translated to the system default line separator. + # By default sys.stdout.write() expects '\n' newlines and then + # translates them so this is still cross platform. 
+ if line[-1] == '\n': + self.logger.log(self.log_level, line.rstrip()) + else: + self.linebuf += line + + def flush(self): + """ + flush the buffer + """ + if self.linebuf != '': + self.logger.log(self.log_level, self.linebuf.rstrip()) + self.linebuf = '' + + + +class Server(): + """ + Server + """ + # pylint: disable=too-many-instance-attributes + + def __init__(self, hw_profile): + self.vcpus = hw_profile['vcpus'] + self.numas = hw_profile['numas'] + self.numa_vcpu_map = [] + self.sriov_support = False + self.hosted_vnfs = [] + for count in range(int(self.numas)): + self.numa_vcpu_map.append(hw_profile['numa' + +str(count) + +'_cpus_4vnfs']) + self.create_numa_sriov_map(hw_profile['nics']) + self.zone = 'default' + self.cpu_isolation = hw_profile['cpu_isol_set'] + self.available_cpu_map = self.numa_vcpu_map + + def create_numa_sriov_map(self, nics): + """ + Search for all sriov and nonsriov numas + """ + self.sriov_numas = [] + self.nonsriov_numas = [] + for nic in nics: + if nic['type'] == 'sriov': + self.sriov_support = True + if nic['numa'] not in self.sriov_numas: + self.sriov_numas.append(int(nic['numa'])) + else: + if nic['numa'] not in self.sriov_numas: + self.nonsriov_numas.append(int(nic['numa'])) + + def dump_profile(self): + """ + Print Server Profile + """ + print("The number of vCPUs: %s" %self.vcpus) + print("Number of NUMA nodes on this server: %s" %self.numas) + print("vCPUs available for the application in each NUMA: %s" %self.numa_vcpu_map) + print("SRIOV Support? %s" %self.sriov_support) + print("The Zone this server belongs to: %s" %self.zone) + print("vCPUs Isolated: %s" %self.cpu_isolation) + print("Numa to which SRIOV Nics belogs to: %s" %str(self.sriov_numas)) + +class Deployment(): + """ + Model deployment + """ + def __init__(self, rack_count, hw_profile): + self.server_list = [] + self.total_servers = 0 + self.hw_profile = hw_profile + self.rack_count = rack_count + self.server_zones = {} + + def create_deployment(self, vnf_profiles): + """ + Understand zones. + """ + zones = [] + for vnf in vnf_profiles: + if vnf['availability_zone'] not in zones: + zones.append(vnf['availability_zone']) + # print(zones) + for zone in zones: + for vnf in vnf_profiles: + if zone == vnf['availability_zone']: + for count in range(int(vnf['num_of_vnfs'])): + self.deploy(vnf, count) + self.server_zones[zone] = copy.deepcopy(self.server_list) + self.total_servers += len(self.server_list) + self.server_list.clear() + + def deploy(self, vnf, suffix): + """ + Understand deployment + """ + # pylint: disable=too-many-branches + + deploy = False + # If no servers, just do the deployment there and apped it. + if len(self.server_list) == 0: + server = Server(self.hw_profile) + for cnt in range(len(server.available_cpu_map)): + if int(server.available_cpu_map[cnt]) >= int(vnf['vcpus']): + if not ((vnf['sriov_support'] == 'yes' and cnt not in server.sriov_numas) or\ + (vnf['sriov_support'] == 'no' and cnt not in server.nonsriov_numas)): + server.available_cpu_map[cnt] = str( + int(server.available_cpu_map[cnt]) - int(vnf['vcpus'])) + deploy = True + server.hosted_vnfs.append({'vnf':vnf['profile_name'] +\ + str(suffix), 'numa': cnt}) + self.server_list.append(server) + return + if not deploy: + print("The existing hardware profile is not Suitable") + sys.exit() + # Servers already exist. 
Check if any eserver can accommodate the vnf: + for server in self.server_list: + # Check if SRIOV support is required for VNF and server supports + # Check if cpus are available in any of the numas + for cnt in range(len(server.available_cpu_map)): + if int(server.available_cpu_map[cnt]) >= int(vnf['vcpus']): + if not ((vnf['sriov_support'] == 'yes' and cnt not in server.sriov_numas) or\ + (vnf['sriov_support'] == 'no' and cnt not in server.nonsriov_numas)): + server.available_cpu_map[cnt] = str(int(server.available_cpu_map[cnt]) + - int(vnf['vcpus'])) + deploy = True + server.hosted_vnfs.append({'vnf':vnf['profile_name'] +\ + str(suffix), 'numa': cnt}) + return + # We need to create new server, do deployment there and append it the list + if not deploy: + server = Server(self.hw_profile) + for cnt in range(len(server.available_cpu_map)): + if int(server.available_cpu_map[cnt]) >= int(vnf['vcpus']): + if not ((vnf['sriov_support'] == 'yes' and cnt not in server.sriov_numas) or\ + (vnf['sriov_support'] == 'no' and cnt not in server.nonsriov_numas)): + server.available_cpu_map[cnt] = str( + int(server.available_cpu_map[cnt]) - int(vnf['vcpus'])) + deploy = True + server.hosted_vnfs.append({'vnf':vnf['profile_name'] +\ + str(suffix), 'numa': cnt}) + self.server_list.append(server) + return + if not deploy: + print("The existing hardware profile is not Suitable") + sys.exit() + + def display_deployment(self): + """ + Print Deployment Report + """ + print("Number of servers used %d" % self.total_servers) + print("------------------------------------------------") + count = 0 + for zone, server_list in self.server_zones.items(): + print("SERVERS IN AVAILABILITY ZONE: %s" %(zone)) + print("------------------------------------------------") + for server in server_list: + print("Server ID: " + str(count)) + for vnf in server.hosted_vnfs: + print("VNF: " + vnf['vnf'] + " NUMA: " + str(vnf['numa'])) + count = count + 1 + print("------------------------------------------------") + + def get_deployment(self): + """ + Returns servers and zones + """ + return self.total_servers, self.server_zones + + +# pylint: disable=W0223 + +class Estimate(tornado.web.RequestHandler): + """ + Resource estimator + """ + # def set_default_headers(self): + # self.set_header('Content-Type', 'application/json') + + def post(self): + """ + Server Resource Modelling Report + """ + model = {} + config = self.get_argument('config', None) + data = json.loads(config) + + vnf_profiles = (data['vnf_profiles']) + hw_profile = (data['hardware_profile']) + model['vnf_profiles'] = vnf_profiles + print("--------- Resource Modelling Report ------------") + print("------------------------------------------------") + print("The VNFs:") + for profile in vnf_profiles: + print(profile['profile_name']) + print("------------------------------------------------") + print("The Compute-Server Profile:") + server = Server(hw_profile) + server.dump_profile() + model['server'] = hw_profile + print("------------------------------------------------") + deployment = Deployment(2, hw_profile) + deployment.create_deployment(vnf_profiles) + deployment.display_deployment() + count, placement = deployment.get_deployment() + model['deployment_count'] = count + model['deployment'] = placement + loader = jinja2.FileSystemLoader(searchpath="template/") + jenv = jinja2.Environment(loader=loader) + template = jenv.get_template('report.html') + htmlout = template.render(model=model) + + self.finish(htmlout) + + +class HomeHandler(tornado.web.RequestHandler): + 
""" + Handler for '/' endpoint + """ + def get(self): + """ + Server Home Page + """ + self.render('/website/index.html') + + + +def server_main_block(): + """ + Main Function + """ + + app = Application([('/validate', Estimate), + ('/', HomeHandler), + ('/(.*)', tornado.web.StaticFileHandler, {'path' : '/website'})]) + + # Cli Config + tornado.options.define("port", default=80, help="run on the given port", type=int) + tornado.options.parse_command_line() + + + # Server Config + http_server = tornado.httpserver.HTTPServer(app) + http_server.listen(tornado.options.options.port) + + est_file = "/tmp/estimate.txt" + if os.path.exists(est_file): + os.remove(est_file) + + # Logging + logging.basicConfig( + level=logging.DEBUG, + format='%(message)s', + filename=est_file, + filemode='a' + ) + + stdout_logger = logging.getLogger('STDOUT') + sys.stdout = StreamToLogger(stdout_logger, logging.INFO) + + stderr_logger = logging.getLogger('STDERR') + sys.stderr = StreamToLogger(stderr_logger, logging.ERROR) + + tornado.log.enable_pretty_logging() + + # Tornado's event loop handles it from here + print("# Servering.... \n [Ctrl + C] to quit") + + try: + tornado.ioloop.IOLoop.instance().start() + except KeyboardInterrupt: + tornado.ioloop.IOLoop.instance().stop() + + # start + IOLoop.instance().start() + + +if __name__ == "__main__": + server_main_block() diff --git a/sdv/docker/sdvmodel/resource-estimation/template/report.html b/sdv/docker/sdvmodel/resource-estimation/template/report.html new file mode 100644 index 0000000..b53ea9f --- /dev/null +++ b/sdv/docker/sdvmodel/resource-estimation/template/report.html @@ -0,0 +1,75 @@ +<!DOCTYPE html> +<html> +<head> + <title>Resource Modelling Report</title> + + <meta charset="UTF-8"> + <meta content="width=device-width, initial-scale=1" name="viewport"> + + <link rel="stylesheet" type="text/css" href="style/report.css"> + <link rel="stylesheet" type="text/css" href="style/index.css"> + <link href="https://fonts.googleapis.com/css2?family=Ubuntu&display=swap" rel="stylesheet"> +</head> + +<body> + <div class="report"> + + <h1>Resource Modelling Report</h1> + <br> + + <h3>The VNFs Considered for Modelling:</h3> + + <div class="holder"> + {% for profile in model['vnf_profiles'] %} + <div class="vnf"> + {{ profile['profile_name'] }} + <img src="/assets/vnf.svg"> + </div> + {% endfor %} + </div> + <hr> + + <h3>The Compute-Node Server Profile:</h3> + + The number of vCPUs: {{ model['server']['vcpus'] }} + <br>Number of NUMA nodes on this server: {{ model['server']['numas']}} + <br>vCPUs available for the application in each NUMA: + <br>SRIOV Support?: {{model['sriov_support']}} + <br>vCPUs Isolated: {{ model['server']['cpu_isol_set'] }} + <br>Number of Servers Used: {{ model['deployment_count'] }} + <br> + + <hr> + + {% for zone, server_list in model['deployment'].items() %} + <h3>Servers in Availability zone: {{ zone }}</h3> + + {% for server in server_list %} + <div class="tab">Server ID: {{ loop.index }}</div> + <div class="holder server"> + {% for vnf in server.hosted_vnfs %} + <div class="vnf"> + {{ vnf['vnf'] }} (numa:{{ vnf['numa'] }}) + <img src="/assets/vnf.svg"> + </div> + {% endfor %} + </div> + <br> + {% endfor %} + + <hr> + {% endfor %} + + </div> + +<br> +<button onclick="window.location.href='/'">Go to Home</button> + + + + + + +</body> +</html> + diff --git a/sdv/docker/sdvmodel/website/actions.js b/sdv/docker/sdvmodel/website/actions.js new file mode 100644 index 0000000..aeb0e91 --- /dev/null +++ b/sdv/docker/sdvmodel/website/actions.js @@ -0,0 
+1,39 @@ +/* Copyright 2020 University Of Delhi. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + + +// expand-arrow button +function toggleClass(element, classname){ + element.classList.toggle(classname) +} + +// Add button +function duplicate(button){ + if (button.previousElementSibling.hasAttribute('name') && + button.previousElementSibling.getAttribute('name') != null) + { + newdiv = button.previousElementSibling.cloneNode(true); + if (!newdiv.lastElementChild.classList.contains('del-button')){ + del ='<div class="del-button" onclick="remove(this)"></div>' + newdiv.innerHTML += del; + } + button.parentNode.insertBefore(newdiv, button) + } +} + +// Delete Button +function remove(button){ + button.parentNode.parentNode.removeChild(button.parentNode); +}
\ No newline at end of file diff --git a/sdv/docker/sdvmodel/website/assets/plus-circle-solid.svg b/sdv/docker/sdvmodel/website/assets/plus-circle-solid.svg new file mode 100644 index 0000000..39a0f8e --- /dev/null +++ b/sdv/docker/sdvmodel/website/assets/plus-circle-solid.svg @@ -0,0 +1,59 @@ +<?xml version="1.0" encoding="UTF-8" standalone="no"?> +<svg + xmlns:dc="http://purl.org/dc/elements/1.1/" + xmlns:cc="http://creativecommons.org/ns#" + xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" + xmlns:svg="http://www.w3.org/2000/svg" + xmlns="http://www.w3.org/2000/svg" + xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd" + xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape" + inkscape:version="1.0 (6e3e5246a0, 2020-05-07)" + sodipodi:docname="plus-circle-solid.svg" + id="svg4" + version="1.1" + viewBox="0 0 512 512" + role="img" + class="svg-inline--fa fa-plus-circle fa-w-16" + data-icon="plus-circle" + data-prefix="fas" + focusable="false" + aria-hidden="true"> + <metadata + id="metadata10"> + <rdf:RDF> + <cc:Work + rdf:about=""> + <dc:format>image/svg+xml</dc:format> + <dc:type + rdf:resource="http://purl.org/dc/dcmitype/StillImage" /> + </cc:Work> + </rdf:RDF> + </metadata> + <defs + id="defs8" /> + <sodipodi:namedview + inkscape:current-layer="svg4" + inkscape:window-maximized="1" + inkscape:window-y="27" + inkscape:window-x="0" + inkscape:cy="256" + inkscape:cx="114.5679" + inkscape:zoom="1.265625" + showgrid="false" + id="namedview6" + inkscape:window-height="794" + inkscape:window-width="1600" + inkscape:pageshadow="2" + inkscape:pageopacity="0" + guidetolerance="10" + gridtolerance="10" + objecttolerance="10" + borderopacity="1" + bordercolor="#666666" + pagecolor="#ffffff" /> + <path + style="fill:#61b9ff;fill-opacity:1" + id="path2" + d="M256 8C119 8 8 119 8 256s111 248 248 248 248-111 248-248S393 8 256 8zm144 276c0 6.6-5.4 12-12 12h-92v92c0 6.6-5.4 12-12 12h-56c-6.6 0-12-5.4-12-12v-92h-92c-6.6 0-12-5.4-12-12v-56c0-6.6 5.4-12 12-12h92v-92c0-6.6 5.4-12 12-12h56c6.6 0 12 5.4 12 12v92h92c6.6 0 12 5.4 12 12v56z" + fill="currentColor" /> +</svg> diff --git a/sdv/docker/sdvmodel/website/assets/server.svg b/sdv/docker/sdvmodel/website/assets/server.svg new file mode 100644 index 0000000..547cdaf --- /dev/null +++ b/sdv/docker/sdvmodel/website/assets/server.svg @@ -0,0 +1,123 @@ +<?xml version="1.0" encoding="UTF-8" standalone="no"?> +<svg + xmlns:dc="http://purl.org/dc/elements/1.1/" + xmlns:cc="http://creativecommons.org/ns#" + xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" + xmlns:svg="http://www.w3.org/2000/svg" + xmlns="http://www.w3.org/2000/svg" + xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd" + xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape" + height="36.512501mm" + width="65.167557mm" + inkscape:version="1.0 (1.0+r73+1)" + sodipodi:docname="server.svg" + id="svg4" + version="1.1" + viewBox="0 0 246.30241 137.99999" + role="img" + class="svg-inline--fa fa-server fa-w-16" + data-icon="server" + data-prefix="fas" + focusable="false" + aria-hidden="true"> + <metadata + id="metadata10"> + <rdf:RDF> + <cc:Work + rdf:about=""> + <dc:format>image/svg+xml</dc:format> + <dc:type + rdf:resource="http://purl.org/dc/dcmitype/StillImage" /> + <dc:title /> + </cc:Work> + </rdf:RDF> + </metadata> + <defs + id="defs8" /> + <sodipodi:namedview + units="mm" + fit-margin-bottom="0" + fit-margin-right="0" + fit-margin-left="0" + fit-margin-top="0" + inkscape:current-layer="svg4" + inkscape:window-maximized="1" + 
inkscape:window-y="27" + inkscape:window-x="0" + inkscape:cy="80.582597" + inkscape:cx="196.50384" + inkscape:zoom="1.3340661" + showgrid="false" + id="namedview6" + inkscape:window-height="658" + inkscape:window-width="1366" + inkscape:pageshadow="2" + inkscape:pageopacity="0" + guidetolerance="10" + gridtolerance="10" + objecttolerance="10" + borderopacity="1" + bordercolor="#666666" + pagecolor="#ffffff" + inkscape:document-rotation="0" /> + <path + d="m 9.563242,4 h 7.490814 c 0.295503,0 0.635382,0.327 0.635382,18 v 92 c 0,17.673 -0.339879,18 -0.635382,18 H 9.563242 C 9.267739,132 8.927861,131.673 8.927861,114 V 22 c 0,-17.673 0.339878,-18 0.635381,-18 z" + style="opacity:0.13180452;fill:#787878;fill-opacity:1;stroke-width:0.135569" + id="path2428" + sodipodi:nodetypes="sssssssss" /> + <path + sodipodi:nodetypes="sssssssss" + id="path2428-3" + style="opacity:0.13180452;fill:#787878;fill-opacity:1;stroke-width:0.135569" + d="m 29.563242,4 h 7.490814 c 0.295503,0 0.635382,0.327 0.635382,18 v 92 c 0,17.673 -0.339879,18 -0.635382,18 h -7.490814 c -0.295502,0 -0.635381,-0.327 -0.635381,-18 V 22 c 0,-17.673 0.339879,-18 0.635381,-18 z" /> + <path + sodipodi:nodetypes="sssssssss" + id="path2428-5" + style="opacity:0.13180452;fill:#787878;fill-opacity:1;stroke-width:0.135569" + d="m 49.563242,4 h 7.490814 c 0.295503,0 0.635382,0.327 0.635382,18 v 92 c 0,17.673 -0.339879,18 -0.635382,18 h -7.490814 c -0.295503,0 -0.635381,-0.327 -0.635381,-18 V 22 c 0,-17.673 0.339878,-18 0.635381,-18 z" /> + <path + d="m 69.563242,4 h 7.490814 c 0.295503,0 0.635382,0.327 0.635382,18 v 92 c 0,17.673 -0.339879,18 -0.635382,18 h -7.490814 c -0.295502,0 -0.635381,-0.327 -0.635381,-18 V 22 c 0,-17.673 0.339879,-18 0.635381,-18 z" + style="opacity:0.13180452;fill:#787878;fill-opacity:1;stroke-width:0.135569" + id="path2428-3-6" + sodipodi:nodetypes="sssssssss" /> + <path + sodipodi:nodetypes="sssssssss" + id="path2428-2" + style="opacity:0.13180452;fill:#787878;fill-opacity:1;stroke-width:0.135569" + d="m 89.563242,4 h 7.49081 c 0.29551,0 0.63539,0.327 0.63539,18 v 92 c 0,17.673 -0.33988,18 -0.63539,18 h -7.49081 c -0.2955,0 -0.63538,-0.327 -0.63538,-18 V 22 c 0,-17.673 0.33988,-18 0.63538,-18 z" /> + <path + d="m 109.56324,4 h 7.49081 c 0.29551,0 0.63539,0.327 0.63539,18 v 92 c 0,17.673 -0.33988,18 -0.63539,18 h -7.49081 c -0.2955,0 -0.63538,-0.327 -0.63538,-18 V 22 c 0,-17.673 0.33988,-18 0.63538,-18 z" + style="opacity:0.13180452;fill:#787878;fill-opacity:1;stroke-width:0.135569" + id="path2428-3-9" + sodipodi:nodetypes="sssssssss" /> + <path + sodipodi:nodetypes="sssssssss" + id="path2428-6" + style="opacity:0.13180452;fill:#787878;fill-opacity:1;stroke-width:0.135569" + d="m 129.56324,4 h 7.49081 c 0.2955,0 0.63538,0.327 0.63538,18 v 92 c 0,17.673 -0.33988,18 -0.63538,18 h -7.49081 c -0.2955,0 -0.63538,-0.327 -0.63538,-18 V 22 c 0,-17.673 0.33988,-18 0.63538,-18 z" /> + <path + d="m 149.56324,4 h 7.49081 c 0.2955,0 0.63538,0.327 0.63538,18 v 92 c 0,17.673 -0.33988,18 -0.63538,18 h -7.49081 c -0.2955,0 -0.63539,-0.327 -0.63539,-18 V 22 c 0,-17.673 0.33988,-18 0.63539,-18 z" + style="opacity:0.13180452;fill:#787878;fill-opacity:1;stroke-width:0.135569" + id="path2428-3-0" + sodipodi:nodetypes="sssssssss" /> + <path + d="m 169.56324,4 h 7.49081 c 0.2955,0 0.63538,0.327 0.63538,18 v 92 c 0,17.673 -0.33988,18 -0.63538,18 h -7.49081 c -0.29551,0 -0.63539,-0.327 -0.63539,-18 V 22 c 0,-17.673 0.33988,-18 0.63539,-18 z" + style="opacity:0.13180452;fill:#787878;fill-opacity:1;stroke-width:0.135569" + id="path2428-5-6" + 
sodipodi:nodetypes="sssssssss" /> + <path + sodipodi:nodetypes="sssssssss" + id="path2428-3-6-2" + style="opacity:0.13180452;fill:#787878;fill-opacity:1;stroke-width:0.135569" + d="m 189.56324,4 h 7.49081 c 0.2955,0 0.63538,0.327 0.63538,18 v 92 c 0,17.673 -0.33988,18 -0.63538,18 h -7.49081 c -0.29551,0 -0.63539,-0.327 -0.63539,-18 V 22 c 0,-17.673 0.33988,-18 0.63539,-18 z" /> + <path + d="m 209.56324,4 h 7.49081 c 0.29551,0 0.63539,0.327 0.63539,18 v 92 c 0,17.673 -0.33988,18 -0.63539,18 h -7.49081 c -0.2955,0 -0.63538,-0.327 -0.63538,-18 V 22 c 0,-17.673 0.33988,-18 0.63538,-18 z" + style="opacity:0.13180452;fill:#787878;fill-opacity:1;stroke-width:0.135569" + id="path2428-2-6" + sodipodi:nodetypes="sssssssss" /> + <path + sodipodi:nodetypes="sssssssss" + id="path2428-3-9-1" + style="opacity:0.13180452;fill:#787878;fill-opacity:1;stroke-width:0.135569" + d="m 229.56323,4 h 7.49081 c 0.29551,0 0.63539,0.327 0.63539,18 v 92 c 0,17.673 -0.33988,18 -0.63539,18 h -7.49081 c -0.2955,0 -0.63538,-0.327 -0.63538,-18 V 22 c 0,-17.673 0.33988,-18 0.63538,-18 z" /> +</svg> diff --git a/sdv/docker/sdvmodel/website/assets/trash-alt-regular.svg b/sdv/docker/sdvmodel/website/assets/trash-alt-regular.svg new file mode 100644 index 0000000..fbce77b --- /dev/null +++ b/sdv/docker/sdvmodel/website/assets/trash-alt-regular.svg @@ -0,0 +1,59 @@ +<?xml version="1.0" encoding="UTF-8" standalone="no"?> +<svg + xmlns:dc="http://purl.org/dc/elements/1.1/" + xmlns:cc="http://creativecommons.org/ns#" + xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" + xmlns:svg="http://www.w3.org/2000/svg" + xmlns="http://www.w3.org/2000/svg" + xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd" + xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape" + inkscape:version="1.0 (6e3e5246a0, 2020-05-07)" + sodipodi:docname="trash-alt-regular.svg" + id="svg4" + version="1.1" + viewBox="0 0 448 512" + role="img" + class="svg-inline--fa fa-trash-alt fa-w-14" + data-icon="trash-alt" + data-prefix="far" + focusable="false" + aria-hidden="true"> + <metadata + id="metadata10"> + <rdf:RDF> + <cc:Work + rdf:about=""> + <dc:format>image/svg+xml</dc:format> + <dc:type + rdf:resource="http://purl.org/dc/dcmitype/StillImage" /> + </cc:Work> + </rdf:RDF> + </metadata> + <defs + id="defs8" /> + <sodipodi:namedview + inkscape:current-layer="svg4" + inkscape:window-maximized="1" + inkscape:window-y="27" + inkscape:window-x="0" + inkscape:cy="192.79012" + inkscape:cx="224" + inkscape:zoom="1.265625" + showgrid="false" + id="namedview6" + inkscape:window-height="794" + inkscape:window-width="1600" + inkscape:pageshadow="2" + inkscape:pageopacity="0" + guidetolerance="10" + gridtolerance="10" + objecttolerance="10" + borderopacity="1" + bordercolor="#666666" + pagecolor="#ffffff" /> + <path + style="fill:#ff6161;fill-opacity:1" + id="path2" + d="M268 416h24a12 12 0 0 0 12-12V188a12 12 0 0 0-12-12h-24a12 12 0 0 0-12 12v216a12 12 0 0 0 12 12zM432 80h-82.41l-34-56.7A48 48 0 0 0 274.41 0H173.59a48 48 0 0 0-41.16 23.3L98.41 80H16A16 16 0 0 0 0 96v16a16 16 0 0 0 16 16h16v336a48 48 0 0 0 48 48h288a48 48 0 0 0 48-48V128h16a16 16 0 0 0 16-16V96a16 16 0 0 0-16-16zM171.84 50.91A6 6 0 0 1 177 48h94a6 6 0 0 1 5.15 2.91L293.61 80H154.39zM368 464H80V128h288zm-212-48h24a12 12 0 0 0 12-12V188a12 12 0 0 0-12-12h-24a12 12 0 0 0-12 12v216a12 12 0 0 0 12 12z" + fill="currentColor" /> +</svg> diff --git a/sdv/docker/sdvmodel/website/assets/vnf.svg b/sdv/docker/sdvmodel/website/assets/vnf.svg new file mode 100644 index 0000000..7bd67e4 --- 
/dev/null +++ b/sdv/docker/sdvmodel/website/assets/vnf.svg @@ -0,0 +1,83 @@ +<?xml version="1.0" encoding="UTF-8" standalone="no"?> +<svg + xmlns:dc="http://purl.org/dc/elements/1.1/" + xmlns:cc="http://creativecommons.org/ns#" + xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" + xmlns:svg="http://www.w3.org/2000/svg" + xmlns="http://www.w3.org/2000/svg" + xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd" + xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape" + height="113.15604" + width="140.09048" + inkscape:version="1.0 (b51213c273, 2020-08-10)" + sodipodi:docname="vnf.svg" + id="svg4" + version="1.1" + viewBox="0 0 140.09048 113.15604" + role="img" + class="svg-inline--fa fa-server fa-w-16" + data-icon="server" + data-prefix="fas" + focusable="false" + aria-hidden="true"> + <metadata + id="metadata10"> + <rdf:RDF> + <cc:Work + rdf:about=""> + <dc:format>image/svg+xml</dc:format> + <dc:type + rdf:resource="http://purl.org/dc/dcmitype/StillImage" /> + <dc:title /> + </cc:Work> + </rdf:RDF> + </metadata> + <defs + id="defs8" /> + <sodipodi:namedview + fit-margin-bottom="0" + fit-margin-right="0" + fit-margin-left="0" + fit-margin-top="0" + inkscape:current-layer="svg4" + inkscape:window-maximized="1" + inkscape:window-y="27" + inkscape:window-x="0" + inkscape:cy="165.58202" + inkscape:cx="181.58465" + inkscape:zoom="1.7552979" + showgrid="false" + id="namedview6" + inkscape:window-height="794" + inkscape:window-width="1600" + inkscape:pageshadow="2" + inkscape:pageopacity="0" + guidetolerance="10" + gridtolerance="10" + objecttolerance="10" + borderopacity="1" + bordercolor="#666666" + pagecolor="#ffffff" + inkscape:document-rotation="0" /> + <rect + style="opacity:1;fill:none;stroke:none" + id="rect887" + width="140.09048" + height="113.15604" + x="0" + y="0" /> + <ellipse + ry="22.535322" + rx="24.521544" + cy="56.892029" + cx="43.126831" + id="path832-6-7" + style="opacity:1;fill:#3dbbf5;stroke-width:1.19657;fill-opacity:1" /> + <ellipse + style="opacity:1;fill:#3dbbf5;stroke-width:1.19657;fill-opacity:1" + id="path832-6-7-5" + cx="97.109772" + cy="56.811798" + rx="24.521544" + ry="22.535322" /> +</svg> diff --git a/sdv/docker/sdvmodel/website/controller.js b/sdv/docker/sdvmodel/website/controller.js new file mode 100644 index 0000000..5b71352 --- /dev/null +++ b/sdv/docker/sdvmodel/website/controller.js @@ -0,0 +1,34 @@ +/* Copyright 2020 University Of Delhi. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+*/ + +function getModel(){ + + config = {} + + // Get current values from form + for(category of document.getElementsByClassName('resmodData')) + config = mergeDeep(config, objectifyDiv(category)); + + requestModel(config); +} + +function requestModel(config){ + + form = document.getElementById('validate'); + form.elements['config'].value = JSON.stringify(config); + form.submit(); +} + + diff --git a/sdv/docker/sdvmodel/website/index.html b/sdv/docker/sdvmodel/website/index.html new file mode 100644 index 0000000..b08481a --- /dev/null +++ b/sdv/docker/sdvmodel/website/index.html @@ -0,0 +1,124 @@ +<!DOCTYPE html> +<html> +<head> + <title>Resource Modelling</title> + + <meta content="text/html;charset=utf-8" http-equiv="Content-Type"> + <meta content="utf-8" http-equiv="encoding"> + + <link rel="stylesheet" href="style/index.css"> + <link rel="stylesheet" href="style/array.css"> + + <script src="mergeDeep.js"></script> + <script src="readFromHTML.js"></script> + <script src="actions.js"></script> + <script src="controller.js"></script> + + + + <link href="https://fonts.googleapis.com/css2?family=Ubuntu&display=swap" rel="stylesheet"> +</head> + + + +<body> + +<div id="site"><form> + + + <fieldset class='resmodData'> + <legend class="collapse-arrow" onclick="toggleClass(this,'expand-arrow')"> + VNFs + </legend> + <div> + + <label>VNFs:</label><br> + <div class="arr" name="vnf_profiles"> + + <label>VNF Name:</label> + <input type="text" name="profile_name" value="userplane_app"><br> + <label>VCPUs:</label> + <input type="text" name="vcpus" value="36"><br> + <label>NUMAs:</label> + <input type="text" name="numas" value="1"><br> + <label>CPUs in NUMA0:</label> + <input type="text" name="cpus_in_numa0" value="36"><br> + <label>CPUs in NUMA1:</label> + <input type="text" name="cpus_in_numa1" value="0"><br> + <label>RAM Size</label> + <input type="text" name="ram_size" value="64"><br> + <label>Interface Count</label> + <input type="text" name="interfaces" value="6"><br> + <label>SRIOV Support</label> + <input type="text" name="sriov_support" value="yes"><br> + <label>VIRTIO Support</label> + <input type="text" name="virtio_support" value="yes"><br> + <label>Availability Zone</label> + <input type="text" name="availability_zone" value="dataplane_zone"><br> + <label>CPU Policy</label> + <input type="text" name="cpu_policy" value="sw==decicated"><br> + <label>CPU Pinned</label> + <input type="text" name="cpu_pinned" value="yes"><br> + <label>Number of VNFs</label> + <input type="text" name="num_of_vnfs" value="9"><br> + + </div><div class="add-button" onclick="duplicate(this)"></div> + + </div> + </fieldset> + + + <fieldset class='resmodData'> + <legend class="collapse-arrow" onclick="toggleClass(this,'expand-arrow')"> + Compute Hardware + </legend> + <div> + <label>Compute Node Hardware Info</label><br> + <div name="hardware_profile"> + <label> Total CPUs:</label> + <input type="text" name="vcpus" value="80"><br> + + <label> NUMAs:</label> + <input type="text" name="numas" value="2"><br> + + <label> #of Numa0 CPUS for VNFs:</label> + <input type="text" name="numa0_cpus_4vnfs" value="36"><br> + + <label> #of Numa1 CPUS for VNFs:</label> + <input type="text" name="numa1_cpus_4vnfs" value="36"><br> + + <label>RAM Size:</label> + <input type="text" name="ram_size" value="384 "><br> + <label>CPU Isolation Set</label> + <input type="text" name="cpu_isol_set" value="0-44"><br> + + <label>NICs:</label><br> + <div class="arr" name="nics"> + <div> + <label>NIC Name:</label> + <input type="text" name="name" 
value="ens785f0"><br> + <label>NIC Type:</label> + <input type="text" name="type" value="sriov"> + <label>NIC Speed:</label> + <input type="text" name="speed" value="25"><br> + <label>NIC NUMA:</label> + <input type="text" name="numa" value="0"><br> + </div> + </div> + <div class="add-button" onclick="duplicate(this)"></div> + </div> + </div> + </fieldset> + +</form> + + +<form id="validate" action="/validate" method="post"> + <input type="hidden" name="config"> +</form> +<button id="save-changes" type="button" onclick="getModel()">Show Modelling</button> + + +</div> +</body> +</html> diff --git a/sdv/docker/sdvmodel/website/mergeDeep.js b/sdv/docker/sdvmodel/website/mergeDeep.js new file mode 100644 index 0000000..970983a --- /dev/null +++ b/sdv/docker/sdvmodel/website/mergeDeep.js @@ -0,0 +1,46 @@ +/* Copyright 2020 University Of Delhi. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + + +/** +* Performs a deep merge of objects and returns new object. Does not modify +* objects (immutable) and merges arrays via concatenation. +* +* @param {...object} objects - Objects to merge +* @returns {object} New object with merged key/values +*/ +function mergeDeep(...objects) { + const isObject = obj => obj && typeof obj === 'object'; + + return objects.reduce((prev, obj) => { + Object.keys(obj).forEach(key => { + const pVal = prev[key]; + const oVal = obj[key]; + + if (Array.isArray(pVal) && Array.isArray(oVal)) { + prev[key] = pVal.concat(...oVal); + } + else if (isObject(pVal) && isObject(oVal)) { + prev[key] = mergeDeep(pVal, oVal); + } + else { + prev[key] = oVal; + } + }); + + return prev; + }, {}); +} + diff --git a/sdv/docker/sdvmodel/website/readFromHTML.js b/sdv/docker/sdvmodel/website/readFromHTML.js new file mode 100644 index 0000000..f14e089 --- /dev/null +++ b/sdv/docker/sdvmodel/website/readFromHTML.js @@ -0,0 +1,72 @@ +/* Copyright 2020 University Of Delhi. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+*/ + + +/** +* Reads HTML contents into javascript object +* +* +* @param {element} element to read, can be input or div element +* @returns {object} New object with values read +*/ +function objectifyDiv(element){ + var obj = {}; + var el = element.childNodes; + for(var i in el){ + + if(el[i] instanceof HTMLInputElement && el[i].hasAttribute('name')) { + + if(el[i].type == 'text') + obj = mergeDeep(obj, objectify(el[i].name, el[i].value)); + + } + if(el[i] instanceof HTMLSelectElement && el[i].hasAttribute('name')){ + obj = mergeDeep(obj, objectify(el[i].name, el[i].value)); + } + if(el[i] instanceof HTMLDivElement){ + + if(el[i].classList.contains('arr')){ + var key = el[i].getAttribute('name'); + var value = objectifyDiv(el[i]); + if(obj[key] == undefined) + obj[key] =[]; + obj[key].push(value[key]); + } + else + obj = mergeDeep(obj, objectifyDiv(el[i])); + + } + } + + if(element.hasAttribute('name')){ + var newobj = {}; + newobj[element.getAttribute('name')] = obj; + return newobj; + } + return obj; +} + + + +function objectify(key, value){ + var obj = {}; + var keys = key.split('.'); + for(var i = keys.length-1; i >= 0; i--){ + obj[keys[i]] = value; + value = obj; + obj = {}; + } + return value; +} diff --git a/sdv/docker/sdvmodel/website/style/array.css b/sdv/docker/sdvmodel/website/style/array.css new file mode 100644 index 0000000..45a53fc --- /dev/null +++ b/sdv/docker/sdvmodel/website/style/array.css @@ -0,0 +1,55 @@ + +@keyframes popup { + 0%{ + transform: scale(0.5); + } + 90%{ + transform: scale(1.1); + } + 100%{ + transform: scale(1.0); + } +} + +.arr{ + border-radius: 6px; + border-style: dashed; + border-color: #c9c9c9; + border-width: 2px; + display: inline-block; + padding-right: 3em; + margin-right: 15px; + margin-bottom: 5px; + position: relative; + animation: popup 0.2s ease; +} + + +.add-button{ + background-image: url("../assets/plus-circle-solid.svg"); + opacity: 0.7; + width: 2em; + height: 2em; + display: inline-block; + left: 0; +} +.add-button:hover{ + opacity: 1 +} + + + +.del-button{ + background-image: url("../assets/trash-alt-regular.svg"); + background-repeat: no-repeat; + opacity: 0.7; + width: 2em; + height: 2em; + position: absolute; + right: 0.5em; + bottom: 0.5em; +} +.del-button:hover{ + opacity: 1 +} + diff --git a/sdv/docker/sdvmodel/website/style/index.css b/sdv/docker/sdvmodel/website/style/index.css new file mode 100644 index 0000000..882c31a --- /dev/null +++ b/sdv/docker/sdvmodel/website/style/index.css @@ -0,0 +1,198 @@ + +html,body +{ + padding: 0px; + margin: 0px; + font-family: 'Ubuntu', sans-serif; + color: #333333; + background-color: white; + scroll-behavior: smooth; +} + + +#site{ + margin: 8%; + margin-top: 0px; + padding-bottom: 90px; +} + +.add-new{ + border: solid #c9c9c9; + border-width: 1px; + color: #f5f5f5; + opacity: 0.8; + font-size: 1.1em; + padding: 15px; + padding-top: 3px; + padding-bottom: 3px; + margin: 30px; + cursor: pointer; + display: inline-block; + text-align: center; + vertical-align: middle; + background-color: #1fad4e; +} +.add-new:hover{ + opacity: 1; + text-shadow: #f5f5f5 0px 0px 1px; +} + +.add-new img{ + width: 1em; + height: 1em; + margin-left: 5px; + vertical-align: middle; + filter: brightness(0) invert(1); + box-shadow: white 0px 0px 1px; +} + +fieldset { + border-radius: 6px; + border-color: #c9c9c9; + border-width: 1px; + margin-top: 45px; + margin-bottom: 45px; + background-color: #f5f5f5; +} + + + +fieldset legend{ + font-weight: bold; + padding: 1em; + text-shadow: #bbb 0px 0px 2px; +} + 
+.collapse-arrow::after{ + content: "â–²"; + margin: 1em; + color: #61b9ff; + text-shadow: #61b9ff 0px 0px 4px; + cursor: pointer; + font-size: 1.2em; +} + +.expand-arrow::after { + content: "â–¼"; +} + +@keyframes expand { + 0%{ + transform: scale(0.5); + } + 90%{ + transform: scale(1.1); + } + 100%{ + transform: scale(1.0); + } +} + +/* when expand-arrow hide all siblings */ +.expand-arrow ~ *{ + display:none; +} + + + + +fieldset label{ + padding: 1em; + padding-left: 2em; + text-shadow: #f0f0f0 0px 0px 2px; + width: 230px; + display: inline-block; +} + + +fieldset input{ + border: solid #c9c9c9; + border-radius: 6px; + border-width: 1px; + padding: 5px; + padding-left: 15px; + color: #333333; + width: 150px; +} + +fieldset input:hover{ + background-color: #e8e8e8; +} + +button{ + border: solid #c9c9c9; + border-width: 1px; + height: 45px; + width: 170px; + color: #f5f5f5; + opacity: 0.8; + font-size: 1.1em; + margin: 30px; + cursor: pointer; +} +button:hover{ + opacity: 1; + text-shadow: #f5f5f5 0px 0px 1px; +} + +#save-changes{ + background-color: #50affa; + float: right; +} + +#save-changes::after{ + content: ""; + clear: both; + display: inline; +} + +#delete{ + background-color: #fa3c3c; +} + +#reload{ + background-color: #1fad4e; +} + +button img{ + width: 1em; + height: 1em; + margin-right: 10px; + margin-left: 10px; + filter: brightness(0) invert(1); +} + + + + +#save-changes.changed{ + background-color: #deae00; + +} + + +select{ + background-color: white; + appearance: none; + -webkit-appearance: none; + -moz-appearance: none; + border: solid #c9c9c9; + border-radius: 6px; + border-width: 1px; + padding: 3px; + padding-left: 15px; + color: #333333; + min-width: 150px; + cursor: pointer; +} + +select:hover{ + background-color: #e8e8e8; +} + + +.select::after{ + content: "â–¼"; + color: transparent; + text-shadow: #61b9ff -1.5em 0px 0px; +} diff --git a/sdv/docker/sdvmodel/website/style/report.css b/sdv/docker/sdvmodel/website/style/report.css new file mode 100644 index 0000000..bc259fb --- /dev/null +++ b/sdv/docker/sdvmodel/website/style/report.css @@ -0,0 +1,90 @@ + + +.report{ + text-align: left; + margin: 8%; + margin-top: 3%; + padding: 50px; + + background-color: #f5f5f5; + border-radius: 6px; + border-color: #c9c9c9; + border-width: 1px; + border-style: solid; + text-shadow: #CCC 0px 0px 2px; + word-spacing: 0.1em; + line-height: 1.5em; +} + +hr{ + width: 60%; +} + + +button{ + background-color: #50affa; + float: right; +} + + +.holder{ + display: flex; + flex-wrap: wrap; + width: 98%; +} + + +.tab{ + background-color: #f0f0f0; + border-radius: 8px; + border-bottom-right-radius: 0px; + border-bottom-left-radius: 0px; + border-color: #757474; + border-width: 2px; + border-style: solid; + display: inline-block; + padding: 3px; + padding-left: 6px; + padding-right: 10px; +} + +.server{ + background-image: url("/assets/server.svg"); + background-repeat: repeat; + background-size: auto 100%; + + background-color: #e3e3e3; + border-color: #757474; + border-width: 8px; + border-style: double; +} + + + +.vnf{ + min-width: 80px; + margin: 10px; + + font-size: 0.9em; + padding: 5px; + + + border-radius: 8px; + border-color: #3dbbf5; + border-width: 3px; + border-style: solid; + + background-color: #e6f4fa; +} + +.vnf::after{ + clear: both; + content: ''; + display: inline-block; +} + +.vnf > img{ + width: 35px; + margin-left: 10px; + float: right +} diff --git a/sdv/docker/sdvstate/core/__init__.py b/sdv/docker/sdvstate/core/__init__.py index ed33752..47830c5 100644 --- 
a/sdv/docker/sdvstate/core/__init__.py +++ b/sdv/docker/sdvstate/core/__init__.py @@ -19,3 +19,4 @@ contains all program specific dependencies """ from .load_pdf import load_pdf +from .display_report import display_report diff --git a/sdv/docker/sdvstate/core/display_report.py b/sdv/docker/sdvstate/core/display_report.py new file mode 100644 index 0000000..97ccb55 --- /dev/null +++ b/sdv/docker/sdvstate/core/display_report.py @@ -0,0 +1,57 @@ +# Copyright 2020 University Of Delhi. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Display Report +""" + +import logging +from datetime import datetime as dt + + + +def display_report(report): + """ + Logs the final report + """ + installer = report['installer'] + result = report['criteria'] + start_time = dt.strptime(report['start_date'], '%Y-%m-%d %H:%M:%S') + stop_time = dt.strptime(report['stop_date'], '%Y-%m-%d %H:%M:%S') + duration = (stop_time - start_time).total_seconds() + + logger = logging.getLogger(__name__) + logger.info('') + logger.info('') + logger.info('========================================') + logger.info('') + logger.info(f' Installer: {installer}') + logger.info(f' Duration: {duration}') + logger.info(f' Result: {result}') + logger.info('') + logger.info('') + logger.info(f' CHECKS PASSED:') + logger.info(' =============') + for case_name in report['details']['pass']: + logger.info(f' {case_name}') + logger.info('') + logger.info('') + logger.info(f' CHECKS FAILED:') + logger.info(' =============') + for case_name in report['details']['fail']: + logger.info(f' {case_name}') + logger.info('') + logger.info('========================================') + logger.info('') + logger.info('') diff --git a/sdv/docker/sdvstate/example/kubepod10 b/sdv/docker/sdvstate/example/kubepod10 deleted file mode 100644 index 2717fc6..0000000 --- a/sdv/docker/sdvstate/example/kubepod10 +++ /dev/null @@ -1,20 +0,0 @@ ---- -apiVersion: v1 -clusters: -- cluster: - server: https://10.10.100.21:6553 - insecure-skip-tls-verify: true - name: kubernetes -contexts: -- context: - cluster: kubernetes - user: admin - name: admin@kubernetes -current-context: admin@kubernetes -kind: Config -preferences: {} -users: -- name: admin - user: - client-certificate-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURZRENDQWtpZ0F3SUJBZ0lVQmN1akh5bmUzMFBnMUw5MnNJZERmWEtlVm5Vd0RRWUpLb1pJaHZjTkFRRUwKQlFBd0tqRVRNQkVHQTFVRUNoTUtTM1ZpWlhKdVpYUmxjekVUTUJFR0ExVUVBeE1LYTNWaVpYSnVaWFJsY3pBZQpGdzB4T1RFd01UY3hOakUzTURCYUZ3MHlNREV3TVRZeE5qRTNNREJhTUNreEZ6QVZCZ05WQkFvVERuTjVjM1JsCmJUcHRZWE4wWlhKek1RNHdEQVlEVlFRREV3VmhaRzFwYmpDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVAKQURDQ0FRb0NnZ0VCQUxRVnpUeG1UTUZHRkdiWks1OGwyUXZkT2hUZVZ2dzVKTWJyVE8wY2hhd1BtdmJXeXczSApMeUFpNTJsZkU5VGdONXBBVzVrVzJmS2tkREMwRnNXZXF2VDV4SFVvbVFGa3RRM2RWMEJnMXRXYVNIdnVHMXQwCndac2hIQWN6RTl0ZS93dFR6ajhkdFl0ZXdIbXpzd1J1bk9sRnFaUVZZT1hReENPYkEvZ2Z1V0o5RUFKNlduZDcKcUhZdEJvbzR0RkhVTmFocDRwUXNNS1VlbDZPUnA4NEM0WnNIenYyZm9Jb2pYd1V2TmJMNUE1VlZjallrK0taZwpCc3IyMWowT0c4N1F3Q0ZuOThMelJqUU92L01FTFRPOEoxemIrK3pvbkg0ZkpDckc1Q2RKNUFQbU81UnBEMGluCmJKNnFOR2QyY0kxaGdVWWx2aWI4QURXc21VelRkWU5wa0JrQ0F3RUFBYU4vTUgwd0RnWURWUjBQQVFIL0JBUUQKQWdXZ01CMEdBMVVkSlFRV01CUUdDQ3NHQVFVRkJ3TUJCZ2dyQmdFRkJRY0RBakFNQmdOVkhSTUJBZjhFQWpBQQpNQjBHQTFVZERnUVdCQlRKSXRERnJwcGR3RExOTWFWY2IzQ1JiNVhBZURBZkJnTlZIU01FR0RBV2dCUjcwU1Z4Ck8wVlpzNXVRQkpaOEZ0cmluT25EaERBTkJna3Foa2lHOXcwQkFRc0ZBQU9DQVFFQUFBRUQrQXNaaEFsWnRucTkKR0w5a1dvWDd5d0ZmR2FRdEgvSnFaT0VGYkx0bFdLVmlHc3gyVWlSRTN2U3l6VlFpMVBhNGR3cXF1MXc4bVNIVworc1REVlN1aGE5Q2NlbzcvT3F4dnl3ME43c0t2L0NPeml6YWF5djlXTmlBYXhFNjRMSk1sTWlrS2wrZG1zSlVMCktVUXJLVzhvcnlhZk4zSzZnd3NLS2FtU2Mzb1J4TG9wWHVveEo5a2lyVG5DOWpMVGdWSU1EM0I5aEtleEtLQ3YKb1hKVkUyMWViVnNiOExiSUcyaldRcWlnVktxWEFRN3gwcEt6RFcvN1dIc1JyRFRkbFpYU0ZUZS9IQUpZd2tuVwp1cmd2blJkZ1BYUHl6cHJhWU9iTCtTV3dvejRTS216OGV5TWpQcFd0TkFZQTdIYm5XT3RqU2NXNFJKWnpaQ1V3CldicStNZz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K - client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb2dJQkFBS0NBUUVBdEJYTlBHWk13VVlVWnRrcm55WFpDOTA2Rk41Vy9Ea2t4dXRNN1J5RnJBK2E5dGJMCkRjY3ZJQ0xuYVY4VDFPQTNta0JibVJiWjhxUjBNTFFXeFo2cTlQbkVkU2laQVdTMURkMVhRR0RXMVpwSWUrNGIKVzNUQm15RWNCek1UMjE3L0MxUE9QeDIxaTE3QWViT3pCRzZjNlVXcGxCVmc1ZERFSTVzRCtCKzVZbjBRQW5wYQpkM3VvZGkwR2lqaTBVZFExcUduaWxDd3dwUjZYbzVHbnpnTGhtd2ZPL1orZ2lpTmZCUzgxc3ZrRGxWVnlOaVQ0CnBtQUd5dmJXUFE0Ynp0REFJV2Yzd3ZOR05BNi84d1F0TTd3blhOdjc3T2ljZmg4a0tzYmtKMG5rQStZN2xHa1AKU0tkc25xbzBaM1p3aldHQlJpVytKdndBTmF5WlROTjFnMm1RR1FJREFRQUJBb0lCQUYxcnlmS0JaMlFFUk9VdAoyQkZSZ3cxQ2tMVHV1dFZSbDZDUnhIQmxFWHMzQlQ3MElwWmRORGRKcEI3bnNkUUhGUkV5UGRKbkpsMVhydWJ0Ckpic1RHc0RIS1lGVnREb2kwa0lGQnhSZ3FGSmJIU3NkVkpmWE0vQ1Q5b1JOblFsNmVIaVoyeTZtN04wR0pIZCsKSDJvM0w3TmI3aUxpREVoc1NyUGw0T05CSWR6VEFQYy91b3hQbVQxQ2ZiQ3hVU051d1EzOS9mWHJVNzJTOFU4ZAoybXd2dDZpczQ2c09IWkNkNG0xNGJENE11Y2VsUG83V2ptT0hRZlUzd0g1NTE5Q0FtV0hDRFA0ZndNY3dYWlJUClZWUHcyU1VZRW9lMS9DM083cVVqMlRTMldJdysveHZOQml6WFpqajZTdmd5ME1FREtzamhsbWM2OE81MVAvajgKcmh3dFp1RUNnWUVBeTE1c2NuRFVidWFob1dJNzlCVzAxdWlyY0V1Vnp2TXhmc3ptbEJhZnp3dDZPL0FNQ3l1NwpKS2ZNR1JFQmxXR1RGMUhiUlREbGZzK3lTNFpjKzRQUDlmZVNVNFI1NWJkczlZRU44K3liSjJvbzVPcFlGOVFkCmtoL2JQRUZkN3pTbVQ0R2l2V2lxNklqVlFrTGNOVWFlczN4WlZ2d2NqUXd5cENtTU1aRlJtN1VDZ1lFQTRyREsKSTZITUdDcTR4eWZuaUhhT0ZkNlVEYjdmS3ZJNTJZYnU5R0IyR0JXdmkrcEhOZ1V1dU44SG9GT0hQS2NuYnkweQoyS0FwRjVaYTFSZUNHNGdSTE8vMjMybU42VnVCMERGWlNnUEFITTJKd1BtOFUrYjlWZFZaZEI1ZWJrTDhxNXlkCmZqM3F3S2NRVTZMR09wVy83ZEhsMUVUWE9kMUYzTi8wNFdzcGlWVUNnWUJtb0UrNXNKYURJSCtRSVRLQUNqUW4KLzJJRVdTQlFQd2xMSTd0NEg3S2xtUFVtS2d6cDFqZXFWOEwzSTAzWlJGUW1BSGpXZ2NaT0tDR2hXenl3NytPUwpERTBiT0U4TFRYVCtyeEdMZG1zVmlNejZPQWdjZmo0dDcwV0RNcmxrYlAxQVFmc04ralBGQk1nWm1BUG9IcXNYCmlEak5YSXhMNFV2czY4cURlUUhsd1FLQmdGTGs3UFg4cTJKRzlReTJsZDc3NDFjeDdoZmNyVVRLRU1kdnBSK3QKeW1GaVJMQTRPbFFScnhVaFVXdWFQOEM1S3gxbmZNbGtQOEtGVTYvS2llUkJiRzV2VFdwQzhnYmNWR3JxTU1sMAo5NkpRc3NmalNxK3Zyd0hkSTNubnh
RWXk3cXhlZCtUN0JVWHZrWFBUK1FMaFVhN0lhMitrd01OREc5SDUvMVVTCjE3eUZBb0dBZjBubW53RjJRMTZVYXhhanI2M3hjUFlQS09FY1pHTFcxalhoMHVpNFJnK3lscEdSZ25xdVJzMk8KL3RDYTlUYm1JcG9KZHA3aWFPOTIzenI1MWphcnlBOCtuWWhoZ2dRQ29IdWNIY0ZBR213Ryt6R2NyMlBZYklseAo5TkVsUEFZM2pndFNWTW4yUkhMak0wUWVuTUQ1aG1HcHQvWVJOd3hPNkNBdXhaNUhzOTQ9Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg== diff --git a/sdv/docker/sdvstate/example/kubepod15 b/sdv/docker/sdvstate/example/kubepod15 deleted file mode 100644 index 7710cbc..0000000 --- a/sdv/docker/sdvstate/example/kubepod15 +++ /dev/null @@ -1,20 +0,0 @@ ---- -apiVersion: v1 -clusters: -- cluster: - server: https://10.10.150.21:6553 - insecure-skip-tls-verify: true - name: kubernetes -contexts: -- context: - cluster: kubernetes - user: admin - name: admin@kubernetes -current-context: admin@kubernetes -kind: Config -preferences: {} -users: -- name: admin - user: - client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURZRENDQWtpZ0F3SUJBZ0lVRFZ1T2IvczEyKzR1dGNTRnBuOEhQbFlIVWFBd0RRWUpLb1pJaHZjTkFRRUwKQlFBd0tqRVRNQkVHQTFVRUNoTUtTM1ZpWlhKdVpYUmxjekVUTUJFR0ExVUVBeE1LYTNWaVpYSnVaWFJsY3pBZQpGdzB4T1RFeU1UVXdNelExTURCYUZ3MHlNREV5TVRRd016UTFNREJhTUNreEZ6QVZCZ05WQkFvVERuTjVjM1JsCmJUcHRZWE4wWlhKek1RNHdEQVlEVlFRREV3VmhaRzFwYmpDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVAKQURDQ0FRb0NnZ0VCQUo0Z3R2ZUMyVktnV1BwRHRMTWE3d2k0aE1hZlBEZUVveUg2LzBrWUdKWEF6TTRuVE55NApZdXVldCtBZUdDNnJ6cHNDRG1GcVBrVkRRM1ZkMEsrd05VSXZmOGpZVXlMbWVXUEZxNStqV25SaHpSbUVyT2VBCk9UK0lMa0pFMUN3T2hPbEtjMlB0TjhPUzdFbVR1NmxkZHQ4OXM5Z1M5aXNmbm5JQmY2YkhNdGdqWWJrZEEzbEQKb0VLL282VS9LdkpydTN2L01IRXl6VUwwbjB4UHpHK3ZPVDRpRVZRV3A3M3o0d2gzalN4SENvQmJ4RU9hTk5mSgpoQjNFMUZhSTZMY3U4VHdWdnZ3WStHc3Z3NURXblJ5VXczL0REUXpNMGNQZkc2WUNmeWhjQkVJSUJ5ZEtwUTdYCi9NZ0p4MWV1QmRHdVFheHNaNHhvS0taZW4vQWhCbWZDTUVjQ0F3RUFBYU4vTUgwd0RnWURWUjBQQVFIL0JBUUQKQWdXZ01CMEdBMVVkSlFRV01CUUdDQ3NHQVFVRkJ3TUJCZ2dyQmdFRkJRY0RBakFNQmdOVkhSTUJBZjhFQWpBQQpNQjBHQTFVZERnUVdCQlJMYUVVWXRRaGxMQnFCQUtJdTRrUDRwWWhRTlRBZkJnTlZIU01FR0RBV2dCU2g3bE54CmJXZ1pUSjZKRkUwdHhTdGdIS3hqd3pBTkJna3Foa2lHOXcwQkFRc0ZBQU9DQVFFQXBpWXFVaUMyNFVLM2YxUW8KMnp1YTlTKzVaeWZXOTgvcG9zWDVFZ0x2c25uYmJoaXRFeXptc1RQaG1yNTZ2WmNkVlVjb3B4NFc2M0luaDRUYQpHQlBUMjVwdGZIVEE1bTNURDIrb1dFQXlKMHhBbjR3M0VpdzRhYmY0aCs1Q0JlTm9ldXJlOXhMYlIzNnZZSG9aCnQ0aVk1Q3BraHhud3VLV0FZTnE4a2lsQTlvUzV2bm5ndUMxYVJEckQ5bTJLZlk1aWtiRndGWWUzRzRLTXAyaUgKWVpiMUxhZ3BlZHRjbTJSNnhNZ0RVSktKbkN5WFpIcXp1WHMzT1h1TTFRVzZlMVl2VU1aQUdMV25NYkJ2S3MzNQpyMUdsdFY5OUh0WHBoTnBqeFd6a1RNS0s2K0wwQ0xxNXducVZjVzNUK1Y1V05HbkhWMThBMkhEM0NUc3NRWmxBCm5pbGVXdz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K - client-key-data: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb2dJQkFBS0NBUUVBbmlDMjk0TFpVcUJZK2tPMHN4cnZDTGlFeHA4OE40U2pJZnIvU1JnWWxjRE16aWRNCjNMaGk2NTYzNEI0WUxxdk9td0lPWVdvK1JVTkRkVjNRcjdBMVFpOS95TmhUSXVaNVk4V3JuNk5hZEdITkdZU3MKNTRBNVA0Z3VRa1RVTEE2RTZVcHpZKzAzdzVMc1NaTzdxVjEyM3oyejJCTDJLeCtlY2dGL3BzY3kyQ05odVIwRAplVU9nUXIranBUOHE4bXU3ZS84d2NUTE5RdlNmVEUvTWI2ODVQaUlSVkJhbnZmUGpDSGVOTEVjS2dGdkVRNW8wCjE4bUVIY1RVVm9qb3R5N3hQQlcrL0JqNGF5L0RrTmFkSEpURGY4TU5ETXpSdzk4YnBnSi9LRndFUWdnSEowcWwKRHRmOHlBbkhWNjRGMGE1QnJHeG5qR2dvcGw2ZjhDRUdaOEl3UndJREFRQUJBb0lCQUJ5cFkyQ3p0LzZSRCsrMAo3QUQyNWRMWDEwRkZSWjN1amI4d0JxdlNFVXE3bXFQWFhjZzRKNzM3aytxc3FjZHozc3diOEUxWis1V0VYcXJjCmFXSWU5MWhhMGJldTlrckNLY2lhNE1QYjBSNTlSN2JUWkovRmp4cmo3VGFYMFRsM0hFSkkrMmRtYlJBbkJtdEQKdXVVMUNzSG1KajRKR2RPeE5JQUhvNEt3WXBmb2NPME9acFhVZFlOQktwSUhseFhOWjJ4RkJiVzh6a1FRekZ6MAplWmQ3YzZNUmlaZFRPd0pqNWl4c0FWSytBTXVGRkFSamNmc1FBZktlS2J2YUdDTFBvQmFSak5US0Z6MEVhWmlZClNTM2NYMDRCTnk0NWNPVWlHK3RsSlgxeEhGT04weittMlArZWQ1dCtHSHB3UUg4ak1ZQzhkZlJCUVdSeGNCTjcKemd3NWp5RUNnWUVBeEM4REVPWm9QNXhOcFZwbXB6OVAvY2NiVE9XekdUTlZpZzFhcmdSMC90RWRRK0lKQlpDdgpqMzlWVk9FeDUrSlJHVmhmSG9NczlXZVZuSmJSZTFyN1QrWU1WOVFCOVYvbDVhdkJoc0Vhc1NpaThsVEpzT3dXCmJRTDQyMDd1QVhGemFoMHhPS253T2gwN3p0TTBFMXQwWFVrR1BFSzE2bldPbFc4K3IyOEEvSmNDZ1lFQXpsZEgKSlo5RTUxT0dmZU96Y2VQdWV0ZU9PYnNvWVU4eHhOaUg0MitKWVpKNFVqNVY2RGw4OHdEaS91NVNVOVBtUWM1dAozOFpncXdRRjRFWklTbk0rbElKZnluUmhmWU9YZXB4bEJnVFBVb2dUeGY1bm5jZjNOWGZrVVJxUWViZURqTEdjClBrbU1LbE9kK21jRnYxdGI4eXhzbVEwbjhFVWplVkdLT3JKVjc5RUNnWUJ1NC9QanRaanZlN1lYNVFWcE84eEgKTWlnb1N4MzAvS001S1ZzOFNhQ24rQ09HbjFsaUgrcGNQaWxKbFJEVWRZUkp3ejNnelZ5NFNoaXpManl5Y1RiawpickJEWkw3R3A3SVhKQUo3M09MdGlINnlZMkt0OG9TcWthZUFyeGl4RUNPZ3MyZURFK3VKcmNTRW43VXJ5K0gyCmFMUnhrM09vVjFLRS9TQjlvVXo1ZVFLQmdFZ1g4b0hRbmhCOC9IYXJ3aHkrMktvTyttQnRaZlJwNlNldngvck4KRTZFRnZnaHVRekc2TkUvck5XU0EvRDdSd0plcGVuWS9KN05ZMm55NzBiSkJoZEg1bzJKbk8xRFJVM0hCaHdLTgpWNnFzWk13KzBSRXR0cy8xcmM0d2k5NGJJbGxjRFEwdVFVemdua2ZKQ3hjSzRwdWFIKzl4eTB5RnU1azl4aUF3CkF4cWhBb0dBUlc3Qno1UjlSOWZKVUp3ZEhvMGpRVmhrL3lzSWlmQWRxQ3d6blpOcVM4cU9KMXpsSExhWkozMXcKbVdjNzA3UUN6Q3BOMk1YV2lnMzc2VVJpdXFtcEJTZW14bzFRendhQWJhK0Yvd1I1VzlncndzTmZ2RDR6TkVHbgp2dFllSS9taXlJOVFaay9PVkcrblRLL1ZIZExha3FOVFNKQUl6WSttZ2Y4SWphUTUrVW89Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==
\ No newline at end of file diff --git a/sdv/docker/sdvstate/example/state.yml b/sdv/docker/sdvstate/example/state.yml index 1ca61e1..89dc548 100644 --- a/sdv/docker/sdvstate/example/state.yml +++ b/sdv/docker/sdvstate/example/state.yml @@ -15,3 +15,6 @@ PDF_FILE: example/intel-pod10.json # Path to kube-config file KUBE_CONFIG : example/kubepod10 +MASTER_ROLE_NAME : masters +WORKER_ROLE_NAME : workers + diff --git a/sdv/docker/sdvstate/server b/sdv/docker/sdvstate/server index ca37eca..ca37eca 100755..100644 --- a/sdv/docker/sdvstate/server +++ b/sdv/docker/sdvstate/server diff --git a/sdv/docker/sdvstate/settings/common.yml b/sdv/docker/sdvstate/settings/common.yml index 65f131c..f25f861 100644 --- a/sdv/docker/sdvstate/settings/common.yml +++ b/sdv/docker/sdvstate/settings/common.yml @@ -12,7 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. - +project_name: cirv-sdv +project_version: 1.0 ################################## # Program behavior configurations @@ -24,4 +25,9 @@ log_verbosity: info # Results results_dir: /tmp/state/ -save_results_locally: True
\ No newline at end of file +save_results_locally: True + +# Test API +enable_testapi: True +testapi_url: http://testresults.opnfv.org/test/api/v1 + diff --git a/sdv/docker/sdvstate/state b/sdv/docker/sdvstate/state index 41d17a4..353df71 100755 --- a/sdv/docker/sdvstate/state +++ b/sdv/docker/sdvstate/state @@ -27,10 +27,12 @@ import re import ast import sys from datetime import datetime +import requests from tools.conf import settings from tools.result_api import result_api, Local from core import load_pdf +from core import display_report from validator import AirshipValidator @@ -229,6 +231,19 @@ def main(): if installer == 'airship': airship = AirshipValidator() airship.validate() + report = airship.get_report() + + + # Displaying Report + display_report(report) + + if settings.getValue('enable_testapi'): + logger = logging.getLogger(__name__) + logger.info('Publishing results to TestAPI') + url = settings.getValue('testapi_url') + url += "/results/" + response = requests.post(url, json=report) + logger.info(response) diff --git a/sdv/docker/sdvstate/validator/airship/__init__.py b/sdv/docker/sdvstate/validator/airship/__init__.py new file mode 100644 index 0000000..78e42c4 --- /dev/null +++ b/sdv/docker/sdvstate/validator/airship/__init__.py @@ -0,0 +1,49 @@ +# Copyright 2020 University Of Delhi. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +""" +Package for Airship +""" + + +### Pod Health Checks +from .pod_health_check import pod_health_check + +### Ceph Health Checks +from .ceph_check import ceph_health_check + +### Monitoring & Logging Agents Checks +from .monitoring_logging_agent_check import prometheus_check +from .monitoring_logging_agent_check import grafana_check +# from .monitoring_logging_agent_check import prometheus_alert_manager_check +from .monitoring_logging_agent_check import elasticsearch_check +from .monitoring_logging_agent_check import kibana_check +from .monitoring_logging_agent_check import nagios_check +from .monitoring_logging_agent_check import elasticsearch_exporter_check +from .monitoring_logging_agent_check import fluentd_exporter_check + +### Network Checks +from .network_check import physical_network_check + +### Compute Related Checks +from .compute_check import reserved_vnf_cores_check +from .compute_check import isolated_cores_check +from .compute_check import vswitch_pmd_cores_check +from .compute_check import vswitch_dpdk_lcores_check +from .compute_check import os_reserved_cores_check +from .compute_check import nova_scheduler_filters_check +from .compute_check import cpu_allocation_ratio_check + +from .store_result import store_result diff --git a/sdv/docker/sdvstate/validator/airship/airship.py b/sdv/docker/sdvstate/validator/airship/airship.py index e77f06f..18de66d 100644 --- a/sdv/docker/sdvstate/validator/airship/airship.py +++ b/sdv/docker/sdvstate/validator/airship/airship.py @@ -18,16 +18,15 @@ Airship Validator """ import logging -import ast -import json +from datetime import datetime as dt from tools.conf import settings -from tools.result_api import result_api, rfile -from tools.kube_utils import * +from tools.kube_utils import load_kube_api from validator.validator import Validator -## Checks -from .pod_health_check import pod_health_check +from . 
import * + + @@ -42,10 +41,87 @@ class AirshipValidator(Validator): super(AirshipValidator, self).__init__() self._logger = logging.getLogger(__name__) + self._report = {"installer": "Airship", + "criteria": "pass", + "details": {"total_checks": 0, + "pass": [], + "fail": [], + "metadata": {} + } + } + load_kube_api() - + def validate(self): """ + Validation method """ - pod_health_check() + + self._report['scenario'] = 'none' + self._report['case_name'] = 'ook_airship' + self._report['start_date'] = dt.now().strftime('%Y-%m-%d %H:%M:%S') + + + # PLATFORM CHECKS + self.update_report(pod_health_check()) + + # STORAGE CHECKS + self.update_report(ceph_health_check()) + + # MONITORING & LOGGING AGENTS CHECKS + self.update_report(prometheus_check()) + self.update_report(grafana_check()) + ## current version of AlertManager doesn't support this + # prometheus_alert_manager_check() + self.update_report(elasticsearch_check()) + self.update_report(kibana_check()) + self.update_report(nagios_check()) + self.update_report(elasticsearch_exporter_check()) + self.update_report(fluentd_exporter_check()) + + # NETWORK CHECKS + self.update_report(physical_network_check()) + + # COMPUTE CHECKS + self.update_report(reserved_vnf_cores_check()) + self.update_report(isolated_cores_check()) + self.update_report(vswitch_pmd_cores_check()) + self.update_report(vswitch_dpdk_lcores_check()) + self.update_report(os_reserved_cores_check()) + self.update_report(nova_scheduler_filters_check()) + self.update_report(cpu_allocation_ratio_check()) + + self._report['stop_date'] = dt.now().strftime('%Y-%m-%d %H:%M:%S') + + + def update_report(self, result): + """ + Updates report with new results + """ + case_name = result['case_name'] + criteria = result['criteria'] + + self._report['details']['total_checks'] += 1 + if criteria == 'pass': + self._report['details']['pass'].append(case_name) + elif criteria == 'fail': + self._report['details']['fail'].append(case_name) + self._report['criteria'] = 'fail' + + + + def get_report(self): + """ + Return final report as dict + """ + self._report["project_name"] = settings.getValue("project_name") + self._report["version"] = settings.getValue("project_version") + self._report["build_tag"] = "none" + + pdf = settings.getValue('pdf_file') + self._report["pod_name"] = pdf['management_info']['resource_pool_name'] + + store_result(self._report) + + return self._report diff --git a/sdv/docker/sdvstate/validator/airship/ceph_check.py b/sdv/docker/sdvstate/validator/airship/ceph_check.py new file mode 100644 index 0000000..b33e876 --- /dev/null +++ b/sdv/docker/sdvstate/validator/airship/ceph_check.py @@ -0,0 +1,51 @@ +# Copyright 2020 University Of Delhi. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
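To show how update_report() above folds individual check results into the final report, a short hedged sketch (the constructor calls load_kube_api(), so this assumes a reachable cluster config; the result values are hypothetical):

    from validator import AirshipValidator

    validator = AirshipValidator()

    # every check returns a dict carrying at least case_name and criteria
    validator.update_report({'category': 'storage',
                             'case_name': 'ceph_health_check',
                             'criteria': 'pass',
                             'details': 'HEALTH_OK'})
    # a single failing check flips the top-level criteria for the whole report
    validator.update_report({'category': 'platform',
                             'case_name': 'kibana_check',
                             'criteria': 'fail'})

    assert validator._report['details']['total_checks'] == 2
    assert validator._report['criteria'] == 'fail'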
+ +""" +Ceph Related Checks +""" + +import ast + +from tools.kube_utils import get_pod_with_labels, kube_exec +from .store_result import store_result + + + + +def ceph_health_check(): + """ + Check health of Ceph + """ + pod = get_pod_with_labels('application=ceph,component=mon') + + cmd = ['ceph', 'health', '-f', 'json'] + response = kube_exec(pod, cmd) + + response = ast.literal_eval(response) + + result = {'category': 'storage', + 'case_name': 'ceph_health_check', + 'details': [] + } + + if response['status'] == 'HEALTH_OK': + result['criteria'] = 'pass' + result['details'] = 'HEALTH_OK' + else: + result['criteria'] = 'fail' + result['details'] = response + + store_result(result) + return result diff --git a/sdv/docker/sdvstate/validator/airship/compute_check.py b/sdv/docker/sdvstate/validator/airship/compute_check.py new file mode 100644 index 0000000..ff6f6db --- /dev/null +++ b/sdv/docker/sdvstate/validator/airship/compute_check.py @@ -0,0 +1,646 @@ +# Copyright 2020 University Of Delhi. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Compute Related Checks +""" + +import configparser +import json + +from tools.kube_utils import kube_exec, get_pod_with_labels +from tools.conf import settings +from .store_result import store_result + + +########### +# Checks +########### + +def isolated_cores_check(): + """ + isolated_cores_check + """ + traced_value = trace_isolated_cores() + required_value = required_isolated_cores() + + result = {'category': 'compute', + 'case_name': 'isolated_cores_check', + 'details': {'traced_cores': traced_value, + 'required_cores': required_value + } + } + + if is_ranges_equals(traced_value, required_value): + result['criteria'] = 'pass' + else: + result['criteria'] = 'fail' + + + store_result(result) + return result + + + +def reserved_vnf_cores_check(): + """ + reserved_vnf_cores_check + """ + traced_value = trace_reserved_vnf_cores() + required_value = required_reserved_vnf_cores() + + result = {'category': 'compute', + 'case_name': 'reserved_vnf_cores_check', + 'details': {'traced_cores': traced_value, + 'required_cores': required_value + } + } + + if is_ranges_equals(traced_value, required_value): + result['criteria'] = 'pass' + else: + result['criteria'] = 'fail' + + + store_result(result) + return result + + + +def vswitch_pmd_cores_check(): + """ + vswitch_pmd_cores_check + """ + traced_value = trace_vswitch_pmd_cores() + required_value = required_vswitch_pmd_cores() + + result = {'category': 'compute', + 'case_name': 'vswitch_pmd_cores_check', + 'details': {'traced_cores': traced_value, + 'required_cores': required_value + } + } + + if is_ranges_equals(traced_value, required_value): + result['criteria'] = 'pass' + else: + result['criteria'] = 'fail' + + + store_result(result) + return result + + + +def vswitch_dpdk_lcores_check(): + """ + vswitch_dpdk_lcores_check + """ + traced_value = trace_vswitch_dpdk_lcores() + required_value = required_vswitch_dpdk_lcores() + + result = {'category': 'compute', + 'case_name': 'vswitch_dpdk_lcores_check', + 
'details': {'traced_cores': traced_value, + 'required_cores': required_value + } + } + + if is_ranges_equals(traced_value, required_value): + result['criteria'] = 'pass' + else: + result['criteria'] = 'fail' + + + store_result(result) + return result + + + +def os_reserved_cores_check(): + """ + os_reserved_cores_check + """ + traced_value = trace_os_reserved_cores() + required_value = required_os_reserved_cores() + + result = {'category': 'compute', + 'case_name': 'os_reserved_cores_check', + 'details': {'traced_cores': traced_value, + 'required_cores': required_value + } + } + + if is_ranges_equals(traced_value, required_value): + result['criteria'] = 'pass' + else: + result['criteria'] = 'fail' + + + store_result(result) + return result + + + +def nova_scheduler_filters_check(): + """ + nova_scheduler_filters_check + """ + traced_value = trace_nova_scheduler_filters() + required_value = required_nova_scheduler_filters() + + result = {'category': 'compute', + 'case_name': 'nova_scheduler_filters_check', + 'details': {'traced_filters': traced_value, + 'required_filters': required_value + } + } + + if are_lists_equal(traced_value, required_value): + result['criteria'] = 'pass' + else: + result['criteria'] = 'fail' + + store_result(result) + return result + + + +def cpu_allocation_ratio_check(): + """ + cpu_allocation_ratio_check + """ + traced_value = trace_cpu_allocation_ratio() + required_value = required_cpu_allocation_ratio() + + result = {'category': 'compute', + 'case_name': 'cpu_allocation_ratio_check', + 'details': {'traced_ratio': traced_value, + 'required_ratio': required_value + } + } + + if traced_value == required_value: + result['criteria'] = 'pass' + else: + result['criteria'] = 'fail' + + store_result(result) + return result + + + + + + + + +############### +# helper functions +############### + + + +def trace_isolated_cores(): + """ + Trace isolated_cores from Airship deployment + + :return: value traced from `isolcpus` key in `/proc/cmdline` + """ + pod = get_pod_with_labels('application=nova,component=compute') + + cmd = ['cat', '/proc/cmdline'] + proc_cmd = kube_exec(pod, cmd) + + for option in proc_cmd.split(): + if 'isolcpus' in option: + _, isolcpus_value = split_key_value(option) + break + + return isolcpus_value + + +def required_isolated_cores(): + """ + Returns value of `isolated_cpus` from platform_profile used by + Role for worker nodes in PDF + + :return: isolated_cores value expected by the PDF + """ + worker_role = settings.getValue('WORKER_ROLE_NAME') + profile = get_platform_profile_by_role(worker_role) + return profile['isolated_cpus'] + + + + + + +def trace_reserved_vnf_cores(): + """ + Trace vnf_reserved_cores from Airship deployment + + :return: value traced from `vcpu_pin_set` key in nova.conf + of actual deployment + """ + try: + config = get_nova_conf() + vcpu_pin_set = config.get('DEFAULT', 'vcpu_pin_set') + except (configparser.NoOptionError, configparser.MissingSectionHeaderError): + vcpu_pin_set = '' + + return vcpu_pin_set + + +def required_reserved_vnf_cores(): + """ + Returns value of vnf_cores from platform_profile used by + Role for worker nodes in PDF + + :return: vnf_reserverd_core value expected by the PDF + """ + worker_role = settings.getValue('WORKER_ROLE_NAME') + profile = get_platform_profile_by_role(worker_role) + return profile['vnf_cores'] + + + + + + +def trace_vswitch_pmd_cores(): + """ + Trace vswitch_pmd_cores from Airship deployment + + :return: value traced from `other_config:pmd-cpu-mask` in + openvswitchdb using ovs-vsctl 
+ """ + ovs_pod = get_pod_with_labels('application=openvswitch,component=openvswitch-vswitchd') + + cmd = ['ovs-vsctl', '-t', '5', 'get', 'Open_vSwitch', '.', 'other_config'] + response = kube_exec(ovs_pod, cmd) + + response.replace('=', ':') + config = json.loads(response) + + if 'pmd-cpu-mask' in config: + pmd_cores = hex_to_comma_list(config['pmd-cpu-mask']) + else: + pmd_cores = '' + + return pmd_cores + + +def required_vswitch_pmd_cores(): + """ + Returns value of vswitch_pmd_cores from platform_profile used by + Role for worker nodes in PDF + + :return: vswitch_pmd_cores value expected by the PDF + """ + worker_role = settings.getValue('WORKER_ROLE_NAME') + profile = get_platform_profile_by_role(worker_role) + return profile['vswitch_pmd_cores'] + + + + + + +def trace_vswitch_dpdk_lcores(): + """ + Trace vswitch_dpdk_lcores from Airship deployment + + :return: value traced from `other_config:dpdk-lcore-mask` in + openvswitchdb using ovs-vsctl + """ + ovs_pod = get_pod_with_labels('application=openvswitch,component=openvswitch-vswitchd') + + cmd = ['ovs-vsctl', '-t', '5', 'get', 'Open_vSwitch', '.', 'other_config'] + response = kube_exec(ovs_pod, cmd) + + response.replace('=', ':') + config = json.loads(response) + + if 'dpdk-lcore-mask' in config: + pmd_cores = hex_to_comma_list(config['dpdk-lcore-mask']) + else: + pmd_cores = '' + + return pmd_cores + + +def required_vswitch_dpdk_lcores(): + """ + Returns value of vswitch_dpdk_lcores from platform_profile used by + Role for worker nodes in PDF + + :return: vswitch_dpdk_lcores value expected by the PDF + """ + worker_role = settings.getValue('WORKER_ROLE_NAME') + profile = get_platform_profile_by_role(worker_role) + return profile['vswitch_dpdk_lcores'] + + + + + + +def trace_os_reserved_cores(): + """ + Trace os_reserved_cores from Airship deployment + + os_reserved_cores = all_cores - (reserved_vnf_cores + + vswitch_pmd_cores + + vswitch_dpdk_lcores) + """ + worker_role = settings.getValue('WORKER_ROLE_NAME') + all_cores = get_cores_by_role(worker_role) + + reserved_vnf_cores = trace_reserved_vnf_cores() + vswitch_pmd_cores = trace_vswitch_pmd_cores() + vswitch_dpdk_lcores = trace_vswitch_dpdk_lcores() + + non_os_cores = [] + non_os_cores.extend(convert_range_to_list(reserved_vnf_cores)) + non_os_cores.extend(convert_range_to_list(vswitch_pmd_cores)) + non_os_cores.extend(convert_range_to_list(vswitch_dpdk_lcores)) + + os_reserved_cores = set(all_cores).difference(set(non_os_cores)) + + # return as string with comma separated value + return ','.join(map(str, list(os_reserved_cores))) + + +def required_os_reserved_cores(): + """ + Returns value of os_reserved_cores from platform_profile used by + Role for worker nodes in PDF + + :return: os_reserved_cores value expected by the PDF + """ + worker_role = settings.getValue('WORKER_ROLE_NAME') + profile = get_platform_profile_by_role(worker_role) + return profile['os_reserved_cores'] + + + + + +def trace_nova_scheduler_filters(): + """ + Trace scheduler_filters from Airship deployment + + :return: value traced from `enabled_filters` key in nova.conf + of actual deployment + """ + try: + config = get_nova_conf() + filters = config.get('filter_scheduler', 'enabled_filters') + except (configparser.NoOptionError, configparser.MissingSectionHeaderError): + filters = '' + + filters = filters.split(',') + map(str.strip, filters) + + return filters + +def required_nova_scheduler_filters(): + """ + Required nova scheduler_filters by the PDF + """ + pdf = settings.getValue('pdf_file') + filters = 
pdf['vim_functional']['scheduler_filters'] + + filters = filters.split(',') + map(str.strip, filters) + + return filters + + + + + + + +def trace_cpu_allocation_ratio(): + """ + Trace cpu_allocation_ratio from Airship deployment + + :return: value traced from `cpu_allocation_ratio` key in nova.conf + of actual deployment + """ + try: + config = get_nova_conf() + cpu_allocation_ratio = config.get('DEFAULT', 'cpu_allocation_ratio') + except (configparser.NoOptionError, configparser.MissingSectionHeaderError): + cpu_allocation_ratio = '' + + return float(cpu_allocation_ratio) + +def required_cpu_allocation_ratio(): + """ + Required cpu_allocation_ratio by the PDF + """ + pdf = settings.getValue('pdf_file') + cpu_allocation_ratio = pdf['vim_functional']['cpu_allocation_ratio'] + + return float(cpu_allocation_ratio) + + + + + + + +def get_role(role_name): + """ + Searches and returns role with `role_name` + """ + roles = settings.getValue('pdf_file')['roles'] + + for role in roles: + if role['name'] == role_name: + role_details = role + + return role_details + + +def get_platform_profile(profile_name): + """ + Searches and returns platform_profile with `profile_name` + """ + platform_profiles = settings.getValue('pdf_file')['platform_profiles'] + + for profile in platform_profiles: + if profile['profile_name'] == profile_name: + profile_details = profile + + return profile_details + +def get_processor_profile(profile_name): + """ + Searches and returns processor_profile with `profile_name` + """ + processor_profiles = settings.getValue('pdf_file')['processor_profiles'] + + for profile in processor_profiles: + if profile['profile_name'] == profile_name: + profile_details = profile + + return profile_details + +def get_platform_profile_by_role(role_name): + """ + Returns platform profile details of a role + """ + role = get_role(role_name) + profile = get_platform_profile(role['platform_profile']) + return profile + + +def get_hardware_profile_by_role(role_name): + """ + Returns hardware profile details of a role + """ + role = get_role(role_name) + + hardware_profiles = settings.getValue('pdf_file')['hardware_profiles'] + + for profile in hardware_profiles: + if profile['profile_name'] == role['hardware_profile']: + profile_details = profile + + return profile_details + + +def get_cores_by_role(role_name): + """ + Returns cpu cores list of server hardware used in the role + """ + hardware_profile = get_hardware_profile_by_role(role_name) + processor_profile = hardware_profile['profile_info']['processor_profile'] + profile = get_processor_profile(processor_profile) + + cpus = [] + + for numa in profile['profile_info']['numas']: + cpus.extend(convert_range_to_list(numa['cpu_set'])) + + return cpus + + + + + + + +def get_nova_conf(): + """ + Returns parsed nova.conf + """ + pod = get_pod_with_labels('application=nova,component=compute') + + cmd = ['cat', '/etc/nova/nova.conf'] + response = kube_exec(pod, cmd) + + config = configparser.ConfigParser() + config.read_string(response) + + return config + + +### cpu cores related helper function + +def convert_range_to_list(x): + """ + Returns list of numbers from given range as string + + e.g.: convert_range_to_list('3-5') will give [3, 4, 5] + """ + # pylint: disable=C0103 + result = [] + for part in x.split(','): + if '-' in part: + a, b = part.split('-') + a, b = int(a), int(b) + result.extend(range(a, b + 1)) + elif part != '': + a = int(part) + result.append(a) + # remove duplicates + result = list(dict.fromkeys(result)) + return result + + +def 
is_ranges_equals(range1, range2): + """ + Checks whether two ranges passed as string are equal + + e.g.: is_ranges_equals('2-5', '2-4,5') returns true + """ + set1 = set(convert_range_to_list(range1)) + set2 = set(convert_range_to_list(range2)) + return set1 == set2 + +def are_lists_equal(list1, list2): + """ + Checks whether two list are identicals + """ + set1 = set(list1) + set2 = set(list2) + return set1 == set2 + + + +def hex_to_comma_list(hex_mask): + """ + Converts CPU mask given in hex to list of cores + """ + binary = bin(int(hex_mask, 16))[2:] + reversed_binary = binary[::-1] + i = 0 + output = "" + for bit in reversed_binary: + if bit == '1': + output = output + str(i) + ',' + i = i + 1 + return output[:-1] + + +def comma_list_to_hex(cpus): + """ + Converts a list of cpu cores in corresponding hex value + of cpu-mask + """ + cpu_arr = cpus.split(",") + binary_mask = 0 + for cpu in cpu_arr: + binary_mask = binary_mask | (1 << int(cpu)) + return format(binary_mask, '02x') + + + +def split_key_value(key_value_str, delimiter='='): + """ + splits given string into key and value based on delimiter + + :param key_value_str: example string `someKey=somevalue` + :param delimiter: default delimiter is `=` + :return: [ key, value] + """ + key, value = key_value_str.split(delimiter) + key = key.strip() + value = value.strip() + return key, value diff --git a/sdv/docker/sdvstate/validator/airship/monitoring_logging_agent_check.py b/sdv/docker/sdvstate/validator/airship/monitoring_logging_agent_check.py new file mode 100644 index 0000000..3754299 --- /dev/null +++ b/sdv/docker/sdvstate/validator/airship/monitoring_logging_agent_check.py @@ -0,0 +1,243 @@ +# Copyright 2020 University Of Delhi. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
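A quick worked example of the core-mask helpers defined at the end of compute_check.py above (mask and range values are arbitrary; the import path follows the sdvstate tree layout):

    from validator.airship.compute_check import (hex_to_comma_list, comma_list_to_hex,
                                                 convert_range_to_list, is_ranges_equals,
                                                 split_key_value)

    assert hex_to_comma_list('0x3c') == '2,3,4,5'      # 0x3c == 0b111100 -> bits 2..5 set
    assert comma_list_to_hex('2,3,4,5') == '3c'
    assert convert_range_to_list('2-4,5') == [2, 3, 4, 5]
    assert is_ranges_equals('2-5', '2-4,5') is True
    assert split_key_value('isolcpus=4-43') == ('isolcpus', '4-43')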
+ +""" +Monitoring & Logging Agents Related Checks +""" + +import ast + +from tools.kube_utils import kube_curl +from tools.result_api import rfile +from .store_result import store_result + + +def prometheus_check(): + """ + Check health of Prometheus + """ + username = "prometheus" + password = "password123" + service = "prom-metrics" + namespace = "osh-infra" + + health = "fail" #default + res = kube_curl("-sL", "-m", "3", "-u", f'{username}:{password}', f'{service}.{namespace}/-/healthy') + if 'Prometheus is Healthy' in res: + health = "pass" + + readiness = "fail" #default + res = kube_curl("-sL", "-m", "3", "-u", f'{username}:{password}', f'{service}.{namespace}/-/ready') + if 'Prometheus is Ready' in res: + readiness = "pass" + + if health == "pass" and readiness == "pass": + state = "pass" + else: + state = "fail" + + result = {'category': 'platform', + 'case_name': 'prometheus_check', + 'criteria': state, + 'details': {'health': health, 'readiness': readiness} + } + + store_result(result) + return result + + + +def grafana_check(): + """ + Check health of Grafana + """ + username = "grafana" + password = "password123" + service = "grafana-dashboard" + namespace = "osh-infra" + + state = "fail" #default + res = kube_curl("-sL", "-m", "3", "-w", "%{http_code}",\ + "-o", "/dev/null", "-u", \ + f'{username}:{password}', \ + f'{service}.{namespace}:3000/api/health') + if res == '200': + state = "pass" + + result = {'category': 'platform', + 'case_name': 'grafana_check', + 'criteria': state + } + + store_result(result) + return result + + +def prometheus_alert_manager_check(): + """ + Check health of Alert Manager + """ + service = "alerts-engine" + namespace = "osh-infra" + + health = "fail" #default + res = kube_curl("-sL", "-m", "3", f'{service}.{namespace}:9093/-/healthy') + if 'Prometheus is Healthy' in res: + health = "pass" + + readiness = "fail" #default + res = kube_curl("-sL", "-m", "3", f'{service}.{namespace}:9093/-/ready') + if 'Prometheus is Ready' in res: + readiness = "pass" + + if health == "pass" and readiness == "pass": + state = "pass" + else: + state = "fail" + + result = {'category': 'platform', + 'case_name': 'prometheus_alert_manager_check', + 'criteria': state, + 'details': {'health': health, 'readiness': readiness} + } + + + store_result(result) + return result + + +def elasticsearch_check(): + """ + Check health of Elasticsearch cluster + """ + username = "elasticsearch" + password = "password123" + service = "elasticsearch" + namespace = "osh-infra" + + state = "fail" #default + res = kube_curl("-sL", "-m", "3", "-u", f'{username}:{password}', f'{service}.{namespace}/_cluster/health') + + if res == '': + res = 'Elasticsearch not reachable' + else: + res = ast.literal_eval(res) + if res['status'] == 'green': + state = "pass" + + result = {'category': 'platform', + 'case_name': 'elasticsearch_check', + 'criteria': state, + 'details': res + } + + store_result(result) + return result + + +def kibana_check(): + """ + Check health of Kibana + """ + username = "elasticsearch" + password = "password123" + service = "kibana-dash" + namespace = "osh-infra" + + state = "fail" #default + res = kube_curl("-sL", "-m", "3", "-u", f'{username}:{password}', f'{service}.{namespace}/api/status') + + if res == '': + res = 'kibana not reachable' + else: + res = ast.literal_eval(res) + if res['status']['overall']['state'] == 'green': + state = "pass" + + result = {'category': 'platform', + 'case_name': 'kibana_check', + 'criteria': state, + 'details': rfile(str(res)) + } + + 
store_result(result) + return result + + +def nagios_check(): + """ + Check health of Nagios + """ + username = "nagios" + password = "password123" + service = "nagios-metrics" + namespace = "osh-infra" + + state = "fail" #default + res = kube_curl("-sL", "-m", "3", "-w", "%{http_code}",\ + "-o", "/dev/null", "-u", \ + f'{username}:{password}', \ + f'{service}.{namespace}') + if res == '200': + state = "pass" + + result = {'category': 'platform', + 'case_name': 'nagios_check', + 'criteria': state + } + + store_result(result) + return result + + +def elasticsearch_exporter_check(): + """ + Check health of Elasticsearch Exporter + """ + service = "elasticsearch-exporter" + namespace = "osh-infra" + + state = "fail" #default + res = kube_curl("-sL", "-m", "3", "-w", "%{http_code}", "-o", "/dev/null", f'{service}.{namespace}:9108/metrics') + if res == '200': + state = "pass" + + result = {'category': 'platform', + 'case_name': 'elasticsearch_exporter_check', + 'criteria': state + } + + store_result(result) + return result + + +def fluentd_exporter_check(): + """ + Check health of Fluentd Exporter + """ + service = "fluentd-exporter" + namespace = "osh-infra" + + state = "fail" #default + res = kube_curl("-sL", "-m", "3", "-w", "%{http_code}", "-o", "/dev/null", f'{service}.{namespace}:9309/metrics') + if res == '200': + state = "pass" + + result = {'category': 'platform', + 'case_name': 'fluentd_exporter_check', + 'criteria': state + } + + store_result(result) + return result diff --git a/sdv/docker/sdvstate/validator/airship/network_check.py b/sdv/docker/sdvstate/validator/airship/network_check.py new file mode 100644 index 0000000..bddf579 --- /dev/null +++ b/sdv/docker/sdvstate/validator/airship/network_check.py @@ -0,0 +1,114 @@ +# Copyright 2020 University Of Delhi. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
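The Nagios and exporter probes above all share one pattern: kube_curl is told to discard the body ("-o", "/dev/null") and print only the final HTTP status ("-w", "%{http_code}"), so a healthy endpoint reduces to the string '200'. A stand-alone equivalent, using the in-cluster service name from the fluentd check:

    from tools.kube_utils import kube_curl

    status = kube_curl("-sL", "-m", "3", "-w", "%{http_code}", "-o", "/dev/null",
                       "fluentd-exporter.osh-infra:9309/metrics")
    state = "pass" if status == "200" else "fail"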
+ +""" +Network Related Checks +""" + + +import configparser + +from tools.conf import settings +from tools.kube_utils import kube_exec, get_pod_with_labels + +from .store_result import store_result + + +def physical_network_check(): + """ + physical_network_check + """ + ml2_config = neutron_ml2_config() + + physical_networks = settings.getValue('pdf_file')['physical_networks'] + + type_drivers = ml2_config.get('ml2', 'type_drivers').split(',') + + flat_networks = ml2_config.get('ml2_type_flat', 'flat_networks').split(',') + + vlan_networks = [] + network_vlan_ranges = ml2_config.get('ml2_type_vlan', 'network_vlan_ranges').split(',') + for network in network_vlan_ranges: + vlan_networks.append(network.split(':')[0]) + + result = {'category': 'network', + 'case_name': 'physical_network_check', + 'criteria': 'pass', + 'details': [] + } + + for physnet in physical_networks: + + res = {'network_name': physnet['name'], + 'type': physnet['type'], + 'criteria': 'fail' + } + + if physnet['type'] in type_drivers: + if physnet['type'] == 'flat': + if physnet['name'] in flat_networks or '*' in flat_networks: + res['criteria'] = 'pass' + else: + res['details'] = 'physical network name not found' + if physnet['type'] == 'vlan': + if physnet['name'] in vlan_networks: + res['criteria'] = 'pass' + else: + res['details'] = 'physical network name not found' + else: + res['details'] = 'physical network type not found' + + result['details'].append(res) + if res['criteria'] == 'fail': + result['criteria'] = 'fail' + + store_result(result) + return result + + + +def neutron_ml2_config(): + """ + Returns parsed ml2 config from neutron + """ + ovs = get_pod_with_labels("application=neutron,component=neutron-ovs-agent") + sriov = get_pod_with_labels("application=neutron,component=neutron-sriov-agent") + + confs = get_neutron_ml2_conf_from_pod(ovs) + confs.extend(get_neutron_ml2_conf_from_pod(sriov)) + + config = configparser.ConfigParser() + for conf in confs: + config.read_string(conf) + + return config + + + + +def get_neutron_ml2_conf_from_pod(pod): + """ + Reads ml2 config from neutron pod + """ + cmd = ['ls', '/etc/neutron/plugins/ml2/'] + response = kube_exec(pod, cmd) + files = response.rstrip("\n").split() + + response = [] + for filename in files: + cmd = ['cat', '/etc/neutron/plugins/ml2/' + filename] + conf = kube_exec(pod, cmd) + response.append(conf) + + return response diff --git a/sdv/docker/sdvstate/validator/airship/pod_health_check.py b/sdv/docker/sdvstate/validator/airship/pod_health_check.py index 34a6747..0093ffc 100644 --- a/sdv/docker/sdvstate/validator/airship/pod_health_check.py +++ b/sdv/docker/sdvstate/validator/airship/pod_health_check.py @@ -13,13 +13,19 @@ # limitations under the License. 
+""" +Pod Health Checks +""" + + import logging -from kubernetes import client, config from tools.kube_utils import kube_api from tools.conf import settings -from tools.result_api import result_api, rfile +from tools.result_api import rfile + +from .store_result import store_result @@ -29,13 +35,25 @@ def pod_health_check(): """ api = kube_api() namespace_list = settings.getValue('airship_namespace_list') + + result = {'category': 'platform', + 'case_name': 'pod_health_check', + 'criteria': 'pass', + 'details': [] + } + for namespace in namespace_list: pod_list = api.list_namespaced_pod(namespace) for pod in pod_list.items: - result = pod_status(pod) - if result['state'] == 'fail': - result['logs'] = get_logs(pod) - result_api.store(result) + pod_stats = pod_status(pod) + if pod_stats['criteria'] == 'fail': + pod_stats['logs'] = get_logs(pod) + result['criteria'] = 'fail' + result['details'].append(pod_stats) + + + store_result(result) + return result @@ -43,14 +61,13 @@ def pod_status(pod): """ Check health of a pod and returns it's status as result """ - result = {'state': 'ok', - 'kind': 'pod', + result = {'criteria': 'pass', 'name': pod.metadata.name, 'namespace': pod.metadata.namespace, 'node': pod.spec.node_name} if pod.status.container_statuses is None: - result['state'] = 'fail' + result['criteria'] = 'fail' result['pod_details'] = rfile(str(pod)) else: for container in pod.status.container_statuses: @@ -62,14 +79,14 @@ def pod_status(pod): status = container.state.waiting.reason if status not in ('Running', 'Completed'): - result['state'] = 'fail' + result['criteria'] = 'fail' result['pod_details'] = rfile(str(pod)) - info = f'[Health: {result["state"]}] Name: {result["name"]}, ' + info = f'[Health: {result["criteria"]}] Name: {result["name"]}, ' info = info + f'Namespace: {result["namespace"]}, Node: {result["node"]}' logger = logging.getLogger(__name__) - logger.info(info) + logger.debug(info) return result diff --git a/sdv/docker/sdvstate/validator/airship/store_result.py b/sdv/docker/sdvstate/validator/airship/store_result.py new file mode 100644 index 0000000..52f4e10 --- /dev/null +++ b/sdv/docker/sdvstate/validator/airship/store_result.py @@ -0,0 +1,28 @@ +# Copyright 2020 University Of Delhi. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +""" +store_result function to log and store result +""" +import logging +from tools.result_api import result_api + +def store_result(result): + """ + Logs and stores result + """ + logger = logging.getLogger(__name__) + logger.info(f'[State: {result["criteria"]}] {result["case_name"]}') + + result_api.store(result) diff --git a/sdv/docker/sdvurls/Dockerfile b/sdv/docker/sdvurls/Dockerfile new file mode 100644 index 0000000..e6d447a --- /dev/null +++ b/sdv/docker/sdvurls/Dockerfile @@ -0,0 +1,10 @@ +FROM python:3.8-slim-buster + +WORKDIR /sdvurls/ + +COPY requirements.txt /state/requirements.txt +RUN pip install -r requirements.txt + +COPY server /sdvurls/ + +CMD [ "python", "/sdvurls/server" ] diff --git a/sdv/docker/sdvurls/requirements.txt b/sdv/docker/sdvurls/requirements.txt new file mode 100644 index 0000000..c38d2e9 --- /dev/null +++ b/sdv/docker/sdvurls/requirements.txt @@ -0,0 +1,13 @@ +# The order of packages is significant, because pip processes them in the order +# of appearance. Changing the order has an impact on the overall integration +# process, which may cause wedges in the gate later. +# Copyright (c) 2020 Spirent Communications +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 + +tornado == 6.0.4 +urllib3 # MIT +GitPython;python_version>='3.0' # BSD License (3 clause) diff --git a/sdv/docker/sdvurls/server b/sdv/docker/sdvurls/server new file mode 100644 index 0000000..8d3ec7a --- /dev/null +++ b/sdv/docker/sdvurls/server @@ -0,0 +1,281 @@ +# Copyright 2020 Spirent Communications. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +""" +Airship implementation of Software Predeployment Validation +""" + +import os +import shutil +from pathlib import Path +import logging +import json +import git +import urllib3 + +from tornado.web import Application +from tornado.ioloop import IOLoop +import tornado.concurrent +import tornado.httpserver +import tornado.ioloop +import tornado.options +import tornado.web +import tornado.log + + +def check_link(link): + """ + Function the check the availability of Hyperlinks + """ + timeout = urllib3.util.Timeout(connect=5.0, read=7.0) + http = urllib3.PoolManager(timeout=timeout) + try: + http.request('HEAD', link) + except urllib3.exceptions.LocationValueError as err: + print(err.args) + return False + except urllib3.exceptions.MaxRetryError as err: + print(err.args) + return False + except urllib3.exceptions.RequestError as err: + print(err.args) + return False + except urllib3.exceptions.ConnectTimeoutError as err: + print(err.args) + return False + except urllib3.exceptions.PoolError as err: + print(err.args) + return False + except urllib3.exceptions.HTTPError as err: + print(err.args) + return False + return True + + +class Airship(): + """ + Ariship URLS Validation + """ + def __init__(self, params): + """ Airship class constructor """ + self.url = params['AIRSHIP_MANIFEST_URL'] + self.branch = params['AIRSHIP_MANIFEST_BRANCH'] + self.dl_path = '/tmp' + self.site_name = params['AIRSHIP_MANIFEST_SITE_NAME'] + self.tmversion = params['AIRSHIP_TREASUREMAP_VERSION'] + self.manifest = None + self.dirpath = Path(self.dl_path, 'airship') + self.tmdirpath = Path(self.dl_path, 'treasuremap') + self.locations = [] + self.validcount = 0 + self.invalidcount = 0 + self.respath = os.path.join(self.dl_path, ('urls-' + + self.site_name + + '-check.txt')) + + def clone_repo(self): + """ + Cloning the repos + """ + git.Repo.clone_from(self.url, + self.dirpath, + branch=self.branch) + git.Repo.clone_from('https://github.com/airshipit/treasuremap', + self.tmdirpath, + branch=self.tmversion) + + def cleanup_manifest(self): + """ + Remove existing manifests + """ + # Next Remove any manifest files, if it exists + if self.dirpath.exists() and self.dirpath.is_dir(): + shutil.rmtree(self.dirpath) + if self.tmdirpath.exists() and self.tmdirpath.is_dir(): + shutil.rmtree(self.tmdirpath) + + def manifest_exists_locally(self): + """ + Check if manifests exists locally + """ + if self.dirpath.exists() and self.dirpath.is_dir(): + return True + return False + + def validate(self): + """ + Hyperlink Validation + """ + self.cleanup_manifest() + # Next, clone the repo to the provided path. + self.clone_repo() + + if self.dirpath.exists() and self.dirpath.is_dir(): + # Get the file(s) where links are defined. 
+ self.find_locations( + os.path.join(self.dirpath, 'type', + 'cntt', 'software', + 'config', 'versions.yaml')) + self.find_locations( + os.path.join(self.tmdirpath, 'global', + 'software', 'config', 'versions.yaml')) + + with open(self.respath, "w+") as report: + for location in self.locations: + if check_link(location): + report.write("The Link: %s is VALID" % (location)) + self.validcount += 1 + else: + self.invalidcount += 1 + report.write("The Link: %s is INVALID" % (location)) + self.cleanup_manifest() + + def getresults(self): + """ + Return Valid and Invalid Counts + """ + return(self.validcount, self.invalidcount) + + # pylint: disable=consider-using-enumerate + def find_locations(self, yamlfile): + """ + Find all the hyperlinks in the manifests + """ + with open(yamlfile, 'r') as filep: + lines = filep.readlines() + for index in range(len(lines)): + line = lines[index].strip() + if line.startswith('location:'): + link = line.split(":", 1)[1] + if "opendev" in link: + if ((len(lines) > index+1) and + (lines[index+1].strip().startswith( + 'reference:'))): + ref = lines[index+1].split(":", 1)[1] + link = link + '/commit/' + ref.strip() + if link.strip() not in self.locations: + print(link) + self.locations.append(link.strip()) + if 'docker.' in line: + link = line.split(":", 1)[1] + link = link.replace('"', '') + parts = link.split('/') + if len(parts) == 3: + link = ('https://index.' + + parts[0].strip() + + '/v1/repositories/' + + parts[1] + '/' + parts[2].split(':')[0] + + '/tags/' + parts[2].split(':')[-1]) + if link.strip() not in self.locations: + print(link) + self.locations.append(link.strip()) + # quay.io/coreos/etcd:v3.4.2 + # https://quay.io/api/v1/repository/coreos/etcd/tag/v3.4.2 + if 'quay.' in line: + link = line.split(":", 1)[1] + link = link.replace('"', '') + parts = link.split('/') + if len(parts) == 3: + link = ('https://' + + parts[0].strip() + + '/api/v1/repository/' + + parts[1] + '/' + parts[2].split(':')[0] + + '/tag/' + parts[2].split(':')[-1]) + if link.strip() not in self.locations: + print(link) + self.locations.append(link.strip()) + + +# pylint: disable=W0223 +class AirshipUrlsValidator(tornado.web.RequestHandler): + """ Validate URLS """ + def set_default_headers(self): + """ set default headers""" + self.set_header('Content-Type', 'application/json') + + def post(self): + """ + POST request + usage: + /airship/?name='' installer='' link='' version='' + :return: logs from test results + """ + # decode the body + data = json.loads(self.request.body.decode()) + params = {} + branch = 'master' + installer = data['installer'] + name = data['name'] + link = data['link'] + version = data['version'] + if installer and 'airship' in installer.lower(): + if name and link and branch and version: + params['AIRSHIP_MANIFEST_URL'] = link + params['AIRSHIP_MANIFEST_BRANCH'] = branch + params['AIRSHIP_MANIFEST_SITE_NAME'] = name + params['AIRSHIP_TREASUREMAP_VERSION'] = version + airship = Airship(params) + airship.validate() + valid, invalid = airship.getresults() + self.write("Valid Links: " + + str(valid) + + " Invalid Links: " + + str(invalid)) + + +# pylint: disable=W0223 +class TripleoUrlsValidator(tornado.web.RequestHandler): + """ Validate URLS """ + + def post(self): + """ + POST request + """ + self.write('error: Not Implemented') + + +def main(): + """ The Main Control """ + app = Application([('/airship', AirshipUrlsValidator), + ('/tripleo', TripleoUrlsValidator)]) + + # Cli Config + tornado.options.define("port", default=8989, + help="running on the 
given port", type=int) + tornado.options.parse_command_line() + + # Server Config + http_server = tornado.httpserver.HTTPServer(app) + http_server.listen(tornado.options.options.port) + + # Tornado's event loop handles it from here + print("# Server Listening.... \n [Ctrl + C] to quit") + + # Logging + log_file_filename = "/var/log/tornado.log" + handler = logging.FileHandler(log_file_filename) + app_log = logging.getLogger("tornado.general") + tornado.log.enable_pretty_logging() + app_log.addHandler(handler) + + try: + tornado.ioloop.IOLoop.instance().start() + except KeyboardInterrupt: + tornado.ioloop.IOLoop.instance().stop() + +if __name__ == "__main__": + main()
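Finally, a hedged usage sketch for the /airship endpoint defined above. The handler reads a JSON body (not query parameters), and every value below is illustrative only:

    import requests

    payload = {
        "installer": "airship",
        "name": "my-site",                                    # AIRSHIP_MANIFEST_SITE_NAME
        "link": "https://example.org/airship-manifests.git",  # AIRSHIP_MANIFEST_URL
        "version": "v1.7",                                    # AIRSHIP_TREASUREMAP_VERSION (branch/tag)
    }
    resp = requests.post("http://localhost:8989/airship", json=payload)  # 8989 is the default port
    print(resp.text)   # e.g. "Valid Links: 120 Invalid Links: 3"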