author     Yaron Yogev <yaronyogev@gmail.com>  2017-07-27 09:02:54 +0300
committer  Yaron Yogev <yaronyogev@gmail.com>  2017-07-27 14:56:25 +0300
commit     7e83d0876ddb84a45e130eeba28bc40ef53c074b (patch)
tree       47d76239ae7658d87c66abd142df92709427e7dd /app/discover
parent     378ecbd8947589b9cbb39013a0c2e2aa201e03bd (diff)
Calipso initial release for OPNFV
Change-Id: I7210c244b0c10fa80bfa8c77cb86c9d6ddf8bc88
Signed-off-by: Yaron Yogev <yaronyogev@gmail.com>
Diffstat (limited to 'app/discover')
-rw-r--r--  app/discover/__init__.py  10
-rw-r--r--  app/discover/clique_finder.py  174
-rw-r--r--  app/discover/configuration.py  70
-rw-r--r--  app/discover/event_handler.py  45
-rw-r--r--  app/discover/event_manager.py  265
-rw-r--r--  app/discover/events/__init__.py  10
-rw-r--r--  app/discover/events/event_base.py  36
-rw-r--r--  app/discover/events/event_delete_base.py  60
-rw-r--r--  app/discover/events/event_instance_add.py  45
-rw-r--r--  app/discover/events/event_instance_delete.py  18
-rw-r--r--  app/discover/events/event_instance_update.py  55
-rw-r--r--  app/discover/events/event_interface_add.py  139
-rw-r--r--  app/discover/events/event_interface_delete.py  40
-rw-r--r--  app/discover/events/event_metadata_parser.py  75
-rw-r--r--  app/discover/events/event_network_add.py  50
-rw-r--r--  app/discover/events/event_network_delete.py  17
-rw-r--r--  app/discover/events/event_network_update.py  44
-rw-r--r--  app/discover/events/event_port_add.py  309
-rw-r--r--  app/discover/events/event_port_delete.py  80
-rw-r--r--  app/discover/events/event_port_update.py  38
-rw-r--r--  app/discover/events/event_router_add.py  123
-rw-r--r--  app/discover/events/event_router_delete.py  37
-rw-r--r--  app/discover/events/event_router_update.py  82
-rw-r--r--  app/discover/events/event_subnet_add.py  154
-rw-r--r--  app/discover/events/event_subnet_delete.py  57
-rw-r--r--  app/discover/events/event_subnet_update.py  102
-rw-r--r--  app/discover/events/listeners/__init__.py  10
-rwxr-xr-x  app/discover/events/listeners/default_listener.py  314
-rw-r--r--  app/discover/events/listeners/listener_base.py  18
-rw-r--r--  app/discover/fetch_host_object_types.py  37
-rw-r--r--  app/discover/fetch_region_object_types.py  37
-rw-r--r--  app/discover/fetcher.py  35
-rw-r--r--  app/discover/fetcher_new.py  30
-rw-r--r--  app/discover/fetchers/__init__.py  9
-rw-r--r--  app/discover/fetchers/aci/__init__.py  9
-rw-r--r--  app/discover/fetchers/aci/aci_access.py  200
-rw-r--r--  app/discover/fetchers/aci/aci_fetch_switch_pnic.py  91
-rw-r--r--  app/discover/fetchers/api/__init__.py  9
-rw-r--r--  app/discover/fetchers/api/api_access.py  195
-rw-r--r--  app/discover/fetchers/api/api_fetch_availability_zones.py  56
-rw-r--r--  app/discover/fetchers/api/api_fetch_end_points.py  35
-rw-r--r--  app/discover/fetchers/api/api_fetch_host_instances.py  59
-rw-r--r--  app/discover/fetchers/api/api_fetch_network.py  76
-rw-r--r--  app/discover/fetchers/api/api_fetch_networks.py  86
-rw-r--r--  app/discover/fetchers/api/api_fetch_port.py  60
-rw-r--r--  app/discover/fetchers/api/api_fetch_ports.py  55
-rw-r--r--  app/discover/fetchers/api/api_fetch_project_hosts.py  144
-rw-r--r--  app/discover/fetchers/api/api_fetch_projects.py  66
-rw-r--r--  app/discover/fetchers/api/api_fetch_regions.py  51
-rw-r--r--  app/discover/fetchers/cli/__init__.py  9
-rw-r--r--  app/discover/fetchers/cli/cli_access.py  206
-rw-r--r--  app/discover/fetchers/cli/cli_fetch_host_pnics.py  122
-rw-r--r--  app/discover/fetchers/cli/cli_fetch_host_pnics_vpp.py  44
-rw-r--r--  app/discover/fetchers/cli/cli_fetch_host_vservice.py  80
-rw-r--r--  app/discover/fetchers/cli/cli_fetch_host_vservices.py  27
-rw-r--r--  app/discover/fetchers/cli/cli_fetch_instance_vnics.py  22
-rw-r--r--  app/discover/fetchers/cli/cli_fetch_instance_vnics_base.py  68
-rw-r--r--  app/discover/fetchers/cli/cli_fetch_instance_vnics_vpp.py  18
-rw-r--r--  app/discover/fetchers/cli/cli_fetch_oteps_lxb.py  86
-rw-r--r--  app/discover/fetchers/cli/cli_fetch_vconnectors.py  40
-rw-r--r--  app/discover/fetchers/cli/cli_fetch_vconnectors_lxb.py  35
-rw-r--r--  app/discover/fetchers/cli/cli_fetch_vconnectors_ovs.py  56
-rw-r--r--  app/discover/fetchers/cli/cli_fetch_vconnectors_vpp.py  64
-rw-r--r--  app/discover/fetchers/cli/cli_fetch_vpp_vedges.py  58
-rw-r--r--  app/discover/fetchers/cli/cli_fetch_vservice_vnics.py  140
-rw-r--r--  app/discover/fetchers/db/__init__.py  9
-rw-r--r--  app/discover/fetchers/db/db_access.py  142
-rw-r--r--  app/discover/fetchers/db/db_fetch_aggregate_hosts.py  36
-rw-r--r--  app/discover/fetchers/db/db_fetch_aggregates.py  21
-rw-r--r--  app/discover/fetchers/db/db_fetch_availability_zones.py  22
-rw-r--r--  app/discover/fetchers/db/db_fetch_az_network_hosts.py  31
-rw-r--r--  app/discover/fetchers/db/db_fetch_host_instances.py  15
-rw-r--r--  app/discover/fetchers/db/db_fetch_host_network_agents.py  35
-rw-r--r--  app/discover/fetchers/db/db_fetch_instances.py  60
-rw-r--r--  app/discover/fetchers/db/db_fetch_oteps.py  81
-rw-r--r--  app/discover/fetchers/db/db_fetch_port.py  34
-rw-r--r--  app/discover/fetchers/db/db_fetch_vedges_ovs.py  178
-rw-r--r--  app/discover/fetchers/db/db_fetch_vedges_vpp.py  56
-rw-r--r--  app/discover/fetchers/folder_fetcher.py  36
-rw-r--r--  app/discover/find_links.py  30
-rw-r--r--  app/discover/find_links_for_instance_vnics.py  59
-rw-r--r--  app/discover/find_links_for_oteps.py  85
-rw-r--r--  app/discover/find_links_for_pnics.py  58
-rw-r--r--  app/discover/find_links_for_vconnectors.py  88
-rw-r--r--  app/discover/find_links_for_vedges.py  124
-rw-r--r--  app/discover/find_links_for_vservice_vnics.py  56
-rw-r--r--  app/discover/manager.py  45
-rw-r--r--  app/discover/monitoring_mgr.py  10
-rw-r--r--  app/discover/network_agents_list.py  23
-rw-r--r--  app/discover/plugins/__init__.py  10
-rwxr-xr-x  app/discover/scan.py  324
-rw-r--r--  app/discover/scan_error.py  11
-rw-r--r--  app/discover/scan_manager.py  294
-rw-r--r--  app/discover/scan_metadata_parser.py  202
-rw-r--r--  app/discover/scanner.py  253
95 files changed, 7320 insertions, 0 deletions
diff --git a/app/discover/__init__.py b/app/discover/__init__.py
new file mode 100644
index 0000000..1e85a2a
--- /dev/null
+++ b/app/discover/__init__.py
@@ -0,0 +1,10 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
diff --git a/app/discover/clique_finder.py b/app/discover/clique_finder.py
new file mode 100644
index 0000000..9b5aad2
--- /dev/null
+++ b/app/discover/clique_finder.py
@@ -0,0 +1,174 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from bson.objectid import ObjectId
+
+from discover.fetcher import Fetcher
+from utils.inventory_mgr import InventoryMgr
+
+
+class CliqueFinder(Fetcher):
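+    # Builds "clique" documents: for each focal point object, collects the
+    # chain of links listed in the matching clique_type definition, subject
+    # to the constraints defined in clique_constraints.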
+ def __init__(self):
+ super().__init__()
+ self.inv = InventoryMgr()
+ self.inventory = self.inv.inventory_collection
+ self.links = self.inv.collections["links"]
+ self.clique_types = self.inv.collections["clique_types"]
+ self.clique_types_by_type = {}
+ self.clique_constraints = self.inv.collections["clique_constraints"]
+ self.cliques = self.inv.collections["cliques"]
+
+ def find_cliques_by_link(self, links_list):
+        return self.cliques.find({'links': {'$in': links_list}})
+
+ def find_links_by_source(self, db_id):
+ return self.links.find({'source': db_id})
+
+ def find_links_by_target(self, db_id):
+ return self.links.find({'target': db_id})
+
+ def find_cliques(self):
+ self.log.info("scanning for cliques")
+ clique_types = self.get_clique_types().values()
+ for clique_type in clique_types:
+ self.find_cliques_for_type(clique_type)
+ self.log.info("finished scanning for cliques")
+
+ def get_clique_types(self):
+ if not self.clique_types_by_type:
+ clique_types = self.clique_types.find({"environment": self.get_env()})
+ default_clique_types = \
+ self.clique_types.find({'environment': 'ANY'})
+ for clique_type in clique_types:
+ focal_point_type = clique_type['focal_point_type']
+ self.clique_types_by_type[focal_point_type] = clique_type
+ # if some focal point type does not have an explicit definition in
+ # clique_types for this specific environment, use the default
+ # clique type definition with environment=ANY
+ for clique_type in default_clique_types:
+ focal_point_type = clique_type['focal_point_type']
+                if focal_point_type not in self.clique_types_by_type:
+ self.clique_types_by_type[focal_point_type] = clique_type
+ return self.clique_types_by_type
+
+ def find_cliques_for_type(self, clique_type):
+        focal_point_type = clique_type["focal_point_type"]
+        constraint = self.clique_constraints \
+            .find_one({"focal_point_type": focal_point_type})
+        constraints = [] if not constraint else constraint["constraints"]
+        objects_for_focal_point_type = self.inventory.find({
+            "environment": self.get_env(),
+            "type": focal_point_type
+        })
+ for o in objects_for_focal_point_type:
+ self.construct_clique_for_focal_point(o, clique_type, constraints)
+
+ def rebuild_clique(self, clique):
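+        # re-run clique construction for this clique's focal point;
+        # if no links can be collected anymore, drop the clique document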
+        focal_point_db_id = clique['focal_point']
+        o = self.inventory.find_one({'_id': focal_point_db_id})
+        constraint = self.clique_constraints \
+            .find_one({"focal_point_type": o['type']})
+        constraints = [] if not constraint else constraint["constraints"]
+        clique_types = self.get_clique_types()
+        clique_type = clique_types[o['type']]
+        new_clique = self.construct_clique_for_focal_point(o, clique_type, constraints)
+        if not new_clique:
+            self.cliques.delete_one({'_id': clique['_id']})
+
+ def construct_clique_for_focal_point(self, o, clique_type, constraints):
+ # keep a hash of nodes in clique that were visited for each type
+ # start from the focal point
+ nodes_of_type = {o["type"]: {str(o["_id"]): 1}}
+ clique = {
+ "environment": self.env,
+ "focal_point": o["_id"],
+ "focal_point_type": o["type"],
+ "links": [],
+ "links_detailed": [],
+ "constraints": {}
+ }
+ for c in constraints:
+ val = o[c] if c in o else None
+ clique["constraints"][c] = val
+ for link_type in clique_type["link_types"]:
+            # link types are of the form '<from_type>-<to_type>';
+            # if the links collection stores this type in reversed form,
+            # match on the reversed form instead
+ link_type_parts = link_type.split('-')
+ link_type_parts.reverse()
+ link_type_reversed = '-'.join(link_type_parts)
+ matches = self.links.find_one({
+ "environment": self.env,
+ "link_type": link_type_reversed
+ })
+            is_reversed = bool(matches)
+            if is_reversed:
+                link_type = link_type_reversed
+            from_type = link_type[:link_type.index("-")]
+            to_type = link_type[link_type.index("-") + 1:]
+            side_to_match = 'target' if is_reversed else 'source'
+            other_side = 'source' if is_reversed else 'target'
+            match_type = to_type if is_reversed else from_type
+            if match_type not in nodes_of_type:
+                continue
+            other_side_type = from_type if is_reversed else to_type
+ for match_point in nodes_of_type[match_type].keys():
+ matches = self.links.find({
+ "environment": self.env,
+ "link_type": link_type,
+ side_to_match: ObjectId(match_point)
+ })
+ for link in matches:
+ id = link["_id"]
+ if id in clique["links"]:
+ continue
+ if not self.check_constraints(clique, link):
+ continue
+ clique["links"].append(id)
+ clique["links_detailed"].append(link)
+ other_side_point = str(link[other_side])
+ if other_side_type not in nodes_of_type:
+ nodes_of_type[other_side_type] = {}
+ nodes_of_type[other_side_type][other_side_point] = 1
+
+ # after adding the links to the clique, create/update the clique
+ if not clique["links"]:
+ return None
+        focal_point_obj = self.inventory.find_one({"_id": clique["focal_point"]})
+        if not focal_point_obj:
+            return None
+        focal_point_obj["clique"] = True
+        focal_point_obj.pop("_id", None)
+ self.cliques.update_one(
+ {
+ "environment": self.get_env(),
+ "focal_point": clique["focal_point"]
+ },
+ {'$set': clique},
+ upsert=True)
+ clique_document = self.inventory.update_one(
+ {"_id": clique["focal_point"]},
+ {'$set': focal_point_obj},
+ upsert=True)
+ return clique_document
+
+ def check_constraints(self, clique, link):
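+        # a link passes if, for every constraint that appears in its
+        # attributes, the link's value matches the clique's constraint value
+        # (or is contained in the list of allowed values)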
+ if "attributes" not in link:
+ return True
+ attributes = link["attributes"]
+ constraints = clique["constraints"]
+ for c in constraints:
+ if c not in attributes:
+ continue # constraint not applicable to this link
+ constr_values = constraints[c]
+ link_val = attributes[c]
+ if isinstance(constr_values, list):
+ if link_val not in constr_values:
+ return False
+ elif link_val != constraints[c]:
+ return False
+ return True
diff --git a/app/discover/configuration.py b/app/discover/configuration.py
new file mode 100644
index 0000000..c7bc0c0
--- /dev/null
+++ b/app/discover/configuration.py
@@ -0,0 +1,70 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from utils.inventory_mgr import InventoryMgr
+from utils.logging.full_logger import FullLogger
+from utils.mongo_access import MongoAccess
+from utils.singleton import Singleton
+
+
+class Configuration(metaclass=Singleton):
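+    # Singleton holding the selected environment document from the
+    # environments collection and exposing its 'configuration' components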
+ def __init__(self, environments_collection="environments_config"):
+ super().__init__()
+ self.db_client = MongoAccess()
+ self.db = MongoAccess.db
+ self.inv = InventoryMgr()
+ self.collection = self.inv.collections.get(environments_collection)
+ self.env_name = None
+ self.environment = None
+ self.configuration = None
+ self.log = FullLogger()
+
+ def use_env(self, env_name):
+ self.log.info("Configuration taken from environment: {}".format(env_name))
+ self.env_name = env_name
+
+ envs = self.collection.find({"name": env_name})
+ if envs.count() == 0:
+ raise ValueError("use_env: could not find matching environment")
+ if envs.count() > 1:
+ raise ValueError("use_env: found multiple matching environments")
+
+ self.environment = envs[0]
+ self.configuration = self.environment["configuration"]
+
+ def get_env_config(self):
+ return self.environment
+
+ def get_configuration(self):
+ return self.configuration
+
+ def get_env_name(self):
+ return self.env_name
+
+ def update_env(self, values):
+ self.collection.update_one({"name": self.env_name},
+ {'$set': MongoAccess.encode_mongo_keys(values)})
+
+ def get(self, component):
+        if self.configuration is None:
+            raise ValueError("Configuration: environment not set")
+        matches = [c for c in self.configuration if c["name"] == component]
+ if len(matches) == 0:
+ raise IndexError("No matches for configuration component: " + component)
+ if len(matches) > 1:
+ raise IndexError("Found multiple matches for configuration component: " + component)
+ return matches[0]
+
+ def has_network_plugin(self, name):
+        if 'mechanism_drivers' not in self.environment:
+            self.log.error('Environment missing mechanism_drivers definition: ' +
+                           self.environment['name'])
+            return False
+ mechanism_drivers = self.environment['mechanism_drivers']
+ return name in mechanism_drivers
diff --git a/app/discover/event_handler.py b/app/discover/event_handler.py
new file mode 100644
index 0000000..12199f8
--- /dev/null
+++ b/app/discover/event_handler.py
@@ -0,0 +1,45 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.events.event_base import EventBase, EventResult
+from utils.inventory_mgr import InventoryMgr
+from utils.logging.full_logger import FullLogger
+from utils.util import ClassResolver
+
+
+class EventHandler:
+
+ def __init__(self, env: str, inventory_collection: str):
+ super().__init__()
+ self.inv = InventoryMgr()
+ self.inv.set_collections(inventory_collection)
+ self.env = env
+ self.log = FullLogger(env=env)
+ self.handlers = {}
+
+ def discover_handlers(self, handlers_package: str, event_handlers: dict):
+ if not event_handlers:
+ raise TypeError("Event handlers list is empty")
+
+ for event_name, handler_name in event_handlers.items():
+ handler = ClassResolver.get_instance_of_class(handler_name, handlers_package)
+ if not issubclass(handler.__class__, EventBase):
+ raise TypeError("Event handler '{}' is not a subclass of EventBase"
+ .format(handler_name))
+ if event_name in self.handlers:
+ self.log.warning("A handler is already registered for event type '{}'. Overwriting"
+ .format(event_name))
+ self.handlers[event_name] = handler
+
+ def handle(self, event_name: str, notification: dict) -> EventResult:
+        if event_name not in self.handlers:
+            self.log.info("No handler is able to process event of type '{}'"
+                          .format(event_name))
+            return EventResult(result=False, retry=False,
+                               message="No handler for event type '{}'"
+                                       .format(event_name))
+        return self.handlers[event_name].handle(self.env, notification)
+
diff --git a/app/discover/event_manager.py b/app/discover/event_manager.py
new file mode 100644
index 0000000..ce40ce4
--- /dev/null
+++ b/app/discover/event_manager.py
@@ -0,0 +1,265 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import argparse
+import signal
+import time
+from multiprocessing import Process, Manager as SharedManager
+
+import os
+
+from discover.events.listeners.default_listener import DefaultListener
+from discover.events.listeners.listener_base import ListenerBase
+from discover.manager import Manager
+from utils.constants import OperationalStatus, EnvironmentFeatures
+from utils.inventory_mgr import InventoryMgr
+from utils.logging.file_logger import FileLogger
+from utils.mongo_access import MongoAccess
+
+
+class EventManager(Manager):
+
+ # After EventManager receives a SIGTERM,
+ # it will try to terminate all listeners.
+ # After this delay, a SIGKILL will be sent
+ # to each listener that is still alive.
+ SIGKILL_DELAY = 5 # in seconds
+
+ DEFAULTS = {
+ "mongo_config": "",
+ "collection": "environments_config",
+ "inventory": "inventory",
+ "interval": 5,
+ "loglevel": "INFO"
+ }
+
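+    # maps the environment 'distribution' value (see get_listener())
+    # to the listener class used for that environment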
+ LISTENERS = {
+ 'Mirantis-6.0': DefaultListener,
+ 'Mirantis-7.0': DefaultListener,
+ 'Mirantis-8.0': DefaultListener,
+ 'RDO-Mitaka': DefaultListener,
+ 'RDO-Liberty': DefaultListener,
+ }
+
+ def __init__(self):
+ self.args = self.get_args()
+ super().__init__(log_directory=self.args.log_directory,
+ mongo_config_file=self.args.mongo_config)
+ self.db_client = None
+ self.interval = None
+ self.processes = []
+
+ @staticmethod
+ def get_args():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("-m", "--mongo_config", nargs="?", type=str,
+ default=EventManager.DEFAULTS["mongo_config"],
+ help="Name of config file with MongoDB server access details")
+ parser.add_argument("-c", "--collection", nargs="?", type=str,
+ default=EventManager.DEFAULTS["collection"],
+ help="Environments collection to read from "
+ "(default: '{}')"
+ .format(EventManager.DEFAULTS["collection"]))
+ parser.add_argument("-y", "--inventory", nargs="?", type=str,
+ default=EventManager.DEFAULTS["inventory"],
+ help="name of inventory collection "
+ "(default: '{}')"
+ .format(EventManager.DEFAULTS["inventory"]))
+ parser.add_argument("-i", "--interval", nargs="?", type=float,
+ default=EventManager.DEFAULTS["interval"],
+ help="Interval between collection polls "
+ "(must be more than {} seconds. Default: {})"
+ .format(EventManager.MIN_INTERVAL,
+ EventManager.DEFAULTS["interval"]))
+ parser.add_argument("-l", "--loglevel", nargs="?", type=str,
+ default=EventManager.DEFAULTS["loglevel"],
+ help="Logging level \n(default: '{}')"
+ .format(EventManager.DEFAULTS["loglevel"]))
+ parser.add_argument("-d", "--log_directory", nargs="?", type=str,
+ default=FileLogger.LOG_DIRECTORY,
+ help="File logger directory \n(default: '{}')"
+ .format(FileLogger.LOG_DIRECTORY))
+ args = parser.parse_args()
+ return args
+
+ def configure(self):
+ self.db_client = MongoAccess()
+ self.inv = InventoryMgr()
+ self.inv.set_collections(self.args.inventory)
+ self.collection = self.db_client.db[self.args.collection]
+ self.interval = max(self.MIN_INTERVAL, self.args.interval)
+ self.log.set_loglevel(self.args.loglevel)
+
+ self.log.info("Started EventManager with following configuration:\n"
+ "Mongo config file path: {0}\n"
+ "Collection: {1}\n"
+ "Polling interval: {2} second(s)"
+ .format(self.args.mongo_config, self.collection.name, self.interval))
+
+ def get_listener(self, env: str):
+ env_config = self.inv.get_env_config(env)
+ return self.LISTENERS.get(env_config.get('distribution'))
+
+ def listen_to_events(self, listener: ListenerBase, env_name: str, process_vars: dict):
+ listener.listen({
+ 'env': env_name,
+ 'mongo_config': self.args.mongo_config,
+ 'inventory': self.args.inventory,
+ 'loglevel': self.args.loglevel,
+ 'environments_collection': self.args.collection,
+ 'process_vars': process_vars
+ })
+
+ def _get_alive_processes(self):
+ return [p for p in self.processes
+ if p['process'].is_alive()]
+
+ # Get all processes that should be terminated
+ def _get_stuck_processes(self, stopped_processes: list):
+        stopped_names = [p.get("name") for p in stopped_processes]
+        return [p for p in self._get_alive_processes()
+                if p.get("name") in stopped_names]
+
+ # Give processes time to finish and kill them if they are stuck
+ def _kill_stuck_processes(self, process_list: list):
+ if self._get_stuck_processes(process_list):
+ time.sleep(self.SIGKILL_DELAY)
+ for process in self._get_stuck_processes(process_list):
+ self.log.info("Killing event listener '{0}'".format(process.get("name")))
+ os.kill(process.get("process").pid, signal.SIGKILL)
+
+ def _get_operational(self, process: dict) -> OperationalStatus:
+ try:
+ return process.get("vars", {})\
+ .get("operational")
+        except Exception:
+ self.log.error("Event listener '{0}' is unreachable".format(process.get("name")))
+ return OperationalStatus.STOPPED
+
+ def _update_operational_status(self, status: OperationalStatus):
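+        # push the given status to the environment documents of all listener
+        # processes whose shared 'operational' variable currently equals it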
+ self.collection.update_many(
+ {"name": {"$in": [process.get("name")
+ for process
+ in self.processes
+ if self._get_operational(process) == status]}},
+ {"$set": {"operational": status.value}}
+ )
+
+ def update_operational_statuses(self):
+ self._update_operational_status(OperationalStatus.RUNNING)
+ self._update_operational_status(OperationalStatus.ERROR)
+ self._update_operational_status(OperationalStatus.STOPPED)
+
+ def cleanup_processes(self):
+ # Query for envs that are no longer eligible for listening
+ # (scanned == false and/or listen == false)
+ dropped_envs = [env['name']
+ for env
+ in self.collection
+ .find(filter={'$or': [{'scanned': False},
+ {'listen': False}]},
+ projection=['name'])]
+
+ live_processes = []
+ stopped_processes = []
+ # Drop already terminated processes
+ # and for all others perform filtering
+ for process in self._get_alive_processes():
+ # If env no longer qualifies for listening,
+ # stop the listener.
+ # Otherwise, keep the process
+ if process['name'] in dropped_envs:
+ self.log.info("Stopping event listener '{0}'".format(process.get("name")))
+ process['process'].terminate()
+ stopped_processes.append(process)
+ else:
+ live_processes.append(process)
+
+ self._kill_stuck_processes(stopped_processes)
+
+ # Update all 'operational' statuses
+ # for processes stopped on the previous step
+ self.collection.update_many(
+ {"name": {"$in": [process.get("name")
+ for process
+ in stopped_processes]}},
+ {"$set": {"operational": OperationalStatus.STOPPED.value}}
+ )
+
+ # Keep the living processes
+ self.processes = live_processes
+
+ def do_action(self):
+ try:
+ while True:
+ # Update "operational" field in db before removing dead processes
+ # so that we keep last statuses of env listeners before they were terminated
+ self.update_operational_statuses()
+
+ # Perform a cleanup that filters out all processes
+ # that are no longer eligible for listening
+ self.cleanup_processes()
+
+ envs = self.collection.find({'scanned': True, 'listen': True})
+
+ # Iterate over environments that don't have an event listener attached
+ for env in filter(lambda e: e['name'] not in
+ map(lambda process: process["name"], self.processes),
+ envs):
+ env_name = env['name']
+
+ if not self.inv.is_feature_supported(env_name, EnvironmentFeatures.LISTENING):
+ self.log.error("Listening is not supported for env '{}'".format(env_name))
+ self.collection.update({"name": env_name},
+ {"$set": {"operational": OperationalStatus.ERROR.value}})
+ continue
+
+ listener = self.get_listener(env_name)
+ if not listener:
+ self.log.error("No listener is defined for env '{}'".format(env_name))
+ self.collection.update({"name": env_name},
+ {"$set": {"operational": OperationalStatus.ERROR.value}})
+ continue
+
+ # A dict that is shared between event manager and newly created env listener
+ process_vars = SharedManager().dict()
+ p = Process(target=self.listen_to_events,
+ args=(listener, env_name, process_vars,),
+ name=env_name)
+ self.processes.append({"process": p, "name": env_name, "vars": process_vars})
+ self.log.info("Starting event listener '{0}'".format(env_name))
+ p.start()
+
+ # Make sure statuses are up-to-date before event manager goes to sleep
+ self.update_operational_statuses()
+ time.sleep(self.interval)
+ finally:
+ # Fetch operational statuses before terminating listeners.
+ # Shared variables won't be available after termination.
+ stopping_processes = [process.get("name")
+ for process
+ in self.processes
+ if self._get_operational(process) != OperationalStatus.ERROR]
+ self._update_operational_status(OperationalStatus.ERROR)
+
+ # Gracefully stop processes
+ for process in self._get_alive_processes():
+ self.log.info("Stopping event listener '{0}'".format(process.get("name")))
+ process.get("process").terminate()
+
+ # Kill all remaining processes
+ self._kill_stuck_processes(self.processes)
+
+ # Updating operational statuses for stopped processes
+ self.collection.update_many(
+ {"name": {"$in": stopping_processes}},
+ {"$set": {"operational": OperationalStatus.STOPPED.value}}
+ )
+
+if __name__ == "__main__":
+ EventManager().run()
diff --git a/app/discover/events/__init__.py b/app/discover/events/__init__.py
new file mode 100644
index 0000000..1e85a2a
--- /dev/null
+++ b/app/discover/events/__init__.py
@@ -0,0 +1,10 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
diff --git a/app/discover/events/event_base.py b/app/discover/events/event_base.py
new file mode 100644
index 0000000..6b3b290
--- /dev/null
+++ b/app/discover/events/event_base.py
@@ -0,0 +1,36 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from abc import abstractmethod, ABC
+
+from discover.fetcher import Fetcher
+from utils.inventory_mgr import InventoryMgr
+
+
+class EventResult:
+ def __init__(self,
+ result: bool, retry: bool = False, message: str = None,
+ related_object: str = None,
+ display_context: str = None):
+ self.result = result
+ self.retry = retry
+ self.message = message
+ self.related_object = related_object
+ self.display_context = display_context
+
+
+class EventBase(Fetcher, ABC):
+
+ def __init__(self):
+ super().__init__()
+ self.inv = InventoryMgr()
+
+ @abstractmethod
+ def handle(self, env, values) -> EventResult:
+ pass
diff --git a/app/discover/events/event_delete_base.py b/app/discover/events/event_delete_base.py
new file mode 100644
index 0000000..1cf94c3
--- /dev/null
+++ b/app/discover/events/event_delete_base.py
@@ -0,0 +1,60 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import re
+
+from bson.objectid import ObjectId
+
+from discover.clique_finder import CliqueFinder
+from discover.events.event_base import EventBase, EventResult
+
+
+class EventDeleteBase(EventBase):
+
+ def delete_handler(self, env, object_id, object_type) -> EventResult:
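+        # shared deletion flow: drop the object's clique, rebuild cliques
+        # that use links touching the object, then remove those links, the
+        # object itself and all of its descendants (matched by id_path prefix)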
+ item = self.inv.get_by_id(env, object_id)
+ if not item:
+ self.log.info('{0} document is not found, aborting {0} delete'.format(object_type))
+ return EventResult(result=False, retry=False)
+
+ db_id = ObjectId(item['_id'])
+ id_path = item['id_path'] + '/'
+
+ # remove related clique
+ clique_finder = CliqueFinder()
+ self.inv.delete('cliques', {'focal_point': db_id})
+
+ # keep related links to do rebuild of cliques using them
+ matched_links_source = clique_finder.find_links_by_source(db_id)
+ matched_links_target = clique_finder.find_links_by_target(db_id)
+
+ links_using_object = []
+ links_using_object.extend([l['_id'] for l in matched_links_source])
+ links_using_object.extend([l['_id'] for l in matched_links_target])
+
+ # find cliques using these links
+ if links_using_object:
+ matched_cliques = clique_finder.find_cliques_by_link(links_using_object)
+ # find cliques using these links and rebuild them
+ for clique in matched_cliques:
+ clique_finder.rebuild_clique(clique)
+
+ # remove all related links
+ self.inv.delete('links', {'source': db_id})
+ self.inv.delete('links', {'target': db_id})
+
+ # remove object itself
+ self.inv.delete('inventory', {'_id': db_id})
+
+ # remove children
+ regexp = re.compile('^' + id_path)
+ self.inv.delete('inventory', {'id_path': {'$regex': regexp}})
+ return EventResult(result=True,
+ related_object=object_id,
+ display_context=object_id)
diff --git a/app/discover/events/event_instance_add.py b/app/discover/events/event_instance_add.py
new file mode 100644
index 0000000..4dd2b20
--- /dev/null
+++ b/app/discover/events/event_instance_add.py
@@ -0,0 +1,45 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.events.event_base import EventBase, EventResult
+from discover.scanner import Scanner
+
+
+class EventInstanceAdd(EventBase):
+
+ def handle(self, env, values):
+ # find the host, to serve as parent
+ instance_id = values['payload']['instance_id']
+ host_id = values['payload']['host']
+ instances_root_id = host_id + '-instances'
+ instances_root = self.inv.get_by_id(env, instances_root_id)
+ if not instances_root:
+ self.log.info('instances root not found, aborting instance add')
+ return EventResult(result=False, retry=True)
+
+ # scan instance
+ scanner = Scanner()
+ scanner.set_env(env)
+ scanner.scan("ScanInstancesRoot", instances_root,
+ limit_to_child_id=instance_id,
+ limit_to_child_type='instance')
+ scanner.scan_from_queue()
+
+ # scan host
+ host = self.inv.get_by_id(env, host_id)
+ scanner.scan('ScanHost', host,
+ limit_to_child_type=['vconnectors_folder',
+ 'vedges_folder'])
+ scanner.scan_from_queue()
+ scanner.scan_links()
+ scanner.scan_cliques()
+
+ return EventResult(result=True,
+ related_object=instance_id,
+ display_context=instance_id)
diff --git a/app/discover/events/event_instance_delete.py b/app/discover/events/event_instance_delete.py
new file mode 100644
index 0000000..714d0c7
--- /dev/null
+++ b/app/discover/events/event_instance_delete.py
@@ -0,0 +1,18 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.events.event_delete_base import EventDeleteBase
+
+
+class EventInstanceDelete(EventDeleteBase):
+
+ def handle(self, env, values):
+ # find the corresponding object
+ instance_id = values['payload']['instance_id']
+ return self.delete_handler(env, instance_id, "instance")
diff --git a/app/discover/events/event_instance_update.py b/app/discover/events/event_instance_update.py
new file mode 100644
index 0000000..6231c30
--- /dev/null
+++ b/app/discover/events/event_instance_update.py
@@ -0,0 +1,55 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import re
+
+from discover.events.event_base import EventBase, EventResult
+from discover.events.event_instance_add import EventInstanceAdd
+from discover.events.event_instance_delete import EventInstanceDelete
+
+
+class EventInstanceUpdate(EventBase):
+
+ def handle(self, env, values):
+ # find the host, to serve as parent
+ payload = values['payload']
+ instance_id = payload['instance_id']
+ state = payload['state']
+ old_state = payload['old_state']
+
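+        # route by state transition: ignore 'building', treat
+        # building->active as instance add and active->deleted as instance
+        # delete; any other update is handled below as a rename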
+ if state == 'building':
+ return EventResult(result=False, retry=False)
+
+ if state == 'active' and old_state == 'building':
+ return EventInstanceAdd().handle(env, values)
+
+ if state == 'deleted' and old_state == 'active':
+ return EventInstanceDelete().handle(env, values)
+
+ name = payload['display_name']
+ instance = self.inv.get_by_id(env, instance_id)
+ if not instance:
+ self.log.info('instance document not found, aborting instance update')
+ return EventResult(result=False, retry=True)
+
+ instance['name'] = name
+ instance['object_name'] = name
+ name_path = instance['name_path']
+ instance['name_path'] = name_path[:name_path.rindex('/') + 1] + name
+
+ # TBD: fix name_path for descendants
+ if name_path != instance['name_path']:
+ self.inv.values_replace({
+ "environment": env,
+ "name_path": {"$regex": r"^" + re.escape(name_path + '/')}},
+ {"name_path": {"from": name_path, "to": instance['name_path']}})
+ self.inv.set(instance)
+ return EventResult(result=True,
+ related_object=instance_id,
+ display_context=instance_id)
diff --git a/app/discover/events/event_interface_add.py b/app/discover/events/event_interface_add.py
new file mode 100644
index 0000000..a06ad14
--- /dev/null
+++ b/app/discover/events/event_interface_add.py
@@ -0,0 +1,139 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import time
+
+from functools import partial
+
+from discover.events.event_base import EventBase, EventResult
+from discover.events.event_port_add import EventPortAdd
+from discover.events.event_subnet_add import EventSubnetAdd
+from discover.fetchers.api.api_access import ApiAccess
+from discover.fetchers.api.api_fetch_regions import ApiFetchRegions
+from discover.fetchers.cli.cli_fetch_host_vservice import CliFetchHostVservice
+from discover.find_links_for_vservice_vnics import FindLinksForVserviceVnics
+from discover.scanner import Scanner
+from utils.util import decode_router_id, encode_router_id
+
+
+class EventInterfaceAdd(EventBase):
+
+ def __init__(self):
+ super().__init__()
+ self.delay = 2
+
+ def add_gateway_port(self, env, project, network_name, router_doc, host_id):
+ fetcher = CliFetchHostVservice()
+ fetcher.set_env(env)
+ router_id = router_doc['id']
+ router = fetcher.get_vservice(host_id, router_id)
+ device_id = decode_router_id(router_id)
+ router_doc['gw_port_id'] = router['gw_port_id']
+
+ # add gateway port documents.
+ port_doc = EventSubnetAdd().add_port_document(env, router_doc['gw_port_id'], project_name=project)
+
+ mac_address = port_doc['mac_address'] if port_doc else None
+
+ # add vnic document
+ host = self.inv.get_by_id(env, host_id)
+
+ add_vnic_document = partial(EventPortAdd().add_vnic_document,
+ env=env,
+ host=host,
+ object_id=device_id,
+ object_type='router',
+ network_name=network_name,
+ router_name=router_doc['name'],
+ mac_address=mac_address)
+
+ ret = add_vnic_document()
+ if not ret:
+ time.sleep(self.delay)
+ self.log.info("Wait %s second, and then fetch vnic document again." % self.delay)
+ add_vnic_document()
+
+ def update_router(self, env, project, network_id, network_name, router_doc, host_id):
+ if router_doc:
+ if 'network' in router_doc:
+ if network_id not in router_doc['network']:
+ router_doc['network'].append(network_id)
+ else:
+ router_doc['network'] = [network_id]
+
+ # if gw_port_id is None, add gateway port first.
+ if not router_doc.get('gw_port_id'):
+ self.add_gateway_port(env, project, network_name, router_doc, host_id)
+ else:
+ # check the gateway port document, add it if document does not exist.
+ port = self.inv.get_by_id(env, router_doc['gw_port_id'])
+ if not port:
+ self.add_gateway_port(env, project, network_name, router_doc, host_id)
+ self.inv.set(router_doc)
+ else:
+ self.log.info("router document not found, aborting interface adding")
+
+ def handle(self, env, values):
+ interface = values['payload']['router_interface']
+ project = values['_context_project_name']
+ host_id = values["publisher_id"].replace("network.", "", 1)
+ port_id = interface['port_id']
+ subnet_id = interface['subnet_id']
+ router_id = encode_router_id(host_id, interface['id'])
+
+ network_document = self.inv.get_by_field(env, "network", "subnet_ids", subnet_id, get_single=True)
+ if not network_document:
+ self.log.info("network document not found, aborting interface adding")
+ return EventResult(result=False, retry=True)
+ network_name = network_document['name']
+ network_id = network_document['id']
+
+ # add router-interface port document.
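+        # populate the cached region list (ApiAccess.regions) if it is empty,
+        # as the API fetch below relies on it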
+ if len(ApiAccess.regions) == 0:
+ fetcher = ApiFetchRegions()
+ fetcher.set_env(env)
+ fetcher.get(None)
+ port_doc = EventSubnetAdd().add_port_document(env, port_id, network_name=network_name)
+
+ mac_address = port_doc['mac_address'] if port_doc else None
+
+ # add vnic document
+ host = self.inv.get_by_id(env, host_id)
+ router_doc = self.inv.get_by_id(env, router_id)
+
+ add_vnic_document = partial(EventPortAdd().add_vnic_document,
+ env=env,
+ host=host,
+ object_id=interface['id'],
+ object_type='router',
+ network_name=network_name,
+ router_name=router_doc['name'],
+ mac_address=mac_address)
+
+ ret = add_vnic_document()
+ if ret is False:
+            # retry: the vnic may be created slightly after the first CLI fetch
+ time.sleep(self.delay)
+ self.log.info("Wait {} seconds, and then fetch vnic document again.".format(self.delay))
+ add_vnic_document()
+
+ # update the router document: gw_port_id, network.
+ self.update_router(env, project, network_id, network_name, router_doc, host_id)
+
+ # update vservice-vnic, vnic-network,
+ FindLinksForVserviceVnics().add_links(search={"parent_id": router_id})
+ scanner = Scanner()
+ scanner.set_env(env)
+
+ scanner.scan_cliques()
+ self.log.info("Finished router-interface added.")
+
+ return EventResult(result=True,
+ related_object=interface['id'],
+ display_context=network_id)
diff --git a/app/discover/events/event_interface_delete.py b/app/discover/events/event_interface_delete.py
new file mode 100644
index 0000000..b1df978
--- /dev/null
+++ b/app/discover/events/event_interface_delete.py
@@ -0,0 +1,40 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.events.event_base import EventResult
+from discover.events.event_delete_base import EventDeleteBase
+from discover.events.event_port_delete import EventPortDelete
+from utils.util import encode_router_id
+
+
+class EventInterfaceDelete(EventDeleteBase):
+
+ def handle(self, env, values):
+ interface = values['payload']['router_interface']
+ port_id = interface['port_id']
+ host_id = values["publisher_id"].replace("network.", "", 1)
+ router_id = encode_router_id(host_id, interface['id'])
+
+ # update router document
+ port_doc = self.inv.get_by_id(env, port_id)
+ if not port_doc:
+ self.log.info("Interface deleting handler: port document not found.")
+ return EventResult(result=False, retry=False)
+ network_id = port_doc['network_id']
+
+ router_doc = self.inv.get_by_id(env, router_id)
+ if router_doc and network_id in router_doc.get('network', []):
+ router_doc['network'].remove(network_id)
+ self.inv.set(router_doc)
+
+ # delete port document
+ result = EventPortDelete().delete_port(env, port_id)
+ result.related_object = interface['id']
+ result.display_context = network_id
+ return result
diff --git a/app/discover/events/event_metadata_parser.py b/app/discover/events/event_metadata_parser.py
new file mode 100644
index 0000000..5d09376
--- /dev/null
+++ b/app/discover/events/event_metadata_parser.py
@@ -0,0 +1,75 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from typing import List, Tuple
+
+from utils.metadata_parser import MetadataParser
+
+
+class EventMetadataParser(MetadataParser):
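+    # parses event handling metadata: the handlers package name, an optional
+    # list of queue/exchange definitions and the event type to handler class
+    # mapping used by EventHandler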
+
+ HANDLERS_PACKAGE = 'handlers_package'
+ QUEUES = 'queues'
+ EVENT_HANDLERS = 'event_handlers'
+
+ REQUIRED_EXPORTS = [HANDLERS_PACKAGE, EVENT_HANDLERS]
+
+ def __init__(self):
+ super().__init__()
+ self.handlers_package = None
+ self.queues = []
+ self.event_handlers = []
+
+ def get_required_fields(self) -> list:
+ return self.REQUIRED_EXPORTS
+
+ def validate_metadata(self, metadata: dict) -> bool:
+ super().validate_metadata(metadata)
+
+ package = metadata.get(self.HANDLERS_PACKAGE)
+ if not package or not isinstance(package, str):
+ self.add_error("Handlers package '{}' is invalid".format(package))
+
+ event_handlers = metadata.get(self.EVENT_HANDLERS)
+ if not event_handlers or not isinstance(event_handlers, dict):
+ self.add_error("Event handlers attribute is invalid or empty"
+ "(should be a non-empty dict)")
+
+ return len(self.errors) == 0
+
+ def _finalize_parsing(self, metadata):
+ handlers_package = metadata[self.HANDLERS_PACKAGE]
+ queues = metadata.get(self.QUEUES, None)
+ event_handlers = metadata[self.EVENT_HANDLERS]
+
+ # Convert variables to EventHandler-friendly format
+ self.handlers_package = handlers_package
+
+ try:
+ if queues and isinstance(queues, list):
+ self.queues = [{"queue": q["queue"],
+ "exchange": q["exchange"]}
+ for q in queues]
+ except KeyError:
+ self.add_error("Queues variable has invalid format")
+ return
+
+ self.event_handlers = event_handlers
+
+ def parse_metadata_file(self, file_path: str) -> dict:
+ metadata = super().parse_metadata_file(file_path)
+ self._finalize_parsing(metadata)
+ super().check_errors()
+ return metadata
+
+
+def parse_metadata_file(file_path: str):
+ parser = EventMetadataParser()
+ parser.parse_metadata_file(file_path)
+ return parser.handlers_package, parser.queues, parser.event_handlers
diff --git a/app/discover/events/event_network_add.py b/app/discover/events/event_network_add.py
new file mode 100644
index 0000000..41fafd4
--- /dev/null
+++ b/app/discover/events/event_network_add.py
@@ -0,0 +1,50 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.events.event_base import EventBase, EventResult
+
+
+class EventNetworkAdd(EventBase):
+
+ def handle(self, env, notification):
+ network = notification['payload']['network']
+ network_id = network['id']
+ network_document = self.inv.get_by_id(env, network_id)
+ if network_document:
+ self.log.info('network already existed, aborting network add')
+ return EventResult(result=False, retry=False)
+
+ # build network document for adding network
+ project_name = notification['_context_project_name']
+ project_id = notification['_context_project_id']
+ parent_id = project_id + '-networks'
+ network_name = network['name']
+
+ network['environment'] = env
+ network['type'] = 'network'
+ network['id_path'] = "/%s/%s-projects/%s/%s/%s" \
+ % (env, env, project_id, parent_id, network_id)
+ network['cidrs'] = []
+ network['subnet_ids'] = []
+ network['last_scanned'] = notification['timestamp']
+ network['name_path'] = "/%s/Projects/%s/Networks/%s" \
+ % (env, project_name, network_name)
+ network['network'] = network_id
+ network['object_name'] = network_name
+ network['parent_id'] = parent_id
+ network['parent_text'] = "Networks"
+ network['parent_type'] = "networks_folder"
+ network['project'] = project_name
+ network["show_in_tree"] = True
+ network['subnets'] = {}
+
+ self.inv.set(network)
+ return EventResult(result=True,
+ related_object=network_id,
+ display_context=network_id)
diff --git a/app/discover/events/event_network_delete.py b/app/discover/events/event_network_delete.py
new file mode 100644
index 0000000..b3277da
--- /dev/null
+++ b/app/discover/events/event_network_delete.py
@@ -0,0 +1,17 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.events.event_delete_base import EventDeleteBase
+
+
+class EventNetworkDelete(EventDeleteBase):
+
+ def handle(self, env, notification):
+ network_id = notification['payload']['network_id']
+ return self.delete_handler(env, network_id, "network")
diff --git a/app/discover/events/event_network_update.py b/app/discover/events/event_network_update.py
new file mode 100644
index 0000000..3e1432e
--- /dev/null
+++ b/app/discover/events/event_network_update.py
@@ -0,0 +1,44 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import re
+
+from discover.events.event_base import EventBase, EventResult
+
+
+class EventNetworkUpdate(EventBase):
+
+ def handle(self, env, notification):
+ network = notification['payload']['network']
+ network_id = network['id']
+
+ network_document = self.inv.get_by_id(env, network_id)
+ if not network_document:
+ self.log.info('Network document not found, aborting network update')
+ return EventResult(result=False, retry=True)
+
+ # update network document
+ name = network['name']
+ if name != network_document['name']:
+ network_document['name'] = name
+ network_document['object_name'] = name
+
+ name_path = network_document['name_path']
+ network_document['name_path'] = name_path[:name_path.rindex('/') + 1] + name
+
+ # TBD: fix name_path for descendants
+ self.inv.values_replace({"environment": env,
+ "name_path": {"$regex": r"^" + re.escape(name_path + '/')}},
+ {"name_path": {"from": name_path, "to": network_document['name_path']}})
+
+ network_document['admin_state_up'] = network['admin_state_up']
+ self.inv.set(network_document)
+ return EventResult(result=True,
+ related_object=network_id,
+ display_context=network_id)
diff --git a/app/discover/events/event_port_add.py b/app/discover/events/event_port_add.py
new file mode 100644
index 0000000..63a5e80
--- /dev/null
+++ b/app/discover/events/event_port_add.py
@@ -0,0 +1,309 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import datetime
+
+from discover.events.event_base import EventBase, EventResult
+from discover.fetchers.api.api_fetch_host_instances import ApiFetchHostInstances
+from discover.fetchers.cli.cli_fetch_instance_vnics import CliFetchInstanceVnics
+from discover.fetchers.cli.cli_fetch_instance_vnics_vpp import CliFetchInstanceVnicsVpp
+from discover.fetchers.cli.cli_fetch_vservice_vnics import CliFetchVserviceVnics
+from discover.find_links_for_instance_vnics import FindLinksForInstanceVnics
+from discover.find_links_for_vedges import FindLinksForVedges
+from discover.scanner import Scanner
+
+
+class EventPortAdd(EventBase):
+
+ def get_name_by_id(self, object_id):
+ item = self.inv.get_by_id(self.env, object_id)
+ if item:
+ return item['name']
+ return None
+
+ def add_port_document(self, env, project_name, project_id, network_name, network_id, port):
+ # add other data for port document
+ port['type'] = 'port'
+ port['environment'] = env
+
+ port['parent_id'] = port['network_id'] + '-ports'
+ port['parent_text'] = 'Ports'
+ port['parent_type'] = 'ports_folder'
+
+ port['name'] = port['mac_address']
+ port['object'] = port['name']
+ port['project'] = project_name
+
+ port['id_path'] = "{}/{}-projects/{}/{}-networks/{}/{}-ports/{}" \
+ .format(env, env,
+ project_id, project_id,
+ network_id, network_id, port['id'])
+ port['name_path'] = "/{}/Projects/{}/Networks/{}/Ports/{}" \
+ .format(env, project_name, network_name, port['id'])
+
+ port['show_in_tree'] = True
+ port['last_scanned'] = datetime.datetime.utcnow()
+ self.inv.set(port)
+ self.log.info("add port document for port: {}".format(port['id']))
+
+ def add_ports_folder(self, env, project_id, network_id, network_name):
+ port_folder = {
+ "id": network_id + "-ports",
+ "create_object": True,
+ "name": "Ports",
+ "text": "Ports",
+ "type": "ports_folder",
+ "parent_id": network_id,
+ "parent_type": "network",
+ 'environment': env,
+ 'id_path': "{}/{}-projects/{}/{}-networks/{}/{}-ports/"
+ .format(env, env, project_id, project_id,
+ network_id, network_id),
+ 'name_path': "/{}/Projects/{}/Networks/{}/Ports"
+ .format(env, project_id, network_name),
+ "show_in_tree": True,
+ "last_scanned": datetime.datetime.utcnow(),
+ "object_name": "Ports",
+ }
+ self.inv.set(port_folder)
+ self.log.info("add ports_folder document for network: {}.".format(network_id))
+
+ def add_network_services_folder(self, env, project_id, network_id, network_name):
+ network_services_folder = {
+ "create_object": True,
+ "environment": env,
+ "id": network_id + "-network_services",
+ "id_path": "{}/{}-projects/{}/{}-networks/{}/{}-network_services/"
+ .format(env, env, project_id, project_id,
+ network_id, network_id),
+ "last_scanned": datetime.datetime.utcnow(),
+ "name": "Network vServices",
+ "name_path": "/{}/Projects/{}/Networks/{}/Network vServices"
+ .format(env, project_id, network_name),
+ "object_name": "Network vServices",
+ "parent_id": network_id,
+ "parent_type": "network",
+ "show_in_tree": True,
+ "text": "Network vServices",
+ "type": "network_services_folder"
+ }
+ self.inv.set(network_services_folder)
+ self.log.info("add network services folder for network:{}".format(network_id))
+
+ def add_dhcp_document(self, env, host, network_id, network_name):
+ dhcp_document = {
+ "environment": env,
+ "host": host['id'],
+ "id": "qdhcp-" + network_id,
+ "id_path": "{}/{}-vservices/{}-vservices-dhcps/qdhcp-{}"
+ .format(host['id_path'], host['id'],
+ host['id'], network_id),
+ "last_scanned": datetime.datetime.utcnow(),
+ "local_service_id": "qdhcp-" + network_id,
+ "name": "dhcp-" + network_name,
+ "name_path": host['name_path'] + "/Vservices/DHCP servers/dhcp-" + network_name,
+ "network": [network_id],
+ "object_name": "dhcp-" + network_name,
+ "parent_id": host['id'] + "-vservices-dhcps",
+ "parent_text": "DHCP servers",
+ "parent_type": "vservice_dhcps_folder",
+ "service_type": "dhcp",
+ "show_in_tree": True,
+ "type": "vservice"
+ }
+ self.inv.set(dhcp_document)
+ self.log.info("add DHCP document for network: {}.".format(network_id))
+
+ # This method has dynamic usages, take caution when changing its signature
+ def add_vnics_folder(self,
+ env, host,
+ object_id, network_name='',
+ object_type="dhcp", router_name=''):
+ # when vservice is DHCP, id = network_id,
+ # when vservice is router, id = router_id
+ type_map = {"dhcp": ('DHCP servers', 'dhcp-' + network_name),
+ "router": ('Gateways', router_name)}
+
+ vnics_folder = {
+ "environment": env,
+ "id": "q{}-{}-vnics".format(object_type, object_id),
+ "id_path": "{}/{}-vservices/{}-vservices-{}s/q{}-{}/q{}-{}-vnics"
+ .format(host['id_path'], host['id'], host['id'],
+ object_type, object_type, object_id,
+ object_type, object_id),
+ "last_scanned": datetime.datetime.utcnow(),
+ "name": "q{}-{}-vnics".format(object_type, object_id),
+ "name_path": "{}/Vservices/{}/{}/vNICs"
+ .format(host['name_path'],
+ type_map[object_type][0],
+ type_map[object_type][1]),
+ "object_name": "vNICs",
+ "parent_id": "q{}-{}".format(object_type, object_id),
+ "parent_type": "vservice",
+ "show_in_tree": True,
+ "text": "vNICs",
+ "type": "vnics_folder"
+ }
+ self.inv.set(vnics_folder)
+ self.log.info("add vnics_folder document for q{}-{}-vnics"
+ .format(object_type, object_id))
+
+ # This method has dynamic usages, take caution when changing its signature
+ def add_vnic_document(self,
+ env, host,
+ object_id, network_name='',
+ object_type='dhcp', router_name='',
+ mac_address=None):
+ # when vservice is DHCP, id = network_id,
+ # when vservice is router, id = router_id
+ type_map = {"dhcp": ('DHCP servers', 'dhcp-' + network_name),
+ "router": ('Gateways', router_name)}
+
+ fetcher = CliFetchVserviceVnics()
+ fetcher.set_env(env)
+ namespace = 'q{}-{}'.format(object_type, object_id)
+ vnic_documents = fetcher.handle_service(host['id'], namespace, enable_cache=False)
+ if not vnic_documents:
+ self.log.info("Vnic document not found in namespace.")
+ return False
+
+ if mac_address is not None:
+ for doc in vnic_documents:
+ if doc['mac_address'] == mac_address:
+ # add a specific vnic document.
+ doc["environment"] = env
+ doc["id_path"] = "{}/{}-vservices/{}-vservices-{}s/{}/{}-vnics/{}"\
+ .format(host['id_path'], host['id'],
+ host['id'], object_type, namespace,
+ namespace, doc["id"])
+ doc["name_path"] = "{}/Vservices/{}/{}/vNICs/{}" \
+ .format(host['name_path'],
+ type_map[object_type][0],
+ type_map[object_type][1],
+ doc["id"])
+ self.inv.set(doc)
+ self.log.info("add vnic document with mac_address: {}."
+ .format(mac_address))
+ return True
+
+ self.log.info("Can not find vnic document by mac_address: {}"
+ .format(mac_address))
+ return False
+ else:
+ for doc in vnic_documents:
+ # add all vnic documents.
+ doc["environment"] = env
+ doc["id_path"] = "{}/{}-vservices/{}-vservices-{}s/{}/{}-vnics/{}" \
+ .format(host['id_path'], host['id'],
+ host['id'], object_type,
+ namespace, namespace, doc["id"])
+ doc["name_path"] = "{}/Vservices/{}/{}/vNICs/{}" \
+ .format(host['name_path'],
+ type_map[object_type][0],
+ type_map[object_type][1],
+ doc["id"])
+ self.inv.set(doc)
+ self.log.info("add vnic document with mac_address: {}."
+ .format(doc["mac_address"]))
+ return True
+
+ def handle_dhcp_device(self, env, notification, network_id, network_name, mac_address=None):
+ # add dhcp vservice document.
+ host_id = notification["publisher_id"].replace("network.", "", 1)
+ host = self.inv.get_by_id(env, host_id)
+
+ self.add_dhcp_document(env, host, network_id, network_name)
+
+ # add vnics folder.
+ self.add_vnics_folder(env, host, network_id, network_name)
+
+ # add vnic document.
+ self.add_vnic_document(env, host, network_id, network_name, mac_address=mac_address)
+
+ def handle(self, env, notification):
+ project = notification['_context_project_name']
+ project_id = notification['_context_project_id']
+ payload = notification['payload']
+ port = payload['port']
+ network_id = port['network_id']
+ network_name = self.get_name_by_id(network_id)
+ mac_address = port['mac_address']
+
+ # check ports folder document.
+ ports_folder = self.inv.get_by_id(env, network_id + '-ports')
+ if not ports_folder:
+ self.log.info("ports folder not found, add ports folder first.")
+ self.add_ports_folder(env, project_id, network_id, network_name)
+ self.add_port_document(env, project, project_id, network_name, network_id, port)
+
+ # update the port related documents.
+ if 'compute' in port['device_owner']:
+ # update the instance related document.
+ host_id = port['binding:host_id']
+ instance_id = port['device_id']
+ old_instance_doc = self.inv.get_by_id(env, instance_id)
+ instances_root_id = host_id + '-instances'
+ instances_root = self.inv.get_by_id(env, instances_root_id)
+ if not instances_root:
+                self.log.info('instances root document not found, aborting port add')
+ return EventResult(result=False, retry=True)
+
+ # update instance
+ instance_fetcher = ApiFetchHostInstances()
+ instance_fetcher.set_env(env)
+ instance_docs = instance_fetcher.get(host_id + '-')
+ instance = next(filter(lambda i: i['id'] == instance_id, instance_docs), None)
+
+ if instance:
+ old_instance_doc['network_info'] = instance['network_info']
+ old_instance_doc['network'] = instance['network']
+ if old_instance_doc.get('mac_address') is None:
+ old_instance_doc['mac_address'] = mac_address
+
+ self.inv.set(old_instance_doc)
+ self.log.info("update instance document")
+
+ # add vnic document.
+ if port['binding:vif_type'] == 'vpp':
+ vnic_fetcher = CliFetchInstanceVnicsVpp()
+ else:
+ # set ovs as default type.
+ vnic_fetcher = CliFetchInstanceVnics()
+
+ vnic_fetcher.set_env(env)
+ vnic_docs = vnic_fetcher.get(instance_id + '-')
+ vnic = next(filter(lambda vnic: vnic['mac_address'] == mac_address, vnic_docs), None)
+
+ if vnic:
+ vnic['environment'] = env
+ vnic['type'] = 'vnic'
+ vnic['name_path'] = old_instance_doc['name_path'] + '/vNICs/' + vnic['name']
+ vnic['id_path'] = '{}/{}/{}'.format(old_instance_doc['id_path'],
+ old_instance_doc['id'],
+ vnic['name'])
+ self.inv.set(vnic)
+ self.log.info("add instance-vnic document, mac_address: {}"
+ .format(mac_address))
+
+ self.log.info("scanning for links")
+ fetchers_implementing_add_links = [FindLinksForInstanceVnics(), FindLinksForVedges()]
+ for fetcher in fetchers_implementing_add_links:
+ fetcher.add_links()
+ scanner = Scanner()
+ scanner.set_env(env)
+ scanner.scan_cliques()
+
+ port_document = self.inv.get_by_id(env, port['id'])
+ if not port_document:
+ self.log.error("Port {} failed to add".format(port['id']))
+ return EventResult(result=False, retry=True)
+
+ return EventResult(result=True,
+ related_object=port['id'],
+ display_context=network_id)
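
The port documents above are keyed into the inventory tree through their id_path and name_path strings. Below is a minimal standalone sketch of how those two paths are composed, using the same format strings as add_port_document; all identifiers are made-up placeholders and the snippet is illustrative only, not part of the module:

    env = 'Mirantis-Liberty'
    project_id = 'proj-1'
    project_name = 'project1'
    network_id = 'net-1'
    port = {'id': 'port-1', 'mac_address': 'fa:16:3e:00:00:01'}

    # hierarchical id path: env -> projects folder -> project -> networks folder
    #                        -> network -> ports folder -> port
    id_path = "{}/{}-projects/{}/{}-networks/{}/{}-ports/{}".format(
        env, env, project_id, project_id, network_id, network_id, port['id'])
    # human-readable path used for display in the tree
    name_path = "/{}/Projects/{}/Networks/{}/Ports/{}".format(
        env, project_name, 'net1', port['id'])

    print(id_path)
    print(name_path)
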
diff --git a/app/discover/events/event_port_delete.py b/app/discover/events/event_port_delete.py
new file mode 100644
index 0000000..1e55870
--- /dev/null
+++ b/app/discover/events/event_port_delete.py
@@ -0,0 +1,80 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.events.event_base import EventResult
+from discover.events.event_delete_base import EventDeleteBase
+from discover.fetchers.api.api_fetch_host_instances import ApiFetchHostInstances
+
+
+class EventPortDelete(EventDeleteBase):
+
+ def delete_port(self, env, port_id):
+ port_doc = self.inv.get_by_id(env, port_id)
+ if not port_doc:
+ self.log.info("Port document not found, aborting port deleting.")
+ return EventResult(result=False, retry=False)
+
+        # if the port is bound to an instance, the instance document needs to be updated.
+        if 'compute' in port_doc['device_owner']:
+            self.log.info("updating the instance document to which the port is bound.")
+ self.update_instance(env, port_doc)
+
+ # delete port document
+ self.inv.delete('inventory', {'id': port_id})
+
+ # delete vnic and related document
+ vnic_doc = self.inv.get_by_field(env, 'vnic', 'mac_address', port_doc['mac_address'], get_single=True)
+ if not vnic_doc:
+ self.log.info("Vnic document not found, aborting vnic deleting.")
+ return EventResult(result=False, retry=False)
+
+ result = self.delete_handler(env, vnic_doc['id'], 'vnic')
+ result.related_object = port_id
+ result.display_context = port_doc.get('network_id')
+        self.log.info('Finished port delete')
+ return result
+
+ def update_instance(self, env, port_doc):
+        # update the instance document if the port is bound to an instance
+ network_id = port_doc['network_id']
+ instance_doc = self.inv.get_by_field(env, 'instance', 'network_info.id', port_doc['id'], get_single=True)
+ if instance_doc:
+ port_num = 0
+
+            # iterate over a copy, since matching ports are removed inside the loop
+            for port in list(instance_doc['network_info']):
+ if port['network']['id'] == network_id:
+ port_num += 1
+ if port['id'] == port_doc['id']:
+ instance_doc['network_info'].remove(port)
+ self.log.info("update network information of instance document.")
+
+ if port_num == 1:
+                # remove the network entry only when the last port in the network is deleted.
+ instance_doc['network'].remove(network_id)
+
+ # update instance mac address.
+ if port_doc['mac_address'] == instance_doc['mac_address']:
+ instance_fetcher = ApiFetchHostInstances()
+ instance_fetcher.set_env(env)
+ host_id = port_doc['binding:host_id']
+ instance_id = port_doc['device_id']
+ instance_docs = instance_fetcher.get(host_id + '-')
+ instance = next(filter(lambda i: i['id'] == instance_id, instance_docs), None)
+ if instance:
+ if 'mac_address' not in instance:
+ instance_doc['mac_address'] = None
+ self.log.info("update mac_address:%s of instance document." % instance_doc['mac_address'])
+
+ self.inv.set(instance_doc)
+ else:
+ self.log.info("No instance document binding to network:%s." % network_id)
+
+ def handle(self, env, notification):
+ port_id = notification['payload']['port_id']
+ return self.delete_port(env, port_id)
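
A simplified standalone sketch of the network_info pruning performed in update_instance above: it iterates over a copy of the list so entries can be removed safely, and drops the network id only when the deleted port was the last one on that network. The dictionaries are made-up stand-ins for inventory documents, not the real API:

    def prune_port_from_instance(instance_doc, port_doc):
        network_id = port_doc['network_id']
        ports_in_network = 0
        for port in list(instance_doc['network_info']):
            if port['network']['id'] == network_id:
                ports_in_network += 1
            if port['id'] == port_doc['id']:
                instance_doc['network_info'].remove(port)
        if ports_in_network == 1:
            # the deleted port was the last one on this network
            instance_doc['network'].remove(network_id)
        return instance_doc

    instance = {'network_info': [{'id': 'port-1', 'network': {'id': 'net-1'}}],
                'network': ['net-1']}
    print(prune_port_from_instance(instance, {'id': 'port-1', 'network_id': 'net-1'}))
    # {'network_info': [], 'network': []}
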
diff --git a/app/discover/events/event_port_update.py b/app/discover/events/event_port_update.py
new file mode 100644
index 0000000..298b565
--- /dev/null
+++ b/app/discover/events/event_port_update.py
@@ -0,0 +1,38 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.events.event_base import EventBase, EventResult
+
+
+class EventPortUpdate(EventBase):
+
+ def handle(self, env, notification):
+ # check port document.
+ port = notification['payload']['port']
+ port_id = port['id']
+ port_document = self.inv.get_by_id(env, port_id)
+ if not port_document:
+ self.log.info('port document does not exist, aborting port update')
+ return EventResult(result=False, retry=True)
+
+ # build port document
+ port_document['name'] = port['name']
+ port_document['admin_state_up'] = port['admin_state_up']
+ if port_document['admin_state_up']:
+ port_document['status'] = 'ACTIVE'
+ else:
+ port_document['status'] = 'DOWN'
+
+ port_document['binding:vnic_type'] = port['binding:vnic_type']
+
+ # update port document.
+ self.inv.set(port_document)
+ return EventResult(result=True,
+ related_object=port_id,
+ display_context=port_document.get('network_id'))
diff --git a/app/discover/events/event_router_add.py b/app/discover/events/event_router_add.py
new file mode 100644
index 0000000..20e07e5
--- /dev/null
+++ b/app/discover/events/event_router_add.py
@@ -0,0 +1,123 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import datetime
+
+from functools import partial
+
+from discover.events.event_base import EventBase, EventResult
+from discover.events.event_port_add import EventPortAdd
+from discover.events.event_subnet_add import EventSubnetAdd
+from discover.fetchers.cli.cli_fetch_host_vservice import CliFetchHostVservice
+from discover.find_links_for_vservice_vnics import FindLinksForVserviceVnics
+from discover.scanner import Scanner
+from utils.util import decode_router_id, encode_router_id
+
+
+class EventRouterAdd(EventBase):
+
+ def add_router_document(self, env, network_id, router_doc, host):
+ router_doc["environment"] = env
+ router_doc["id_path"] = "{}/{}-vservices/{}-vservices-routers/{}"\
+ .format(host['id_path'], host['id'],
+ host['id'], router_doc['id'])
+ router_doc['last_scanned'] = datetime.datetime.utcnow()
+ router_doc['name_path'] = "{}/Vservices/Gateways/{}"\
+ .format(host['name_path'],
+ router_doc['name'])
+ router_doc['network'] = []
+ if network_id:
+ router_doc['network'] = [network_id]
+
+ router_doc['object_name'] = router_doc['name']
+ router_doc['parent_id'] = host['id'] + "-vservices-routers"
+ router_doc['show_in_tree'] = True
+ router_doc['type'] = "vservice"
+
+ self.inv.set(router_doc)
+
+ def add_children_documents(self, env, project_id, network_id, host, router_doc):
+
+ network_document = self.inv.get_by_id(env, network_id)
+ network_name = network_document['name']
+ router_id = decode_router_id(router_doc['id'])
+
+ # add port for binding to vservice:router
+ subnet_handler = EventSubnetAdd()
+ ports_folder = self.inv.get_by_id(env, network_id + '-ports')
+ if not ports_folder:
+ self.log.info("Ports_folder not found.")
+ subnet_handler.add_ports_folder(env, project_id, network_id, network_name)
+ add_port_return = subnet_handler.add_port_document(env,
+ router_doc['gw_port_id'],
+ network_name=network_name)
+
+ # add vnics folder and vnic document
+ port_handler = EventPortAdd()
+ add_vnic_folder = partial(port_handler.add_vnics_folder,
+ env=env,
+ host=host,
+ object_id=router_id,
+ object_type='router',
+ network_name=network_name,
+ router_name=router_doc['name'])
+ add_vnic_document = partial(port_handler.add_vnic_document,
+ env=env,
+ host=host,
+ object_id=router_id,
+ object_type='router',
+ network_name=network_name,
+ router_name=router_doc['name'])
+
+ add_vnic_folder()
+ if add_port_return:
+ add_vnic_return = add_vnic_document()
+ if not add_vnic_return:
+ self.log.info("Try to add vnic document again.")
+ add_vnic_document()
+ else:
+            # in some cases the port has been created,
+            # but the port document cannot yet be fetched via the OpenStack API
+            self.log.info("Trying to add port document again.")
+ # TODO: #AskCheng - this never returns anything!
+ add_port_return = add_vnic_folder()
+ # TODO: #AskCheng - this will never evaluate to True!
+ if add_port_return is False:
+ self.log.info("Try to add vnic document again.")
+ add_vnic_document()
+
+ def handle(self, env, values):
+ router = values['payload']['router']
+ host_id = values["publisher_id"].replace("network.", "", 1)
+ project_id = values['_context_project_id']
+ router_id = encode_router_id(host_id, router['id'])
+ host = self.inv.get_by_id(env, host_id)
+
+ fetcher = CliFetchHostVservice()
+ fetcher.set_env(env)
+ router_doc = fetcher.get_vservice(host_id, router_id)
+ gateway_info = router['external_gateway_info']
+
+ if gateway_info:
+ network_id = gateway_info['network_id']
+ self.add_router_document(env, network_id, router_doc, host)
+ self.add_children_documents(env, project_id, network_id, host, router_doc)
+ else:
+ self.add_router_document(env, None, router_doc, host)
+
+ # scan links and cliques
+ FindLinksForVserviceVnics().add_links(search={"parent_id": router_id})
+ scanner = Scanner()
+ scanner.set_env(env)
+ scanner.scan_cliques()
+ self.log.info("Finished router added.")
+
+ return EventResult(result=True,
+ related_object=router_id,
+ display_context=router_id)
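
add_children_documents above uses functools.partial to pre-bind the long, shared keyword-argument lists of the vnic helpers so they can be retried without repeating the arguments. A self-contained sketch of the same pattern with a dummy function (all names below are placeholders, not project code):

    from functools import partial

    def add_vnic_document(env, host, object_id, object_type, router_name):
        print('adding vnic document for', object_type, object_id, 'on', host)
        return True

    add_vnic = partial(add_vnic_document,
                       env='env1', host='host-1',
                       object_id='router-1', object_type='router',
                       router_name='gw-router')

    # the bound callable can be invoked (and retried) without repeating arguments
    if not add_vnic():
        add_vnic()
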
diff --git a/app/discover/events/event_router_delete.py b/app/discover/events/event_router_delete.py
new file mode 100644
index 0000000..65072d6
--- /dev/null
+++ b/app/discover/events/event_router_delete.py
@@ -0,0 +1,37 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.events.event_base import EventResult
+from discover.events.event_delete_base import EventDeleteBase
+from utils.util import encode_router_id
+
+
+class EventRouterDelete(EventDeleteBase):
+
+ def handle(self, env, values):
+ payload = values['payload']
+
+ if 'publisher_id' not in values:
+ self.log.error("Publisher_id is not in event values. Aborting router delete")
+ return EventResult(result=False, retry=False)
+
+ host_id = values['publisher_id'].replace('network.', '', 1)
+ if 'router_id' in payload:
+ router_id = payload['router_id']
+ elif 'id' in payload:
+ router_id = payload['id']
+ else:
+ router_id = payload.get('router', {}).get('id')
+
+ if not router_id:
+ self.log.error("Router id is not in payload. Aborting router delete")
+ return EventResult(result=False, retry=False)
+
+ router_full_id = encode_router_id(host_id, router_id)
+ return self.delete_handler(env, router_full_id, "vservice")
diff --git a/app/discover/events/event_router_update.py b/app/discover/events/event_router_update.py
new file mode 100644
index 0000000..8dd53f0
--- /dev/null
+++ b/app/discover/events/event_router_update.py
@@ -0,0 +1,82 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.events.event_base import EventBase, EventResult
+from discover.events.event_port_delete import EventPortDelete
+from discover.events.event_router_add import EventRouterAdd
+from discover.fetchers.cli.cli_fetch_host_vservice import CliFetchHostVservice
+from discover.find_links_for_vservice_vnics import FindLinksForVserviceVnics
+from discover.scanner import Scanner
+from utils.util import encode_router_id
+
+
+class EventRouterUpdate(EventBase):
+
+ def handle(self, env, values):
+ payload = values['payload']
+ router = payload['router']
+
+ project_id = values['_context_project_id']
+ host_id = values["publisher_id"].replace("network.", "", 1)
+ router_id = payload['id'] if 'id' in payload else router['id']
+
+ router_full_id = encode_router_id(host_id, router_id)
+ router_doc = self.inv.get_by_id(env, router_full_id)
+ if not router_doc:
+ self.log.info("Router document not found, aborting router updating")
+ return EventResult(result=False, retry=True)
+
+ router_doc['admin_state_up'] = router['admin_state_up']
+ router_doc['name'] = router['name']
+ gateway_info = router.get('external_gateway_info')
+ if gateway_info is None:
+            # when the gateway is deleted, the related port document needs to be deleted as well.
+ port_doc = {}
+ if router_doc.get('gw_port_id'):
+ port_doc = self.inv.get_by_id(env, router_doc['gw_port_id'])
+ EventPortDelete().delete_port(env, router_doc['gw_port_id'])
+
+ if router_doc.get('network'):
+ if port_doc:
+ router_doc['network'].remove(port_doc['network_id'])
+ router_doc['gw_port_id'] = None
+
+ # remove related links
+ self.inv.delete('links', {'source_id': router_full_id})
+ else:
+ if 'network' in router_doc:
+ if gateway_info['network_id'] not in router_doc['network']:
+ router_doc['network'].append(gateway_info['network_id'])
+ else:
+ router_doc['network'] = [gateway_info['network_id']]
+ # update static route
+ router_doc['routes'] = router['routes']
+
+ # add gw_port_id info and port document.
+ fetcher = CliFetchHostVservice()
+ fetcher.set_env(env)
+ router_vservice = fetcher.get_vservice(host_id, router_full_id)
+ if router_vservice.get('gw_port_id'):
+ router_doc['gw_port_id'] = router_vservice['gw_port_id']
+
+ host = self.inv.get_by_id(env, host_id)
+ EventRouterAdd().add_children_documents(env, project_id, gateway_info['network_id'], host, router_doc)
+
+ # rescan the vnic links.
+ FindLinksForVserviceVnics().add_links(search={'parent_id': router_full_id + '-vnics'})
+ self.inv.set(router_doc)
+
+ # update the cliques.
+ scanner = Scanner()
+ scanner.set_env(env)
+ scanner.scan_cliques()
+ self.log.info("Finished router update.")
+ return EventResult(result=True,
+ related_object=router_full_id,
+ display_context=router_full_id)
diff --git a/app/discover/events/event_subnet_add.py b/app/discover/events/event_subnet_add.py
new file mode 100644
index 0000000..b519b1c
--- /dev/null
+++ b/app/discover/events/event_subnet_add.py
@@ -0,0 +1,154 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import datetime
+
+from discover.events.event_base import EventBase, EventResult
+from discover.events.event_port_add import EventPortAdd
+from discover.fetchers.api.api_access import ApiAccess
+from discover.fetchers.api.api_fetch_port import ApiFetchPort
+from discover.fetchers.api.api_fetch_regions import ApiFetchRegions
+from discover.fetchers.db.db_fetch_port import DbFetchPort
+from discover.find_links_for_pnics import FindLinksForPnics
+from discover.find_links_for_vservice_vnics import FindLinksForVserviceVnics
+from discover.scanner import Scanner
+
+
+class EventSubnetAdd(EventBase):
+
+ def add_port_document(self, env, port_id, network_name=None, project_name=''):
+        # when adding a router-interface port, network_name should be given to improve efficiency.
+        # when adding a gateway port, project_name needs to be specified, because this type of port
+        # document does not have a project attribute. In this case, network_name should not be provided.
+
+ fetcher = ApiFetchPort()
+ fetcher.set_env(env)
+ ports = fetcher.get(port_id)
+
+ if ports:
+ port = ports[0]
+ project_id = port['tenant_id']
+ network_id = port['network_id']
+
+ if not network_name:
+ network = self.inv.get_by_id(env, network_id)
+ network_name = network['name']
+
+ port['type'] = "port"
+ port['environment'] = env
+ port_id = port['id']
+ port['id_path'] = "%s/%s-projects/%s/%s-networks/%s/%s-ports/%s" % \
+ (env, env, project_id, project_id, network_id, network_id, port_id)
+ port['last_scanned'] = datetime.datetime.utcnow()
+ if 'project' in port:
+ project_name = port['project']
+ port['name_path'] = "/%s/Projects/%s/Networks/%s/Ports/%s" % \
+ (env, project_name, network_name, port_id)
+ self.inv.set(port)
+ self.log.info("add port document for port:%s" % port_id)
+ return port
+ return False
+
+ def add_ports_folder(self, env, project_id, network_id, network_name):
+ port_folder = {
+ "id": network_id + "-ports",
+ "create_object": True,
+ "name": "Ports",
+ "text": "Ports",
+ "type": "ports_folder",
+ "parent_id": network_id,
+ "parent_type": "network",
+ 'environment': env,
+ 'id_path': "%s/%s-projects/%s/%s-networks/%s/%s-ports/" % (env, env, project_id, project_id,
+ network_id, network_id),
+ 'name_path': "/%s/Projects/%s/Networks/%s/Ports" % (env, project_id, network_name),
+ "show_in_tree": True,
+ "last_scanned": datetime.datetime.utcnow(),
+ "object_name": "Ports",
+ }
+
+ self.inv.set(port_folder)
+
+ def add_children_documents(self, env, project_id, network_id, network_name, host_id):
+ # generate port folder data.
+ self.add_ports_folder(env, project_id, network_id, network_name)
+
+ # get ports ID.
+ port_id = DbFetchPort().get_id(network_id)
+
+ # add specific ports documents.
+ self.add_port_document(env, port_id, network_name=network_name)
+
+ port_handler = EventPortAdd()
+
+ # add network_services_folder document.
+ port_handler.add_network_services_folder(env, project_id, network_id, network_name)
+
+ # add dhcp vservice document.
+ host = self.inv.get_by_id(env, host_id)
+
+ port_handler.add_dhcp_document(env, host, network_id, network_name)
+
+ # add vnics folder.
+ port_handler.add_vnics_folder(env, host, network_id, network_name)
+
+        # add vnic document.
+ port_handler.add_vnic_document(env, host, network_id, network_name)
+
+ def handle(self, env, notification):
+ # check for network document.
+ subnet = notification['payload']['subnet']
+ project_id = subnet['tenant_id']
+ network_id = subnet['network_id']
+ if 'id' not in subnet:
+ self.log.info('Subnet payload doesn\'t have id, aborting subnet add')
+ return EventResult(result=False, retry=False)
+
+ network_document = self.inv.get_by_id(env, network_id)
+ if not network_document:
+ self.log.info('network document does not exist, aborting subnet add')
+ return EventResult(result=False, retry=True)
+ network_name = network_document['name']
+
+ # build subnet document for adding network
+ if subnet['cidr'] not in network_document['cidrs']:
+ network_document['cidrs'].append(subnet['cidr'])
+ if not network_document.get('subnets'):
+ network_document['subnets'] = {}
+
+ network_document['subnets'][subnet['name']] = subnet
+ if subnet['id'] not in network_document['subnet_ids']:
+ network_document['subnet_ids'].append(subnet['id'])
+ self.inv.set(network_document)
+
+ # Check DHCP enable, if true, scan network.
+ if subnet['enable_dhcp'] is True:
+ # update network
+ # TODO: #AskCheng - why is this necessary?
+ if len(ApiAccess.regions) == 0:
+ fetcher = ApiFetchRegions()
+ fetcher.set_env(env)
+ fetcher.get(None)
+
+ self.log.info("add new subnet.")
+ host_id = notification["publisher_id"].replace("network.", "", 1)
+ self.add_children_documents(env, project_id, network_id, network_name, host_id)
+
+ # scan links and cliques
+ self.log.info("scanning for links")
+ FindLinksForPnics().add_links()
+ FindLinksForVserviceVnics().add_links(search={"parent_id": "qdhcp-%s-vnics" % network_id})
+
+ scanner = Scanner()
+ scanner.set_env(env)
+ scanner.scan_cliques()
+ self.log.info("Finished subnet added.")
+ return EventResult(result=True,
+ related_object=subnet['id'],
+ display_context=network_id)
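
The subnet bookkeeping on the network document above (cidrs, subnets, subnet_ids) can be isolated into a small pure function; the following illustrative sketch uses plain in-memory dicts rather than the actual inventory API:

    def add_subnet_to_network(network_doc, subnet):
        if subnet['cidr'] not in network_doc['cidrs']:
            network_doc['cidrs'].append(subnet['cidr'])
        network_doc.setdefault('subnets', {})[subnet['name']] = subnet
        if subnet['id'] not in network_doc['subnet_ids']:
            network_doc['subnet_ids'].append(subnet['id'])
        return network_doc

    network = {'cidrs': [], 'subnet_ids': []}
    subnet = {'id': 'subnet-1', 'name': 'subnet1',
              'cidr': '10.0.0.0/24', 'enable_dhcp': True}
    print(add_subnet_to_network(network, subnet))
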
diff --git a/app/discover/events/event_subnet_delete.py b/app/discover/events/event_subnet_delete.py
new file mode 100644
index 0000000..900e701
--- /dev/null
+++ b/app/discover/events/event_subnet_delete.py
@@ -0,0 +1,57 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.events.event_base import EventResult
+from discover.events.event_delete_base import EventDeleteBase
+
+
+class EventSubnetDelete(EventDeleteBase):
+
+ def delete_children_documents(self, env, vservice_id):
+ vnic_parent_id = vservice_id + '-vnics'
+ vnic = self.inv.get_by_field(env, 'vnic', 'parent_id', vnic_parent_id, get_single=True)
+ if not vnic:
+ self.log.info("Vnic document not found.")
+ return EventResult(result=False, retry=False)
+
+ # delete port and vnic together by mac address.
+ self.inv.delete('inventory', {"mac_address": vnic.get("mac_address")})
+ return self.delete_handler(env, vservice_id, 'vservice')
+
+ def handle(self, env, notification):
+ subnet_id = notification['payload']['subnet_id']
+ network_document = self.inv.get_by_field(env, "network", "subnet_ids", subnet_id, get_single=True)
+ if not network_document:
+ self.log.info("network document not found, aborting subnet deleting")
+ return EventResult(result=False, retry=False)
+
+ # remove subnet_id from subnet_ids array
+ network_document["subnet_ids"].remove(subnet_id)
+
+ # find the subnet in network_document by subnet_id
+ subnet = next(
+ filter(lambda s: s['id'] == subnet_id,
+ network_document['subnets'].values()),
+ None)
+
+ # remove cidr from cidrs and delete subnet document.
+ if subnet:
+ network_document['cidrs'].remove(subnet['cidr'])
+ del network_document['subnets'][subnet['name']]
+
+ self.inv.set(network_document)
+
+        # when the network no longer has any subnets, delete the DHCP vservice, port and vnic documents.
+ if not network_document["subnet_ids"]:
+ vservice_dhcp_id = 'qdhcp-{}'.format(network_document['id'])
+ self.delete_children_documents(env, vservice_dhcp_id)
+
+        return EventResult(result=True,
+                           related_object=subnet_id,
+                           display_context=network_document.get('id'))
diff --git a/app/discover/events/event_subnet_update.py b/app/discover/events/event_subnet_update.py
new file mode 100644
index 0000000..9d3c48b
--- /dev/null
+++ b/app/discover/events/event_subnet_update.py
@@ -0,0 +1,102 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.events.event_base import EventBase, EventResult
+from discover.events.event_port_add import EventPortAdd
+from discover.events.event_port_delete import EventPortDelete
+from discover.events.event_subnet_add import EventSubnetAdd
+from discover.fetchers.api.api_access import ApiAccess
+from discover.fetchers.api.api_fetch_regions import ApiFetchRegions
+from discover.fetchers.db.db_fetch_port import DbFetchPort
+from discover.find_links_for_vservice_vnics import FindLinksForVserviceVnics
+from discover.scanner import Scanner
+
+
+class EventSubnetUpdate(EventBase):
+
+ def handle(self, env, notification):
+ # check for network document.
+ subnet = notification['payload']['subnet']
+ project = notification['_context_project_name']
+ host_id = notification['publisher_id'].replace('network.', '', 1)
+ subnet_id = subnet['id']
+ network_id = subnet['network_id']
+ network_document = self.inv.get_by_id(env, network_id)
+ if not network_document:
+ self.log.info('network document does not exist, aborting subnet update')
+ return EventResult(result=False, retry=True)
+
+ # update network document.
+ subnets = network_document['subnets']
+ key = next(filter(lambda k: subnets[k]['id'] == subnet_id, subnets),
+ None)
+
+ if key:
+ if subnet['enable_dhcp'] and subnets[key]['enable_dhcp'] is False:
+ # scan DHCP namespace to add related document.
+ # add dhcp vservice document.
+ host = self.inv.get_by_id(env, host_id)
+ port_handler = EventPortAdd()
+ port_handler.add_dhcp_document(env, host, network_id,
+ network_document['name'])
+
+                # make sure that ApiAccess.regions is not empty.
+ if len(ApiAccess.regions) == 0:
+ fetcher = ApiFetchRegions()
+ fetcher.set_env(env)
+ fetcher.get(None)
+
+ self.log.info("add port binding to DHCP server.")
+ port_id = DbFetchPort(). \
+ get_id_by_field(network_id,
+ """device_owner LIKE "%dhcp" """)
+ port = EventSubnetAdd(). \
+ add_port_document(env, port_id,
+ network_name=network_document['name'],
+ project_name=project)
+ if port:
+ port_handler. \
+ add_vnic_document(env, host, network_id,
+ network_name=network_document['name'],
+ mac_address=port['mac_address'])
+                # add link for vservice - vnic
+                FindLinksForVserviceVnics(). \
+                    add_links(search={"id": "qdhcp-%s" % network_id})
+                scanner = Scanner()
+                scanner.set_env(env)
+                scanner.scan_cliques()
+
+ if subnet['enable_dhcp'] is False and subnets[key]['enable_dhcp']:
+                # delete existing related DHCP documents.
+ self.inv.delete("inventory",
+ {'id': "qdhcp-%s" % subnet['network_id']})
+ self.log.info("delete DHCP document: qdhcp-%s" %
+ subnet['network_id'])
+
+ port = self.inv.find_items({'network_id': subnet['network_id'],
+ 'device_owner': 'network:dhcp'},
+ get_single=True)
+ if 'id' in port:
+ EventPortDelete().delete_port(env, port['id'])
+ self.log.info("delete port binding to DHCP server.")
+
+ if subnet['name'] == subnets[key]['name']:
+ subnets[key] = subnet
+ else:
+ # TODO: #AskCheng shouldn't we remove the old one?
+ subnets[subnet['name']] = subnet
+
+ self.inv.set(network_document)
+ return EventResult(result=True,
+ related_object=subnet['id'],
+ display_context=network_id)
diff --git a/app/discover/events/listeners/__init__.py b/app/discover/events/listeners/__init__.py
new file mode 100644
index 0000000..1e85a2a
--- /dev/null
+++ b/app/discover/events/listeners/__init__.py
@@ -0,0 +1,10 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
diff --git a/app/discover/events/listeners/default_listener.py b/app/discover/events/listeners/default_listener.py
new file mode 100755
index 0000000..a135673
--- /dev/null
+++ b/app/discover/events/listeners/default_listener.py
@@ -0,0 +1,314 @@
+#!/usr/bin/env python3
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
+import argparse
+import datetime
+import json
+import os
+import time
+from collections import defaultdict
+from typing import List
+
+from kombu import Connection, Queue, Exchange
+from kombu.mixins import ConsumerMixin
+
+from discover.configuration import Configuration
+from discover.event_handler import EventHandler
+from discover.events.event_base import EventResult
+from discover.events.event_metadata_parser import parse_metadata_file
+from discover.events.listeners.listener_base import ListenerBase
+from messages.message import Message
+from monitoring.setup.monitoring_setup_manager import MonitoringSetupManager
+from utils.constants import OperationalStatus, EnvironmentFeatures
+from utils.inventory_mgr import InventoryMgr
+from utils.logging.full_logger import FullLogger
+from utils.mongo_access import MongoAccess
+from utils.string_utils import stringify_datetime
+from utils.util import SignalHandler, setup_args
+
+
+class DefaultListener(ListenerBase, ConsumerMixin):
+
+ SOURCE_SYSTEM = "OpenStack"
+
+ COMMON_METADATA_FILE = "events.json"
+
+ DEFAULTS = {
+ "env": "Mirantis-Liberty",
+ "mongo_config": "",
+ "metadata_file": "",
+ "inventory": "inventory",
+ "loglevel": "INFO",
+ "environments_collection": "environments_config",
+ "retry_limit": 10,
+ "consume_all": False
+ }
+
+ def __init__(self, connection: Connection,
+ event_handler: EventHandler,
+ event_queues: List,
+ env_name: str = DEFAULTS["env"],
+ inventory_collection: str = DEFAULTS["inventory"],
+ retry_limit: int = DEFAULTS["retry_limit"],
+ consume_all: bool = DEFAULTS["consume_all"]):
+ super().__init__()
+
+ self.connection = connection
+ self.retry_limit = retry_limit
+ self.env_name = env_name
+ self.consume_all = consume_all
+ self.handler = event_handler
+ self.event_queues = event_queues
+ self.failing_messages = defaultdict(int)
+
+ self.inv = InventoryMgr()
+ self.inv.set_collections(inventory_collection)
+ if self.inv.is_feature_supported(self.env_name, EnvironmentFeatures.MONITORING):
+ self.inv.monitoring_setup_manager = \
+ MonitoringSetupManager(self.env_name)
+ self.inv.monitoring_setup_manager.server_setup()
+
+ def get_consumers(self, consumer, channel):
+ return [consumer(queues=self.event_queues,
+ accept=['json'],
+ callbacks=[self.process_task])]
+
+ # Determines if message should be processed by a handler
+ # and extracts message body if yes.
+ @staticmethod
+ def _extract_event_data(body):
+ if "event_type" in body:
+ return True, body
+ elif "event_type" in body.get("oslo.message", ""):
+ return True, json.loads(body["oslo.message"])
+ else:
+ return False, None
+
+ def process_task(self, body, message):
+ received_timestamp = stringify_datetime(datetime.datetime.now())
+ processable, event_data = self._extract_event_data(body)
+        # If the listener can't process the message,
+        # or the message isn't intended for this listener to handle,
+        # leave the message in the queue unless the "consume_all" flag is set
+ if processable and event_data["event_type"] in self.handler.handlers:
+ with open("/tmp/listener.log", "a") as f:
+ f.write("{}\n".format(event_data))
+ event_result = self.handle_event(event_data["event_type"],
+ event_data)
+ finished_timestamp = stringify_datetime(datetime.datetime.now())
+ self.save_message(message_body=event_data,
+ result=event_result,
+ started=received_timestamp,
+ finished=finished_timestamp)
+
+ # Check whether the event was fully handled
+ # and, if not, whether it should be retried later
+ if event_result.result:
+ message.ack()
+ elif event_result.retry:
+ if 'message_id' not in event_data:
+ message.reject()
+ else:
+ # Track message retry count
+ message_id = event_data['message_id']
+ self.failing_messages[message_id] += 1
+
+ # Retry handling the message
+ if self.failing_messages[message_id] <= self.retry_limit:
+ self.inv.log.info("Retrying handling message " +
+ "with id '{}'".format(message_id))
+ message.requeue()
+ # Discard the message if it's not accepted
+                    # after the specified number of retries
+ else:
+ self.inv.log.warn("Discarding message with id '{}' ".
+ format(message_id) +
+ "as it's exceeded the retry limit")
+ message.reject()
+ del self.failing_messages[message_id]
+ else:
+ message.reject()
+ elif self.consume_all:
+ message.reject()
+
+ # This method passes the event to its handler.
+ # Returns a (result, retry) tuple:
+ # 'Result' flag is True if handler has finished successfully,
+ # False otherwise
+ # 'Retry' flag specifies if the error is recoverable or not
+    # 'Retry' flag is checked only if 'result' is False
+ def handle_event(self, event_type: str, notification: dict) -> EventResult:
+ print("Got notification.\nEvent_type: {}\nNotification:\n{}".
+ format(event_type, notification))
+ try:
+ result = self.handler.handle(event_name=event_type,
+ notification=notification)
+ return result if result else EventResult(result=False, retry=False)
+ except Exception as e:
+ self.inv.log.exception(e)
+ return EventResult(result=False, retry=False)
+
+ def save_message(self, message_body: dict, result: EventResult,
+ started: str, finished: str):
+ try:
+ message = Message(
+ msg_id=message_body.get('message_id'),
+ env=self.env_name,
+ source=self.SOURCE_SYSTEM,
+ object_id=result.related_object,
+ display_context=result.display_context,
+ level=message_body.get('priority'),
+ msg=message_body,
+ ts=message_body.get('timestamp'),
+ received_ts=started,
+ finished_ts=finished
+ )
+ self.inv.collections['messages'].insert_one(message.get())
+ return True
+ except Exception as e:
+ self.inv.log.error("Failed to save message")
+ self.inv.log.exception(e)
+ return False
+
+ @staticmethod
+ def listen(args: dict = None):
+
+ args = setup_args(args, DefaultListener.DEFAULTS, get_args)
+ if 'process_vars' not in args:
+ args['process_vars'] = {}
+
+ env_name = args["env"]
+ inventory_collection = args["inventory"]
+
+ MongoAccess.set_config_file(args["mongo_config"])
+ conf = Configuration(args["environments_collection"])
+ conf.use_env(env_name)
+
+ event_handler = EventHandler(env_name, inventory_collection)
+ event_queues = []
+
+ env_config = conf.get_env_config()
+ common_metadata_file = os.path.join(env_config.get('app_path', '/etc/calipso'),
+ 'config',
+ DefaultListener.COMMON_METADATA_FILE)
+
+ # import common metadata
+ import_metadata(event_handler, event_queues, common_metadata_file)
+
+ # import custom metadata if supplied
+ if args["metadata_file"]:
+ import_metadata(event_handler, event_queues, args["metadata_file"])
+
+ inv = InventoryMgr()
+ inv.set_collections(inventory_collection)
+ logger = FullLogger()
+ logger.set_loglevel(args["loglevel"])
+
+ amqp_config = conf.get("AMQP")
+ connect_url = 'amqp://{user}:{pwd}@{host}:{port}//' \
+ .format(user=amqp_config["user"],
+ pwd=amqp_config["password"],
+ host=amqp_config["host"],
+ port=amqp_config["port"])
+
+ with Connection(connect_url) as conn:
+ try:
+ print(conn)
+ conn.connect()
+ args['process_vars']['operational'] = OperationalStatus.RUNNING
+ terminator = SignalHandler()
+ worker = \
+ DefaultListener(connection=conn,
+ event_handler=event_handler,
+ event_queues=event_queues,
+ retry_limit=args["retry_limit"],
+ consume_all=args["consume_all"],
+ inventory_collection=inventory_collection,
+ env_name=env_name)
+ worker.run()
+ if terminator.terminated:
+ args.get('process_vars', {})['operational'] = \
+ OperationalStatus.STOPPED
+ except KeyboardInterrupt:
+ print('Stopped')
+ args['process_vars']['operational'] = OperationalStatus.STOPPED
+ except Exception as e:
+ logger.log.exception(e)
+ args['process_vars']['operational'] = OperationalStatus.ERROR
+ finally:
+ # This should enable safe saving of shared variables
+ time.sleep(0.1)
+
+
+def get_args():
+ # Read listener config from command line args
+ parser = argparse.ArgumentParser()
+ parser.add_argument("-m", "--mongo_config", nargs="?", type=str,
+ default=DefaultListener.DEFAULTS["mongo_config"],
+ help="Name of config file with MongoDB access details")
+ parser.add_argument("--metadata_file", nargs="?", type=str,
+ default=DefaultListener.DEFAULTS["metadata_file"],
+ help="Name of custom configuration metadata file")
+ def_env_collection = DefaultListener.DEFAULTS["environments_collection"]
+ parser.add_argument("-c", "--environments_collection", nargs="?", type=str,
+ default=def_env_collection,
+ help="Name of collection where selected environment " +
+ "is taken from \n(default: {})"
+ .format(def_env_collection))
+ parser.add_argument("-e", "--env", nargs="?", type=str,
+ default=DefaultListener.DEFAULTS["env"],
+ help="Name of target listener environment \n" +
+ "(default: {})"
+ .format(DefaultListener.DEFAULTS["env"]))
+ parser.add_argument("-y", "--inventory", nargs="?", type=str,
+ default=DefaultListener.DEFAULTS["inventory"],
+ help="Name of inventory collection \n"" +"
+ "(default: '{}')"
+ .format(DefaultListener.DEFAULTS["inventory"]))
+ parser.add_argument("-l", "--loglevel", nargs="?", type=str,
+ default=DefaultListener.DEFAULTS["loglevel"],
+ help="Logging level \n(default: '{}')"
+ .format(DefaultListener.DEFAULTS["loglevel"]))
+ parser.add_argument("-r", "--retry_limit", nargs="?", type=int,
+ default=DefaultListener.DEFAULTS["retry_limit"],
+ help="Maximum number of times the OpenStack message "
+ "should be requeued before being discarded \n" +
+ "(default: {})"
+ .format(DefaultListener.DEFAULTS["retry_limit"]))
+ parser.add_argument("--consume_all", action="store_true",
+ help="If this flag is set, " +
+ "environment listener will try to consume"
+ "all messages from OpenStack event queue "
+ "and reject incompatible messages."
+ "Otherwise they'll just be ignored.",
+ default=DefaultListener.DEFAULTS["consume_all"])
+ args = parser.parse_args()
+ return args
+
+
+# Imports metadata from file,
+# updates event handler with new handlers
+# and event queues with new queues
+def import_metadata(event_handler: EventHandler,
+ event_queues: List[Queue],
+ metadata_file_path: str) -> None:
+ handlers_package, queues, event_handlers = \
+ parse_metadata_file(metadata_file_path)
+ event_handler.discover_handlers(handlers_package, event_handlers)
+ event_queues.extend([
+ Queue(q['queue'],
+ Exchange(q['exchange'], 'topic', durable=False),
+ durable=False, routing_key='#') for q in queues
+ ])
+
+
+if __name__ == '__main__':
+ DefaultListener.listen()
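
Two pieces of the listener above are easy to exercise in isolation: unwrapping the oslo.message envelope and counting per-message retries before giving up. Below is a standalone, simplified sketch of both, independent of kombu and MongoDB; the sample message content is made up:

    import json
    from collections import defaultdict

    RETRY_LIMIT = 10
    failing_messages = defaultdict(int)

    def extract_event_data(body):
        # plain notification, or one wrapped in an "oslo.message" JSON string
        if "event_type" in body:
            return True, body
        if "event_type" in body.get("oslo.message", ""):
            return True, json.loads(body["oslo.message"])
        return False, None

    def should_requeue(message_id):
        # requeue until the retry limit is exceeded, then discard
        failing_messages[message_id] += 1
        return failing_messages[message_id] <= RETRY_LIMIT

    body = {"oslo.message": json.dumps({"event_type": "port.create.end",
                                        "message_id": "msg-1"})}
    processable, event = extract_event_data(body)
    print(processable, event["event_type"], should_requeue(event["message_id"]))
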
diff --git a/app/discover/events/listeners/listener_base.py b/app/discover/events/listeners/listener_base.py
new file mode 100644
index 0000000..7052dc9
--- /dev/null
+++ b/app/discover/events/listeners/listener_base.py
@@ -0,0 +1,18 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from abc import ABC, abstractmethod
+
+
+class ListenerBase(ABC):
+
+ @staticmethod
+ @abstractmethod
+    def listen(args: dict = None):
+ pass
diff --git a/app/discover/fetch_host_object_types.py b/app/discover/fetch_host_object_types.py
new file mode 100644
index 0000000..da38af7
--- /dev/null
+++ b/app/discover/fetch_host_object_types.py
@@ -0,0 +1,37 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetcher import Fetcher
+
+
+class FetchHostObjectTypes(Fetcher):
+
+ def get(self, parent):
+ ret = {
+ "id": "",
+ "parent": parent,
+ "rows": [
+ {
+ "id": "instances_root",
+ "type": "instances_folder",
+ "text": "Instances"
+ },
+ {
+ "id": "networks_root",
+ "type": "networks_folder",
+ "text": "Networks"
+ },
+ {
+ "id": "vservices_root",
+ "type": "vservices_folder",
+ "text": "vServices"
+ }
+ ]
+ }
+ return ret
diff --git a/app/discover/fetch_region_object_types.py b/app/discover/fetch_region_object_types.py
new file mode 100644
index 0000000..047c84c
--- /dev/null
+++ b/app/discover/fetch_region_object_types.py
@@ -0,0 +1,37 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetcher import Fetcher
+
+
+class FetchRegionObjectTypes(Fetcher):
+
+ def get(self, parent):
+ ret = {
+ "id": "",
+ "parent": parent,
+ "rows": [
+ {
+ "id": "aggregates_root",
+ "type": "aggregates_folder",
+ "text": "Aggregates"
+ },
+ {
+ "id": "availability_zones_root",
+ "type": "availability_zones_folder",
+ "text": "Availability Zones"
+ },
+ {
+ "id": "network_agents_root",
+ "type": "network_agents_folder",
+ "text": "network Agents"
+ }
+ ]
+ }
+ return ret
diff --git a/app/discover/fetcher.py b/app/discover/fetcher.py
new file mode 100644
index 0000000..8d7fdbb
--- /dev/null
+++ b/app/discover/fetcher.py
@@ -0,0 +1,35 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.configuration import Configuration
+from utils.logging.full_logger import FullLogger
+
+
+class Fetcher:
+
+ def __init__(self):
+ super().__init__()
+ self.env = None
+ self.log = FullLogger()
+ self.configuration = None
+
+ @staticmethod
+ def escape(string):
+ return string
+
+ def set_env(self, env):
+ self.env = env
+ self.log.set_env(env)
+ self.configuration = Configuration()
+
+ def get_env(self):
+ return self.env
+
+ def get(self, object_id):
+ return None
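
Concrete fetchers subclass Fetcher, have set_env() called before use, and override get(object_id), which returns None in the base class. A toy sketch of that contract follows; FetcherStandIn and DummyHostFetcher are made-up names used only so the snippet runs on its own, they are not classes from the repository:

    class FetcherStandIn:            # minimal stand-in for discover.fetcher.Fetcher
        def __init__(self):
            self.env = None

        def set_env(self, env):
            self.env = env

        def get(self, object_id):
            return None              # subclasses override this

    class DummyHostFetcher(FetcherStandIn):
        def get(self, object_id):
            # a real fetcher would query an API, CLI or DB here
            return [{'id': object_id, 'environment': self.env}]

    fetcher = DummyHostFetcher()
    fetcher.set_env('env1')
    print(fetcher.get('host-1'))
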
diff --git a/app/discover/fetcher_new.py b/app/discover/fetcher_new.py
new file mode 100644
index 0000000..f545554
--- /dev/null
+++ b/app/discover/fetcher_new.py
@@ -0,0 +1,30 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetcher import Fetcher
+# old stuff
+class FetchHostObjectTypes(Fetcher):
+
+
+ def get(self, parent):
+ ret = {
+ "type": "host_object_type",
+ "id": "",
+ "parent": parent,
+ "rows": [
+ {"id": "instances_root", "text": "Instances", "descendants": 1},
+ {"id": "networks_root", "text": "Networks", "descendants": 1},
+ {"id": "pnics_root", "text": "pNICs", "descendants": 1},
+ {"id": "vservices_root", "text": "vServices", "descendants": 1}
+ ]
+ }
+ return ret
+
+    # old/moved
+
diff --git a/app/discover/fetchers/__init__.py b/app/discover/fetchers/__init__.py
new file mode 100644
index 0000000..b0637e9
--- /dev/null
+++ b/app/discover/fetchers/__init__.py
@@ -0,0 +1,9 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
diff --git a/app/discover/fetchers/aci/__init__.py b/app/discover/fetchers/aci/__init__.py
new file mode 100644
index 0000000..b0637e9
--- /dev/null
+++ b/app/discover/fetchers/aci/__init__.py
@@ -0,0 +1,9 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
diff --git a/app/discover/fetchers/aci/aci_access.py b/app/discover/fetchers/aci/aci_access.py
new file mode 100644
index 0000000..836e45d
--- /dev/null
+++ b/app/discover/fetchers/aci/aci_access.py
@@ -0,0 +1,200 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import json
+
+import requests
+
+from discover.configuration import Configuration
+from discover.fetcher import Fetcher
+
+
+def aci_config_required(default=None):
+ def decorator(func):
+ def wrapper(self, *args, **kwargs):
+ if not self.aci_enabled:
+ return default
+ return func(self, *args, **kwargs)
+ return wrapper
+ return decorator
+
+
+class AciAccess(Fetcher):
+
+ RESPONSE_FORMAT = "json"
+ cookie_token = None
+
+ def __init__(self):
+ super().__init__()
+ self.configuration = Configuration()
+ self.aci_enabled = self.configuration.get_env_config() \
+ .get('aci_enabled', False)
+ self.aci_configuration = None
+ self.host = None
+ if self.aci_enabled:
+ self.aci_configuration = self.configuration.get("ACI")
+ self.host = self.aci_configuration["host"]
+
+ def get_base_url(self):
+ return "https://{}/api".format(self.host)
+
+ # Unwrap ACI response payload
+ # and return an array of desired fields' values.
+ #
+ # Parameters
+ # ----------
+ #
+ # payload: dict
+ # Full json response payload returned by ACI
+ # *field_names: Tuple[str]
+ # Enumeration of fields that are used to traverse ACI "imdata" array
+ # (order is important)
+ #
+ # Returns
+ # ----------
+ # list
+ # List of unwrapped dictionaries (or primitives)
+ #
+ # Example
+ # ----------
+ # Given payload:
+ #
+ # {
+ # "totalCount": "2",
+ # "imdata": [
+ # {
+ # "aaa": {
+ # "bbb": {
+ # "ccc": "value1"
+ # }
+ # }
+ # },
+ # {
+ # "aaa": {
+ # "bbb": {
+ # "ccc": "value2"
+ # }
+ # }
+ # }
+ # ]
+ # }
+ #
+ # Executing get_objects_by_field_names(payload, "aaa", "bbb")
+ # will yield the following result:
+ #
+ # >>> [{"ccc": "value1"}, {"ccc": "value2"}]
+ #
+ # Executing get_objects_by_field_names(payload, "aaa", "bbb", "ccc")
+ # will yield the following result:
+ #
+ # >>> ["value1", "value2"]
+ #
+ @staticmethod
+ def get_objects_by_field_names(payload, *field_names):
+ results = payload.get("imdata", [])
+ if not results:
+ return []
+
+ for field in field_names:
+ results = [entry[field] for entry in results]
+ return results
+
+ # Set auth tokens in request headers and cookies
+ @staticmethod
+ def _insert_token_into_request(cookies):
+ return dict(cookies, **AciAccess.cookie_token) \
+ if cookies \
+ else AciAccess.cookie_token
+
+ @staticmethod
+ def _set_token(response):
+ tokens = AciAccess.get_objects_by_field_names(response.json(), "aaaLogin", "attributes", "token")
+ token = tokens[0]
+
+ AciAccess.cookie_token = {"APIC-Cookie": token}
+
+ @aci_config_required()
+ def login(self):
+ url = "/".join((self.get_base_url(), "aaaLogin.json"))
+ payload = {
+ "aaaUser": {
+ "attributes": {
+ "name": self.aci_configuration["user"],
+ "pwd": self.aci_configuration["pwd"]
+ }
+ }
+ }
+
+ response = requests.post(url, json=payload, verify=False)
+ response.raise_for_status()
+
+ AciAccess._set_token(response)
+
+ # Refresh token or login if token has expired
+ @aci_config_required()
+ def refresh_token(self):
+ # First time login
+ if not AciAccess.cookie_token:
+ self.login()
+ return
+
+ url = "/".join((self.get_base_url(), "aaaRefresh.json"))
+
+ response = requests.get(url, verify=False)
+
+ # Login again if the token has expired
+ if response.status_code == requests.codes.forbidden:
+ self.login()
+ return
+ # Propagate any other error
+ elif response.status_code != requests.codes.ok:
+ response.raise_for_status()
+
+ AciAccess._set_token(response)
+
+ @aci_config_required(default={})
+ def send_get(self, url, params, headers, cookies):
+ self.refresh_token()
+
+ cookies = self._insert_token_into_request(cookies)
+
+ response = requests.get(url, params=params, headers=headers,
+ cookies=cookies, verify=False)
+ # Let client handle HTTP errors
+ response.raise_for_status()
+
+ return response.json()
+
+ # Search ACI for Managed Objects (MOs) of a specific class
+ @aci_config_required(default=[])
+ def fetch_objects_by_class(self,
+ class_name: str,
+ params: dict = None,
+ headers: dict = None,
+ cookies: dict = None,
+ response_format: str = RESPONSE_FORMAT):
+ url = "/".join((self.get_base_url(),
+ "class", "{cn}.{f}".format(cn=class_name, f=response_format)))
+
+ response_json = self.send_get(url, params, headers, cookies)
+ return self.get_objects_by_field_names(response_json, class_name)
+
+ # Fetch data for a specific Managed Object (MO)
+ @aci_config_required(default=[])
+ def fetch_mo_data(self,
+ dn: str,
+ params: dict = None,
+ headers: dict = None,
+ cookies: dict = None,
+ response_format: str = RESPONSE_FORMAT):
+ url = "/".join((self.get_base_url(), "mo", "topology",
+ "{dn}.{f}".format(dn=dn, f=response_format)))
+
+ response_json = self.send_get(url, params, headers, cookies)
+ return response_json
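A minimal standalone sketch of the "imdata" unwrapping done by get_objects_by_field_names(), driven by the example payload from the comments above (illustrative only, not part of aci_access.py):

    # mirrors AciAccess.get_objects_by_field_names() for the documented example
    def get_objects_by_field_names(payload, *field_names):
        results = payload.get("imdata", [])
        for field in field_names:
            results = [entry[field] for entry in results]
        return results

    payload = {
        "totalCount": "2",
        "imdata": [
            {"aaa": {"bbb": {"ccc": "value1"}}},
            {"aaa": {"bbb": {"ccc": "value2"}}},
        ]
    }
    print(get_objects_by_field_names(payload, "aaa", "bbb"))
    # [{'ccc': 'value1'}, {'ccc': 'value2'}]
    print(get_objects_by_field_names(payload, "aaa", "bbb", "ccc"))
    # ['value1', 'value2']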
diff --git a/app/discover/fetchers/aci/aci_fetch_switch_pnic.py b/app/discover/fetchers/aci/aci_fetch_switch_pnic.py
new file mode 100644
index 0000000..a4216ea
--- /dev/null
+++ b/app/discover/fetchers/aci/aci_fetch_switch_pnic.py
@@ -0,0 +1,91 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import re
+
+from discover.fetchers.aci.aci_access import AciAccess, aci_config_required
+from utils.inventory_mgr import InventoryMgr
+from utils.util import encode_aci_dn, get_object_path_part
+
+
+class AciFetchSwitchPnic(AciAccess):
+
+ def __init__(self):
+ super().__init__()
+ self.inv = InventoryMgr()
+
+ def fetch_pnics_by_mac_address(self, mac_address):
+ mac_filter = "eq(epmMacEp.addr,\"{}\")".format(mac_address)
+ pnic_filter = "wcard(epmMacEp.ifId, \"eth\")"
+ query_filter = "and({},{})".format(mac_filter, pnic_filter)
+
+ pnics = self.fetch_objects_by_class("epmMacEp",
+ {"query-target-filter": query_filter})
+
+ return [pnic["attributes"] for pnic in pnics]
+
+ def fetch_switch_by_id(self, switch_id):
+ dn = "/".join((switch_id, "sys"))
+ response = self.fetch_mo_data(dn)
+ switch_data = self.get_objects_by_field_names(response, "topSystem", "attributes")
+ return switch_data[0] if switch_data else None
+
+ @aci_config_required(default=[])
+ def get(self, pnic_id):
+ environment = self.get_env()
+ pnic = self.inv.get_by_id(environment=environment, item_id=pnic_id)
+ if not pnic:
+ return []
+ mac_address = pnic.get("mac_address")
+ if not mac_address:
+ return []
+
+ switch_pnics = self.fetch_pnics_by_mac_address(mac_address)
+ if not switch_pnics:
+ return []
+ switch_pnic = switch_pnics[0]
+
+ # Prepare and save switch data in inventory
+ aci_id_match = re.match("topology/(.+)/sys", switch_pnic["dn"])
+ if not aci_id_match:
+ raise ValueError("Failed to fetch switch id from pnic dn: {}"
+ .format(switch_pnic["dn"]))
+
+ aci_switch_id = aci_id_match.group(1)
+ db_switch_id = encode_aci_dn(aci_switch_id)
+ if not self.inv.get_by_id(environment, db_switch_id):
+ switch_data = self.fetch_switch_by_id(aci_switch_id)
+ if not switch_data:
+ self.log.warning("No switch found for switch pnic dn: {}"
+ .format(switch_pnic["dn"]))
+ return []
+
+ switch_json = {
+ "id": db_switch_id,
+ "ip_address": switch_data["address"],
+ "type": "switch",
+ "aci_document": switch_data
+ }
+ # Region name is the same as region id
+ region_id = get_object_path_part(pnic["name_path"], "Regions")
+ region = self.inv.get_by_id(environment, region_id)
+ self.inv.save_inventory_object(o=switch_json, parent=region, environment=environment)
+
+ db_pnic_id = "-".join((db_switch_id,
+ encode_aci_dn(switch_pnic["ifId"]),
+ mac_address))
+ pnic_json = {
+ "id": db_pnic_id,
+ "type": "pnic",
+ "pnic_type": "switch",
+ "mac_address": mac_address,
+ "aci_document": switch_pnic
+ }
+ return [pnic_json]
+
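The switch id is recovered from the pnic's distinguished name (dn) with the regular expression used in get() above; a minimal sketch, assuming a made-up dn of the usual "topology/<pod>/<node>/sys" shape:

    import re

    dn = "topology/pod-1/node-101/sys"          # hypothetical sample dn
    aci_id_match = re.match("topology/(.+)/sys", dn)
    # group(1) == "pod-1/node-101", later encoded with encode_aci_dn()
    aci_switch_id = aci_id_match.group(1) if aci_id_match else None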
diff --git a/app/discover/fetchers/api/__init__.py b/app/discover/fetchers/api/__init__.py
new file mode 100644
index 0000000..b0637e9
--- /dev/null
+++ b/app/discover/fetchers/api/__init__.py
@@ -0,0 +1,9 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
diff --git a/app/discover/fetchers/api/api_access.py b/app/discover/fetchers/api/api_access.py
new file mode 100644
index 0000000..89eeb34
--- /dev/null
+++ b/app/discover/fetchers/api/api_access.py
@@ -0,0 +1,195 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import calendar
+import re
+import requests
+import time
+
+from discover.configuration import Configuration
+from discover.fetcher import Fetcher
+from utils.string_utils import jsonify
+
+
+class ApiAccess(Fetcher):
+ subject_token = None
+ initialized = False
+ regions = {}
+ config = None
+ api_config = None
+
+ host = ""
+ base_url = ""
+ admin_token = ""
+ tokens = {}
+ admin_endpoint = ""
+ admin_project = None
+ auth_response = None
+
+ alternative_services = {
+ "neutron": ["quantum"]
+ }
+
+ # identity API v2, with admin token
+ def __init__(self):
+ super(ApiAccess, self).__init__()
+ if ApiAccess.initialized:
+ return
+ ApiAccess.config = Configuration()
+ ApiAccess.api_config = ApiAccess.config.get("OpenStack")
+ host = ApiAccess.api_config["host"]
+ ApiAccess.host = host
+ port = ApiAccess.api_config["port"]
+ if not (host and port):
+ raise ValueError('Missing definition of host or port ' +
+ 'for OpenStack API access')
+ ApiAccess.base_url = "http://" + host + ":" + port
+ ApiAccess.admin_token = ApiAccess.api_config["admin_token"]
+ ApiAccess.admin_project = ApiAccess.api_config["admin_project"] \
+ if "admin_project" in ApiAccess.api_config \
+ else 'admin'
+ ApiAccess.admin_endpoint = "http://" + host + ":" + "35357"
+
+ token = self.v2_auth_pwd(ApiAccess.admin_project)
+ if not token:
+ raise ValueError("Authentication failed. Failed to obtain token")
+ else:
+ self.subject_token = token
+
+ @staticmethod
+ def parse_time(time_str):
+ try:
+ time_struct = time.strptime(time_str, "%Y-%m-%dT%H:%M:%SZ")
+ except ValueError:
+ try:
+ time_struct = time.strptime(time_str,
+ "%Y-%m-%dT%H:%M:%S.%fZ")
+ except ValueError:
+ return None
+ return time_struct
+
+ # try to use existing token, if it did not expire
+ def get_existing_token(self, project_id):
+ try:
+ token_details = ApiAccess.tokens[project_id]
+ except KeyError:
+ return None
+ token_expiry = token_details["expires"]
+ token_expiry_time_struct = self.parse_time(token_expiry)
+ if not token_expiry_time_struct:
+ return None
+ token_expiry_time = token_details["token_expiry_time"]
+ now = time.time()
+ if now > token_expiry_time:
+ # token has expired
+ ApiAccess.tokens.pop(project_id)
+ return None
+ return token_details
+
+ def v2_auth(self, project_id, headers, post_body):
+ subject_token = self.get_existing_token(project_id)
+ if subject_token:
+ return subject_token
+ req_url = ApiAccess.base_url + "/v2.0/tokens"
+ response = requests.post(req_url, json=post_body, headers=headers)
+ ApiAccess.auth_response = response.json()
+ if 'error' in self.auth_response:
+ e = self.auth_response['error']
+ self.log.error(str(e['code']) + ' ' + e['title'] + ': ' +
+ e['message'] + ", URL: " + req_url)
+ return None
+ try:
+ token_details = ApiAccess.auth_response["access"]["token"]
+ except KeyError:
+ # assume authentication failed
+ return None
+ token_expiry = token_details["expires"]
+ token_expiry_time_struct = self.parse_time(token_expiry)
+ if not token_expiry_time_struct:
+ return None
+ token_expiry_time = calendar.timegm(token_expiry_time_struct)
+ token_details["token_expiry_time"] = token_expiry_time
+ ApiAccess.tokens[project_id] = token_details
+ return token_details
+
+ def v2_auth_pwd(self, project):
+ user = ApiAccess.api_config["user"]
+ pwd = ApiAccess.api_config["pwd"]
+ post_body = {
+ "auth": {
+ "passwordCredentials": {
+ "username": user,
+ "password": pwd
+ }
+ }
+ }
+ if project is not None:
+ post_body["auth"]["tenantName"] = project
+ project_id = project
+ else:
+ project_id = ""
+ headers = {
+ 'Accept': 'application/json',
+ 'Content-Type': 'application/json; charset=UTF-8'
+ }
+ return self.v2_auth(project_id, headers, post_body)
+
+ def get_rel_url(self, relative_url, headers):
+ req_url = ApiAccess.base_url + relative_url
+ return self.get_url(req_url, headers)
+
+ def get_url(self, req_url, headers):
+ response = requests.get(req_url, headers=headers)
+ if response.status_code != requests.codes.ok:
+ # some error happened
+ if "reason" in response:
+ msg = ", reason: {}".format(response.reason)
+ else:
+ msg = ", response: {}".format(response.text)
+ self.log.error("req_url: {} {}".format(req_url, msg))
+ return response
+ ret = response.json()
+ return ret
+
+ def get_region_url(self, region_name, service):
+ if region_name not in self.regions:
+ return None
+ region = self.regions[region_name]
+ s = self.get_service_region_endpoints(region, service)
+ if not s:
+ return None
+ orig_url = s["adminURL"]
+ # replace host name with the host found in config
+ url = re.sub(r"^([^/]+)//[^:]+", r"\1//" + ApiAccess.host, orig_url)
+ return url
+
+ # like get_region_url(), but remove everything starting from the "/v2"
+ def get_region_url_nover(self, region, service):
+ full_url = self.get_region_url(region, service)
+ if not full_url:
+ self.log.error("could not find region URL for region: " + region)
+ exit()
+ url = re.sub(r":([0-9]+)/v[2-9].*", r":\1", full_url)
+ return url
+
+ def get_catalog(self, pretty):
+ return jsonify(self.regions, pretty)
+
+ # find the endpoints for a given service name,
+ # considering also alternative service names
+ def get_service_region_endpoints(self, region, service):
+ alternatives = [service]
+ endpoints = region["endpoints"]
+ if service in self.alternative_services:
+ alternatives.extend(self.alternative_services[service])
+ for sname in alternatives:
+ if sname in endpoints:
+ return endpoints[sname]
+ return None
+
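The token bookkeeping above boils down to parsing the Keystone "expires" string, storing its UTC epoch value on the token, and comparing it with the current time when a cached token is reused; a minimal sketch with a sample timestamp:

    import calendar
    import time

    expires = "2017-07-27T09:02:54Z"            # sample "expires" value
    time_struct = time.strptime(expires, "%Y-%m-%dT%H:%M:%SZ")
    token_expiry_time = calendar.timegm(time_struct)
    token_is_still_valid = time.time() <= token_expiry_time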
diff --git a/app/discover/fetchers/api/api_fetch_availability_zones.py b/app/discover/fetchers/api/api_fetch_availability_zones.py
new file mode 100644
index 0000000..196893b
--- /dev/null
+++ b/app/discover/fetchers/api/api_fetch_availability_zones.py
@@ -0,0 +1,56 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.api.api_access import ApiAccess
+
+
+class ApiFetchAvailabilityZones(ApiAccess):
+ def __init__(self):
+ super(ApiFetchAvailabilityZones, self).__init__()
+
+ def get(self, project_id):
+ token = self.v2_auth_pwd(project_id)
+ if not token:
+ return []
+ ret = []
+ for region in self.regions:
+ ret.extend(self.get_for_region(project_id, region, token))
+ return ret
+
+ def get_for_region(self, project, region, token):
+ # we use os-availability-zone/detail rather than os-availability-zone,
+ # because the latter does not include the "internal" zone in the results
+ endpoint = self.get_region_url_nover(region, "nova")
+ req_url = endpoint + "/v2/" + token["tenant"]["id"] + \
+ "/os-availability-zone/detail"
+ headers = {
+ "X-Auth-Project-Id": project,
+ "X-Auth-Token": token["id"]
+ }
+ response = self.get_url(req_url, headers)
+ if "status" in response and int(response["status"]) != 200:
+ return []
+ ret = []
+ if "availabilityZoneInfo" not in response:
+ return []
+ azs = response["availabilityZoneInfo"]
+ if not azs:
+ return []
+ for doc in azs:
+ doc["id"] = doc["zoneName"]
+ doc["name"] = doc.pop("zoneName")
+ doc["master_parent_type"] = "region"
+ doc["master_parent_id"] = region
+ doc["parent_type"] = "availability_zones_folder"
+ doc["parent_id"] = region + "-availability_zones"
+ doc["parent_text"] = "Availability Zones"
+ doc["available"] = doc["zoneState"]["available"]
+ doc.pop("zoneState")
+ ret.append(doc)
+ return ret
diff --git a/app/discover/fetchers/api/api_fetch_end_points.py b/app/discover/fetchers/api/api_fetch_end_points.py
new file mode 100644
index 0000000..9471c7e
--- /dev/null
+++ b/app/discover/fetchers/api/api_fetch_end_points.py
@@ -0,0 +1,35 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+# fetch the end points for a given project (tenant)
+# return list of regions, to allow further recursive scanning
+
+from discover.fetchers.api.api_access import ApiAccess
+
+
+class ApiFetchEndPoints(ApiAccess):
+
+ def get(self, project_id):
+ if project_id != "admin":
+ return [] # XXX currently having problems authenticating to other tenants
+ self.v2_auth_pwd(project_id)
+
+ environment = ApiAccess.config.get_env_name()
+ regions = []
+ services = ApiAccess.auth_response['access']['serviceCatalog']
+ endpoints = []
+ for s in services:
+ if s["type"] != "identity":
+ continue
+ e = s["endpoints"][0]
+ e["environment"] = environment
+ e["project"] = project_id
+ e["type"] = "endpoint"
+ endpoints.append(e)
+ return endpoints
diff --git a/app/discover/fetchers/api/api_fetch_host_instances.py b/app/discover/fetchers/api/api_fetch_host_instances.py
new file mode 100644
index 0000000..56cffda
--- /dev/null
+++ b/app/discover/fetchers/api/api_fetch_host_instances.py
@@ -0,0 +1,59 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.api.api_access import ApiAccess
+from discover.fetchers.db.db_access import DbAccess
+from discover.fetchers.db.db_fetch_instances import DbFetchInstances
+from utils.inventory_mgr import InventoryMgr
+from utils.singleton import Singleton
+
+
+class ApiFetchHostInstances(ApiAccess, DbAccess, metaclass=Singleton):
+ def __init__(self):
+ super(ApiFetchHostInstances, self).__init__()
+ self.inv = InventoryMgr()
+ self.endpoint = ApiAccess.base_url.replace(":5000", ":8774")
+ self.projects = None
+ self.db_fetcher = DbFetchInstances()
+
+ def get_projects(self):
+ if not self.projects:
+ projects_list = self.inv.get(self.get_env(), "project", None)
+ self.projects = [p["name"] for p in projects_list]
+
+ def get(self, id):
+ self.get_projects()
+ host_id = id[:id.rindex("-")]
+ host = self.inv.get_by_id(self.get_env(), host_id)
+ if not host or "Compute" not in host.get("host_type", ""):
+ return []
+ instances_found = self.get_instances_from_api(host_id)
+ self.db_fetcher.get_instance_data(instances_found)
+ return instances_found
+
+ def get_instances_from_api(self, host_name):
+ token = self.v2_auth_pwd(self.admin_project)
+ if not token:
+ return []
+ tenant_id = token["tenant"]["id"]
+ req_url = self.endpoint + "/v2/" + tenant_id + \
+ "/os-hypervisors/" + host_name + "/servers"
+ response = self.get_url(req_url, {"X-Auth-Token": token["id"]})
+ ret = []
+ if not "hypervisors" in response:
+ return []
+ if not "servers" in response["hypervisors"][0]:
+ return []
+ for doc in response["hypervisors"][0]["servers"]:
+ doc["id"] = doc["uuid"]
+ doc["host"] = host_name
+ doc["local_name"] = doc.pop("name")
+ ret.append(doc)
+ self.log.info("found %s instances for host: %s", str(len(ret)), host_name)
+ return ret
diff --git a/app/discover/fetchers/api/api_fetch_network.py b/app/discover/fetchers/api/api_fetch_network.py
new file mode 100644
index 0000000..889b8a5
--- /dev/null
+++ b/app/discover/fetchers/api/api_fetch_network.py
@@ -0,0 +1,76 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.api.api_access import ApiAccess
+from utils.inventory_mgr import InventoryMgr
+
+
+class ApiFetchNetwork(ApiAccess):
+ def __init__(self):
+ super(ApiFetchNetwork, self).__init__()
+ self.inv = InventoryMgr()
+
+ def get(self, project_id):
+ # use project admin credentials, to be able to fetch all networks
+ token = self.v2_auth_pwd(self.admin_project)
+ if not token:
+ return []
+ ret = []
+ for region in self.regions:
+ # TODO: refactor legacy code (Unresolved reference - self.get_for_region)
+ ret.extend(self.get_for_region(region, token, project_id))
+ return ret
+
+ def get_network(self, region, token, network_id):
+ endpoint = self.get_region_url_nover(region, "neutron")
+
+ # get the target network document
+ req_url = endpoint + "/v2.0/networks/" + network_id
+ headers = {
+ "X-Auth-Project-Id": self.admin_project,
+ "X-Auth-Token": token["id"]
+ }
+ response = self.get_url(req_url, headers)
+ if not "network" in response:
+ return []
+ network = response["network"]
+ subnets = network['subnets']
+
+ # get subnets documents.
+ subnets_hash = {}
+ cidrs = []
+ subnet_ids = []
+ for subnet_id in subnets:
+ req_url = endpoint + "/v2.0/subnets/" + subnet_id
+ response = self.get_url(req_url, headers)
+ if "subnet" in response:
+ # create a hash of subnets, to allow easy lookup of subnets
+ subnet = response["subnet"]
+ subnets_hash[subnet["name"]] = subnet
+ cidrs.append(subnet["cidr"])
+ subnet_ids.append(subnet["id"])
+
+ network["subnets"] = subnets_hash
+ network["cidrs"] = cidrs
+ network["subnet_ids"] = subnet_ids
+
+ network["master_parent_type"] = "project"
+ network["master_parent_id"] = network["tenant_id"]
+ network["parent_type"] = "networks_folder"
+ network["parent_id"] = network["tenant_id"] + "-networks"
+ network["parent_text"] = "Networks"
+ # set the 'network' attribute of network objects to the network ID,
+ # to allow setting a constraint on network when creating the network clique
+ network['network'] = network["id"]
+ # get the project name
+ project = self.inv.get_by_id(self.get_env(), network["tenant_id"])
+ if project:
+ network["project"] = project["name"]
+
+ return network
diff --git a/app/discover/fetchers/api/api_fetch_networks.py b/app/discover/fetchers/api/api_fetch_networks.py
new file mode 100644
index 0000000..4b70f65
--- /dev/null
+++ b/app/discover/fetchers/api/api_fetch_networks.py
@@ -0,0 +1,86 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.api.api_access import ApiAccess
+from utils.inventory_mgr import InventoryMgr
+
+
+class ApiFetchNetworks(ApiAccess):
+ def __init__(self):
+ super(ApiFetchNetworks, self).__init__()
+ self.inv = InventoryMgr()
+
+ def get(self, project_id=None):
+ # use project admin credentials, to be able to fetch all networks
+ token = self.v2_auth_pwd(self.admin_project)
+ if not token:
+ return []
+ ret = []
+ for region in self.regions:
+ ret.extend(self.get_networks(region, token))
+ return ret
+
+ def get_networks(self, region, token):
+ endpoint = self.get_region_url_nover(region, "neutron")
+ req_url = endpoint + "/v2.0/networks"
+ headers = {
+ "X-Auth-Project-Id": self.admin_project,
+ "X-Auth-Token": token["id"]
+ }
+ response = self.get_url(req_url, headers)
+ if not "networks" in response:
+ return []
+ networks = response["networks"]
+ req_url = endpoint + "/v2.0/subnets"
+ response = self.get_url(req_url, headers)
+ subnets_hash = {}
+ if "subnets" in response:
+ # create a hash of subnets, to allow easy lookup of subnets
+ subnets = response["subnets"]
+ for s in subnets:
+ subnets_hash[s["id"]] = s
+ for doc in networks:
+ doc["master_parent_type"] = "project"
+ project_id = doc["tenant_id"]
+ if not project_id:
+ # find project ID of admin project
+ project = self.inv.get_by_field(self.get_env(),
+ "project", "name",
+ self.admin_project,
+ get_single=True)
+ if not project:
+ self.log.error("failed to find admin project in DB")
+ continue
+ project_id = project["id"]
+ doc["master_parent_id"] = project_id
+ doc["parent_type"] = "networks_folder"
+ doc["parent_id"] = project_id + "-networks"
+ doc["parent_text"] = "Networks"
+ # set the 'network' attribute of network objects to the network ID,
+ # to allow setting a constraint on network when creating the network clique
+ doc['network'] = doc["id"]
+ # get the project name
+ project = self.inv.get_by_id(self.get_env(), project_id)
+ if project:
+ doc["project"] = project["name"]
+ subnets_details = {}
+ cidrs = []
+ subnet_ids = []
+ for s in doc["subnets"]:
+ try:
+ subnet = subnets_hash[s]
+ cidrs.append(subnet["cidr"])
+ subnet_ids.append(subnet["id"])
+ subnets_details[subnet["name"]] = subnet
+ except KeyError:
+ pass
+
+ doc["subnets"] = subnets_details
+ doc["cidrs"] = cidrs
+ doc["subnet_ids"] = subnet_ids
+ return networks
diff --git a/app/discover/fetchers/api/api_fetch_port.py b/app/discover/fetchers/api/api_fetch_port.py
new file mode 100644
index 0000000..f8d9eeb
--- /dev/null
+++ b/app/discover/fetchers/api/api_fetch_port.py
@@ -0,0 +1,60 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.api.api_access import ApiAccess
+from utils.inventory_mgr import InventoryMgr
+
+
+class ApiFetchPort(ApiAccess):
+ def __init__(self):
+ super(ApiFetchPort, self).__init__()
+ self.inv = InventoryMgr()
+
+ def get(self, project_id):
+ if not project_id:
+ self.log.info("Get method needs ID parameter")
+ return []
+ # use project admin credentials, to be able to fetch all ports
+ token = self.v2_auth_pwd(self.admin_project)
+ if not token:
+ return []
+ ret = []
+ for region in self.regions:
+ ret.append(self.get_port(region, token, project_id))
+ if ret == []:
+ self.log.info("ApiFetchPort: Port not found.")
+ return ret
+
+ def get_port(self, region, token, id):
+ endpoint = self.get_region_url_nover(region, "neutron")
+ req_url = endpoint + "/v2.0/ports/" + id
+ headers = {
+ "X-Auth-Project-Id": self.admin_project,
+ "X-Auth-Token": token["id"]
+ }
+ response = self.get_url(req_url, headers)
+ if not "port" in response:
+ return []
+
+ doc = response["port"]
+ doc["master_parent_type"] = "network"
+ doc["master_parent_id"] = doc["network_id"]
+ doc["parent_type"] = "ports_folder"
+ doc["parent_id"] = doc["network_id"] + "-ports"
+ doc["parent_text"] = "Ports"
+ # name the port by its MAC address if its network is known, else by its id
+ net = self.inv.get_by_id(self.get_env(), doc["network_id"])
+ if net:
+ doc["name"] = doc["mac_address"]
+ else:
+ doc["name"] = doc["id"]
+ # get the project name
+ project = self.inv.get_by_id(self.get_env(), doc["tenant_id"])
+ if project:
+ doc["project"] = project["name"]
+ return doc
diff --git a/app/discover/fetchers/api/api_fetch_ports.py b/app/discover/fetchers/api/api_fetch_ports.py
new file mode 100644
index 0000000..f4c54a6
--- /dev/null
+++ b/app/discover/fetchers/api/api_fetch_ports.py
@@ -0,0 +1,55 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.api.api_access import ApiAccess
+from utils.inventory_mgr import InventoryMgr
+
+
+class ApiFetchPorts(ApiAccess):
+ def __init__(self):
+ super(ApiFetchPorts, self).__init__()
+ self.inv = InventoryMgr()
+
+ def get(self, project_id):
+ # use project admin credentials, to be able to fetch all ports
+ token = self.v2_auth_pwd(self.admin_project)
+ if not token:
+ return []
+ ret = []
+ for region in self.regions:
+ ret.extend(self.get_ports_for_region(region, token))
+ return ret
+
+ def get_ports_for_region(self, region, token):
+ endpoint = self.get_region_url_nover(region, "neutron")
+ req_url = endpoint + "/v2.0/ports"
+ headers = {
+ "X-Auth-Project-Id": self.admin_project,
+ "X-Auth-Token": token["id"]
+ }
+ response = self.get_url(req_url, headers)
+ if not "ports" in response:
+ return []
+ ports = response["ports"]
+ for doc in ports:
+ doc["master_parent_type"] = "network"
+ doc["master_parent_id"] = doc["network_id"]
+ doc["parent_type"] = "ports_folder"
+ doc["parent_id"] = doc["network_id"] + "-ports"
+ doc["parent_text"] = "Ports"
+ # name the port by its MAC address if its network is known, else by its id
+ net = self.inv.get_by_id(self.get_env(), doc["network_id"])
+ if net:
+ doc["name"] = doc["mac_address"]
+ else:
+ doc["name"] = doc["id"]
+ # get the project name
+ project = self.inv.get_by_id(self.get_env(), doc["tenant_id"])
+ if project:
+ doc["project"] = project["name"]
+ return ports
diff --git a/app/discover/fetchers/api/api_fetch_project_hosts.py b/app/discover/fetchers/api/api_fetch_project_hosts.py
new file mode 100644
index 0000000..7dc262e
--- /dev/null
+++ b/app/discover/fetchers/api/api_fetch_project_hosts.py
@@ -0,0 +1,144 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import json
+
+from discover.fetchers.api.api_access import ApiAccess
+from discover.fetchers.db.db_access import DbAccess
+
+
+class ApiFetchProjectHosts(ApiAccess, DbAccess):
+ def __init__(self):
+ super(ApiFetchProjectHosts, self).__init__()
+
+ def get(self, project_id):
+ if project_id != self.admin_project:
+ # do not scan hosts except under project 'admin'
+ return []
+ token = self.v2_auth_pwd(self.admin_project)
+ if not token:
+ return []
+ ret = []
+ for region in self.regions:
+ ret.extend(self.get_for_region(region, token))
+ return ret
+
+ def get_for_region(self, region, token):
+ endpoint = self.get_region_url(region, "nova")
+ ret = []
+ if not token:
+ return []
+ req_url = endpoint + "/os-availability-zone/detail"
+ headers = {
+ "X-Auth-Project-Id": self.admin_project,
+ "X-Auth-Token": token["id"]
+ }
+ response = self.get_url(req_url, headers)
+ if "status" in response and int(response["status"]) != 200:
+ return []
+ az_info = response["availabilityZoneInfo"]
+ hosts = {}
+ for doc in az_info:
+ az_hosts = self.get_hosts_from_az(doc)
+ for h in az_hosts:
+ if h["name"] in hosts:
+ # merge host_type data between AZs
+ existing_entry = hosts[h["name"]]
+ for t in h["host_type"]:
+ self.add_host_type(existing_entry, t, doc['zoneName'])
+ else:
+ hosts[h["name"]] = h
+ ret.append(h)
+ # get os_id for hosts using the os-hypervisors API call
+ req_url = endpoint + "/os-hypervisors"
+ response = self.get_url(req_url, headers)
+ if "status" in response and int(response["status"]) != 200:
+ return ret
+ if "hypervisors" not in response:
+ return ret
+ for h in response["hypervisors"]:
+ hvname = h["hypervisor_hostname"]
+ if '.' in hvname and hvname not in hosts:
+ hostname = hvname[:hvname.index('.')]
+ else:
+ hostname = hvname
+ try:
+ doc = hosts[hostname]
+ except KeyError:
+ # TBD - add error output
+ continue
+ doc["os_id"] = str(h["id"])
+ self.fetch_compute_node_ip_address(doc, hvname)
+ # get more network nodes details
+ self.fetch_network_node_details(ret)
+ return ret
+
+ def get_hosts_from_az(self, az):
+ ret = []
+ for h in az["hosts"]:
+ doc = self.get_host_details(az, h)
+ ret.append(doc)
+ return ret
+
+ def get_host_details(self, az, h):
+ # for hosts we use the name
+ services = az["hosts"][h]
+ doc = {
+ "id": h,
+ "host": h,
+ "name": h,
+ "zone": az["zoneName"],
+ "parent_type": "availability_zone",
+ "parent_id": az["zoneName"],
+ "services": services,
+ "host_type": []
+ }
+ if "nova-conductor" in services:
+ s = services["nova-conductor"]
+ if s["available"] and s["active"]:
+ self.add_host_type(doc, "Controller", az['zoneName'])
+ if "nova-compute" in services:
+ s = services["nova-compute"]
+ if s["available"] and s["active"]:
+ self.add_host_type(doc, "Compute", az['zoneName'])
+ return doc
+
+ # fetch more details of network nodes from neutron.agents table
+ def fetch_network_node_details(self, docs):
+ hosts = {}
+ for doc in docs:
+ hosts[doc["host"]] = doc
+ query = """
+ SELECT DISTINCT host, host AS id, configurations
+ FROM {}.agents
+ WHERE agent_type IN ('Metadata agent', 'DHCP agent', 'L3 agent')
+ """.format(self.neutron_db)
+ results = self.get_objects_list(query, "")
+ for r in results:
+ host = hosts[r["host"]]
+ host["config"] = json.loads(r["configurations"])
+ self.add_host_type(host, "Network", '')
+
+ # fetch ip_address from nova.compute_nodes table if possible
+ def fetch_compute_node_ip_address(self, doc, h):
+ query = """
+ SELECT host_ip AS ip_address
+ FROM nova.compute_nodes
+ WHERE hypervisor_hostname = %s
+ """
+ results = self.get_objects_list_for_id(query, "", h)
+ for db_row in results:
+ doc.update(db_row)
+
+ def add_host_type(self, doc, host_type, zone):
+ if host_type not in doc["host_type"]:
+ doc["host_type"].append(host_type)
+ if host_type == 'Compute':
+ doc['zone'] = zone
+ doc['parent_id'] = zone
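How host_type entries are merged when the same host shows up in several availability zones, and how the zone follows the Compute role; a small standalone sketch with made-up zone names:

    def add_host_type(doc, host_type, zone):
        # same logic as ApiFetchProjectHosts.add_host_type()
        if host_type not in doc["host_type"]:
            doc["host_type"].append(host_type)
        if host_type == 'Compute':
            doc['zone'] = zone
            doc['parent_id'] = zone

    host = {"name": "node-1", "zone": "internal", "parent_id": "internal",
            "host_type": ["Controller"]}
    add_host_type(host, "Compute", "nova")
    # host["host_type"] == ["Controller", "Compute"]; host["zone"] == "nova"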
diff --git a/app/discover/fetchers/api/api_fetch_projects.py b/app/discover/fetchers/api/api_fetch_projects.py
new file mode 100644
index 0000000..4ef8083
--- /dev/null
+++ b/app/discover/fetchers/api/api_fetch_projects.py
@@ -0,0 +1,66 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.api.api_access import ApiAccess
+
+
+class ApiFetchProjects(ApiAccess):
+ def __init__(self):
+ super(ApiFetchProjects, self).__init__()
+
+ def get(self, project_id):
+ token = self.v2_auth_pwd(self.admin_project)
+ if not token:
+ return []
+ if not self.regions:
+ self.log.error('No regions found')
+ return []
+ ret = []
+ for region in self.regions:
+ ret.extend(self.get_for_region(region, token))
+ projects_for_user = self.get_projects_for_api_user(region, token)
+ return [p for p in ret if p['name'] in projects_for_user] \
+ if projects_for_user else ret
+
+ def get_projects_for_api_user(self, region, token):
+ if not token:
+ token = self.v2_auth_pwd(self.admin_project)
+ if not token:
+ return []
+ endpoint = self.get_region_url_nover(region, "keystone")
+ headers = {
+ 'X-Auth-Project-Id': self.admin_project,
+ 'X-Auth-Token': token['id']
+ }
+ # get the list of projects accessible by the admin user
+ req_url = endpoint + '/v3/projects'
+ response = self.get_url(req_url, headers)
+ if not response or 'projects' not in response:
+ return None
+ response = [p['name'] for p in response['projects']]
+ return response
+
+ def get_for_region(self, region, token):
+ endpoint = self.get_region_url_nover(region, "keystone")
+ req_url = endpoint + "/v2.0/tenants"
+ headers = {
+ "X-Auth-Project-Id": self.admin_project,
+ "X-Auth-Token": token["id"]
+ }
+ response = self.get_url(req_url, headers)
+ if not isinstance(response, dict):
+ self.log.error('invalid response to /tenants request: not dict')
+ return []
+ tenants_list = response.get("tenants", [])
+ if not isinstance(tenants_list, list):
+ self.log.error('invalid response to /tenants request: '
+ 'tenants value is not a list')
+ return []
+ response = [t for t in tenants_list if t.get("name", "") != "services"]
+ return response
diff --git a/app/discover/fetchers/api/api_fetch_regions.py b/app/discover/fetchers/api/api_fetch_regions.py
new file mode 100644
index 0000000..dcc558f
--- /dev/null
+++ b/app/discover/fetchers/api/api_fetch_regions.py
@@ -0,0 +1,51 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.api.api_access import ApiAccess
+
+
+class ApiFetchRegions(ApiAccess):
+ def __init__(self):
+ super(ApiFetchRegions, self).__init__()
+ self.endpoint = ApiAccess.base_url
+
+ def get(self, project_id):
+ token = self.v2_auth_pwd(self.admin_project)
+ if not token:
+ return []
+ # the returned authentication response contains the list of end points
+ # and regions
+ service_catalog = ApiAccess.auth_response.get('access', {}).get('serviceCatalog')
+ if not service_catalog:
+ return []
+ env = self.get_env()
+ ret = []
+ NULL_REGION = "No-Region"
+ for service in service_catalog:
+ for e in service["endpoints"]:
+ if "region" in e:
+ region_name = e.pop("region")
+ region_name = region_name if region_name else NULL_REGION
+ else:
+ region_name = NULL_REGION
+ if region_name in self.regions.keys():
+ region = self.regions[region_name]
+ else:
+ region = {
+ "id": region_name,
+ "name": region_name,
+ "endpoints": {}
+ }
+ ApiAccess.regions[region_name] = region
+ region["parent_type"] = "regions_folder"
+ region["parent_id"] = env + "-regions"
+ e["service_type"] = service["type"]
+ region["endpoints"][service["name"]] = e
+ ret.extend(list(ApiAccess.regions.values()))
+ return ret
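A sketch of how serviceCatalog entries are folded into the shared regions dictionary, one endpoint per service name under each region (sample catalog entry, condensed from the logic above and illustrative only):

    service_catalog = [{
        "name": "nova",
        "type": "compute",
        "endpoints": [{"region": "RegionOne",
                       "adminURL": "http://10.0.0.1:8774/v2/abc"}],
    }]
    regions = {}
    for service in service_catalog:
        for e in service["endpoints"]:
            region_name = e.pop("region", None) or "No-Region"
            region = regions.setdefault(region_name, {"id": region_name,
                                                      "name": region_name,
                                                      "endpoints": {}})
            e["service_type"] = service["type"]
            region["endpoints"][service["name"]] = e
    # regions["RegionOne"]["endpoints"]["nova"]["service_type"] == "compute"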
diff --git a/app/discover/fetchers/cli/__init__.py b/app/discover/fetchers/cli/__init__.py
new file mode 100644
index 0000000..b0637e9
--- /dev/null
+++ b/app/discover/fetchers/cli/__init__.py
@@ -0,0 +1,9 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
diff --git a/app/discover/fetchers/cli/cli_access.py b/app/discover/fetchers/cli/cli_access.py
new file mode 100644
index 0000000..1db84ea
--- /dev/null
+++ b/app/discover/fetchers/cli/cli_access.py
@@ -0,0 +1,206 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import re
+import time
+
+from discover.fetcher import Fetcher
+from utils.binary_converter import BinaryConverter
+from utils.logging.console_logger import ConsoleLogger
+from utils.ssh_conn import SshConn
+
+
+class CliAccess(BinaryConverter, Fetcher):
+ connections = {}
+ ssh_cmd = "ssh -o StrictHostKeyChecking=no "
+ call_count_per_con = {}
+ max_call_count_per_con = 100
+ cache_lifetime = 60 # no. of seconds to cache results
+ cached_commands = {}
+
+ def __init__(self):
+ super().__init__()
+ self.log = ConsoleLogger()
+
+ @staticmethod
+ def is_gateway_host(ssh_to_host):
+ ssh_conn = SshConn(ssh_to_host)
+ return ssh_conn.is_gateway_host(ssh_to_host)
+
+ def run_on_gateway(self, cmd, ssh_to_host="", enable_cache=True,
+ use_sudo=True):
+ self.run(cmd, ssh_to_host=ssh_to_host, enable_cache=enable_cache,
+ on_gateway=True, use_sudo=use_sudo)
+
+ def run(self, cmd, ssh_to_host="", enable_cache=True, on_gateway=False,
+ ssh=None, use_sudo=True):
+ ssh_conn = ssh if ssh else SshConn(ssh_to_host)
+ if use_sudo and not cmd.strip().startswith("sudo "):
+ cmd = "sudo " + cmd
+ if not on_gateway and ssh_to_host \
+ and not ssh_conn.is_gateway_host(ssh_to_host):
+ cmd = self.ssh_cmd + ssh_to_host + " " + cmd
+ curr_time = time.time()
+ cmd_path = ssh_to_host + ',' + cmd
+ if enable_cache and cmd_path in self.cached_commands:
+ # try to re-use output from last call
+ cached = self.cached_commands[cmd_path]
+ if cached["timestamp"] + self.cache_lifetime < curr_time:
+ # result expired
+ self.cached_commands.pop(cmd_path, None)
+ else:
+ # result is good to use - skip the SSH call
+ self.log.info('CliAccess: ****** using cached result, ' +
+ 'host: ' + ssh_to_host + ', cmd: %s ******', cmd)
+ return cached["result"]
+
+ self.log.info('CliAccess: host: %s, cmd: %s', ssh_to_host, cmd)
+ ret = ssh_conn.exec(cmd)
+ self.cached_commands[cmd_path] = {"timestamp": curr_time, "result": ret}
+ return ret
+
+ def run_fetch_lines(self, cmd, ssh_to_host="", enable_cache=True):
+ out = self.run(cmd, ssh_to_host, enable_cache)
+ if not out:
+ return []
+ # first try to split the output into lines by newline characters
+ ret = out.splitlines()
+ # if that yielded a single line, try splitting by the literal "\n" sequence
+ if len(ret) == 1:
+ ret = [l for l in out.split("\\n") if l != ""]
+ return ret
+
+ # parse command output columns separated by whitespace
+ # since headers can contain whitespace themselves,
+ # it is the caller's responsibility to provide the headers
+ def parse_cmd_result_with_whitespace(self, lines, headers, remove_first):
+ if remove_first:
+ # remove headers line
+ del lines[:1]
+ results = [self.parse_line_with_ws(line, headers)
+ for line in lines]
+ return results
+
+ # parse command output with "|" column separators and "-" row separators
+ def parse_cmd_result_with_separators(self, lines):
+ headers = self.parse_headers_line_with_separators(lines[1])
+ # remove line with headers and formatting lines above it and below it
+ del lines[:3]
+ # remove formatting line in the end
+ lines.pop()
+ results = [self.parse_content_line_with_separators(line, headers)
+ for line in lines]
+ return results
+
+ # parse a line with columns separated by whitespace
+ def parse_line_with_ws(self, line, headers):
+ s = line if isinstance(line, str) else self.binary2str(line)
+ parts = [word.strip() for word in s.split() if word.strip()]
+ ret = {}
+ for i, p in enumerate(parts):
+ header = headers[i]
+ ret[header] = p
+ return ret
+
+ # parse a line with "|" column separators
+ def parse_line_with_separators(self, line):
+ s = self.binary2str(line)
+ parts = [word.strip() for word in s.split("|") if word.strip()]
+ # remove the ID field
+ del parts[:1]
+ return parts
+
+ def parse_headers_line_with_separators(self, line):
+ return self.parse_line_with_separators(line)
+
+ def parse_content_line_with_separators(self, line, headers):
+ content_parts = self.parse_line_with_separators(line)
+ content = {}
+ for i in range(0, len(content_parts)):
+ content[headers[i]] = content_parts[i]
+ return content
+
+ def merge_ws_spillover_lines(self, lines):
+ # with WS-separated output, extra output sometimes spills to next line
+ # detect that and add it to the end of the previous line for our processing
+ pending_line = None
+ fixed_lines = []
+ for l in lines:
+ if l[0] == '\t':
+ # this is a spill-over line
+ if pending_line:
+ # add this line to the end of the previous line
+ pending_line = pending_line.strip() + "," + l.strip()
+ else:
+ # add the previous pending line to the fixed lines list
+ if pending_line:
+ fixed_lines.append(pending_line)
+ # make current line the pending line
+ pending_line = l
+ if pending_line:
+ fixed_lines.append(pending_line)
+ return fixed_lines
+
+ """
+ given output lines from CLI command like 'ip -d link show',
+ find lines belonging to section describing a specific interface
+ parameters:
+ - lines: list of strings, output of command
+ - header_regexp: regexp marking the start of the section
+ - end_regexp: regexp marking the end of the section
+ """
+ def get_section_lines(self, lines, header_regexp, end_regexp):
+ if not lines:
+ return []
+ header_re = re.compile(header_regexp)
+ start_pos = None
+ # find start_pos of section
+ line_count = len(lines)
+ for line_num in range(0, line_count-1):
+ matches = header_re.match(lines[line_num])
+ if matches:
+ start_pos = line_num
+ break
+ if start_pos is None:
+ return []
+ # find end of section
+ end_pos = line_count
+ end_re = re.compile(end_regexp)
+ for line_num in range(start_pos+1, end_pos-1):
+ matches = end_re.match(lines[line_num])
+ if matches:
+ end_pos = line_num
+ break
+ return lines[start_pos:end_pos]
+
+ def get_object_data(self, o, lines, regexps):
+ """
+ find object data in output lines from CLI command
+ parameters:
+ - o: object (dict), to which we'll add attributes with the data found
+ - lines: list of strings
+ - regexps: dict, keys are attribute names, values are regexp to match
+ for finding the value of the attribute
+ """
+ for line in lines:
+ self.find_matching_regexps(o, line, regexps)
+ for regexp_tuple in regexps:
+ name = regexp_tuple['name']
+ if name not in o and 'default' in regexp_tuple:
+ o[name] = regexp_tuple['default']
+
+ def find_matching_regexps(self, o, line, regexps):
+ for regexp_tuple in regexps:
+ name = regexp_tuple['name']
+ regex = regexp_tuple['re']
+ regex = re.compile(regex)
+ matches = regex.search(line)
+ if matches:
+ o[name] = matches.group(1)
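A standalone sketch of how parse_cmd_result_with_separators() digests an OpenStack-CLI style table; the first column is dropped by parse_line_with_separators() ("remove the ID field"), and binary2str() is skipped here for brevity:

    def parse_separated_line(line):
        parts = [word.strip() for word in line.split("|") if word.strip()]
        del parts[:1]                     # drop the first (ID) column
        return parts

    lines = [                             # sample table, not from a real run
        "+----+-------+--------+",
        "| id | name  | status |",
        "+----+-------+--------+",
        "| 17 | net-a | ACTIVE |",
        "+----+-------+--------+",
    ]
    headers = parse_separated_line(lines[1])      # ['name', 'status']
    del lines[:3]                                 # headers and border lines
    lines.pop()                                   # closing border
    rows = [dict(zip(headers, parse_separated_line(l))) for l in lines]
    # rows == [{'name': 'net-a', 'status': 'ACTIVE'}]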
diff --git a/app/discover/fetchers/cli/cli_fetch_host_pnics.py b/app/discover/fetchers/cli/cli_fetch_host_pnics.py
new file mode 100644
index 0000000..3516e25
--- /dev/null
+++ b/app/discover/fetchers/cli/cli_fetch_host_pnics.py
@@ -0,0 +1,122 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import re
+
+from discover.fetchers.cli.cli_access import CliAccess
+from utils.inventory_mgr import InventoryMgr
+
+
+class CliFetchHostPnics(CliAccess):
+ def __init__(self):
+ super().__init__()
+ self.inv = InventoryMgr()
+ self.ethtool_attr = re.compile('^\s+([^:]+):\s(.*)$')
+ self.regexps = [
+ {'name': 'mac_address', 're': '^.*\sHWaddr\s(\S+)(\s.*)?$'},
+ {'name': 'mac_address', 're': '^.*\sether\s(\S+)(\s.*)?$'},
+ {'name': 'IP Address', 're': '^\s*inet addr:?(\S+)\s.*$'},
+ {'name': 'IP Address', 're': '^\s*inet ([0-9.]+)\s.*$'},
+ {'name': 'IPv6 Address', 're': '^\s*inet6 addr:\s*(\S+)(\s.*)?$'},
+ {'name': 'IPv6 Address', 're': '^\s*inet6 \s*(\S+)(\s.*)?$'}
+ ]
+
+ def get(self, id):
+ host_id = id[:id.rindex("-")]
+ cmd = 'ls -l /sys/class/net | grep ^l | grep -v "/virtual/"'
+ host = self.inv.get_by_id(self.get_env(), host_id)
+ if not host:
+ self.log.error("CliFetchHostPnics: host not found: " + host_id)
+ return []
+ if "host_type" not in host:
+ self.log.error("host does not have host_type: " + host_id +
+ ", host: " + str(host))
+ return []
+ host_types = host["host_type"]
+ if "Network" not in host_types and "Compute" not in host_types:
+ return []
+ interface_lines = self.run_fetch_lines(cmd, host_id)
+ interfaces = []
+ for line in interface_lines:
+ interface_name = line[line.rindex('/')+1:]
+ interface_name = interface_name.strip()
+ # run ifconfig with specific interface name,
+ # since running it with no name yields a list without inactive pNICs
+ interface = self.find_interface_details(host_id, interface_name)
+ if interface:
+ interfaces.append(interface)
+ return interfaces
+
+ def find_interface_details(self, host_id, interface_name):
+ lines = self.run_fetch_lines("ifconfig " + interface_name, host_id)
+ interface = None
+ status_up = None
+ for line in [l for l in lines if l != '']:
+ tokens = None
+ if interface is None:
+ tokens = line.split()
+ name = tokens[0].strip('- :')
+ name = name.strip()
+ if name == interface_name:
+ line_remainder = line.strip('-')[len(interface_name)+2:]
+ line_remainder = line_remainder.strip(' :')
+ id = interface_name
+ interface = {
+ "host": host_id,
+ "name": id,
+ "local_name": interface_name,
+ "lines": []
+ }
+ self.handle_line(interface, line_remainder)
+ if '<UP,' in line:
+ status_up = True
+ if status_up is None:
+ if tokens is None:
+ tokens = line.split()
+ if 'BROADCAST' in tokens:
+ status_up = 'UP' in tokens
+ if interface:
+ self.handle_line(interface, line)
+ self.set_interface_data(interface)
+ interface['state'] = 'UP' if status_up else 'DOWN'
+ if 'id' not in interface:
+ interface['id'] = interface_name + '-unknown_mac'
+ return interface
+
+ def handle_line(self, interface, line):
+ self.find_matching_regexps(interface, line, self.regexps)
+ if 'mac_address' in interface:
+ interface["id"] = interface["name"] + "-" + interface["mac_address"]
+ interface["lines"].append(line.strip())
+
+ def set_interface_data(self, interface):
+ if not interface:
+ return
+ interface["data"] = "\n".join(interface["lines"])
+ interface.pop("lines", None)
+ ethtool_ifname = interface["local_name"]
+ if "@" in interface["local_name"]:
+ pos = interface["local_name"].index("@")
+ ethtool_ifname = ethtool_ifname[pos + 1:]
+ cmd = "ethtool " + ethtool_ifname
+ lines = self.run_fetch_lines(cmd, interface["host"])
+ attr = None
+ for line in lines[1:]:
+ matches = self.ethtool_attr.match(line)
+ if matches:
+ # add this attribute to the interface
+ attr = matches.group(1)
+ value = matches.group(2)
+ interface[attr] = value.strip()
+ else:
+ # add more values to the current attribute as an array
+ if isinstance(interface[attr], str):
+ interface[attr] = [interface[attr], line.strip()]
+ else:
+ interface[attr].append(line.strip())
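A sketch of how the ethtool output is folded into interface attributes: a line matching "<attr>: <value>" starts a new attribute, and continuation lines are appended to the current one as a list (sample ethtool lines, illustrative only):

    import re

    ethtool_attr = re.compile(r'^\s+([^:]+):\s(.*)$')
    sample_lines = [
        "Settings for eth0:",
        "\tSupported link modes:   1000baseT/Full",
        "\t                        10000baseT/Full",
        "\tSpeed: 10000Mb/s",
    ]
    interface = {}
    attr = None
    for line in sample_lines[1:]:         # skip the "Settings for ..." header
        matches = ethtool_attr.match(line)
        if matches:
            attr = matches.group(1)
            interface[attr] = matches.group(2).strip()
        elif attr:
            if isinstance(interface[attr], str):
                interface[attr] = [interface[attr], line.strip()]
            else:
                interface[attr].append(line.strip())
    # interface == {'Supported link modes': ['1000baseT/Full', '10000baseT/Full'],
    #               'Speed': '10000Mb/s'}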
diff --git a/app/discover/fetchers/cli/cli_fetch_host_pnics_vpp.py b/app/discover/fetchers/cli/cli_fetch_host_pnics_vpp.py
new file mode 100644
index 0000000..69b6413
--- /dev/null
+++ b/app/discover/fetchers/cli/cli_fetch_host_pnics_vpp.py
@@ -0,0 +1,44 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import re
+
+from discover.fetcher import Fetcher
+from utils.inventory_mgr import InventoryMgr
+
+NAME_RE = '^[a-zA-Z]*GigabitEthernet'
+
+class CliFetchHostPnicsVpp(Fetcher):
+ def __init__(self):
+ super().__init__()
+ self.inv = InventoryMgr()
+ self.name_re = re.compile(NAME_RE)
+
+ def get(self, id):
+ host_id = id[:id.rindex("-")]
+ host_id = id[:host_id.rindex("-")]
+ vedges = self.inv.find_items({
+ "environment": self.get_env(),
+ "type": "vedge",
+ "host": host_id
+ })
+ ret = []
+ for vedge in vedges:
+ pnic_ports = vedge['ports']
+ for pnic_name in pnic_ports:
+ if not self.name_re.search(pnic_name):
+ continue
+ pnic = pnic_ports[pnic_name]
+ pnic['host'] = host_id
+ pnic['id'] = host_id + "-pnic-" + pnic_name
+ pnic['type'] = 'pnic'
+ pnic['object_name'] = pnic_name
+ pnic['Link detected'] = 'yes' if pnic['state'] == 'up' else 'no'
+ ret.append(pnic)
+ return ret
diff --git a/app/discover/fetchers/cli/cli_fetch_host_vservice.py b/app/discover/fetchers/cli/cli_fetch_host_vservice.py
new file mode 100644
index 0000000..9f8173f
--- /dev/null
+++ b/app/discover/fetchers/cli/cli_fetch_host_vservice.py
@@ -0,0 +1,80 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import re
+
+from discover.fetchers.cli.cli_access import CliAccess
+from discover.fetchers.db.db_access import DbAccess
+from discover.network_agents_list import NetworkAgentsList
+from utils.inventory_mgr import InventoryMgr
+
+
+class CliFetchHostVservice(CliAccess, DbAccess):
+ def __init__(self):
+ super(CliFetchHostVservice, self).__init__()
+ # match only DHCP agent and router (L3 agent)
+ self.type_re = re.compile("^q(dhcp|router)-")
+ self.inv = InventoryMgr()
+ self.agents_list = NetworkAgentsList()
+
+ def get_vservice(self, host_id, name_space):
+ result = {"local_service_id": name_space}
+ self.set_details(host_id, result)
+ return result
+
+ def set_details(self, host_id, r):
+ # keep the index without prefix
+ id_full = r["local_service_id"].strip()
+ prefix = id_full[1:id_full.index('-')]
+ id_clean = id_full[id_full.index('-') + 1:]
+ r["service_type"] = prefix
+ name = self.get_router_name(r, id_clean) if prefix == "router" \
+ else self.get_network_name(id_clean)
+ r["name"] = prefix + "-" + name
+ r["host"] = host_id
+ r["id"] = host_id + "-" + id_full
+ self.set_agent_type(r)
+
+ def get_network_name(self, id):
+ query = """
+ SELECT name
+ FROM {}.networks
+ WHERE id = %s
+ """.format(self.neutron_db)
+ results = self.get_objects_list_for_id(query, "router", id)
+ if not list(results):
+ return id
+ for db_row in results:
+ return db_row["name"]
+
+ def get_router_name(self, r, id):
+ query = """
+ SELECT *
+ FROM {}.routers
+ WHERE id = %s
+ """.format(self.neutron_db)
+ results = self.get_objects_list_for_id(query, "router", id.strip())
+ for db_row in results:
+ r.update(db_row)
+ return r["name"]
+
+ # dynamically create sub-folder for vService by type
+ def set_agent_type(self, o):
+ o["master_parent_id"] = o["host"] + "-vservices"
+ o["master_parent_type"] = "vservices_folder"
+ atype = o["service_type"]
+ agent = self.agents_list.get_type(atype)
+ try:
+ o["parent_id"] = o["master_parent_id"] + "-" + agent["type"] + "s"
+ o["parent_type"] = "vservice_" + agent["type"] + "s_folder"
+ o["parent_text"] = agent["folder_text"]
+ except KeyError:
+ o["parent_id"] = o["master_parent_id"] + "-" + "miscellenaous"
+ o["parent_type"] = "vservice_miscellenaous_folder"
+ o["parent_text"] = "Misc. services"
diff --git a/app/discover/fetchers/cli/cli_fetch_host_vservices.py b/app/discover/fetchers/cli/cli_fetch_host_vservices.py
new file mode 100644
index 0000000..9b62dcb
--- /dev/null
+++ b/app/discover/fetchers/cli/cli_fetch_host_vservices.py
@@ -0,0 +1,27 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.cli.cli_fetch_host_vservice import CliFetchHostVservice
+
+
+class CliFetchHostVservices(CliFetchHostVservice):
+ def __init__(self):
+ super(CliFetchHostVservices, self).__init__()
+
+ def get(self, host_id):
+ host = self.inv.get_single(self.get_env(), "host", host_id)
+ if "Network" not in host["host_type"]:
+ return []
+ services_ids = [l[:l.index(' ')] if ' ' in l else l
+ for l in self.run_fetch_lines("ip netns", host_id)]
+ results = [{"local_service_id": s} for s in services_ids if self.type_re.match(s)]
+ for r in results:
+ self.set_details(host_id, r)
+ return results
+
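A sketch of the full namespace-to-vservice path: "ip netns" lines are reduced to namespace names, filtered by the qdhcp-/qrouter- prefix, and split into a service type and an object id (sample namespace names with made-up uuids):

    import re

    type_re = re.compile("^q(dhcp|router)-")
    netns_lines = [
        "qdhcp-0e3b7b81-60e0-4b0c-9b21-7c4a2bbd1111 (id: 2)",
        "qrouter-9a1c3f2d-1111-2222-3333-444455556666 (id: 1)",
        "fake-namespace",
    ]
    services_ids = [l[:l.index(' ')] if ' ' in l else l for l in netns_lines]
    vservices = [s for s in services_ids if type_re.match(s)]
    for id_full in vservices:
        prefix = id_full[1:id_full.index('-')]        # "dhcp" or "router"
        id_clean = id_full[id_full.index('-') + 1:]   # network/router uuid
    # first line: prefix == "dhcp",
    # id_clean == "0e3b7b81-60e0-4b0c-9b21-7c4a2bbd1111"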
diff --git a/app/discover/fetchers/cli/cli_fetch_instance_vnics.py b/app/discover/fetchers/cli/cli_fetch_instance_vnics.py
new file mode 100644
index 0000000..22ac573
--- /dev/null
+++ b/app/discover/fetchers/cli/cli_fetch_instance_vnics.py
@@ -0,0 +1,22 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.cli.cli_fetch_instance_vnics_base import CliFetchInstanceVnicsBase
+
+
+class CliFetchInstanceVnics(CliFetchInstanceVnicsBase):
+ def __init__(self):
+ super().__init__()
+
+ def set_vnic_properties(self, v, instance):
+ super().set_vnic_properties(v, instance)
+ v["source_bridge"] = v["source"]["@bridge"]
+
+ def get_vnic_name(self, v, instance):
+ return v["target"]["@dev"]
diff --git a/app/discover/fetchers/cli/cli_fetch_instance_vnics_base.py b/app/discover/fetchers/cli/cli_fetch_instance_vnics_base.py
new file mode 100644
index 0000000..4de1840
--- /dev/null
+++ b/app/discover/fetchers/cli/cli_fetch_instance_vnics_base.py
@@ -0,0 +1,68 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import xmltodict
+
+from discover.fetchers.cli.cli_access import CliAccess
+from utils.inventory_mgr import InventoryMgr
+
+
+class CliFetchInstanceVnicsBase(CliAccess):
+ def __init__(self):
+ super().__init__()
+ self.inv = InventoryMgr()
+
+ def get(self, id):
+ instance_uuid = id[:id.rindex('-')]
+ instance = self.inv.get_by_id(self.get_env(), instance_uuid)
+ if not instance:
+ return []
+ host = self.inv.get_by_id(self.get_env(), instance["host"])
+ if not host or "Compute" not in host["host_type"]:
+ return []
+ lines = self.run_fetch_lines("virsh list", instance["host"])
+ del lines[:2] # remove header
+ virsh_ids = [l.split()[0] for l in lines if l > ""]
+ results = []
+        # Note: virsh reports instances by local IDs and names that are not
+        # connected to the data we have for the instance so far. We therefore
+        # decide whether an instance is the correct one by comparing its UUID
+        # with the UUID in the dumpxml output.
+ for id in virsh_ids:
+ results.extend(self.get_vnics_from_dumpxml(id, instance))
+ return results
+
+ def get_vnics_from_dumpxml(self, id, instance):
+ xml_string = self.run("virsh dumpxml " + id, instance["host"])
+ if not xml_string.strip():
+ return []
+ response = xmltodict.parse(xml_string)
+ if instance["uuid"] != response["domain"]["uuid"]:
+ # this is the wrong instance - skip it
+ return []
+ try:
+ vnics = response["domain"]["devices"]["interface"]
+ except KeyError:
+ return []
+ if isinstance(vnics, dict):
+ vnics = [vnics]
+ for v in vnics:
+ self.set_vnic_properties(v, instance)
+ return vnics
+
+ def set_vnic_properties(self, v, instance):
+ v["name"] = self.get_vnic_name(v, instance)
+ v["id"] = v["name"]
+ v["vnic_type"] = "instance_vnic"
+ v["host"] = instance["host"]
+ v["instance_id"] = instance["id"]
+ v["instance_db_id"] = instance["_id"]
+ v["mac_address"] = v["mac"]["@address"]
+ instance["mac_address"] = v["mac_address"]
+ self.inv.set(instance)
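A note on the '@'-prefixed keys used above: xmltodict maps XML attributes to dictionary keys starting with '@'. A standalone sketch with a trimmed, made-up libvirt dumpxml fragment:

    # Illustrative only; the XML below is a hypothetical, trimmed dumpxml output.
    import xmltodict

    SAMPLE_XML = """
    <domain>
      <uuid>11111111-2222-3333-4444-555555555555</uuid>
      <devices>
        <interface type='bridge'>
          <mac address='fa:16:3e:aa:bb:cc'/>
          <source bridge='qbr9f94d28e-7b'/>
          <target dev='tap9f94d28e-7b'/>
        </interface>
      </devices>
    </domain>
    """

    doc = xmltodict.parse(SAMPLE_XML)
    vnics = doc["domain"]["devices"]["interface"]
    if isinstance(vnics, dict):  # a single interface is returned as a dict
        vnics = [vnics]
    for v in vnics:
        print(v["target"]["@dev"], v["mac"]["@address"], v["source"]["@bridge"])
    # tap9f94d28e-7b fa:16:3e:aa:bb:cc qbr9f94d28e-7b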
diff --git a/app/discover/fetchers/cli/cli_fetch_instance_vnics_vpp.py b/app/discover/fetchers/cli/cli_fetch_instance_vnics_vpp.py
new file mode 100644
index 0000000..58facd2
--- /dev/null
+++ b/app/discover/fetchers/cli/cli_fetch_instance_vnics_vpp.py
@@ -0,0 +1,18 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.cli.cli_fetch_instance_vnics_base import CliFetchInstanceVnicsBase
+
+
+class CliFetchInstanceVnicsVpp(CliFetchInstanceVnicsBase):
+ def __init__(self):
+ super().__init__()
+
+ def get_vnic_name(self, v, instance):
+ return instance["name"] + "-" + v["@type"] + "-" + v["mac"]["@address"]
diff --git a/app/discover/fetchers/cli/cli_fetch_oteps_lxb.py b/app/discover/fetchers/cli/cli_fetch_oteps_lxb.py
new file mode 100644
index 0000000..1e65a14
--- /dev/null
+++ b/app/discover/fetchers/cli/cli_fetch_oteps_lxb.py
@@ -0,0 +1,86 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.cli.cli_access import CliAccess
+from discover.fetchers.db.db_access import DbAccess
+from utils.inventory_mgr import InventoryMgr
+
+
+class CliFetchOtepsLxb(CliAccess, DbAccess):
+
+ def __init__(self):
+ super().__init__()
+ self.inv = InventoryMgr()
+
+ def get(self, parent_id):
+ vconnector = self.inv.get_by_id(self.get_env(), parent_id)
+ if not vconnector:
+ return []
+ configurations = vconnector['configurations']
+ tunneling_ip = configurations['tunneling_ip']
+ tunnel_types_used = configurations['tunnel_types']
+ if not tunnel_types_used:
+ return []
+ tunnel_type = tunnel_types_used[0]
+ if not tunnel_type:
+ return []
+ # check only interfaces with name matching tunnel type
+ ret = [i for i in vconnector['interfaces'].values()
+ if i['name'].startswith(tunnel_type + '-')]
+ for otep in ret:
+ otep['ip_address'] = tunneling_ip
+ otep['host'] = vconnector['host']
+ self.get_otep_ports(otep)
+ otep['id'] = otep['host'] + '-otep-' + otep['name']
+ otep['name'] = otep['id']
+ otep['vconnector'] = vconnector['name']
+ otep['overlay_type'] = tunnel_type
+ self.get_udp_port(otep)
+ return ret
+
+ """
+ fetch OTEP data from CLI command 'ip -d link show'
+ """
+ def get_otep_ports(self, otep):
+ cmd = 'ip -d link show'
+ lines = self.run_fetch_lines(cmd, otep['host'])
+ header_format = '[0-9]+: ' + otep['name'] + ':'
+ interface_lines = self.get_section_lines(lines, header_format, '\S')
+ otep['data'] = '\n'.join(interface_lines)
+ regexps = [
+ {'name': 'state', 're': ',UP,', 'default': 'DOWN'},
+ {'name': 'mac_address', 're': '.*\slink/ether\s(\S+)\s'},
+ {'name': 'mtu', 're': '.*\smtu\s(\S+)\s'},
+ ]
+ self.get_object_data(otep, interface_lines, regexps)
+ cmd = 'bridge fdb show'
+ dst_line_format = ' dev ' + otep['name'] + ' dst '
+ lines = self.run_fetch_lines(cmd, otep['host'])
+ lines = [l for l in lines if dst_line_format in l]
+ if lines:
+ l = lines[0]
+ otep['bridge dst'] = l[l.index(' dst ')+5:]
+ return otep
+
+ def get_udp_port(self, otep):
+ table_name = "neutron.ml2_" + otep['overlay_type'] + "_endpoints"
+ results = None
+ try:
+ results = self.get_objects_list_for_id(
+ """
+ SELECT udp_port
+ FROM {}
+ WHERE host = %s
+ """.format(table_name),
+ "vedge", otep['host'])
+        except Exception as e:
+            self.log.error('failed to fetch UDP port for OTEP: ' + str(e))
+            otep['udp_port'] = 0
+            return
+        for result in results:
+            otep['udp_port'] = result['udp_port']
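The heavy lifting above is done by CliAccess helpers (get_section_lines, get_object_data) defined elsewhere in this patch; as a rough standalone approximation, the same regexps can be applied with re.search to a hypothetical 'ip -d link show' section:

    # Rough approximation, illustrative only; the sample output is made up and
    # the real CliAccess.get_object_data() may behave differently.
    import re

    LINES = [
        "vxlan-10: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UNKNOWN",
        "    link/ether fa:16:3e:11:22:33 brd ff:ff:ff:ff:ff:ff",
    ]
    regexps = [
        {'name': 'state', 're': ',UP,', 'default': 'DOWN'},
        {'name': 'mac_address', 're': r'.*\slink/ether\s(\S+)\s'},
        {'name': 'mtu', 're': r'.*\smtu\s(\S+)\s'},
    ]
    otep = {}
    for spec in regexps:
        otep[spec['name']] = spec.get('default')
        for line in LINES:
            m = re.search(spec['re'], line)
            if m:
                # no capture group means presence implies 'UP'
                otep[spec['name']] = m.group(1) if m.groups() else 'UP'
    print(otep)  # {'state': 'UP', 'mac_address': 'fa:16:3e:11:22:33', 'mtu': '1450'}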
diff --git a/app/discover/fetchers/cli/cli_fetch_vconnectors.py b/app/discover/fetchers/cli/cli_fetch_vconnectors.py
new file mode 100644
index 0000000..78b767a
--- /dev/null
+++ b/app/discover/fetchers/cli/cli_fetch_vconnectors.py
@@ -0,0 +1,40 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from abc import abstractmethod, ABCMeta
+
+from discover.fetchers.cli.cli_access import CliAccess
+from utils.inventory_mgr import InventoryMgr
+from utils.singleton import Singleton
+
+
+class ABCSingleton(ABCMeta, Singleton):
+ pass
+
+
+class CliFetchVconnectors(CliAccess, metaclass=ABCSingleton):
+ def __init__(self):
+ super().__init__()
+ self.inv = InventoryMgr()
+
+ @abstractmethod
+ def get_vconnectors(self, host):
+ raise NotImplementedError("Subclass must override get_vconnectors()")
+
+ def get(self, id):
+ host_id = id[:id.rindex('-')]
+ host = self.inv.get_by_id(self.get_env(), host_id)
+ if not host:
+ self.log.error("CliFetchVconnectors: host not found: " + host_id)
+ return []
+ if "host_type" not in host:
+ self.log.error("host does not have host_type: " + host_id + \
+ ", host: " + str(host))
+ return []
+ return self.get_vconnectors(host)
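ABCSingleton above exists because a class cannot use ABCMeta and the project's Singleton metaclass at the same time without a combined metaclass. A minimal sketch of the idea, with a simplified stand-in for utils.singleton.Singleton (the real one may differ):

    from abc import ABCMeta, abstractmethod

    class Singleton(type):
        # simplified, hypothetical stand-in for utils.singleton.Singleton
        _instances = {}

        def __call__(cls, *args, **kwargs):
            if cls not in cls._instances:
                cls._instances[cls] = super().__call__(*args, **kwargs)
            return cls._instances[cls]

    class ABCSingleton(ABCMeta, Singleton):
        """Combined metaclass: abstract-method checks plus singleton reuse."""

    class BaseFetcher(metaclass=ABCSingleton):
        @abstractmethod
        def get_vconnectors(self, host):
            raise NotImplementedError

    class OvsFetcher(BaseFetcher):
        def get_vconnectors(self, host):
            return []

    assert OvsFetcher() is OvsFetcher()  # same instance every time
    # BaseFetcher() would raise TypeError: can't instantiate abstract class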
diff --git a/app/discover/fetchers/cli/cli_fetch_vconnectors_lxb.py b/app/discover/fetchers/cli/cli_fetch_vconnectors_lxb.py
new file mode 100644
index 0000000..648dc63
--- /dev/null
+++ b/app/discover/fetchers/cli/cli_fetch_vconnectors_lxb.py
@@ -0,0 +1,35 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import json
+
+from discover.fetchers.cli.cli_fetch_vconnectors_ovs import CliFetchVconnectorsOvs
+from discover.fetchers.db.db_access import DbAccess
+
+
+class CliFetchVconnectorsLxb(CliFetchVconnectorsOvs, DbAccess):
+
+ def __init__(self):
+ super().__init__()
+
+ def get(self, id):
+ ret = super().get(id)
+ for doc in ret:
+ query = """
+ SELECT configurations
+ FROM {}.agents
+ WHERE agent_type="Linux bridge agent" AND host = %s
+ """.format(self.neutron_db)
+ host = doc['host']
+ matches = self.get_objects_list_for_id(query, '', host)
+ if not matches:
+ raise ValueError('No Linux bridge agent in DB for host: {}'.format(host))
+ agent = matches[0]
+ doc['configurations'] = json.loads(agent['configurations'])
+ return ret
diff --git a/app/discover/fetchers/cli/cli_fetch_vconnectors_ovs.py b/app/discover/fetchers/cli/cli_fetch_vconnectors_ovs.py
new file mode 100644
index 0000000..ff37569
--- /dev/null
+++ b/app/discover/fetchers/cli/cli_fetch_vconnectors_ovs.py
@@ -0,0 +1,56 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import re
+
+from discover.fetchers.cli.cli_fetch_vconnectors import CliFetchVconnectors
+
+
+class CliFetchVconnectorsOvs(CliFetchVconnectors):
+ def __init__(self):
+ super().__init__()
+
+ def get_vconnectors(self, host):
+ host_id = host['id']
+ lines = self.run_fetch_lines("brctl show", host_id)
+ headers = ["bridge_name", "bridge_id", "stp_enabled", "interfaces"]
+ headers_count = len(headers)
+ # since we hard-coded the headers list, remove the headers line
+ del lines[:1]
+
+        # interfaces can spill over to the next line - detect that and append
+        # them to the end of the previous line for our processing
+ fixed_lines = self.merge_ws_spillover_lines(lines)
+
+ results = self.parse_cmd_result_with_whitespace(fixed_lines, headers, False)
+ ret = []
+ for doc in results:
+ doc["name"] = doc.pop("bridge_name")
+ doc["id"] = doc["name"] + "-" + doc.pop("bridge_id")
+ doc["host"] = host_id
+ doc["connector_type"] = "bridge"
+ if "interfaces" in doc:
+ interfaces = {}
+ interface_names = doc["interfaces"].split(",")
+ for interface_name in interface_names:
+ # find MAC address for this interface from ports list
+ port_id_prefix = interface_name[3:]
+ port = self.inv.find_items({
+ "environment": self.get_env(),
+ "type": "port",
+ "binding:host_id": host_id,
+ "id": {"$regex": r"^" + re.escape(port_id_prefix)}
+ }, get_single=True)
+ mac_address = '' if not port else port['mac_address']
+ interface = {'name': interface_name, 'mac_address': mac_address}
+ interfaces[interface_name] = interface
+ doc["interfaces"] = interfaces
+ doc['interfaces_names'] = list(interfaces.keys())
+ ret.append(doc)
+ return ret
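merge_ws_spillover_lines() and parse_cmd_result_with_whitespace() are CliAccess helpers defined elsewhere in this patch; a rough standalone approximation of the spillover handling, using made-up 'brctl show' output:

    # Rough approximation, illustrative only; the real CliAccess helpers
    # may differ and the sample output is hypothetical.
    SAMPLE = [
        "brq9f94d28e-73\t8000.02429a5c72e1\tno\t\ttap9f94d28e-7b",
        "\t\t\t\t\t\teth1.100",
    ]

    def merge_spillover(lines):
        merged = []
        for line in lines:
            if line.startswith(("\t", " ")) and merged:
                merged[-1] += "," + line.strip()  # continuation line
            else:
                merged.append(line)
        return merged

    for line in merge_spillover(SAMPLE):
        name, bridge_id, stp, interfaces = line.split(None, 3)
        print(name, interfaces.split(","))
    # brq9f94d28e-73 ['tap9f94d28e-7b', 'eth1.100']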
diff --git a/app/discover/fetchers/cli/cli_fetch_vconnectors_vpp.py b/app/discover/fetchers/cli/cli_fetch_vconnectors_vpp.py
new file mode 100644
index 0000000..479e1db
--- /dev/null
+++ b/app/discover/fetchers/cli/cli_fetch_vconnectors_vpp.py
@@ -0,0 +1,64 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.cli.cli_fetch_vconnectors import CliFetchVconnectors
+
+
+class CliFetchVconnectorsVpp(CliFetchVconnectors):
+ def __init__(self):
+ super().__init__()
+
+ def get_vconnectors(self, host):
+ lines = self.run_fetch_lines("vppctl show mode", host['id'])
+ vconnectors = {}
+ for l in lines:
+ if not l.startswith('l2 bridge'):
+ continue
+ line_parts = l.split(' ')
+ name = line_parts[2]
+ bd_id = line_parts[4]
+ if bd_id in vconnectors:
+ vconnector = vconnectors[bd_id]
+ else:
+ vconnector = {
+ 'host': host['id'],
+ 'id': host['id'] + '-vconnector-' + bd_id,
+ 'bd_id': bd_id,
+ 'name': "bridge-domain-" + bd_id,
+ 'interfaces': {},
+ 'interfaces_names': []
+ }
+ vconnectors[bd_id] = vconnector
+ interface = self.get_interface_details(host, name)
+ if interface:
+ vconnector['interfaces'][name] = interface
+ vconnector['interfaces_names'].append(name)
+ return list(vconnectors.values())
+
+ def get_interface_details(self, host, name):
+ # find vconnector interfaces
+ cmd = "vppctl show hardware-int " + name
+ interface_lines = self.run_fetch_lines(cmd, host['id'])
+ # remove header line
+ interface_lines.pop(0)
+ interface = None
+ for l in interface_lines:
+ if not l.strip():
+ continue # ignore empty lines
+ if not l.startswith(' '):
+ details = l.split()
+ interface = {
+ "name": details[0],
+ "hardware": details[3],
+ "state": details[2],
+ "id": details[1],
+ }
+ elif l.startswith(' Ethernet address '):
+ interface['mac_address'] = l[l.rindex(' ') + 1:]
+ return interface
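The interface details above are keyed off indentation in the 'vppctl show hardware-int' output; a standalone sketch with hypothetical output lines:

    # Sample output is hypothetical; column layout assumed from the parser above.
    LINES = [
        "              Name                Idx   Link  Hardware",
        "GigabitEthernet0/8/0               1     up   GigabitEthernet0/8/0",
        "  Ethernet address 08:00:27:3c:1f:2a",
    ]
    LINES.pop(0)  # drop the header line
    interface = None
    for l in LINES:
        if not l.startswith(' '):
            details = l.split()
            interface = {"name": details[0], "id": details[1],
                         "state": details[2], "hardware": details[3]}
        elif l.startswith('  Ethernet address '):
            interface['mac_address'] = l[l.rindex(' ') + 1:]
    print(interface["name"], interface["state"], interface["mac_address"])
    # GigabitEthernet0/8/0 up 08:00:27:3c:1f:2a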
diff --git a/app/discover/fetchers/cli/cli_fetch_vpp_vedges.py b/app/discover/fetchers/cli/cli_fetch_vpp_vedges.py
new file mode 100644
index 0000000..f9c622d
--- /dev/null
+++ b/app/discover/fetchers/cli/cli_fetch_vpp_vedges.py
@@ -0,0 +1,58 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+# Copyright 2016 Cisco Corporation
+# oslo-related message handling
+
+from oslo_serialization import jsonutils
+from oslo_utils import uuidutils
+import yaml
+
+from neutronclient.tests.functional import base
+
+
+class TestCLIFormatter(base.ClientTestBase):
+
+    # NOTE: old code, not related to VPP - disregard
+ def setUp(self):
+ super(TestCLIFormatter, self).setUp()
+ self.net_name = 'net-%s' % uuidutils.generate_uuid()
+ self.addCleanup(self.neutron, 'net-delete %s' % self.net_name)
+
+ def _create_net(self, fmt, col_attrs):
+ params = ['-c %s' % attr for attr in col_attrs]
+ params.append('-f %s' % fmt)
+ params.append(self.net_name)
+ param_string = ' '.join(params)
+ return self.neutron('net-create', params=param_string)
+
+ def test_net_create_with_json_formatter(self):
+ result = self._create_net('json', ['name', 'admin_state_up'])
+ self.assertDictEqual({'name': self.net_name,
+ 'admin_state_up': True},
+ jsonutils.loads(result))
+
+ def test_net_create_with_yaml_formatter(self):
+ result = self._create_net('yaml', ['name', 'admin_state_up'])
+ self.assertDictEqual({'name': self.net_name,
+ 'admin_state_up': True},
+ yaml.load(result))
+
+ def test_net_create_with_value_formatter(self):
+ # NOTE(amotoki): In 'value' formatter, there is no guarantee
+ # in the order of attribute, so we use one attribute in this test.
+ result = self._create_net('value', ['name'])
+ self.assertEqual(self.net_name, result.strip())
+
+ def test_net_create_with_shell_formatter(self):
+ result = self._create_net('shell', ['name', 'admin_state_up'])
+ result_lines = set(result.strip().split('\n'))
+ self.assertSetEqual(set(['name="%s"' % self.net_name,
+ 'admin_state_up="True"']),
+                            result_lines)
diff --git a/app/discover/fetchers/cli/cli_fetch_vservice_vnics.py b/app/discover/fetchers/cli/cli_fetch_vservice_vnics.py
new file mode 100644
index 0000000..44ac8d6
--- /dev/null
+++ b/app/discover/fetchers/cli/cli_fetch_vservice_vnics.py
@@ -0,0 +1,140 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import re
+
+from discover.fetchers.cli.cli_access import CliAccess
+from utils.inventory_mgr import InventoryMgr
+
+
+class CliFetchVserviceVnics(CliAccess):
+ def __init__(self):
+ super().__init__()
+ self.inv = InventoryMgr()
+ self.if_header = re.compile('^[-]?(\S+)\s+(.*)$')
+ self.regexps = [
+ {'name': 'mac_address', 're': '^.*\sHWaddr\s(\S+)(\s.*)?$'},
+ {'name': 'mac_address', 're': '^.*\sether\s(\S+)(\s.*)?$'},
+ {'name': 'netmask', 're': '^.*\sMask:\s?([0-9.]+)(\s.*)?$'},
+ {'name': 'netmask', 're': '^.*\snetmask\s([0-9.]+)(\s.*)?$'},
+ {'name': 'IP Address', 're': '^\s*inet addr:(\S+)\s.*$'},
+ {'name': 'IP Address', 're': '^\s*inet ([0-9.]+)\s.*$'},
+ {'name': 'IPv6 Address',
+ 're': '^\s*inet6 addr: ?\s*([0-9a-f:/]+)(\s.*)?$'},
+ {'name': 'IPv6 Address',
+ 're': '^\s*inet6 \s*([0-9a-f:/]+)(\s.*)?$'}
+ ]
+
+ def get(self, host_id):
+ host = self.inv.get_by_id(self.get_env(), host_id)
+ if not host:
+ self.log.error("host not found: " + host_id)
+ return []
+ if "host_type" not in host:
+ self.log.error("host does not have host_type: " + host_id +
+ ", host: " + str(host))
+ return []
+ if "Network" not in host["host_type"]:
+ return []
+ lines = self.run_fetch_lines("ip netns", host_id)
+ ret = []
+ for l in [l for l in lines
+ if l.startswith("qdhcp") or l.startswith("qrouter")]:
+ service = l.strip()
+ service = service if ' ' not in service \
+ else service[:service.index(' ')]
+ ret.extend(self.handle_service(host_id, service))
+ return ret
+
+ def handle_service(self, host, service, enable_cache=True):
+ cmd = "ip netns exec " + service + " ifconfig"
+ lines = self.run_fetch_lines(cmd, host, enable_cache)
+ interfaces = []
+ current = None
+ for line in lines:
+ matches = self.if_header.match(line)
+ if matches:
+ if current:
+ self.set_interface_data(current)
+ name = matches.group(1).strip(":")
+ # ignore 'lo' interface
+ if name == 'lo':
+ current = None
+ else:
+ line_remainder = matches.group(2)
+ vservice_id = host + "-" + service
+ current = {
+ "id": host + "-" + name,
+ "type": "vnic",
+ "vnic_type": "vservice_vnic",
+ "host": host,
+ "name": name,
+ "master_parent_type": "vservice",
+ "master_parent_id": vservice_id,
+ "parent_type": "vnics_folder",
+ "parent_id": vservice_id + "-vnics",
+ "parent_text": "vNICs",
+ "lines": []
+ }
+ interfaces.append(current)
+ self.handle_line(current, line_remainder)
+ else:
+ if current:
+ self.handle_line(current, line)
+ if current:
+ self.set_interface_data(current)
+ return interfaces
+
+ def handle_line(self, interface, line):
+ self.find_matching_regexps(interface, line, self.regexps)
+ interface["lines"].append(line.strip())
+
+ def set_interface_data(self, interface):
+ if not interface or 'IP Address' not in interface or 'netmask' not in interface:
+ return
+
+ interface["data"] = "\n".join(interface.pop("lines", None))
+ interface["cidr"] = self.get_cidr_for_vnic(interface)
+ network = self.inv.get_by_field(self.get_env(), "network", "cidrs",
+ interface["cidr"], get_single=True)
+ if not network:
+ return
+ interface["network"] = network["id"]
+ # set network for the vservice, to check network on clique creation
+ vservice = self.inv.get_by_id(self.get_env(),
+ interface["master_parent_id"])
+ network_id = network["id"]
+ if "network" not in vservice:
+ vservice["network"] = list()
+ if network_id not in vservice["network"]:
+ vservice["network"].append(network_id)
+ self.inv.set(vservice)
+
+ # find CIDR string by IP address and netmask
+ def get_cidr_for_vnic(self, vnic):
+ if "IP Address" not in vnic:
+ vnic["IP Address"] = "No IP Address"
+ return "No IP Address"
+ ipaddr = vnic["IP Address"].split('.')
+ netmask = vnic["netmask"].split('.')
+
+ # calculate network start
+ net_start = []
+ for pos in range(0, 4):
+ net_start.append(str(int(ipaddr[pos]) & int(netmask[pos])))
+
+ cidr_string = '.'.join(net_start) + '/'
+ cidr_string = cidr_string + self.get_net_size(netmask)
+ return cidr_string
+
+ def get_net_size(self, netmask):
+ binary_str = ''
+ for octet in netmask:
+ binary_str += bin(int(octet))[2:].zfill(8)
+ return str(len(binary_str.rstrip('0')))
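A worked example of the CIDR derivation above (values are illustrative): IP address 172.16.13.14 with netmask 255.255.252.0.

    ipaddr = "172.16.13.14".split('.')
    netmask = "255.255.252.0".split('.')

    # AND each octet with the netmask: 172&255=172, 16&255=16, 13&252=12, 14&0=0
    net_start = [str(int(ipaddr[pos]) & int(netmask[pos])) for pos in range(4)]

    # prefix length = number of leading 1-bits in the netmask
    binary_str = ''.join(bin(int(octet))[2:].zfill(8) for octet in netmask)
    prefix_len = len(binary_str.rstrip('0'))  # 22

    print('.'.join(net_start) + '/' + str(prefix_len))  # 172.16.12.0/22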
diff --git a/app/discover/fetchers/db/__init__.py b/app/discover/fetchers/db/__init__.py
new file mode 100644
index 0000000..b0637e9
--- /dev/null
+++ b/app/discover/fetchers/db/__init__.py
@@ -0,0 +1,9 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
diff --git a/app/discover/fetchers/db/db_access.py b/app/discover/fetchers/db/db_access.py
new file mode 100644
index 0000000..00bd776
--- /dev/null
+++ b/app/discover/fetchers/db/db_access.py
@@ -0,0 +1,142 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import mysql.connector
+
+from discover.configuration import Configuration
+from discover.fetcher import Fetcher
+from utils.string_utils import jsonify
+
+
+class DbAccess(Fetcher):
+ conn = None
+ query_count_per_con = 0
+
+ # connection timeout set to 30 seconds,
+ # due to problems over long connections
+ TIMEOUT = 30
+
+ def __init__(self):
+ super().__init__()
+ self.config = Configuration()
+ self.conf = self.config.get("mysql")
+ self.connect_to_db()
+ cursor = DbAccess.conn.cursor(dictionary=True)
+ try:
+ # check if DB schema 'neutron' exists
+ cursor.execute("SELECT COUNT(*) FROM neutron.agents")
+ for row in cursor:
+ pass
+ self.neutron_db = "neutron"
+ except (AttributeError, mysql.connector.errors.ProgrammingError):
+ self.neutron_db = "ml2_neutron"
+
+ def db_connect(self, _host, _port, _user, _password, _database):
+ if DbAccess.conn:
+ return
+ try:
+ connector = mysql.connector
+ DbAccess.conn = connector.connect(host=_host, port=_port,
+ connection_timeout=self.TIMEOUT,
+ user=_user,
+ password=_password,
+ database=_database,
+ raise_on_warnings=True)
+ DbAccess.conn.ping(True) # auto-reconnect if necessary
+        except Exception as e:
+            self.log.critical("failed to connect to MySQL DB: " + str(e))
+ return
+ DbAccess.query_count_per_con = 0
+
+ def connect_to_db(self, force=False):
+ if DbAccess.conn:
+ if not force:
+ return
+ self.log.info("DbAccess: ****** forcing reconnect, " +
+ "query count: %s ******",
+ DbAccess.query_count_per_con)
+ DbAccess.conn = None
+ self.conf = self.config.get("mysql")
+ cnf = self.conf
+ cnf['schema'] = cnf['schema'] if 'schema' in cnf else 'nova'
+ self.db_connect(cnf["host"], cnf["port"],
+ cnf["user"], cnf["password"],
+ cnf["schema"])
+
+ def get_objects_list_for_id(self, query, object_type, id):
+ self.connect_to_db(DbAccess.query_count_per_con >= 25)
+ DbAccess.query_count_per_con += 1
+ self.log.debug("query count: %s, running query:\n%s\n",
+ str(DbAccess.query_count_per_con), query)
+
+ cursor = DbAccess.conn.cursor(dictionary=True)
+ try:
+ if id:
+ cursor.execute(query, [str(id)])
+ else:
+ cursor.execute(query)
+ except (AttributeError, mysql.connector.errors.OperationalError) as e:
+ self.log.error(e)
+ self.connect_to_db(True)
+ # try again to run the query
+ cursor = DbAccess.conn.cursor(dictionary=True)
+ if id:
+ cursor.execute(query, [str(id)])
+ else:
+ cursor.execute(query)
+
+ rows = []
+ for row in cursor:
+ rows.append(row)
+ return rows
+
+ def get_objects_list(self, query, object_type):
+ return self.get_objects_list_for_id(query, object_type, None)
+
+ def get_objects(self, qry, type, id):
+ return jsonify(self.get_objects_list(qry, type))
+
+ def get(self, id):
+ # return list of available fetch types
+ ret = {
+ "description": "List of available fetch calls for this interface",
+ "types": {
+ "regions": "Regions of this environment",
+ "projects": "Projects (tenants) of this environment",
+ "availability_zones": "Availability zones",
+ "aggregates": "Host aggregates",
+ "aggregate_hosts": "Hosts in aggregate X (parameter: id)",
+ "az_hosts": "Host in availability_zone X (parameter: id)"
+ }
+ }
+ return jsonify(ret)
+
+ def exec(self, query, table, field, values):
+ try:
+ cursor = DbAccess.conn.cursor(dictionary=True)
+ cursor.execute(query, [table, field, values])
+ except (AttributeError, mysql.connector.errors.OperationalError) as e:
+ self.log.error(e)
+ self.connect_to_db(True)
+ # try again to run the query
+ cursor = DbAccess.conn.cursor(dictionary=True)
+ cursor.execute(query, [table, field, values])
+
+ rows = []
+ for row in cursor:
+ rows.append(row)
+ return rows
+
+ def set(self, table, field, values):
+ query = """INSERT INTO %s %s VALUES %s"""
+ return self.exec(query, table, field, values)
+
+ def delete(self, table, field, values):
+ query = """DELETE FROM %s WHERE %s=%s"""
+ return self.exec(query, table, field, values)
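All of the DB fetchers that follow use the same query pattern: the schema or table name is interpolated with str.format() (it never comes from user input), while the id value is bound as a %s parameter by mysql.connector. A hedged sketch of the typical call, with a hypothetical query; 'fetcher' stands for any DbAccess subclass instance:

    # Hypothetical example query; illustrative only.
    def get_ports_for_network(fetcher, network_id):
        query = """
            SELECT id, mac_address, device_owner
            FROM {}.ports
            WHERE network_id = %s
        """.format(fetcher.neutron_db)
        return fetcher.get_objects_list_for_id(query, "port", network_id)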
diff --git a/app/discover/fetchers/db/db_fetch_aggregate_hosts.py b/app/discover/fetchers/db/db_fetch_aggregate_hosts.py
new file mode 100644
index 0000000..59ba5d0
--- /dev/null
+++ b/app/discover/fetchers/db/db_fetch_aggregate_hosts.py
@@ -0,0 +1,36 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import bson
+
+from discover.fetchers.db.db_access import DbAccess
+from utils.inventory_mgr import InventoryMgr
+
+
+class DbFetchAggregateHosts(DbAccess):
+ def get(self, id):
+ query = """
+ SELECT CONCAT('aggregate-', a.name, '-', host) AS id, host AS name
+ FROM nova.aggregate_hosts ah
+ JOIN nova.aggregates a ON a.id = ah.aggregate_id
+ WHERE ah.deleted = 0 AND aggregate_id = %s
+ """
+ hosts = self.get_objects_list_for_id(query, "host", id)
+ if hosts:
+ inv = InventoryMgr()
+ for host_rec in hosts:
+ host_id = host_rec['name']
+ host = inv.get_by_id(self.get_env(), host_id)
+ if not host:
+ self.log.error('unable to find host {} '
+ 'from aggregate {} in inventory'
+ .format(host_id, id))
+ continue
+ host_rec['ref_id'] = bson.ObjectId(host['_id'])
+ return hosts
diff --git a/app/discover/fetchers/db/db_fetch_aggregates.py b/app/discover/fetchers/db/db_fetch_aggregates.py
new file mode 100644
index 0000000..da0720b
--- /dev/null
+++ b/app/discover/fetchers/db/db_fetch_aggregates.py
@@ -0,0 +1,21 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.db.db_access import DbAccess
+
+
+class DbFetchAggregates(DbAccess):
+ def get(self, id):
+ return self.get_objects_list(
+ """
+ SELECT id, name
+ FROM nova.aggregates
+ WHERE deleted = 0
+ """,
+ "host aggregate")
diff --git a/app/discover/fetchers/db/db_fetch_availability_zones.py b/app/discover/fetchers/db/db_fetch_availability_zones.py
new file mode 100644
index 0000000..763d777
--- /dev/null
+++ b/app/discover/fetchers/db/db_fetch_availability_zones.py
@@ -0,0 +1,22 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.db.db_access import DbAccess
+
+class DbFetchAvailabilityZones(DbAccess):
+
+ def get(self, id):
+ query = """
+ SELECT DISTINCT availability_zone,
+ availability_zone AS id, COUNT(DISTINCT host) AS descendants
+ FROM nova.instances
+ WHERE availability_zone IS NOT NULL
+ GROUP BY availability_zone
+ """
+ return self.get_objects_list(query, "availability zone")
diff --git a/app/discover/fetchers/db/db_fetch_az_network_hosts.py b/app/discover/fetchers/db/db_fetch_az_network_hosts.py
new file mode 100644
index 0000000..09043ea
--- /dev/null
+++ b/app/discover/fetchers/db/db_fetch_az_network_hosts.py
@@ -0,0 +1,31 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import json
+
+from discover.fetchers.db.db_access import DbAccess
+
+
+class DbFetchAZNetworkHosts(DbAccess):
+
+ def get(self, id):
+ query = """
+ SELECT DISTINCT host, host AS id, configurations
+ FROM neutron.agents
+ WHERE agent_type = 'Metadata agent'
+ """
+ results = self.get_objects_list(query, "host")
+ for r in results:
+ self.set_host_details(r)
+ return results
+
+ def set_host_details(self, r):
+ config = json.loads(r["configurations"])
+ r["ip_address"] = config["nova_metadata_ip"]
+ r["host_type"] = "Network Node"
diff --git a/app/discover/fetchers/db/db_fetch_host_instances.py b/app/discover/fetchers/db/db_fetch_host_instances.py
new file mode 100644
index 0000000..2245c4a
--- /dev/null
+++ b/app/discover/fetchers/db/db_fetch_host_instances.py
@@ -0,0 +1,15 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.db.db_fetch_instances import DbFetchInstances
+
+class DbFetchHostInstances(DbFetchInstances):
+
+ def get(self, id):
+ return self.get_instances("host", id)
diff --git a/app/discover/fetchers/db/db_fetch_host_network_agents.py b/app/discover/fetchers/db/db_fetch_host_network_agents.py
new file mode 100644
index 0000000..c323573
--- /dev/null
+++ b/app/discover/fetchers/db/db_fetch_host_network_agents.py
@@ -0,0 +1,35 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import json
+
+from discover.fetchers.db.db_access import DbAccess
+from utils.inventory_mgr import InventoryMgr
+
+
+class DbFetchHostNetworkAgents(DbAccess):
+ def __init__(self):
+ super().__init__()
+ self.inv = InventoryMgr()
+ self.env_config = self.config.get_env_config()
+
+ def get(self, id):
+ query = """
+ SELECT * FROM {}.agents
+ WHERE host = %s
+ """.format(self.neutron_db)
+ host_id = id[:-1 * len("-network_agents")]
+ results = self.get_objects_list_for_id(query, "network_agent", host_id)
+ mechanism_drivers = self.env_config['mechanism_drivers']
+ id_prefix = mechanism_drivers[0] if mechanism_drivers else 'network_agent'
+ for o in results:
+ o["configurations"] = json.loads(o["configurations"])
+ o["name"] = o["binary"]
+ o['id'] = id_prefix + '-' + o['id']
+ return results
diff --git a/app/discover/fetchers/db/db_fetch_instances.py b/app/discover/fetchers/db/db_fetch_instances.py
new file mode 100644
index 0000000..54c4114
--- /dev/null
+++ b/app/discover/fetchers/db/db_fetch_instances.py
@@ -0,0 +1,60 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import json
+
+from discover.fetchers.db.db_access import DbAccess
+
+
+class DbFetchInstances(DbAccess):
+ def get_instance_data(self, instances):
+ instances_hash = {}
+ for doc in instances:
+ instances_hash[doc["id"]] = doc
+
+ query = """
+ SELECT DISTINCT i.uuid AS id, i.display_name AS name,
+ i.host AS host, host_ip AS ip_address,
+ network_info, project_id,
+ IF(p.name IS NULL, "Unknown", p.name) AS project
+ FROM nova.instances i
+ LEFT JOIN keystone.project p ON p.id = i.project_id
+ JOIN nova.instance_info_caches ic ON i.uuid = ic.instance_uuid
+ JOIN nova.compute_nodes cn ON i.node = cn.hypervisor_hostname
+ WHERE i.deleted = 0
+ """
+ results = self.get_objects_list(query, "instance")
+ for result in results:
+ id = result["id"]
+ if id not in instances_hash:
+ continue
+ self.build_instance_details(result)
+ doc = instances_hash[id]
+ doc.update(result)
+
+ def build_instance_details(self, result):
+ network_info_str = result.pop("network_info", None)
+ result["network_info"] = json.loads(network_info_str)
+
+ # add network as an array to allow constraint checking
+ # when building clique
+ networks = []
+ for net in result["network_info"]:
+ if "network" not in net or "id" not in net["network"]:
+ continue
+ network_id = net["network"]["id"]
+ if network_id in networks:
+ continue
+ networks.append(network_id)
+ result["network"] = networks
+
+ result["type"] = "instance"
+ result["parent_type"] = "instances_folder"
+ result["parent_id"] = result["host"] + "-instances"
+ result["in_project-" + result.pop("project", None)] = "1"
diff --git a/app/discover/fetchers/db/db_fetch_oteps.py b/app/discover/fetchers/db/db_fetch_oteps.py
new file mode 100644
index 0000000..9055c11
--- /dev/null
+++ b/app/discover/fetchers/db/db_fetch_oteps.py
@@ -0,0 +1,81 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import re
+
+from discover.fetchers.cli.cli_access import CliAccess
+from discover.fetchers.db.db_access import DbAccess
+from utils.inventory_mgr import InventoryMgr
+from utils.singleton import Singleton
+
+
+class DbFetchOteps(DbAccess, CliAccess, metaclass=Singleton):
+ def __init__(self):
+ super().__init__()
+ self.inv = InventoryMgr()
+ self.port_re = re.compile("^\s*port (\d+): ([^(]+)( \(internal\))?$")
+
+ def get(self, id):
+ vedge = self.inv.get_by_id(self.get_env(), id)
+ tunnel_type = None
+ if "configurations" not in vedge:
+ return []
+ if "tunnel_types" not in vedge["configurations"]:
+ return []
+ if not vedge["configurations"]["tunnel_types"]:
+ return []
+ tunnel_type = vedge["configurations"]["tunnel_types"][0]
+ host_id = vedge["host"]
+ table_name = "neutron.ml2_" + tunnel_type + "_endpoints"
+ env_config = self.config.get_env_config()
+ distribution = env_config["distribution"]
+ if distribution == "Canonical-icehouse":
+ # for Icehouse, we only get IP address from the DB, so take the
+ # host IP address and from the host data in Mongo
+ host = self.inv.get_by_id(self.get_env(), host_id)
+ results = [{"host": host_id, "ip_address": host["ip_address"]}]
+ else:
+ results = self.get_objects_list_for_id(
+ """
+ SELECT *
+ FROM {}
+ WHERE host = %s
+ """.format(table_name),
+ "vedge", host_id)
+ for doc in results:
+ doc["id"] = host_id + "-otep"
+ doc["name"] = doc["id"]
+ doc["host"] = host_id
+ doc["overlay_type"] = tunnel_type
+ doc["ports"] = vedge["tunnel_ports"] if "tunnel_ports" in vedge else []
+ if "udp_port" not in doc:
+ doc["udp_port"] = "67"
+ self.get_vconnector(doc, host_id, vedge)
+
+ return results
+
+ # find matching vConnector by tunneling_ip of vEdge
+ # look for that IP address in ifconfig for the host
+ def get_vconnector(self, doc, host_id, vedge):
+ tunneling_ip = vedge["configurations"]["tunneling_ip"]
+ ifconfig_lines = self.run_fetch_lines("ifconfig", host_id)
+ interface = None
+ ip_string = " " * 10 + "inet addr:" + tunneling_ip + " "
+ vconnector = None
+ for l in ifconfig_lines:
+ if l.startswith(" "):
+ if interface and l.startswith(ip_string):
+ vconnector = interface
+ break
+ else:
+ if " " in l:
+ interface = l[:l.index(" ")]
+
+ if vconnector:
+ doc["vconnector"] = vconnector
diff --git a/app/discover/fetchers/db/db_fetch_port.py b/app/discover/fetchers/db/db_fetch_port.py
new file mode 100644
index 0000000..2cb814a
--- /dev/null
+++ b/app/discover/fetchers/db/db_fetch_port.py
@@ -0,0 +1,34 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.db.db_access import DbAccess
+from utils.inventory_mgr import InventoryMgr
+
+
+class DbFetchPort(DbAccess):
+ def __init__(self):
+ super().__init__()
+ self.inv = InventoryMgr()
+ self.env_config = self.config.get_env_config()
+
+ def get(self, id=None):
+ query = """SELECT * FROM {}.ports where network_id = %s""" \
+ .format(self.neutron_db)
+ return self.get_objects_list_for_id(query, "port", id)
+
+ def get_id(self, id=None):
+ query = """SELECT id FROM {}.ports where network_id = %s""" \
+ .format(self.neutron_db)
+ result = self.get_objects_list_for_id(query, "port", id)
+ return result[0]['id'] if result != [] else None
+
+ def get_id_by_field(self, id, search=''):
+ query = """SELECT id FROM neutron.ports where network_id = %s AND """ + search
+ result = self.get_objects_list_for_id(query, "port", id)
+        return result[0]['id'] if result != [] else None
\ No newline at end of file
diff --git a/app/discover/fetchers/db/db_fetch_vedges_ovs.py b/app/discover/fetchers/db/db_fetch_vedges_ovs.py
new file mode 100644
index 0000000..24cc9f8
--- /dev/null
+++ b/app/discover/fetchers/db/db_fetch_vedges_ovs.py
@@ -0,0 +1,178 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import json
+
+import re
+
+from discover.fetchers.cli.cli_access import CliAccess
+from discover.fetchers.db.db_access import DbAccess
+from utils.inventory_mgr import InventoryMgr
+from utils.singleton import Singleton
+
+
+class DbFetchVedgesOvs(DbAccess, CliAccess, metaclass=Singleton):
+ def __init__(self):
+ super().__init__()
+ self.inv = InventoryMgr()
+ self.port_re = re.compile("^\s*port (\d+): ([^(]+)( \(internal\))?$")
+ self.port_line_header_prefix = " " * 8 + "Port "
+
+ def get(self, id):
+ host_id = id[:id.rindex('-')]
+ results = self.get_objects_list_for_id(
+ """
+ SELECT *
+ FROM {}.agents
+ WHERE host = %s AND agent_type = 'Open vSwitch agent'
+ """.format(self.neutron_db),
+ "vedge", host_id)
+ host = self.inv.get_by_id(self.get_env(), host_id)
+ if not host:
+ self.log.error("unable to find host in inventory: %s", host_id)
+ return []
+ host_types = host["host_type"]
+ if "Network" not in host_types and "Compute" not in host_types:
+ return []
+ vsctl_lines = self.run_fetch_lines("ovs-vsctl show", host["id"])
+ ports = self.fetch_ports(host, vsctl_lines)
+ for doc in results:
+ doc["name"] = doc["host"] + "-OVS"
+ doc["configurations"] = json.loads(doc["configurations"])
+ doc["ports"] = ports
+ doc["tunnel_ports"] = self.get_overlay_tunnels(doc, vsctl_lines)
+ return results
+
+ def fetch_ports(self, host, vsctl_lines):
+ host_types = host["host_type"]
+ if "Network" not in host_types and "Compute" not in host_types:
+ return {}
+ ports = self.fetch_ports_from_dpctl(host["id"])
+ self.fetch_port_tags_from_vsctl(vsctl_lines, ports)
+ return ports
+
+ def fetch_ports_from_dpctl(self, host_id):
+ cmd = "ovs-dpctl show"
+ lines = self.run_fetch_lines(cmd, host_id)
+ ports = {}
+ for l in lines:
+ port_matches = self.port_re.match(l)
+ if not port_matches:
+ continue
+ port = {}
+ id = port_matches.group(1)
+ name = port_matches.group(2)
+ is_internal = port_matches.group(3) == " (internal)"
+ port["internal"] = is_internal
+ port["id"] = id
+ port["name"] = name
+ ports[name] = port
+ return ports
+
+ # from ovs-vsctl, fetch tags of ports
+ # example format of ovs-vsctl output for a specific port:
+ # Port "tap9f94d28e-7b"
+ # tag: 5
+ # Interface "tap9f94d28e-7b"
+ # type: internal
+ def fetch_port_tags_from_vsctl(self, vsctl_lines, ports):
+ port = None
+ for l in vsctl_lines:
+ if l.startswith(self.port_line_header_prefix):
+ port = None
+ port_name = l[len(self.port_line_header_prefix):]
+ # remove quotes from port name
+ if '"' in port_name:
+ port_name = port_name[1:][:-1]
+ if port_name in ports:
+ port = ports[port_name]
+ continue
+ if not port:
+ continue
+ if l.startswith(" " * 12 + "tag: "):
+ port["tag"] = l[l.index(":") + 2:]
+ ports[port["name"]] = port
+ return ports
+
+ def get_overlay_tunnels(self, doc, vsctl_lines):
+ if doc["agent_type"] != "Open vSwitch agent":
+ return {}
+ if "tunneling_ip" not in doc["configurations"]:
+ return {}
+ if not doc["configurations"]["tunneling_ip"]:
+ self.get_bridge_pnic(doc)
+ return {}
+
+ # read the 'br-tun' interface ports
+ # this will be used later in the OTEP
+ tunnel_bridge_header = " " * 4 + "Bridge br-tun"
+ try:
+ br_tun_loc = vsctl_lines.index(tunnel_bridge_header)
+ except ValueError:
+ return []
+ lines = vsctl_lines[br_tun_loc + 1:]
+ tunnel_ports = {}
+ port = None
+ for l in lines:
+            # if the line is indented by 4 spaces or fewer,
+            # the br-tun section has ended, so stop
+ if not l.startswith(" " * 5):
+ break
+ if l.startswith(self.port_line_header_prefix):
+ if port:
+ tunnel_ports[port["name"]] = port
+ name = l[len(self.port_line_header_prefix):].strip('" ')
+ port = {"name": name}
+ elif port and l.startswith(" " * 12 + "Interface "):
+ interface = l[10 + len("Interface ") + 1:].strip('" ')
+ port["interface"] = interface
+ elif port and l.startswith(" " * 16):
+ colon_pos = l.index(":")
+ attr = l[:colon_pos].strip()
+ val = l[colon_pos + 2:].strip('" ')
+ if attr == "options":
+ opts = val.strip('{}')
+ val = {}
+ for opt in opts.split(", "):
+ opt_name = opt[:opt.index("=")]
+ opt_val = opt[opt.index("=") + 1:].strip('" ')
+ val[opt_name] = opt_val
+ port[attr] = val
+ if port:
+ tunnel_ports[port["name"]] = port
+ return tunnel_ports
+
+ def get_bridge_pnic(self, doc):
+ conf = doc["configurations"]
+ if "bridge_mappings" not in conf or not conf["bridge_mappings"]:
+ return
+ for v in conf["bridge_mappings"].values(): br = v
+ ifaces_list_lines = self.run_fetch_lines("ovs-vsctl list-ifaces " + br,
+ doc["host"])
+ br_pnic_postfix = br + "--br-"
+ interface = ""
+ for l in ifaces_list_lines:
+ if l.startswith(br_pnic_postfix):
+ interface = l[len(br_pnic_postfix):]
+ break
+ if not interface:
+ return
+ doc["pnic"] = interface
+ # add port ID to pNIC
+ pnic = self.inv.find_items({
+ "environment": self.get_env(),
+ "type": "pnic",
+ "host": doc["host"],
+ "name": interface
+ }, get_single=True)
+ if not pnic:
+ return
+ port = doc["ports"][interface]
+ pnic["port_id"] = port["id"]
+ self.inv.set(pnic)
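The br-tun parsing in get_overlay_tunnels() is driven purely by indentation depth; a rough standalone sketch of the option extraction, using a hypothetical 'ovs-vsctl show' excerpt:

    # Sample output is hypothetical and trimmed; illustrative only.
    VSCTL = [
        "    Bridge br-tun",
        '        Port "vxlan-0a000002"',
        '            Interface "vxlan-0a000002"',
        "                type: vxlan",
        '                options: {df_default="true", in_key=flow, remote_ip="10.0.0.2"}',
    ]
    ports = {}
    port = None
    for l in VSCTL[1:]:
        if l.startswith(" " * 8 + "Port "):
            port = {"name": l.split("Port ")[1].strip('" ')}
            ports[port["name"]] = port
        elif port and l.startswith(" " * 16):
            attr, _, val = l.strip().partition(": ")
            if attr == "options":
                val = dict(opt.split("=", 1) for opt in val.strip("{}").split(", "))
                val = {k: v.strip('"') for k, v in val.items()}
            port[attr] = val
    print(ports["vxlan-0a000002"]["options"]["remote_ip"])  # 10.0.0.2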
diff --git a/app/discover/fetchers/db/db_fetch_vedges_vpp.py b/app/discover/fetchers/db/db_fetch_vedges_vpp.py
new file mode 100644
index 0000000..a1c659e
--- /dev/null
+++ b/app/discover/fetchers/db/db_fetch_vedges_vpp.py
@@ -0,0 +1,56 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.cli.cli_access import CliAccess
+from discover.fetchers.db.db_access import DbAccess
+from utils.inventory_mgr import InventoryMgr
+from utils.singleton import Singleton
+
+
+class DbFetchVedgesVpp(DbAccess, CliAccess, metaclass=Singleton):
+ def __init__(self):
+ super().__init__()
+ self.inv = InventoryMgr()
+
+ def get(self, id):
+ host_id = id[:id.rindex('-')]
+ vedge = {
+ 'host': host_id,
+ 'id': host_id + '-VPP',
+ 'name': 'VPP-' + host_id,
+ 'agent_type': 'VPP'
+ }
+ ver = self.run_fetch_lines('vppctl show ver', host_id)
+ if ver:
+ ver = ver[0]
+ vedge['binary'] = ver[:ver.index(' ', ver.index(' ') + 1)]
+ host = self.inv.get_by_id(self.get_env(), host_id)
+ if not host:
+ self.log.error("unable to find host in inventory: %s", host_id)
+ return []
+ host_types = host["host_type"]
+ if "Network" not in host_types and "Compute" not in host_types:
+ return []
+ interfaces = self.run_fetch_lines('vppctl show int', host_id)
+ vedge['ports'] = self.fetch_ports(interfaces)
+ return [vedge]
+
+ def fetch_ports(self, interfaces):
+ ports = {}
+ for i in interfaces:
+ if not i or i.startswith(' '):
+ continue
+ parts = i.split()
+ port = {
+ 'id': parts[1],
+ 'state': parts[2],
+ 'name': parts[0]
+ }
+ ports[port['name']] = port
+ return ports
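Two small parsing steps above are easy to check by hand; a standalone sketch with hypothetical 'vppctl show ver' and 'vppctl show int' output:

    # Output samples are hypothetical; illustrative only.
    ver = "vpp v17.04-release built by root on ubuntu1604 at some date"
    binary = ver[:ver.index(' ', ver.index(' ') + 1)]  # up to the second space
    print(binary)  # vpp v17.04-release

    interfaces = [
        "              Name               Idx       State          Counter",
        "GigabitEthernet0/8/0              1         up",
        "  rx packets                  1234",
        "local0                            0        down",
    ]
    ports = {}
    for i in interfaces:
        if not i or i.startswith(' '):  # skip header and per-counter lines
            continue
        parts = i.split()
        ports[parts[0]] = {'name': parts[0], 'id': parts[1], 'state': parts[2]}
    print(sorted(ports))  # ['GigabitEthernet0/8/0', 'local0']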
diff --git a/app/discover/fetchers/folder_fetcher.py b/app/discover/fetchers/folder_fetcher.py
new file mode 100644
index 0000000..e7bb1fa
--- /dev/null
+++ b/app/discover/fetchers/folder_fetcher.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python3
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetcher import Fetcher
+from utils.string_utils import jsonify
+
+
+class FolderFetcher(Fetcher):
+ def __init__(self, types_name, parent_type, text="", create_folder=True):
+ super(FolderFetcher, self).__init__()
+ self.types_name = types_name
+ self.parent_type = parent_type
+ self.text = text
+ self.create_folder = create_folder
+ if not self.text:
+ self.text = self.types_name.capitalize()
+
+ def get(self, id):
+ oid = id + "-" + self.types_name
+ root_obj = {
+ "id": oid,
+ "create_object": self.create_folder,
+ "name": oid,
+ "text": self.text,
+ "type": self.types_name + "_folder",
+ "parent_id": id,
+ "parent_type": self.parent_type
+ }
+ return jsonify([root_obj])
diff --git a/app/discover/find_links.py b/app/discover/find_links.py
new file mode 100644
index 0000000..0967a60
--- /dev/null
+++ b/app/discover/find_links.py
@@ -0,0 +1,30 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetcher import Fetcher
+from utils.inventory_mgr import InventoryMgr
+
+
+class FindLinks(Fetcher):
+ def __init__(self):
+ super().__init__()
+ self.inv = InventoryMgr()
+
+ def create_link(self, env, host, source, source_id, target, target_id,
+ link_type, link_name, state, link_weight,
+ source_label="", target_label="",
+ extra_attributes=None):
+ if extra_attributes is None:
+ extra_attributes = {}
+ link = self.inv.create_link(env, host,
+ source, source_id, target, target_id,
+ link_type, link_name, state, link_weight,
+ extra_attributes=extra_attributes)
+ if self.inv.monitoring_setup_manager:
+ self.inv.monitoring_setup_manager.create_setup(link)
diff --git a/app/discover/find_links_for_instance_vnics.py b/app/discover/find_links_for_instance_vnics.py
new file mode 100644
index 0000000..7e081fc
--- /dev/null
+++ b/app/discover/find_links_for_instance_vnics.py
@@ -0,0 +1,59 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.find_links import FindLinks
+
+
+class FindLinksForInstanceVnics(FindLinks):
+ def __init__(self):
+ super().__init__()
+
+ def add_links(self):
+ self.log.info("adding links of type: instance-vnic")
+ vnics = self.inv.find_items({
+ "environment": self.get_env(),
+ "type": "vnic",
+ "vnic_type": "instance_vnic"
+ })
+ for v in vnics:
+ self.add_link_for_vnic(v)
+
+ def add_link_for_vnic(self, v):
+ instance = self.inv.get_by_id(self.get_env(), v["instance_id"])
+ if "network_info" not in instance:
+ self.log.warn("add_link_for_vnic: " +
+ "network_info missing in instance: %s ",
+ instance["id"])
+ return
+ host = self.inv.get_by_id(self.get_env(), instance["host"])
+ host_types = host["host_type"]
+ if "Network" not in host_types and "Compute" not in host_types:
+ return []
+ source = instance["_id"]
+ source_id = instance["id"]
+ target = v["_id"]
+ target_id = v["id"]
+ link_type = "instance-vnic"
+ # find related network
+ network_name = None
+ network_id = None
+ for net in instance["network_info"]:
+ if net["devname"] == v["id"]:
+ network_name = net["network"]["label"]
+ network_id = net['network']['id']
+ v['network'] = network_id
+ self.inv.set(v)
+ break
+ state = "up" # TBD
+ link_weight = 0 # TBD
+ attributes = {} if not network_id else {'network': network_id}
+ self.create_link(self.get_env(), host["name"],
+ source, source_id, target, target_id,
+ link_type, network_name, state, link_weight,
+ extra_attributes=attributes)
diff --git a/app/discover/find_links_for_oteps.py b/app/discover/find_links_for_oteps.py
new file mode 100644
index 0000000..84373a4
--- /dev/null
+++ b/app/discover/find_links_for_oteps.py
@@ -0,0 +1,85 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.find_links import FindLinks
+
+
+class FindLinksForOteps(FindLinks):
+ def __init__(self):
+ super().__init__()
+
+ def add_links(self):
+ self.log.info("adding link types: " +
+ "vedge-otep, otep-vconnector, otep-pnic")
+ oteps = self.inv.find_items({
+ "environment": self.get_env(),
+ "type": "otep"
+ })
+ for otep in oteps:
+ self.add_vedge_otep_link(otep)
+ self.add_otep_vconnector_link(otep)
+ self.add_otep_pnic_link(otep)
+
+ def add_vedge_otep_link(self, otep):
+ vedge = self.inv.get_by_id(self.get_env(), otep["parent_id"])
+ source = vedge["_id"]
+ source_id = vedge["id"]
+ target = otep["_id"]
+ target_id = otep["id"]
+ link_type = "vedge-otep"
+ link_name = vedge["name"] + "-otep"
+ state = "up" # TBD
+ link_weight = 0 # TBD
+ self.create_link(self.get_env(), vedge["host"],
+ source, source_id, target, target_id,
+ link_type, link_name, state, link_weight)
+
+ def add_otep_vconnector_link(self, otep):
+ if "vconnector" not in otep:
+ return
+ vconnector = self.inv.find_items({
+ "environment": self.get_env(),
+ "type": "vconnector",
+ "host": otep["host"],
+ "name": otep["vconnector"]
+ }, get_single=True)
+ if not vconnector:
+ return
+ source = otep["_id"]
+ source_id = otep["id"]
+ target = vconnector["_id"]
+ target_id = vconnector["id"]
+ link_type = "otep-vconnector"
+ link_name = otep["name"] + "-" + otep["vconnector"]
+ state = "up" # TBD
+ link_weight = 0 # TBD
+ self.create_link(self.get_env(), otep["host"],
+ source, source_id, target, target_id,
+ link_type, link_name, state, link_weight)
+
+ def add_otep_pnic_link(self, otep):
+ pnic = self.inv.find_items({
+ "environment": self.get_env(),
+ "type": "pnic",
+ "host": otep["host"],
+ "IP Address": otep["ip_address"]
+ }, get_single=True)
+ if not pnic:
+ return
+ source = otep["_id"]
+ source_id = otep["id"]
+ target = pnic["_id"]
+ target_id = pnic["id"]
+ link_type = "otep-pnic"
+ link_name = otep["host"] + "pnic" + pnic["name"]
+ state = "up" # TBD
+ link_weight = 0 # TBD
+ self.create_link(self.get_env(), otep["host"],
+ source, source_id, target, target_id,
+ link_type, link_name, state, link_weight)
diff --git a/app/discover/find_links_for_pnics.py b/app/discover/find_links_for_pnics.py
new file mode 100644
index 0000000..19828d0
--- /dev/null
+++ b/app/discover/find_links_for_pnics.py
@@ -0,0 +1,58 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.find_links import FindLinks
+
+
+class FindLinksForPnics(FindLinks):
+ def __init__(self):
+ super().__init__()
+
+ def add_links(self):
+ pnics = self.inv.find_items({
+ "environment": self.get_env(),
+ "type": "pnic",
+ "pnic_type": {"$ne": "switch"} # TODO: make a more educated guess
+ })
+ for pnic in pnics:
+ self.add_pnic_network_links(pnic)
+
+ def add_pnic_network_links(self, pnic):
+ self.log.info("adding links of type: pnic-network")
+ host = pnic["host"]
+ # find ports for that host, and fetch just the network ID
+ ports = self.inv.find_items({
+ "environment": self.get_env(),
+ "type": "port",
+ "binding:host_id": host
+ }, {"network_id": 1, "id": 1})
+ networks = {}
+ for port in ports:
+ networks[port["network_id"]] = 1
+ for network_id in networks.keys():
+ network = self.inv.get_by_id(self.get_env(), network_id)
+ if network == []:
+ continue
+ source = pnic["_id"]
+ source_id = pnic["id"]
+ target = network["_id"]
+ target_id = network["id"]
+ link_type = "pnic-network"
+ link_name = "Segment-" + str(network["provider:segmentation_id"]) \
+ if "provider:segmentation_id" in network \
+ else "Segment-None"
+ state = "up" if pnic["Link detected"] == "yes" else "down"
+ link_weight = 0 # TBD
+ source_label = "port-" + pnic["port_id"] if "port_id" in pnic \
+ else ""
+ self.create_link(self.get_env(), host,
+ source, source_id, target, target_id,
+ link_type, link_name, state, link_weight,
+ source_label,
+ extra_attributes={"network": target_id})
diff --git a/app/discover/find_links_for_vconnectors.py b/app/discover/find_links_for_vconnectors.py
new file mode 100644
index 0000000..3d5cdb0
--- /dev/null
+++ b/app/discover/find_links_for_vconnectors.py
@@ -0,0 +1,88 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.find_links import FindLinks
+
+
+class FindLinksForVconnectors(FindLinks):
+ def __init__(self):
+ super().__init__()
+
+ def add_links(self):
+ vconnectors = self.inv.find_items({
+ "environment": self.get_env(),
+ "type": "vconnector"
+ })
+ self.log.info("adding links of type: vnic-vconnector, vconnector-pnic")
+ for vconnector in vconnectors:
+ for interface in vconnector["interfaces_names"]:
+ self.add_vnic_vconnector_link(vconnector, interface)
+ self.add_vconnector_pnic_link(vconnector, interface)
+
+ def add_vnic_vconnector_link(self, vconnector, interface_name):
+ mechanism_drivers = self.configuration.environment['mechanism_drivers']
+ is_ovs = mechanism_drivers and mechanism_drivers[0] == 'OVS'
+ if is_ovs:
+ # interface ID for OVS
+ vnic = self.inv.get_by_id(self.get_env(), interface_name)
+ else:
+ # interface ID for VPP - match interface MAC address to vNIC MAC
+ interface = vconnector['interfaces'][interface_name]
+ if not interface or 'mac_address' not in interface:
+ return
+ vnic_mac = interface['mac_address']
+ vnic = self.inv.get_by_field(self.get_env(), 'vnic',
+ 'mac_address', vnic_mac,
+ get_single=True)
+ if not vnic:
+ return
+ host = vnic["host"]
+ source = vnic["_id"]
+ source_id = vnic["id"]
+ target = vconnector["_id"]
+ target_id = vconnector["id"]
+ link_type = "vnic-vconnector"
+ link_name = vnic["mac_address"]
+ state = "up" # TBD
+ link_weight = 0 # TBD
+ attributes = {}
+ if 'network' in vnic:
+ attributes = {'network': vnic['network']}
+ vconnector['network'] = vnic['network']
+ self.inv.set(vconnector)
+ self.create_link(self.get_env(), host,
+ source, source_id, target, target_id,
+ link_type, link_name, state, link_weight,
+ extra_attributes=attributes)
+
+ def add_vconnector_pnic_link(self, vconnector, interface):
+ ifname = interface['name'] if isinstance(interface, dict) else interface
+ if "." in ifname:
+ ifname = ifname[:ifname.index(".")]
+ host = vconnector["host"]
+ pnic = self.inv.find_items({
+ "environment": self.get_env(),
+ "type": "pnic",
+ "host": vconnector["host"],
+ "name": ifname
+ }, get_single=True)
+ if not pnic:
+ return
+ source = vconnector["_id"]
+ source_id = vconnector["id"]
+ target = pnic["_id"]
+ target_id = pnic["id"]
+ link_type = "vconnector-pnic"
+ link_name = pnic["name"]
+ state = "up" # TBD
+ link_weight = 0 # TBD
+ self.create_link(self.get_env(), host,
+ source, source_id,
+ target, target_id,
+ link_type, link_name, state, link_weight)
diff --git a/app/discover/find_links_for_vedges.py b/app/discover/find_links_for_vedges.py
new file mode 100644
index 0000000..1235074
--- /dev/null
+++ b/app/discover/find_links_for_vedges.py
@@ -0,0 +1,124 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.find_links import FindLinks
+
+
+class FindLinksForVedges(FindLinks):
+ def __init__(self):
+ super().__init__()
+
+ def add_links(self):
+ self.log.info("adding link types: " +
+ "vnic-vedge, vconnector-vedge, vedge-pnic")
+ vedges = self.inv.find_items({
+ "environment": self.get_env(),
+ "type": "vedge"
+ })
+ for vedge in vedges:
+ ports = vedge["ports"]
+ for p in ports.values():
+ self.add_link_for_vedge(vedge, p)
+
+ def add_link_for_vedge(self, vedge, port):
+ vnic = self.inv.get_by_id(self.get_env(),
+ vedge['host'] + '-' + port["name"])
+ if not vnic:
+ self.find_matching_vconnector(vedge, port)
+ self.find_matching_pnic(vedge, port)
+ return
+ source = vnic["_id"]
+ source_id = vnic["id"]
+ target = vedge["_id"]
+ target_id = vedge["id"]
+ link_type = "vnic-vedge"
+ link_name = vnic["name"] + "-" + vedge["name"]
+ if "tag" in port:
+ link_name += "-" + port["tag"]
+ state = "up" # TBD
+ link_weight = 0 # TBD
+ source_label = vnic["mac_address"]
+ target_label = port["id"]
+ self.create_link(self.get_env(), vedge["host"],
+ source, source_id, target, target_id,
+ link_type, link_name, state, link_weight,
+ source_label, target_label)
+
+ def find_matching_vconnector(self, vedge, port):
+ if self.configuration.has_network_plugin('VPP'):
+ vconnector_interface_name = port['name']
+ else:
+ if not port["name"].startswith("qv"):
+ return
+ base_id = port["name"][3:]
+ vconnector_interface_name = "qvb" + base_id
+ vconnector = self.inv.find_items({
+ "environment": self.get_env(),
+ "type": "vconnector",
+ "host": vedge['host'],
+ 'interfaces_names': vconnector_interface_name},
+ get_single=True)
+ if not vconnector:
+ return
+ source = vconnector["_id"]
+ source_id = vconnector["id"]
+ target = vedge["_id"]
+ target_id = vedge["id"]
+ link_type = "vconnector-vedge"
+ link_name = "port-" + port["id"]
+ if "tag" in port:
+ link_name += "-" + port["tag"]
+ state = "up" # TBD
+ link_weight = 0 # TBD
+ source_label = vconnector_interface_name
+ target_label = port["name"]
+ mac_address = "Unknown"
+ attributes = {'mac_address': mac_address}
+ for interface in vconnector['interfaces'].values():
+ if vconnector_interface_name != interface['name']:
+ continue
+ if 'mac_address' not in interface:
+ continue
+ mac_address = interface['mac_address']
+ attributes['mac_address'] = mac_address
+ break
+ if 'network' in vconnector:
+ attributes['network'] = vconnector['network']
+ self.create_link(self.get_env(), vedge["host"],
+ source, source_id, target, target_id,
+ link_type, link_name, state, link_weight,
+ source_label, target_label,
+ attributes)
+
+ def find_matching_pnic(self, vedge, port):
+ pname = port["name"]
+ if "pnic" in vedge:
+ if pname != vedge["pnic"]:
+ return
+ elif self.configuration.has_network_plugin('VPP'):
+ pass
+ pnic = self.inv.find_items({
+ "environment": self.get_env(),
+ "type": "pnic",
+ "host": vedge["host"],
+ "name": pname
+ }, get_single=True)
+ if not pnic:
+ return
+ source = vedge["_id"]
+ source_id = vedge["id"]
+ target = pnic["_id"]
+ target_id = pnic["id"]
+ link_type = "vedge-pnic"
+ link_name = "Port-" + port["id"]
+ state = "up" if pnic["Link detected"] == "yes" else "down"
+ link_weight = 0 # TBD
+ self.create_link(self.get_env(), vedge["host"],
+ source, source_id, target, target_id,
+ link_type, link_name, state, link_weight)
diff --git a/app/discover/find_links_for_vservice_vnics.py b/app/discover/find_links_for_vservice_vnics.py
new file mode 100644
index 0000000..e8a91c8
--- /dev/null
+++ b/app/discover/find_links_for_vservice_vnics.py
@@ -0,0 +1,56 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.find_links import FindLinks
+
+
+class FindLinksForVserviceVnics(FindLinks):
+ def __init__(self):
+ super().__init__()
+
+ def add_links(self, search=None):
+ self.log.info("adding links of type: vservice-vnic")
+
+ if search is None:
+ search = {}
+
+ search.update({"environment": self.get_env(),
+ "type": "vnic",
+ "vnic_type": "vservice_vnic"})
+
+ vnics = self.inv.find_items(search)
+
+ for v in vnics:
+ self.add_link_for_vnic(v)
+
+ def add_link_for_vnic(self, v):
+ host = self.inv.get_by_id(self.get_env(), v["host"])
+ if "Network" not in host["host_type"]:
+ return
+ if "network" not in v:
+ return
+ network = self.inv.get_by_id(self.get_env(), v["network"])
+ if network == []:
+ return
+ vservice_id = v["parent_id"]
+ vservice_id = vservice_id[:vservice_id.rindex('-')]
+ vservice = self.inv.get_by_id(self.get_env(), vservice_id)
+ source = vservice["_id"]
+ source_id = vservice_id
+ target = v["_id"]
+ target_id = v["id"]
+ link_type = "vservice-vnic"
+ link_name = network["name"]
+ state = "up" # TBD
+ link_weight = 0 # TBD
+ self.create_link(self.get_env(), v["host"],
+ source, source_id,
+ target, target_id,
+ link_type, link_name, state, link_weight,
+ extra_attributes={'network': v['network']})
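The vService document ID is recovered here by trimming the vNIC's parent_id at its last '-' separator. The exact parent_id layout is an assumption inferred from that trimming logic, illustrated by this small sketch:

    # Illustration only - actual parent_id values depend on how
    # vservice vNIC folders are named in the inventory.
    parent_id = "qdhcp-3f2a9c-vnics"
    vservice_id = parent_id[:parent_id.rindex('-')]   # -> "qdhcp-3f2a9c"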
diff --git a/app/discover/manager.py b/app/discover/manager.py
new file mode 100644
index 0000000..e37bb31
--- /dev/null
+++ b/app/discover/manager.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python3
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from abc import ABC, abstractmethod
+
+from utils.logging.file_logger import FileLogger
+from utils.logging.full_logger import FullLogger
+from utils.mongo_access import MongoAccess
+
+
+class Manager(ABC):
+
+ MIN_INTERVAL = 0.1 # To prevent needlessly frequent scans
+
+ def __init__(self, log_directory: str = None,
+ mongo_config_file: str = None):
+ super().__init__()
+ if log_directory:
+ FileLogger.LOG_DIRECTORY = log_directory
+ MongoAccess.config_file = mongo_config_file
+ self.log = FullLogger()
+ self.conf = None
+ self.inv = None
+ self.collection = None
+ self._update_document = None
+ self.interval = self.MIN_INTERVAL
+
+ @abstractmethod
+ def configure(self):
+ pass
+
+ @abstractmethod
+ def do_action(self):
+ pass
+
+ def run(self):
+ self.configure()
+ self.do_action()
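Manager is the abstract base for the long-running managers in this patch (ScanManager below subclasses it): run() simply calls configure() and then do_action(), both of which a subclass must implement. A minimal sketch of a subclass, with the class name invented for illustration and assuming a reachable MongoDB for the logger:

    from discover.manager import Manager


    class NoopManager(Manager):
        """Illustrative subclass, not part of this patch."""

        def configure(self):
            self.log.info("nothing to configure")

        def do_action(self):
            self.log.info("nothing to do")


    NoopManager().run()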
diff --git a/app/discover/monitoring_mgr.py b/app/discover/monitoring_mgr.py
new file mode 100644
index 0000000..f3f737f
--- /dev/null
+++ b/app/discover/monitoring_mgr.py
@@ -0,0 +1,10 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+#moved
diff --git a/app/discover/network_agents_list.py b/app/discover/network_agents_list.py
new file mode 100644
index 0000000..c1c1b36
--- /dev/null
+++ b/app/discover/network_agents_list.py
@@ -0,0 +1,23 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from utils.mongo_access import MongoAccess
+
+
+class NetworkAgentsList(MongoAccess):
+ def __init__(self):
+ super(NetworkAgentsList, self).__init__()
+ self.list = MongoAccess.db["network_agent_types"]
+
+ def get_type(self, type):
+ matches = self.list.find({"type": type})
+ for doc in matches:
+ doc["_id"] = str(doc["_id"])
+ return doc
+ return {}
diff --git a/app/discover/plugins/__init__.py b/app/discover/plugins/__init__.py
new file mode 100644
index 0000000..1e85a2a
--- /dev/null
+++ b/app/discover/plugins/__init__.py
@@ -0,0 +1,10 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
diff --git a/app/discover/scan.py b/app/discover/scan.py
new file mode 100755
index 0000000..72184ec
--- /dev/null
+++ b/app/discover/scan.py
@@ -0,0 +1,324 @@
+#!/usr/bin/env python3
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
+# Scan an object and insert/update in the inventory
+
+# phase 2: either scan default environment, or scan specific object
+
+import argparse
+import sys
+
+from discover.configuration import Configuration
+from discover.fetcher import Fetcher
+from discover.scan_error import ScanError
+from discover.scanner import Scanner
+from monitoring.setup.monitoring_setup_manager import MonitoringSetupManager
+from utils.constants import EnvironmentFeatures
+from utils.mongo_access import MongoAccess
+from utils.exceptions import ScanArgumentsError
+from utils.inventory_mgr import InventoryMgr
+from utils.ssh_connection import SshConnection
+from utils.util import setup_args
+
+
+class ScanPlan:
+ """
+ @DynamicAttrs
+ """
+
+ # Each tuple of COMMON_ATTRIBUTES consists of:
+ # attr_name, arg_name and def_key
+ #
+ # attr_name - name of class attribute to be set
+ # arg_name - corresponding name of argument (equal to attr_name if not set)
+ # def_key - corresponding key in DEFAULTS (equal to attr_name if not set)
+ COMMON_ATTRIBUTES = (("loglevel",),
+ ("inventory_only",),
+ ("links_only",),
+ ("cliques_only",),
+ ("monitoring_setup_only",),
+ ("clear",),
+ ("clear_all",),
+ ("object_type", "type", "type"),
+ ("env",),
+ ("object_id", "id", "env"),
+ ("parent_id",),
+ ("type_to_scan", "parent_type", "parent_type"),
+ ("id_field",),
+ ("scan_self",),
+ ("child_type", "type", "type"))
+
+ def __init__(self, args=None):
+ self.obj = None
+ self.scanner_type = None
+ self.args = args
+ for attribute in self.COMMON_ATTRIBUTES:
+ setattr(self, attribute[0], None)
+
+ if isinstance(args, dict):
+ self._init_from_dict()
+ else:
+ self._init_from_args()
+ self._validate_args()
+
+ def _validate_args(self):
+ errors = []
+ if (self.inventory_only and self.links_only) \
+ or (self.inventory_only and self.cliques_only) \
+ or (self.links_only and self.cliques_only):
+ errors.append("Only one of (inventory_only, links_only, "
+ "cliques_only) can be True.")
+ if errors:
+ raise ScanArgumentsError("\n".join(errors))
+
+ def _set_arg_from_dict(self, attribute_name, arg_name=None,
+ default_key=None):
+ default_attr = default_key if default_key else attribute_name
+ setattr(self, attribute_name,
+ self.args.get(arg_name if arg_name else attribute_name,
+ ScanController.DEFAULTS[default_attr]))
+
+ def _set_arg_from_cmd(self, attribute_name, arg_name=None):
+ setattr(self,
+ attribute_name,
+ getattr(self.args, arg_name if arg_name else attribute_name))
+
+ def _set_arg_from_form(self, attribute_name, arg_name=None,
+ default_key=None):
+ default_attr = default_key if default_key else attribute_name
+ setattr(self,
+ attribute_name,
+ self.args.getvalue(arg_name if arg_name else attribute_name,
+ ScanController.DEFAULTS[default_attr]))
+
+ def _init_from_dict(self):
+ for arg in self.COMMON_ATTRIBUTES:
+ self._set_arg_from_dict(*arg)
+ self.child_id = None
+
+ def _init_from_args(self):
+ for arg in self.COMMON_ATTRIBUTES:
+ self._set_arg_from_cmd(*arg[:2])
+ self.child_id = None
+
+
+class ScanController(Fetcher):
+ DEFAULTS = {
+ "env": "",
+ "mongo_config": "",
+ "type": "",
+ "inventory": "inventory",
+ "scan_self": False,
+ "parent_id": "",
+ "parent_type": "",
+ "id_field": "id",
+ "loglevel": "INFO",
+ "inventory_only": False,
+ "links_only": False,
+ "cliques_only": False,
+ "monitoring_setup_only": False,
+ "clear": False,
+ "clear_all": False
+ }
+
+ def __init__(self):
+ super().__init__()
+ self.conf = None
+ self.inv = None
+
+ def get_args(self):
+ # try to read scan plan from command line parameters
+ parser = argparse.ArgumentParser()
+ parser.add_argument("-m", "--mongo_config", nargs="?", type=str,
+ default=self.DEFAULTS["mongo_config"],
+ help="name of config file " +
+ "with MongoDB server access details")
+ parser.add_argument("-e", "--env", nargs="?", type=str,
+ default=self.DEFAULTS["env"],
+ help="name of environment to scan \n"
+ "(default: " + self.DEFAULTS["env"] + ")")
+ parser.add_argument("-t", "--type", nargs="?", type=str,
+ default=self.DEFAULTS["type"],
+ help="type of object to scan \n"
+ "(default: environment)")
+ parser.add_argument("-y", "--inventory", nargs="?", type=str,
+ default=self.DEFAULTS["inventory"],
+ help="name of inventory collection \n"
+ "(default: 'inventory')")
+ parser.add_argument("-s", "--scan_self", action="store_true",
+ help="scan changes to a specific object \n"
+ "(default: False)")
+ parser.add_argument("-i", "--id", nargs="?", type=str,
+ default=self.DEFAULTS["env"],
+ help="ID of object to scan (when scan_self=true)")
+ parser.add_argument("-p", "--parent_id", nargs="?", type=str,
+ default=self.DEFAULTS["parent_id"],
+ help="ID of parent object (when scan_self=true)")
+ parser.add_argument("-a", "--parent_type", nargs="?", type=str,
+ default=self.DEFAULTS["parent_type"],
+ help="type of parent object (when scan_self=true)")
+ parser.add_argument("-f", "--id_field", nargs="?", type=str,
+ default=self.DEFAULTS["id_field"],
+ help="name of ID field (when scan_self=true) \n"
+ "(default: 'id', use 'name' for projects)")
+ parser.add_argument("-l", "--loglevel", nargs="?", type=str,
+ default=self.DEFAULTS["loglevel"],
+ help="logging level \n(default: '{}')"
+ .format(self.DEFAULTS["loglevel"]))
+ parser.add_argument("--clear", action="store_true",
+ help="clear all data related to "
+ "the specified environment prior to scanning\n"
+ "(default: False)")
+ parser.add_argument("--clear_all", action="store_true",
+ help="clear all data prior to scanning\n"
+ "(default: False)")
+ parser.add_argument("--monitoring_setup_only", action="store_true",
+ help="do only monitoring setup deployment \n"
+ "(default: False)")
+
+ # At most one of these arguments may be present
+ scan_only_group = parser.add_mutually_exclusive_group()
+ scan_only_group.add_argument("--inventory_only", action="store_true",
+ help="do only scan to inventory\n" +
+ "(default: False)")
+ scan_only_group.add_argument("--links_only", action="store_true",
+ help="do only links creation \n" +
+ "(default: False)")
+ scan_only_group.add_argument("--cliques_only", action="store_true",
+ help="do only cliques creation \n" +
+ "(default: False)")
+
+ return parser.parse_args()
+
+ def get_scan_plan(self, args):
+ # PyCharm type checker can't reliably check types of document
+ # noinspection PyTypeChecker
+ return self.prepare_scan_plan(ScanPlan(args))
+
+ def prepare_scan_plan(self, plan):
+ # Find out object type if not specified in arguments
+ if not plan.object_type:
+ if not plan.object_id:
+ plan.object_type = "environment"
+ else:
+ # If we scan a specific object, it has to exist in db
+ scanned_object = self.inv.get_by_id(plan.env, plan.object_id)
+ if not scanned_object:
+ exc_msg = "No object found with specified id: '{}'" \
+ .format(plan.object_id)
+ raise ScanArgumentsError(exc_msg)
+ plan.object_type = scanned_object["type"]
+ plan.parent_id = scanned_object["parent_id"]
+ plan.type_to_scan = scanned_object["parent_type"]
+
+ class_module = plan.object_type
+ if not plan.scan_self:
+ plan.scan_self = plan.object_type != "environment"
+
+ plan.object_type = plan.object_type.title().replace("_", "")
+
+ if not plan.scan_self:
+ plan.child_type = None
+ else:
+ plan.child_id = plan.object_id
+ plan.object_id = plan.parent_id
+ if plan.type_to_scan.endswith("_folder"):
+ class_module = plan.child_type + "s_root"
+ else:
+ class_module = plan.type_to_scan
+ plan.object_type = class_module.title().replace("_", "")
+
+ if class_module == "environment":
+ plan.obj = {"id": plan.env}
+ else:
+ # fetch object from inventory
+ obj = self.inv.get_by_id(plan.env, plan.object_id)
+ if not obj:
+ raise ValueError("No match for object ID: {}"
+ .format(plan.object_id))
+ plan.obj = obj
+
+ plan.scanner_type = "Scan" + plan.object_type
+ return plan
+
+ def run(self, args: dict = None):
+ args = setup_args(args, self.DEFAULTS, self.get_args)
+ # After this setup we assume args dictionary has all keys
+ # defined in self.DEFAULTS
+
+ try:
+ MongoAccess.set_config_file(args['mongo_config'])
+ self.inv = InventoryMgr()
+ self.inv.set_collections(args['inventory'])
+ self.conf = Configuration()
+ except FileNotFoundError as e:
+ return False, 'Mongo configuration file not found: {}'\
+ .format(str(e))
+
+ scan_plan = self.get_scan_plan(args)
+ if scan_plan.clear or scan_plan.clear_all:
+ self.inv.clear(scan_plan)
+ self.conf.log.set_loglevel(scan_plan.loglevel)
+
+ env_name = scan_plan.env
+ self.conf.use_env(env_name)
+
+ # generate ScanObject Class and instance.
+ scanner = Scanner()
+ scanner.set_env(env_name)
+
+ # decide what scanning operations to do
+ inventory_only = scan_plan.inventory_only
+ links_only = scan_plan.links_only
+ cliques_only = scan_plan.cliques_only
+ monitoring_setup_only = scan_plan.monitoring_setup_only
+ run_all = False if inventory_only or links_only or cliques_only \
+ or monitoring_setup_only else True
+
+ # setup monitoring server
+ monitoring = \
+ self.inv.is_feature_supported(env_name,
+ EnvironmentFeatures.MONITORING)
+ if monitoring:
+ self.inv.monitoring_setup_manager = \
+ MonitoringSetupManager(env_name)
+ self.inv.monitoring_setup_manager.server_setup()
+
+ # do the actual scanning
+ try:
+ if inventory_only or run_all:
+ scanner.run_scan(
+ scan_plan.scanner_type,
+ scan_plan.obj,
+ scan_plan.id_field,
+ scan_plan.child_id,
+ scan_plan.child_type)
+ if links_only or run_all:
+ scanner.scan_links()
+ if cliques_only or run_all:
+ scanner.scan_cliques()
+ if monitoring:
+ if monitoring_setup_only:
+ self.inv.monitoring_setup_manager.simulate_track_changes()
+ if not (inventory_only or links_only or cliques_only):
+ scanner.deploy_monitoring_setup()
+ except ScanError as e:
+ return False, "scan error: " + str(e)
+ SshConnection.disconnect_all()
+ return True, 'ok'
+
+
+if __name__ == '__main__':
+ scan_manager = ScanController()
+ ret, msg = scan_manager.run()
+ if not ret:
+ scan_manager.log.error(msg)
+ sys.exit(0 if ret else 1)
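In addition to the command-line entry point, ScanController.run() accepts a plain dict of arguments; setup_args() fills any missing keys from DEFAULTS. A sketch of a programmatic full-environment scan, with the environment name and Mongo config path as placeholders:

    from discover.scan import ScanController

    args = {
        "env": "my-environment",   # placeholder: an environment known to Calipso
        "mongo_config": "",        # default MongoDB access config
        "inventory": "inventory",
        "loglevel": "INFO",
        "clear": True,             # drop previous data for this environment first
    }
    ok, message = ScanController().run(args)
    print(ok, message)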
diff --git a/app/discover/scan_error.py b/app/discover/scan_error.py
new file mode 100644
index 0000000..2e04275
--- /dev/null
+++ b/app/discover/scan_error.py
@@ -0,0 +1,11 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+class ScanError(Exception):
+ pass
diff --git a/app/discover/scan_manager.py b/app/discover/scan_manager.py
new file mode 100644
index 0000000..b6ad782
--- /dev/null
+++ b/app/discover/scan_manager.py
@@ -0,0 +1,294 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import argparse
+import datetime
+
+import time
+
+import pymongo
+from functools import partial
+
+from discover.manager import Manager
+from utils.constants import ScanStatus, EnvironmentFeatures
+from utils.exceptions import ScanArgumentsError
+from utils.inventory_mgr import InventoryMgr
+from utils.logging.file_logger import FileLogger
+from utils.mongo_access import MongoAccess
+from discover.scan import ScanController
+
+
+class ScanManager(Manager):
+
+ DEFAULTS = {
+ "mongo_config": "",
+ "scans": "scans",
+ "scheduled_scans": "scheduled_scans",
+ "environments": "environments_config",
+ "interval": 1,
+ "loglevel": "INFO"
+ }
+
+ def __init__(self):
+ self.args = self.get_args()
+ super().__init__(log_directory=self.args.log_directory,
+ mongo_config_file=self.args.mongo_config)
+ self.db_client = None
+ self.environments_collection = None
+
+ @staticmethod
+ def get_args():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("-m", "--mongo_config", nargs="?", type=str,
+ default=ScanManager.DEFAULTS["mongo_config"],
+ help="Name of config file " +
+ "with MongoDB server access details")
+ parser.add_argument("-c", "--scans_collection", nargs="?", type=str,
+ default=ScanManager.DEFAULTS["scans"],
+ help="Scans collection to read from")
+ parser.add_argument("-s", "--scheduled_scans_collection", nargs="?",
+ type=str,
+ default=ScanManager.DEFAULTS["scheduled_scans"],
+ help="Scans collection to read from")
+ parser.add_argument("-e", "--environments_collection", nargs="?",
+ type=str,
+ default=ScanManager.DEFAULTS["environments"],
+ help="Environments collection to update "
+ "after scans")
+ parser.add_argument("-i", "--interval", nargs="?", type=float,
+ default=ScanManager.DEFAULTS["interval"],
+ help="Interval between collection polls"
+ "(must be more than {} seconds)"
+ .format(ScanManager.MIN_INTERVAL))
+ parser.add_argument("-l", "--loglevel", nargs="?", type=str,
+ default=ScanManager.DEFAULTS["loglevel"],
+ help="Logging level \n(default: '{}')"
+ .format(ScanManager.DEFAULTS["loglevel"]))
+ parser.add_argument("-d", "--log_directory", nargs="?", type=str,
+ default=FileLogger.LOG_DIRECTORY,
+ help="File logger directory \n(default: '{}')"
+ .format(FileLogger.LOG_DIRECTORY))
+ args = parser.parse_args()
+ return args
+
+ def configure(self):
+ self.db_client = MongoAccess()
+ self.inv = InventoryMgr()
+ self.inv.set_collections()
+ self.scans_collection = self.db_client.db[self.args.scans_collection]
+ self.scheduled_scans_collection = \
+ self.db_client.db[self.args.scheduled_scans_collection]
+ self.environments_collection = \
+ self.db_client.db[self.args.environments_collection]
+ self._update_document = \
+ partial(MongoAccess.update_document, self.scans_collection)
+ self.interval = max(self.MIN_INTERVAL, self.args.interval)
+ self.log.set_loglevel(self.args.loglevel)
+
+ self.log.info("Started ScanManager with following configuration:\n"
+ "Mongo config file path: {0.args.mongo_config}\n"
+ "Scans collection: {0.scans_collection.name}\n"
+ "Environments collection: "
+ "{0.environments_collection.name}\n"
+ "Polling interval: {0.interval} second(s)"
+ .format(self))
+
+ def _build_scan_args(self, scan_request: dict):
+ args = {
+ 'mongo_config': self.args.mongo_config
+ }
+
+ def set_arg(name_from: str, name_to: str = None):
+ if name_to is None:
+ name_to = name_from
+ val = scan_request.get(name_from)
+ if val:
+ args[name_to] = val
+
+ set_arg("object_id", "id")
+ set_arg("log_level", "loglevel")
+ set_arg("environment", "env")
+ set_arg("scan_only_inventory", "inventory_only")
+ set_arg("scan_only_links", "links_only")
+ set_arg("scan_only_cliques", "cliques_only")
+ set_arg("inventory")
+ set_arg("clear")
+ set_arg("clear_all")
+
+ return args
+
+ def _finalize_scan(self, scan_request: dict, status: ScanStatus,
+ scanned: bool):
+ scan_request['status'] = status.value
+ self._update_document(scan_request)
+ # If no object id is present, it's a full env scan.
+ # We need to update environments collection
+ # to reflect the scan results.
+ if not scan_request.get('id'):
+ self.environments_collection\
+ .update_one(filter={'name': scan_request.get('environment')},
+ update={'$set': {'scanned': scanned}})
+
+ def _fail_scan(self, scan_request: dict):
+ self._finalize_scan(scan_request, ScanStatus.FAILED, False)
+
+ def _complete_scan(self, scan_request: dict):
+ self._finalize_scan(scan_request, ScanStatus.COMPLETED, True)
+
+ # PyCharm type checker can't reliably check types of document
+ # noinspection PyTypeChecker
+ def _clean_up(self):
+ # Find and fail all running scans
+ running_scans = list(self
+ .scans_collection
+ .find(filter={'status': ScanStatus.RUNNING.value}))
+ self.scans_collection \
+ .update_many(filter={'_id': {'$in': [scan['_id']
+ for scan
+ in running_scans]}},
+ update={'$set': {'status': ScanStatus.FAILED.value}})
+
+ # Find all environments connected to failed full env scans
+ env_scans = [scan['environment']
+ for scan in running_scans
+ if not scan.get('object_id')
+ and scan.get('environment')]
+
+ # Set 'scanned' flag in those envs to false
+ if env_scans:
+ self.environments_collection\
+ .update_many(filter={'name': {'$in': env_scans}},
+ update={'$set': {'scanned': False}})
+
+ INTERVALS = {
+ 'YEARLY': datetime.timedelta(days=365.25),
+ 'MONTHLY': datetime.timedelta(days=365.25/12),
+ 'WEEKLY': datetime.timedelta(weeks=1),
+ 'DAILY': datetime.timedelta(days=1),
+ 'HOURLY': datetime.timedelta(hours=1)
+ }
+
+ def _submit_scan_request_for_schedule(self, scheduled_scan, interval, ts):
+ scans = self.scans_collection
+ new_scan = {
+ 'status': 'submitted',
+ 'log_level': scheduled_scan['log_level'],
+ 'clear': scheduled_scan['clear'],
+ 'scan_only_inventory': scheduled_scan['scan_only_inventory'],
+ 'scan_only_links': scheduled_scan['scan_only_links'],
+ 'scan_only_cliques': scheduled_scan['scan_only_cliques'],
+ 'submit_timestamp': ts,
+ 'environment': scheduled_scan['environment'],
+ 'inventory': 'inventory'
+ }
+ scans.insert_one(new_scan)
+
+ def _set_scheduled_requests_next_run(self, scheduled_scan, interval, ts):
+ scheduled_scan['scheduled_timestamp'] = ts + self.INTERVALS[interval]
+ doc_id = scheduled_scan.pop('_id')
+ self.scheduled_scans_collection.update({'_id': doc_id}, scheduled_scan)
+
+ def _prepare_scheduled_requests_for_interval(self, interval):
+ now = datetime.datetime.utcnow()
+
+ # first, submit a scan request where the scheduled time has come
+ condition = {'$and': [
+ {'freq': interval},
+ {'scheduled_timestamp': {'$lte': now}}
+ ]}
+ matches = self.scheduled_scans_collection.find(condition) \
+ .sort('scheduled_timestamp', pymongo.ASCENDING)
+ for match in matches:
+ self._submit_scan_request_for_schedule(match, interval, now)
+ self._set_scheduled_requests_next_run(match, interval, now)
+
+ # now set scheduled time where it was not set yet (new scheduled scans)
+ condition = {'$and': [
+ {'freq': interval},
+ {'scheduled_timestamp': {'$exists': False}}
+ ]}
+ matches = self.scheduled_scans_collection.find(condition)
+ for match in matches:
+ self._set_scheduled_requests_next_run(match, interval, now)
+
+ def _prepare_scheduled_requests(self):
+ # see if any scheduled request is waiting to be submitted
+ for interval in self.INTERVALS.keys():
+ self._prepare_scheduled_requests_for_interval(interval)
+
+ def do_action(self):
+ self._clean_up()
+ try:
+ while True:
+ self._prepare_scheduled_requests()
+
+ # Find a pending request that is waiting the longest time
+ results = self.scans_collection \
+ .find({'status': ScanStatus.PENDING.value,
+ 'submit_timestamp': {'$ne': None}}) \
+ .sort("submit_timestamp", pymongo.ASCENDING) \
+ .limit(1)
+
+ # If no scans are pending, sleep for some time
+ if results.count() == 0:
+ time.sleep(self.interval)
+ else:
+ scan_request = results[0]
+ if not self.inv.is_feature_supported(scan_request.get('environment'),
+ EnvironmentFeatures.SCANNING):
+ self.log.error("Scanning is not supported for env '{}'"
+ .format(scan_request.get('environment')))
+ self._fail_scan(scan_request)
+ continue
+
+ scan_request['start_timestamp'] = datetime.datetime.utcnow()
+ scan_request['status'] = ScanStatus.RUNNING.value
+ self._update_document(scan_request)
+
+ # Prepare scan arguments and run the scan with them
+ try:
+ scan_args = self._build_scan_args(scan_request)
+
+ self.log.info("Starting scan for '{}' environment"
+ .format(scan_args.get('env')))
+ self.log.debug("Scan arguments: {}".format(scan_args))
+ result, message = ScanController().run(scan_args)
+ except ScanArgumentsError as e:
+ self.log.error("Scan request '{id}' "
+ "has invalid arguments. "
+ "Errors:\n{errors}"
+ .format(id=scan_request['_id'],
+ errors=e))
+ self._fail_scan(scan_request)
+ except Exception as e:
+ self.log.exception(e)
+ self.log.error("Scan request '{}' has failed."
+ .format(scan_request['_id']))
+ self._fail_scan(scan_request)
+ else:
+ # Check if the scan returned success
+ if not result:
+ self.log.error(message)
+ self.log.error("Scan request '{}' has failed."
+ .format(scan_request['_id']))
+ self._fail_scan(scan_request)
+ continue
+
+ # update the status and timestamps.
+ self.log.info("Request '{}' has been scanned."
+ .format(scan_request['_id']))
+ end_time = datetime.datetime.utcnow()
+ scan_request['end_timestamp'] = end_time
+ self._complete_scan(scan_request)
+ finally:
+ self._clean_up()
+
+
+if __name__ == "__main__":
+ ScanManager().run()
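ScanManager polls the scans collection for documents in pending state, translates them into ScanController arguments via _build_scan_args(), and records the outcome back into the same document. A sketch of a request document that such a poll would process; all values are placeholders and the status string is assumed to match ScanStatus.PENDING.value:

    import datetime

    scan_request = {
        "status": "pending",                 # assumed value of ScanStatus.PENDING.value
        "environment": "my-environment",     # placeholder environment name
        "inventory": "inventory",
        "log_level": "INFO",
        "clear": True,
        "scan_only_inventory": False,
        "scan_only_links": False,
        "scan_only_cliques": False,
        "submit_timestamp": datetime.datetime.utcnow(),
    }
    # e.g. inserted with: db.scans.insert_one(scan_request)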
diff --git a/app/discover/scan_metadata_parser.py b/app/discover/scan_metadata_parser.py
new file mode 100644
index 0000000..df27e18
--- /dev/null
+++ b/app/discover/scan_metadata_parser.py
@@ -0,0 +1,202 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.folder_fetcher import FolderFetcher
+from utils.metadata_parser import MetadataParser
+from utils.mongo_access import MongoAccess
+from utils.util import ClassResolver
+
+
+class ScanMetadataParser(MetadataParser):
+
+ SCANNERS_PACKAGE = 'scanners_package'
+ SCANNERS_FILE = 'scanners.json'
+ SCANNERS = 'scanners'
+
+ TYPE = 'type'
+ FETCHER = 'fetcher'
+ CHILDREN_SCANNER = 'children_scanner'
+ ENVIRONMENT_CONDITION = 'environment_condition'
+ OBJECT_ID_TO_USE_IN_CHILD = 'object_id_to_use_in_child'
+
+ COMMENT = '_comment'
+
+ REQUIRED_SCANNER_ATTRIBUTES = [TYPE, FETCHER]
+ ALLOWED_SCANNER_ATTRIBUTES = [TYPE, FETCHER, CHILDREN_SCANNER,
+ ENVIRONMENT_CONDITION,
+ OBJECT_ID_TO_USE_IN_CHILD]
+
+ MECHANISM_DRIVER = 'mechanism_driver'
+
+ def __init__(self, inventory_mgr):
+ super().__init__()
+ self.inv = inventory_mgr
+ self.constants = {}
+
+ def get_required_fields(self):
+ return [self.SCANNERS_PACKAGE, self.SCANNERS]
+
+ def validate_fetcher(self, scanner_name: str, scan_type: dict,
+ type_index: int, package: str):
+ fetcher = scan_type.get(self.FETCHER, '')
+ if not fetcher:
+ self.add_error('missing or empty fetcher in scanner {} type #{}'
+ .format(scanner_name, str(type_index)))
+ elif isinstance(fetcher, str):
+ try:
+ module_name = ClassResolver.get_module_file_by_class_name(fetcher)
+ fetcher_package = module_name.split("_")[0]
+ if package:
+ fetcher_package = ".".join((package, fetcher_package))
+ instance = ClassResolver.get_instance_of_class(package_name=fetcher_package,
+ module_name=module_name,
+ class_name=fetcher)
+ except ValueError:
+ instance = None
+ if not instance:
+ self.add_error('failed to find fetcher class {} in scanner {}'
+ ' type #{}'
+ .format(fetcher, scanner_name, type_index))
+ scan_type[self.FETCHER] = instance
+ elif isinstance(fetcher, dict):
+ is_folder = fetcher.get('folder', False)
+ if not is_folder:
+ self.add_error('scanner {} type #{}: '
+ 'only folder dict accepted in fetcher'
+ .format(scanner_name, type_index))
+ else:
+ instance = FolderFetcher(fetcher['types_name'],
+ fetcher['parent_type'],
+ fetcher.get('text', ''))
+ scan_type[self.FETCHER] = instance
+ else:
+ self.add_error('incorrect type of fetcher for scanner {} type #{}'
+ .format(scanner_name, type_index))
+
+ def validate_children_scanner(self, scanner_name: str, type_index: int,
+ scanners: dict, scan_type: dict):
+ scanner = scanners[scanner_name]
+ if 'children_scanner' in scan_type:
+ children_scanner = scan_type.get('children_scanner')
+ if not isinstance(children_scanner, str):
+ self.add_error('scanner {} type #{}: '
+ 'children_scanner must be a string'
+ .format(scanner_name, type_index))
+ elif children_scanner not in scanners:
+ self.add_error('scanner {} type #{}: '
+ 'children_scanner {} not found '
+ .format(scanner_name, type_index,
+ children_scanner))
+
+ def validate_environment_condition(self, scanner_name: str, type_index: int,
+ scanner: dict):
+ if self.ENVIRONMENT_CONDITION not in scanner:
+ return
+ condition = scanner[self.ENVIRONMENT_CONDITION]
+ if not isinstance(condition, dict):
+ self.add_error('scanner {} type #{}: condition must be dict'
+ .format(scanner_name, str(type_index)))
+ return
+ if self.MECHANISM_DRIVER in condition.keys():
+ drivers = condition[self.MECHANISM_DRIVER]
+ if not isinstance(drivers, list):
+ self.add_error('scanner {} type #{}: '
+ '{} must be a list of strings'
+ .format(scanner_name, type_index,
+ self.MECHANISM_DRIVER))
+ if not all((isinstance(driver, str) for driver in drivers)):
+ self.add_error('scanner {} type #{}: '
+ '{} must be a list of strings'
+ .format(scanner_name, type_index,
+ self.MECHANISM_DRIVER))
+ else:
+ for driver in drivers:
+ self.validate_constant(scanner_name,
+ driver,
+ 'mechanism_drivers',
+ 'mechanism drivers')
+
+ def validate_scanner(self, scanners: dict, name: str, package: str):
+ scanner = scanners.get(name)
+ if not scanner:
+ self.add_error('failed to find scanner: {}'.format(name))
+ return
+
+ # make sure only allowed attributes are supplied
+ for i in range(0, len(scanner)):
+ scan_type = scanner[i]
+ self.validate_scan_type(scanners, name, i+1, scan_type, package)
+
+ def validate_scan_type(self, scanners: dict, scanner_name: str,
+ type_index: int, scan_type: dict, package: str):
+ # keep previous error count to know if errors were detected here
+ error_count = len(self.errors)
+ # ignore comments
+ scan_type.pop(self.COMMENT, '')
+ for attribute in scan_type.keys():
+ if attribute not in self.ALLOWED_SCANNER_ATTRIBUTES:
+ self.add_error('unknown attribute {} '
+ 'in scanner {}, type #{}'
+ .format(attribute, scanner_name,
+ str(type_index)))
+
+ # make sure required attributes are supplied
+ for attribute in ScanMetadataParser.REQUIRED_SCANNER_ATTRIBUTES:
+ if attribute not in scan_type:
+ self.add_error('scanner {}, type #{}: '
+ 'missing attribute "{}"'
+ .format(scanner_name, str(type_index),
+ attribute))
+ # the following checks depend on previous checks,
+ # so return if previous checks found errors
+ if len(self.errors) > error_count:
+ return
+
+ # type must be valid object type
+ self.validate_constant(scanner_name, scan_type[self.TYPE],
+ 'scan_object_types', 'types')
+ self.validate_fetcher(scanner_name, scan_type, type_index, package)
+ self.validate_children_scanner(scanner_name, type_index, scanners,
+ scan_type)
+ self.validate_environment_condition(scanner_name, type_index,
+ scan_type)
+
+ def get_constants(self, scanner_name, items_desc, constant_type):
+ if not self.constants.get(constant_type):
+ constants = MongoAccess.db['constants']
+ values_list = constants.find_one({'name': constant_type})
+ if not values_list:
+ raise ValueError('scanner {}: '
+ 'could not find {} list in DB'
+ .format(scanner_name, items_desc))
+ self.constants[constant_type] = values_list
+ return self.constants[constant_type]
+
+ def validate_constant(self,
+ scanner_name: str,
+ value_to_check: str,
+ constant_type: str,
+ items_desc: str = None):
+ values_list = self.get_constants(scanner_name, items_desc,
+ constant_type)
+ values = [t['value'] for t in values_list['data']]
+ if value_to_check not in values:
+ self.add_error('scanner {}: value not in {}: {}'
+ .format(scanner_name, items_desc, value_to_check))
+
+ def validate_metadata(self, metadata: dict) -> bool:
+ super().validate_metadata(metadata)
+ scanners = metadata.get(self.SCANNERS, {})
+ package = metadata.get(self.SCANNERS_PACKAGE)
+ if not scanners:
+ self.add_error('no scanners found in scanners list')
+ else:
+ for name in scanners.keys():
+ self.validate_scanner(scanners, name, package)
+ return len(self.errors) == 0
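ScanMetadataParser validates a metadata document containing a scanners_package and a scanners mapping, where each scanner is a list of scan types that must carry 'type' and 'fetcher' and may add children_scanner, environment_condition and object_id_to_use_in_child. A sketch of the shape being validated; the package path, scanner names and fetcher names are invented for illustration, and 'type' values must exist in the scan_object_types constants:

    metadata = {
        "scanners_package": "discover.fetchers",   # illustrative package path
        "scanners": {
            "ScanEnvironment": [
                {
                    "type": "region",                 # must be a known object type
                    "fetcher": "ApiFetchRegions",     # resolved to a fetcher class
                    "children_scanner": "ScanRegion"  # must be another scanner key
                }
            ],
            "ScanRegion": [
                {"type": "availability_zone", "fetcher": "ApiFetchAvailabilityZones"}
            ]
        }
    }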
diff --git a/app/discover/scanner.py b/app/discover/scanner.py
new file mode 100644
index 0000000..1b7cd51
--- /dev/null
+++ b/app/discover/scanner.py
@@ -0,0 +1,253 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+# base class for scanners
+
+import json
+import queue
+import os
+import traceback
+
+from discover.clique_finder import CliqueFinder
+from discover.configuration import Configuration
+from discover.fetcher import Fetcher
+from discover.find_links_for_instance_vnics import FindLinksForInstanceVnics
+from discover.find_links_for_oteps import FindLinksForOteps
+from discover.find_links_for_pnics import FindLinksForPnics
+from discover.find_links_for_vconnectors import FindLinksForVconnectors
+from discover.find_links_for_vedges import FindLinksForVedges
+from discover.find_links_for_vservice_vnics import FindLinksForVserviceVnics
+from discover.scan_error import ScanError
+from discover.scan_metadata_parser import ScanMetadataParser
+from utils.constants import EnvironmentFeatures
+from utils.inventory_mgr import InventoryMgr
+from utils.util import ClassResolver
+
+
+class Scanner(Fetcher):
+ config = None
+ environment = None
+ env = None
+ root_patern = None
+ scan_queue = queue.Queue()
+ scan_queue_track = {}
+
+ def __init__(self):
+ """
+ Scanner is the base class for scanners.
+ """
+ super().__init__()
+ self.config = Configuration()
+ self.inv = InventoryMgr()
+ self.scanners_package = None
+ self.scanners = {}
+ self.load_metadata()
+
+ def scan(self, scanner_type, obj, id_field="id",
+ limit_to_child_id=None, limit_to_child_type=None):
+ types_to_fetch = self.get_scanner(scanner_type)
+ types_children = []
+ if not limit_to_child_type:
+ limit_to_child_type = []
+ elif isinstance(limit_to_child_type, str):
+ limit_to_child_type = [limit_to_child_type]
+ try:
+ for t in types_to_fetch:
+ if limit_to_child_type and t["type"] not in limit_to_child_type:
+ continue
+ children = self.scan_type(t, obj, id_field)
+ if limit_to_child_id:
+ children = [c for c in children
+ if c[id_field] == limit_to_child_id]
+ if not children:
+ continue
+ types_children.append({"type": t["type"],
+ "children": children})
+ except ValueError:
+ return False
+ if limit_to_child_id and len(types_children) > 0:
+ t = types_children[0]
+ children = t["children"]
+ return children[0]
+ return obj
+
+ def check_type_env(self, type_to_fetch):
+ # check if type is to be run in this environment
+ if "environment_condition" not in type_to_fetch:
+ return True
+ env_cond = type_to_fetch.get("environment_condition", {})
+ if not env_cond:
+ return True
+ if not isinstance(env_cond, dict):
+ self.log.warn('illegal environment_condition given '
+ 'for type {}'.format(type_to_fetch['type']))
+ return True
+ conf = self.config.get_env_config()
+ for attr, required_val in env_cond.items():
+ if attr == "mechanism_drivers":
+ if "mechanism_drivers" not in conf:
+ self.log.warn('illegal environment configuration: '
+ 'missing mechanism_drivers')
+ return False
+ if not isinstance(required_val, list):
+ required_val = [required_val]
+ return bool(set(required_val) & set(conf["mechanism_drivers"]))
+ elif attr not in conf or conf[attr] != required_val:
+ return False
+ # no check failed
+ return True
+
+ def scan_type(self, type_to_fetch, parent, id_field):
+ # check if type is to be run in this environment
+ if not self.check_type_env(type_to_fetch):
+ return []
+
+ if not parent:
+ obj_id = None
+ else:
+ obj_id = str(parent[id_field])
+ if not obj_id or not obj_id.rstrip():
+ raise ValueError("Object missing " + id_field + " attribute")
+
+ # get Fetcher instance
+ fetcher = type_to_fetch["fetcher"]
+ fetcher.set_env(self.get_env())
+
+ # get children_scanner instance
+ children_scanner = type_to_fetch.get("children_scanner")
+
+ escaped_id = fetcher.escape(str(obj_id)) if obj_id else obj_id
+ self.log.info(
+ "scanning : type=%s, parent: (type=%s, name=%s, id=%s)",
+ type_to_fetch["type"],
+ parent.get('type', 'environment'),
+ parent.get('name', ''),
+ escaped_id)
+
+ # fetch OpenStack data from environment by CLI, API or MySQL
+ # or physical devices data from ACI API
+ # It depends on the Fetcher's config.
+ try:
+ db_results = fetcher.get(escaped_id)
+ except Exception as e:
+ self.log.error("Error while scanning : " +
+ "fetcher=%s, " +
+ "type=%s, " +
+ "parent: (type=%s, name=%s, id=%s), " +
+ "error: %s",
+ fetcher.__class__.__name__,
+ type_to_fetch["type"],
+ "environment" if "type" not in parent
+ else parent["type"],
+ "" if "name" not in parent else parent["name"],
+ escaped_id,
+ e)
+ traceback.print_exc()
+ raise ScanError(str(e))
+
+ # format results
+ if isinstance(db_results, dict):
+ results = db_results["rows"] if db_results["rows"] else [db_results]
+ elif isinstance(db_results, str):
+ results = json.loads(db_results)
+ else:
+ results = db_results
+
+ # get child_id_field
+ try:
+ child_id_field = type_to_fetch["object_id_to_use_in_child"]
+ except KeyError:
+ child_id_field = "id"
+
+ environment = self.get_env()
+ children = []
+
+ for o in results:
+ saved = self.inv.save_inventory_object(o,
+ parent=parent,
+ environment=environment,
+ type_to_fetch=type_to_fetch)
+
+ if saved:
+ # add objects into children list.
+ children.append(o)
+
+ # put children scanner into queue
+ if children_scanner:
+ self.queue_for_scan(o, child_id_field, children_scanner)
+ return children
+
+ # scanning queued items, rather than going depth-first (DFS)
+ # this is done to allow collecting all required data for objects
+ # before continuing to next level
+ # for example, get host ID from API os-hypervisors call, so later
+ # we can use this ID in the "os-hypervisors/<ID>/servers" call
+ @staticmethod
+ def queue_for_scan(o, child_id_field, children_scanner):
+ if o["id"] in Scanner.scan_queue_track:
+ return
+ Scanner.scan_queue_track[o["type"] + ";" + o["id"]] = 1
+ Scanner.scan_queue.put({"object": o,
+ "child_id_field": child_id_field,
+ "scanner": children_scanner})
+
+ def run_scan(self, scanner_type, obj, id_field, child_id, child_type):
+ results = self.scan(scanner_type, obj, id_field, child_id, child_type)
+
+ # run children scanner from queue.
+ self.scan_from_queue()
+ return results
+
+ def scan_from_queue(self):
+ while not Scanner.scan_queue.empty():
+ item = Scanner.scan_queue.get()
+ scanner_type = item["scanner"]
+
+ # scan the queued item
+ self.scan(scanner_type, item["object"], item["child_id_field"])
+ self.log.info("Scan complete")
+
+ def scan_links(self):
+ self.log.info("scanning for links")
+ fetchers_implementing_add_links = [
+ FindLinksForPnics(),
+ FindLinksForInstanceVnics(),
+ FindLinksForVserviceVnics(),
+ FindLinksForVconnectors(),
+ FindLinksForVedges(),
+ FindLinksForOteps()
+ ]
+ for fetcher in fetchers_implementing_add_links:
+ fetcher.set_env(self.get_env())
+ fetcher.add_links()
+
+ def scan_cliques(self):
+ clique_scanner = CliqueFinder()
+ clique_scanner.set_env(self.get_env())
+ clique_scanner.find_cliques()
+
+ def deploy_monitoring_setup(self):
+ self.inv.monitoring_setup_manager.handle_pending_setup_changes()
+
+ def load_metadata(self):
+ parser = ScanMetadataParser(self.inv)
+ conf = self.config.get_env_config()
+ scanners_file = os.path.join(conf.get('app_path', '/etc/calipso'),
+ 'config',
+ ScanMetadataParser.SCANNERS_FILE)
+
+ metadata = parser.parse_metadata_file(scanners_file)
+ self.scanners_package = metadata[ScanMetadataParser.SCANNERS_PACKAGE]
+ self.scanners = metadata[ScanMetadataParser.SCANNERS]
+
+ def get_scanner_package(self):
+ return self.scanners_package
+
+ def get_scanner(self, scanner_type: str) -> dict:
+ return self.scanners.get(scanner_type)
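Tying it together, the scan flow driven by ScanController.run() in scan.py boils down to: construct a Scanner, run the inventory scan for the computed scanner type, then add links and cliques. A compressed sketch of that sequence, assuming Mongo access and the environment configuration have already been set up; the environment name is a placeholder:

    from discover.scanner import Scanner

    scanner = Scanner()
    scanner.set_env("my-environment")
    # full environment scan: obj is {"id": <env name>}, no child filtering
    scanner.run_scan("ScanEnvironment", {"id": "my-environment"}, "id", None, None)
    scanner.scan_links()
    scanner.scan_cliques()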