about summary refs log tree commit diff stats
path: root/app/discover
diff options
context:
space:
mode:
Diffstat (limited to 'app/discover')
-rw-r--r--app/discover/clique_finder.py192
-rw-r--r--app/discover/configuration.py4
-rw-r--r--app/discover/event_manager.py4
-rw-r--r--app/discover/events/event_base.py3
-rw-r--r--app/discover/events/event_instance_add.py2
-rw-r--r--app/discover/events/event_interface_add.py6
-rw-r--r--app/discover/events/event_port_add.py8
-rw-r--r--app/discover/events/event_port_delete.py2
-rw-r--r--app/discover/events/event_router_add.py4
-rw-r--r--app/discover/events/event_router_update.py4
-rw-r--r--app/discover/events/event_subnet_add.py6
-rw-r--r--app/discover/events/event_subnet_update.py6
-rw-r--r--app/discover/fetcher.py51
-rw-r--r--app/discover/fetchers/api/api_access.py61
-rw-r--r--app/discover/fetchers/api/api_fetch_availability_zones.py11
-rw-r--r--app/discover/fetchers/api/api_fetch_host_instances.py2
-rw-r--r--app/discover/fetchers/api/api_fetch_network.py18
-rw-r--r--app/discover/fetchers/api/api_fetch_networks.py15
-rw-r--r--app/discover/fetchers/api/api_fetch_port.py8
-rw-r--r--app/discover/fetchers/api/api_fetch_ports.py8
-rw-r--r--app/discover/fetchers/api/api_fetch_project_hosts.py44
-rw-r--r--app/discover/fetchers/api/api_fetch_regions.py2
-rw-r--r--app/discover/fetchers/cli/cli_access.py15
-rw-r--r--app/discover/fetchers/cli/cli_fetch_host_pnics.py7
-rw-r--r--app/discover/fetchers/cli/cli_fetch_vconnectors_ovs.py54
-rw-r--r--app/discover/fetchers/cli/cli_fetch_vservice_vnics.py10
-rw-r--r--app/discover/fetchers/db/db_access.py29
-rw-r--r--app/discover/fetchers/db/db_fetch_oteps.py2
-rw-r--r--app/discover/fetchers/kube/__init__.py9
-rw-r--r--app/discover/fetchers/kube/kube_access.py28
-rw-r--r--app/discover/fetchers/kube/kube_fetch_namespaces.py32
-rw-r--r--app/discover/link_finders/find_implicit_links.py128
-rw-r--r--app/discover/link_finders/find_links.py3
-rw-r--r--app/discover/link_finders/find_links_for_instance_vnics.py2
-rw-r--r--app/discover/link_finders/find_links_for_vedges.py2
-rwxr-xr-xapp/discover/scan.py11
-rw-r--r--app/discover/scan_manager.py129
-rw-r--r--app/discover/scan_metadata_parser.py28
-rw-r--r--app/discover/scanner.py88
39 files changed, 701 insertions, 337 deletions
diff --git a/app/discover/clique_finder.py b/app/discover/clique_finder.py
index 57b2e3b..4e04e7e 100644
--- a/app/discover/clique_finder.py
+++ b/app/discover/clique_finder.py
@@ -42,67 +42,90 @@ class CliqueFinder(Fetcher):
return self.links.find({'target': db_id})
def find_cliques(self):
- self.log.info("scanning for cliques")
+ self.log.info("Scanning for cliques")
clique_types = self.get_clique_types().values()
for clique_type in clique_types:
self.find_cliques_for_type(clique_type)
- self.log.info("finished scanning for cliques")
+ self.log.info("Finished scanning for cliques")
- # Calculate priority score
- def _get_priority_score(self, clique_type):
- if self.env == clique_type['environment']:
- return 4
- if (self.env_config['distribution'] == clique_type.get('distribution') and
- self.env_config['distribution_version'] == clique_type.get('distribution_version')):
- return 3
- if clique_type.get('mechanism_drivers') in self.env_config['mechanism_drivers']:
- return 2
- if self.env_config['type_drivers'] == clique_type.get('type_drivers'):
- return 1
- else:
+ # Calculate priority score for clique type per environment and configuration
+ def get_priority_score(self, clique_type):
+ # environment-specific clique type takes precedence
+ env = clique_type.get('environment')
+ config = self.env_config
+ # ECT - Clique Type with Environment name
+ if env:
+ if self.env == env:
+ return 2**6
+ if env == 'ANY':
+ # environment=ANY serves as fallback option
+ return 2**0
return 0
+ # NECT - Clique Type without Environment name
+ else:
+ env_type = clique_type.get('environment_type')
+ # TODO: remove backward compatibility ('if not env_type' check)
+ if env_type and env_type != config.get('environment_type'):
+ return 0
- # Get clique type with max priority
- # for given environment configuration and focal point type
- def _get_clique_type(self, focal_point, clique_types):
- # If there's no configuration match for the specified environment,
- # we use the default clique type definition with environment='ANY'
- fallback_type = next(
- filter(lambda t: t['environment'] == 'ANY', clique_types),
- None
- )
- if not fallback_type:
- raise ValueError("No fallback clique type (ANY) "
- "defined for focal point type '{}'"
- .format(focal_point))
+ score = 0
- clique_types.remove(fallback_type)
+ distribution = clique_type.get('distribution')
+ if distribution:
+ if config['distribution'] != distribution:
+ return 0
- priority_scores = [self._get_priority_score(clique_type)
- for clique_type
- in clique_types]
- max_score = max(priority_scores) if priority_scores else 0
+ score += 2**5
- return (fallback_type
- if max_score == 0
- else clique_types[priority_scores.index(max_score)])
+ dv = clique_type.get('distribution_version')
+ if dv:
+ if dv != config['distribution_version']:
+ return 0
+ score += 2**4
- def get_clique_types(self):
- if not self.clique_types_by_type:
- clique_types_by_focal_point = self.clique_types.aggregate([{
- "$group": {
- "_id": "$focal_point_type",
- "types": {"$push": "$$ROOT"}
- }
- }])
+ mechanism_drivers = clique_type.get('mechanism_drivers')
+ if mechanism_drivers:
+ if mechanism_drivers not in config['mechanism_drivers']:
+ return 0
+ score += 2**3
- self.clique_types_by_type = {
- cliques['_id']: self._get_clique_type(cliques['_id'],
- cliques['types'])
- for cliques in
- clique_types_by_focal_point
- }
+ type_drivers = clique_type.get('type_drivers')
+ if type_drivers:
+ if type_drivers != config['type_drivers']:
+ return 0
+ score += 2**2
+
+ # If no configuration is specified, this clique type
+ # is a fallback for its environment type
+ return max(score, 2**1)
+
+ # Get clique type with max priority
+ # for given focal point type
+ def _get_clique_type(self, clique_types):
+ scored_clique_types = [{'score': self.get_priority_score(clique_type),
+ 'clique_type': clique_type}
+ for clique_type in clique_types]
+ max_score = max(scored_clique_types, key=lambda t: t['score'])
+ if max_score['score'] == 0:
+ self.log.warn('No matching clique types '
+ 'for focal point type: {fp_type}'
+ .format(fp_type=clique_types[0].get('focal_point_type')))
+ return None
+ return max_score.get('clique_type')
+ def get_clique_types(self):
+ if not self.clique_types_by_type:
+ clique_types_candidates = {}
+ for clique in self.clique_types.find({}):
+ fp_type = clique.get('focal_point_type', '')
+ if not clique_types_candidates.get(fp_type):
+ clique_types_candidates[fp_type] = []
+ clique_types_candidates[fp_type].append(clique)
+ for t in clique_types_candidates.keys():
+ selected = self._get_clique_type(clique_types_candidates[t])
+ if not selected:
+ continue
+ self.clique_types_by_type[t] = selected
return self.clique_types_by_type
def find_cliques_for_type(self, clique_type):
@@ -125,11 +148,14 @@ class CliqueFinder(Fetcher):
.find_one({"focal_point_type": o['type']})
constraints = [] if not constraint else constraint["constraints"]
clique_types = self.get_clique_types()
- clique_type = clique_types[o['type']]
- new_clique = self.construct_clique_for_focal_point(o, clique_type,
- constraints)
- if not new_clique:
+ clique_type = clique_types.get(o['type'])
+ if not clique_type:
self.cliques.delete({'_id': clique['_id']})
+ else:
+ new_clique = self.construct_clique_for_focal_point(o, clique_type,
+ constraints)
+ if not new_clique:
+ self.cliques.delete({'_id': clique['_id']})
def construct_clique_for_focal_point(self, o, clique_type, constraints):
# keep a hash of nodes in clique that were visited for each type
@@ -146,12 +172,16 @@ class CliqueFinder(Fetcher):
for c in constraints:
val = o[c] if c in o else None
clique["constraints"][c] = val
+ allow_implicit = clique_type.get('use_implicit_links', False)
for link_type in clique_type["link_types"]:
- self.check_link_type(clique, link_type, nodes_of_type)
+ if not self.check_link_type(clique, link_type, nodes_of_type,
+ allow_implicit=allow_implicit):
+ break
# after adding the links to the clique, create/update the clique
if not clique["links"]:
return None
+ clique["clique_type"] = clique_type["_id"]
focal_point_obj = self.inventory.find({"_id": clique["focal_point"]})
if not focal_point_obj:
return None
@@ -198,25 +228,33 @@ class CliqueFinder(Fetcher):
'-'.join(link_type_parts)
return CliqueFinder.link_type_reversed.get(link_type)
- def check_link_type(self, clique, link_type, nodes_of_type):
+ def check_link_type(self, clique, link_type, nodes_of_type,
+ allow_implicit=False) -> bool:
# check if it's backwards
link_type_reversed = self.get_link_type_reversed(link_type)
# handle case of links like T<-->T
self_linked = link_type == link_type_reversed
use_reversed = False
if not self_linked:
- matches = self.links.find_one({
+ link_search_condition = {
"environment": self.env,
"link_type": link_type_reversed
- })
+ }
+ if not allow_implicit:
+ link_search_condition['implicit'] = False
+ matches = self.links.find_one(link_search_condition)
use_reversed = True if matches else False
if self_linked or not use_reversed:
- self.check_link_type_forward(clique, link_type, nodes_of_type)
+ return self.check_link_type_forward(clique, link_type,
+ nodes_of_type,
+ allow_implicit=allow_implicit)
if self_linked or use_reversed:
- self.check_link_type_back(clique, link_type, nodes_of_type)
+ return self.check_link_type_back(clique, link_type, nodes_of_type,
+ allow_implicit=allow_implicit)
def check_link_type_for_direction(self, clique, link_type, nodes_of_type,
- is_reversed=False):
+ is_reversed=False,
+ allow_implicit=False) -> bool:
if is_reversed:
link_type = self.get_link_type_reversed(link_type)
from_type = link_type[:link_type.index("-")]
@@ -225,7 +263,7 @@ class CliqueFinder(Fetcher):
other_side = 'target' if not is_reversed else 'source'
match_type = to_type if is_reversed else from_type
if match_type not in nodes_of_type.keys():
- return
+ return False
other_side_type = to_type if not is_reversed else from_type
nodes_to_add = set()
for match_point in nodes_of_type[match_type]:
@@ -233,21 +271,27 @@ class CliqueFinder(Fetcher):
clique,
link_type,
side_to_match,
- other_side)
+ other_side,
+ allow_implicit=allow_implicit)
nodes_to_add = nodes_to_add | matches
if other_side_type not in nodes_of_type:
nodes_of_type[other_side_type] = set()
nodes_of_type[other_side_type] = \
nodes_of_type[other_side_type] | nodes_to_add
+ return len(nodes_to_add) > 0
def find_matches_for_point(self, match_point, clique, link_type,
- side_to_match, other_side) -> set:
+ side_to_match, other_side,
+ allow_implicit=False) -> set:
nodes_to_add = set()
- matches = self.links.find({
+ link_search_condition = {
"environment": self.env,
"link_type": link_type,
side_to_match: ObjectId(match_point)
- })
+ }
+ if not allow_implicit:
+ link_search_condition['implicit'] = False
+ matches = self.links.find(link_search_condition)
for link in matches:
link_id = link["_id"]
if link_id in clique["links"]:
@@ -260,10 +304,16 @@ class CliqueFinder(Fetcher):
nodes_to_add.add(other_side_point)
return nodes_to_add
- def check_link_type_forward(self, clique, link_type, nodes_of_type):
- self.check_link_type_for_direction(clique, link_type, nodes_of_type,
- is_reversed=False)
+ def check_link_type_forward(self, clique, link_type, nodes_of_type,
+ allow_implicit=False) -> bool:
+ return self.check_link_type_for_direction(clique, link_type,
+ nodes_of_type,
+ is_reversed=False,
+ allow_implicit=allow_implicit)
- def check_link_type_back(self, clique, link_type, nodes_of_type):
- self.check_link_type_for_direction(clique, link_type, nodes_of_type,
- is_reversed=True)
+ def check_link_type_back(self, clique, link_type, nodes_of_type,
+ allow_implicit=False) -> bool:
+ return self.check_link_type_for_direction(clique, link_type,
+ nodes_of_type,
+ is_reversed=True,
+ allow_implicit=allow_implicit)
diff --git a/app/discover/configuration.py b/app/discover/configuration.py
index c7bc0c0..9ec8f96 100644
--- a/app/discover/configuration.py
+++ b/app/discover/configuration.py
@@ -47,6 +47,10 @@ class Configuration(metaclass=Singleton):
def get_env_name(self):
return self.env_name
+ def get_env_type(self):
+ return 'OpenStack' if 'environment_type' not in self.environment \
+ else self.environment['environment_type']
+
def update_env(self, values):
self.collection.update_one({"name": self.env_name},
{'$set': MongoAccess.encode_mongo_keys(values)})
diff --git a/app/discover/event_manager.py b/app/discover/event_manager.py
index 4855acc..c01916c 100644
--- a/app/discover/event_manager.py
+++ b/app/discover/event_manager.py
@@ -113,8 +113,8 @@ class EventManager(Manager):
def get_listener(self, env: str):
env_config = self.inv.get_env_config(env)
return (self.LISTENERS.get(env_config.get('distribution'), {})
- .get(env_config.get('distribution_version',
- DefaultListener)))
+ .get(env_config.get('distribution_version'),
+ DefaultListener))
def listen_to_events(self, listener: ListenerBase, env_name: str, process_vars: dict):
listener.listen({
diff --git a/app/discover/events/event_base.py b/app/discover/events/event_base.py
index 6b3b290..4b466e1 100644
--- a/app/discover/events/event_base.py
+++ b/app/discover/events/event_base.py
@@ -11,6 +11,7 @@ from abc import abstractmethod, ABC
from discover.fetcher import Fetcher
from utils.inventory_mgr import InventoryMgr
+from utils.origins import ScanOrigin, ScanOrigins
class EventResult:
@@ -23,6 +24,8 @@ class EventResult:
self.message = message
self.related_object = related_object
self.display_context = display_context
+ self.origin = ScanOrigin(origin_id=None,
+ origin_type=ScanOrigins.EVENT)
class EventBase(Fetcher, ABC):
diff --git a/app/discover/events/event_instance_add.py b/app/discover/events/event_instance_add.py
index 4dd2b20..a8717a5 100644
--- a/app/discover/events/event_instance_add.py
+++ b/app/discover/events/event_instance_add.py
@@ -25,7 +25,7 @@ class EventInstanceAdd(EventBase):
# scan instance
scanner = Scanner()
- scanner.set_env(env)
+ scanner.setup(env=env, origin=self.origin)
scanner.scan("ScanInstancesRoot", instances_root,
limit_to_child_id=instance_id,
limit_to_child_type='instance')
diff --git a/app/discover/events/event_interface_add.py b/app/discover/events/event_interface_add.py
index e54bedb..f0ba569 100644
--- a/app/discover/events/event_interface_add.py
+++ b/app/discover/events/event_interface_add.py
@@ -30,7 +30,7 @@ class EventInterfaceAdd(EventBase):
def add_gateway_port(self, env, project, network_name, router_doc, host_id):
fetcher = CliFetchHostVservice()
- fetcher.set_env(env)
+ fetcher.setup(env=env, origin=self.origin)
router_id = router_doc['id']
router = fetcher.get_vservice(host_id, router_id)
device_id = decode_router_id(router_id)
@@ -101,7 +101,7 @@ class EventInterfaceAdd(EventBase):
# add router-interface port document.
if not ApiAccess.regions:
fetcher = ApiFetchRegions()
- fetcher.set_env(env)
+ fetcher.setup(env=env, origin=self.origin)
fetcher.get(project_id)
port_doc = EventSubnetAdd().add_port_document(env, port_id,
network_name=network_name)
@@ -134,7 +134,7 @@ class EventInterfaceAdd(EventBase):
# update vservice-vnic, vnic-network,
FindLinksForVserviceVnics().add_links(search={"parent_id": router_id})
scanner = Scanner()
- scanner.set_env(env)
+ scanner.setup(env=env, origin=self.origin)
scanner.scan_cliques()
self.log.info("Finished router-interface added.")
diff --git a/app/discover/events/event_port_add.py b/app/discover/events/event_port_add.py
index 9220015..e03db34 100644
--- a/app/discover/events/event_port_add.py
+++ b/app/discover/events/event_port_add.py
@@ -168,7 +168,7 @@ class EventPortAdd(EventBase):
"router": ('Gateways', router_name)}
fetcher = CliFetchVserviceVnics()
- fetcher.set_env(env)
+ fetcher.setup(env=env, origin=self.origin)
namespace = 'q{}-{}'.format(object_type, object_id)
vnic_documents = fetcher.handle_service(host['id'], namespace, enable_cache=False)
if not vnic_documents:
@@ -258,7 +258,7 @@ class EventPortAdd(EventBase):
# update instance
instance_fetcher = ApiFetchHostInstances()
- instance_fetcher.set_env(env)
+ instance_fetcher.setup(env=env, origin=self.origin)
instance_docs = instance_fetcher.get(host_id + '-')
instance = next(filter(lambda i: i['id'] == instance_id, instance_docs), None)
@@ -278,7 +278,7 @@ class EventPortAdd(EventBase):
# set ovs as default type.
vnic_fetcher = CliFetchInstanceVnics()
- vnic_fetcher.set_env(env)
+ vnic_fetcher.setup(env=env, origin=self.origin)
vnic_docs = vnic_fetcher.get(instance_id + '-')
vnic = next(filter(lambda vnic: vnic['mac_address'] == mac_address, vnic_docs), None)
@@ -298,7 +298,7 @@ class EventPortAdd(EventBase):
for fetcher in fetchers_implementing_add_links:
fetcher.add_links()
scanner = Scanner()
- scanner.set_env(env)
+ scanner.setup(env=env, origin=self.origin)
scanner.scan_cliques()
port_document = self.inv.get_by_id(env, port['id'])
diff --git a/app/discover/events/event_port_delete.py b/app/discover/events/event_port_delete.py
index 1e55870..937d8df 100644
--- a/app/discover/events/event_port_delete.py
+++ b/app/discover/events/event_port_delete.py
@@ -61,7 +61,7 @@ class EventPortDelete(EventDeleteBase):
# update instance mac address.
if port_doc['mac_address'] == instance_doc['mac_address']:
instance_fetcher = ApiFetchHostInstances()
- instance_fetcher.set_env(env)
+ instance_fetcher.setup(env=env, origin=self.origin)
host_id = port_doc['binding:host_id']
instance_id = port_doc['device_id']
instance_docs = instance_fetcher.get(host_id + '-')
diff --git a/app/discover/events/event_router_add.py b/app/discover/events/event_router_add.py
index 1fb2244..0f8bc05 100644
--- a/app/discover/events/event_router_add.py
+++ b/app/discover/events/event_router_add.py
@@ -100,7 +100,7 @@ class EventRouterAdd(EventBase):
host = self.inv.get_by_id(env, host_id)
fetcher = CliFetchHostVservice()
- fetcher.set_env(env)
+ fetcher.setup(env=env, origin=self.origin)
router_doc = fetcher.get_vservice(host_id, router_id)
gateway_info = router['external_gateway_info']
@@ -114,7 +114,7 @@ class EventRouterAdd(EventBase):
# scan links and cliques
FindLinksForVserviceVnics().add_links(search={"parent_id": router_id})
scanner = Scanner()
- scanner.set_env(env)
+ scanner.setup(env=env, origin=self.origin)
scanner.scan_cliques()
self.log.info("Finished router added.")
diff --git a/app/discover/events/event_router_update.py b/app/discover/events/event_router_update.py
index b63b224..f20f07e 100644
--- a/app/discover/events/event_router_update.py
+++ b/app/discover/events/event_router_update.py
@@ -60,7 +60,7 @@ class EventRouterUpdate(EventBase):
# add gw_port_id info and port document.
fetcher = CliFetchHostVservice()
- fetcher.set_env(env)
+ fetcher.setup(env=env, origin=self.origin)
router_vservice = fetcher.get_vservice(host_id, router_full_id)
if router_vservice.get('gw_port_id'):
router_doc['gw_port_id'] = router_vservice['gw_port_id']
@@ -74,7 +74,7 @@ class EventRouterUpdate(EventBase):
# update the cliques.
scanner = Scanner()
- scanner.set_env(env)
+ scanner.setup(env=env, origin=self.origin)
scanner.scan_cliques()
self.log.info("Finished router update.")
return EventResult(result=True,
diff --git a/app/discover/events/event_subnet_add.py b/app/discover/events/event_subnet_add.py
index 4126e0c..0a91803 100644
--- a/app/discover/events/event_subnet_add.py
+++ b/app/discover/events/event_subnet_add.py
@@ -29,7 +29,7 @@ class EventSubnetAdd(EventBase):
# document does not have a project attribute. In this case, network_name should not be provided.
fetcher = ApiFetchPort()
- fetcher.set_env(env)
+ fetcher.setup(env=env, origin=self.origin)
ports = fetcher.get(port_id)
if ports:
@@ -133,7 +133,7 @@ class EventSubnetAdd(EventBase):
# update network
if not ApiAccess.regions:
fetcher = ApiFetchRegions()
- fetcher.set_env(env)
+ fetcher.setup(env=env, origin=self.origin)
fetcher.get(project_id)
self.log.info("add new subnet.")
@@ -146,7 +146,7 @@ class EventSubnetAdd(EventBase):
FindLinksForVserviceVnics().add_links(search={"parent_id": "qdhcp-%s-vnics" % network_id})
scanner = Scanner()
- scanner.set_env(env)
+ scanner.setup(env=env, origin=self.origin)
scanner.scan_cliques()
self.log.info("Finished subnet added.")
return EventResult(result=True,
diff --git a/app/discover/events/event_subnet_update.py b/app/discover/events/event_subnet_update.py
index 59b0afb..2c58e70 100644
--- a/app/discover/events/event_subnet_update.py
+++ b/app/discover/events/event_subnet_update.py
@@ -50,7 +50,7 @@ class EventSubnetUpdate(EventBase):
# make sure that self.regions is not empty.
if not ApiAccess.regions:
fetcher = ApiFetchRegions()
- fetcher.set_env(env)
+ fetcher.setup(env=env, origin=self.origin)
fetcher.get(project_id)
self.log.info("add port binding to DHCP server.")
@@ -69,12 +69,12 @@ class EventSubnetUpdate(EventBase):
# add link for vservice - vnic
FindLinksForVserviceVnics().add_links(search={"id": "qdhcp-%s" % network_id})
scanner = Scanner()
- scanner.set_env(env)
+ scanner.setup(env=env, origin=self.origin)
scanner.scan_cliques()
FindLinksForVserviceVnics(). \
add_links(search={"id": "qdhcp-%s" % network_id})
scanner = Scanner()
- scanner.set_env(env)
+ scanner.setup(env=env, origin=self.origin)
scanner.scan_cliques()
if subnet['enable_dhcp'] is False and subnets[key]['enable_dhcp']:
diff --git a/app/discover/fetcher.py b/app/discover/fetcher.py
index 8d7fdbb..707cd60 100644
--- a/app/discover/fetcher.py
+++ b/app/discover/fetcher.py
@@ -8,16 +8,21 @@
# http://www.apache.org/licenses/LICENSE-2.0 #
###############################################################################
from discover.configuration import Configuration
+from utils.origins import Origin
from utils.logging.full_logger import FullLogger
class Fetcher:
+ ENV_TYPE_KUBERNETES = 'Kubernetes'
+ ENV_TYPE_OPENSTACK = 'OpenStack'
+
def __init__(self):
super().__init__()
self.env = None
self.log = FullLogger()
self.configuration = None
+ self.origin = None
@staticmethod
def escape(string):
@@ -25,11 +30,55 @@ class Fetcher:
def set_env(self, env):
self.env = env
- self.log.set_env(env)
+ self.log.setup(env=env)
self.configuration = Configuration()
+ def setup(self, env, origin: Origin = None):
+ self.set_env(env=env)
+ if origin:
+ self.origin = origin
+ self.log.setup(origin=origin)
+
def get_env(self):
return self.env
def get(self, object_id):
return None
+
+ def set_folder_parent(self,
+ o: dict,
+ object_type: str =None,
+ master_parent_type: str =None,
+ master_parent_id: str =None,
+ parent_objects_name=None,
+ parent_type: str =None,
+ parent_id: str =None,
+ parent_text: str =None):
+ if object_type:
+ o['type'] = object_type
+ if not parent_objects_name:
+ parent_objects_name = '{}s'.format(object_type)
+ if not master_parent_type:
+ self.log.error('set_folder_parent: must specify: '
+ 'master_parent_type, master_parent_id, '
+ 'parent_type', 'parent_id')
+ return
+ if not parent_objects_name and not parent_type:
+ self.log.error('set_folder_parent: must specify: '
+ 'either parent_objects_name (e.g. "vedges") '
+ 'or parent_type and parent_id')
+ return
+ if parent_objects_name and not parent_type:
+ parent_type = '{}_folder'.format(parent_objects_name)
+ if parent_objects_name and not parent_id:
+ parent_id = '{}-{}'.format(master_parent_id, parent_objects_name)
+ o.update({
+ 'master_parent_type': master_parent_type,
+ 'master_parent_id': master_parent_id,
+ 'parent_type': parent_type,
+ 'parent_id': parent_id
+ })
+ if parent_text:
+ o['parent_text'] = parent_text
+ elif parent_objects_name:
+ o['parent_text'] = parent_objects_name.capitalize()
diff --git a/app/discover/fetchers/api/api_access.py b/app/discover/fetchers/api/api_access.py
index f685faf..1fca202 100644
--- a/app/discover/fetchers/api/api_access.py
+++ b/app/discover/fetchers/api/api_access.py
@@ -12,21 +12,18 @@ import re
import requests
import time
-from discover.configuration import Configuration
-from discover.fetcher import Fetcher
+from utils.api_access_base import ApiAccessBase
from utils.string_utils import jsonify
-class ApiAccess(Fetcher):
+class ApiAccess(ApiAccessBase):
+
+ ADMIN_PORT = "35357"
+
subject_token = None
initialized = False
regions = {}
- config = None
- api_config = None
- host = ""
- base_url = ""
- admin_token = ""
tokens = {}
admin_endpoint = ""
admin_project = None
@@ -38,28 +35,19 @@ class ApiAccess(Fetcher):
# identity API v2 version with admin token
def __init__(self, config=None):
- super(ApiAccess, self).__init__()
- if ApiAccess.initialized:
+ super().__init__('OpenStack', config)
+ self.base_url = "http://" + self.host + ":" + self.port
+ if self.initialized:
return
- ApiAccess.config = {'OpenStack': config} if config else Configuration()
- ApiAccess.api_config = ApiAccess.config.get("OpenStack")
- host = ApiAccess.api_config.get("host", "")
- ApiAccess.host = host
- port = ApiAccess.api_config.get("port", "")
- if not (host and port):
- raise ValueError('Missing definition of host or port ' +
- 'for OpenStack API access')
- ApiAccess.base_url = "http://" + host + ":" + port
- ApiAccess.admin_token = ApiAccess.api_config.get("admin_token", "")
- ApiAccess.admin_project = ApiAccess.api_config.get("admin_project",
- "admin")
- ApiAccess.admin_endpoint = "http://" + host + ":" + "35357"
+ ApiAccess.admin_project = self.api_config.get("admin_project", "admin")
+ ApiAccess.admin_endpoint = "http://" + self.host + ":" + self.ADMIN_PORT
token = self.v2_auth_pwd(ApiAccess.admin_project)
if not token:
raise ValueError("Authentication failed. Failed to obtain token")
else:
self.subject_token = token
+ self.initialized = True
@staticmethod
def parse_time(time_str):
@@ -95,9 +83,9 @@ class ApiAccess(Fetcher):
subject_token = self.get_existing_token(project_id)
if subject_token:
return subject_token
- req_url = ApiAccess.base_url + "/v2.0/tokens"
+ req_url = self.base_url + "/v2.0/tokens"
response = requests.post(req_url, json=post_body, headers=headers,
- timeout=5)
+ timeout=self.CONNECT_TIMEOUT)
response = response.json()
ApiAccess.auth_response[project_id] = response
if 'error' in response:
@@ -120,8 +108,8 @@ class ApiAccess(Fetcher):
return token_details
def v2_auth_pwd(self, project):
- user = ApiAccess.api_config["user"]
- pwd = ApiAccess.api_config["pwd"]
+ user = self.api_config["user"]
+ pwd = self.api_config["pwd"]
post_body = {
"auth": {
"passwordCredentials": {
@@ -148,23 +136,6 @@ class ApiAccess(Fetcher):
auth_response = ApiAccess.auth_response.get('admin', {})
return auth_response
- def get_rel_url(self, relative_url, headers):
- req_url = ApiAccess.base_url + relative_url
- return self.get_url(req_url, headers)
-
- def get_url(self, req_url, headers):
- response = requests.get(req_url, headers=headers)
- if response.status_code != requests.codes.ok:
- # some error happened
- if "reason" in response:
- msg = ", reason: {}".format(response.reason)
- else:
- msg = ", response: {}".format(response.text)
- self.log.error("req_url: {} {}".format(req_url, msg))
- return None
- ret = response.json()
- return ret
-
def get_region_url(self, region_name, service):
if region_name not in self.regions:
return None
@@ -174,7 +145,7 @@ class ApiAccess(Fetcher):
return None
orig_url = s["adminURL"]
# replace host name with the host found in config
- url = re.sub(r"^([^/]+)//[^:]+", r"\1//" + ApiAccess.host, orig_url)
+ url = re.sub(r"^([^/]+)//[^:]+", r"\1//" + self.host, orig_url)
return url
# like get_region_url(), but remove everything starting from the "/v2"
diff --git a/app/discover/fetchers/api/api_fetch_availability_zones.py b/app/discover/fetchers/api/api_fetch_availability_zones.py
index 196893b..ad9550e 100644
--- a/app/discover/fetchers/api/api_fetch_availability_zones.py
+++ b/app/discover/fetchers/api/api_fetch_availability_zones.py
@@ -28,7 +28,7 @@ class ApiFetchAvailabilityZones(ApiAccess):
# because the latter does not include the "internal" zone in the results
endpoint = self.get_region_url_nover(region, "nova")
req_url = endpoint + "/v2/" + token["tenant"]["id"] + \
- "/os-availability-zone/detail"
+ "/os-availability-zone/detail"
headers = {
"X-Auth-Project-Id": project,
"X-Auth-Token": token["id"]
@@ -45,11 +45,10 @@ class ApiFetchAvailabilityZones(ApiAccess):
for doc in azs:
doc["id"] = doc["zoneName"]
doc["name"] = doc.pop("zoneName")
- doc["master_parent_type"] = "region"
- doc["master_parent_id"] = region
- doc["parent_type"] = "availability_zones_folder"
- doc["parent_id"] = region + "-availability_zones"
- doc["parent_text"] = "Availability Zones"
+ self.set_folder_parent(doc, object_type="availability_zone",
+ master_parent_type="region",
+ master_parent_id=region,
+ parent_text="Availability Zones")
doc["available"] = doc["zoneState"]["available"]
doc.pop("zoneState")
ret.append(doc)
diff --git a/app/discover/fetchers/api/api_fetch_host_instances.py b/app/discover/fetchers/api/api_fetch_host_instances.py
index 56cffda..bf8513a 100644
--- a/app/discover/fetchers/api/api_fetch_host_instances.py
+++ b/app/discover/fetchers/api/api_fetch_host_instances.py
@@ -18,7 +18,7 @@ class ApiFetchHostInstances(ApiAccess, DbAccess, metaclass=Singleton):
def __init__(self):
super(ApiFetchHostInstances, self).__init__()
self.inv = InventoryMgr()
- self.endpoint = ApiAccess.base_url.replace(":5000", ":8774")
+ self.endpoint = self.base_url.replace(":5000", ":8774")
self.projects = None
self.db_fetcher = DbFetchInstances()
diff --git a/app/discover/fetchers/api/api_fetch_network.py b/app/discover/fetchers/api/api_fetch_network.py
index 889b8a5..b253773 100644
--- a/app/discover/fetchers/api/api_fetch_network.py
+++ b/app/discover/fetchers/api/api_fetch_network.py
@@ -23,7 +23,8 @@ class ApiFetchNetwork(ApiAccess):
return []
ret = []
for region in self.regions:
- # TODO: refactor legacy code (Unresolved reference - self.get_for_region)
+ # TODO: refactor legacy code
+ # (Unresolved reference - self.get_for_region)
ret.extend(self.get_for_region(region, token, project_id))
return ret
@@ -37,7 +38,7 @@ class ApiFetchNetwork(ApiAccess):
"X-Auth-Token": token["id"]
}
response = self.get_url(req_url, headers)
- if not "network" in response:
+ if "network" not in response:
return []
network = response["network"]
subnets = network['subnets']
@@ -60,13 +61,12 @@ class ApiFetchNetwork(ApiAccess):
network["cidrs"] = cidrs
network["subnet_ids"] = subnet_ids
- network["master_parent_type"] = "project"
- network["master_parent_id"] = network["tenant_id"]
- network["parent_type"] = "networks_folder"
- network["parent_id"] = network["tenant_id"] + "-networks"
- network["parent_text"] = "Networks"
- # set the 'network' attribute for network objects to the name of network,
- # to allow setting constraint on network when creating network clique
+ self.set_folder_parent(network, object_type="network",
+ master_parent_type="project",
+ master_parent_id=network["tenant_id"])
+ # set the 'network' attribute for network objects to the name of
+ # network, to allow setting constraint on network when creating
+ # network clique
network['network'] = network["id"]
# get the project name
project = self.inv.get_by_id(self.get_env(), network["tenant_id"])
diff --git a/app/discover/fetchers/api/api_fetch_networks.py b/app/discover/fetchers/api/api_fetch_networks.py
index 4b70f65..f76517a 100644
--- a/app/discover/fetchers/api/api_fetch_networks.py
+++ b/app/discover/fetchers/api/api_fetch_networks.py
@@ -34,7 +34,7 @@ class ApiFetchNetworks(ApiAccess):
"X-Auth-Token": token["id"]
}
response = self.get_url(req_url, headers)
- if not "networks" in response:
+ if "networks" not in response:
return []
networks = response["networks"]
req_url = endpoint + "/v2.0/subnets"
@@ -46,7 +46,6 @@ class ApiFetchNetworks(ApiAccess):
for s in subnets:
subnets_hash[s["id"]] = s
for doc in networks:
- doc["master_parent_type"] = "project"
project_id = doc["tenant_id"]
if not project_id:
# find project ID of admin project
@@ -57,12 +56,12 @@ class ApiFetchNetworks(ApiAccess):
if not project:
self.log.error("failed to find admin project in DB")
project_id = project["id"]
- doc["master_parent_id"] = project_id
- doc["parent_type"] = "networks_folder"
- doc["parent_id"] = project_id + "-networks"
- doc["parent_text"] = "Networks"
- # set the 'network' attribute for network objects to the name of network,
- # to allow setting constraint on network when creating network clique
+ self.set_folder_parent(doc, object_type='network',
+ master_parent_id=project_id,
+ master_parent_type='project')
+ # set the 'network' attribute for network objects to the name of
+ # network, to allow setting constraint on network when creating
+ # network clique
doc['network'] = doc["id"]
# get the project name
project = self.inv.get_by_id(self.get_env(), project_id)
diff --git a/app/discover/fetchers/api/api_fetch_port.py b/app/discover/fetchers/api/api_fetch_port.py
index f8d9eeb..8de1452 100644
--- a/app/discover/fetchers/api/api_fetch_port.py
+++ b/app/discover/fetchers/api/api_fetch_port.py
@@ -43,11 +43,9 @@ class ApiFetchPort(ApiAccess):
return []
doc = response["port"]
- doc["master_parent_type"] = "network"
- doc["master_parent_id"] = doc["network_id"]
- doc["parent_type"] = "ports_folder"
- doc["parent_id"] = doc["network_id"] + "-ports"
- doc["parent_text"] = "Ports"
+ self.set_folder_parent(doc, object_type="port",
+ master_parent_type="network",
+ master_parent_id=doc["network_id"])
# get the project name
net = self.inv.get_by_id(self.get_env(), doc["network_id"])
if net:
diff --git a/app/discover/fetchers/api/api_fetch_ports.py b/app/discover/fetchers/api/api_fetch_ports.py
index f4c54a6..5e44c1b 100644
--- a/app/discover/fetchers/api/api_fetch_ports.py
+++ b/app/discover/fetchers/api/api_fetch_ports.py
@@ -38,11 +38,9 @@ class ApiFetchPorts(ApiAccess):
return []
ports = response["ports"]
for doc in ports:
- doc["master_parent_type"] = "network"
- doc["master_parent_id"] = doc["network_id"]
- doc["parent_type"] = "ports_folder"
- doc["parent_id"] = doc["network_id"] + "-ports"
- doc["parent_text"] = "Ports"
+ self.set_folder_parent(doc, object_type="port",
+ master_parent_type="network",
+ master_parent_id=doc["network_id"])
# get the project name
net = self.inv.get_by_id(self.get_env(), doc["network_id"])
if net:
diff --git a/app/discover/fetchers/api/api_fetch_project_hosts.py b/app/discover/fetchers/api/api_fetch_project_hosts.py
index 5b911f5..1059600 100644
--- a/app/discover/fetchers/api/api_fetch_project_hosts.py
+++ b/app/discover/fetchers/api/api_fetch_project_hosts.py
@@ -11,9 +11,11 @@ import json
from discover.fetchers.api.api_access import ApiAccess
from discover.fetchers.db.db_access import DbAccess
+from discover.fetchers.cli.cli_fetch_host_details import CliFetchHostDetails
+from utils.ssh_connection import SshError
-class ApiFetchProjectHosts(ApiAccess, DbAccess):
+class ApiFetchProjectHosts(ApiAccess, DbAccess, CliFetchHostDetails):
def __init__(self):
super(ApiFetchProjectHosts, self).__init__()
@@ -107,6 +109,7 @@ class ApiFetchProjectHosts(ApiAccess, DbAccess):
s = services["nova-compute"]
if s["available"] and s["active"]:
self.add_host_type(doc, "Compute", az['zoneName'])
+ self.fetch_host_os_details(doc)
return doc
# fetch more details of network nodes from neutron DB agents table
@@ -121,7 +124,12 @@ class ApiFetchProjectHosts(ApiAccess, DbAccess):
""".format(self.neutron_db)
results = self.get_objects_list(query, "")
for r in results:
- host = hosts[r["host"]]
+ host = r["host"]
+ if host not in hosts:
+ self.log.error("host from agents table not in hosts list: {}"
+ .format(host))
+ continue
+ host = hosts[host]
host["config"] = json.loads(r["configurations"])
self.add_host_type(host, "Network", '')
@@ -136,9 +144,34 @@
         for db_row in results:
             doc.update(db_row)
-    def add_host_type(self, doc, type, zone):
-        if not type in doc["host_type"]:
-            doc["host_type"].append(type)
-            if type == 'Compute':
+    @staticmethod
+    def add_host_type(doc, host_type, zone):
+        if host_type not in doc["host_type"]:
+            doc["host_type"].append(host_type)
+            if host_type == 'Compute':
                 doc['zone'] = zone
                 doc['parent_id'] = zone
+
+    def fetch_host_os_details(self, doc):
+        cmd = 'cat /etc/os-release && echo "ARCHITECURE=`arch`"'
+        try:
+            lines = self.run_fetch_lines(cmd, ssh_to_host=doc['host'])
+        except SshError as e:
+            self.log.error('{}: {}'.format(cmd, str(e)))
+            return
+ os_attributes = {}
+ attributes_to_fetch = {
+ 'NAME': 'name',
+ 'VERSION': 'version',
+ 'ID': 'ID',
+ 'ID_LIKE': 'ID_LIKE',
+ 'ARCHITECURE': 'architecure'
+ }
+ for attr in attributes_to_fetch:
+ matches = [l for l in lines if l.startswith(attr + '=')]
+ if matches:
+ line = matches[0]
+ attr_name = attributes_to_fetch[attr]
+ os_attributes[attr_name] = line[line.index('=')+1:].strip('"')
+ if os_attributes:
+ doc['OS'] = os_attributes
diff --git a/app/discover/fetchers/api/api_fetch_regions.py b/app/discover/fetchers/api/api_fetch_regions.py
index 23a3736..4e83b01 100644
--- a/app/discover/fetchers/api/api_fetch_regions.py
+++ b/app/discover/fetchers/api/api_fetch_regions.py
@@ -13,7 +13,7 @@ from discover.fetchers.api.api_access import ApiAccess
class ApiFetchRegions(ApiAccess):
def __init__(self):
super(ApiFetchRegions, self).__init__()
- self.endpoint = ApiAccess.base_url
+ self.endpoint = self.base_url
def get(self, regions_folder_id):
token = self.v2_auth_pwd(self.admin_project)
diff --git a/app/discover/fetchers/cli/cli_access.py b/app/discover/fetchers/cli/cli_access.py
index c77b22a..68b81c8 100644
--- a/app/discover/fetchers/cli/cli_access.py
+++ b/app/discover/fetchers/cli/cli_access.py
@@ -17,7 +17,7 @@ from utils.logging.console_logger import ConsoleLogger
from utils.ssh_conn import SshConn
-class CliAccess(BinaryConverter, Fetcher):
+class CliAccess(Fetcher, BinaryConverter):
connections = {}
ssh_cmd = "ssh -q -o StrictHostKeyChecking=no "
call_count_per_con = {}
@@ -71,8 +71,9 @@ class CliAccess(BinaryConverter, Fetcher):
self.cached_commands[cmd_path] = {"timestamp": curr_time, "result": ret}
return ret
- def run_fetch_lines(self, cmd, ssh_to_host="", enable_cache=True):
- out = self.run(cmd, ssh_to_host, enable_cache)
+ def run_fetch_lines(self, cmd, ssh_to_host="", enable_cache=True,
+ use_sudo=True):
+ out = self.run(cmd, ssh_to_host, enable_cache, use_sudo=use_sudo)
if not out:
return []
# first try to split lines by whitespace
@@ -236,7 +237,7 @@ class CliAccess(BinaryConverter, Fetcher):
self.find_matching_regexps(o, line, regexps)
for regexp_tuple in regexps:
name = regexp_tuple['name']
- if 'name' not in o and 'default' in regexp_tuple:
+ if name not in o and 'default' in regexp_tuple:
o[name] = regexp_tuple['default']
@staticmethod
@@ -247,4 +248,8 @@
             regex = re.compile(regex)
             matches = regex.search(line)
             if matches and name not in o:
-                o[name] = matches.group(1)
+                try:
+                    o[name] = matches.group(1)
+                except IndexError:
+                    ConsoleLogger().error('failed to find group 1 in match, {}'
+                                          .format(str(regexp_tuple)))
diff --git a/app/discover/fetchers/cli/cli_fetch_host_pnics.py b/app/discover/fetchers/cli/cli_fetch_host_pnics.py
index 26cd603..81d164d 100644
--- a/app/discover/fetchers/cli/cli_fetch_host_pnics.py
+++ b/app/discover/fetchers/cli/cli_fetch_host_pnics.py
@@ -27,8 +27,8 @@ class CliFetchHostPnics(CliAccess):
'description': 'IPv6 Address'}
]
- def get(self, id):
- host_id = id[:id.rindex("-")]
+ def get(self, parent_id):
+ host_id = parent_id[:parent_id.rindex("-")]
cmd = 'ls -l /sys/class/net | grep ^l | grep -v "/virtual/"'
host = self.inv.get_by_id(self.get_env(), host_id)
if not host:
@@ -39,7 +39,8 @@ class CliFetchHostPnics(CliAccess):
", host: " + str(host))
return []
host_types = host["host_type"]
- if "Network" not in host_types and "Compute" not in host_types:
+ accepted_host_types = ['Network', 'Compute', 'Kube-node']
+ if not [t for t in accepted_host_types if t in host_types]:
return []
interface_lines = self.run_fetch_lines(cmd, host_id)
interfaces = []
diff --git a/app/discover/fetchers/cli/cli_fetch_vconnectors_ovs.py b/app/discover/fetchers/cli/cli_fetch_vconnectors_ovs.py
index ff37569..ac04568 100644
--- a/app/discover/fetchers/cli/cli_fetch_vconnectors_ovs.py
+++ b/app/discover/fetchers/cli/cli_fetch_vconnectors_ovs.py
@@ -18,8 +18,8 @@ class CliFetchVconnectorsOvs(CliFetchVconnectors):
def get_vconnectors(self, host):
host_id = host['id']
- lines = self.run_fetch_lines("brctl show", host_id)
- headers = ["bridge_name", "bridge_id", "stp_enabled", "interfaces"]
+ lines = self.run_fetch_lines('brctl show', host_id)
+ headers = ['bridge_name', 'bridge_id', 'stp_enabled', 'interfaces']
headers_count = len(headers)
# since we hard-coded the headers list, remove the headers line
del lines[:1]
@@ -31,26 +31,32 @@ class CliFetchVconnectorsOvs(CliFetchVconnectors):
results = self.parse_cmd_result_with_whitespace(fixed_lines, headers, False)
ret = []
for doc in results:
- doc["name"] = doc.pop("bridge_name")
- doc["id"] = doc["name"] + "-" + doc.pop("bridge_id")
- doc["host"] = host_id
- doc["connector_type"] = "bridge"
- if "interfaces" in doc:
- interfaces = {}
- interface_names = doc["interfaces"].split(",")
- for interface_name in interface_names:
- # find MAC address for this interface from ports list
- port_id_prefix = interface_name[3:]
- port = self.inv.find_items({
- "environment": self.get_env(),
- "type": "port",
- "binding:host_id": host_id,
- "id": {"$regex": r"^" + re.escape(port_id_prefix)}
- }, get_single=True)
- mac_address = '' if not port else port['mac_address']
- interface = {'name': interface_name, 'mac_address': mac_address}
- interfaces[interface_name] = interface
- doc["interfaces"] = interfaces
- doc['interfaces_names'] = list(interfaces.keys())
- ret.append(doc)
+ doc['name'] = '{}-{}'.format(host_id, doc['bridge_name'])
+ doc['id'] = '{}-{}'.format(doc['name'], doc.pop('bridge_id'))
+ doc['host'] = host_id
+ doc['connector_type'] = 'bridge'
+ self.get_vconnector_interfaces(doc, host_id)
+ ret.append(doc)
return ret
+
+ def get_vconnector_interfaces(self, doc, host_id):
+ if 'interfaces' not in doc:
+ doc['interfaces'] = {}
+ doc['interfaces_names'] = []
+ return
+ interfaces = {}
+ interface_names = doc['interfaces'].split(',')
+ for interface_name in interface_names:
+ # find MAC address for this interface from ports list
+ port_id_prefix = interface_name[3:]
+ port = self.inv.find_items({
+ 'environment': self.get_env(),
+ 'type': 'port',
+ 'binding:host_id': host_id,
+ 'id': {'$regex': r'^' + re.escape(port_id_prefix)}
+ }, get_single=True)
+ mac_address = '' if not port else port['mac_address']
+ interface = {'name': interface_name, 'mac_address': mac_address}
+ interfaces[interface_name] = interface
+ doc['interfaces'] = interfaces
+ doc['interfaces_names'] = list(interfaces.keys())
diff --git a/app/discover/fetchers/cli/cli_fetch_vservice_vnics.py b/app/discover/fetchers/cli/cli_fetch_vservice_vnics.py
index 3bc3a5b..0129d3b 100644
--- a/app/discover/fetchers/cli/cli_fetch_vservice_vnics.py
+++ b/app/discover/fetchers/cli/cli_fetch_vservice_vnics.py
@@ -66,17 +66,15 @@ class CliFetchVserviceVnics(CliAccess):
master_parent_id = "{}-{}".format(host, service)
current = {
"id": host + "-" + name,
- "type": "vnic",
"vnic_type": "vservice_vnic",
"host": host,
"name": name,
- "master_parent_type": "vservice",
- "master_parent_id": master_parent_id,
- "parent_type": "vnics_folder",
- "parent_id": "{}-vnics".format(master_parent_id),
- "parent_text": "vNICs",
"lines": []
}
+ self.set_folder_parent(current, object_type="vnic",
+ master_parent_type="vservice",
+ master_parent_id=master_parent_id,
+ parent_text="vNICs")
interfaces.append(current)
self.handle_line(current, line_remainder)
else:
diff --git a/app/discover/fetchers/db/db_access.py b/app/discover/fetchers/db/db_access.py
index 090ab84..5ff49d5 100644
--- a/app/discover/fetchers/db/db_access.py
+++ b/app/discover/fetchers/db/db_access.py
@@ -38,8 +38,7 @@ class DbAccess(Fetcher):
conn = None
query_count_per_con = 0
- # connection timeout set to 30 seconds,
- # due to problems over long connections
+ # connection timeout set to 5 seconds
TIMEOUT = 5
def __init__(self, mysql_config=None):
@@ -47,6 +46,9 @@ class DbAccess(Fetcher):
self.config = {'mysql': mysql_config} if mysql_config \
else Configuration()
self.conf = self.config.get("mysql")
+ self.connect_timeout = int(self.conf['connect_timeout']) \
+ if 'connect_timeout' in self.conf \
+ else self.TIMEOUT
self.connect_to_db()
self.neutron_db = self.get_neutron_db_name()
@@ -55,16 +57,18 @@ class DbAccess(Fetcher):
return
try:
connector = mysql.connector
- DbAccess.conn = connector.connect(host=_host, port=_port,
- connection_timeout=self.TIMEOUT,
- user=_user,
- password=_pwd,
- database=_database,
- raise_on_warnings=True)
+ conn = connector.connect(host=_host, port=_port,
+ connection_timeout=self.connect_timeout,
+ user=_user,
+ password=_pwd,
+ database=_database,
+ raise_on_warnings=True)
+ DbAccess.conn = conn
DbAccess.conn.ping(True) # auto-reconnect if necessary
except Exception as e:
- self.log.critical("failed to connect to MySQL DB: {}"
- .format(str(e)))
+ msg = "failed to connect to MySQL DB: {}".format(str(e))
+ self.log.critical(msg)
+ raise ScanError(msg)
return
DbAccess.query_count_per_con = 0
@@ -93,8 +97,11 @@ class DbAccess(Fetcher):
DbAccess.conn = None
self.conf = self.config.get("mysql")
cnf = self.conf
+ pwd = cnf.get('pwd', '')
+ if not pwd:
+ raise ScanError('db_access: attribute pwd is missing')
self.db_connect(cnf.get('host', ''), cnf.get('port', ''),
- cnf.get('user', ''), cnf.get('pwd', ''),
+ cnf.get('user', ''), pwd,
cnf.get('schema', 'nova'))
@with_cursor
diff --git a/app/discover/fetchers/db/db_fetch_oteps.py b/app/discover/fetchers/db/db_fetch_oteps.py
index 85376ed..7721136 100644
--- a/app/discover/fetchers/db/db_fetch_oteps.py
+++ b/app/discover/fetchers/db/db_fetch_oteps.py
@@ -82,4 +82,4 @@ class DbFetchOteps(DbAccess, CliAccess, metaclass=Singleton):
interface = l.split(":")[1].strip()
if vconnector:
- doc["vconnector"] = vconnector
+ doc["vconnector"] = '{}-{}'.format(host_id, vconnector)
diff --git a/app/discover/fetchers/kube/__init__.py b/app/discover/fetchers/kube/__init__.py
new file mode 100644
index 0000000..b0637e9
--- /dev/null
+++ b/app/discover/fetchers/kube/__init__.py
@@ -0,0 +1,9 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
diff --git a/app/discover/fetchers/kube/kube_access.py b/app/discover/fetchers/kube/kube_access.py
new file mode 100644
index 0000000..38bb978
--- /dev/null
+++ b/app/discover/fetchers/kube/kube_access.py
@@ -0,0 +1,28 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from kubernetes.client import Configuration as KubConf, CoreV1Api
+
+from utils.api_access_base import ApiAccessBase
+
+
+class KubeAccess(ApiAccessBase):
+
+ def __init__(self, config=None):
+ super().__init__('Kubernetes', config)
+ self.base_url = 'https://{}:{}'.format(self.host, self.port)
+ self.bearer_token = self.api_config.get('token', '')
+ conf = KubConf()
+ conf.host = self.base_url
+ conf.user = self.api_config.get('user')
+ conf.api_key_prefix['authorization'] = 'Bearer'
+ conf.api_key['authorization'] = self.bearer_token
+ conf.verify_ssl = False
+ self.api = CoreV1Api()
+
diff --git a/app/discover/fetchers/kube/kube_fetch_namespaces.py b/app/discover/fetchers/kube/kube_fetch_namespaces.py
new file mode 100644
index 0000000..951ddb8
--- /dev/null
+++ b/app/discover/fetchers/kube/kube_fetch_namespaces.py
@@ -0,0 +1,32 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.kube.kube_access import KubeAccess
+
+
+class KubeFetchNamespaces(KubeAccess):
+
+ def __init__(self, config=None):
+ super().__init__(config)
+
+ def get(self, object_id):
+ namespaces = self.api.list_namespace()
+ return [self.get_namespace(i) for i in namespaces.items]
+
+ @staticmethod
+ def get_namespace(namespace):
+ attrs = ['creation_timestamp', 'self_link', 'uid']
+ namespace_details = {
+ 'name': namespace.metadata.name,
+ 'status': namespace.status.phase
+ }
+ namespace_details.update({x: getattr(namespace.metadata, x, '')
+ for x in attrs})
+ namespace_details['id'] = namespace_details['uid']
+ return namespace_details
diff --git a/app/discover/link_finders/find_implicit_links.py b/app/discover/link_finders/find_implicit_links.py
new file mode 100644
index 0000000..01eaa7b
--- /dev/null
+++ b/app/discover/link_finders/find_implicit_links.py
@@ -0,0 +1,128 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.link_finders.find_links import FindLinks
+
+
+class FindImplicitLinks(FindLinks):
+
+ def __init__(self):
+ super().__init__()
+ self.links = []
+ self.constraint_attributes = self.get_constraint_attributes()
+
+ def add_links(self):
+ self.log.info('adding implicit links')
+ self.get_existing_links()
+ self.get_transitive_closure()
+
+ def get_constraint_attributes(self) -> list:
+ attributes = set()
+ for c in self.inv.find({'environment': self.get_env()},
+ collection='clique_constraints'):
+ for a in c['constraints']:
+ attributes.add(a)
+ return list(attributes)
+
+ def get_existing_links(self):
+ self.log.info('fetching existing links')
+ existing_links = self.inv.find({'environment': self.get_env()},
+ collection='links')
+ for l in existing_links:
+ self.links.append({'pass': 0, 'link': l})
+
+ def constraints_match(self, link1, link2):
+ if 'attributes' not in link1 or 'attributes' not in link2:
+ return True
+ attr1 = link1['attributes']
+ attr2 = link2['attributes']
+ for a in self.constraint_attributes:
+ if a in attr1 and a in attr2 and attr1[a] != attr2[a]:
+ return False
+ return True
+
+ def links_match(self, start, dest):
+ if start['link_type'] == dest['link_type']:
+ return False # obviously we cannot make an implicit link of this
+ if start['source_id'] == dest['target_id']:
+ return False # avoid cyclic links
+ if not self.constraints_match(start, dest):
+ return False
+ return start['target_id'] == dest['source_id']
+
+ def add_matching_links(self, link, pass_no):
+ self.log.debug('looking for matches for link: {};{}'
+ .format(link['source_id'], link['target_id']))
+ matches = [l for l in self.links
+ if l['pass'] == 0 # take only original links
+ and self.links_match(link, l['link'])]
+ for l in matches:
+ implicit = self.add_implicit_link(link, l['link'])
+ self.links.append({'pass': pass_no, 'link': implicit})
+ return len(matches)
+
+ def get_link_constraint_attributes(self, link1, link2) -> dict:
+ attributes = {}
+ for a in self.constraint_attributes:
+ # constraints_match() verified the attribute values don't conflict
+ if a in link1.get('attributes', {}):
+ attributes[a] = link1['attributes'][a]
+ elif a in link2.get('attributes', {}):
+ attributes[a] = link2['attributes'][a]
+ return attributes
+
+ @staticmethod
+ def get_attr(attr, link1, link2):
+ if attr not in link1 and attr not in link2:
+ return None
+ if attr not in link1:
+ return link2[attr]
+ if attr not in link2 or link1[attr] == link2[attr]:
+ return link1[attr]
+ return None
+
+ def add_implicit_link(self, link1, link2):
+ link_type_from = link1['link_type'].split('-')[0]
+ link_type_to = link2['link_type'].split('-')[1]
+ link_type = '{}-{}'.format(link_type_from, link_type_to)
+ link_name = ''
+ state = 'down' \
+ if link1['state'] == 'down' or link2['state'] == 'down' \
+ else 'up'
+ link_weight = 0 # TBD
+ host = self.get_attr('host', link1, link2)
+ switch = self.get_attr('switch', link1, link2)
+ extra_attributes = self.get_link_constraint_attributes(link1, link2)
+ self.log.debug('adding implicit link: link type: {}, from: {}, to: {}'
+ .format(link_type,
+ link1['source_id'],
+ link2['target_id']))
+ implicit = self.create_link(self.get_env(),
+ link1['source'], link1['source_id'],
+ link2['target'], link2['target_id'],
+ link_type, link_name, state, link_weight,
+ host=host, switch=switch,
+ implicit=True,
+ extra_attributes=extra_attributes)
+ return implicit
+
+ def get_transitive_closure(self):
+ pass_no = 1
+ while True:
+ match_count = 0
+ last_pass_links = [l for l in self.links if l['pass'] == pass_no-1]
+ for l in last_pass_links:
+ match_count += self.add_matching_links(l['link'], pass_no)
+ self.log.info('Transitive closure pass #{}: '
+ 'found {} implicit links'
+ .format(pass_no, match_count))
+ if match_count == 0:
+ break
+ pass_no += 1
+ self.log.info('done adding implicit links')
diff --git a/app/discover/link_finders/find_links.py b/app/discover/link_finders/find_links.py
index d234479..31d39e5 100644
--- a/app/discover/link_finders/find_links.py
+++ b/app/discover/link_finders/find_links.py
@@ -19,6 +19,7 @@ class FindLinks(Fetcher):
def create_link(self, env, source, source_id, target, target_id,
link_type, link_name, state, link_weight,
host=None, switch=None,
+ implicit=False,
extra_attributes=None):
if extra_attributes is None:
extra_attributes = {}
@@ -27,9 +28,11 @@ class FindLinks(Fetcher):
link = self.inv.create_link(env,
source, source_id, target, target_id,
link_type, link_name, state, link_weight,
+ implicit=implicit,
source_label=source_label,
target_label=target_label,
host=host, switch=switch,
extra_attributes=extra_attributes)
if self.inv.monitoring_setup_manager:
self.inv.monitoring_setup_manager.create_setup(link)
+ return link
diff --git a/app/discover/link_finders/find_links_for_instance_vnics.py b/app/discover/link_finders/find_links_for_instance_vnics.py
index 975ab1a..1dfb818 100644
--- a/app/discover/link_finders/find_links_for_instance_vnics.py
+++ b/app/discover/link_finders/find_links_for_instance_vnics.py
@@ -49,6 +49,8 @@ class FindLinksForInstanceVnics(FindLinks):
network_id = net['network']['id']
v['network'] = network_id
self.inv.set(v)
+ if self.inv.monitoring_setup_manager:
+ self.inv.monitoring_setup_manager.create_setup(instance)
break
state = "up" # TBD
link_weight = 0 # TBD
diff --git a/app/discover/link_finders/find_links_for_vedges.py b/app/discover/link_finders/find_links_for_vedges.py
index f9719b4..afabdbe 100644
--- a/app/discover/link_finders/find_links_for_vedges.py
+++ b/app/discover/link_finders/find_links_for_vedges.py
@@ -104,8 +104,6 @@ class FindLinksForVedges(FindLinks):
if "pnic" in vedge:
if pname != vedge["pnic"]:
return
- elif self.configuration.has_network_plugin('VPP'):
- pass
pnic = self.inv.find_items({
"environment": self.get_env(),
"type": "host_pnic",
diff --git a/app/discover/scan.py b/app/discover/scan.py
index 49f37ff..fb5e833 100755
--- a/app/discover/scan.py
+++ b/app/discover/scan.py
@@ -22,6 +22,7 @@ from discover.scan_error import ScanError
from discover.scanner import Scanner
from monitoring.setup.monitoring_setup_manager import MonitoringSetupManager
from utils.constants import EnvironmentFeatures
+from utils.origins import ScanOrigin, ScanOrigins
from utils.mongo_access import MongoAccess
from utils.exceptions import ScanArgumentsError
from utils.inventory_mgr import InventoryMgr
@@ -112,6 +113,7 @@ class ScanPlan:
class ScanController(Fetcher):
DEFAULTS = {
+ "_id": None,
"env": "",
"mongo_config": "",
"type": "",
@@ -126,7 +128,8 @@ class ScanController(Fetcher):
"cliques_only": False,
"monitoring_setup_only": False,
"clear": False,
- "clear_all": False
+ "clear_all": False,
+ "scheduled": False
}
def __init__(self):
@@ -274,9 +277,13 @@ class ScanController(Fetcher):
self.conf.use_env(env_name)
# generate ScanObject Class and instance.
+ origin = ScanOrigin(origin_id=args['_id'],
+ origin_type=ScanOrigins.SCHEDULED
+ if args["scheduled"]
+ else ScanOrigins.MANUAL)
scanner = Scanner()
scanner.log.set_loglevel(args['loglevel'])
- scanner.set_env(env_name)
+ scanner.setup(env=env_name, origin=origin)
scanner.found_errors[env_name] = False
# decide what scanning operations to do
diff --git a/app/discover/scan_manager.py b/app/discover/scan_manager.py
index 6c46d47..6e31bbd 100644
--- a/app/discover/scan_manager.py
+++ b/app/discover/scan_manager.py
@@ -103,7 +103,8 @@ class ScanManager(Manager):
def _build_scan_args(self, scan_request: dict):
args = {
- 'mongo_config': self.args.mongo_config
+ 'mongo_config': self.args.mongo_config,
+ 'scheduled': True if scan_request.get('interval') else False
}
def set_arg(name_from: str, name_to: str = None):
@@ -113,6 +114,7 @@ class ScanManager(Manager):
if val:
args[name_to] = val
+ set_arg("_id")
set_arg("object_id", "id")
set_arg("log_level", "loglevel")
set_arg("environment", "env")
@@ -219,71 +221,74 @@ class ScanManager(Manager):
for interval in self.INTERVALS.keys():
self._prepare_scheduled_requests_for_interval(interval)
+ def handle_scans(self):
+ self._prepare_scheduled_requests()
+
+ # Find a pending request that is waiting the longest time
+ results = self.scans_collection \
+ .find({'status': ScanStatus.PENDING.value,
+ 'submit_timestamp': {'$ne': None}}) \
+ .sort("submit_timestamp", pymongo.ASCENDING) \
+ .limit(1)
+
+ # If no scans are pending, sleep for some time
+ if results.count() == 0:
+ time.sleep(self.interval)
+ else:
+ scan_request = results[0]
+ env = scan_request.get('environment')
+ scan_feature = EnvironmentFeatures.SCANNING
+ if not self.inv.is_feature_supported(env, scan_feature):
+ self.log.error("Scanning is not supported for env '{}'"
+ .format(scan_request.get('environment')))
+ self._fail_scan(scan_request)
+ return
+
+ scan_request['start_timestamp'] = datetime.datetime.utcnow()
+ scan_request['status'] = ScanStatus.RUNNING.value
+ self._update_document(scan_request)
+
+ # Prepare scan arguments and run the scan with them
+ try:
+ scan_args = self._build_scan_args(scan_request)
+
+ self.log.info("Starting scan for '{}' environment"
+ .format(scan_args.get('env')))
+ self.log.debug("Scan arguments: {}".format(scan_args))
+ result, message = ScanController().run(scan_args)
+ except ScanArgumentsError as e:
+ self.log.error("Scan request '{id}' "
+ "has invalid arguments. "
+ "Errors:\n{errors}"
+ .format(id=scan_request['_id'],
+ errors=e))
+ self._fail_scan(scan_request)
+ except Exception as e:
+ self.log.exception(e)
+ self.log.error("Scan request '{}' has failed."
+ .format(scan_request['_id']))
+ self._fail_scan(scan_request)
+ else:
+ # Check is scan returned success
+ if not result:
+ self.log.error(message)
+ self.log.error("Scan request '{}' has failed."
+ .format(scan_request['_id']))
+ self._fail_scan(scan_request)
+ return
+
+ # update the status and timestamps.
+ self.log.info("Request '{}' has been scanned. ({})"
+ .format(scan_request['_id'], message))
+ end_time = datetime.datetime.utcnow()
+ scan_request['end_timestamp'] = end_time
+ self._complete_scan(scan_request, message)
+
def do_action(self):
self._clean_up()
try:
while True:
- self._prepare_scheduled_requests()
-
- # Find a pending request that is waiting the longest time
- results = self.scans_collection \
- .find({'status': ScanStatus.PENDING.value,
- 'submit_timestamp': {'$ne': None}}) \
- .sort("submit_timestamp", pymongo.ASCENDING) \
- .limit(1)
-
- # If no scans are pending, sleep for some time
- if results.count() == 0:
- time.sleep(self.interval)
- else:
- scan_request = results[0]
- env = scan_request.get('environment')
- scan_feature = EnvironmentFeatures.SCANNING
- if not self.inv.is_feature_supported(env, scan_feature):
- self.log.error("Scanning is not supported for env '{}'"
- .format(scan_request.get('environment')))
- self._fail_scan(scan_request)
- continue
-
- scan_request['start_timestamp'] = datetime.datetime.utcnow()
- scan_request['status'] = ScanStatus.RUNNING.value
- self._update_document(scan_request)
-
- # Prepare scan arguments and run the scan with them
- try:
- scan_args = self._build_scan_args(scan_request)
-
- self.log.info("Starting scan for '{}' environment"
- .format(scan_args.get('env')))
- self.log.debug("Scan arguments: {}".format(scan_args))
- result, message = ScanController().run(scan_args)
- except ScanArgumentsError as e:
- self.log.error("Scan request '{id}' "
- "has invalid arguments. "
- "Errors:\n{errors}"
- .format(id=scan_request['_id'],
- errors=e))
- self._fail_scan(scan_request)
- except Exception as e:
- self.log.exception(e)
- self.log.error("Scan request '{}' has failed."
- .format(scan_request['_id']))
- self._fail_scan(scan_request)
- else:
- # Check is scan returned success
- if not result:
- self.log.error(message)
- self.log.error("Scan request '{}' has failed."
- .format(scan_request['_id']))
- self._fail_scan(scan_request)
- continue
-
- # update the status and timestamps.
- self.log.info("Request '{}' has been scanned. ({})"
- .format(scan_request['_id'], message))
- end_time = datetime.datetime.utcnow()
- scan_request['end_timestamp'] = end_time
- self._complete_scan(scan_request, message)
+ self.handle_scans()
finally:
self._clean_up()
diff --git a/app/discover/scan_metadata_parser.py b/app/discover/scan_metadata_parser.py
index df27e18..8757f79 100644
--- a/app/discover/scan_metadata_parser.py
+++ b/app/discover/scan_metadata_parser.py
@@ -49,21 +49,28 @@ class ScanMetadataParser(MetadataParser):
self.add_error('missing or empty fetcher in scanner {} type #{}'
.format(scanner_name, str(type_index)))
elif isinstance(fetcher, str):
+ error_str = None
try:
- module_name = ClassResolver.get_module_file_by_class_name(fetcher)
+ get_module = ClassResolver.get_module_file_by_class_name
+ module_name = get_module(fetcher)
fetcher_package = module_name.split("_")[0]
if package:
fetcher_package = ".".join((package, fetcher_package))
- instance = ClassResolver.get_instance_of_class(package_name=fetcher_package,
- module_name=module_name,
- class_name=fetcher)
- except ValueError:
- instance = None
- if not instance:
+ # get the fetcher qualified class but not a class instance
+ # instances will be created just-in-time (before fetching):
+ # this avoids init of access classes not needed in some envs
+ get_class = ClassResolver.get_fully_qualified_class
+ class_qualified = get_class(fetcher, fetcher_package,
+ module_name)
+ except ValueError as e:
+ class_qualified = None
+ error_str = str(e)
+ if not class_qualified:
self.add_error('failed to find fetcher class {} in scanner {}'
- ' type #{}'
- .format(fetcher, scanner_name, type_index))
- scan_type[self.FETCHER] = instance
+ ' type #{} ({})'
+ .format(fetcher, scanner_name, type_index,
+ error_str))
+ scan_type[self.FETCHER] = class_qualified
elif isinstance(fetcher, dict):
is_folder = fetcher.get('folder', False)
if not is_folder:
@@ -81,7 +88,6 @@ class ScanMetadataParser(MetadataParser):
def validate_children_scanner(self, scanner_name: str, type_index: int,
scanners: dict, scan_type: dict):
- scanner = scanners[scanner_name]
if 'children_scanner' in scan_type:
children_scanner = scan_type.get('children_scanner')
if not isinstance(children_scanner, str):
diff --git a/app/discover/scanner.py b/app/discover/scanner.py
index 1fbcc68..8d36baf 100644
--- a/app/discover/scanner.py
+++ b/app/discover/scanner.py
@@ -10,6 +10,7 @@
# base class for scanners
import json
+
import os
import queue
import traceback
@@ -26,6 +27,7 @@ from utils.ssh_connection import SshError
class Scanner(Fetcher):
+
config = None
environment = None
env = None
@@ -82,27 +84,42 @@ class Scanner(Fetcher):
def check_type_env(self, type_to_fetch):
# check if type is to be run in this environment
- if "environment_condition" not in type_to_fetch:
- return True
- env_cond = type_to_fetch.get("environment_condition", {})
+ basic_cond = {'environment_type': self.ENV_TYPE_OPENSTACK}
+ env_cond = type_to_fetch.get("environment_condition", {}) \
+ if "environment_condition" in type_to_fetch \
+ else basic_cond
if not env_cond:
- return True
+ env_cond = basic_cond
+ if 'environment_type' not in env_cond.keys():
+ env_cond.update(basic_cond)
if not isinstance(env_cond, dict):
- self.log.warn('illegal environment_condition given '
- 'for type {}'.format(type_to_fetch['type']))
+ self.log.warn('Illegal environment_condition given '
+ 'for type {type}'.format(type=type_to_fetch['type']))
return True
conf = self.config.get_env_config()
+ if 'environment_type' not in conf:
+ conf.update(basic_cond)
for attr, required_val in env_cond.items():
if attr == "mechanism_drivers":
if "mechanism_drivers" not in conf:
- self.log.warn('illegal environment configuration: '
+ self.log.warn('Illegal environment configuration: '
'missing mechanism_drivers')
return False
if not isinstance(required_val, list):
required_val = [required_val]
- return bool(set(required_val) & set(conf["mechanism_drivers"]))
- elif attr not in conf or conf[attr] != required_val:
+ value_ok = bool(set(required_val) &
+ set(conf["mechanism_drivers"]))
+ if not value_ok:
+ return False
+ elif attr not in conf:
return False
+ else:
+ if isinstance(required_val, list):
+ if conf[attr] not in required_val:
+ return False
+ else:
+ if conf[attr] != required_val:
+ return False
# no check failed
return True
@@ -120,18 +137,23 @@ class Scanner(Fetcher):
# get Fetcher instance
fetcher = type_to_fetch["fetcher"]
- fetcher.set_env(self.get_env())
+ if not isinstance(fetcher, Fetcher):
+ type_to_fetch['fetcher'] = fetcher() # make it an instance
+ fetcher = type_to_fetch["fetcher"]
+ fetcher.setup(env=self.get_env(), origin=self.origin)
# get children_scanner instance
children_scanner = type_to_fetch.get("children_scanner")
escaped_id = fetcher.escape(str(obj_id)) if obj_id else obj_id
self.log.info(
- "scanning : type=%s, parent: (type=%s, name=%s, id=%s)",
- type_to_fetch["type"],
- parent.get('type', 'environment'),
- parent.get('name', ''),
- escaped_id)
+ "Scanning: type={type}, "
+ "parent: (type={parent_type}, "
+ "name={parent_name}, "
+ "id={parent_id})".format(type=type_to_fetch["type"],
+ parent_type=parent.get('type', 'environment'),
+ parent_name=parent.get('name', ''),
+ parent_id=escaped_id))
# fetch OpenStack data from environment by CLI, API or MySQL
# or physical devices data from ACI API
@@ -142,18 +164,21 @@ class Scanner(Fetcher):
self.found_errors[self.get_env()] = True
return []
except Exception as e:
- self.log.error("Error while scanning : " +
- "fetcher=%s, " +
- "type=%s, " +
- "parent: (type=%s, name=%s, id=%s), " +
- "error: %s",
- fetcher.__class__.__name__,
- type_to_fetch["type"],
- "environment" if "type" not in parent
- else parent["type"],
- "" if "name" not in parent else parent["name"],
- escaped_id,
- e)
+ self.log.error(
+ "Error while scanning: fetcher={fetcher}, type={type}, "
+ "parent: (type={parent_type}, name={parent_name}, "
+ "id={parent_id}), "
+ "error: {error}".format(fetcher=fetcher.__class__.__name__,
+ type=type_to_fetch["type"],
+ parent_type="environment"
+ if "type" not in parent
+ else parent["type"],
+ parent_name=""
+ if "name" not in parent
+ else parent["name"],
+ parent_id=escaped_id,
+ error=e))
+
traceback.print_exc()
raise ScanError(str(e))
@@ -220,14 +245,16 @@ class Scanner(Fetcher):
self.log.info("Scan complete")
def scan_links(self):
- self.log.info("scanning for links")
+ self.log.info("Scanning for links")
for fetcher in self.link_finders:
- fetcher.set_env(self.get_env())
+ fetcher.setup(env=self.get_env(),
+ origin=self.origin)
fetcher.add_links()
def scan_cliques(self):
clique_scanner = CliqueFinder()
- clique_scanner.set_env(self.get_env())
+ clique_scanner.setup(env=self.get_env(),
+ origin=self.origin)
clique_scanner.find_cliques()
def deploy_monitoring_setup(self):
@@ -254,7 +281,6 @@ class Scanner(Fetcher):
def load_link_finders_metadata(self):
parser = FindLinksMetadataParser()
- conf = self.config.get_env_config()
finders_file = os.path.join(self.get_run_app_path(),
'config',
FindLinksMetadataParser.FINDERS_FILE)