-rw-r--r--  .gitignore | 2
-rw-r--r--  INFO | 2
-rw-r--r--  INFO.yaml | 47
-rw-r--r--  LICENSE | 2
-rw-r--r--  README.md | 2
-rw-r--r--  app/api/responders/resource/clique_types.py | 140
-rw-r--r--  app/api/responders/resource/environment_configs.py | 76
-rw-r--r--  app/api/responders/responder_base.py | 6
-rw-r--r--  app/api/validation/data_validate.py | 4
-rw-r--r--  app/config/link_finders.json | 3
-rw-r--r--  app/config/scanners.json | 25
-rw-r--r--  app/connection_test/connection_test.py | 283
-rw-r--r--  app/discover/clique_finder.py | 192
-rw-r--r--  app/discover/configuration.py | 4
-rw-r--r--  app/discover/event_manager.py | 4
-rw-r--r--  app/discover/events/event_base.py | 3
-rw-r--r--  app/discover/events/event_instance_add.py | 2
-rw-r--r--  app/discover/events/event_interface_add.py | 6
-rw-r--r--  app/discover/events/event_port_add.py | 8
-rw-r--r--  app/discover/events/event_port_delete.py | 2
-rw-r--r--  app/discover/events/event_router_add.py | 4
-rw-r--r--  app/discover/events/event_router_update.py | 4
-rw-r--r--  app/discover/events/event_subnet_add.py | 6
-rw-r--r--  app/discover/events/event_subnet_update.py | 6
-rw-r--r--  app/discover/fetcher.py | 51
-rw-r--r--  app/discover/fetchers/api/api_access.py | 61
-rw-r--r--  app/discover/fetchers/api/api_fetch_availability_zones.py | 11
-rw-r--r--  app/discover/fetchers/api/api_fetch_host_instances.py | 2
-rw-r--r--  app/discover/fetchers/api/api_fetch_network.py | 18
-rw-r--r--  app/discover/fetchers/api/api_fetch_networks.py | 15
-rw-r--r--  app/discover/fetchers/api/api_fetch_port.py | 8
-rw-r--r--  app/discover/fetchers/api/api_fetch_ports.py | 8
-rw-r--r--  app/discover/fetchers/api/api_fetch_project_hosts.py | 44
-rw-r--r--  app/discover/fetchers/api/api_fetch_regions.py | 2
-rw-r--r--  app/discover/fetchers/cli/cli_access.py | 15
-rw-r--r--  app/discover/fetchers/cli/cli_fetch_host_pnics.py | 7
-rw-r--r--  app/discover/fetchers/cli/cli_fetch_vconnectors_ovs.py | 54
-rw-r--r--  app/discover/fetchers/cli/cli_fetch_vservice_vnics.py | 10
-rw-r--r--  app/discover/fetchers/db/db_access.py | 29
-rw-r--r--  app/discover/fetchers/db/db_fetch_oteps.py | 2
-rw-r--r--  app/discover/fetchers/kube/__init__.py | 9
-rw-r--r--  app/discover/fetchers/kube/kube_access.py | 28
-rw-r--r--  app/discover/fetchers/kube/kube_fetch_namespaces.py | 32
-rw-r--r--  app/discover/link_finders/find_implicit_links.py | 128
-rw-r--r--  app/discover/link_finders/find_links.py | 3
-rw-r--r--  app/discover/link_finders/find_links_for_instance_vnics.py | 2
-rw-r--r--  app/discover/link_finders/find_links_for_vedges.py | 2
-rwxr-xr-x  app/discover/scan.py | 11
-rw-r--r--  app/discover/scan_manager.py | 129
-rw-r--r--  app/discover/scan_metadata_parser.py | 28
-rw-r--r--  app/discover/scanner.py | 88
-rw-r--r--  app/install/calipso-installer.py | 92
-rw-r--r--  app/install/db/clique_types.json | 26
-rw-r--r--  app/install/db/constants.json | 33
-rw-r--r--  app/install/db/environments_config.json | 94
-rw-r--r--  app/install/db/monitoring_config_templates.json | 52
-rw-r--r--  app/install/db/supported_environments.json | 15
-rw-r--r--  app/messages/message.py | 43
-rw-r--r--  app/monitoring/checks/check_instance_communictions.py | 85
-rwxr-xr-x  app/monitoring/checks/check_ping.py | 2
-rwxr-xr-x  app/monitoring/checks/check_pnic_ovs.py | 4
-rwxr-xr-x  app/monitoring/checks/check_pnic_vpp.py | 2
-rw-r--r--  app/monitoring/checks/check_vconnector.py | 50
-rwxr-xr-x  app/monitoring/checks/check_vedge_ovs.py | 2
-rwxr-xr-x  app/monitoring/checks/check_vedge_vpp.py | 2
-rwxr-xr-x  app/monitoring/checks/check_vnic_vconnector.py | 2
-rwxr-xr-x  app/monitoring/checks/check_vnic_vpp.py | 2
-rw-r--r--  app/monitoring/checks/check_vservice.py | 2
-rw-r--r--  app/monitoring/handlers/handle_otep.py | 2
-rw-r--r--  app/monitoring/handlers/handle_vconnector.py | 28
-rwxr-xr-x  app/monitoring/handlers/monitor.py | 83
-rw-r--r--  app/monitoring/handlers/monitoring_check_handler.py | 36
-rw-r--r--  app/monitoring/setup/monitoring_check_handler.py | 15
-rw-r--r--  app/monitoring/setup/monitoring_handler.py | 27
-rw-r--r--  app/monitoring/setup/monitoring_host.py | 11
-rw-r--r--  app/monitoring/setup/monitoring_instance.py | 67
-rw-r--r--  app/monitoring/setup/monitoring_setup_manager.py | 4
-rw-r--r--  app/monitoring/setup/monitoring_vconnector.py | 24
-rw-r--r--  app/monitoring/setup/sensu_client_installer.py | 158
-rw-r--r--  app/test/api/responders_test/resource/test_clique_types.py | 108
-rw-r--r--  app/test/api/responders_test/resource/test_environment_configs.py | 311
-rw-r--r--  app/test/api/responders_test/test_data/base.py | 71
-rw-r--r--  app/test/api/responders_test/test_data/clique_types.py | 201
-rw-r--r--  app/test/api/responders_test/test_data/environment_configs.py | 6
-rw-r--r--  app/test/api/test_base.py | 28
-rw-r--r--  app/test/fetch/api_fetch/test_api_access.py | 73
-rw-r--r--  app/test/fetch/api_fetch/test_api_fetch_project_hosts.py | 43
-rw-r--r--  app/test/fetch/api_fetch/test_data/api_fetch_host_project_hosts.py | 21
-rw-r--r--  app/test/fetch/api_fetch/test_data/api_fetch_networks.py | 1
-rw-r--r--  app/test/fetch/api_fetch/test_data/api_fetch_ports.py | 3
-rw-r--r--  app/test/fetch/db_fetch/test_data/db_fetch_oteps.py | 2
-rw-r--r--  app/test/fetch/link_finders/__init__.py | 9
-rw-r--r--  app/test/fetch/link_finders/test_data/__init__.py | 9
-rw-r--r--  app/test/fetch/link_finders/test_data/test_find_implicit_links.py | 303
-rw-r--r--  app/test/fetch/link_finders/test_find_implicit_links.py | 107
-rw-r--r--  app/test/scan/test_data/configurations.py | 1
-rw-r--r--  app/test/scan/test_data/scanner.py | 76
-rw-r--r--  app/test/scan/test_scan_metadata_parser.py | 2
-rw-r--r--  app/test/scan/test_scanner.py | 27
-rwxr-xr-x  app/test/verify.sh | 17
-rw-r--r--  app/utils/api_access_base.py | 51
-rw-r--r--  app/utils/dict_naming_converter.py | 35
-rw-r--r--  app/utils/inventory_mgr.py | 89
-rw-r--r--  app/utils/logging/console_logger.py | 1
-rw-r--r--  app/utils/logging/full_logger.py | 44
-rw-r--r--  app/utils/logging/logger.py | 7
-rw-r--r--  app/utils/logging/message_logger.py | 14
-rw-r--r--  app/utils/logging/mongo_logging_handler.py | 20
-rw-r--r--  app/utils/mongo_access.py | 6
-rw-r--r--  app/utils/ssh_connection.py | 6
-rw-r--r--  app/utils/util.py | 22
-rwxr-xr-x  calipso/tests/functest/smoke_test.py | 2
-rwxr-xr-x  docs/_static/favicon.ico | bin 15086 -> 0 bytes
-rw-r--r--  docs/_static/my-styles.css | 33
-rw-r--r--  docs/_static/opnfv-logo.png | bin 2829 -> 0 bytes
-rw-r--r--  docs/conf.py | 283
-rw-r--r--  docs/conf.yaml | 3
-rw-r--r--  docs/development/apex-scenario-guide.rst (renamed from docs/release/scenarios/os-nosdn-calipso-noha/apex-scenario-guide.rst) | 0
-rw-r--r--  docs/index.rst | 2
-rw-r--r--  docs/release/Calipso-usage-stories.rst | 446
-rw-r--r--  docs/release/apex-scenario-guide.rst | 282
-rw-r--r--  docs/release/developer-guide.pdf | bin 0 -> 252310 bytes
-rw-r--r--  docs/release/developer-guide.rst | 1338
-rw-r--r--  docs/release/install-guide.pdf | bin 151053 -> 152041 bytes
-rw-r--r--  docs/release/install-guide.rst | 189
-rw-r--r--  docs/release/media/image101.png | bin 0 -> 119090 bytes
-rw-r--r--  docs/release/media/image102.png | bin 0 -> 104849 bytes
-rw-r--r--  docs/release/media/image103.png | bin 0 -> 10664 bytes
-rw-r--r--  docs/release/media/image104.png | bin 0 -> 37854 bytes
-rw-r--r--  docs/release/media/image105.png | bin 0 -> 23555 bytes
-rw-r--r--  docs/release/media/image106.png | bin 0 -> 58686 bytes
-rw-r--r--  docs/release/media/image107.png | bin 0 -> 89583 bytes
-rw-r--r--  docs/release/monitoring-guide.pdf | bin 259887 -> 260593 bytes
-rw-r--r--  docs/release/monitoring-guide.rst | 16
-rw-r--r--  docs/release/scenarios/os-nosdn-calipso-noha/index.rst | 15
-rw-r--r--  docs/requirements.txt | 2
-rw-r--r--  tox.ini | 6
137 files changed, 5323 insertions, 1601 deletions
diff --git a/.gitignore b/.gitignore
index 0b8f528..9dd102d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,3 +6,5 @@
mongo*.conf
mongo_access.log
.DS_Store
+.tox
+docs/_build/*
diff --git a/INFO b/INFO
index 83fdb69..3a83585 100644
--- a/INFO
+++ b/INFO
@@ -1,4 +1,4 @@
-Project: Virtual Infrastructure Network Assurance (Calipso)
+Project: Calipso - Virtual Infrastructure Networking Assurance
Project Creation Date: March 21st 2017
Project Category:
Lifecycle State:
diff --git a/INFO.yaml b/INFO.yaml
new file mode 100644
index 0000000..84b06c8
--- /dev/null
+++ b/INFO.yaml
@@ -0,0 +1,47 @@
+---
+project: 'Virtual Infrastructure Network Assurance (Calipso)'
+project_creation_date: 'March 21st 2017'
+project_category: ''
+lifecycle_state: ''
+project_lead: &opnfv_calipso_ptl
+ name: 'Koren Lev'
+ email: 'korenlev@gmail.com'
+ company: 'gmail.com'
+ id: 'KorenLev'
+primary_contact: *opnfv_calipso_ptl
+issue_tracking:
+ type: 'jira'
+ url: 'https://jira.opnfv.org/projects/CALIPSO'
+ key: 'CALIPSO'
+mailing_list:
+ type: 'mailman2'
+ url: 'opnfv-tech-discuss@lists.opnfv.org'
+ tag: '[calipso]'
+realtime_discussion:
+ type: irc
+ server: 'freenode.net'
+ channel: '#opnfv-calipso'
+meetings:
+ - type: 'gotomeeting+irc'
+ agenda: # eg: 'https://wiki.opnfv.org/display/'
+ url: # eg: 'https://global.gotomeeting.com/join/819733085'
+ server: 'freenode.net'
+ channel: '#opnfv-meeting'
+ repeats: 'weekly'
+ time: # eg: '16:00 UTC'
+repositories:
+ - 'calipso'
+committers:
+ - <<: *opnfv_calipso_ptl
+ - name: 'Frank Brockners'
+ email: 'fbrockne@cisco.com'
+ company: 'cisco.com'
+ id: 'brockners'
+ - name: 'Yaron Yogev'
+ email: 'yaronyogev@gmail.com'
+ company: 'gmail.com'
+ id: 'yaronyogev'
+tsc:
+ # yamllint disable rule:line-length
+ approval: ''
+ # yamllint enable rule:line-length
diff --git a/LICENSE b/LICENSE
index 9455af2..7f9c19c 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems)
+Copyright (c) 2018 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems)
and others
Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/README.md b/README.md
index 9336f86..7d39caa 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems)
+Copyright (c) 2018 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems)
and others
Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/app/api/responders/resource/clique_types.py b/app/api/responders/resource/clique_types.py
index ff42f8c..e2e9e71 100644
--- a/app/api/responders/resource/clique_types.py
+++ b/app/api/responders/resource/clique_types.py
@@ -21,31 +21,53 @@ class CliqueTypes(ResponderBase):
"focal_point_type": True,
"link_types": True,
"environment": True,
- "name": True
+ "name": True,
+ "distribution": True,
+ "distribution_version": True,
+ "mechanism_drivers": True,
+ "type_drivers": True,
+ "use_implicit_links": True
}
RESERVED_NAMES = ["ANY"]
+ def __init__(self):
+ super().__init__()
+ self.focal_point_types = self.get_constants_by_name("object_types")
+ self.link_types = self.get_constants_by_name("link_types")
+ self.mechanism_drivers = self.get_constants_by_name("mechanism_drivers")
+ self.type_drivers = self.get_constants_by_name("type_drivers")
+
def on_get(self, req, resp):
self.log.debug("Getting clique types")
filters = self.parse_query_params(req)
- focal_point_types = self.get_constants_by_name("object_types")
- link_types = self.get_constants_by_name("link_types")
filters_requirements = {
- 'env_name': self.require(str, mandatory=True),
+ 'env_name': self.require(str),
'id': self.require(ObjectId, convert_to_type=True),
+ 'distribution': self.require(str),
+ 'distribution_version': self.require(str),
+ 'mechanism_drivers': self.require(str,
+ validate=DataValidate.LIST,
+ requirement=self.mechanism_drivers),
+ 'type_drivers': self.require(str,
+ validate=DataValidate.LIST,
+ requirement=self.type_drivers),
'focal_point_type': self.require(str,
validate=DataValidate.LIST,
- requirement=focal_point_types),
+ requirement=self.focal_point_types),
'link_type': self.require([list, str],
validate=DataValidate.LIST,
- requirement=link_types),
+ requirement=self.link_types),
'name': self.require(str),
'page': self.require(int, convert_to_type=True),
'page_size': self.require(int, convert_to_type=True)
}
self.validate_query_data(filters, filters_requirements)
+ if 'distribution_version' in filters and 'distribution' not in filters:
+ self.bad_request("Distribution version without distribution "
+ "is not allowed")
+
page, page_size = self.get_pagination(filters)
query = self.build_query(filters)
if self.ID in query:
@@ -64,40 +86,46 @@ class CliqueTypes(ResponderBase):
error, clique_type = self.get_content_from_request(req)
if error:
self.bad_request(error)
- focal_point_types = self.get_constants_by_name("object_types")
- link_types = self.get_constants_by_name("link_types")
+
clique_type_requirements = {
- 'environment': self.require(str, mandatory=True),
+ 'environment': self.require(str),
'focal_point_type': self.require(str,
mandatory=True,
validate=DataValidate.LIST,
- requirement=focal_point_types),
+ requirement=self.focal_point_types),
'link_types': self.require(list,
mandatory=True,
validate=DataValidate.LIST,
- requirement=link_types),
- 'name': self.require(str, mandatory=True)
+ requirement=self.link_types),
+ 'name': self.require(str, mandatory=True),
+ 'distribution': self.require(str),
+ 'distribution_version': self.require(str),
+ 'mechanism_drivers': self.require(str,
+ validate=DataValidate.LIST,
+ requirement=self.mechanism_drivers),
+ 'type_drivers': self.require(str,
+ validate=DataValidate.LIST,
+ requirement=self.type_drivers),
+ 'use_implicit_links': self.require(bool)
}
self.validate_query_data(clique_type, clique_type_requirements)
-
- env_name = clique_type['environment']
- if not self.check_environment_name(env_name):
- self.bad_request("Unknown environment: {}".format(env_name))
- elif env_name.upper() in self.RESERVED_NAMES:
- self.bad_request("Environment name '{}' is reserved".format(env_name))
+ self.validate_required_fields(clique_type)
+ self.validate_focal_point_type(clique_type)
+ self.validate_duplicate_configuration(clique_type)
self.write(clique_type, self.COLLECTION)
self.set_successful_response(resp,
- {"message": "created a new clique_type "
- "for environment {0}"
- .format(env_name)},
+ {"message": "created a new clique_type"},
"201")
def build_query(self, filters):
query = {}
- filters_keys = ['name', 'focal_point_type']
+ filters_keys = ['name', 'focal_point_type',
+ 'distribution', 'distribution_version',
+ 'mechanism_drivers', 'type_drivers']
self.update_query_with_filters(filters, filters_keys, query)
+
link_types = filters.get('link_type')
if link_types:
if type(link_types) != list:
@@ -107,5 +135,71 @@ class CliqueTypes(ResponderBase):
if _id:
query[self.ID] = _id
- query['environment'] = filters['env_name']
+ env_name = filters.get('env_name')
+ if env_name:
+ query['environment'] = filters['env_name']
return query
+
+ def validate_required_fields(self, clique_type):
+ env_name = clique_type.get('environment')
+ distribution = clique_type.get('distribution')
+ distribution_version = clique_type.get('distribution_version')
+ if distribution_version and not distribution:
+ self.bad_request("Distribution version without distribution "
+ "is not allowed")
+
+ configuration_specified = ((distribution and distribution_version)
+ or clique_type.get('mechanism_drivers')
+ or clique_type.get('type_drivers'))
+ if env_name:
+ if configuration_specified:
+ self.bad_request("Either environment or configuration "
+ "should be specified (not both).")
+
+ if not self.check_environment_name(env_name):
+ self.bad_request("Unknown environment: {}".format(env_name))
+ elif env_name.upper() in self.RESERVED_NAMES:
+ self.bad_request(
+ "Environment name '{}' is reserved".format(env_name))
+ elif not configuration_specified:
+ self.bad_request("Either environment or configuration "
+ "should be specified.")
+
+ def validate_focal_point_type(self, clique_type):
+ focal_point_type = clique_type['focal_point_type']
+ environment = clique_type.get('environment')
+ if environment:
+ env_match = self.read(
+ matches={"environment": environment,
+ "focal_point_type": focal_point_type},
+ collection="clique_types"
+ )
+ if env_match:
+ self.bad_request("Clique type with focal point {} "
+ "is already registered for environment {}"
+ .format(focal_point_type, environment))
+ else:
+ pass
+
+ def validate_duplicate_configuration(self, clique_type):
+ if clique_type.get('environment'):
+ return
+
+ search = {'focal_point_type': clique_type['focal_point_type']}
+ for field in ['distribution', 'mechanism_drivers', 'type_drivers']:
+ value = clique_type.get(field)
+ if value:
+ search[field] = value
+ if field == 'distribution':
+ dv = clique_type.get('distribution_version')
+ if dv:
+ search['distribution_version'] = dv
+ # Got a match with higher score, no need to look further
+ break
+
+ env_match = self.read(matches=search,
+ collection="clique_types")
+ if env_match:
+ self.bad_request("Clique type with configuration '{}' "
+ "is already registered"
+ .format(search))
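Note on the validation added above: a clique type must now carry either an environment name or a configuration (distribution/version, mechanism driver, type driver), never both. A minimal standalone sketch of that rule, with a hypothetical helper name (the responder itself raises bad_request instead of returning strings):

    from typing import Optional

    def check_env_vs_configuration(clique_type: dict) -> Optional[str]:
        """Return an error message, or None if the clique type is acceptable."""
        env_name = clique_type.get('environment')
        distribution = clique_type.get('distribution')
        version = clique_type.get('distribution_version')
        if version and not distribution:
            return "Distribution version without distribution is not allowed"
        configuration = ((distribution and version)
                         or clique_type.get('mechanism_drivers')
                         or clique_type.get('type_drivers'))
        if env_name and configuration:
            return ("Either environment or configuration "
                    "should be specified (not both).")
        if not env_name and not configuration:
            return "Either environment or configuration should be specified."
        return None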
diff --git a/app/api/responders/resource/environment_configs.py b/app/api/responders/resource/environment_configs.py
index c24aec8..76cc8a9 100644
--- a/app/api/responders/resource/environment_configs.py
+++ b/app/api/responders/resource/environment_configs.py
@@ -13,7 +13,6 @@ from api.responders.responder_base import ResponderBase
from bson.objectid import ObjectId
from datetime import datetime
from utils.constants import EnvironmentFeatures
-from utils.inventory_mgr import InventoryMgr
class EnvironmentConfigs(ResponderBase):
@@ -27,9 +26,13 @@ class EnvironmentConfigs(ResponderBase):
"distribution": True
}
CONFIGURATIONS_NAMES = ["mysql", "OpenStack", "CLI", "AMQP",
- "Monitoring", "NFV_provider", "ACI"]
- OPTIONAL_CONFIGURATIONS_NAMES = ["AMQP", "Monitoring",
- "NFV_provider", "ACI"]
+ "Monitoring", "NFV_provider", "ACI",
+ "Kubernetes", "VMware", "Bare-metal"]
+ REQUIRED_CONFIGURATIONS_NAMES = {
+ "OpenStack": ["OpenStack", "mysql", "CLI"],
+ "Kubernetes": ["Kubernetes", "CLI"],
+ }
+ DEFAULT_ENV_TYPE = "OpenStack"
def __init__(self):
super().__init__()
@@ -49,6 +52,8 @@ class EnvironmentConfigs(ResponderBase):
get_constants_by_name("environment_operational_status")
self.type_drivers = self.\
get_constants_by_name("type_drivers")
+ self.environment_types = self.\
+ get_constants_by_name("environment_types")
self.CONFIGURATIONS_REQUIREMENTS = {
"mysql": {
@@ -108,6 +113,7 @@ class EnvironmentConfigs(ResponderBase):
},
"Monitoring": {
"name": self.require(str, mandatory=True),
+ "install_monitoring_client": self.require(bool),
"config_folder": self.require(str,
mandatory=True,
validate=DataValidate.REGEX,
@@ -169,6 +175,20 @@ class EnvironmentConfigs(ResponderBase):
requirement=[regex.IP, regex.HOSTNAME]),
"user": self.require(str, mandatory=True),
"pwd": self.require(str, mandatory=True)
+ },
+ "Kubernetes": {
+ "name": self.require(str, mandatory=True),
+ "host": self.require(str,
+ mandatory=True,
+ validate=DataValidate.REGEX,
+ requirement=[regex.IP, regex.HOSTNAME]),
+ "port": self.require(int,
+ mandatory=True,
+ convert_to_type=True,
+ validate=DataValidate.REGEX,
+ requirement=regex.PORT),
+ "user": self.require(str, mandatory=True),
+ "token": self.require(str, mandatory=True)
}
}
self.AUTH_REQUIREMENTS = {
@@ -201,6 +221,9 @@ class EnvironmentConfigs(ResponderBase):
"operational": self.require(str,
validate=DataValidate.LIST,
requirement=self.operational_values),
+ "environment_type": self.require(str,
+ validate=DataValidate.LIST,
+ requirement=self.environment_types),
"page": self.require(int, convert_to_type=True),
"page_size": self.require(int, convert_to_type=True)
}
@@ -223,7 +246,8 @@ class EnvironmentConfigs(ResponderBase):
query = {}
filters_keys = ["name", "distribution", "distribution_version",
"type_drivers", "user", "listen",
- "monitoring_setup_done", "scanned", "operational"]
+ "monitoring_setup_done", "scanned", "operational",
+ "environment_type"]
self.update_query_with_filters(filters, filters_keys, query)
mechanism_drivers = filters.get("mechanism_drivers")
if mechanism_drivers:
@@ -272,16 +296,26 @@ class EnvironmentConfigs(ResponderBase):
"enable_monitoring": self.require(bool, convert_to_type=True),
"monitoring_setup_done": self.require(bool, convert_to_type=True),
"auth": self.require(dict),
- "aci_enabled": self.require(bool, convert_to_type=True)
+ "aci_enabled": self.require(bool, convert_to_type=True),
+ "environment_type": self.require(str,
+ validate=DataValidate.LIST,
+ requirement=self.environment_types),
}
self.validate_query_data(env_config,
environment_config_requirement,
- can_be_empty_keys=["last_scanned"]
- )
+ can_be_empty_keys=["last_scanned",
+ "environment_type"])
self.check_and_convert_datetime("last_scanned", env_config)
+
# validate the configurations
+ environment_type = env_config.get("environment_type")
+ if not environment_type:
+ environment_type = self.DEFAULT_ENV_TYPE
configurations = env_config['configuration']
- config_validation = self.validate_environment_config(configurations)
+ config_validation = (
+ self.validate_environment_config(configurations=configurations,
+ environment_type=environment_type)
+ )
if not config_validation['passed']:
self.bad_request(config_validation['error_message'])
@@ -310,12 +344,11 @@ class EnvironmentConfigs(ResponderBase):
.format(env_config["name"])},
"201")
- def validate_environment_config(self, configurations,
+ def validate_environment_config(self, configurations, environment_type=None,
require_mandatory=True):
configurations_of_names = {}
validation = {"passed": True}
- if [config for config in configurations
- if 'name' not in config]:
+ if any('name' not in config for config in configurations):
validation['passed'] = False
validation['error_message'] = "configuration must have name"
return validation
@@ -338,12 +371,19 @@ class EnvironmentConfigs(ResponderBase):
"configuration for {0}".format(name)
return validation
configurations_of_names[name] = configs[0]
- elif require_mandatory:
- if name not in self.OPTIONAL_CONFIGURATIONS_NAMES:
- validation["passed"] = False
- validation['error_message'] = "configuration for {0} " \
- "is mandatory".format(name)
- return validation
+
+ if require_mandatory:
+ required_list = (
+ self.REQUIRED_CONFIGURATIONS_NAMES.get(environment_type, [])
+ )
+ if any(required_conf not in configurations_of_names
+ for required_conf
+ in required_list):
+ validation["passed"] = False
+                validation['error_message'] = ("configurations for ({}) "
+                                               "are mandatory for "
+                                               "this environment type"
+                                               .format(", ".join(required_list)))
for name, config in configurations_of_names.items():
error_message = self.validate_configuration(name, config)
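The new REQUIRED_CONFIGURATIONS_NAMES table replaces the single OPTIONAL_CONFIGURATIONS_NAMES list: which configurations are mandatory now depends on the environment type. A short sketch of the check using the table from this diff (hypothetical standalone form; the responder reports all missing names in one error message):

    REQUIRED = {"OpenStack": ["OpenStack", "mysql", "CLI"],
                "Kubernetes": ["Kubernetes", "CLI"]}

    def missing_configurations(environment_type: str, supplied: set) -> list:
        # Names required for this environment type but absent from the request
        return [name for name in REQUIRED.get(environment_type, [])
                if name not in supplied]

    assert missing_configurations("Kubernetes", {"Kubernetes", "CLI"}) == []
    assert missing_configurations("OpenStack", {"OpenStack"}) == ["mysql", "CLI"]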
diff --git a/app/api/responders/responder_base.py b/app/api/responders/responder_base.py
index e59f4cf..0ac08d6 100644
--- a/app/api/responders/responder_base.py
+++ b/app/api/responders/responder_base.py
@@ -71,7 +71,7 @@ class ResponderBase(DataValidate, DictNamingConverter):
def validate_query_data(self, data, data_requirements,
additional_key_reg=None,
- can_be_empty_keys=[]):
+ can_be_empty_keys=None):
error_message = self.validate_data(data, data_requirements,
additional_key_reg,
can_be_empty_keys)
@@ -197,7 +197,9 @@ class ResponderBase(DataValidate, DictNamingConverter):
': no "value" key for data: ' + str(d))
return consts
- def read(self, collection, matches={}, projection=None, skip=0, limit=1000):
+ def read(self, collection, matches=None, projection=None, skip=0, limit=1000):
+ if matches is None:
+ matches = {}
collection = self.get_collection_by_name(collection)
skip *= limit
query = collection.find(matches, projection).skip(skip).limit(limit)
diff --git a/app/api/validation/data_validate.py b/app/api/validation/data_validate.py
index 6928c4b..4dfb214 100644
--- a/app/api/validation/data_validate.py
+++ b/app/api/validation/data_validate.py
@@ -75,7 +75,9 @@ class DataValidate:
def validate_data(self, data, requirements,
additional_key_re=None,
- can_be_empty_keys=[]):
+ can_be_empty_keys=None):
+ if can_be_empty_keys is None:
+ can_be_empty_keys = []
illegal_keys = [key for key in data.keys()
if key not in requirements.keys()]
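Both signature changes above retire Python's mutable-default-argument pitfall: a default list or dict is created once, at function definition time, and silently shared across calls. A self-contained illustration:

    def buggy(key, seen=[]):           # one list shared by every call
        seen.append(key)
        return seen

    def fixed(key, seen=None):         # fresh list per call unless provided
        if seen is None:
            seen = []
        seen.append(key)
        return seen

    assert buggy('a') == ['a']
    assert buggy('b') == ['a', 'b']    # state leaked from the first call
    assert fixed('a') == ['a']
    assert fixed('b') == ['b']         # calls stay independent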
diff --git a/app/config/link_finders.json b/app/config/link_finders.json
index 55c31f6..b421ee9 100644
--- a/app/config/link_finders.json
+++ b/app/config/link_finders.json
@@ -7,6 +7,7 @@
"FindLinksForVconnectors",
"FindLinksForVedges",
"FindLinksForVserviceVnics",
- "FindLinksForPnics"
+ "FindLinksForPnics",
+ "FindImplicitLinks"
]
}
\ No newline at end of file
diff --git a/app/config/scanners.json b/app/config/scanners.json
index c5efb06..a96029a 100644
--- a/app/config/scanners.json
+++ b/app/config/scanners.json
@@ -36,7 +36,8 @@
"types_name": "regions",
"parent_type": "environment"
},
- "children_scanner": "ScanRegionsRoot"
+ "children_scanner": "ScanRegionsRoot",
+ "environment_condition": {"environment_type": "OpenStack"}
},
{
"type": "projects_folder",
@@ -45,7 +46,20 @@
"types_name": "projects",
"parent_type": "environment"
},
- "children_scanner": "ScanProjectsRoot"
+ "children_scanner": "ScanProjectsRoot",
+ "environment_condition": {"environment_type": "OpenStack"}
+ },
+ {
+ "type": "namespaces_folder",
+ "fetcher": {
+ "folder": true,
+ "types_name": "namespaces",
+ "parent_type": "environment"
+ },
+ "children_scanner": "ScanNamespacesRoot",
+ "environment_condition": {
+ "environment_type": "Kubernetes"
+ }
}
],
"ScanHostNetworkAgentsRoot": [
@@ -377,6 +391,13 @@
"type": "vservice",
"fetcher": "CliFetchHostVservices"
}
+ ],
+ "ScanNamespacesRoot": [
+ {
+ "type": "namespace",
+ "fetcher": "KubeFetchNamespaces",
+ "environment_condition": {"environment_type": "Kubernetes"}
+ }
]
}
}
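The new environment_condition keys let a single scanners.json serve both OpenStack and Kubernetes environments: an entry only runs when its condition matches the environment's configuration. A sketch of the matching semantics (hypothetical helper; the actual evaluation lives in the scanner):

    def scanner_applies(entry: dict, env_config: dict) -> bool:
        condition = entry.get('environment_condition')
        if not condition:
            return True                # unconditional entries always apply
        return all(env_config.get(key) == value
                   for key, value in condition.items())

    entry = {"type": "namespaces_folder",
             "environment_condition": {"environment_type": "Kubernetes"}}
    assert scanner_applies(entry, {"environment_type": "Kubernetes"})
    assert not scanner_applies(entry, {"environment_type": "OpenStack"})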
diff --git a/app/connection_test/connection_test.py b/app/connection_test/connection_test.py
deleted file mode 100644
index d9d6af7..0000000
--- a/app/connection_test/connection_test.py
+++ /dev/null
@@ -1,283 +0,0 @@
-###############################################################################
-# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
-# and others #
-# #
-# All rights reserved. This program and the accompanying materials #
-# are made available under the terms of the Apache License, Version 2.0 #
-# which accompanies this distribution, and is available at #
-# http://www.apache.org/licenses/LICENSE-2.0 #
-###############################################################################
-import argparse
-import datetime
-from kombu import Connection
-
-import time
-
-import pymongo
-from functools import partial
-
-from discover.fetchers.api.api_access import ApiAccess
-from discover.fetchers.db.db_access import DbAccess
-from discover.manager import Manager
-from utils.constants import ConnectionTestStatus, ConnectionTestType
-from utils.logging.file_logger import FileLogger
-from utils.mongo_access import MongoAccess
-from utils.ssh_connection import *
-
-
-def test_openstack(config, test_request):
- try:
- api = ApiAccess(config)
- ConnectionTest.report_success(test_request,
- ConnectionTestType.OPENSTACK.value)
- if api:
- pass
- except ValueError:
- pass
-
-
-def test_mysql(config, test_request):
- db_access = DbAccess(config)
- ConnectionTest.report_success(test_request, ConnectionTestType.MYSQL.value)
- if db_access:
- pass
-
-
-def test_ssh_connect(config) -> bool:
- ssh = SshConnection(config.get('host', ''),
- config.get('user', ''),
- _pwd=config.get('pwd'),
- _key=config.get('key'),
- _port=int(config.get('port',
- SshConnection.DEFAULT_PORT)))
- ret = ssh.connect()
- return ret
-
-
-def test_cli(config, test_request):
- ret = test_ssh_connect(config)
- ConnectionTest.set_test_result(test_request,
- ConnectionTestType.CLI.value,
- ret)
-
-
-def test_amqp_connect(config):
- connect_url = 'amqp://{user}:{pwd}@{host}:{port}//' \
- .format(user=config.get("user", ''),
- pwd=config.get('pwd', ''),
- host=config.get('host', ''),
- port=int(config.get('port', 5671)))
- conn = Connection(connect_url)
- conn.connect()
-
-
-def test_amqp(config, test_request):
- test_amqp_connect(config)
- ConnectionTest.report_success(test_request, ConnectionTestType.AMQP.value)
-
-
-def test_monitoring(config, test_request):
- # for monitoring configuration test, need to test:
- # 1. SSH access
- # 2. RabbitMQ access
- ssh_config = {
- 'host': config.get('server_ip'),
- 'user': config.get('ssh_user'),
- 'pwd': config.get('ssh_password'),
- 'port': int(config.get('ssh_port', 0))
- }
- if not test_ssh_connect(ssh_config):
- return
- amqp_connect_config = {
- 'user': config.get('rabbitmq_user', ''),
- 'pwd': config.get('rabbitmq_pass', ''),
- 'host': config.get('server_ip'),
- 'port': int(config.get('rabbitmq_port', 5672)),
- }
- test_amqp_connect(amqp_connect_config)
- ConnectionTest.report_success(test_request, ConnectionTestType.AMQP.value)
-
-
-def test_aci(config, test_request):
- pass
-
-
-TEST_HANDLERS = {
- ConnectionTestType.OPENSTACK.value: test_openstack,
- ConnectionTestType.MYSQL.value: test_mysql,
- ConnectionTestType.CLI.value: test_cli,
- ConnectionTestType.AMQP.value: test_amqp,
- ConnectionTestType.ACI.value: test_aci,
- ConnectionTestType.MONITORING.value: test_monitoring
-}
-
-
-class ConnectionTest(Manager):
-
- DEFAULTS = {
- 'mongo_config': '',
- 'connection_tests': 'connection_tests',
- 'environments': 'environments_config',
- 'interval': 1,
- 'loglevel': 'WARNING'
- }
-
- def __init__(self):
- self.args = self.get_args()
- super().__init__(log_directory=self.args.log_directory,
- mongo_config_file=self.args.mongo_config)
- self.db_client = None
- self.connection_tests_collection = None
- self.environments_collection = None
-
- @staticmethod
- def get_args():
- parser = argparse.ArgumentParser()
- parser.add_argument('-m', '--mongo_config', nargs='?', type=str,
- default=ConnectionTest.DEFAULTS['mongo_config'],
- help='Name of config file ' +
- 'with MongoDB server access details')
- parser.add_argument('-c', '--connection_tests_collection', nargs='?',
- type=str,
- default=ConnectionTest.DEFAULTS['connection_tests'],
- help='connection_tests collection to read from')
- parser.add_argument('-e', '--environments_collection', nargs='?',
- type=str,
- default=ConnectionTest.DEFAULTS['environments'],
- help='Environments collection to update '
- 'after tests')
- parser.add_argument('-i', '--interval', nargs='?', type=float,
- default=ConnectionTest.DEFAULTS['interval'],
- help='Interval between collection polls'
- '(must be more than {} seconds)'
- .format(ConnectionTest.MIN_INTERVAL))
- parser.add_argument('-l', '--loglevel', nargs='?', type=str,
- default=ConnectionTest.DEFAULTS['loglevel'],
- help='Logging level \n(default: {})'
- .format(ConnectionTest.DEFAULTS['loglevel']))
- parser.add_argument('-d', '--log_directory', nargs='?', type=str,
- default=FileLogger.LOG_DIRECTORY,
- help='File logger directory \n(default: {})'
- .format(FileLogger.LOG_DIRECTORY))
- args = parser.parse_args()
- return args
-
- def configure(self):
- self.db_client = MongoAccess()
- self.connection_tests_collection = \
- self.db_client.db[self.args.connection_tests_collection]
- self.environments_collection = \
- self.db_client.db[self.args.environments_collection]
- self._update_document = \
- partial(MongoAccess.update_document,
- self.connection_tests_collection)
- self.interval = max(self.MIN_INTERVAL, self.args.interval)
- self.log.set_loglevel(self.args.loglevel)
-
- self.log.info('Started ConnectionTest with following configuration:\n'
- 'Mongo config file path: {0.args.mongo_config}\n'
- 'connection_tests collection: '
- '{0.connection_tests_collection.name}\n'
- 'Polling interval: {0.interval} second(s)'
- .format(self))
-
- def _build_test_args(self, test_request: dict):
- args = {
- 'mongo_config': self.args.mongo_config
- }
-
- def set_arg(name_from: str, name_to: str = None):
- if name_to is None:
- name_to = name_from
- val = test_request.get(name_from)
- if val:
- args[name_to] = val
-
- set_arg('object_id', 'id')
- set_arg('log_level', 'loglevel')
- set_arg('environment', 'env')
- set_arg('scan_only_inventory', 'inventory_only')
- set_arg('scan_only_links', 'links_only')
- set_arg('scan_only_cliques', 'cliques_only')
- set_arg('inventory')
- set_arg('clear')
- set_arg('clear_all')
-
- return args
-
- def _finalize_test(self, test_request: dict):
- # update the status and timestamps.
- self.log.info('Request {} has been tested.'
- .format(test_request['_id']))
- start_time = test_request['submit_timestamp']
- end_time = datetime.datetime.utcnow()
- test_request['response_timestamp'] = end_time
- test_request['response_time'] = \
- str(end_time - start_time.replace(tzinfo=None))
- test_request['status'] = ConnectionTestStatus.RESPONSE.value
- self._update_document(test_request)
-
- @staticmethod
- def set_test_result(test_request, target, result):
- test_request.get('test_results', {})[target] = result
-
- @staticmethod
- def report_success(test_request, target):
- ConnectionTest.set_test_result(test_request, target, True)
-
- @staticmethod
- def handle_test_target(target, test_request):
- targets_config = test_request.get('targets_configuration', [])
- try:
- config = next(t for t in targets_config if t['name'] == target)
- except StopIteration:
- raise ValueError('failed to find {} in targets_configuration'
- .format(target))
- handler = TEST_HANDLERS.get(target)
- if not handler:
- raise ValueError('unknown test target: {}'.format(target))
- handler(config, test_request)
-
- def do_test(self, test_request):
- targets = [t for t in test_request.get('test_targets', [])]
- test_request['test_results'] = {t: False for t in targets}
- for test_target in test_request.get('test_targets', []):
- self.log.info('testing connection to: {}'.format(test_target))
- try:
- self.handle_test_target(test_target, test_request)
- except Exception as e:
- self.log.exception(e)
- if 'errors' not in test_request:
- test_request['errors'] = {}
- test_request['errors'][test_target] = str(e)
- self.log.error('Test of target {} failed (id: {}):\n{}'
- .format(test_target,
- test_request['_id'],
- str(e)))
- self._finalize_test(test_request)
- self._set_env_operational(test_request['environment'])
-
- # if environment_config document for this specific environment exists,
- # update the value of the 'operational' field to 'running'
- def _set_env_operational(self, env):
- self.environments_collection. \
- update_one({'name': env}, {'$set': {'operational': 'running'}})
-
- def do_action(self):
- while True:
- # Find a pending request that is waiting the longest time
- results = self.connection_tests_collection \
- .find({'status': ConnectionTestStatus.REQUEST.value,
- 'submit_timestamp': {'$ne': None}}) \
- .sort('submit_timestamp', pymongo.ASCENDING) \
- .limit(1)
-
- # If no connection tests are pending, sleep for some time
- if results.count() == 0:
- time.sleep(self.interval)
- else:
- self.do_test(results[0])
-
-
-if __name__ == '__main__':
- ConnectionTest().run()
diff --git a/app/discover/clique_finder.py b/app/discover/clique_finder.py
index 57b2e3b..4e04e7e 100644
--- a/app/discover/clique_finder.py
+++ b/app/discover/clique_finder.py
@@ -42,67 +42,90 @@ class CliqueFinder(Fetcher):
return self.links.find({'target': db_id})
def find_cliques(self):
- self.log.info("scanning for cliques")
+ self.log.info("Scanning for cliques")
clique_types = self.get_clique_types().values()
for clique_type in clique_types:
self.find_cliques_for_type(clique_type)
- self.log.info("finished scanning for cliques")
+ self.log.info("Finished scanning for cliques")
- # Calculate priority score
- def _get_priority_score(self, clique_type):
- if self.env == clique_type['environment']:
- return 4
- if (self.env_config['distribution'] == clique_type.get('distribution') and
- self.env_config['distribution_version'] == clique_type.get('distribution_version')):
- return 3
- if clique_type.get('mechanism_drivers') in self.env_config['mechanism_drivers']:
- return 2
- if self.env_config['type_drivers'] == clique_type.get('type_drivers'):
- return 1
- else:
+ # Calculate priority score for clique type per environment and configuration
+ def get_priority_score(self, clique_type):
+ # environment-specific clique type takes precedence
+ env = clique_type.get('environment')
+ config = self.env_config
+ # ECT - Clique Type with Environment name
+ if env:
+ if self.env == env:
+ return 2**6
+ if env == 'ANY':
+ # environment=ANY serves as fallback option
+ return 2**0
return 0
+ # NECT - Clique Type without Environment name
+ else:
+ env_type = clique_type.get('environment_type')
+ # TODO: remove backward compatibility ('if not env_type' check)
+ if env_type and env_type != config.get('environment_type'):
+ return 0
- # Get clique type with max priority
- # for given environment configuration and focal point type
- def _get_clique_type(self, focal_point, clique_types):
- # If there's no configuration match for the specified environment,
- # we use the default clique type definition with environment='ANY'
- fallback_type = next(
- filter(lambda t: t['environment'] == 'ANY', clique_types),
- None
- )
- if not fallback_type:
- raise ValueError("No fallback clique type (ANY) "
- "defined for focal point type '{}'"
- .format(focal_point))
+ score = 0
- clique_types.remove(fallback_type)
+ distribution = clique_type.get('distribution')
+ if distribution:
+ if config['distribution'] != distribution:
+ return 0
- priority_scores = [self._get_priority_score(clique_type)
- for clique_type
- in clique_types]
- max_score = max(priority_scores) if priority_scores else 0
+ score += 2**5
- return (fallback_type
- if max_score == 0
- else clique_types[priority_scores.index(max_score)])
+ dv = clique_type.get('distribution_version')
+ if dv:
+ if dv != config['distribution_version']:
+ return 0
+ score += 2**4
- def get_clique_types(self):
- if not self.clique_types_by_type:
- clique_types_by_focal_point = self.clique_types.aggregate([{
- "$group": {
- "_id": "$focal_point_type",
- "types": {"$push": "$$ROOT"}
- }
- }])
+ mechanism_drivers = clique_type.get('mechanism_drivers')
+ if mechanism_drivers:
+ if mechanism_drivers not in config['mechanism_drivers']:
+ return 0
+ score += 2**3
- self.clique_types_by_type = {
- cliques['_id']: self._get_clique_type(cliques['_id'],
- cliques['types'])
- for cliques in
- clique_types_by_focal_point
- }
+ type_drivers = clique_type.get('type_drivers')
+ if type_drivers:
+ if type_drivers != config['type_drivers']:
+ return 0
+ score += 2**2
+
+ # If no configuration is specified, this clique type
+ # is a fallback for its environment type
+ return max(score, 2**1)
+
+ # Get clique type with max priority
+ # for given focal point type
+ def _get_clique_type(self, clique_types):
+ scored_clique_types = [{'score': self.get_priority_score(clique_type),
+ 'clique_type': clique_type}
+ for clique_type in clique_types]
+ max_score = max(scored_clique_types, key=lambda t: t['score'])
+ if max_score['score'] == 0:
+ self.log.warn('No matching clique types '
+ 'for focal point type: {fp_type}'
+ .format(fp_type=clique_types[0].get('focal_point_type')))
+ return None
+ return max_score.get('clique_type')
+ def get_clique_types(self):
+ if not self.clique_types_by_type:
+ clique_types_candidates = {}
+ for clique in self.clique_types.find({}):
+ fp_type = clique.get('focal_point_type', '')
+ if not clique_types_candidates.get(fp_type):
+ clique_types_candidates[fp_type] = []
+ clique_types_candidates[fp_type].append(clique)
+ for t in clique_types_candidates.keys():
+ selected = self._get_clique_type(clique_types_candidates[t])
+ if not selected:
+ continue
+ self.clique_types_by_type[t] = selected
return self.clique_types_by_type
def find_cliques_for_type(self, clique_type):
@@ -125,11 +148,14 @@ class CliqueFinder(Fetcher):
.find_one({"focal_point_type": o['type']})
constraints = [] if not constraint else constraint["constraints"]
clique_types = self.get_clique_types()
- clique_type = clique_types[o['type']]
- new_clique = self.construct_clique_for_focal_point(o, clique_type,
- constraints)
- if not new_clique:
+ clique_type = clique_types.get(o['type'])
+ if not clique_type:
self.cliques.delete({'_id': clique['_id']})
+ else:
+ new_clique = self.construct_clique_for_focal_point(o, clique_type,
+ constraints)
+ if not new_clique:
+ self.cliques.delete({'_id': clique['_id']})
def construct_clique_for_focal_point(self, o, clique_type, constraints):
# keep a hash of nodes in clique that were visited for each type
@@ -146,12 +172,16 @@ class CliqueFinder(Fetcher):
for c in constraints:
val = o[c] if c in o else None
clique["constraints"][c] = val
+ allow_implicit = clique_type.get('use_implicit_links', False)
for link_type in clique_type["link_types"]:
- self.check_link_type(clique, link_type, nodes_of_type)
+ if not self.check_link_type(clique, link_type, nodes_of_type,
+ allow_implicit=allow_implicit):
+ break
# after adding the links to the clique, create/update the clique
if not clique["links"]:
return None
+ clique["clique_type"] = clique_type["_id"]
focal_point_obj = self.inventory.find({"_id": clique["focal_point"]})
if not focal_point_obj:
return None
@@ -198,25 +228,33 @@ class CliqueFinder(Fetcher):
'-'.join(link_type_parts)
return CliqueFinder.link_type_reversed.get(link_type)
- def check_link_type(self, clique, link_type, nodes_of_type):
+ def check_link_type(self, clique, link_type, nodes_of_type,
+ allow_implicit=False) -> bool:
# check if it's backwards
link_type_reversed = self.get_link_type_reversed(link_type)
# handle case of links like T<-->T
self_linked = link_type == link_type_reversed
use_reversed = False
if not self_linked:
- matches = self.links.find_one({
+ link_search_condition = {
"environment": self.env,
"link_type": link_type_reversed
- })
+ }
+ if not allow_implicit:
+ link_search_condition['implicit'] = False
+ matches = self.links.find_one(link_search_condition)
use_reversed = True if matches else False
if self_linked or not use_reversed:
- self.check_link_type_forward(clique, link_type, nodes_of_type)
+ return self.check_link_type_forward(clique, link_type,
+ nodes_of_type,
+ allow_implicit=allow_implicit)
if self_linked or use_reversed:
- self.check_link_type_back(clique, link_type, nodes_of_type)
+ return self.check_link_type_back(clique, link_type, nodes_of_type,
+ allow_implicit=allow_implicit)
def check_link_type_for_direction(self, clique, link_type, nodes_of_type,
- is_reversed=False):
+ is_reversed=False,
+ allow_implicit=False) -> bool:
if is_reversed:
link_type = self.get_link_type_reversed(link_type)
from_type = link_type[:link_type.index("-")]
@@ -225,7 +263,7 @@ class CliqueFinder(Fetcher):
other_side = 'target' if not is_reversed else 'source'
match_type = to_type if is_reversed else from_type
if match_type not in nodes_of_type.keys():
- return
+ return False
other_side_type = to_type if not is_reversed else from_type
nodes_to_add = set()
for match_point in nodes_of_type[match_type]:
@@ -233,21 +271,27 @@ class CliqueFinder(Fetcher):
clique,
link_type,
side_to_match,
- other_side)
+ other_side,
+ allow_implicit=allow_implicit)
nodes_to_add = nodes_to_add | matches
if other_side_type not in nodes_of_type:
nodes_of_type[other_side_type] = set()
nodes_of_type[other_side_type] = \
nodes_of_type[other_side_type] | nodes_to_add
+ return len(nodes_to_add) > 0
def find_matches_for_point(self, match_point, clique, link_type,
- side_to_match, other_side) -> set:
+ side_to_match, other_side,
+ allow_implicit=False) -> set:
nodes_to_add = set()
- matches = self.links.find({
+ link_search_condition = {
"environment": self.env,
"link_type": link_type,
side_to_match: ObjectId(match_point)
- })
+ }
+ if not allow_implicit:
+ link_search_condition['implicit'] = False
+ matches = self.links.find(link_search_condition)
for link in matches:
link_id = link["_id"]
if link_id in clique["links"]:
@@ -260,10 +304,16 @@ class CliqueFinder(Fetcher):
nodes_to_add.add(other_side_point)
return nodes_to_add
- def check_link_type_forward(self, clique, link_type, nodes_of_type):
- self.check_link_type_for_direction(clique, link_type, nodes_of_type,
- is_reversed=False)
+ def check_link_type_forward(self, clique, link_type, nodes_of_type,
+ allow_implicit=False) -> bool:
+ return self.check_link_type_for_direction(clique, link_type,
+ nodes_of_type,
+ is_reversed=False,
+ allow_implicit=allow_implicit)
- def check_link_type_back(self, clique, link_type, nodes_of_type):
- self.check_link_type_for_direction(clique, link_type, nodes_of_type,
- is_reversed=True)
+ def check_link_type_back(self, clique, link_type, nodes_of_type,
+ allow_implicit=False) -> bool:
+ return self.check_link_type_for_direction(clique, link_type,
+ nodes_of_type,
+ is_reversed=True,
+ allow_implicit=allow_implicit)
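The rewritten get_priority_score() swaps the old flat 0-4 ranking for powers of two, which makes the precedence explicit: an exact environment-name match always beats any combination of configuration matches, and environment='ANY' only wins when nothing else scores. A quick check of that ordering using the scores from this diff:

    ENV_MATCH  = 2**6                       # clique type names this environment
    CONFIG_MAX = 2**5 + 2**4 + 2**3 + 2**2  # distribution + version + both drivers
    FALLBACK   = 2**1                       # configuration-less clique type
    ANY_ENV    = 2**0                       # environment == 'ANY'

    assert ENV_MATCH > CONFIG_MAX           # 64 > 60: env name wins outright
    assert CONFIG_MAX > FALLBACK > ANY_ENV  # any config match beats the fallbacks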
diff --git a/app/discover/configuration.py b/app/discover/configuration.py
index c7bc0c0..9ec8f96 100644
--- a/app/discover/configuration.py
+++ b/app/discover/configuration.py
@@ -47,6 +47,10 @@ class Configuration(metaclass=Singleton):
def get_env_name(self):
return self.env_name
+ def get_env_type(self):
+ return 'OpenStack' if 'environment_type' not in self.environment \
+ else self.environment['environment_type']
+
def update_env(self, values):
self.collection.update_one({"name": self.env_name},
{'$set': MongoAccess.encode_mongo_keys(values)})
diff --git a/app/discover/event_manager.py b/app/discover/event_manager.py
index 4855acc..c01916c 100644
--- a/app/discover/event_manager.py
+++ b/app/discover/event_manager.py
@@ -113,8 +113,8 @@ class EventManager(Manager):
def get_listener(self, env: str):
env_config = self.inv.get_env_config(env)
return (self.LISTENERS.get(env_config.get('distribution'), {})
- .get(env_config.get('distribution_version',
- DefaultListener)))
+ .get(env_config.get('distribution_version'),
+ DefaultListener))
def listen_to_events(self, listener: ListenerBase, env_name: str, process_vars: dict):
listener.listen({
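The one-argument move above fixes a subtle lookup bug: the DefaultListener fallback was passed to the inner get() (as a default for the missing version key) instead of the outer one. A self-contained illustration, with strings standing in for the listener classes:

    listeners = {'Mirantis': {'6.0': 'MirantisListener'}}
    env = {'distribution': 'Mirantis'}     # no distribution_version key

    # old: a missing version made the inner get() return the fallback,
    # which was then used as a dictionary key, yielding None
    broken = (listeners.get(env.get('distribution'), {})
              .get(env.get('distribution_version', 'DefaultListener')))
    assert broken is None

    # new: DefaultListener is the fallback of the outer lookup
    fixed = (listeners.get(env.get('distribution'), {})
             .get(env.get('distribution_version'), 'DefaultListener'))
    assert fixed == 'DefaultListener'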
diff --git a/app/discover/events/event_base.py b/app/discover/events/event_base.py
index 6b3b290..4b466e1 100644
--- a/app/discover/events/event_base.py
+++ b/app/discover/events/event_base.py
@@ -11,6 +11,7 @@ from abc import abstractmethod, ABC
from discover.fetcher import Fetcher
from utils.inventory_mgr import InventoryMgr
+from utils.origins import ScanOrigin, ScanOrigins
class EventResult:
@@ -23,6 +24,8 @@ class EventResult:
self.message = message
self.related_object = related_object
self.display_context = display_context
+ self.origin = ScanOrigin(origin_id=None,
+ origin_type=ScanOrigins.EVENT)
class EventBase(Fetcher, ABC):
diff --git a/app/discover/events/event_instance_add.py b/app/discover/events/event_instance_add.py
index 4dd2b20..a8717a5 100644
--- a/app/discover/events/event_instance_add.py
+++ b/app/discover/events/event_instance_add.py
@@ -25,7 +25,7 @@ class EventInstanceAdd(EventBase):
# scan instance
scanner = Scanner()
- scanner.set_env(env)
+ scanner.setup(env=env, origin=self.origin)
scanner.scan("ScanInstancesRoot", instances_root,
limit_to_child_id=instance_id,
limit_to_child_type='instance')
diff --git a/app/discover/events/event_interface_add.py b/app/discover/events/event_interface_add.py
index e54bedb..f0ba569 100644
--- a/app/discover/events/event_interface_add.py
+++ b/app/discover/events/event_interface_add.py
@@ -30,7 +30,7 @@ class EventInterfaceAdd(EventBase):
def add_gateway_port(self, env, project, network_name, router_doc, host_id):
fetcher = CliFetchHostVservice()
- fetcher.set_env(env)
+ fetcher.setup(env=env, origin=self.origin)
router_id = router_doc['id']
router = fetcher.get_vservice(host_id, router_id)
device_id = decode_router_id(router_id)
@@ -101,7 +101,7 @@ class EventInterfaceAdd(EventBase):
# add router-interface port document.
if not ApiAccess.regions:
fetcher = ApiFetchRegions()
- fetcher.set_env(env)
+ fetcher.setup(env=env, origin=self.origin)
fetcher.get(project_id)
port_doc = EventSubnetAdd().add_port_document(env, port_id,
network_name=network_name)
@@ -134,7 +134,7 @@ class EventInterfaceAdd(EventBase):
# update vservice-vnic, vnic-network,
FindLinksForVserviceVnics().add_links(search={"parent_id": router_id})
scanner = Scanner()
- scanner.set_env(env)
+ scanner.setup(env=env, origin=self.origin)
scanner.scan_cliques()
self.log.info("Finished router-interface added.")
diff --git a/app/discover/events/event_port_add.py b/app/discover/events/event_port_add.py
index 9220015..e03db34 100644
--- a/app/discover/events/event_port_add.py
+++ b/app/discover/events/event_port_add.py
@@ -168,7 +168,7 @@ class EventPortAdd(EventBase):
"router": ('Gateways', router_name)}
fetcher = CliFetchVserviceVnics()
- fetcher.set_env(env)
+ fetcher.setup(env=env, origin=self.origin)
namespace = 'q{}-{}'.format(object_type, object_id)
vnic_documents = fetcher.handle_service(host['id'], namespace, enable_cache=False)
if not vnic_documents:
@@ -258,7 +258,7 @@ class EventPortAdd(EventBase):
# update instance
instance_fetcher = ApiFetchHostInstances()
- instance_fetcher.set_env(env)
+ instance_fetcher.setup(env=env, origin=self.origin)
instance_docs = instance_fetcher.get(host_id + '-')
instance = next(filter(lambda i: i['id'] == instance_id, instance_docs), None)
@@ -278,7 +278,7 @@ class EventPortAdd(EventBase):
# set ovs as default type.
vnic_fetcher = CliFetchInstanceVnics()
- vnic_fetcher.set_env(env)
+ vnic_fetcher.setup(env=env, origin=self.origin)
vnic_docs = vnic_fetcher.get(instance_id + '-')
vnic = next(filter(lambda vnic: vnic['mac_address'] == mac_address, vnic_docs), None)
@@ -298,7 +298,7 @@ class EventPortAdd(EventBase):
for fetcher in fetchers_implementing_add_links:
fetcher.add_links()
scanner = Scanner()
- scanner.set_env(env)
+ scanner.setup(env=env, origin=self.origin)
scanner.scan_cliques()
port_document = self.inv.get_by_id(env, port['id'])
diff --git a/app/discover/events/event_port_delete.py b/app/discover/events/event_port_delete.py
index 1e55870..937d8df 100644
--- a/app/discover/events/event_port_delete.py
+++ b/app/discover/events/event_port_delete.py
@@ -61,7 +61,7 @@ class EventPortDelete(EventDeleteBase):
# update instance mac address.
if port_doc['mac_address'] == instance_doc['mac_address']:
instance_fetcher = ApiFetchHostInstances()
- instance_fetcher.set_env(env)
+ instance_fetcher.setup(env=env, origin=self.origin)
host_id = port_doc['binding:host_id']
instance_id = port_doc['device_id']
instance_docs = instance_fetcher.get(host_id + '-')
diff --git a/app/discover/events/event_router_add.py b/app/discover/events/event_router_add.py
index 1fb2244..0f8bc05 100644
--- a/app/discover/events/event_router_add.py
+++ b/app/discover/events/event_router_add.py
@@ -100,7 +100,7 @@ class EventRouterAdd(EventBase):
host = self.inv.get_by_id(env, host_id)
fetcher = CliFetchHostVservice()
- fetcher.set_env(env)
+ fetcher.setup(env=env, origin=self.origin)
router_doc = fetcher.get_vservice(host_id, router_id)
gateway_info = router['external_gateway_info']
@@ -114,7 +114,7 @@ class EventRouterAdd(EventBase):
# scan links and cliques
FindLinksForVserviceVnics().add_links(search={"parent_id": router_id})
scanner = Scanner()
- scanner.set_env(env)
+ scanner.setup(env=env, origin=self.origin)
scanner.scan_cliques()
self.log.info("Finished router added.")
diff --git a/app/discover/events/event_router_update.py b/app/discover/events/event_router_update.py
index b63b224..f20f07e 100644
--- a/app/discover/events/event_router_update.py
+++ b/app/discover/events/event_router_update.py
@@ -60,7 +60,7 @@ class EventRouterUpdate(EventBase):
# add gw_port_id info and port document.
fetcher = CliFetchHostVservice()
- fetcher.set_env(env)
+ fetcher.setup(env=env, origin=self.origin)
router_vservice = fetcher.get_vservice(host_id, router_full_id)
if router_vservice.get('gw_port_id'):
router_doc['gw_port_id'] = router_vservice['gw_port_id']
@@ -74,7 +74,7 @@ class EventRouterUpdate(EventBase):
# update the cliques.
scanner = Scanner()
- scanner.set_env(env)
+ scanner.setup(env=env, origin=self.origin)
scanner.scan_cliques()
self.log.info("Finished router update.")
return EventResult(result=True,
diff --git a/app/discover/events/event_subnet_add.py b/app/discover/events/event_subnet_add.py
index 4126e0c..0a91803 100644
--- a/app/discover/events/event_subnet_add.py
+++ b/app/discover/events/event_subnet_add.py
@@ -29,7 +29,7 @@ class EventSubnetAdd(EventBase):
# document does not have a project attribute. In this case, network_name should not be provided.
fetcher = ApiFetchPort()
- fetcher.set_env(env)
+ fetcher.setup(env=env, origin=self.origin)
ports = fetcher.get(port_id)
if ports:
@@ -133,7 +133,7 @@ class EventSubnetAdd(EventBase):
# update network
if not ApiAccess.regions:
fetcher = ApiFetchRegions()
- fetcher.set_env(env)
+ fetcher.setup(env=env, origin=self.origin)
fetcher.get(project_id)
self.log.info("add new subnet.")
@@ -146,7 +146,7 @@ class EventSubnetAdd(EventBase):
FindLinksForVserviceVnics().add_links(search={"parent_id": "qdhcp-%s-vnics" % network_id})
scanner = Scanner()
- scanner.set_env(env)
+ scanner.setup(env=env, origin=self.origin)
scanner.scan_cliques()
self.log.info("Finished subnet added.")
return EventResult(result=True,
diff --git a/app/discover/events/event_subnet_update.py b/app/discover/events/event_subnet_update.py
index 59b0afb..2c58e70 100644
--- a/app/discover/events/event_subnet_update.py
+++ b/app/discover/events/event_subnet_update.py
@@ -50,7 +50,7 @@ class EventSubnetUpdate(EventBase):
# make sure that self.regions is not empty.
if not ApiAccess.regions:
fetcher = ApiFetchRegions()
- fetcher.set_env(env)
+ fetcher.setup(env=env, origin=self.origin)
fetcher.get(project_id)
self.log.info("add port binding to DHCP server.")
@@ -69,12 +69,12 @@ class EventSubnetUpdate(EventBase):
# add link for vservice - vnic
FindLinksForVserviceVnics().add_links(search={"id": "qdhcp-%s" % network_id})
scanner = Scanner()
- scanner.set_env(env)
+ scanner.setup(env=env, origin=self.origin)
scanner.scan_cliques()
FindLinksForVserviceVnics(). \
add_links(search={"id": "qdhcp-%s" % network_id})
scanner = Scanner()
- scanner.set_env(env)
+ scanner.setup(env=env, origin=self.origin)
scanner.scan_cliques()
if subnet['enable_dhcp'] is False and subnets[key]['enable_dhcp']:
diff --git a/app/discover/fetcher.py b/app/discover/fetcher.py
index 8d7fdbb..707cd60 100644
--- a/app/discover/fetcher.py
+++ b/app/discover/fetcher.py
@@ -8,16 +8,21 @@
# http://www.apache.org/licenses/LICENSE-2.0 #
###############################################################################
from discover.configuration import Configuration
+from utils.origins import Origin
from utils.logging.full_logger import FullLogger
class Fetcher:
+ ENV_TYPE_KUBERNETES = 'Kubernetes'
+ ENV_TYPE_OPENSTACK = 'OpenStack'
+
def __init__(self):
super().__init__()
self.env = None
self.log = FullLogger()
self.configuration = None
+ self.origin = None
@staticmethod
def escape(string):
@@ -25,11 +30,55 @@ class Fetcher:
def set_env(self, env):
self.env = env
- self.log.set_env(env)
+ self.log.setup(env=env)
self.configuration = Configuration()
+ def setup(self, env, origin: Origin = None):
+ self.set_env(env=env)
+ if origin:
+ self.origin = origin
+ self.log.setup(origin=origin)
+
def get_env(self):
return self.env
def get(self, object_id):
return None
+
+ def set_folder_parent(self,
+ o: dict,
+ object_type: str =None,
+ master_parent_type: str =None,
+ master_parent_id: str =None,
+ parent_objects_name=None,
+ parent_type: str =None,
+ parent_id: str =None,
+ parent_text: str =None):
+ if object_type:
+ o['type'] = object_type
+ if not parent_objects_name:
+ parent_objects_name = '{}s'.format(object_type)
+ if not master_parent_type:
+            self.log.error('set_folder_parent: must specify: '
+                           'master_parent_type, master_parent_id, '
+                           'parent_type, parent_id')
+ return
+ if not parent_objects_name and not parent_type:
+ self.log.error('set_folder_parent: must specify: '
+ 'either parent_objects_name (e.g. "vedges") '
+ 'or parent_type and parent_id')
+ return
+ if parent_objects_name and not parent_type:
+ parent_type = '{}_folder'.format(parent_objects_name)
+ if parent_objects_name and not parent_id:
+ parent_id = '{}-{}'.format(master_parent_id, parent_objects_name)
+ o.update({
+ 'master_parent_type': master_parent_type,
+ 'master_parent_id': master_parent_id,
+ 'parent_type': parent_type,
+ 'parent_id': parent_id
+ })
+ if parent_text:
+ o['parent_text'] = parent_text
+ elif parent_objects_name:
+ o['parent_text'] = parent_objects_name.capitalize()
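set_folder_parent() centralizes parent bookkeeping that individual fetchers used to repeat. A hypothetical call showing the defaults it derives (parent_objects_name falls back to object_type + 's', parent_type to '<objects>_folder', parent_id to '<master_parent_id>-<objects>'); the environment name here is made up:

    from discover.fetcher import Fetcher   # the class patched above

    doc = {}
    Fetcher().set_folder_parent(doc,
                                object_type='namespace',
                                master_parent_type='environment',
                                master_parent_id='kube-env')
    assert doc == {'type': 'namespace',
                   'master_parent_type': 'environment',
                   'master_parent_id': 'kube-env',
                   'parent_type': 'namespaces_folder',
                   'parent_id': 'kube-env-namespaces',
                   'parent_text': 'Namespaces'}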
diff --git a/app/discover/fetchers/api/api_access.py b/app/discover/fetchers/api/api_access.py
index f685faf..1fca202 100644
--- a/app/discover/fetchers/api/api_access.py
+++ b/app/discover/fetchers/api/api_access.py
@@ -12,21 +12,18 @@ import re
import requests
import time
-from discover.configuration import Configuration
-from discover.fetcher import Fetcher
+from utils.api_access_base import ApiAccessBase
from utils.string_utils import jsonify
-class ApiAccess(Fetcher):
+class ApiAccess(ApiAccessBase):
+
+ ADMIN_PORT = "35357"
+
subject_token = None
initialized = False
regions = {}
- config = None
- api_config = None
- host = ""
- base_url = ""
- admin_token = ""
tokens = {}
admin_endpoint = ""
admin_project = None
@@ -38,28 +35,19 @@ class ApiAccess(Fetcher):
# identity API v2 version with admin token
def __init__(self, config=None):
- super(ApiAccess, self).__init__()
- if ApiAccess.initialized:
+ super().__init__('OpenStack', config)
+ self.base_url = "http://" + self.host + ":" + self.port
+ if self.initialized:
return
- ApiAccess.config = {'OpenStack': config} if config else Configuration()
- ApiAccess.api_config = ApiAccess.config.get("OpenStack")
- host = ApiAccess.api_config.get("host", "")
- ApiAccess.host = host
- port = ApiAccess.api_config.get("port", "")
- if not (host and port):
- raise ValueError('Missing definition of host or port ' +
- 'for OpenStack API access')
- ApiAccess.base_url = "http://" + host + ":" + port
- ApiAccess.admin_token = ApiAccess.api_config.get("admin_token", "")
- ApiAccess.admin_project = ApiAccess.api_config.get("admin_project",
- "admin")
- ApiAccess.admin_endpoint = "http://" + host + ":" + "35357"
+ ApiAccess.admin_project = self.api_config.get("admin_project", "admin")
+ ApiAccess.admin_endpoint = "http://" + self.host + ":" + self.ADMIN_PORT
token = self.v2_auth_pwd(ApiAccess.admin_project)
if not token:
raise ValueError("Authentication failed. Failed to obtain token")
else:
self.subject_token = token
+ self.initialized = True
@staticmethod
def parse_time(time_str):
@@ -95,9 +83,9 @@ class ApiAccess(Fetcher):
subject_token = self.get_existing_token(project_id)
if subject_token:
return subject_token
- req_url = ApiAccess.base_url + "/v2.0/tokens"
+ req_url = self.base_url + "/v2.0/tokens"
response = requests.post(req_url, json=post_body, headers=headers,
- timeout=5)
+ timeout=self.CONNECT_TIMEOUT)
response = response.json()
ApiAccess.auth_response[project_id] = response
if 'error' in response:
@@ -120,8 +108,8 @@ class ApiAccess(Fetcher):
return token_details
def v2_auth_pwd(self, project):
- user = ApiAccess.api_config["user"]
- pwd = ApiAccess.api_config["pwd"]
+ user = self.api_config["user"]
+ pwd = self.api_config["pwd"]
post_body = {
"auth": {
"passwordCredentials": {
@@ -148,23 +136,6 @@ class ApiAccess(Fetcher):
auth_response = ApiAccess.auth_response.get('admin', {})
return auth_response
- def get_rel_url(self, relative_url, headers):
- req_url = ApiAccess.base_url + relative_url
- return self.get_url(req_url, headers)
-
- def get_url(self, req_url, headers):
- response = requests.get(req_url, headers=headers)
- if response.status_code != requests.codes.ok:
- # some error happened
- if "reason" in response:
- msg = ", reason: {}".format(response.reason)
- else:
- msg = ", response: {}".format(response.text)
- self.log.error("req_url: {} {}".format(req_url, msg))
- return None
- ret = response.json()
- return ret
-
def get_region_url(self, region_name, service):
if region_name not in self.regions:
return None
@@ -174,7 +145,7 @@ class ApiAccess(Fetcher):
return None
orig_url = s["adminURL"]
# replace host name with the host found in config
- url = re.sub(r"^([^/]+)//[^:]+", r"\1//" + ApiAccess.host, orig_url)
+ url = re.sub(r"^([^/]+)//[^:]+", r"\1//" + self.host, orig_url)
return url
# like get_region_url(), but remove everything starting from the "/v2"
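The host rewrite in get_region_url() keeps the scheme and port of the endpoint found in the keystone catalog but swaps in the configured host. A quick illustration (the endpoint and host values are made up):

    import re

    orig_url = 'http://controller-1:8774/v2.1'
    host = '10.0.0.1'   # host taken from the environment config
    url = re.sub(r"^([^/]+)//[^:]+", r"\1//" + host, orig_url)
    # url == 'http://10.0.0.1:8774/v2.1'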
diff --git a/app/discover/fetchers/api/api_fetch_availability_zones.py b/app/discover/fetchers/api/api_fetch_availability_zones.py
index 196893b..ad9550e 100644
--- a/app/discover/fetchers/api/api_fetch_availability_zones.py
+++ b/app/discover/fetchers/api/api_fetch_availability_zones.py
@@ -28,7 +28,7 @@ class ApiFetchAvailabilityZones(ApiAccess):
# because the latter does not include the "internal" zone in the results
endpoint = self.get_region_url_nover(region, "nova")
req_url = endpoint + "/v2/" + token["tenant"]["id"] + \
- "/os-availability-zone/detail"
+ "/os-availability-zone/detail"
headers = {
"X-Auth-Project-Id": project,
"X-Auth-Token": token["id"]
@@ -45,11 +45,10 @@ class ApiFetchAvailabilityZones(ApiAccess):
for doc in azs:
doc["id"] = doc["zoneName"]
doc["name"] = doc.pop("zoneName")
- doc["master_parent_type"] = "region"
- doc["master_parent_id"] = region
- doc["parent_type"] = "availability_zones_folder"
- doc["parent_id"] = region + "-availability_zones"
- doc["parent_text"] = "Availability Zones"
+ self.set_folder_parent(doc, object_type="availability_zone",
+ master_parent_type="region",
+ master_parent_id=region,
+ parent_text="Availability Zones")
doc["available"] = doc["zoneState"]["available"]
doc.pop("zoneState")
ret.append(doc)
diff --git a/app/discover/fetchers/api/api_fetch_host_instances.py b/app/discover/fetchers/api/api_fetch_host_instances.py
index 56cffda..bf8513a 100644
--- a/app/discover/fetchers/api/api_fetch_host_instances.py
+++ b/app/discover/fetchers/api/api_fetch_host_instances.py
@@ -18,7 +18,7 @@ class ApiFetchHostInstances(ApiAccess, DbAccess, metaclass=Singleton):
def __init__(self):
super(ApiFetchHostInstances, self).__init__()
self.inv = InventoryMgr()
- self.endpoint = ApiAccess.base_url.replace(":5000", ":8774")
+ self.endpoint = self.base_url.replace(":5000", ":8774")
self.projects = None
self.db_fetcher = DbFetchInstances()
diff --git a/app/discover/fetchers/api/api_fetch_network.py b/app/discover/fetchers/api/api_fetch_network.py
index 889b8a5..b253773 100644
--- a/app/discover/fetchers/api/api_fetch_network.py
+++ b/app/discover/fetchers/api/api_fetch_network.py
@@ -23,7 +23,8 @@ class ApiFetchNetwork(ApiAccess):
return []
ret = []
for region in self.regions:
- # TODO: refactor legacy code (Unresolved reference - self.get_for_region)
+ # TODO: refactor legacy code
+ # (Unresolved reference - self.get_for_region)
ret.extend(self.get_for_region(region, token, project_id))
return ret
@@ -37,7 +38,7 @@ class ApiFetchNetwork(ApiAccess):
"X-Auth-Token": token["id"]
}
response = self.get_url(req_url, headers)
- if not "network" in response:
+ if "network" not in response:
return []
network = response["network"]
subnets = network['subnets']
@@ -60,13 +61,12 @@ class ApiFetchNetwork(ApiAccess):
network["cidrs"] = cidrs
network["subnet_ids"] = subnet_ids
- network["master_parent_type"] = "project"
- network["master_parent_id"] = network["tenant_id"]
- network["parent_type"] = "networks_folder"
- network["parent_id"] = network["tenant_id"] + "-networks"
- network["parent_text"] = "Networks"
- # set the 'network' attribute for network objects to the name of network,
- # to allow setting constraint on network when creating network clique
+ self.set_folder_parent(network, object_type="network",
+ master_parent_type="project",
+ master_parent_id=network["tenant_id"])
+ # set the 'network' attribute for network objects to the name of
+ # network, to allow setting constraint on network when creating
+ # network clique
network['network'] = network["id"]
# get the project name
project = self.inv.get_by_id(self.get_env(), network["tenant_id"])
diff --git a/app/discover/fetchers/api/api_fetch_networks.py b/app/discover/fetchers/api/api_fetch_networks.py
index 4b70f65..f76517a 100644
--- a/app/discover/fetchers/api/api_fetch_networks.py
+++ b/app/discover/fetchers/api/api_fetch_networks.py
@@ -34,7 +34,7 @@ class ApiFetchNetworks(ApiAccess):
"X-Auth-Token": token["id"]
}
response = self.get_url(req_url, headers)
- if not "networks" in response:
+ if "networks" not in response:
return []
networks = response["networks"]
req_url = endpoint + "/v2.0/subnets"
@@ -46,7 +46,6 @@ class ApiFetchNetworks(ApiAccess):
for s in subnets:
subnets_hash[s["id"]] = s
for doc in networks:
- doc["master_parent_type"] = "project"
project_id = doc["tenant_id"]
if not project_id:
# find project ID of admin project
@@ -57,12 +56,12 @@ class ApiFetchNetworks(ApiAccess):
if not project:
self.log.error("failed to find admin project in DB")
project_id = project["id"]
- doc["master_parent_id"] = project_id
- doc["parent_type"] = "networks_folder"
- doc["parent_id"] = project_id + "-networks"
- doc["parent_text"] = "Networks"
- # set the 'network' attribute for network objects to the name of network,
- # to allow setting constraint on network when creating network clique
+ self.set_folder_parent(doc, object_type='network',
+ master_parent_id=project_id,
+ master_parent_type='project')
+ # set the 'network' attribute for network objects to the name of
+ # network, to allow setting constraint on network when creating
+ # network clique
doc['network'] = doc["id"]
# get the project name
project = self.inv.get_by_id(self.get_env(), project_id)
diff --git a/app/discover/fetchers/api/api_fetch_port.py b/app/discover/fetchers/api/api_fetch_port.py
index f8d9eeb..8de1452 100644
--- a/app/discover/fetchers/api/api_fetch_port.py
+++ b/app/discover/fetchers/api/api_fetch_port.py
@@ -43,11 +43,9 @@ class ApiFetchPort(ApiAccess):
return []
doc = response["port"]
- doc["master_parent_type"] = "network"
- doc["master_parent_id"] = doc["network_id"]
- doc["parent_type"] = "ports_folder"
- doc["parent_id"] = doc["network_id"] + "-ports"
- doc["parent_text"] = "Ports"
+ self.set_folder_parent(doc, object_type="port",
+ master_parent_type="network",
+ master_parent_id=doc["network_id"])
# get the project name
net = self.inv.get_by_id(self.get_env(), doc["network_id"])
if net:
diff --git a/app/discover/fetchers/api/api_fetch_ports.py b/app/discover/fetchers/api/api_fetch_ports.py
index f4c54a6..5e44c1b 100644
--- a/app/discover/fetchers/api/api_fetch_ports.py
+++ b/app/discover/fetchers/api/api_fetch_ports.py
@@ -38,11 +38,9 @@ class ApiFetchPorts(ApiAccess):
return []
ports = response["ports"]
for doc in ports:
- doc["master_parent_type"] = "network"
- doc["master_parent_id"] = doc["network_id"]
- doc["parent_type"] = "ports_folder"
- doc["parent_id"] = doc["network_id"] + "-ports"
- doc["parent_text"] = "Ports"
+ self.set_folder_parent(doc, object_type="port",
+ master_parent_type="network",
+ master_parent_id=doc["network_id"])
# get the project name
net = self.inv.get_by_id(self.get_env(), doc["network_id"])
if net:
diff --git a/app/discover/fetchers/api/api_fetch_project_hosts.py b/app/discover/fetchers/api/api_fetch_project_hosts.py
index 5b911f5..1059600 100644
--- a/app/discover/fetchers/api/api_fetch_project_hosts.py
+++ b/app/discover/fetchers/api/api_fetch_project_hosts.py
@@ -11,9 +11,11 @@ import json
from discover.fetchers.api.api_access import ApiAccess
from discover.fetchers.db.db_access import DbAccess
+from discover.fetchers.cli.cli_fetch_host_details import CliFetchHostDetails
+from utils.ssh_connection import SshError
-class ApiFetchProjectHosts(ApiAccess, DbAccess):
+class ApiFetchProjectHosts(ApiAccess, DbAccess, CliFetchHostDetails):
def __init__(self):
super(ApiFetchProjectHosts, self).__init__()
@@ -107,6 +109,7 @@ class ApiFetchProjectHosts(ApiAccess, DbAccess):
s = services["nova-compute"]
if s["available"] and s["active"]:
self.add_host_type(doc, "Compute", az['zoneName'])
+ self.fetch_host_os_details(doc)
return doc
# fetch more details of network nodes from neutron DB agents table
@@ -121,7 +124,12 @@ class ApiFetchProjectHosts(ApiAccess, DbAccess):
""".format(self.neutron_db)
results = self.get_objects_list(query, "")
for r in results:
- host = hosts[r["host"]]
+ host = r["host"]
+ if host not in hosts:
+ self.log.error("host from agents table not in hosts list: {}"
+ .format(host))
+ continue
+ host = hosts[host]
host["config"] = json.loads(r["configurations"])
self.add_host_type(host, "Network", '')
@@ -136,9 +144,33 @@ class ApiFetchProjectHosts(ApiAccess, DbAccess):
for db_row in results:
doc.update(db_row)
- def add_host_type(self, doc, type, zone):
- if not type in doc["host_type"]:
- doc["host_type"].append(type)
- if type == 'Compute':
+ @staticmethod
+ def add_host_type(doc, host_type, zone):
+ if host_type not in doc["host_type"]:
+ doc["host_type"].append(host_type)
+ if host_type == 'Compute':
doc['zone'] = zone
doc['parent_id'] = zone
+
+ def fetch_host_os_details(self, doc):
+ cmd = 'cat /etc/os-release && echo "ARCHITECTURE=`arch`"'
+ try:
+ lines = self.run_fetch_lines(cmd, ssh_to_host=doc['host'])
+ except SshError as e:
+ self.log.error('{}: {}'.format(cmd, str(e)))
+ return
+ os_attributes = {}
+ attributes_to_fetch = {
+ 'NAME': 'name',
+ 'VERSION': 'version',
+ 'ID': 'ID',
+ 'ID_LIKE': 'ID_LIKE',
+ 'ARCHITECTURE': 'architecture'
+ }
+ for attr in attributes_to_fetch:
+ matches = [l for l in lines if l.startswith(attr + '=')]
+ if matches:
+ line = matches[0]
+ attr_name = attributes_to_fetch[attr]
+ os_attributes[attr_name] = line[line.index('=')+1:].strip('"')
+ if os_attributes:
+ doc['OS'] = os_attributes
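fetch_host_os_details() reduces the key=value lines of /etc/os-release, plus the appended architecture line, to a small OS attributes dict. For example, on an illustrative Ubuntu host:

    lines = ['NAME="Ubuntu"',
             'VERSION="16.04.3 LTS (Xenial Xerus)"',
             'ID=ubuntu',
             'ID_LIKE=debian',
             'ARCHITECTURE=x86_64']
    # the attribute loop above produces:
    # doc['OS'] == {'name': 'Ubuntu',
    #               'version': '16.04.3 LTS (Xenial Xerus)',
    #               'ID': 'ubuntu',
    #               'ID_LIKE': 'debian',
    #               'architecture': 'x86_64'}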
diff --git a/app/discover/fetchers/api/api_fetch_regions.py b/app/discover/fetchers/api/api_fetch_regions.py
index 23a3736..4e83b01 100644
--- a/app/discover/fetchers/api/api_fetch_regions.py
+++ b/app/discover/fetchers/api/api_fetch_regions.py
@@ -13,7 +13,7 @@ from discover.fetchers.api.api_access import ApiAccess
class ApiFetchRegions(ApiAccess):
def __init__(self):
super(ApiFetchRegions, self).__init__()
- self.endpoint = ApiAccess.base_url
+ self.endpoint = self.base_url
def get(self, regions_folder_id):
token = self.v2_auth_pwd(self.admin_project)
diff --git a/app/discover/fetchers/cli/cli_access.py b/app/discover/fetchers/cli/cli_access.py
index c77b22a..68b81c8 100644
--- a/app/discover/fetchers/cli/cli_access.py
+++ b/app/discover/fetchers/cli/cli_access.py
@@ -17,7 +17,7 @@ from utils.logging.console_logger import ConsoleLogger
from utils.ssh_conn import SshConn
-class CliAccess(BinaryConverter, Fetcher):
+class CliAccess(Fetcher, BinaryConverter):
connections = {}
ssh_cmd = "ssh -q -o StrictHostKeyChecking=no "
call_count_per_con = {}
@@ -71,8 +71,9 @@ class CliAccess(BinaryConverter, Fetcher):
self.cached_commands[cmd_path] = {"timestamp": curr_time, "result": ret}
return ret
- def run_fetch_lines(self, cmd, ssh_to_host="", enable_cache=True):
- out = self.run(cmd, ssh_to_host, enable_cache)
+ def run_fetch_lines(self, cmd, ssh_to_host="", enable_cache=True,
+ use_sudo=True):
+ out = self.run(cmd, ssh_to_host, enable_cache, use_sudo=use_sudo)
if not out:
return []
# first try to split lines by whitespace
@@ -236,7 +237,7 @@ class CliAccess(BinaryConverter, Fetcher):
self.find_matching_regexps(o, line, regexps)
for regexp_tuple in regexps:
name = regexp_tuple['name']
- if 'name' not in o and 'default' in regexp_tuple:
+ if name not in o and 'default' in regexp_tuple:
o[name] = regexp_tuple['default']
@staticmethod
@@ -247,4 +248,8 @@ class CliAccess(BinaryConverter, Fetcher):
regex = re.compile(regex)
matches = regex.search(line)
if matches and name not in o:
- o[name] = matches.group(1)
+ try:
+ o[name] = matches.group(1)
+ except IndexError:
+ self.log.error('failed to find group 1 in match, {}'
+ .format(str(regexp_tuple)))
diff --git a/app/discover/fetchers/cli/cli_fetch_host_pnics.py b/app/discover/fetchers/cli/cli_fetch_host_pnics.py
index 26cd603..81d164d 100644
--- a/app/discover/fetchers/cli/cli_fetch_host_pnics.py
+++ b/app/discover/fetchers/cli/cli_fetch_host_pnics.py
@@ -27,8 +27,8 @@ class CliFetchHostPnics(CliAccess):
'description': 'IPv6 Address'}
]
- def get(self, id):
- host_id = id[:id.rindex("-")]
+ def get(self, parent_id):
+ host_id = parent_id[:parent_id.rindex("-")]
cmd = 'ls -l /sys/class/net | grep ^l | grep -v "/virtual/"'
host = self.inv.get_by_id(self.get_env(), host_id)
if not host:
@@ -39,7 +39,8 @@ class CliFetchHostPnics(CliAccess):
", host: " + str(host))
return []
host_types = host["host_type"]
- if "Network" not in host_types and "Compute" not in host_types:
+ accepted_host_types = ['Network', 'Compute', 'Kube-node']
+ if not [t for t in accepted_host_types if t in host_types]:
return []
interface_lines = self.run_fetch_lines(cmd, host_id)
interfaces = []
diff --git a/app/discover/fetchers/cli/cli_fetch_vconnectors_ovs.py b/app/discover/fetchers/cli/cli_fetch_vconnectors_ovs.py
index ff37569..ac04568 100644
--- a/app/discover/fetchers/cli/cli_fetch_vconnectors_ovs.py
+++ b/app/discover/fetchers/cli/cli_fetch_vconnectors_ovs.py
@@ -18,8 +18,8 @@ class CliFetchVconnectorsOvs(CliFetchVconnectors):
def get_vconnectors(self, host):
host_id = host['id']
- lines = self.run_fetch_lines("brctl show", host_id)
- headers = ["bridge_name", "bridge_id", "stp_enabled", "interfaces"]
+ lines = self.run_fetch_lines('brctl show', host_id)
+ headers = ['bridge_name', 'bridge_id', 'stp_enabled', 'interfaces']
headers_count = len(headers)
# since we hard-coded the headers list, remove the headers line
del lines[:1]
@@ -31,26 +31,32 @@ class CliFetchVconnectorsOvs(CliFetchVconnectors):
results = self.parse_cmd_result_with_whitespace(fixed_lines, headers, False)
ret = []
for doc in results:
- doc["name"] = doc.pop("bridge_name")
- doc["id"] = doc["name"] + "-" + doc.pop("bridge_id")
- doc["host"] = host_id
- doc["connector_type"] = "bridge"
- if "interfaces" in doc:
- interfaces = {}
- interface_names = doc["interfaces"].split(",")
- for interface_name in interface_names:
- # find MAC address for this interface from ports list
- port_id_prefix = interface_name[3:]
- port = self.inv.find_items({
- "environment": self.get_env(),
- "type": "port",
- "binding:host_id": host_id,
- "id": {"$regex": r"^" + re.escape(port_id_prefix)}
- }, get_single=True)
- mac_address = '' if not port else port['mac_address']
- interface = {'name': interface_name, 'mac_address': mac_address}
- interfaces[interface_name] = interface
- doc["interfaces"] = interfaces
- doc['interfaces_names'] = list(interfaces.keys())
- ret.append(doc)
+ doc['name'] = '{}-{}'.format(host_id, doc['bridge_name'])
+ doc['id'] = '{}-{}'.format(doc['name'], doc.pop('bridge_id'))
+ doc['host'] = host_id
+ doc['connector_type'] = 'bridge'
+ self.get_vconnector_interfaces(doc, host_id)
+ ret.append(doc)
return ret
+
+ def get_vconnector_interfaces(self, doc, host_id):
+ if 'interfaces' not in doc:
+ doc['interfaces'] = {}
+ doc['interfaces_names'] = []
+ return
+ interfaces = {}
+ interface_names = doc['interfaces'].split(',')
+ for interface_name in interface_names:
+ # find MAC address for this interface from ports list
+ port_id_prefix = interface_name[3:]
+ port = self.inv.find_items({
+ 'environment': self.get_env(),
+ 'type': 'port',
+ 'binding:host_id': host_id,
+ 'id': {'$regex': r'^' + re.escape(port_id_prefix)}
+ }, get_single=True)
+ mac_address = '' if not port else port['mac_address']
+ interface = {'name': interface_name, 'mac_address': mac_address}
+ interfaces[interface_name] = interface
+ doc['interfaces'] = interfaces
+ doc['interfaces_names'] = list(interfaces.keys())
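The port lookup relies on the naming convention for bridge interfaces: a 3-character prefix (e.g. 'qvb') followed by the start of the Neutron port id. An illustrative name, not taken from the patch:

    interface_name = 'qvb49a0d3a5-fb'
    port_id_prefix = interface_name[3:]   # '49a0d3a5-fb'
    # the inventory query above then finds the port whose id starts with
    # this prefix and copies its mac_address onto the interface entry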
diff --git a/app/discover/fetchers/cli/cli_fetch_vservice_vnics.py b/app/discover/fetchers/cli/cli_fetch_vservice_vnics.py
index 3bc3a5b..0129d3b 100644
--- a/app/discover/fetchers/cli/cli_fetch_vservice_vnics.py
+++ b/app/discover/fetchers/cli/cli_fetch_vservice_vnics.py
@@ -66,17 +66,15 @@ class CliFetchVserviceVnics(CliAccess):
master_parent_id = "{}-{}".format(host, service)
current = {
"id": host + "-" + name,
- "type": "vnic",
"vnic_type": "vservice_vnic",
"host": host,
"name": name,
- "master_parent_type": "vservice",
- "master_parent_id": master_parent_id,
- "parent_type": "vnics_folder",
- "parent_id": "{}-vnics".format(master_parent_id),
- "parent_text": "vNICs",
"lines": []
}
+ self.set_folder_parent(current, object_type="vnic",
+ master_parent_type="vservice",
+ master_parent_id=master_parent_id,
+ parent_text="vNICs")
interfaces.append(current)
self.handle_line(current, line_remainder)
else:
diff --git a/app/discover/fetchers/db/db_access.py b/app/discover/fetchers/db/db_access.py
index 090ab84..5ff49d5 100644
--- a/app/discover/fetchers/db/db_access.py
+++ b/app/discover/fetchers/db/db_access.py
@@ -38,8 +38,7 @@ class DbAccess(Fetcher):
conn = None
query_count_per_con = 0
- # connection timeout set to 30 seconds,
- # due to problems over long connections
+ # connection timeout set to 5 seconds
TIMEOUT = 5
def __init__(self, mysql_config=None):
@@ -47,6 +46,9 @@ class DbAccess(Fetcher):
self.config = {'mysql': mysql_config} if mysql_config \
else Configuration()
self.conf = self.config.get("mysql")
+ self.connect_timeout = int(self.conf['connect_timeout']) \
+ if 'connect_timeout' in self.conf \
+ else self.TIMEOUT
self.connect_to_db()
self.neutron_db = self.get_neutron_db_name()
@@ -55,16 +57,18 @@ class DbAccess(Fetcher):
return
try:
connector = mysql.connector
- DbAccess.conn = connector.connect(host=_host, port=_port,
- connection_timeout=self.TIMEOUT,
- user=_user,
- password=_pwd,
- database=_database,
- raise_on_warnings=True)
+ conn = connector.connect(host=_host, port=_port,
+ connection_timeout=self.connect_timeout,
+ user=_user,
+ password=_pwd,
+ database=_database,
+ raise_on_warnings=True)
+ DbAccess.conn = conn
DbAccess.conn.ping(True) # auto-reconnect if necessary
except Exception as e:
- self.log.critical("failed to connect to MySQL DB: {}"
- .format(str(e)))
+ msg = "failed to connect to MySQL DB: {}".format(str(e))
+ self.log.critical(msg)
+ raise ScanError(msg)
return
DbAccess.query_count_per_con = 0
@@ -93,8 +97,11 @@ class DbAccess(Fetcher):
DbAccess.conn = None
self.conf = self.config.get("mysql")
cnf = self.conf
+ pwd = cnf.get('pwd', '')
+ if not pwd:
+ raise ScanError('db_access: attribute pwd is missing')
self.db_connect(cnf.get('host', ''), cnf.get('port', ''),
- cnf.get('user', ''), cnf.get('pwd', ''),
+ cnf.get('user', ''), pwd,
cnf.get('schema', 'nova'))
@with_cursor
diff --git a/app/discover/fetchers/db/db_fetch_oteps.py b/app/discover/fetchers/db/db_fetch_oteps.py
index 85376ed..7721136 100644
--- a/app/discover/fetchers/db/db_fetch_oteps.py
+++ b/app/discover/fetchers/db/db_fetch_oteps.py
@@ -82,4 +82,4 @@ class DbFetchOteps(DbAccess, CliAccess, metaclass=Singleton):
interface = l.split(":")[1].strip()
if vconnector:
- doc["vconnector"] = vconnector
+ doc["vconnector"] = '{}-{}'.format(host_id, vconnector)
diff --git a/app/discover/fetchers/kube/__init__.py b/app/discover/fetchers/kube/__init__.py
new file mode 100644
index 0000000..b0637e9
--- /dev/null
+++ b/app/discover/fetchers/kube/__init__.py
@@ -0,0 +1,9 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
diff --git a/app/discover/fetchers/kube/kube_access.py b/app/discover/fetchers/kube/kube_access.py
new file mode 100644
index 0000000..38bb978
--- /dev/null
+++ b/app/discover/fetchers/kube/kube_access.py
@@ -0,0 +1,28 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from kubernetes.client import Configuration as KubConf, CoreV1Api
+
+from utils.api_access_base import ApiAccessBase
+
+
+class KubeAccess(ApiAccessBase):
+
+ def __init__(self, config=None):
+ super().__init__('Kubernetes', config)
+ self.base_url = 'https://{}:{}'.format(self.host, self.port)
+ self.bearer_token = self.api_config.get('token', '')
+ conf = KubConf()
+ conf.host = self.base_url
+ conf.user = self.api_config.get('user')
+ conf.api_key_prefix['authorization'] = 'Bearer'
+ conf.api_key['authorization'] = self.bearer_token
+ conf.verify_ssl = False
+ self.api = CoreV1Api()
+
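Note that the KubConf instance configured here is never handed to CoreV1Api(), which therefore falls back to the client library's default configuration. If the intent is to use the configured host and bearer token, a sketch of wiring it in explicitly (assuming the kubernetes client exposes ApiClient(configuration), as current releases do):

    from kubernetes.client import ApiClient

    # attach the configured object to the API client explicitly:
    self.api = CoreV1Api(ApiClient(conf))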
diff --git a/app/discover/fetchers/kube/kube_fetch_namespaces.py b/app/discover/fetchers/kube/kube_fetch_namespaces.py
new file mode 100644
index 0000000..951ddb8
--- /dev/null
+++ b/app/discover/fetchers/kube/kube_fetch_namespaces.py
@@ -0,0 +1,32 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.kube.kube_access import KubeAccess
+
+
+class KubeFetchNamespaces(KubeAccess):
+
+ def __init__(self, config=None):
+ super().__init__(config)
+
+ def get(self, object_id):
+ namespaces = self.api.list_namespace()
+ return [self.get_namespace(i) for i in namespaces.items]
+
+ @staticmethod
+ def get_namespace(namespace):
+ attrs = ['creation_timestamp', 'self_link', 'uid']
+ namespace_details = {
+ 'name': namespace.metadata.name,
+ 'status': namespace.status.phase
+ }
+ namespace_details.update({x: getattr(namespace.metadata, x, '')
+ for x in attrs})
+ namespace_details['id'] = namespace_details['uid']
+ return namespace_details
diff --git a/app/discover/link_finders/find_implicit_links.py b/app/discover/link_finders/find_implicit_links.py
new file mode 100644
index 0000000..01eaa7b
--- /dev/null
+++ b/app/discover/link_finders/find_implicit_links.py
@@ -0,0 +1,128 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.link_finders.find_links import FindLinks
+
+
+class FindImplicitLinks(FindLinks):
+
+ def __init__(self):
+ super().__init__()
+ self.links = []
+ self.constraint_attributes = self.get_constraint_attributes()
+
+ def add_links(self):
+ self.log.info('adding implicit links')
+ self.get_existing_links()
+ self.get_transitive_closure()
+
+ def get_constraint_attributes(self) -> list:
+ attributes = set()
+ for c in self.inv.find({'environment': self.get_env()},
+ collection='clique_constraints'):
+ for a in c['constraints']:
+ attributes.add(a)
+ return list(attributes)
+
+ def get_existing_links(self):
+ self.log.info('fetching existing links')
+ existing_links = self.inv.find({'environment': self.get_env()},
+ collection='links')
+ for l in existing_links:
+ self.links.append({'pass': 0, 'link': l})
+
+ def constraints_match(self, link1, link2):
+ if 'attributes' not in link1 or 'attributes' not in link2:
+ return True
+ attr1 = link1['attributes']
+ attr2 = link2['attributes']
+ for a in self.constraint_attributes:
+ if a in attr1 and a in attr2 and attr1[a] != attr2[a]:
+ return False
+ return True
+
+ def links_match(self, start, dest):
+ if start['link_type'] == dest['link_type']:
+ return False # two links of the same type cannot form an implicit link
+ if start['source_id'] == dest['target_id']:
+ return False # avoid cyclic links
+ if not self.constraints_match(start, dest):
+ return False
+ return start['target_id'] == dest['source_id']
+
+ def add_matching_links(self, link, pass_no):
+ self.log.debug('looking for matches for link: {};{}'
+ .format(link['source_id'], link['target_id']))
+ matches = [l for l in self.links
+ if l['pass'] == 0 # take only original links
+ and self.links_match(link, l['link'])]
+ for l in matches:
+ implicit = self.add_implicit_link(link, l['link'])
+ self.links.append({'pass': pass_no, 'link': implicit})
+ return len(matches)
+
+ def get_link_constraint_attributes(self, link1, link2) -> dict:
+ attributes = {}
+ for a in self.constraint_attributes:
+ # constraints_match() verified the attribute values don't conflict
+ if a in link1.get('attributes', {}):
+ attributes[a] = link1['attributes'][a]
+ elif a in link2.get('attributes', {}):
+ attributes[a] = link2['attributes'][a]
+ return attributes
+
+ @staticmethod
+ def get_attr(attr, link1, link2):
+ if attr not in link1 and attr not in link2:
+ return None
+ if attr not in link1:
+ return link2[attr]
+ if attr not in link2 or link1[attr] == link2[attr]:
+ return link1[attr]
+ return None
+
+ def add_implicit_link(self, link1, link2):
+ link_type_from = link1['link_type'].split('-')[0]
+ link_type_to = link2['link_type'].split('-')[1]
+ link_type = '{}-{}'.format(link_type_from, link_type_to)
+ link_name = ''
+ state = 'down' \
+ if link1['state'] == 'down' or link2['state'] == 'down' \
+ else 'up'
+ link_weight = 0 # TBD
+ host = self.get_attr('host', link1, link2)
+ switch = self.get_attr('switch', link1, link2)
+ extra_attributes = self.get_link_constraint_attributes(link1, link2)
+ self.log.debug('adding implicit link: link type: {}, from: {}, to: {}'
+ .format(link_type,
+ link1['source_id'],
+ link2['target_id']))
+ implicit = self.create_link(self.get_env(),
+ link1['source'], link1['source_id'],
+ link2['target'], link2['target_id'],
+ link_type, link_name, state, link_weight,
+ host=host, switch=switch,
+ implicit=True,
+ extra_attributes=extra_attributes)
+ return implicit
+
+ def get_transitive_closure(self):
+ pass_no = 1
+ while True:
+ match_count = 0
+ last_pass_links = [l for l in self.links if l['pass'] == pass_no-1]
+ for l in last_pass_links:
+ match_count += self.add_matching_links(l['link'], pass_no)
+ self.log.info('Transitive closure pass #{}: '
+ 'found {} implicit links'
+ .format(pass_no, match_count))
+ if match_count == 0:
+ break
+ pass_no += 1
+ self.log.info('done adding implicit links')
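The transitive closure works by repeatedly joining each newly added link with the original (pass-0) links. A worked example with two original links (ids and attribute values are illustrative):

    l1 = {'link_type': 'instance-vnic', 'source_id': 'instance1',
          'target_id': 'vnic1', 'state': 'up',
          'attributes': {'network': 'netA'}}
    l2 = {'link_type': 'vnic-vconnector', 'source_id': 'vnic1',
          'target_id': 'vconnector1', 'state': 'up',
          'attributes': {'network': 'netA'}}
    # links_match(l1, l2) holds: the link types differ, l1's target_id
    # equals l2's source_id, and the 'network' constraint values agree,
    # so pass 1 creates an implicit 'instance-vconnector' link
    # instance1 -> vconnector1. pass 2 then tries to extend the pass-1
    # links against the originals, and the loop stops after the first
    # pass that adds no new links.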
diff --git a/app/discover/link_finders/find_links.py b/app/discover/link_finders/find_links.py
index d234479..31d39e5 100644
--- a/app/discover/link_finders/find_links.py
+++ b/app/discover/link_finders/find_links.py
@@ -19,6 +19,7 @@ class FindLinks(Fetcher):
def create_link(self, env, source, source_id, target, target_id,
link_type, link_name, state, link_weight,
host=None, switch=None,
+ implicit=False,
extra_attributes=None):
if extra_attributes is None:
extra_attributes = {}
@@ -27,9 +28,11 @@ class FindLinks(Fetcher):
link = self.inv.create_link(env,
source, source_id, target, target_id,
link_type, link_name, state, link_weight,
+ implicit=implicit,
source_label=source_label,
target_label=target_label,
host=host, switch=switch,
extra_attributes=extra_attributes)
if self.inv.monitoring_setup_manager:
self.inv.monitoring_setup_manager.create_setup(link)
+ return link
diff --git a/app/discover/link_finders/find_links_for_instance_vnics.py b/app/discover/link_finders/find_links_for_instance_vnics.py
index 975ab1a..1dfb818 100644
--- a/app/discover/link_finders/find_links_for_instance_vnics.py
+++ b/app/discover/link_finders/find_links_for_instance_vnics.py
@@ -49,6 +49,8 @@ class FindLinksForInstanceVnics(FindLinks):
network_id = net['network']['id']
v['network'] = network_id
self.inv.set(v)
+ if self.inv.monitoring_setup_manager:
+ self.inv.monitoring_setup_manager.create_setup(instance)
break
state = "up" # TBD
link_weight = 0 # TBD
diff --git a/app/discover/link_finders/find_links_for_vedges.py b/app/discover/link_finders/find_links_for_vedges.py
index f9719b4..afabdbe 100644
--- a/app/discover/link_finders/find_links_for_vedges.py
+++ b/app/discover/link_finders/find_links_for_vedges.py
@@ -104,8 +104,6 @@ class FindLinksForVedges(FindLinks):
if "pnic" in vedge:
if pname != vedge["pnic"]:
return
- elif self.configuration.has_network_plugin('VPP'):
- pass
pnic = self.inv.find_items({
"environment": self.get_env(),
"type": "host_pnic",
diff --git a/app/discover/scan.py b/app/discover/scan.py
index 49f37ff..fb5e833 100755
--- a/app/discover/scan.py
+++ b/app/discover/scan.py
@@ -22,6 +22,7 @@ from discover.scan_error import ScanError
from discover.scanner import Scanner
from monitoring.setup.monitoring_setup_manager import MonitoringSetupManager
from utils.constants import EnvironmentFeatures
+from utils.origins import ScanOrigin, ScanOrigins
from utils.mongo_access import MongoAccess
from utils.exceptions import ScanArgumentsError
from utils.inventory_mgr import InventoryMgr
@@ -112,6 +113,7 @@ class ScanPlan:
class ScanController(Fetcher):
DEFAULTS = {
+ "_id": None,
"env": "",
"mongo_config": "",
"type": "",
@@ -126,7 +128,8 @@ class ScanController(Fetcher):
"cliques_only": False,
"monitoring_setup_only": False,
"clear": False,
- "clear_all": False
+ "clear_all": False,
+ "scheduled": False
}
def __init__(self):
@@ -274,9 +277,13 @@ class ScanController(Fetcher):
self.conf.use_env(env_name)
# generate ScanObject Class and instance.
+ origin = ScanOrigin(origin_id=args['_id'],
+ origin_type=ScanOrigins.SCHEDULED
+ if args["scheduled"]
+ else ScanOrigins.MANUAL)
scanner = Scanner()
scanner.log.set_loglevel(args['loglevel'])
- scanner.set_env(env_name)
+ scanner.setup(env=env_name, origin=origin)
scanner.found_errors[env_name] = False
# decide what scanning operations to do
diff --git a/app/discover/scan_manager.py b/app/discover/scan_manager.py
index 6c46d47..6e31bbd 100644
--- a/app/discover/scan_manager.py
+++ b/app/discover/scan_manager.py
@@ -103,7 +103,8 @@ class ScanManager(Manager):
def _build_scan_args(self, scan_request: dict):
args = {
- 'mongo_config': self.args.mongo_config
+ 'mongo_config': self.args.mongo_config,
+ 'scheduled': bool(scan_request.get('interval'))
}
def set_arg(name_from: str, name_to: str = None):
@@ -113,6 +114,7 @@ class ScanManager(Manager):
if val:
args[name_to] = val
+ set_arg("_id")
set_arg("object_id", "id")
set_arg("log_level", "loglevel")
set_arg("environment", "env")
@@ -219,71 +221,74 @@ class ScanManager(Manager):
for interval in self.INTERVALS.keys():
self._prepare_scheduled_requests_for_interval(interval)
+ def handle_scans(self):
+ self._prepare_scheduled_requests()
+
+ # Find a pending request that is waiting the longest time
+ results = self.scans_collection \
+ .find({'status': ScanStatus.PENDING.value,
+ 'submit_timestamp': {'$ne': None}}) \
+ .sort("submit_timestamp", pymongo.ASCENDING) \
+ .limit(1)
+
+ # If no scans are pending, sleep for some time
+ if results.count() == 0:
+ time.sleep(self.interval)
+ else:
+ scan_request = results[0]
+ env = scan_request.get('environment')
+ scan_feature = EnvironmentFeatures.SCANNING
+ if not self.inv.is_feature_supported(env, scan_feature):
+ self.log.error("Scanning is not supported for env '{}'"
+ .format(scan_request.get('environment')))
+ self._fail_scan(scan_request)
+ return
+
+ scan_request['start_timestamp'] = datetime.datetime.utcnow()
+ scan_request['status'] = ScanStatus.RUNNING.value
+ self._update_document(scan_request)
+
+ # Prepare scan arguments and run the scan with them
+ try:
+ scan_args = self._build_scan_args(scan_request)
+
+ self.log.info("Starting scan for '{}' environment"
+ .format(scan_args.get('env')))
+ self.log.debug("Scan arguments: {}".format(scan_args))
+ result, message = ScanController().run(scan_args)
+ except ScanArgumentsError as e:
+ self.log.error("Scan request '{id}' "
+ "has invalid arguments. "
+ "Errors:\n{errors}"
+ .format(id=scan_request['_id'],
+ errors=e))
+ self._fail_scan(scan_request)
+ except Exception as e:
+ self.log.exception(e)
+ self.log.error("Scan request '{}' has failed."
+ .format(scan_request['_id']))
+ self._fail_scan(scan_request)
+ else:
+ # Check if the scan returned success
+ if not result:
+ self.log.error(message)
+ self.log.error("Scan request '{}' has failed."
+ .format(scan_request['_id']))
+ self._fail_scan(scan_request)
+ return
+
+ # update the status and timestamps.
+ self.log.info("Request '{}' has been scanned. ({})"
+ .format(scan_request['_id'], message))
+ end_time = datetime.datetime.utcnow()
+ scan_request['end_timestamp'] = end_time
+ self._complete_scan(scan_request, message)
+
def do_action(self):
self._clean_up()
try:
while True:
- self._prepare_scheduled_requests()
-
- # Find a pending request that is waiting the longest time
- results = self.scans_collection \
- .find({'status': ScanStatus.PENDING.value,
- 'submit_timestamp': {'$ne': None}}) \
- .sort("submit_timestamp", pymongo.ASCENDING) \
- .limit(1)
-
- # If no scans are pending, sleep for some time
- if results.count() == 0:
- time.sleep(self.interval)
- else:
- scan_request = results[0]
- env = scan_request.get('environment')
- scan_feature = EnvironmentFeatures.SCANNING
- if not self.inv.is_feature_supported(env, scan_feature):
- self.log.error("Scanning is not supported for env '{}'"
- .format(scan_request.get('environment')))
- self._fail_scan(scan_request)
- continue
-
- scan_request['start_timestamp'] = datetime.datetime.utcnow()
- scan_request['status'] = ScanStatus.RUNNING.value
- self._update_document(scan_request)
-
- # Prepare scan arguments and run the scan with them
- try:
- scan_args = self._build_scan_args(scan_request)
-
- self.log.info("Starting scan for '{}' environment"
- .format(scan_args.get('env')))
- self.log.debug("Scan arguments: {}".format(scan_args))
- result, message = ScanController().run(scan_args)
- except ScanArgumentsError as e:
- self.log.error("Scan request '{id}' "
- "has invalid arguments. "
- "Errors:\n{errors}"
- .format(id=scan_request['_id'],
- errors=e))
- self._fail_scan(scan_request)
- except Exception as e:
- self.log.exception(e)
- self.log.error("Scan request '{}' has failed."
- .format(scan_request['_id']))
- self._fail_scan(scan_request)
- else:
- # Check is scan returned success
- if not result:
- self.log.error(message)
- self.log.error("Scan request '{}' has failed."
- .format(scan_request['_id']))
- self._fail_scan(scan_request)
- continue
-
- # update the status and timestamps.
- self.log.info("Request '{}' has been scanned. ({})"
- .format(scan_request['_id'], message))
- end_time = datetime.datetime.utcnow()
- scan_request['end_timestamp'] = end_time
- self._complete_scan(scan_request, message)
+ self.handle_scans()
finally:
self._clean_up()
diff --git a/app/discover/scan_metadata_parser.py b/app/discover/scan_metadata_parser.py
index df27e18..8757f79 100644
--- a/app/discover/scan_metadata_parser.py
+++ b/app/discover/scan_metadata_parser.py
@@ -49,21 +49,28 @@ class ScanMetadataParser(MetadataParser):
self.add_error('missing or empty fetcher in scanner {} type #{}'
.format(scanner_name, str(type_index)))
elif isinstance(fetcher, str):
+ error_str = None
try:
- module_name = ClassResolver.get_module_file_by_class_name(fetcher)
+ get_module = ClassResolver.get_module_file_by_class_name
+ module_name = get_module(fetcher)
fetcher_package = module_name.split("_")[0]
if package:
fetcher_package = ".".join((package, fetcher_package))
- instance = ClassResolver.get_instance_of_class(package_name=fetcher_package,
- module_name=module_name,
- class_name=fetcher)
- except ValueError:
- instance = None
- if not instance:
+ # get the fetcher's fully qualified class, not an instance;
+ # instances are created just-in-time (before fetching),
+ # to avoid initializing access classes not needed in some envs
+ get_class = ClassResolver.get_fully_qualified_class
+ class_qualified = get_class(fetcher, fetcher_package,
+ module_name)
+ except ValueError as e:
+ class_qualified = None
+ error_str = str(e)
+ if not class_qualified:
self.add_error('failed to find fetcher class {} in scanner {}'
- ' type #{}'
- .format(fetcher, scanner_name, type_index))
- scan_type[self.FETCHER] = instance
+ ' type #{} ({})'
+ .format(fetcher, scanner_name, type_index,
+ error_str))
+ scan_type[self.FETCHER] = class_qualified
elif isinstance(fetcher, dict):
is_folder = fetcher.get('folder', False)
if not is_folder:
@@ -81,7 +88,6 @@ class ScanMetadataParser(MetadataParser):
def validate_children_scanner(self, scanner_name: str, type_index: int,
scanners: dict, scan_type: dict):
- scanner = scanners[scanner_name]
if 'children_scanner' in scan_type:
children_scanner = scan_type.get('children_scanner')
if not isinstance(children_scanner, str):
diff --git a/app/discover/scanner.py b/app/discover/scanner.py
index 1fbcc68..8d36baf 100644
--- a/app/discover/scanner.py
+++ b/app/discover/scanner.py
@@ -10,6 +10,7 @@
# base class for scanners
import json
+
import os
import queue
import traceback
@@ -26,6 +27,7 @@ from utils.ssh_connection import SshError
class Scanner(Fetcher):
+
config = None
environment = None
env = None
@@ -82,27 +84,42 @@ class Scanner(Fetcher):
def check_type_env(self, type_to_fetch):
# check if type is to be run in this environment
- if "environment_condition" not in type_to_fetch:
- return True
- env_cond = type_to_fetch.get("environment_condition", {})
+ basic_cond = {'environment_type': self.ENV_TYPE_OPENSTACK}
+ env_cond = type_to_fetch.get("environment_condition", {}) \
+ if "environment_condition" in type_to_fetch \
+ else basic_cond
if not env_cond:
- return True
+ env_cond = basic_cond
+ if 'environment_type' not in env_cond.keys():
+ env_cond.update(basic_cond)
if not isinstance(env_cond, dict):
- self.log.warn('illegal environment_condition given '
- 'for type {}'.format(type_to_fetch['type']))
+ self.log.warn('Illegal environment_condition given '
+ 'for type {type}'.format(type=type_to_fetch['type']))
return True
conf = self.config.get_env_config()
+ if 'environment_type' not in conf:
+ conf.update(basic_cond)
for attr, required_val in env_cond.items():
if attr == "mechanism_drivers":
if "mechanism_drivers" not in conf:
- self.log.warn('illegal environment configuration: '
+ self.log.warn('Illegal environment configuration: '
'missing mechanism_drivers')
return False
if not isinstance(required_val, list):
required_val = [required_val]
- return bool(set(required_val) & set(conf["mechanism_drivers"]))
- elif attr not in conf or conf[attr] != required_val:
+ value_ok = bool(set(required_val) &
+ set(conf["mechanism_drivers"]))
+ if not value_ok:
+ return False
+ elif attr not in conf:
return False
+ else:
+ if isinstance(required_val, list):
+ if conf[attr] not in required_val:
+ return False
+ else:
+ if conf[attr] != required_val:
+ return False
# no check failed
return True
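With the reworked check, a scan type that names no environment_condition is implicitly restricted to OpenStack, and a condition may now also list several allowed values for an attribute. Illustrative inputs:

    type_to_fetch = {'type': 'namespace',
                     'environment_condition': {
                         'environment_type': 'Kubernetes'}}
    conf = {'environment_type': 'OpenStack',
            'mechanism_drivers': ['OVS']}
    # check_type_env() returns False here: the type is limited to
    # Kubernetes environments while the configuration says OpenStack.
    # a type without environment_condition gets the implicit condition
    # {'environment_type': 'OpenStack'} and so is skipped in
    # Kubernetes environments.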
@@ -120,18 +137,23 @@ class Scanner(Fetcher):
# get Fetcher instance
fetcher = type_to_fetch["fetcher"]
- fetcher.set_env(self.get_env())
+ if not isinstance(fetcher, Fetcher):
+ type_to_fetch['fetcher'] = fetcher() # make it an instance
+ fetcher = type_to_fetch["fetcher"]
+ fetcher.setup(env=self.get_env(), origin=self.origin)
# get children_scanner instance
children_scanner = type_to_fetch.get("children_scanner")
escaped_id = fetcher.escape(str(obj_id)) if obj_id else obj_id
self.log.info(
- "scanning : type=%s, parent: (type=%s, name=%s, id=%s)",
- type_to_fetch["type"],
- parent.get('type', 'environment'),
- parent.get('name', ''),
- escaped_id)
+ "Scanning: type={type}, "
+ "parent: (type={parent_type}, "
+ "name={parent_name}, "
+ "id={parent_id})".format(type=type_to_fetch["type"],
+ parent_type=parent.get('type', 'environment'),
+ parent_name=parent.get('name', ''),
+ parent_id=escaped_id))
# fetch OpenStack data from environment by CLI, API or MySQL
# or physical devices data from ACI API
@@ -142,18 +164,21 @@ class Scanner(Fetcher):
self.found_errors[self.get_env()] = True
return []
except Exception as e:
- self.log.error("Error while scanning : " +
- "fetcher=%s, " +
- "type=%s, " +
- "parent: (type=%s, name=%s, id=%s), " +
- "error: %s",
- fetcher.__class__.__name__,
- type_to_fetch["type"],
- "environment" if "type" not in parent
- else parent["type"],
- "" if "name" not in parent else parent["name"],
- escaped_id,
- e)
+ self.log.error(
+ "Error while scanning: fetcher={fetcher}, type={type}, "
+ "parent: (type={parent_type}, name={parent_name}, "
+ "id={parent_id}), "
+ "error: {error}".format(fetcher=fetcher.__class__.__name__,
+ type=type_to_fetch["type"],
+ parent_type="environment"
+ if "type" not in parent
+ else parent["type"],
+ parent_name=""
+ if "name" not in parent
+ else parent["name"],
+ parent_id=escaped_id,
+ error=e))
+
traceback.print_exc()
raise ScanError(str(e))
@@ -220,14 +245,16 @@ class Scanner(Fetcher):
self.log.info("Scan complete")
def scan_links(self):
- self.log.info("scanning for links")
+ self.log.info("Scanning for links")
for fetcher in self.link_finders:
- fetcher.set_env(self.get_env())
+ fetcher.setup(env=self.get_env(),
+ origin=self.origin)
fetcher.add_links()
def scan_cliques(self):
clique_scanner = CliqueFinder()
- clique_scanner.set_env(self.get_env())
+ clique_scanner.setup(env=self.get_env(),
+ origin=self.origin)
clique_scanner.find_cliques()
def deploy_monitoring_setup(self):
@@ -254,7 +281,6 @@ class Scanner(Fetcher):
def load_link_finders_metadata(self):
parser = FindLinksMetadataParser()
- conf = self.config.get_env_config()
finders_file = os.path.join(self.get_run_app_path(),
'config',
FindLinksMetadataParser.FINDERS_FILE)
diff --git a/app/install/calipso-installer.py b/app/install/calipso-installer.py
index 84b10da..008ff38 100644
--- a/app/install/calipso-installer.py
+++ b/app/install/calipso-installer.py
@@ -1,6 +1,6 @@
###############################################################################
-# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
-# and others #
+# Copyright (c) 2017-2018 Koren Lev (Cisco Systems), #
+# Yaron Yogev (Cisco Systems), Ilia Abashin (Cisco Systems) and others #
# #
# All rights reserved. This program and the accompanying materials #
# are made available under the terms of the Apache License, Version 2.0 #
@@ -16,16 +16,20 @@ import dockerpycreds
# note : not used, useful for docker api security if used
import time
import json
-
+import socket
+# by default, use the docker0 interface ip address for inter-container
+# communications, if no hostname argument is provided to the calipso-installer
+import os
+import errno
+dockerip = os.popen('ip addr show docker0 | grep "\<inet\>" | awk \'{ print $2 }\' | awk -F "/" \'{ print $1 }\'')
+local_hostname = dockerip.read().replace("\n", "")
C_MONGO_CONFIG = "/local_dir/calipso_mongo_access.conf"
-H_MONGO_CONFIG = "/home/calipso/calipso_mongo_access.conf"
+H_MONGO_CONFIG = "calipso_mongo_access.conf"
PYTHONPATH = "/home/scan/calipso_prod/app"
C_LDAP_CONFIG = "/local_dir/ldap.conf"
-H_LDAP_CONFIG = "/home/calipso/ldap.conf"
-
+H_LDAP_CONFIG = "ldap.conf"
-calipso_volume = {'/home/calipso': {'bind': '/local_dir', 'mode': 'rw'}}
RESTART_POLICY = {"Name": "always"}
# environment variables definitions
@@ -161,7 +165,8 @@ def start_mongo(dbport, copy):
copy_file("clique_types")
copy_file("cliques")
copy_file("constants")
- copy_file("environments_config")
+ copy_file("environments_config"),
+ copy_file("environment_options"),
copy_file("inventory")
copy_file("link_types")
copy_file("links")
@@ -255,12 +260,12 @@ def start_scan():
volumes=calipso_volume)
-def start_sensu(uchiwaport, sensuport, rabbitport, rabbitmport):
- name = "calipso-sensu"
+def start_monitor(uchiwaport, sensuport, rabbitport, rabbitmport):
+ name = "calipso-monitor"
if container_started(name):
return
print("\nstarting container {}...\n".format(name))
- image_name = "korenlev/calipso:sensu"
+ image_name = "korenlev/calipso:monitor"
download_image(image_name)
sensu_ports = {'22/tcp': 20022, '3000/tcp': uchiwaport, '4567/tcp': sensuport,
'5671/tcp': rabbitport, '15672/tcp': rabbitmport}
@@ -283,7 +288,7 @@ def start_ui(host, dbuser, dbpassword, webport, dbport):
root_url = "ROOT_URL=http://{}:{}".format(host, str(webport))
mongo_url = "MONGO_URL=mongodb://{}:{}@{}:{}/calipso" \
.format(dbuser, dbpassword, host, str(dbport))
- ports = {'3000/tcp': webport}
+ ports = {'4000/tcp': webport}
DockerClient.containers.run(image_name,
detach=True,
name=name,
@@ -292,6 +297,22 @@ def start_ui(host, dbuser, dbpassword, webport, dbport):
environment=[root_url, mongo_url, LDAP_CONFIG])
+def start_test():
+ name = "calipso-test"
+ if container_started(name):
+ return
+ print("\nstarting container {}...\n".format(name))
+ image_name = "korenlev/calipso:test"
+ download_image(image_name)
+ ports = {'22/tcp': 10022}
+ DockerClient.containers.run(image_name,
+ detach=True,
+ name=name,
+ ports=ports,
+ restart_policy=RESTART_POLICY,
+ environment=[PYTHON_PATH, MONGO_CONFIG],
+ volumes=calipso_volume)
+
# check and stop a calipso container by given name
def container_stop(container_name):
if not container_started(container_name, print_message=False):
@@ -312,10 +333,10 @@ def container_stop(container_name):
# parser for getting optional command arguments:
parser = argparse.ArgumentParser()
parser.add_argument("--hostname",
- help="Hostname or IP address of the server "
- "(default=172.17.0.1)",
+ help="FQDN (ex:mysrv.cisco.com) or IP address of the Server"
+ "(default=docker0 interface ip address)",
type=str,
- default="172.17.0.1",
+ default=local_hostname,
required=False)
parser.add_argument("--webport",
help="Port for the Calipso WebUI "
@@ -371,6 +392,12 @@ parser.add_argument("--dbpassword",
type=str,
default="calipso_default",
required=False)
+parser.add_argument("--home",
+ help="Home directory for configuration files "
+ "(default=/home/calipso)",
+ type=str,
+ default="/home/calipso",
+ required=False)
parser.add_argument("--command",
help="'start-all' or 'stop-all' the Calipso containers "
"(default=None)",
@@ -383,7 +410,11 @@ parser.add_argument("--copy",
type=str,
default=None,
required=False)
+
args = parser.parse_args()
+calipso_volume = {args.home: {'bind': '/local_dir', 'mode': 'rw'}}
+
+print("\nrunning installer against host:", args.hostname, "\n")
if args.command == "start-all":
container = "all"
@@ -395,8 +426,8 @@ else:
container = ""
action = ""
-container_names = ["calipso-ui", "calipso-scan", "calipso-listen",
- "calipso-ldap", "calipso-api", "calipso-sensu", "calipso-mongo"]
+container_names = ["calipso-ui", "calipso-scan", "calipso-test", "calipso-listen",
+ "calipso-ldap", "calipso-api", "calipso-monitor", "calipso-mongo"]
container_actions = ["stop", "start"]
while action not in container_actions:
action = input("Action? (stop, start, or 'q' to quit):\n")
@@ -408,10 +439,16 @@ while container != "all" and container not in container_names:
if container == "q":
exit()
+# create local directory on host if it does not already exist
+try:
+ os.makedirs(os.path.join(args.home, 'log/calipso'))
+except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+
# starting the containers per arguments:
if action == "start":
- # building /home/calipso/calipso_mongo_access.conf and
- # /home/calipso/ldap.conf files, per the arguments:
+ # building calipso_mongo_access.conf and ldap.conf files, per the arguments:
calipso_mongo_access_text = \
"server {}\n" \
"user {}\n" \
@@ -434,13 +471,15 @@ if action == "start":
"group_member_attribute member"
ldap_text = ldap_text.format(LDAP_PWD_ATTRIBUTE, args.hostname,
LDAP_USER_PWD_ATTRIBUTE)
- print("creating default", H_MONGO_CONFIG, "file...\n")
- calipso_mongo_access_file = open(H_MONGO_CONFIG, "w+")
+ mongo_file_path = os.path.join(args.home, H_MONGO_CONFIG)
+ print("creating default", mongo_file_path, "file...\n")
+ calipso_mongo_access_file = open(mongo_file_path, "w+")
time.sleep(1)
calipso_mongo_access_file.write(calipso_mongo_access_text)
calipso_mongo_access_file.close()
- print("creating default", H_LDAP_CONFIG, "file...\n")
- ldap_file = open(H_LDAP_CONFIG, "w+")
+ ldap_file_path = os.path.join(args.home, H_LDAP_CONFIG)
+ print("creating default", ldap_file_path, "file...\n")
+ ldap_file = open(ldap_file_path, "w+")
time.sleep(1)
ldap_file.write(ldap_text)
ldap_file.close()
@@ -460,8 +499,11 @@ if action == "start":
if container == "calipso-scan" or container == "all":
start_scan()
time.sleep(1)
- if container == "calipso-sensu" or container == "all":
- start_sensu(args.uchiwaport, args.sensuport, args.rabbitport, args.rabbitmport)
+ if container == "calipso-test" or container == "all":
+ start_test()
+ time.sleep(1)
+ if container == "calipso-monitor" or container == "all":
+ start_monitor(args.uchiwaport, args.sensuport, args.rabbitport, args.rabbitmport)
time.sleep(1)
if container == "calipso-ui" or container == "all":
start_ui(args.hostname, args.dbuser, args.dbpassword, args.webport,
diff --git a/app/install/db/clique_types.json b/app/install/db/clique_types.json
index 77e2d7d..624de70 100644
--- a/app/install/db/clique_types.json
+++ b/app/install/db/clique_types.json
@@ -26,6 +26,23 @@
],
"name" : "vservice"
},
+{
+ "environment" : "config_based_example",
+ "focal_point_type" : "vservice",
+ "link_types" : [
+ "vservice-vnic",
+ "vnic-vedge",
+ "vedge-otep",
+ "otep-vconnector",
+ "vconnector-host_pnic",
+ "host_pnic-network"
+ ],
+ "name" : "vservice_config_based",
+ "distribution" : "Mirantis",
+ "distribution_version" : "6.0",
+ "mechanism_drivers" : "OVS",
+ "type_drivers" : "vxlan"
+},
{
"environment" : "ANY",
"focal_point_type" : "network",
@@ -135,5 +152,14 @@
"vnic-vservice"
],
"name" : "network"
+},
+{
+ "name" : "instance",
+ "use_implicit_links" : true,
+ "link_types" : [
+ "instance-network"
+ ],
+ "environment" : "implicit-links-ex",
+ "focal_point_type" : "instance"
}
]
diff --git a/app/install/db/constants.json b/app/install/db/constants.json
index 6912eeb..8ea89e9 100644
--- a/app/install/db/constants.json
+++ b/app/install/db/constants.json
@@ -58,6 +58,27 @@
],
"name" : "log_levels"
},
+{
+ "data" : [
+ {
+ "label" : "OpenStack",
+ "value" : "OpenStack"
+ },
+ {
+ "label" : "Kubernetes",
+ "value" : "Kubernetes"
+ },
+ {
+ "label" : "VMware",
+ "value" : "VMware"
+ },
+ {
+ "label" : "Bare-metal",
+ "value" : "Bare-metal"
+ }
+ ],
+ "name" : "environment_types"
+},
{
"data" : [
{
@@ -530,6 +551,10 @@
"label" : "10239"
},
{
+ "label" : "10307",
+ "value" : "10307"
+ },
+ {
"value" : "10918",
"label" : "10918"
},
@@ -727,6 +752,14 @@
{
"label" : "switch",
"value" : "switch"
+ },
+ {
+ "value" : "namespace",
+ "label" : "namespace"
+ },
+ {
+ "value" : "namespaces_folder",
+ "label" : "namespaces_folder"
}
]
},
diff --git a/app/install/db/environments_config.json b/app/install/db/environments_config.json
index d7157e7..80bc6aa 100644
--- a/app/install/db/environments_config.json
+++ b/app/install/db/environments_config.json
@@ -37,7 +37,8 @@
"server_name" : "sensu_server",
"env_type" : "production",
"provision" : "None",
- "name" : "Monitoring",
+ "name" : "Monitoring",
+ "install_monitoring_client": false,
"ssh_port" : "20022",
"rabbitmq_pass" : "dummy_pwd",
"ssh_password" : "dummy_pwd",
@@ -55,7 +56,7 @@
}
],
"enable_monitoring" : true,
- "name" : "DEMO-ENVIRONMENT-SCHEME",
+ "name" : "DEMO-OpenStack",
"distribution" : "Mirantis",
"distribution_version" : "8.0",
"last_scanned" : "filled-by-scanning",
@@ -74,6 +75,93 @@
"wNLeBJxNDyw8G7Ssg"
]
},
- "type" : "environment"
+ "type" : "environment",
+ "environment_type" : "OpenStack"
+},
+{
+ "user" : "wNLeBJxNDyw8G7Ssg",
+ "name" : "DEMO-Kubernetes",
+ "last_scanned" : "filled-by-scanning",
+ "auth" : {
+ "view-env" : [
+ "wNLeBJxNDyw8G7Ssg"
+ ],
+ "edit-env" : [
+ "wNLeBJxNDyw8G7Ssg"
+ ]
+ },
+ "type_drivers" : "vxlan",
+ "distribution_version" : "8.0",
+ "enable_monitoring" : true,
+ "operational" : "stopped",
+ "mechanism_drivers" : [
+ "OVS"
+ ],
+ "type" : "environment",
+ "distribution" : "Mirantis",
+ "listen" : true,
+ "configuration" : [
+ {
+ "user" : "adminuser",
+ "name" : "OpenStack",
+ "pwd" : "dummy_pwd",
+ "host" : "10.0.0.1",
+ "admin_token" : "dummy_token",
+ "port" : "5000"
+ },
+ {
+ "host" : "10.56.20.78",
+ "name" : "Kubernetes",
+ "user" : "koren",
+ "token" : "baba-token-xyz",
+ "port" : "6443"
+ },
+ {
+ "user" : "mysqluser",
+ "name" : "mysql",
+ "pwd" : "dummy_pwd",
+ "port" : "3307",
+ "host" : "10.0.0.1"
+ },
+ {
+ "user" : "sshuser",
+ "name" : "CLI",
+ "pwd" : "dummy_pwd",
+ "host" : "10.0.0.1"
+ },
+ {
+ "user" : "rabbitmquser",
+ "name" : "AMQP",
+ "pwd" : "dummy_pwd",
+ "port" : "5673",
+ "host" : "10.0.0.1"
+ },
+ {
+ "name" : "Monitoring",
+ "install_monitoring_client": false,
+ "api_port" : 4567,
+ "ssh_port" : "20022",
+ "rabbitmq_pass" : "dummy_pwd",
+ "env_type" : "production",
+ "rabbitmq_port" : "5671",
+ "server_ip" : "10.0.0.1",
+ "config_folder" : "/local_dir/sensu_config",
+ "type" : "Sensu",
+ "provision" : "None",
+ "ssh_user" : "root",
+ "ssh_password" : "dummy_pwd",
+ "rabbitmq_user" : "sensu",
+ "server_name" : "sensu_server"
+ },
+ {
+ "user" : "admin",
+ "name" : "ACI",
+ "pwd" : "dummy_pwd",
+ "host" : "10.1.1.104"
+ }
+ ],
+ "app_path" : "/home/scan/calipso_prod/app",
+ "scanned" : false,
+ "environment_type" : "Kubernetes"
}
]
diff --git a/app/install/db/monitoring_config_templates.json b/app/install/db/monitoring_config_templates.json
index 9bddfa2..e0d59e7 100644
--- a/app/install/db/monitoring_config_templates.json
+++ b/app/install/db/monitoring_config_templates.json
@@ -311,6 +311,35 @@
"type" : "client_check_link_vnic-vconnector.json"
},
{
+ "side" : "client",
+ "order" : "1",
+ "condition" : {
+ "mechanism_drivers" : [
+ "OVS",
+ "Flannel"
+ ]
+ },
+ "config" : {
+ "checks" : {
+ "{objtype}_{objid}" : {
+ "interval" : 15,
+ "command" : "check_vconnector.py {name}",
+ "standalone" : true,
+ "type": "metric",
+ "subscribers" : [
+ "base"
+ ],
+ "handlers" : [
+ "file",
+ "osdna-monitor"
+ ]
+ }
+ }
+ },
+ "monitoring_system" : "sensu",
+ "type" : "client_check_vconnector_ovs.json"
+},
+{
"side" : "client",
"order" : "1",
"condition" : {
@@ -394,5 +423,28 @@
},
"monitoring_system" : "sensu",
"type" : "client_check_vservice.json"
+},
+{
+ "side" : "client",
+ "order" : "1",
+ "config" : {
+ "checks" : {
+ "{objtype}_{objid}" : {
+ "standalone" : true,
+ "interval" : 15,
+ "command" : "PYTHONPATH=/etc/sensu/plugins check_instance_communications.py {services_and_vnics}",
+ "handlers" : [
+ "file",
+ "osdna-monitor"
+ ],
+ "type" : "metric",
+ "subscribers" : [
+ "base"
+ ]
+ }
+ }
+ },
+ "monitoring_system" : "sensu",
+ "type" : "client_check_instance.json"
}
]
diff --git a/app/install/db/supported_environments.json b/app/install/db/supported_environments.json
index c2c376b..baa3150 100644
--- a/app/install/db/supported_environments.json
+++ b/app/install/db/supported_environments.json
@@ -12,6 +12,21 @@
"monitoring" : true
}
},
+ {
+ "environment" : {
+ "distribution_version" : [
+ "10307"
+ ],
+ "distribution" : "Mercury",
+ "type_drivers" : "vlan",
+ "mechanism_drivers" : "OVS"
+ },
+ "features" : {
+ "scanning" : true,
+ "monitoring" : false,
+ "listening" : true
+ }
+ },
{
"environment" : {
"distribution" : "Devstack",
diff --git a/app/messages/message.py b/app/messages/message.py
index e940054..dd4c1d3 100644
--- a/app/messages/message.py
+++ b/app/messages/message.py
@@ -8,9 +8,8 @@
# http://www.apache.org/licenses/LICENSE-2.0 #
###############################################################################
import datetime
-from typing import Union
-from bson import ObjectId
+from utils.util import merge_dicts
class Message:
@@ -23,13 +22,14 @@ class Message:
msg: dict,
source: str,
env: str = None,
- object_id: Union[str, ObjectId] = None,
- display_context: Union[str, ObjectId] = None,
+ object_id=None,
+ display_context=None,
level: str = DEFAULT_LEVEL,
object_type: str = None,
ts: datetime = None,
received_ts: datetime = None,
- finished_ts: datetime = None):
+ finished_ts: datetime = None,
+ **kwargs):
super().__init__()
if level and level.lower() in self.LEVELS:
@@ -48,19 +48,24 @@ class Message:
self.received_timestamp = received_ts
self.finished_timestamp = finished_ts
self.viewed = False
+ self.extra = kwargs
def get(self):
- return {
- "id": self.id,
- "environment": self.environment,
- "source_system": self.source_system,
- "related_object": self.related_object,
- "related_object_type": self.related_object_type,
- "display_context": self.display_context,
- "level": self.level,
- "message": self.message,
- "timestamp": self.timestamp,
- "received_timestamp": self.received_timestamp,
- "finished_timestamp": self.finished_timestamp,
- "viewed": self.viewed
- }
+ return merge_dicts(
+ self.extra,
+ {
+ "id": self.id,
+ "environment": self.environment,
+ "source_system": self.source_system,
+ "related_object": self.related_object,
+ "related_object_type": self.related_object_type,
+ "display_context": self.display_context,
+ "level": self.level,
+ "message": self.message,
+ "timestamp": self.timestamp,
+ "received_timestamp": self.received_timestamp,
+ "finished_timestamp": self.finished_timestamp,
+ "viewed": self.viewed
+ }
+ )
+
diff --git a/app/monitoring/checks/check_instance_communications.py b/app/monitoring/checks/check_instance_communications.py
new file mode 100644
index 0000000..4ce5165
--- /dev/null
+++ b/app/monitoring/checks/check_instance_communications.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
+# find status of instance network
+# For each instance vNIC - take the MAC address
+# For each vService in the same network as the instance,
+# use local_service_id attribute in the following command in the network node:
+# "ip netns exec <local_service_id> arp -n"
+# look for the instance vNIC's mac_address to appear in the response
+# for each mac_address:
+# - if Flag 'C' = 'Complete' - mark result OK for that instance,
+# - 'I' = 'Incomplete' - mark as 'warn',
+# - no mac_address mark as 'error'
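+#
+# A usage sketch (the service ID and MAC addresses are hypothetical):
+#   check_instance_communications.py \
+#       qdhcp-net1,fa:16:3e:aa:bb:cc;qrouter-r1,fa:16:3e:dd:ee:ff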
+
+import sys
+import subprocess
+
+from binary_converter import binary2str
+
+
+arp_headers = ['Address', 'HWtype', 'HWaddress', 'Flags', 'Mask', 'Iface']
+arp_mac_pos = arp_headers.index('HWaddress')
+arp_flags_pos = arp_headers.index('Flags')
+
+
+def check_vnic_tuple(vnic_and_service):
+ tuple_parts = vnic_and_service.split(',')
+ local_service_id = tuple_parts[0]
+ mac_address = tuple_parts[1]
+ check_output = None
+ try:
+ netns_cmd = 'ip netns exec {} arp -n'.format(local_service_id)
+ check_output = 'MAC={}, local_service_id={}\n'\
+ .format(mac_address, local_service_id)
+ netns_out = subprocess.check_output([netns_cmd],
+ stderr=subprocess.STDOUT,
+ shell=True)
+ netns_out = binary2str(netns_out)
+ check_output += '{}\n'.format(netns_out)
+ netns_lines = netns_out.splitlines()
+ if not netns_lines or \
+ netns_lines[0].endswith('No such file or directory'):
+ check_rc = 2
+ else:
+ mac_found = False
+ flags = None
+            for line in netns_lines:
+                line_parts = line.split()
+                # check the line length before indexing into it
+                if len(line_parts) <= arp_flags_pos:
+                    continue
+                if line_parts[arp_mac_pos] == mac_address:
+                    mac_found = True
+                    flags = line_parts[arp_flags_pos]
+                    break
+ if mac_found:
+ check_rc = 1 if flags == 'I' else 0
+ else:
+ check_rc = 2
+ except subprocess.CalledProcessError as e:
+ check_output = str(e)
+ check_rc = 2
+ return check_rc, check_output
+
+
+if len(sys.argv) < 2:
+ print('usage: ' + sys.argv[0] +
+ ' <vService local_service_id>,<MAC>[;<>,<>]...')
+ exit(1)
+
+rc = 0
+output = ''
+vnics = str(sys.argv[1]).split(';')
+for vnic_tuple in vnics:
+ tuple_ret, out = check_vnic_tuple(vnic_tuple)
+    rc = max(rc, tuple_ret)  # keep the worst (highest) return code seen
+ output += out
+print(output)
+exit(rc)
diff --git a/app/monitoring/checks/check_ping.py b/app/monitoring/checks/check_ping.py
index 35e7234..fbf1304 100755
--- a/app/monitoring/checks/check_ping.py
+++ b/app/monitoring/checks/check_ping.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python
###############################################################################
# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
# and others #
diff --git a/app/monitoring/checks/check_pnic_ovs.py b/app/monitoring/checks/check_pnic_ovs.py
index c26e42f..7cfa699 100755
--- a/app/monitoring/checks/check_pnic_ovs.py
+++ b/app/monitoring/checks/check_pnic_ovs.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python
###############################################################################
# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
# and others #
@@ -15,7 +15,7 @@ import subprocess
from binary_converter import binary2str
-def nic_not_found(name: str, output: str):
+def nic_not_found(name, output):
print("Error finding NIC {}{}{}\n".format(name, ': ' if output else '',
output))
return 2
diff --git a/app/monitoring/checks/check_pnic_vpp.py b/app/monitoring/checks/check_pnic_vpp.py
index 942fdc2..3db4e49 100755
--- a/app/monitoring/checks/check_pnic_vpp.py
+++ b/app/monitoring/checks/check_pnic_vpp.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python
###############################################################################
# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
# and others #
diff --git a/app/monitoring/checks/check_vconnector.py b/app/monitoring/checks/check_vconnector.py
new file mode 100644
index 0000000..961d7ad
--- /dev/null
+++ b/app/monitoring/checks/check_vconnector.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
+# find status of vconnector
+# vconnector object name defines name of bridge
+# use "brctl showmacs <bridge>", return ERROR if 'No such device' is returned
+
+import sys
+import subprocess
+
+from binary_converter import binary2str
+
+
+if len(sys.argv) < 2:
+ print('usage: ' + sys.argv[0] + ' <bridge>')
+ exit(1)
+bridge_name = str(sys.argv[1])
+
+rc = 0
+
+cmd = None
+out = ''
+try:
+ cmd = "brctl showmacs {}".format(bridge_name)
+ out = subprocess.check_output([cmd],
+ stderr=subprocess.STDOUT,
+ shell=True)
+ out = binary2str(out)
+ lines = out.splitlines()
+ if not lines or lines[0].endswith('No such device'):
+ rc = 2
+ else:
+ print(out)
+except subprocess.CalledProcessError as e:
+ rc = 2
+ out = str(e)
+
+if rc != 0:
+ print('Failed to find vConnector {}:\n{}\n'
+ .format(bridge_name, out))
+
+exit(rc)
diff --git a/app/monitoring/checks/check_vedge_ovs.py b/app/monitoring/checks/check_vedge_ovs.py
index 849af66..33d3f71 100755
--- a/app/monitoring/checks/check_vedge_ovs.py
+++ b/app/monitoring/checks/check_vedge_ovs.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python
###############################################################################
# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
# and others #
diff --git a/app/monitoring/checks/check_vedge_vpp.py b/app/monitoring/checks/check_vedge_vpp.py
index 346feae..94d5977 100755
--- a/app/monitoring/checks/check_vedge_vpp.py
+++ b/app/monitoring/checks/check_vedge_vpp.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python
###############################################################################
# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
# and others #
diff --git a/app/monitoring/checks/check_vnic_vconnector.py b/app/monitoring/checks/check_vnic_vconnector.py
index 76efd0b..fc8721f 100755
--- a/app/monitoring/checks/check_vnic_vconnector.py
+++ b/app/monitoring/checks/check_vnic_vconnector.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python
###############################################################################
# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
# and others #
diff --git a/app/monitoring/checks/check_vnic_vpp.py b/app/monitoring/checks/check_vnic_vpp.py
index 0f77ddd..22cc31d 100755
--- a/app/monitoring/checks/check_vnic_vpp.py
+++ b/app/monitoring/checks/check_vnic_vpp.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python
###############################################################################
# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
# and others #
diff --git a/app/monitoring/checks/check_vservice.py b/app/monitoring/checks/check_vservice.py
index a95a46a..2a30a53 100644
--- a/app/monitoring/checks/check_vservice.py
+++ b/app/monitoring/checks/check_vservice.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python
###############################################################################
# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
# and others #
diff --git a/app/monitoring/handlers/handle_otep.py b/app/monitoring/handlers/handle_otep.py
index 0189625..b19baab 100644
--- a/app/monitoring/handlers/handle_otep.py
+++ b/app/monitoring/handlers/handle_otep.py
@@ -29,7 +29,7 @@ class HandleOtep(MonitoringCheckHandler):
self.log.error('Port not found: ' + port_id)
return 1
status = check_result['status']
- port['status'] = self.STATUS_LABEL[status]
+ port['status'] = self.get_label_for_status(status)
port['status_value'] = status
port['status_text'] = check_result['output']
diff --git a/app/monitoring/handlers/handle_vconnector.py b/app/monitoring/handlers/handle_vconnector.py
new file mode 100644
index 0000000..85ee05f
--- /dev/null
+++ b/app/monitoring/handlers/handle_vconnector.py
@@ -0,0 +1,28 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+# handle monitoring event for vConnector objects
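+# the check ID arrives as "<id prefix>-<MAC with special characters encoded>";
+# the MAC part is decoded below before the inventory lookup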
+
+from monitoring.handlers.monitoring_check_handler import MonitoringCheckHandler
+from utils.special_char_converter import SpecialCharConverter
+
+
+class HandleVconnector(MonitoringCheckHandler):
+
+ def handle(self, obj_id, check_result):
+ object_id = obj_id[:obj_id.index('-')]
+ mac = obj_id[obj_id.index('-')+1:]
+ converter = SpecialCharConverter()
+ mac_address = converter.decode_special_characters(mac)
+ object_id += '-' + mac_address
+ doc = self.doc_by_id(object_id)
+ if not doc:
+ return 1
+ self.keep_result(doc, check_result)
+ return check_result['status']
diff --git a/app/monitoring/handlers/monitor.py b/app/monitoring/handlers/monitor.py
index 9caed74..2495110 100755
--- a/app/monitoring/handlers/monitor.py
+++ b/app/monitoring/handlers/monitor.py
@@ -12,11 +12,15 @@
# handle monitoring events
import argparse
+import datetime
import json
import sys
+from discover.configuration import Configuration
+from monitoring.handlers.monitoring_check_handler import MonitoringCheckHandler
from utils.inventory_mgr import InventoryMgr
from utils.mongo_access import MongoAccess
+from utils.special_char_converter import SpecialCharConverter
from utils.util import ClassResolver
@@ -32,7 +36,9 @@ class Monitor:
MongoAccess.set_config_file(self.args.mongo_config)
self.inv = InventoryMgr()
self.inv.set_collections(self.args.inventory)
+ self.configuration = Configuration()
self.input_text = None
+ self.converter = SpecialCharConverter()
def get_args(self):
parser = argparse.ArgumentParser()
@@ -125,13 +131,83 @@ class Monitor:
return handler
def get_handler(self, check_type, obj_type):
- basic_handling_types = ['vedge', 'vservice']
+ basic_handling_types = ['instance', 'vedge', 'vservice', 'vconnector']
if obj_type not in basic_handling_types:
return self.get_handler_by_type(check_type, obj_type)
from monitoring.handlers.basic_check_handler \
import BasicCheckHandler
return BasicCheckHandler(self.args)
+ def check_link_interdependency_for(self,
+ object_id: str,
+                                       from_type: str = None,
+                                       to_type: str = None):
+        if (from_type is not None and to_type is not None) or \
+                (from_type is None and to_type is None):
+ raise ValueError('check_link_interdependency: '
+ 'supply one of from_type/to_type')
+ obj_id = self.converter.decode_special_characters(object_id)
+ obj = self.inv.get_by_id(environment=self.args.env, item_id=obj_id)
+ if not obj:
+ self.inv.log.error('check_link_interdependency: '
+ 'failed to find object with ID: {}'
+ .format(object_id))
+ return
+ if 'status' not in obj:
+ return
+ id_attr = 'source_id' if from_type is None else 'target_id'
+ link_type = '{}-{}'.format(
+ from_type if from_type is not None else obj['type'],
+ to_type if to_type is not None else obj['type'])
+ condition = {
+ 'environment': self.args.env,
+ 'link_type': link_type,
+ id_attr: obj_id
+ }
+ link = self.inv.find_one(search=condition, collection='links')
+ if not link:
+ self.inv.log.error('check_link_interdependency: '
+ 'failed to find {} link with {}: {}'
+ .format(link_type, id_attr, obj_id))
+ return
+ other_id_attr = '{}_id' \
+ .format('source' if from_type is not None else 'target')
+ other_obj = self.inv.get_by_id(environment=self.args.env,
+ item_id=link[other_id_attr])
+ if not other_obj:
+ self.inv.log.error('check_link_interdependency: '
+ 'failed to find {} with ID: {} (link type: {})'
+ .format(other_id_attr, link[other_id_attr],
+ link_type))
+ return
+ if 'status' not in other_obj:
+ return
+        status = 'OK' \
+            if obj['status'] == 'OK' and other_obj['status'] == 'OK' \
+            else 'Warning'
+ link['status'] = status
+ time_format = MonitoringCheckHandler.TIME_FORMAT
+ timestamp1 = obj['status_timestamp']
+ t1 = datetime.datetime.strptime(timestamp1, time_format)
+ timestamp2 = other_obj['status_timestamp']
+ t2 = datetime.datetime.strptime(timestamp2, time_format)
+ timestamp = max(t1, t2)
+ link['status_timestamp'] = datetime.datetime.strftime(timestamp,
+ time_format)
+ self.inv.set(link, self.inv.collections['links'])
+
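+    # Example (hypothetical objects): for a vedge check result under OVS, the
+    # matching vedge-host_pnic link is set to 'OK' only when both endpoints
+    # report 'OK' (otherwise 'Warning'), and its status_timestamp is set to
+    # the later of the two endpoint timestamps.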
+ def check_link_interdependency(self, object_id: str, object_type: str):
+ conf = self.configuration.get_env_config()
+ if 'OVS' in conf['mechanism_drivers']:
+ if object_type == 'vedge':
+ self.check_link_interdependency_for(object_id,
+ to_type='host_pnic')
+ if object_type == 'host_pnic':
+ self.check_link_interdependency_for(object_id,
+ from_type='vedge')
+
def process_input(self):
check_result_full = json.loads(self.input_text)
check_client = check_result_full['client']
@@ -142,14 +218,19 @@ class Monitor:
monitor.find_object_type_and_id(name)
if 'environment' in check_client:
self.args.env = check_client['environment']
+ else:
+ raise ValueError('Check client should contain environment name')
+ self.configuration.use_env(self.args.env)
check_handler = self.get_handler(check_type, object_type)
if check_handler:
check_handler.handle(object_id, check_result)
+ self.check_link_interdependency(object_id, object_type)
def process_check_result(self):
self.read_input()
self.process_input()
+
monitor = Monitor()
monitor.process_check_result()
diff --git a/app/monitoring/handlers/monitoring_check_handler.py b/app/monitoring/handlers/monitoring_check_handler.py
index 1436a46..4902c3c 100644
--- a/app/monitoring/handlers/monitoring_check_handler.py
+++ b/app/monitoring/handlers/monitoring_check_handler.py
@@ -19,15 +19,14 @@ from messages.message import Message
from utils.inventory_mgr import InventoryMgr
from utils.logging.full_logger import FullLogger
from utils.special_char_converter import SpecialCharConverter
-from utils.string_utils import stringify_datetime
-TIME_FORMAT = '%Y-%m-%d %H:%M:%S %Z'
SOURCE_SYSTEM = 'Sensu'
ERROR_LEVEL = ['info', 'warn', 'error']
class MonitoringCheckHandler(SpecialCharConverter):
- STATUS_LABEL = ['OK', 'Warning', 'Error']
+ status_labels = {}
+ TIME_FORMAT = '%Y-%m-%d %H:%M:%S %Z'
def __init__(self, args):
super().__init__()
@@ -39,9 +38,30 @@ class MonitoringCheckHandler(SpecialCharConverter):
self.inv = InventoryMgr()
self.inv.log.set_loglevel(args.loglevel)
self.inv.set_collections(args.inventory)
+ self.status_labels = self.get_status_labels()
except FileNotFoundError:
sys.exit(1)
+ def get_status_labels(self):
+ statuses_name_search = {'name': 'monitoring_check_statuses'}
+ labels_data = self.inv.find_one(search=statuses_name_search,
+ collection='constants')
+ if not isinstance(labels_data, dict) or 'data' not in labels_data:
+            return {}
+ labels = {}
+ for status_data in labels_data['data']:
+ if not isinstance(status_data, dict):
+ continue
+ val = int(status_data['value'])
+ label = status_data['label']
+ labels[val] = label
+ return labels
+
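+    # status_labels maps integer status values to display labels, e.g.
+    # {0: 'OK', 1: 'Warning', 2: 'Error'} (matching the former STATUS_LABEL
+    # list); unknown statuses resolve to an empty label below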
+    def get_label_for_status(self, status: int) -> str:
+        return self.status_labels.get(status, '')
+
def doc_by_id(self, object_id):
doc = self.inv.get_by_id(self.env, object_id)
if not doc:
@@ -57,11 +77,14 @@ class MonitoringCheckHandler(SpecialCharConverter):
return doc
def set_doc_status(self, doc, status, status_text, timestamp):
- doc['status'] = self.STATUS_LABEL[status] if isinstance(status, int) \
+        doc['status_value'] = status
+ doc['status'] = self.get_label_for_status(status) \
+ if isinstance(status, int) \
else status
if status_text:
doc['status_text'] = status_text
- doc['status_timestamp'] = strftime(TIME_FORMAT, timestamp)
+ doc['status_timestamp'] = strftime(self.TIME_FORMAT, timestamp)
if 'link_type' in doc:
self.inv.write_link(doc)
else:
@@ -83,7 +106,8 @@ class MonitoringCheckHandler(SpecialCharConverter):
obj_id = 'link_{}_{}'.format(doc['source_id'], doc['target_id']) \
if is_link \
else doc['id']
- obj_type = 'link_{}'.format(doc['link_type']) if is_link else doc['type']
+ obj_type = 'link_{}'.format(doc['link_type']) if is_link else \
+ doc['type']
display_context = obj_id if is_link \
else doc['network_id'] if doc['type'] == 'port' else doc['id']
level = error_level if error_level\
diff --git a/app/monitoring/setup/monitoring_check_handler.py b/app/monitoring/setup/monitoring_check_handler.py
index c453439..d1b863d 100644
--- a/app/monitoring/setup/monitoring_check_handler.py
+++ b/app/monitoring/setup/monitoring_check_handler.py
@@ -8,7 +8,6 @@
# http://www.apache.org/licenses/LICENSE-2.0 #
###############################################################################
from monitoring.setup.monitoring_handler import MonitoringHandler
-from utils.inventory_mgr import InventoryMgr
from utils.special_char_converter import SpecialCharConverter
@@ -28,14 +27,13 @@ class MonitoringCheckHandler(MonitoringHandler, SpecialCharConverter):
type_str = values['check_type'] if 'check_type' in values else \
(o['type'] if 'type' in o else 'link_' + o['link_type'])
file_type = 'client_check_' + type_str + '.json'
- host = o['host']
+ host = values['host'] if 'host' in values else o['host']
sub_dir = '/host/' + host
content = self.prepare_config_file(
file_type,
{'side': 'client', 'type': file_type})
# need to put this content inside client.json file
client_file = 'client.json'
- host = o['host']
client_file_content = self.get_config_from_db(host, client_file)
# merge checks attribute from current content into client.json
checks = client_file_content['config']['checks'] \
@@ -53,3 +51,14 @@ class MonitoringCheckHandler(MonitoringHandler, SpecialCharConverter):
}
content = client_file_content
self.write_config_file(client_file, sub_dir, host, content)
+
+ def get_check_from_db(self, o, postfix=''):
+        client_config = self.get_config_from_db(o.get('host', ''),
+ 'client.json')
+ if not client_config:
+ return {}
+ checks = client_config.get('config', {}).get('checks', {})
+ objid = self.encode_special_characters(o.get('id', ''))
+ object_check_id = '{}_{}{}'.format(o.get('type'), objid, postfix)
+ check = checks.get(object_check_id, {})
+ return check
diff --git a/app/monitoring/setup/monitoring_handler.py b/app/monitoring/setup/monitoring_handler.py
index 903b8d8..0eeb668 100644
--- a/app/monitoring/setup/monitoring_handler.py
+++ b/app/monitoring/setup/monitoring_handler.py
@@ -105,13 +105,25 @@ class MonitoringHandler(MongoAccess, CliAccess, BinaryConverter):
if 'condition' not in doc:
return True
condition = doc['condition']
- if 'mechanism_drivers' not in condition:
- return True
- required_mechanism_drivers = condition['mechanism_drivers']
- if not isinstance(required_mechanism_drivers, list):
- required_mechanism_drivers = [required_mechanism_drivers]
- intersection = [val for val in required_mechanism_drivers
- if val in self.mechanism_drivers]
+ if not isinstance(condition, dict):
+ self.log.error('incorrect condition in monitoring ({}): '
+ 'condition must be a dict'
+                           .format(doc.get('type', '')))
+ return False
+ for key, required_value in condition.items():
+ if not self.check_env_config(key, required_value):
+ return False
+ return True
+
+ def check_env_config(self, config_name, required_config_value):
+ required_config_values = required_config_value \
+ if isinstance(required_config_value, list) \
+ else [required_config_value]
+ conf_values = self.configuration.environment.get(config_name, [])
+ conf_values = conf_values if isinstance(conf_values, list) \
+ else [conf_values]
+ intersection = [val for val in required_config_values
+ if val in conf_values]
return bool(intersection)
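+    # Example (hypothetical template): a condition of
+    #   {"mechanism_drivers": ["OVS", "Flannel"]}
+    # now passes when the environment config lists either value under
+    # mechanism_drivers; any other condition key is matched the same way.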
def content_replace(self, content):
@@ -435,6 +447,7 @@ class MonitoringHandler(MongoAccess, CliAccess, BinaryConverter):
if '/*' in local_dir else local_dir
if local_dir_base.strip('/*') == remote_path.strip('/*'):
return # same directory - nothing to do
+ self.make_remote_dir(host, remote_path)
cmd = 'cp {} {}'.format(what_to_copy, remote_path)
self.run(cmd, ssh=ssh)
return
diff --git a/app/monitoring/setup/monitoring_host.py b/app/monitoring/setup/monitoring_host.py
index 9450cf6..0b9f420 100644
--- a/app/monitoring/setup/monitoring_host.py
+++ b/app/monitoring/setup/monitoring_host.py
@@ -12,6 +12,7 @@ import os
from os.path import join, sep
from monitoring.setup.monitoring_handler import MonitoringHandler
+from monitoring.setup.sensu_client_installer import SensuClientInstaller
RABBITMQ_CONFIG_FILE = 'rabbitmq.json'
RABBITMQ_CONFIG_ATTR = 'rabbitmq'
@@ -27,13 +28,14 @@ class MonitoringHost(MonitoringHandler):
# add monitoring setup for remote host
def create_setup(self, o):
+ host_id = o.get('host', '')
+ self.install_sensu_on_host(host_id)
sensu_host_files = [
'transport.json',
'rabbitmq.json',
'client.json'
]
server_ip = self.env_monitoring_config['server_ip']
- host_id = o['host']
sub_dir = join('/host', host_id)
config = copy.copy(self.env_monitoring_config)
env_name = self.configuration.env_name
@@ -88,3 +90,10 @@ class MonitoringHost(MonitoringHandler):
# this configuration requires SSL
# keep the path of the files for later use
self.fetch_ssl_files.append(path)
+
+ def install_sensu_on_host(self, host_id):
+ auto_install = self.env_monitoring_config \
+ .get('install_monitoring_client', False)
+ if auto_install:
+ installer = SensuClientInstaller(self.env, host_id)
+ installer.install()
diff --git a/app/monitoring/setup/monitoring_instance.py b/app/monitoring/setup/monitoring_instance.py
new file mode 100644
index 0000000..b376441
--- /dev/null
+++ b/app/monitoring/setup/monitoring_instance.py
@@ -0,0 +1,67 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from monitoring.setup.monitoring_simple_object import MonitoringSimpleObject
+
+
+class MonitoringInstance(MonitoringSimpleObject):
+
+ def __init__(self, env):
+ super().__init__(env)
+
+    # monitoring setup for an instance can only be done after its vNICs are
+    # found and their networks are set, so the first call does nothing
+ def create_setup(self, instance: dict):
+ vnics = self.inv.find_items({
+ 'environment': self.get_env(),
+ 'type': 'vnic',
+ 'vnic_type': 'instance_vnic',
+ 'id_path': {'$regex': '^{}/'.format(instance['id_path'])}
+ })
+ for vnic in vnics:
+ self.add_instance_communication_monitoring(instance, vnic)
+
+    # for each instance we keep a list of its vNICs and matching vServices,
+    # passed as the argument to check_instance_communications.py;
+    # add this vNIC to the list along with its corresponding vService
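+    # e.g. the resulting check command argument (IDs are hypothetical):
+    #   "dhcp-svc-1,vnic-1;dhcp-svc-2,vnic-2"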
+ def add_instance_communication_monitoring(self, instance: dict, vnic: dict):
+ service = self.get_service_for_vnic(vnic)
+ if not service:
+ return
+ check = self.get_check_from_db(instance)
+ services_and_vnics = check.get('command', '')
+ if services_and_vnics:
+ services_and_vnics = \
+ services_and_vnics[services_and_vnics.index('.py')+4:]
+ services_and_vnics_list = \
+ services_and_vnics.split(';') if services_and_vnics \
+ else []
+ service_and_vnic = '{},{}'.format(service.get('local_service_id', ''),
+ vnic.get('id'))
+ if service_and_vnic in services_and_vnics_list:
+            return  # we already have this tuple defined
+ services_and_vnics_list.append(service_and_vnic)
+ values = {
+ 'objtype': 'instance',
+ 'objid': self.encode_special_characters(instance['id']),
+ 'host': service['host'],
+ 'services_and_vnics': ';'.join(services_and_vnics_list)
+ }
+ self.create_monitoring_for_object(instance, values)
+
+ def get_service_for_vnic(self, vnic: dict) -> dict:
+ services = self.inv.find_items({'environment': self.get_env(),
+ 'type': 'vservice',
+ 'network': vnic.get('network', '')})
+ if not services:
+ return {}
+        dhcp = next((s for s in services
+                     if s.get('service_type') == 'dhcp'), None)
+ if dhcp:
+ return dhcp # If we have both DHCP and router, return the DHCP
+ return services[0] # currently only DHCP and router services
diff --git a/app/monitoring/setup/monitoring_setup_manager.py b/app/monitoring/setup/monitoring_setup_manager.py
index bc4fe01..8b7693a 100644
--- a/app/monitoring/setup/monitoring_setup_manager.py
+++ b/app/monitoring/setup/monitoring_setup_manager.py
@@ -11,12 +11,14 @@
from monitoring.setup.monitoring_handler import MonitoringHandler
from monitoring.setup.monitoring_host import MonitoringHost
+from monitoring.setup.monitoring_instance import MonitoringInstance
from monitoring.setup.monitoring_link_vnic_vconnector \
import MonitoringLinkVnicVconnector
from monitoring.setup.monitoring_pnic import MonitoringPnic
from monitoring.setup.monitoring_otep import MonitoringOtep
from monitoring.setup.monitoring_vedge import MonitoringVedge
from monitoring.setup.monitoring_vnic import MonitoringVnic
+from monitoring.setup.monitoring_vconnector import MonitoringVconnector
from monitoring.setup.monitoring_vservice import MonitoringVservice
@@ -31,7 +33,9 @@ class MonitoringSetupManager(MonitoringHandler):
"otep": MonitoringOtep(env),
"vedge": MonitoringVedge(env),
"host_pnic": MonitoringPnic(env),
+ "instance": MonitoringInstance(env),
"vnic": MonitoringVnic(env),
+ "vconnector": MonitoringVconnector(env),
"vservice": MonitoringVservice(env),
"vnic-vconnector": MonitoringLinkVnicVconnector(env)}
diff --git a/app/monitoring/setup/monitoring_vconnector.py b/app/monitoring/setup/monitoring_vconnector.py
new file mode 100644
index 0000000..9ddc6af
--- /dev/null
+++ b/app/monitoring/setup/monitoring_vconnector.py
@@ -0,0 +1,24 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from monitoring.setup.monitoring_simple_object import MonitoringSimpleObject
+
+
+class MonitoringVconnector(MonitoringSimpleObject):
+
+    # add monitoring setup for vConnector objects
+ def create_setup(self, o):
+        obj_type = 'vconnector'
+        env_config = self.configuration.get_env_config()
+        vpp_or_ovs = 'vpp' if 'VPP' in env_config['mechanism_drivers'] \
+            else 'ovs'
+        type_str = '{}_{}'.format(obj_type, vpp_or_ovs)
+        self.setup(obj_type, o, values={'check_type': type_str,
+                                        'name': o['name']})
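+    # e.g. under OVS the check_type becomes 'vconnector_ovs', which selects
+    # the 'client_check_vconnector_ovs.json' template added above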
+
diff --git a/app/monitoring/setup/sensu_client_installer.py b/app/monitoring/setup/sensu_client_installer.py
new file mode 100644
index 0000000..72a8bbb
--- /dev/null
+++ b/app/monitoring/setup/sensu_client_installer.py
@@ -0,0 +1,158 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import os.path
+from pkg_resources import parse_version
+
+from monitoring.setup.monitoring_handler import MonitoringHandler
+from utils.inventory_mgr import InventoryMgr
+
+
+class SensuClientInstaller(MonitoringHandler):
+
+ UBUNTU = 'ubuntu'
+ CENTOS = 'centos'
+
+ INSTALL_CMD = {
+ UBUNTU: 'dpkg -i {}',
+ CENTOS: 'rpm -i {}'
+ }
+ PERMISSIONS_CMD = {
+ UBUNTU: '',
+ CENTOS: 'usermod -aG wheel sensu'
+ }
+ SUDOERS_FILE = '/etc/sudoers'
+
+ available_downloads = {}
+
+ def __init__(self, env: str, host_id: str):
+ super().__init__(env)
+ self.cli_ssh = self.get_ssh(host_id)
+ self.inv = InventoryMgr()
+ self.host = self.inv.get_by_id(env, host_id)
+ self.server = self.env_monitoring_config.get('server_ip')
+ self.server_cli_ssh = self.get_ssh(self.server)
+ self.ubuntu_dist = None
+ self.required_package = None
+
+ def install(self):
+ pkg_to_install = self.get_pkg_to_install()
+ if not pkg_to_install:
+ return
+ try:
+ self.fetch_package(pkg_to_install)
+ self.install_package(pkg_to_install)
+ self.set_permissions()
+ except SystemError as e:
+ self.log.error('Sensu install on host {} failed: {}'
+ .format(self.host, str(e)))
+ return
+
+ @staticmethod
+ def get_attr_from_output(output_lines: list, attr: str) -> str:
+ matches = [l for l in output_lines if l.startswith(attr)]
+ if not matches:
+ return ''
+ line = matches[0]
+ return SensuClientInstaller.get_attr_from_output_line(line)
+
+ @staticmethod
+ def get_attr_from_output_line(output_line: str):
+ val = output_line[output_line.index(':')+1:].strip()
+ return val
+
+ INSTALLED = 'Installed: '
+ CANDIDATE = 'Candidate: '
+ SENSU_DIR = '/opt/sensu'
+ SENSU_PKG_DIR = '/etc/sensu/pkg'
+ SENSU_PKG_DIR_LOCAL = '/tmp/sensu_pkg'
+ SENSU_VERSION_FILE = '/opt/sensu/version-manifest.txt'
+
+ def find_available_downloads(self):
+ ls_output = self.server_cli_ssh.exec('ls -R {}'
+ .format(self.SENSU_PKG_DIR))
+ ls_lines = ls_output.splitlines()
+ last_target_dir = None
+ for line in ls_lines:
+ if line[-4:] in ['/32:', '/64:']:
+ last_target_dir = line.replace(self.SENSU_PKG_DIR, '')
+ continue
+ elif last_target_dir:
+ target_dir = last_target_dir.strip(os.path.sep).strip(':')
+ self.available_downloads[target_dir] = line
+ last_target_dir = None
+ else:
+ last_target_dir = None
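+    # Sketch of the parsing (hypothetical listing): "ls -R /etc/sensu/pkg"
+    # output such as
+    #   /etc/sensu/pkg/ubuntu/16.04/64:
+    #   sensu_1.2.0-1_amd64.deb
+    # records {'ubuntu/16.04/64': 'sensu_1.2.0-1_amd64.deb'}.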
+
+ def find_available_package(self, os_details: dict):
+ if not self.available_downloads:
+ self.find_available_downloads()
+ distribution = os_details['ID']
+ version = os_details['version'].split()[-2].lower()
+ arch = os_details['architecure'][-2:]
+ download_dir = os.path.join(distribution, version, arch)
+ download_file = self.available_downloads.get(download_dir)
+ full_path = '' if not download_file \
+ else os.path.join(self.SENSU_PKG_DIR, download_dir, download_file)
+ return download_file, full_path
+
+ @staticmethod
+ def find_available_version(download_file: str) -> str:
+ ver = download_file.replace('sensu', '').strip('-_')
+ ver = ver[:ver.index('-')]
+ return ver
+
+ def get_pkg_to_install(self) -> str:
+ if self.provision == self.provision_levels['none']:
+ return ''
+ if not self.host:
+ return ''
+ supported_os = [self.UBUNTU, self.CENTOS]
+ distribution = self.host['OS']['ID']
+        if distribution not in supported_os:
+ self.log.error('Sensu client auto-install only supported for: {}'
+ .format(', '.join(supported_os)))
+ return ''
+ cmd = 'if [ -d {} ]; then head -1 {} | sed "s/sensu //"; fi' \
+ .format(self.SENSU_DIR, self.SENSU_VERSION_FILE)
+ installed_version = self.cli_ssh.exec(cmd).strip()
+ os_details = self.host['OS']
+        available_pkg, pkg_path = self.find_available_package(os_details)
+        if not available_pkg:
+            return ''
+        available_version = self.find_available_version(available_pkg)
+ if parse_version(available_version) <= parse_version(installed_version):
+ return ''
+ return pkg_path
+
+ def get_local_path(self, pkg_to_install: str):
+ return os.path.join(self.SENSU_PKG_DIR_LOCAL,
+ os.path.basename(pkg_to_install))
+
+ def fetch_package(self, pkg_to_install: str):
+ self.make_directory(self.SENSU_PKG_DIR_LOCAL)
+ self.get_file(self.server, pkg_to_install,
+ self.get_local_path(pkg_to_install))
+ local_path = self.get_local_path(pkg_to_install)
+ self.copy_to_remote_host(self.host['host'],
+ local_path=local_path,
+ remote_path=local_path)
+
+ def install_package(self, pkg_to_install):
+ local_path = self.get_local_path(pkg_to_install)
+ install_cmd = self.INSTALL_CMD[self.host['OS']['ID']]
+ self.cli_ssh.exec(install_cmd.format(local_path))
+
+ def set_permissions(self):
+ cmd = self.PERMISSIONS_CMD[self.host['OS']['ID']]
+ if cmd:
+ self.cli_ssh.exec(cmd)
+ # add to sudoers file
+ sudoer_permission = 'sensu ALL=(ALL) NOPASSWD: ALL'
+ sudoer_cmd = 'grep --silent -w sensu {} || echo "{}" >> {}'\
+ .format(self.SUDOERS_FILE, sudoer_permission, self.SUDOERS_FILE)
+ self.cli_ssh.exec(sudoer_cmd)
diff --git a/app/test/api/responders_test/resource/test_clique_types.py b/app/test/api/responders_test/resource/test_clique_types.py
index f5e331e..5e52cea 100644
--- a/app/test/api/responders_test/resource/test_clique_types.py
+++ b/app/test/api/responders_test/resource/test_clique_types.py
@@ -17,10 +17,17 @@ from unittest.mock import patch
class TestCliqueTypes(TestBase):
- def test_get_clique_types_list_without_env_name(self):
- self.validate_get_request(clique_types.URL,
- params={},
- expected_code=base.BAD_REQUEST_CODE)
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_all_clique_types_list(self, read):
+ self.validate_get_request(
+ clique_types.URL,
+ params={},
+ mocks={
+ read: clique_types.CLIQUE_TYPES
+ },
+ expected_code=base.SUCCESSFUL_CODE,
+ expected_response=clique_types.CLIQUE_TYPES_RESPONSE
+ )
def test_get_clique_types_with_invalid_filter(self):
self.validate_get_request(clique_types.URL,
@@ -53,6 +60,28 @@ class TestCliqueTypes(TestBase):
expected_code=base.SUCCESSFUL_CODE
)
+ def test_get_clique_type_with_insufficient_configuration(self):
+ self.validate_get_request(
+ clique_types.URL,
+ params={
+ "distribution_version": base.CORRECT_DIST_VER,
+ },
+ expected_code=base.BAD_REQUEST_CODE
+ )
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_get_clique_type_with_correct_configuration(self, read):
+ self.validate_get_request(
+ clique_types.URL,
+ params=clique_types.TEST_CONFIGURATION,
+ mocks={
+ read: clique_types.CLIQUE_TYPES_WITH_SPECIFIC_CONFIGURATION
+ },
+ expected_response=clique_types.
+ CLIQUE_TYPES_WITH_SPECIFIC_CONFIGURATION_RESPONSE,
+ expected_code=base.SUCCESSFUL_CODE
+ )
+
def test_get_clique_types_list_with_wrong_focal_point_type(self):
self.validate_get_request(clique_types.URL,
params={
@@ -204,9 +233,53 @@ class TestCliqueTypes(TestBase):
body=json.dumps(clique_types.NON_DICT_CLIQUE_TYPE),
expected_code=base.BAD_REQUEST_CODE)
- def test_post_clique_type_without_env_name(self):
+ @patch(base.RESPONDER_BASE_CHECK_ENVIRONMENT_NAME)
+ def test_post_clique_type_with_reserved_env_name(self, check_env_name):
+ self.validate_post_request(
+ clique_types.URL,
+ mocks={
+ check_env_name: True
+ },
+ body=json.dumps(clique_types.CLIQUE_TYPE_WITH_RESERVED_NAME),
+ expected_code=base.BAD_REQUEST_CODE
+ )
+
+ def test_post_clique_type_without_env_name_and_configuration(self):
+ self.validate_post_request(
+ clique_types.URL,
+ body=json.dumps(clique_types.CLIQUE_TYPE_WITHOUT_ENV_NAME_AND_CONF),
+ expected_code=base.BAD_REQUEST_CODE
+ )
+
+ def test_post_clique_type_with_both_env_name_and_configuration(self):
+ self.validate_post_request(
+ clique_types.URL,
+ body=json.dumps(
+ clique_types.CLIQUE_TYPE_WITH_BOTH_ENV_AND_CONF),
+ expected_code=base.BAD_REQUEST_CODE
+ )
+
+ @patch(base.RESPONDER_BASE_CHECK_ENVIRONMENT_NAME)
+    def test_post_clique_type_with_insufficient_configuration(
+            self, check_env_name):
+ self.validate_post_request(
+ clique_types.URL,
+ mocks={
+ check_env_name: True
+ },
+ body=json.dumps(clique_types.CLIQUE_TYPE_WITH_INSUFFICIENT_CONF),
+ expected_code=base.BAD_REQUEST_CODE
+ )
+
+ @patch(base.RESPONDER_BASE_READ)
+ def test_post_clique_type_with_duplicate_configuration(self, read):
+ data = clique_types.CLIQUE_TYPES_WITH_SPECIFIC_CONFIGURATION[0]
+ resp = clique_types.CLIQUE_TYPES_WITH_SPECIFIC_CONFIGURATION_RESPONSE
+ test_data = self.get_updated_data(data, deleted_keys=['id'])
self.validate_post_request(clique_types.URL,
- body=json.dumps(clique_types.CLIQUE_TYPE_WITHOUT_ENVIRONMENT),
+ body=json.dumps(test_data),
+ mocks={
+ read: resp,
+ },
expected_code=base.BAD_REQUEST_CODE)
@patch(base.RESPONDER_BASE_CHECK_ENVIRONMENT_NAME)
@@ -231,6 +304,17 @@ class TestCliqueTypes(TestBase):
CLIQUE_TYPE_WITH_WRONG_FOCAL_POINT_TYPE),
expected_code=base.BAD_REQUEST_CODE)
+ @patch(base.RESPONDER_BASE_READ)
+ def test_post_clique_type_with_duplicate_focal_point_type(self, read):
+ test_data = self.get_updated_data(clique_types.CLIQUE_TYPE,
+ updates={'name': 'test-name'})
+ self.validate_post_request(clique_types.URL,
+ body=json.dumps(test_data),
+ mocks={
+ read: [clique_types.CLIQUE_TYPE],
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
def test_post_clique_type_without_link_types(self):
self.validate_post_request(clique_types.URL,
body=json.dumps(
@@ -255,6 +339,18 @@ class TestCliqueTypes(TestBase):
body=json.dumps(clique_types.CLIQUE_TYPE_WITHOUT_NAME),
expected_code=base.BAD_REQUEST_CODE)
+ def test_post_clique_type_with_wrong_mechanism_drivers(self):
+ self.validate_post_request(clique_types.URL,
+ body=json.dumps(clique_types.
+ CLIQUE_TYPE_WITH_WRONG_MECH_DRIVERS),
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_post_clique_type_with_wrong_type_drivers(self):
+ self.validate_post_request(clique_types.URL,
+ body=json.dumps(clique_types.
+ CLIQUE_TYPE_WITH_WRONG_TYPE_DRIVERS),
+ expected_code=base.BAD_REQUEST_CODE)
+
@patch(base.RESPONDER_BASE_CHECK_ENVIRONMENT_NAME)
@patch(base.RESPONDER_BASE_WRITE)
def test_post_clique_type(self, write, check_environment_name):
diff --git a/app/test/api/responders_test/resource/test_environment_configs.py b/app/test/api/responders_test/resource/test_environment_configs.py
index 6356f06..4405f2b 100644
--- a/app/test/api/responders_test/resource/test_environment_configs.py
+++ b/app/test/api/responders_test/resource/test_environment_configs.py
@@ -9,7 +9,9 @@
###############################################################################
import json
+from api.responders.resource.environment_configs import EnvironmentConfigs
from test.api.responders_test.test_data import base
+from test.api.responders_test.test_data.base import CONSTANTS_BY_NAMES
from test.api.test_base import TestBase
from test.api.responders_test.test_data import environment_configs
from utils.constants import EnvironmentFeatures
@@ -23,35 +25,25 @@ class TestEnvironmentConfigs(TestBase):
def test_get_environment_configs_list(self, read):
self.validate_get_request(environment_configs.URL,
params={},
- mocks={
- read: environment_configs.ENV_CONFIGS
- },
+ mocks={read: environment_configs.ENV_CONFIGS},
expected_code=base.SUCCESSFUL_CODE,
expected_response=environment_configs.
- ENV_CONFIGS_RESPONSE
- )
+ ENV_CONFIGS_RESPONSE)
def test_get_environment_configs_list_with_invalid_filters(self):
self.validate_get_request(environment_configs.URL,
- params={
- "unknown": "unknown"
- },
+ params={"unknown": "unknown"},
expected_code=base.BAD_REQUEST_CODE)
@patch(base.RESPONDER_BASE_READ)
def test_get_environment_configs_list_with_name(self, read):
+ mocks = {read: environment_configs.ENV_CONFIGS_WITH_SPECIFIC_NAME}
self.validate_get_request(environment_configs.URL,
- params={
- "name": environment_configs.NAME
- },
- mocks={
- read: environment_configs.
- ENV_CONFIGS_WITH_SPECIFIC_NAME
- },
+ params={"name": environment_configs.NAME},
+ mocks=mocks,
expected_code=base.SUCCESSFUL_CODE,
expected_response=environment_configs.
- ENV_CONFIGS_WITH_SPECIFIC_NAME[0]
- )
+ ENV_CONFIGS_WITH_SPECIFIC_NAME[0])
@patch(base.RESPONDER_BASE_READ)
def test_get_environment_configs_list_with_unknown_name(self, read):
@@ -82,193 +74,151 @@ class TestEnvironmentConfigs(TestBase):
@patch(base.RESPONDER_BASE_READ)
def test_get_environment_configs_list_with_distribution(self, read):
+ config = environment_configs.ENV_CONFIGS_WITH_SPECIFIC_DISTRIBUTION
+ config_response = \
+ environment_configs.ENV_CONFIGS_WITH_SPECIFIC_DISTRIBUTION_RESPONSE
self.validate_get_request(environment_configs.URL,
params={
"distribution":
environment_configs.
CORRECT_DISTRIBUTION
},
- mocks={
- read: environment_configs.
- ENV_CONFIGS_WITH_SPECIFIC_DISTRIBUTION
- },
+ mocks={read: config},
expected_code=base.SUCCESSFUL_CODE,
- expected_response=environment_configs.
- ENV_CONFIGS_WITH_SPECIFIC_DISTRIBUTION_RESPONSE)
+ expected_response=config_response)
def test_get_environment_configs_list_with_wrong_mechanism_driver(self):
+ config = environment_configs.WRONG_MECHANISM_DRIVER
self.validate_get_request(environment_configs.URL,
- params={
- "mechanism_drivers":
- environment_configs.WRONG_MECHANISM_DRIVER
- },
+ params={"mechanism_drivers": config},
expected_code=base.BAD_REQUEST_CODE)
@patch(base.RESPONDER_BASE_READ)
def test_get_environment_configs_list_with_mechanism_driver(self, read):
+ mechanism = environment_configs.CORRECT_MECHANISM_DRIVER
+ config = environment_configs.ENV_CONFIGS_WITH_SPECIFIC_MECHANISM_DRIVER
+ config_response = environment_configs.\
+ ENV_CONFIGS_WITH_SPECIFIC_MECHANISM_DRIVER_RESPONSE
self.validate_get_request(environment_configs.URL,
- params={
- "mechanism_drivers":
- environment_configs.CORRECT_MECHANISM_DRIVER
- },
- mocks={
- read: environment_configs.
- ENV_CONFIGS_WITH_SPECIFIC_MECHANISM_DRIVER
- },
+ params={"mechanism_drivers": mechanism},
+ mocks={read: config},
expected_code=base.SUCCESSFUL_CODE,
- expected_response=environment_configs.
- ENV_CONFIGS_WITH_SPECIFIC_MECHANISM_DRIVER_RESPONSE
- )
+ expected_response=config_response)
def test_get_environment_configs_list_with_wrong_type_driver(self):
+ driver = environment_configs.WRONG_TYPE_DRIVER
self.validate_get_request(environment_configs.URL,
- params={
- "type_drivers":
- environment_configs.WRONG_TYPE_DRIVER
- },
+ params={"type_drivers": driver},
expected_code=base.BAD_REQUEST_CODE)
@patch(base.RESPONDER_BASE_READ)
def test_get_environment_configs_list_with_type_driver(self, read):
+ driver = environment_configs.CORRECT_TYPE_DRIVER
+ config = environment_configs.ENV_CONFIGS_WITH_SPECIFIC_TYPE_DRIVER
+ config_response = environment_configs.\
+ ENV_CONFIGS_WITH_SPECIFIC_TYPE_DRIVER_RESPONSE
self.validate_get_request(environment_configs.URL,
- params={
- "type_drivers":
- environment_configs.CORRECT_TYPE_DRIVER
- },
- mocks={
- read: environment_configs.
- ENV_CONFIGS_WITH_SPECIFIC_TYPE_DRIVER
- },
+ params={"type_drivers": driver},
+ mocks={read: config},
expected_code=base.SUCCESSFUL_CODE,
- expected_response=environment_configs.
- ENV_CONFIGS_WITH_SPECIFIC_TYPE_DRIVER_RESPONSE
+ expected_response=config_response
)
@patch(base.RESPONDER_BASE_READ)
def test_get_environment_configs_list_with_user(self, read):
+ config = environment_configs.ENV_CONFIGS_WITH_SPECIFIC_USER
+ config_response = \
+ environment_configs.ENV_CONFIGS_WITH_SPECIFIC_USER_RESPONSE
self.validate_get_request(environment_configs.URL,
- params={
- "user": environment_configs.USER
- },
- mocks={
- read: environment_configs.
- ENV_CONFIGS_WITH_SPECIFIC_USER
- },
+ params={"user": environment_configs.USER},
+ mocks={read: config},
expected_code=base.SUCCESSFUL_CODE,
- expected_response=environment_configs.
- ENV_CONFIGS_WITH_SPECIFIC_USER_RESPONSE
- )
+ expected_response=config_response)
def test_get_environment_configs_list_with_non_bool_listen(self):
self.validate_get_request(environment_configs.URL,
- params={
- "listen": environment_configs.NON_BOOL_LISTEN
- },
+ params={"listen": environment_configs.
+ NON_BOOL_LISTEN},
expected_code=base.BAD_REQUEST_CODE)
@patch(base.RESPONDER_BASE_READ)
def test_get_environment_configs_list_with_bool_listen(self, read):
+ config = environment_configs.ENV_CONFIGS_WITH_SPECIFIC_LISTEN
+ config_response = \
+ environment_configs.ENV_CONFIGS_WITH_SPECIFIC_LISTEN_RESPONSE
self.validate_get_request(environment_configs.URL,
- params={
- "listen": environment_configs.BOOL_LISTEN
- },
- mocks={
- read: environment_configs.
- ENV_CONFIGS_WITH_SPECIFIC_LISTEN
- },
+ params={"listen": environment_configs.
+ BOOL_LISTEN},
+ mocks={read: config},
expected_code=base.SUCCESSFUL_CODE,
- expected_response=environment_configs.
- ENV_CONFIGS_WITH_SPECIFIC_LISTEN_RESPONSE
- )
+ expected_response=config_response)
def test_get_environment_configs_list_with_non_bool_scanned(self):
self.validate_get_request(environment_configs.URL,
- params={
- "scanned": environment_configs.
- NON_BOOL_SCANNED
- },
+ params={"scanned": environment_configs.
+ NON_BOOL_SCANNED},
expected_code=base.BAD_REQUEST_CODE)
@patch(base.RESPONDER_BASE_READ)
def test_get_environment_configs_list_with_bool_scanned(self, read):
+ config = environment_configs.ENV_CONFIGS_WITH_SPECIFIC_SCANNED
+ config_response = \
+ environment_configs.ENV_CONFIGS_WITH_SPECIFIC_SCANNED_RESPONSE
self.validate_get_request(environment_configs.URL,
- params={
- "scanned": environment_configs.BOOL_SCANNED
- },
- mocks={
- read: environment_configs.
- ENV_CONFIGS_WITH_SPECIFIC_SCANNED
- },
+ params={"scanned": environment_configs.
+ BOOL_SCANNED},
+ mocks={read: config},
expected_code=base.SUCCESSFUL_CODE,
- expected_response=environment_configs.
- ENV_CONFIGS_WITH_SPECIFIC_SCANNED_RESPONSE
+ expected_response=config_response
)
- def test_get_environment_configs_list_with_non_bool_monitoring_setup_done(self):
+ def test_get_env_configs_list_with_non_bool_monitoring_setup_done(self):
self.validate_get_request(environment_configs.URL,
- params={
- "listen": environment_configs.
- NON_BOOL_MONITORING_SETUP_DONE
- },
+ params={"listen": environment_configs.
+ NON_BOOL_MONITORING_SETUP_DONE},
expected_code=base.BAD_REQUEST_CODE)
@patch(base.RESPONDER_BASE_READ)
- def test_get_environment_configs_list_with_bool_monitoring_setup_done(self, read):
+ def test_get_environment_configs_list_with_bool_monitoring_setup_done(self,
+ read):
+ config = environment_configs.\
+ ENV_CONFIGS_WITH_SPECIFIC_MONITORING_SETUP_DONE
+ config_response = environment_configs.\
+ ENV_CONFIGS_WITH_SPECIFIC_MONITORING_SETUP_DONE_RESPONSE
self.validate_get_request(environment_configs.URL,
- params={
- "scanned": environment_configs.
- BOOL_MONITORING_SETUP_DONE
- },
- mocks={
- read: environment_configs.
- ENV_CONFIGS_WITH_SPECIFIC_MONITORING_SETUP_DONE
- },
+ params={"scanned": environment_configs.
+ BOOL_MONITORING_SETUP_DONE},
+ mocks={read: config},
expected_code=base.SUCCESSFUL_CODE,
- expected_response=environment_configs.
- ENV_CONFIGS_WITH_SPECIFIC_MONITORING_SETUP_DONE_RESPONSE
- )
+ expected_response=config_response)
def test_get_environment_configs_list_with_non_int_page(self):
self.validate_get_request(environment_configs.URL,
- params={
- "page": base.NON_INT_PAGE
- },
+ params={"page": base.NON_INT_PAGE},
expected_code=base.BAD_REQUEST_CODE)
@patch(base.RESPONDER_BASE_READ)
def test_get_environment_configs_list_with_int_page(self, read):
+ config_response = environment_configs.ENV_CONFIGS_RESPONSE
self.validate_get_request(environment_configs.URL,
- params={
- "page": base.INT_PAGE
- },
- mocks={
- read: environment_configs.ENV_CONFIGS
- },
+ params={"page": base.INT_PAGE},
+ mocks={read: environment_configs.ENV_CONFIGS},
expected_code=base.SUCCESSFUL_CODE,
- expected_response=environment_configs.
- ENV_CONFIGS_RESPONSE
- )
+ expected_response=config_response)
def test_get_environment_configs_list_with_non_int_page_size(self):
self.validate_get_request(environment_configs.URL,
- params={
- "page_size": base.NON_INT_PAGESIZE
- },
+ params={"page_size": base.NON_INT_PAGESIZE},
expected_code=base.BAD_REQUEST_CODE)
@patch(base.RESPONDER_BASE_READ)
def test_get_environment_configs_list_with_int_page_size(self, read):
+ config_response = environment_configs.ENV_CONFIGS_RESPONSE
self.validate_get_request(environment_configs.URL,
- params={
- "page_size": base.INT_PAGESIZE
- },
- mocks={
- read: environment_configs.ENV_CONFIGS
- },
+ params={"page_size": base.INT_PAGESIZE},
+ mocks={read: environment_configs.ENV_CONFIGS},
expected_code=base.SUCCESSFUL_CODE,
- expected_response=environment_configs.
- ENV_CONFIGS_RESPONSE
- )
+ expected_response=config_response)
def test_post_environment_config_without_app_path(self):
test_data = self.get_updated_data(environment_configs.ENV_CONFIG,
@@ -292,8 +242,9 @@ class TestEnvironmentConfigs(TestBase):
expected_code=base.BAD_REQUEST_CODE)
def test_post_environment_config_with_wrong_distribution(self):
+ dist = environment_configs.WRONG_DISTRIBUTION
test_data = self.get_updated_data(environment_configs.ENV_CONFIG,
- updates={"distribution": environment_configs.WRONG_DISTRIBUTION})
+ updates={"distribution": dist})
self.validate_post_request(environment_configs.URL,
body=json.dumps(test_data),
expected_code=base.BAD_REQUEST_CODE)
@@ -306,8 +257,9 @@ class TestEnvironmentConfigs(TestBase):
expected_code=base.BAD_REQUEST_CODE)
def test_post_environment_config_with_wrong_listen(self):
+ listen_val = environment_configs.NON_BOOL_LISTEN
test_data = self.get_updated_data(environment_configs.ENV_CONFIG,
- updates={"listen": environment_configs.NON_BOOL_LISTEN})
+ updates={"listen": listen_val})
self.validate_post_request(environment_configs.URL,
body=json.dumps(test_data),
expected_code=base.BAD_REQUEST_CODE)
@@ -320,10 +272,10 @@ class TestEnvironmentConfigs(TestBase):
expected_code=base.BAD_REQUEST_CODE)
def test_post_environment_config_with_wrong_mechanism_driver(self):
+ mechanism = environment_configs.WRONG_MECHANISM_DRIVER
test_data = self.get_updated_data(environment_configs.ENV_CONFIG,
updates={
- "mechanism_drivers":
- [environment_configs.WRONG_MECHANISM_DRIVER]
+ "mechanism_drivers": [mechanism]
})
self.validate_post_request(environment_configs.URL,
body=json.dumps(test_data),
@@ -344,19 +296,17 @@ class TestEnvironmentConfigs(TestBase):
expected_code=base.BAD_REQUEST_CODE)
def test_post_environment_config_with_wrong_scanned(self):
+ scanned_val = environment_configs.NON_BOOL_SCANNED
test_data = self.get_updated_data(environment_configs.ENV_CONFIG,
- updates={
- "scanned": environment_configs.NON_BOOL_SCANNED
- })
+ updates={"scanned": scanned_val})
self.validate_post_request(environment_configs.URL,
body=json.dumps(test_data),
expected_code=base.BAD_REQUEST_CODE)
def test_post_environment_config_with_wrong_last_scanned(self):
+ scanned_val = base.WRONG_FORMAT_TIME
test_data = self.get_updated_data(environment_configs.ENV_CONFIG,
- updates={
- "last_scanned": base.WRONG_FORMAT_TIME
- })
+ updates={"last_scanned": scanned_val})
self.validate_post_request(environment_configs.URL,
body=json.dumps(test_data),
expected_code=base.BAD_REQUEST_CODE)
@@ -376,16 +326,81 @@ class TestEnvironmentConfigs(TestBase):
expected_code=base.BAD_REQUEST_CODE)
def test_post_environment_config_with_wrong_type_drivers(self):
+ driver = environment_configs.WRONG_TYPE_DRIVER
+ test_data = self.get_updated_data(environment_configs.ENV_CONFIG,
+ updates={"type_drivers": [driver]})
+ self.validate_post_request(environment_configs.URL,
+ body=json.dumps(test_data),
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_post_environment_config_with_duplicate_configurations(self):
+ test_data = self.get_updated_data(environment_configs.ENV_CONFIG)
+ test_data["configuration"].append({
+ "name": "OpenStack"
+ })
+ self.validate_post_request(environment_configs.URL,
+ body=json.dumps(test_data),
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_post_environment_config_with_empty_configuration(self):
+ test_data = self.get_updated_data(environment_configs.ENV_CONFIG)
+ test_data["configuration"].append({})
+ self.validate_post_request(environment_configs.URL,
+ body=json.dumps(test_data),
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_post_environment_config_with_unknown_configuration(self):
+ test_data = self.get_updated_data(environment_configs.ENV_CONFIG)
+ test_data["configuration"].append({
+ "name": "Unknown configuration",
+ })
+ self.validate_post_request(environment_configs.URL,
+ body=json.dumps(test_data),
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_post_environment_config_without_required_configurations(self):
+ for env_type in CONSTANTS_BY_NAMES["environment_types"]:
+ required_conf_list = (
+ EnvironmentConfigs.REQUIRED_CONFIGURATIONS_NAMES.get(env_type,
+ [])
+ )
+ if required_conf_list:
+ test_data = \
+ self.get_updated_data(environment_configs.ENV_CONFIG)
+ test_data['environment_type'] = env_type
+ test_data['configuration'] = [
+ c
+ for c in test_data['configuration']
+ if c['name'] != required_conf_list[0]
+ ]
+
+ self.validate_post_request(environment_configs.URL,
+ body=json.dumps(test_data),
+ expected_code=base.BAD_REQUEST_CODE)
+
+ def test_post_environment_config_with_incomplete_configuration(self):
test_data = self.get_updated_data(environment_configs.ENV_CONFIG,
updates={
- "type_drivers": [environment_configs.WRONG_TYPE_DRIVER]
+ "configuration": [{
+ "host": "10.56.20.239",
+ "name": "mysql",
+ "user": "root"
+ }, {
+ "name": "OpenStack",
+ "host": "10.56.20.239",
+ }, {
+ "host": "10.56.20.239",
+ "name": "CLI",
+ "user": "root"
+ }]
})
self.validate_post_request(environment_configs.URL,
body=json.dumps(test_data),
expected_code=base.BAD_REQUEST_CODE)
- def mock_validate_env_config_with_supported_envs(self, scanning,
- monitoring, listening):
+ @staticmethod
+ def mock_validate_env_config_with_supported_envs(scanning, monitoring,
+ listening):
InventoryMgr.is_feature_supported_in_env = \
lambda self, matches, feature: {
EnvironmentFeatures.SCANNING: scanning,
@@ -396,11 +411,12 @@ class TestEnvironmentConfigs(TestBase):
@patch(base.RESPONDER_BASE_WRITE)
def test_post_environment_config(self, write):
self.mock_validate_env_config_with_supported_envs(True, True, True)
+ post_body = json.dumps(environment_configs.ENV_CONFIG)
self.validate_post_request(environment_configs.URL,
mocks={
write: None
},
- body=json.dumps(environment_configs.ENV_CONFIG),
+ body=post_body,
expected_code=base.CREATED_CODE)
def test_post_unsupported_environment_config(self):
@@ -421,10 +437,11 @@ class TestEnvironmentConfigs(TestBase):
"listening": False
}
]
+ mock_validate = self.mock_validate_env_config_with_supported_envs
+ config = environment_configs.ENV_CONFIG
for test_case in test_cases:
- self.mock_validate_env_config_with_supported_envs(test_case["scanning"],
- test_case["monitoring"],
- test_case["listening"])
+ mock_validate(test_case["scanning"], test_case["monitoring"],
+ test_case["listening"])
self.validate_post_request(environment_configs.URL,
- body=json.dumps(environment_configs.ENV_CONFIG),
+ body=json.dumps(config),
expected_code=base.BAD_REQUEST_CODE)
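
The refactored tests above all share one shape: copy ENV_CONFIG, break a single
field, POST it, and expect 400. A hedged, table-driven sketch of the same
pattern (BAD_FIELD_CASES and the combined test name are hypothetical; the
helpers and constants are the ones used in the hunks):

    BAD_FIELD_CASES = [
        ("distribution", environment_configs.WRONG_DISTRIBUTION),
        ("listen", environment_configs.NON_BOOL_LISTEN),
        ("scanned", environment_configs.NON_BOOL_SCANNED),
        ("last_scanned", base.WRONG_FORMAT_TIME),
    ]

    def test_post_environment_config_rejects_bad_fields(self):
        # One subTest per bad field keeps failures individually reported
        for field, bad_value in BAD_FIELD_CASES:
            with self.subTest(field=field):
                test_data = self.get_updated_data(
                    environment_configs.ENV_CONFIG,
                    updates={field: bad_value})
                self.validate_post_request(
                    environment_configs.URL,
                    body=json.dumps(test_data),
                    expected_code=base.BAD_REQUEST_CODE)
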
diff --git a/app/test/api/responders_test/test_data/base.py b/app/test/api/responders_test/test_data/base.py
index b99d5bb..6d2422a 100644
--- a/app/test/api/responders_test/test_data/base.py
+++ b/app/test/api/responders_test/test_data/base.py
@@ -16,14 +16,14 @@ UNAUTHORIZED_CODE = "401"
CREATED_CODE = "201"
ENV_NAME = "Mirantis-Liberty-API"
-UNKNOWN_ENV = "Unkown-Environment"
+UNKNOWN_ENV = "Unknown-Environment"
NON_INT_PAGE = 1.4
INT_PAGE = 1
NON_INT_PAGESIZE = 2.4
INT_PAGESIZE = 2
WRONG_LINK_TYPE = "instance-host"
-CORRECT_LINK_TYPE= "instance-vnic"
+CORRECT_LINK_TYPE = "instance-vnic"
WRONG_LINK_STATE = "wrong"
CORRECT_LINK_STATE = "up"
@@ -41,7 +41,7 @@ WRONG_TYPE_DRIVER = "wrong_type"
CORRECT_TYPE_DRIVER = "local"
WRONG_MECHANISM_DRIVER = "wrong-mechanism-dirver"
-CORRECT_MECHANISM_DRIVER = "ovs"
+CORRECT_MECHANISM_DRIVER = "OVS"
WRONG_LOG_LEVEL = "wrong-log-level"
CORRECT_LOG_LEVEL = "critical"
@@ -71,16 +71,32 @@ NON_DICT_OBJ = ""
CONSTANTS_BY_NAMES = {
"link_types": [
"instance-vnic",
- "otep-vconnector",
- "otep-host_pnic",
+ "vnic-instance",
+ "vnic-vconnector",
+ "vconnector-vnic",
+ "vconnector-vedge",
+ "vedge-vconnector",
+ "vedge-host_pnic",
+ "host_pnic-vedge",
"host_pnic-network",
+ "network-host_pnic",
"vedge-otep",
- "vnic-vconnector",
+ "otep-vedge",
+ "otep-vconnector",
+ "vconnector-otep",
+ "otep-host_pnic",
+ "host_pnic-otep",
"vconnector-host_pnic",
- "vconnector-vedge",
+ "host_pnic-vconnector",
"vnic-vedge",
- "vedge-host_pnic",
- "vservice-vnic"
+ "vedge-vnic",
+ "vservice-vnic",
+ "vnic-vservice",
+ "switch_pnic-host_pnic",
+ "host_pnic-switch_pnic",
+ "switch_pnic-switch_pnic",
+ "switch_pnic-switch",
+ "switch-switch_pnic"
],
"link_states": [
"up",
@@ -117,9 +133,9 @@ CONSTANTS_BY_NAMES = {
"flat"
],
"mechanism_drivers": [
- "ovs",
- "vpp",
- "LinuxBridge",
+ "OVS",
+ "VPP",
+ "LXB",
"Arista",
"Nexus"
],
@@ -155,6 +171,10 @@ CONSTANTS_BY_NAMES = {
"Mirantis",
"RDO"
],
+ "distribution_versions": [
+ "8.0",
+ "9.0"
+ ],
"environment_operational_status": [
"stopped",
"running",
@@ -168,6 +188,30 @@ CONSTANTS_BY_NAMES = {
],
"environment_monitoring_types": [
"Sensu"
+ ],
+ "scans_statuses": [
+ "draft",
+ "pending",
+ "running",
+ "completed",
+ "completed_with_errors",
+ "failed",
+ "aborted"
+ ],
+ "configuration_targets": [
+ "AMQP",
+ "CLI",
+ "ACI",
+ "mysql",
+ "OpenStack",
+ "Monitoring",
+ "Kubernetes"
+ ],
+ "environment_types": [
+ "OpenStack",
+ "Kubernetes",
+ "VMware",
+ "Bare-metal"
]
}
@@ -175,7 +219,8 @@ CONSTANTS_BY_NAMES = {
RESPONDER_BASE_PATH = "api.responders.responder_base.ResponderBase"
RESPONDER_BASE_GET_OBJECTS_LIST = RESPONDER_BASE_PATH + ".get_objects_list"
RESPONDER_BASE_GET_OBJECT_BY_ID = RESPONDER_BASE_PATH + ".get_object_by_id"
-RESPONDER_BASE_CHECK_ENVIRONMENT_NAME = RESPONDER_BASE_PATH + ".check_environment_name"
+RESPONDER_BASE_CHECK_ENVIRONMENT_NAME = \
+ RESPONDER_BASE_PATH + ".check_environment_name"
RESPONDER_BASE_READ = RESPONDER_BASE_PATH + ".read"
RESPONDER_BASE_WRITE = RESPONDER_BASE_PATH + ".write"
RESPONDER_BASE_AGGREGATE = RESPONDER_BASE_PATH + ".aggregate"
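
Later in this patch (app/test/api/test_base.py) these constants are served to
the responders through a MagicMock side_effect. A minimal sketch of that
lookup, assuming only this test-data module:

    from unittest.mock import MagicMock
    from test.api.responders_test.test_data import base

    get_constants_by_name = MagicMock(
        side_effect=lambda name: base.CONSTANTS_BY_NAMES[name])

    # Each call resolves straight from the dictionary above
    assert "OVS" in get_constants_by_name("mechanism_drivers")
    assert "completed" in get_constants_by_name("scans_statuses")
    assert "OpenStack" in get_constants_by_name("environment_types")
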
diff --git a/app/test/api/responders_test/test_data/clique_types.py b/app/test/api/responders_test/test_data/clique_types.py
index ae962ce..4d55c8b 100644
--- a/app/test/api/responders_test/test_data/clique_types.py
+++ b/app/test/api/responders_test/test_data/clique_types.py
@@ -8,13 +8,19 @@
# http://www.apache.org/licenses/LICENSE-2.0 #
###############################################################################
from test.api.responders_test.test_data import base
-
+from test.api.responders_test.test_data.base import WRONG_MECHANISM_DRIVER, \
+ CORRECT_MECHANISM_DRIVER, CORRECT_TYPE_DRIVER, WRONG_TYPE_DRIVER, \
+ CORRECT_DISTRIBUTION, CORRECT_DIST_VER
+from utils.util import merge_dicts
URL = "/clique_types"
WRONG_ID = base.WRONG_OBJECT_ID
NONEXISTENT_ID = "58ca73ae3a8a836d10ff3b44"
CORRECT_ID = base.CORRECT_OBJECT_ID
+SAMPLE_IDS = ['58ca73ae3a8a836d10ff3b80', '58ca73ae3a8a836d10ff3b81']
+
+RESERVED_ENV_NAME = 'ANY'
WRONG_FOCAL_POINT_TYPE = base.WRONG_OBJECT_TYPE
CORRECT_FOCAL_POINT_POINT_TYPE = base.CORRECT_OBJECT_TYPE
@@ -23,25 +29,51 @@ WRONG_LINK_TYPE = base.WRONG_LINK_TYPE
NONEXISTENT_LINK_TYPE = "otep-host_pnic"
CORRECT_LINK_TYPE = base.CORRECT_LINK_TYPE
+CLIQUE_TYPE = {
+ "environment": "Mirantis-Liberty-API",
+ "name": "instance_vconnector_clique",
+ "link_types": [
+ "instance-vnic",
+ "vnic-vconnector"
+ ],
+ "focal_point_type": "instance"
+}
+
+TEST_CONFIGURATION = {
+ "distribution": CORRECT_DISTRIBUTION,
+ "distribution_version": CORRECT_DIST_VER,
+ "mechanism_drivers": CORRECT_MECHANISM_DRIVER,
+ "type_drivers": CORRECT_TYPE_DRIVER
+}
+
+
+def get_payload(update: dict = None, delete: list = None):
+ payload = CLIQUE_TYPE.copy()
+ if update:
+ payload.update(update)
+ if delete:
+ for k in delete:
+ del payload[k]
+ return payload
+
+
CLIQUE_TYPES_WITH_SPECIFIC_ID = [
- {
- "environment": "Mirantis-Liberty-API",
- "focal_point_type": "host_pnic",
- "id": CORRECT_ID
- }
+ get_payload(update={'id': CORRECT_ID})
+]
+
+CLIQUE_TYPES_WITH_SPECIFIC_CONFIGURATION = [
+ get_payload(update=merge_dicts(TEST_CONFIGURATION, {'id': SAMPLE_IDS[0]}),
+ delete=['environment'])
]
+CLIQUE_TYPES_WITH_SPECIFIC_CONFIGURATION_RESPONSE = {
+ "clique_types": CLIQUE_TYPES_WITH_SPECIFIC_CONFIGURATION
+}
+
CLIQUE_TYPES_WITH_SPECIFIC_FOCAL_POINT_TYPE = [
- {
- "environment": "Mirantis-Liberty-API",
- "focal_point_type": CORRECT_FOCAL_POINT_POINT_TYPE,
- "id": "58ca73ae3a8a836d10ff3b80"
- },
- {
- "environment": "Mirantis-Liberty-API",
- "focal_point_type": CORRECT_FOCAL_POINT_POINT_TYPE,
- "id": "58ca73ae3a8a836d10ff3b81"
- }
+ get_payload(update={'id': _id,
+ 'focal_point_type': CORRECT_FOCAL_POINT_POINT_TYPE})
+ for _id in SAMPLE_IDS
]
CLIQUE_TYPES_WITH_SPECIFIC_FOCAL_POINT_TYPE_RESPONSE = {
@@ -49,20 +81,9 @@ CLIQUE_TYPES_WITH_SPECIFIC_FOCAL_POINT_TYPE_RESPONSE = {
}
CLIQUE_TYPES_WITH_SPECIFIC_LINK_TYPE = [
- {
- "environment": "Mirantis-Liberty-API",
- "link_types": [
- CORRECT_LINK_TYPE
- ],
- "id": "58ca73ae3a8a836d10ff3b80"
- },
- {
- "environment": "Mirantis-Liberty-API",
- "link_types": [
- CORRECT_LINK_TYPE
- ],
- "id": "58ca73ae3a8a836d10ff3b81"
- }
+ get_payload(update={'id': _id,
+ 'link_types': [CORRECT_LINK_TYPE]})
+ for _id in SAMPLE_IDS
]
CLIQUE_TYPES_WITH_SPECIFIC_LINK_TYPE_RESPONSE = {
@@ -70,16 +91,7 @@ CLIQUE_TYPES_WITH_SPECIFIC_LINK_TYPE_RESPONSE = {
}
CLIQUE_TYPES = [
- {
- "environment": "Mirantis-Liberty-API",
- "focal_point_type": "vnic",
- "id": "58ca73ae3a8a836d10ff3b80"
- },
- {
- "environment": "Mirantis-Liberty-API",
- "focal_point_type": "vnic",
- "id": "58ca73ae3a8a836d10ff3b81"
- }
+ get_payload(update={'id': _id}) for _id in SAMPLE_IDS
]
CLIQUE_TYPES_RESPONSE = {
@@ -88,83 +100,48 @@ CLIQUE_TYPES_RESPONSE = {
NON_DICT_CLIQUE_TYPE = base.NON_DICT_OBJ
-CLIQUE_TYPE_WITHOUT_ENVIRONMENT = {
- "name": "instance_vconnector_clique",
- "link_types": [
- "instance-vnic",
- "vnic-vconnector"
- ],
- "focal_point_type": "instance"
-}
+CLIQUE_TYPE_WITH_RESERVED_NAME = get_payload(
+ update={'environment': RESERVED_ENV_NAME}
+)
-CLIQUE_TYPE_WITH_UNKNOWN_ENVIRONMENT = {
- "environment": base.UNKNOWN_ENV,
- "id": "589a3969761b0555a3ef6093",
- "name": "instance_vconnector_clique",
- "link_types": [
- "instance-vnic",
- "vnic-vconnector"
- ],
- "focal_point_type": "instance"
-}
+CLIQUE_TYPE_WITHOUT_ENV_NAME_AND_CONF = get_payload(
+ delete=['environment']
+)
-CLIQUE_TYPE_WITHOUT_FOCAL_POINT_TYPE = {
- "environment": "Mirantis-Liberty-API",
- "name": "instance_vconnector_clique",
- "link_types": [
- "instance-vnic",
- "vnic-vconnector"
- ]
-}
+CLIQUE_TYPE_WITH_BOTH_ENV_AND_CONF = get_payload(
+ update=TEST_CONFIGURATION
+)
-CLIQUE_TYPE_WITH_WRONG_FOCAL_POINT_TYPE = {
- "environment": "Mirantis-Liberty-API",
- "name": "instance_vconnector_clique",
- "link_types": [
- "instance-vnic",
- "vnic-vconnector"
- ],
- "focal_point_type": WRONG_FOCAL_POINT_TYPE
-}
+CLIQUE_TYPE_WITH_INSUFFICIENT_CONF = get_payload(
+ update={'distribution_version': CORRECT_DIST_VER}
+)
-CLIQUE_TYPE_WITHOUT_LINK_TYPES = {
- "environment": "Mirantis-Liberty-API",
- "name": "instance_vconnector_clique",
- "focal_point_type": "instance"
-}
+CLIQUE_TYPE_WITH_UNKNOWN_ENVIRONMENT = get_payload(
+ update={'environment': base.UNKNOWN_ENV}
+)
-CLIQUE_TYPE_WITH_NON_LIST_LINK_TYPES = {
- "environment": "Mirantis-Liberty-API",
- "name": "instance_vconnector_clique",
- "link_types": "instance-vnic",
- "focal_point_type": "instance"
-}
+CLIQUE_TYPE_WITHOUT_FOCAL_POINT_TYPE = get_payload(delete=['focal_point_type'])
-CLIQUE_TYPE_WITH_WRONG_LINK_TYPE = {
- "environment": "Mirantis-Liberty-API",
- "name": "instance_vconnector_clique",
- "link_types": [
- WRONG_LINK_TYPE,
- "vnic-vconnector"
- ],
- "focal_point_type": "instance"
-}
+CLIQUE_TYPE_WITH_WRONG_FOCAL_POINT_TYPE = get_payload(
+ update={'focal_point_type': WRONG_FOCAL_POINT_TYPE}
+)
-CLIQUE_TYPE_WITHOUT_NAME = {
- "environment": "Mirantis-Liberty-API",
- "link_types": [
- "instance-vnic",
- "vnic-vconnector",
- ],
- "focal_point_type": "instance"
-}
+CLIQUE_TYPE_WITHOUT_LINK_TYPES = get_payload(delete=['link_types'])
-CLIQUE_TYPE = {
- "environment": "Mirantis-Liberty-API",
- "name": "instance_vconnector_clique",
- "link_types": [
- "instance-vnic",
- "vnic-vconnector"
- ],
- "focal_point_type": "instance"
-}
+CLIQUE_TYPE_WITH_NON_LIST_LINK_TYPES = get_payload(
+ update={'link_types': "instance-vnic"}
+)
+
+CLIQUE_TYPE_WITH_WRONG_LINK_TYPE = get_payload(
+ update={'link_types': [WRONG_LINK_TYPE, "vnic-vconnector"]}
+)
+
+CLIQUE_TYPE_WITHOUT_NAME = get_payload(delete=['name'])
+
+CLIQUE_TYPE_WITH_WRONG_MECH_DRIVERS = get_payload(
+ update={'mechanism_drivers': WRONG_MECHANISM_DRIVER}
+)
+
+CLIQUE_TYPE_WITH_WRONG_TYPE_DRIVERS = get_payload(
+ update={'type_drivers': WRONG_TYPE_DRIVER}
+) \ No newline at end of file
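
merge_dicts is imported from utils.util above, but its body is outside this
patch; the sketch below is a stand-in with the one behavior the fixtures rely
on, followed by a check of how get_payload composes them (illustration only,
not repository code):

    def merge_dicts(*dicts):
        # Later dicts win on key collisions, as the fixtures expect
        merged = {}
        for d in dicts:
            merged.update(d)
        return merged

    payload = get_payload(update=merge_dicts(TEST_CONFIGURATION,
                                             {'id': SAMPLE_IDS[0]}),
                          delete=['environment'])
    assert 'environment' not in payload
    assert payload['distribution'] == CORRECT_DISTRIBUTION
    # get_payload copies CLIQUE_TYPE first, so the shared fixture is untouched
    assert 'environment' in CLIQUE_TYPE
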
diff --git a/app/test/api/responders_test/test_data/environment_configs.py b/app/test/api/responders_test/test_data/environment_configs.py
index 4cea105..3e976ec 100644
--- a/app/test/api/responders_test/test_data/environment_configs.py
+++ b/app/test/api/responders_test/test_data/environment_configs.py
@@ -201,6 +201,7 @@ ENV_CONFIG = {
"provision": "None",
"env_type": "development",
"name": "Monitoring",
+ "install_monitoring_client": True,
"api_port": "4567",
"rabbitmq_port": "5671",
"rabbitmq_pass": "sensuaccess",
@@ -218,12 +219,13 @@ ENV_CONFIG = {
"last_scanned": "2017-03-16T11:14:54Z",
"listen": True,
"mechanism_drivers": [
- "ovs"
+ "OVS"
],
"name": "Mirantis-Liberty",
"operational": "running",
"scanned": True,
"type": "environment",
"type_drivers": "vxlan",
- "user": "WS7j8oTbWPf3LbNne"
+ "user": "WS7j8oTbWPf3LbNne",
+ "environment_type": "OpenStack"
}
diff --git a/app/test/api/test_base.py b/app/test/api/test_base.py
index 33185ec..edc59ae 100644
--- a/app/test/api/test_base.py
+++ b/app/test/api/test_base.py
@@ -34,8 +34,10 @@ class TestBase(TestCase):
self.original_auth_method = AuthenticationMiddleware.process_request
AuthenticationMiddleware.process_request = mock_auth_method
- ResponderBase.get_constants_by_name = MagicMock(side_effect=
- lambda name: base.CONSTANTS_BY_NAMES[name])
+ ResponderBase.get_constants_by_name = MagicMock(
+ side_effect=lambda name: base.CONSTANTS_BY_NAMES[name]
+ )
+
# mock mongo access
MongoAccess.mongo_connect = MagicMock()
MongoAccess.db = MagicMock()
@@ -47,8 +49,8 @@ class TestBase(TestCase):
log_level = 'debug'
self.app = App(log_level=log_level).get_app()
- def validate_get_request(self, url, params={}, headers=None, mocks={},
- side_effects={},
+ def validate_get_request(self, url, params=None, headers=None, mocks=None,
+ side_effects=None,
expected_code=base.SUCCESSFUL_CODE,
expected_response=None):
self.validate_request("GET", url, params, headers, "",
@@ -59,25 +61,27 @@ class TestBase(TestCase):
def validate_request(self, action, url, params, headers, body,
mocks, side_effects, expected_code,
expected_response):
- for mock_method, mock_data in mocks.items():
- mock_method.return_value = mock_data
+ if mocks:
+ for mock_method, mock_data in mocks.items():
+ mock_method.return_value = mock_data
- for mock_method, side_effect in side_effects.items():
- mock_method.side_effect = side_effect
+ if side_effects:
+ for mock_method, side_effect in side_effects.items():
+ mock_method.side_effect = side_effect
result = self.simulate_request(action, url, params=params, headers=headers, body=body)
self.assertEqual(result.status, expected_code)
if expected_response:
self.assertEqual(result.json, expected_response)
- def validate_post_request(self, url, headers={}, body="", mocks={},
- side_effects={},
+ def validate_post_request(self, url, headers=None, body="", mocks=None,
+ side_effects=None,
expected_code=base.CREATED_CODE, expected_response=None):
self.validate_request("POST", url, {}, headers, body, mocks, side_effects,
expected_code, expected_response)
- def validate_delete_request(self, url, params={}, headers={}, mocks={},
- side_effects={},
+ def validate_delete_request(self, url, params=None, headers=None, mocks=None,
+ side_effects=None,
expected_code=base.SUCCESSFUL_CODE, expected_response=None):
self.validate_request("DELETE", url, params, headers, "",
mocks, side_effects,
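
The signature changes above (params={} to params=None, and likewise for
headers, mocks and side_effects) remove Python's shared-mutable-default
pitfall. A generic illustration, not repository code:

    def collect(item, bucket=[]):       # default list is created once,
        bucket.append(item)             # then shared across every call
        return bucket

    assert collect(1) == [1]
    assert collect(2) == [1, 2]         # state leaked from the first call

    def collect_fixed(item, bucket=None):
        bucket = [] if bucket is None else bucket
        bucket.append(item)
        return bucket

    assert collect_fixed(1) == [1]
    assert collect_fixed(2) == [2]      # independent per call
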
diff --git a/app/test/fetch/api_fetch/test_api_access.py b/app/test/fetch/api_fetch/test_api_access.py
index 0effc0e..440b730 100644
--- a/app/test/fetch/api_fetch/test_api_access.py
+++ b/app/test/fetch/api_fetch/test_api_access.py
@@ -7,9 +7,9 @@
# which accompanies this distribution, and is available at #
# http://www.apache.org/licenses/LICENSE-2.0 #
###############################################################################
-from unittest.mock import MagicMock, Mock
-
+import copy
import requests
+from unittest.mock import MagicMock, Mock
from discover.fetchers.api.api_access import ApiAccess
from test.fetch.api_fetch.test_data.api_access import *
@@ -35,38 +35,45 @@ class TestApiAccess(TestFetch):
def test_parse_illegal_time(self):
time = self.api_access.parse_time(ILLEGAL_TIME)
- self.assertEqual(time, None, "Can't get None when the time format is wrong")
+ self.assertEqual(time, None,
+ "Can't get None when the time format is wrong")
def test_get_existing_token(self):
self.api_access.tokens = VALID_TOKENS
token = self.api_access.get_existing_token(PROJECT)
- self.assertNotEqual(token, VALID_TOKENS[PROJECT], "Can't get existing token")
+ self.assertNotEqual(token, VALID_TOKENS[PROJECT],
+ "Can't get existing token")
def test_get_nonexistent_token(self):
self.api_access.tokens = EMPTY_TOKENS
token = self.api_access.get_existing_token(TEST_PROJECT)
- self.assertEqual(token, None, "Can't get None when the token doesn't " +
- "exist in tokens")
+ self.assertEqual(token, None,
+ "Can't get None when the token doesn't exist "
+ "in tokens")
def test_v2_auth(self):
self.api_access.get_existing_token = MagicMock(return_value=None)
self.response.json = Mock(return_value=CORRECT_AUTH_CONTENT)
# mock authentication info from OpenStack Api
- token_details = self.api_access.v2_auth(TEST_PROJECT, TEST_HEADER, TEST_BODY)
+ token_details = self.api_access.v2_auth(TEST_PROJECT, TEST_HEADER,
+ TEST_BODY)
self.assertNotEqual(token_details, None, "Can't get the token details")
def test_v2_auth_with_error_content(self):
self.api_access.get_existing_token = MagicMock(return_value=None)
self.response.json = Mock(return_value=ERROR_AUTH_CONTENT)
# authentication content from OpenStack Api will be incorrect
- token_details = self.api_access.v2_auth(TEST_PROJECT, TEST_HEADER, TEST_BODY)
- self.assertIs(token_details, None, "Can't get None when the content is wrong")
+ token_details = self.api_access.v2_auth(TEST_PROJECT, TEST_HEADER,
+ TEST_BODY)
+ self.assertIs(token_details, None,
+ "Can't get None when the content is wrong")
def test_v2_auth_with_error_token(self):
self.response.status_code = requests.codes.bad_request
self.response.json = Mock(return_value=ERROR_TOKEN_CONTENT)
# authentication info from OpenStack Api will not contain token info
- token_details = self.api_access.v2_auth(TEST_PROJECT, TEST_HEADER, TEST_BODY)
+ token_details = self.api_access.v2_auth(TEST_PROJECT, TEST_HEADER,
+ TEST_BODY)
self.assertIs(token_details, None, "Can't get None when the content " +
"doesn't contain any token info")
@@ -78,12 +85,13 @@ class TestApiAccess(TestFetch):
# the time will not be parsed
self.api_access.parse_time = MagicMock(return_value=None)
- token_details = self.api_access.v2_auth(TEST_PROJECT, TEST_HEADER, TEST_BODY)
+ token_details = self.api_access.v2_auth(TEST_PROJECT, TEST_HEADER,
+ TEST_BODY)
# reset original parse_time method
self.api_access.parse_time = original_method
- self.assertIs(token_details, None, "Can't get None when the time in token " +
- "can't be parsed")
+ self.assertIs(token_details, None,
+ "Can't get None when the time in token can't be parsed")
def test_v2_auth_pwd(self):
self.response.json = Mock(return_value=CORRECT_AUTH_CONTENT)
@@ -92,20 +100,30 @@ class TestApiAccess(TestFetch):
self.assertNotEqual(token, None, "Can't get token")
def test_get_url(self):
- self.response.json = Mock(return_value=GET_CONTENT)
+ get_response = copy.deepcopy(self.response)
+ get_response.status_code = requests.codes.ok
+ self.requests_get = requests.get
+ requests.get = MagicMock(return_value=get_response)
+ get_response.json = Mock(return_value=GET_CONTENT)
result = self.api_access.get_url(TEST_URL, TEST_HEADER)
# check whether it returns content message when the response is correct
self.assertNotEqual(result, None, "Can't get content when the "
"response is correct")
+ requests.get = self.requests_get
def test_get_url_with_error_response(self):
- self.response.status_code = requests.codes.bad_request
- self.response.json = Mock(return_value=None)
- self.response.text = "Bad request"
+ get_response = copy.deepcopy(self.response)
+ get_response.status_code = requests.codes.bad_request
+ get_response.text = "Bad request"
+ get_response.json = Mock(return_value=GET_CONTENT)
+ self.requests_get = requests.get
+ requests.get = MagicMock(return_value=get_response)
+
# the response will be wrong
result = self.api_access.get_url(TEST_URL, TEST_HEADER)
         self.assertEqual(result, None, "Result returned " +
"when the response status is not 200")
+ requests.get = self.requests_get
def test_get_region_url(self):
region_url = self.api_access.get_region_url(REGION_NAME, SERVICE_NAME)
@@ -120,23 +138,30 @@ class TestApiAccess(TestFetch):
def test_get_region_url_without_service_endpoint(self):
# error service doesn't exist in region service endpoints
- region_url = self.api_access.get_region_url(REGION_NAME, ERROR_SERVICE_NAME)
- self.assertIs(region_url, None, "Can't get None with wrong service name")
+ region_url = self.api_access.get_region_url(REGION_NAME,
+ ERROR_SERVICE_NAME)
+ self.assertIs(region_url, None,
+ "Can't get None with wrong service name")
def test_region_url_nover(self):
- # mock return value of get_region_url, which has something starting from v2
+ # mock return value of get_region_url,
+ # which has something starting from v2
self.api_access.get_region_url = MagicMock(return_value=REGION_URL)
- region_url = self.api_access.get_region_url_nover(REGION_NAME, SERVICE_NAME)
+ region_url = self.api_access.get_region_url_nover(REGION_NAME,
+ SERVICE_NAME)
# get_region_nover will remove everything from v2
- self.assertNotIn("v2", region_url, "Can't get region url without v2 info")
+ self.assertNotIn("v2", region_url,
+ "Can't get region url without v2 info")
def test_get_service_region_endpoints(self):
region = REGIONS[REGION_NAME]
- result = self.api_access.get_service_region_endpoints(region, SERVICE_NAME)
+ result = self.api_access.get_service_region_endpoints(region,
+ SERVICE_NAME)
self.assertNotEqual(result, None, "Can't get service endpoint")
def test_get_service_region_endpoints_with_nonexistent_service(self):
region = REGIONS[REGION_NAME]
- result = self.api_access.get_service_region_endpoints(region, ERROR_SERVICE_NAME)
+ get_endpoints = self.api_access.get_service_region_endpoints
+ result = get_endpoints(region, ERROR_SERVICE_NAME)
self.assertIs(result, None, "Can't get None when the service name " +
"doesn't exist in region's services")
diff --git a/app/test/fetch/api_fetch/test_api_fetch_project_hosts.py b/app/test/fetch/api_fetch/test_api_fetch_project_hosts.py
index da3df17..784079e 100644
--- a/app/test/fetch/api_fetch/test_api_fetch_project_hosts.py
+++ b/app/test/fetch/api_fetch/test_api_fetch_project_hosts.py
@@ -7,6 +7,7 @@
# which accompanies this distribution, and is available at #
# http://www.apache.org/licenses/LICENSE-2.0 #
###############################################################################
+import copy
from unittest.mock import MagicMock
from discover.fetchers.api.api_fetch_project_hosts import ApiFetchProjectHosts
from test.fetch.test_fetch import TestFetch
@@ -36,23 +37,28 @@ class TestApiFetchProjectHosts(TestFetch):
"type in host_type")
def test_add_host_type_with_existent_host_type(self):
+ fetch_host_os_details = self.fetcher.fetch_host_os_details
+ self.fetcher.fetch_host_os_details = MagicMock()
# add nonexistent host type to host type
HOST_DOC["host_type"] = [NONEXISTENT_TYPE]
# try to add existing host type
self.fetcher.add_host_type(HOST_DOC, NONEXISTENT_TYPE, HOST_ZONE)
- self.assertEqual(len(HOST_DOC['host_type']), 1, "Add duplicate host type")
+ self.assertEqual(len(HOST_DOC['host_type']), 1,
+ "Add duplicate host type")
+ self.fetcher.fetch_host_os_details = fetch_host_os_details
def test_add_compute_host_type(self):
- HOST_DOC['host_type'] = []
+ doc = copy.deepcopy(HOST_DOC)
+ doc['host_type'] = []
# clear zone
- HOST_DOC['zone'] = None
+ doc['zone'] = None
# add compute host type
- self.fetcher.add_host_type(HOST_DOC, COMPUTE_TYPE, HOST_ZONE)
+ self.fetcher.add_host_type(doc, COMPUTE_TYPE, HOST_ZONE)
# for compute host type, zone information will be added
- self.assertEqual(HOST_DOC['zone'], HOST_ZONE, "Can't update zone " +
- "name for compute node")
- self.assertEqual(HOST_DOC['parent_id'], HOST_ZONE, "Can't update parent_id " +
- "for compute node")
+ self.assertEqual(doc['zone'], HOST_ZONE,
+ "Can't update zone name for compute node")
+ self.assertEqual(doc['parent_id'], HOST_ZONE,
+ "Can't update parent_id for compute node")
def test_fetch_compute_node_ip_address(self):
# mock ip address information fetched from DB
@@ -78,16 +84,24 @@ class TestApiFetchProjectHosts(TestFetch):
def test_get_host_details(self):
# test node have nova-conductor attribute, controller type will be added
+ fetch_host_os_details = self.fetcher.fetch_host_os_details
+ self.fetcher.fetch_host_os_details = MagicMock()
result = self.fetcher.get_host_details(AVAILABILITY_ZONE, HOST_NAME)
self.assertIn("Controller", result['host_type'], "Can't put controller type " +
"in the compute node host_type")
+ self.fetcher.fetch_host_os_details = fetch_host_os_details
def test_get_hosts_from_az(self):
+ fetch_host_os_details = self.fetcher.fetch_host_os_details
+ self.fetcher.fetch_host_os_details = MagicMock()
result = self.fetcher.get_hosts_from_az(AVAILABILITY_ZONE)
self.assertNotEqual(result, [], "Can't get hosts information from "
"availability zone")
+ self.fetcher.fetch_host_os_details = fetch_host_os_details
def test_get_for_region(self):
+ fetch_host_os_details = self.fetcher.fetch_host_os_details
+ self.fetcher.fetch_host_os_details = MagicMock()
# mock region url for nova node
self.fetcher.get_region_url = MagicMock(return_value=REGION_URL)
# mock the response from OpenStack Api
@@ -96,6 +110,7 @@ class TestApiFetchProjectHosts(TestFetch):
result = self.fetcher.get_for_region(self.region, TOKEN)
self.assertNotEqual(result, [], "Can't get hosts information for region")
+ self.fetcher.fetch_host_os_details = fetch_host_os_details
def test_get_for_region_without_token(self):
self.fetcher.get_region_url = MagicMock(return_value=REGION_URL)
@@ -112,6 +127,8 @@ class TestApiFetchProjectHosts(TestFetch):
self.assertEqual(result, [], "Can't get [] when the response is wrong")
def test_get_for_region_with_error_hypervisors_response(self):
+ fetch_host_os_details = self.fetcher.fetch_host_os_details
+ self.fetcher.fetch_host_os_details = MagicMock()
self.fetcher.get_region_url = MagicMock(return_value=REGION_URL)
# mock error hypervisors response from OpenStack Api
side_effect = [AVAILABILITY_ZONE_RESPONSE, HYPERVISORS_ERROR_RESPONSE]
@@ -120,6 +137,7 @@ class TestApiFetchProjectHosts(TestFetch):
result = self.fetcher.get_for_region(self.region, TOKEN)
self.assertNotEqual(result, [], "Can't get hosts information when " +
"the hypervisors response is wrong")
+ self.fetcher.fetch_host_os_details = fetch_host_os_details
def test_get(self):
original_method = self.fetcher.get_for_region
@@ -140,6 +158,15 @@ class TestApiFetchProjectHosts(TestFetch):
result = self.fetcher.get(PROJECT_NAME)
self.assertEqual(result, [], "Can't get [] when the token is invalid")
+ def test_fetch_host_os_details(self):
+ original_method = self.fetcher.run
+ self.fetcher.run = MagicMock(return_value=OS_DETAILS_INPUT)
+ doc = {'host': 'host1'}
+ self.fetcher.fetch_host_os_details(doc)
+ self.assertEqual(doc.get('OS', {}), OS_DETAILS)
+ self.fetcher.run = original_method
+
+
def tearDown(self):
super().tearDown()
ApiFetchProjectHosts.v2_auth_pwd = self._v2_auth_pwd
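
The deepcopy introduced in test_add_compute_host_type keeps the module-level
HOST_DOC fixture pristine between tests, unlike
test_add_host_type_with_existent_host_type, which still mutates HOST_DOC in
place. A generic illustration of the difference (not repository code):

    import copy

    FIXTURE = {'host_type': ['Compute'], 'zone': 'osdna-zone'}

    def run_case():
        doc = copy.deepcopy(FIXTURE)     # mutate the copy, never the fixture
        doc['host_type'].append('Controller')
        return doc

    run_case()
    assert FIXTURE['host_type'] == ['Compute']   # unchanged across cases
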
diff --git a/app/test/fetch/api_fetch/test_data/api_fetch_host_project_hosts.py b/app/test/fetch/api_fetch/test_data/api_fetch_host_project_hosts.py
index 3ef1ac7..ba42590 100644
--- a/app/test/fetch/api_fetch/test_data/api_fetch_host_project_hosts.py
+++ b/app/test/fetch/api_fetch/test_data/api_fetch_host_project_hosts.py
@@ -223,3 +223,24 @@ GET_FOR_REGION_INFO = [
"zone": "osdna-zone"
}
]
+
+OS_DETAILS_INPUT = """
+NAME="Ubuntu"
+VERSION="16.04 LTS (Xenial Xerus)"
+ID=ubuntu
+ID_LIKE=debian
+PRETTY_NAME="Ubuntu 16.04 LTS"
+VERSION_ID="16.04"
+HOME_URL="http://www.ubuntu.com/"
+SUPPORT_URL="http://help.ubuntu.com/"
+BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"
+UBUNTU_CODENAME=xenial
+ARCHITECURE=x86_64
+"""
+OS_DETAILS = {
+ 'name': 'Ubuntu',
+ 'version': '16.04 LTS (Xenial Xerus)',
+ 'ID': 'ubuntu',
+ 'ID_LIKE': 'debian',
+ 'architecure': 'x86_64'
+}
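
A hedged sketch of the os-release parsing these fixtures exercise; the real
fetch_host_os_details lives in api_fetch_project_hosts.py and is not shown in
this patch. The key mapping, including the 'ARCHITECURE'/'architecure'
spelling, is inferred from the fixtures above:

    def parse_os_details(text: str) -> dict:
        attributes = {}
        for line in text.splitlines():
            if '=' not in line:
                continue
            key, _, value = line.partition('=')
            attributes[key] = value.strip('"')
        wanted = {'NAME': 'name', 'VERSION': 'version', 'ID': 'ID',
                  'ID_LIKE': 'ID_LIKE', 'ARCHITECURE': 'architecure'}
        return {out: attributes[src] for src, out in wanted.items()
                if src in attributes}

    assert parse_os_details(OS_DETAILS_INPUT) == OS_DETAILS
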
diff --git a/app/test/fetch/api_fetch/test_data/api_fetch_networks.py b/app/test/fetch/api_fetch/test_data/api_fetch_networks.py
index 5079a92..38c60a3 100644
--- a/app/test/fetch/api_fetch/test_data/api_fetch_networks.py
+++ b/app/test/fetch/api_fetch/test_data/api_fetch_networks.py
@@ -21,6 +21,7 @@ NETWORKS_RESPONSE = {
NETWORKS_RESULT = [
{
+ "type": "network",
"id": "8673c48a-f137-4497-b25d-08b7b218fd17",
"subnets": {
"test23": {
diff --git a/app/test/fetch/api_fetch/test_data/api_fetch_ports.py b/app/test/fetch/api_fetch/test_data/api_fetch_ports.py
index fc0552c..bb1d89f 100644
--- a/app/test/fetch/api_fetch/test_data/api_fetch_ports.py
+++ b/app/test/fetch/api_fetch/test_data/api_fetch_ports.py
@@ -26,6 +26,7 @@ PORTS_RESULT_WITH_NET = [
"name": "fa:16:3e:d7:c5:16",
"network_id": "b6fd5175-4b22-4256-9b1a-9fc4b9dce1fe",
"tenant_id": "75c0eb79ff4a42b0ae4973c8375ddf40",
+ "type": "port",
"master_parent_type": "network",
"master_parent_id": "b6fd5175-4b22-4256-9b1a-9fc4b9dce1fe",
"parent_type": "ports_folder",
@@ -41,6 +42,7 @@ PORTS_RESULT_WITHOUT_NET = [
"name": "16620a58-c48c-4195-b9c1-779a8ba2e6f8",
"network_id": "b6fd5175-4b22-4256-9b1a-9fc4b9dce1fe",
"tenant_id": "75c0eb79ff4a42b0ae4973c8375ddf40",
+ "type": "port",
"master_parent_type": "network",
"master_parent_id": "b6fd5175-4b22-4256-9b1a-9fc4b9dce1fe",
"parent_type": "ports_folder",
@@ -56,6 +58,7 @@ PORTS_RESULT_WITH_PROJECT = [
"name": "fa:16:3e:d7:c5:16",
"network_id": "b6fd5175-4b22-4256-9b1a-9fc4b9dce1fe",
"tenant_id": "75c0eb79ff4a42b0ae4973c8375ddf40",
+ "type": "port",
"master_parent_type": "network",
"master_parent_id": "b6fd5175-4b22-4256-9b1a-9fc4b9dce1fe",
"parent_type": "ports_folder",
diff --git a/app/test/fetch/db_fetch/test_data/db_fetch_oteps.py b/app/test/fetch/db_fetch/test_data/db_fetch_oteps.py
index 6940c61..b0467a7 100644
--- a/app/test/fetch/db_fetch/test_data/db_fetch_oteps.py
+++ b/app/test/fetch/db_fetch/test_data/db_fetch_oteps.py
@@ -128,5 +128,5 @@ OTEP_WITH_CONNECTOR = {
"br-tun": {
}
},
- "vconnector": "br-mesh"
+ "vconnector": "node-5.cisco.com-br-mesh"
}
diff --git a/app/test/fetch/link_finders/__init__.py b/app/test/fetch/link_finders/__init__.py
new file mode 100644
index 0000000..b0637e9
--- /dev/null
+++ b/app/test/fetch/link_finders/__init__.py
@@ -0,0 +1,9 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
diff --git a/app/test/fetch/link_finders/test_data/__init__.py b/app/test/fetch/link_finders/test_data/__init__.py
new file mode 100644
index 0000000..b0637e9
--- /dev/null
+++ b/app/test/fetch/link_finders/test_data/__init__.py
@@ -0,0 +1,9 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
diff --git a/app/test/fetch/link_finders/test_data/test_find_implicit_links.py b/app/test/fetch/link_finders/test_data/test_find_implicit_links.py
new file mode 100644
index 0000000..aef20f6
--- /dev/null
+++ b/app/test/fetch/link_finders/test_data/test_find_implicit_links.py
@@ -0,0 +1,303 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+ENV = 'env1'
+CLIQUE_CONSTRAINTS = [
+ {
+ 'focal_point_type': 'instance',
+ 'constraints': ['network']
+ },
+ {
+ 'focal_point_type': 'dummy1',
+ 'constraints': []
+ },
+ {
+ 'focal_point_type': 'dummy2',
+ 'constraints': ['network', 'dummy_constraint']
+ },
+ {
+ 'focal_point_type': 'dummy3',
+ 'constraints': ['dummy_constraint2']
+ }
+]
+CONSTRAINTS = ['network', 'dummy_constraint', 'dummy_constraint2']
+
+LINK_ATTRIBUTES_NONE = {}
+LINK_ATTRIBUTES_NONE_2 = {}
+LINK_ATTRIBUTES_EMPTY = {'attributes': []}
+LINK_ATTR_V1 = {'attributes': {'network': 'v1'}}
+LINK_ATTR_V1_2 = {'attributes': {'network': 'v1'}}
+LINK_ATTR_V2 = {'attributes': {'network': 'v2'}}
+LINK_ATTR_V1_AND_A2V2 = {'attributes': {'network': 'v1', 'attr2': 'v2'}}
+
+LINK_TYPE_1 = {
+ 'link_type': 'instance-vnic',
+ 'source_id': 'instance1',
+ 'target_id': 'vnic1'
+}
+LINK_TYPE_1_REVERSED = {
+ 'link_type': 'instance-vnic',
+ 'source_id': 'vnic1',
+ 'target_id': 'instance1'
+}
+LINK_TYPE_1_2 = {
+ 'link_type': 'instance-vnic',
+ 'source_id': 'instance1',
+ 'target_id': 'vnic2'
+}
+LINK_TYPE_2 = {
+ 'link_type': 'vnic-vconnector',
+ 'source_id': 'vnic1',
+ 'target_id': 'vconnector1'
+}
+LINK_TYPE_3 = {
+ 'implicit': True,
+ 'link_type': 'instance-vconnector',
+ 'source_id': 'instance1',
+ 'target_id': 'vconnector1'
+}
+LINK_TYPE_4_NET1 = {
+ 'environment': ENV,
+ 'implicit': True,
+ 'link_type': 'instance-host_pnic',
+ 'source': 'instance1_dbid',
+ 'source_id': 'instance1',
+ 'target': 'host_pnic1_dbid',
+ 'target_id': 'host_pnic1',
+ 'host': 'host1',
+ 'link_name': '',
+ 'state': 'up',
+ 'source_label': '',
+ 'target_label': '',
+ 'link_weight': 0,
+ 'attributes': {'network': 'netID1'}
+}
+LINK_TYPE_5_NET2 = {
+ 'environment': ENV,
+ 'link_type': 'host_pnic-switch',
+ 'source_id': 'host_pnic1',
+ 'target': 'switch1_dbid',
+ 'target_id': 'switch1',
+ 'host': 'host2',
+ 'link_name': '',
+ 'state': 'up',
+ 'source_label': '',
+ 'target_label': '',
+ 'link_weight': 0,
+ 'attributes': {'network': 'netID2'}
+}
+LINK_TYPE_6_NET1 = {
+ 'environment': ENV,
+ 'link_type': 'host_pnic-switch',
+ 'source': 'host_pnic1_dbid',
+ 'source_id': 'host_pnic1',
+ 'target': 'switch2_dbid',
+ 'target_id': 'switch2',
+ 'host': 'host1',
+ 'link_name': '',
+ 'state': 'up',
+ 'source_label': '',
+ 'target_label': '',
+ 'link_weight': 0,
+ 'attributes': {'network': 'netID1'}
+}
+LINK_TYPE_7_NET1 = {
+ 'environment': ENV,
+ 'implicit': True,
+ 'link_type': 'instance-switch',
+ 'source': 'instance1_dbid',
+ 'source_id': 'instance1',
+ 'target': 'switch2_dbid',
+ 'target_id': 'switch2',
+ 'host': 'host1',
+ 'link_name': '',
+ 'state': 'up',
+ 'source_label': '',
+ 'target_label': '',
+ 'link_weight': 0,
+ 'attributes': {'network': 'netID1'}
+}
+
+LINK_FULL_A2B = {
+ 'environment': ENV,
+ 'link_type': 'instance-vnic',
+ 'source': 'instance1_dbid',
+ 'source_id': 'instance1',
+ 'target': 'vnic1_dbid',
+ 'target_id': 'vnic1',
+ 'host': 'host1',
+ 'link_name': '',
+ 'state': 'up',
+ 'source_label': '',
+ 'target_label': '',
+ 'link_weight': 0,
+ 'attributes': {'network': 'netID1'}
+}
+LINK_FULL_B2C = {
+ 'environment': ENV,
+ 'link_type': 'vnic-vconnector',
+ 'source': 'vnic1_dbid',
+ 'source_id': 'vnic1',
+ 'target': 'vconnector1_dbid',
+ 'target_id': 'vconnector1',
+ 'host': 'host1',
+ 'link_name': '',
+ 'state': 'up',
+ 'source_label': '',
+ 'target_label': '',
+ 'link_weight': 0,
+ 'attributes': {'network': 'netID1'}
+}
+LINK_FULL_C2D = {
+ 'environment': ENV,
+ 'link_type': 'vconnector-vedge',
+ 'source': 'vconnector1_dbid',
+ 'source_id': 'vconnector1',
+ 'target': 'vedge1_dbid',
+ 'target_id': 'vedge1',
+ 'host': 'host1',
+ 'link_name': '',
+ 'state': 'up',
+ 'source_label': '',
+ 'target_label': '',
+ 'link_weight': 0,
+ 'attributes': {'network': 'netID1'}
+}
+LINK_FULL_D2E = {
+ 'environment': ENV,
+ 'link_type': 'vedge-otep',
+ 'source': 'vedge1_dbid',
+ 'source_id': 'vedge1',
+ 'target': 'otep1_dbid',
+ 'target_id': 'otep1',
+ 'host': 'host1',
+ 'link_name': '',
+ 'state': 'up',
+ 'source_label': '',
+ 'target_label': '',
+ 'link_weight': 0,
+ 'attributes': {'network': 'netID1'}
+}
+LINK_FULL_A2C = {
+ 'environment': ENV,
+ 'implicit': True,
+ 'link_type': 'instance-vconnector',
+ 'source': 'instance1_dbid',
+ 'source_id': 'instance1',
+ 'target': 'vconnector1_dbid',
+ 'target_id': 'vconnector1',
+ 'host': 'host1',
+ 'link_name': '',
+ 'state': 'up',
+ 'source_label': '',
+ 'target_label': '',
+ 'link_weight': 0,
+ 'attributes': {'network': 'netID1'}
+}
+LINK_FULL_B2D = {
+ 'environment': ENV,
+ 'implicit': True,
+ 'link_type': 'vnic-vedge',
+ 'source': 'vnic1_dbid',
+ 'source_id': 'vnic1',
+ 'target': 'vedge1_dbid',
+ 'target_id': 'vedge1',
+ 'host': 'host1',
+ 'link_name': '',
+ 'state': 'up',
+ 'source_label': '',
+ 'target_label': '',
+ 'link_weight': 0,
+ 'attributes': {'network': 'netID1'}
+}
+LINK_FULL_C2E = {
+ 'environment': ENV,
+ 'implicit': True,
+ 'link_type': 'vconnector-otep',
+ 'source': 'vconnector1_dbid',
+ 'source_id': 'vconnector1',
+ 'target': 'otep1_dbid',
+ 'target_id': 'otep1',
+ 'host': 'host1',
+ 'link_name': '',
+ 'state': 'up',
+ 'source_label': '',
+ 'target_label': '',
+ 'link_weight': 0,
+ 'attributes': {'network': 'netID1'}
+}
+LINK_FULL_A2D = {
+ 'environment': ENV,
+ 'implicit': True,
+ 'link_type': 'instance-vedge',
+ 'source': 'instance1_dbid',
+ 'source_id': 'instance1',
+ 'target': 'vedge1_dbid',
+ 'target_id': 'vedge1',
+ 'host': 'host1',
+ 'link_name': '',
+ 'state': 'up',
+ 'source_label': '',
+ 'target_label': '',
+ 'link_weight': 0,
+ 'attributes': {'network': 'netID1'}
+}
+LINK_FULL_B2E = {
+ 'environment': ENV,
+ 'implicit': True,
+ 'link_type': 'vnic-otep',
+ 'source': 'vnic1_dbid',
+ 'source_id': 'vnic1',
+ 'target': 'otep1_dbid',
+ 'target_id': 'otep1',
+ 'host': 'host1',
+ 'link_name': '',
+ 'state': 'up',
+ 'source_label': '',
+ 'target_label': '',
+ 'link_weight': 0,
+ 'attributes': {'network': 'netID1'}
+}
+LINK_FULL_A2E = {
+ 'environment': ENV,
+ 'implicit': True,
+ 'link_type': 'instance-otep',
+ 'source': 'instance1_dbid',
+ 'source_id': 'instance1',
+ 'target': 'otep1_dbid',
+ 'target_id': 'otep1',
+ 'host': 'host1',
+ 'link_name': '',
+ 'state': 'up',
+ 'source_label': '',
+ 'target_label': '',
+ 'link_weight': 0,
+ 'attributes': {'network': 'netID1'}
+}
+BASE_LINKS = [
+ {'pass': 0, 'link': LINK_FULL_A2B},
+ {'pass': 0, 'link': LINK_FULL_B2C},
+ {'pass': 0, 'link': LINK_FULL_C2D},
+ {'pass': 0, 'link': LINK_FULL_D2E},
+]
+IMPLICIT_LINKS = [
+ [
+ {'pass': 1, 'link': LINK_FULL_A2C},
+ {'pass': 1, 'link': LINK_FULL_B2D},
+ {'pass': 1, 'link': LINK_FULL_C2E},
+ ],
+ [
+ {'pass': 2, 'link': LINK_FULL_A2D},
+ {'pass': 2, 'link': LINK_FULL_B2E},
+ ],
+ [
+ {'pass': 3, 'link': LINK_FULL_A2E},
+ ],
+ []
+]
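
The IMPLICIT_LINKS fixture encodes a transitive closure over BASE_LINKS: pass 1
joins adjacent base links (A2B plus B2C gives A2C), pass 2 joins across one
intermediate, and so on until a pass yields nothing. A minimal sketch of one
such join step (the real logic lives in find_implicit_links.py):

    def compose(links):
        # Join links whose target matches another link's source
        found = []
        for a in links:
            for b in links:
                if a['link']['target_id'] == b['link']['source_id']:
                    found.append((a['link']['source_id'],
                                  b['link']['target_id']))
        return found

    first_pass = compose(BASE_LINKS)
    assert ('instance1', 'vconnector1') in first_pass   # A2B + B2C -> A2C
    assert ('vconnector1', 'otep1') in first_pass       # C2D + D2E -> C2E
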
diff --git a/app/test/fetch/link_finders/test_find_implicit_links.py b/app/test/fetch/link_finders/test_find_implicit_links.py
new file mode 100644
index 0000000..9931688
--- /dev/null
+++ b/app/test/fetch/link_finders/test_find_implicit_links.py
@@ -0,0 +1,107 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import bson
+
+from discover.link_finders.find_implicit_links import FindImplicitLinks
+from test.fetch.test_fetch import TestFetch
+from unittest.mock import MagicMock
+from test.fetch.link_finders.test_data.test_find_implicit_links import *
+
+from utils.inventory_mgr import InventoryMgr
+
+
+class TestFindImplicitLinks(TestFetch):
+
+ def setUp(self):
+ super().setUp()
+ self.configure_environment()
+ self.fetcher = FindImplicitLinks()
+ self.fetcher.set_env(ENV)
+ self.fetcher.constraint_attributes = ['network']
+ self.original_write_link = self.inv.write_link
+ self.inv.write_link = lambda x: x
+ self.original_objectid = bson.ObjectId
+ bson.ObjectId = lambda x: x
+
+ def tearDown(self):
+ super().tearDown()
+ bson.ObjectId = self.original_objectid
+ self.inv.write_link = self.original_write_link
+
+ def test_get_constraint_attributes(self):
+ original_find = InventoryMgr.find
+ InventoryMgr.find = MagicMock(return_value=CLIQUE_CONSTRAINTS)
+ constraint_types = self.fetcher.get_constraint_attributes()
+ self.assertEqual(sorted(constraint_types), sorted(CONSTRAINTS))
+ InventoryMgr.find = original_find
+
+ def test_constraints_match(self):
+ matcher = self.fetcher.constraints_match
+ self.assertTrue(matcher(LINK_ATTRIBUTES_NONE, LINK_ATTRIBUTES_NONE_2))
+ self.assertTrue(matcher(LINK_ATTRIBUTES_NONE, LINK_ATTRIBUTES_EMPTY))
+ self.assertTrue(matcher(LINK_ATTRIBUTES_NONE, LINK_ATTR_V1))
+ self.assertTrue(matcher(LINK_ATTRIBUTES_EMPTY, LINK_ATTR_V1))
+ self.assertTrue(matcher(LINK_ATTR_V1, LINK_ATTR_V1_2))
+ self.assertTrue(matcher(LINK_ATTR_V1,
+ LINK_ATTR_V1_AND_A2V2))
+ self.assertFalse(matcher(LINK_ATTR_V1, LINK_ATTR_V2))
+
+ def test_links_match(self):
+ matcher = self.fetcher.links_match
+ self.assertFalse(matcher(LINK_TYPE_1, LINK_TYPE_1_2))
+ self.assertFalse(matcher(LINK_TYPE_1, LINK_TYPE_1_REVERSED))
+ self.assertFalse(matcher(LINK_TYPE_4_NET1, LINK_TYPE_5_NET2))
+ self.assertFalse(matcher(LINK_TYPE_1_2, LINK_TYPE_2))
+ self.assertTrue(matcher(LINK_TYPE_1, LINK_TYPE_2))
+
+ def test_get_link_constraint_attributes(self):
+ getter = self.fetcher.get_link_constraint_attributes
+ self.assertEqual(getter(LINK_TYPE_1, LINK_TYPE_1_2), {})
+ self.assertEqual(getter(LINK_TYPE_1, LINK_TYPE_4_NET1),
+ LINK_TYPE_4_NET1.get('attributes'))
+ self.assertEqual(getter(LINK_TYPE_4_NET1, LINK_TYPE_1),
+ LINK_TYPE_4_NET1.get('attributes'))
+ self.assertEqual(getter(LINK_TYPE_1, LINK_TYPE_5_NET2),
+ LINK_TYPE_5_NET2.get('attributes'))
+ self.assertEqual(getter(LINK_TYPE_4_NET1, LINK_TYPE_6_NET1),
+ LINK_TYPE_4_NET1.get('attributes'))
+
+ def test_get_attr(self):
+ getter = self.fetcher.get_attr
+ self.assertIsNone(getter('host', {}, {}))
+ self.assertIsNone(getter('host', {'host': 'v1'}, {'host': 'v2'}))
+ self.assertEqual(getter('host', {'host': 'v1'}, {}), 'v1')
+ self.assertEqual(getter('host', {}, {'host': 'v2'}), 'v2')
+ self.assertEqual(getter('host', {'host': 'v1'}, {'host': 'v1'}), 'v1')
+
+ def test_add_implicit_link(self):
+ original_write_link = self.inv.write_link
+ self.inv.write_link = lambda x: x
+ original_objectid = bson.ObjectId
+ bson.ObjectId = lambda x: x
+ add_func = self.fetcher.add_implicit_link
+ self.assertEqual(add_func(LINK_TYPE_4_NET1, LINK_TYPE_6_NET1),
+ LINK_TYPE_7_NET1)
+ bson.ObjectId = original_objectid
+ self.inv.write_link = original_write_link
+
+ def test_get_transitive_closure(self):
+ self.fetcher.links = [
+ {'pass': 0, 'link': LINK_FULL_A2B},
+ {'pass': 0, 'link': LINK_FULL_B2C},
+ {'pass': 0, 'link': LINK_FULL_C2D},
+ {'pass': 0, 'link': LINK_FULL_D2E},
+ ]
+ self.fetcher.get_transitive_closure()
+ for pass_no in range(1, len(IMPLICIT_LINKS)):
+ implicit_links = [l for l in self.fetcher.links
+ if l['pass'] == pass_no]
+ self.assertEqual(implicit_links, IMPLICIT_LINKS[pass_no-1],
+ 'incorrect links for pass #{}'.format(pass_no))
diff --git a/app/test/scan/test_data/configurations.py b/app/test/scan/test_data/configurations.py
index 96dbc23..044ff0b 100644
--- a/app/test/scan/test_data/configurations.py
+++ b/app/test/scan/test_data/configurations.py
@@ -47,6 +47,7 @@ CONFIGURATIONS = {
"provision": "Deploy",
"env_type": "development",
"name": "Monitoring",
+ "install_monitoring_client": True,
"rabbitmq_port": "5672",
"rabbitmq_pass": "osdna",
"rabbitmq_user": "sensu",
diff --git a/app/test/scan/test_data/scanner.py b/app/test/scan/test_data/scanner.py
index 23838aa..ed2129f 100644
--- a/app/test/scan/test_data/scanner.py
+++ b/app/test/scan/test_data/scanner.py
@@ -10,13 +10,25 @@
import queue
from discover.fetchers.folder_fetcher import FolderFetcher
-
SCANNER_TYPE_FOR_ENV = "ScanEnvironment"
METADATA = {
"scanners_package": "discover",
"scanners": {}
}
+LINK_FINDERS_METADATA = {
+ "finders_package": "discover.link_finders",
+ "base_finder": "FindLinks",
+ "link_finders": [
+ "FindLinksForInstanceVnics",
+ "FindLinksForOteps",
+ "FindLinksForVconnectors",
+ "FindLinksForVedges",
+ "FindLinksForVserviceVnics",
+ "FindLinksForPnics",
+ "FindImplicitLinks"
+ ]
+}
TYPE_TO_FETCH = {
"type": "host_pnic",
@@ -51,7 +63,8 @@ TYPES_TO_FETCH = [
},
{
"type": "network_services_folder",
- "fetcher": FolderFetcher("network_services", "network", "Network vServices")
+ "fetcher": FolderFetcher("network_services", "network",
+ "Network vServices")
}
]
@@ -149,9 +162,6 @@ TYPES_TO_FETCHES_FOR_SCAN_AGGREGATE = [{
"fetcher": "DbFetchAggregateHosts"
}]
-
-
-
# id = 'RegionOne-aggregates'
# obj = self.inv.get_by_id(self.env, id)
obj = {'id': 'Mirantis-Liberty-Nvn'}
@@ -159,7 +169,6 @@ id_field = 'id'
child_id = '',
child_type = ''
-
child_data = [
{
'id_path': '/Mirantis-Liberty-Nvn/Mirantis-Liberty-Nvn-regions',
@@ -178,15 +187,23 @@ child_data = [
]
PARENT = {
- "environment" : "Mirantis-Liberty-Xiaocong",
- "id" : "node-6.cisco.com-vservices-dhcps",
- "name" : "node-6.cisco.com-vservices-dhcps",
- "object_name" : "DHCP servers",
- "parent_id" : "node-6.cisco.com-vservices",
- "parent_type" : "vservices_folder",
- "show_in_tree" : True,
- "text" : "DHCP servers",
- "type" : "vservice_dhcps_folder"
+ "environment": "Mirantis-Liberty-Xiaocong",
+ "id": "node-6.cisco.com-vservices-dhcps",
+ "id_path": "/Mirantis-Liberty-Xiaocong/Mirantis-Liberty-Xiaocong-regions"
+ "/RegionOne/RegionOne-availability_zones"
+ "/internal/node-6.cisco.com"
+ "/node-6.cisco.com-vservices/node-6.cisco.com-vservices-dhcps",
+ "name": "node-6.cisco.com-vservices-dhcps",
+ "name_path": "/Mirantis-Liberty-Xiaocong/Regions"
+ "/RegionOne/Availability Zones"
+ "/internal/node-6.cisco.com"
+ "/vServices/DHCP servers",
+ "object_name": "DHCP servers",
+ "parent_id": "node-6.cisco.com-vservices",
+ "parent_type": "vservices_folder",
+ "show_in_tree": True,
+ "text": "DHCP servers",
+ "type": "vservice_dhcps_folder"
}
PARENT_WITHOUT_ID = {
@@ -272,7 +289,6 @@ DB_RESULTS_WITHOUT_MASTER_PARENT_IN_DB = [
}
]
-
DICTIONARY_DB_RESULTS = {
"name": "Mirantis-Liberty-Xiaocong-regions",
"parent_type": "environment",
@@ -283,18 +299,22 @@ DICTIONARY_DB_RESULTS = {
}
MASTER_PARENT = {
- "create_object" : True,
- "environment" : "Mirantis-Liberty-Xiaocong",
- "id" : "node-6.cisco.com-vservices",
- "id_path" : "/Mirantis-Liberty/Mirantis-Liberty-regions/RegionOne/RegionOne-availability_zones/internal/node-6.cisco.com/node-6.cisco.com-vservices",
- "name" : "Vservices",
- "name_path" : "/Mirantis-Liberty/Regions/RegionOne/Availability Zones/internal/node-6.cisco.com/Vservices",
- "object_name" : "Vservices",
- "parent_id" : "node-6.cisco.com",
- "parent_type" : "host",
- "show_in_tree" : True,
- "text" : "Vservices",
- "type" : "vservices_folder"
+ "create_object": True,
+ "environment": "Mirantis-Liberty-Xiaocong",
+ "id": "node-6.cisco.com-vservices",
+ "id_path": "/Mirantis-Liberty/Mirantis-Liberty-regions"
+ "/RegionOne/RegionOne-availability_zones"
+ "/internal/node-6.cisco.com/node-6.cisco.com-vservices",
+ "name": "Vservices",
+ "name_path": "/Mirantis-Liberty/Regions"
+ "/RegionOne/Availability Zones"
+ "/internal/node-6.cisco.com/Vservices",
+ "object_name": "Vservices",
+ "parent_id": "node-6.cisco.com",
+ "parent_type": "host",
+ "show_in_tree": True,
+ "text": "Vservices",
+ "type": "vservices_folder"
}
CONFIGURATIONS_WITHOUT_MECHANISM_DRIVERS = {
diff --git a/app/test/scan/test_scan_metadata_parser.py b/app/test/scan/test_scan_metadata_parser.py
index 91c11ef..5d91306 100644
--- a/app/test/scan/test_scan_metadata_parser.py
+++ b/app/test/scan/test_scan_metadata_parser.py
@@ -104,6 +104,8 @@ class TestScanMetadataParser(TestScan):
'input': METADATA_SCANNER_INCORRECT_FETCHER,
'msg': 'failed to find fetcher class f1 '
'in scanner ScanAggregate type #1'
+ ' (could not import module discover.fetchers.f1.f1: '
+ 'No module named \'discover.fetchers.f1\')'
},
{
'input': METADATA_SCANNER_WITH_INCORRECT_CHILD,
diff --git a/app/test/scan/test_scanner.py b/app/test/scan/test_scanner.py
index 4a7536e..bd1a0e3 100644
--- a/app/test/scan/test_scanner.py
+++ b/app/test/scan/test_scanner.py
@@ -10,6 +10,9 @@
from discover.scanner import Scanner
from test.scan.test_scan import TestScan
from unittest.mock import MagicMock, patch
+
+from discover.link_finders.find_links_metadata_parser \
+ import FindLinksMetadataParser
from discover.scan_metadata_parser import ScanMetadataParser
from test.scan.test_data.scanner import *
from monitoring.setup.monitoring_setup_manager import MonitoringSetupManager
@@ -19,7 +22,10 @@ class TestScanner(TestScan):
def setUp(self):
super().setUp()
- ScanMetadataParser.parse_metadata_file = MagicMock(return_value=METADATA)
+ ScanMetadataParser.parse_metadata_file = \
+ MagicMock(return_value=METADATA)
+ FindLinksMetadataParser.parse_metadata_file = \
+ MagicMock(return_value=LINK_FINDERS_METADATA)
self.scanner = Scanner()
self.scanner.set_env(self.env)
MonitoringSetupManager.create_setup = MagicMock()
@@ -182,9 +188,11 @@ class TestScanner(TestScan):
# store original method
original_set = self.scanner.inv.set
+ original_get_by_id = self.scanner.inv.get_by_id
# mock method
self.scanner.inv.set = MagicMock()
+ self.scanner.inv.get_by_id = MagicMock(return_value=PARENT)
self.scanner.scan_type(TYPE_TO_FETCH_FOR_ENVIRONMENT, PARENT, ID_FIELD)
self.assertIn("projects", DB_RESULTS_WITH_PROJECT[0],
@@ -193,43 +201,53 @@ class TestScanner(TestScan):
"Can't delete the project key in the object")
self.scanner.inv.set = original_set
+ self.scanner.inv.get_by_id = original_get_by_id
@patch("discover.fetchers.folder_fetcher.FolderFetcher.get")
def test_scan_type_without_create_object(self, fetcher_get):
fetcher_get.return_value = DB_RESULTS_WITHOUT_CREATE_OBJECT
original_set = self.scanner.inv.set
+ original_get_by_id = self.scanner.inv.get_by_id
self.scanner.inv.set = MagicMock()
+ self.scanner.inv.get_by_id = MagicMock(return_value=PARENT)
self.scanner.scan_type(TYPE_TO_FETCH_FOR_ENVIRONMENT, PARENT, ID_FIELD)
self.assertEqual(self.scanner.inv.set.call_count, 0,
"Set the object when the create object is false")
self.scanner.inv.set = original_set
+ self.scanner.inv.get_by_id = original_get_by_id
@patch("discover.fetchers.folder_fetcher.FolderFetcher.get")
def test_scan_type_with_create_object(self, fetcher_get):
fetcher_get.return_value = DB_RESULTS_WITH_CREATE_OBJECT
original_set = self.scanner.inv.set
+ original_get_by_id = self.scanner.inv.get_by_id
self.scanner.inv.set = MagicMock()
+ self.scanner.inv.get_by_id = MagicMock(return_value=PARENT)
+
self.scanner.scan_type(TYPE_TO_FETCH_FOR_ENVIRONMENT, PARENT, ID_FIELD)
self.assertEqual(self.scanner.inv.set.call_count, 1,
"Set the object when the create object is false")
self.scanner.inv.set = original_set
+ self.scanner.inv.get_by_id = original_get_by_id
@patch("discover.fetchers.folder_fetcher.FolderFetcher.get")
def test_scan_type_with_children_scanner(self, fetcher_get):
fetcher_get.return_value = DB_RESULTS_WITH_CREATE_OBJECT
original_set = self.scanner.inv.set
+ original_get_by_id = self.scanner.inv.get_by_id
original_queue_for_scan = self.scanner.queue_for_scan
self.scanner.inv.set = MagicMock()
+ self.scanner.inv.get_by_id = MagicMock(return_value=PARENT)
self.scanner.queue_for_scan = MagicMock()
self.scanner.scan_type(TYPE_TO_FETCH_FOR_ENVIRONMENT, PARENT, ID_FIELD)
@@ -238,6 +256,7 @@ class TestScanner(TestScan):
"Can't put children scanner in the queue")
self.scanner.inv.set = original_set
+ self.scanner.inv.get_by_id = original_get_by_id
self.scanner.queue_for_scan = original_queue_for_scan
@patch("discover.fetchers.folder_fetcher.FolderFetcher.get")
@@ -245,9 +264,11 @@ class TestScanner(TestScan):
fetcher_get.return_value = DB_RESULTS_WITH_CREATE_OBJECT
original_set = self.scanner.inv.set
+ original_get_by_id = self.scanner.inv.get_by_id
original_queue_for_scan = self.scanner.queue_for_scan
self.scanner.inv.set = MagicMock()
+ self.scanner.inv.get_by_id = MagicMock(return_value=PARENT)
self.scanner.queue_for_scan = MagicMock()
self.scanner.scan_type(TYPE_TO_FETCH_FOR_ENV_WITHOUT_CHILDREN_FETCHER,
@@ -257,6 +278,7 @@ class TestScanner(TestScan):
"Can't put children scanner in the queue")
self.scanner.inv.set = original_set
+ self.scanner.inv.get_by_id = original_get_by_id
self.scanner.queue_for_scan = original_queue_for_scan
@patch("discover.fetchers.folder_fetcher.FolderFetcher.get")
@@ -264,9 +286,11 @@ class TestScanner(TestScan):
fetcher_get.return_value = DB_RESULTS_WITH_CREATE_OBJECT
original_set = self.scanner.inv.set
+ original_get_by_id = self.scanner.inv.get_by_id
original_queue_for_scan = self.scanner.queue_for_scan
self.scanner.inv.set = MagicMock()
+ self.scanner.inv.get_by_id = MagicMock(return_value=PARENT)
self.scanner.queue_for_scan = MagicMock()
result = self.scanner.scan_type(TYPE_TO_FETCH_FOR_ENVIRONMENT, PARENT,
@@ -275,6 +299,7 @@ class TestScanner(TestScan):
self.assertNotEqual(result, [], "Can't get children form scan_type")
self.scanner.inv.set = original_set
+ self.scanner.inv.get_by_id = original_get_by_id
self.scanner.queue_for_scan = original_queue_for_scan
def test_scan_with_limit_to_child_type(self):
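
The save/mock/restore triplets repeated across the tests above could be
centralized with patch.object plus addCleanup, which restores the attribute
even when a test fails midway. A self-contained illustration (Inv and the
test class are hypothetical, not repository code):

    import unittest
    from unittest.mock import MagicMock, patch

    class Inv:
        def get_by_id(self, env, _id):
            raise RuntimeError('would hit MongoDB')

    class ExampleTest(unittest.TestCase):
        def setUp(self):
            self.inv = Inv()
            patcher = patch.object(self.inv, 'get_by_id',
                                   MagicMock(return_value={'id': 'PARENT'}))
            patcher.start()
            self.addCleanup(patcher.stop)   # undone even on failure

        def test_lookup(self):
            self.assertEqual(self.inv.get_by_id('env1', 'x')['id'], 'PARENT')
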
diff --git a/app/test/verify.sh b/app/test/verify.sh
index a7ac9a2..684195e 100755
--- a/app/test/verify.sh
+++ b/app/test/verify.sh
@@ -8,7 +8,18 @@
# which accompanies this distribution, and is available at #
# http://www.apache.org/licenses/LICENSE-2.0 #
###############################################################################
-set -o errexit
-set -o nounset
-set -o pipefail
+#set -o errexit
+#set -o nounset
+#set -o pipefail
+
+#sudo yum install -y https://centos7.iuscommunity.org/ius-release.rpm
+#sudo yum -y install python35
+#sudo pip install virtualenv
+#virtualenv -p $(which python3.5) $WORKSPACE/venv
+#. $WORKSPACE/venv/bin/activate
+
+PYTHONPATH=$PWD/app python3 -m unittest discover -s app/test/api
+PYTHONPATH=$PWD/app python3 -m unittest discover -s app/test/event_based_scan
PYTHONPATH=$PWD/app python3 -m unittest discover -s app/test/fetch
+PYTHONPATH=$PWD/app python3 -m unittest discover -s app/test/scan
+PYTHONPATH=$PWD/app python3 -m unittest discover -s app/test/utils
diff --git a/app/utils/api_access_base.py b/app/utils/api_access_base.py
new file mode 100644
index 0000000..31f50b4
--- /dev/null
+++ b/app/utils/api_access_base.py
@@ -0,0 +1,51 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import requests
+
+from discover.configuration import Configuration
+from discover.fetcher import Fetcher
+
+
+class ApiAccessBase(Fetcher):
+
+ CONNECT_TIMEOUT = 5
+
+ def __init__(self, api_name=None, config=None):
+ super().__init__()
+ if api_name is None:
+ raise ValueError('ApiAccessBase: api_name must be defined')
+ self.config = {api_name: config} if config else Configuration()
+ self.api_config = self.config.get(api_name)
+ if self.api_config is None:
+ raise ValueError('ApiAccessBase: section "{}" missing in config'
+ .format(api_name))
+ self.host = self.api_config.get('host', '')
+ self.port = self.api_config.get('port', '')
+ if not (self.host and self.port):
+ raise ValueError('Missing definition of host or port ' +
+ 'for {} API access'
+ .format(api_name))
+
+ def get_rel_url(self, relative_url, headers):
+ req_url = self.base_url + relative_url
+ return self.get_url(req_url, headers)
+
+ def get_url(self, req_url, headers):
+ response = requests.get(req_url, headers=headers)
+ if response.status_code != requests.codes.ok:
+ # some error happened
+ if response.reason:
+ msg = ', reason: {}'.format(response.reason)
+ else:
+ msg = ', response: {}'.format(response.text)
+ self.log.error('req_url: {} {}'.format(req_url, msg))
+ return None
+ ret = response.json()
+ return ret
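
A note on usage: get_rel_url() relies on a base_url attribute that
ApiAccessBase itself never sets, so concrete subclasses are expected to
define it. A minimal sketch, assuming a hypothetical 'dummy' config
section and illustrative host/port values:

    class DummyApiAccess(ApiAccessBase):
        def __init__(self, config=None):
            # 'dummy' is a hypothetical config section name
            super().__init__(api_name='dummy', config=config)
            # get_rel_url() needs base_url, which subclasses must define
            self.base_url = 'http://{}:{}'.format(self.host, self.port)

    api = DummyApiAccess(config={'host': '10.0.0.1', 'port': '8080'})
    zones = api.get_rel_url('/zones', headers={})  # returns None on HTTP error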
diff --git a/app/utils/dict_naming_converter.py b/app/utils/dict_naming_converter.py
index 91fea2e..d0f8d42 100644
--- a/app/utils/dict_naming_converter.py
+++ b/app/utils/dict_naming_converter.py
@@ -8,6 +8,7 @@
# http://www.apache.org/licenses/LICENSE-2.0 #
###############################################################################
from bson.objectid import ObjectId
+from datetime import datetime
class DictNamingConverter:
@@ -20,21 +21,33 @@ class DictNamingConverter:
# Returns:
# Dictionary with the new keys.
@staticmethod
- def change_dict_naming_convention(d, cf):
+ def change_dict_naming_convention(d, cf, level: int=0):
new = {}
+ change_convention = DictNamingConverter.change_dict_naming_convention
if not d:
return d
- if isinstance(d, str):
+ if isinstance(d, (str, int, float, bool, datetime)):
return d
if isinstance(d, ObjectId):
return d
- for k, v in d.items():
- new_v = v
- if isinstance(v, dict):
- new_v = DictNamingConverter.change_dict_naming_convention(v, cf)
- elif isinstance(v, list):
- new_v = list()
- for x in v:
- new_v.append(DictNamingConverter.change_dict_naming_convention(x, cf))
- new[cf(k)] = new_v
+ if not isinstance(d, dict):
+ for k in dir(d):
+ if k.startswith('_'):
+ continue
+ v = getattr(d, k)
+ if callable(v):
+ continue
+ new[cf(k)] = change_convention(v, cf, level+1)
+ if isinstance(d, dict):
+ for k, v in d.items():
+ new_v = v
+ if isinstance(v, dict):
+ new_v = change_convention(v, cf, level+1)
+ elif isinstance(v, list):
+ new_v = list()
+ for x in v:
+ list_val = change_convention(x, cf, level+1)
+ new_v.append(list_val)
+ new[cf(k)] = new_v
return new
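
To illustrate the recursion above: a small sketch converting snake_case
keys to camelCase, where to_camel is an illustrative caller-supplied cf
function (no such function ships with the converter itself):

    def to_camel(key):
        head, *tail = key.split('_')
        return head + ''.join(part.title() for part in tail)

    doc = {'source_label': 'eth0', 'extra_attributes': {'link_weight': [1, 2]}}
    out = DictNamingConverter.change_dict_naming_convention(doc, to_camel)
    # out == {'sourceLabel': 'eth0', 'extraAttributes': {'linkWeight': [1, 2]}}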
diff --git a/app/utils/inventory_mgr.py b/app/utils/inventory_mgr.py
index 722d0aa..97b6cd4 100644
--- a/app/utils/inventory_mgr.py
+++ b/app/utils/inventory_mgr.py
@@ -265,6 +265,7 @@ class InventoryMgr(MongoAccess, metaclass=Singleton):
# source_label, target_label: labels for the ends of the link (optional)
def create_link(self, env, src, source_id, target, target_id,
link_type, link_name, state, link_weight,
+ implicit=False,
source_label="", target_label="",
host=None, switch=None,
extra_attributes=None):
@@ -282,6 +283,7 @@ class InventoryMgr(MongoAccess, metaclass=Singleton):
"link_weight": link_weight,
"source_label": source_label,
"target_label": target_label,
+ "implicit": implicit,
"attributes": extra_attributes if extra_attributes else {}
}
if host:
@@ -347,16 +349,18 @@ class InventoryMgr(MongoAccess, metaclass=Singleton):
if not env_config:
return False
- # Workaround for mechanism_drivers field type
- mechanism_driver = env_config['mechanism_drivers'][0] \
- if isinstance(env_config['mechanism_drivers'], list) \
- else env_config['mechanism_drivers']
+ # Workarounds for mechanism_drivers and distribution_version field types
+ mechanism_driver = env_config.get('mechanism_drivers')
+ if isinstance(mechanism_driver, list):
+ mechanism_driver = mechanism_driver[0]
+ env_distribution_version = env_config.get('distribution_version')
+ if isinstance(env_distribution_version, list):
+ env_distribution_version = env_distribution_version[0]
full_env = {
- 'environment.distribution': env_config['distribution'],
- 'environment.distribution_version':
- {"$in": [env_config['distribution_version']]},
- 'environment.type_drivers': env_config['type_drivers'],
+ 'environment.distribution': env_config.get('distribution'),
+ 'environment.distribution_version': env_distribution_version,
+ 'environment.type_drivers': env_config.get('type_drivers'),
'environment.mechanism_drivers': mechanism_driver
}
return self.is_feature_supported_in_env(full_env, feature)
@@ -385,36 +389,9 @@ class InventoryMgr(MongoAccess, metaclass=Singleton):
parent_id_path = parent.get("id_path", "/{}".format(environment))
parent_name_path = parent.get("name_path", "/{}".format(environment))
- try:
- # case of dynamic folder added by need
- master_parent_type = o["master_parent_type"]
- master_parent_id = o["master_parent_id"]
- master_parent = self.get_by_id(environment, master_parent_id)
- if not master_parent:
- self.log.error("failed to find master parent " +
- master_parent_id)
+ if 'master_parent_type' in o:
+ if not self.create_parent_folder(o, parent):
return False
- folder_id_path = "/".join((master_parent["id_path"], o["parent_id"]))
- folder_name_path = "/".join((master_parent["name_path"], o["parent_text"]))
- folder = {
- "environment": parent["environment"],
- "parent_id": master_parent_id,
- "parent_type": master_parent_type,
- "id": o["parent_id"],
- "id_path": folder_id_path,
- "show_in_tree": True,
- "name_path": folder_name_path,
- "name": o["parent_id"],
- "type": o["parent_type"],
- "text": o["parent_text"]
- }
- # remove master_parent_type & master_parent_id after use,
- # as they're there just ro help create the dynamic folder
- o.pop("master_parent_type", True)
- o.pop("master_parent_id", True)
- self.set(folder)
- except KeyError:
- pass
if o.get("text"):
o["name"] = o["text"]
@@ -453,6 +430,42 @@ class InventoryMgr(MongoAccess, metaclass=Singleton):
if "create_object" not in o or o["create_object"]:
# add/update object in DB
self.set(o)
- if self.is_feature_supported(environment, EnvironmentFeatures.MONITORING):
+ if self.is_feature_supported(environment,
+ EnvironmentFeatures.MONITORING):
self.monitoring_setup_manager.create_setup(o)
return True
+
+ def create_parent_folder(self, o, parent) -> bool:
+ # case of dynamic folder added by need
+ master_parent_type = o["master_parent_type"]
+ master_parent_id = o["master_parent_id"]
+ env_path = '/{}'.format(parent['environment'])
+ master_parent = {'id_path': env_path, 'name_path': env_path} \
+ if master_parent_type == 'environment' \
+ else self.get_by_id(o['environment'], master_parent_id)
+ if not master_parent:
+ self.log.error("failed to find master parent " +
+ master_parent_id)
+ return False
+ folder_id_path = "/".join((master_parent['id_path'],
+ o["parent_id"]))
+ folder_name_path = "/".join((master_parent["name_path"],
+ o["parent_text"]))
+ folder = {
+ "environment": parent["environment"],
+ "parent_id": master_parent_id,
+ "parent_type": master_parent_type,
+ "id": o["parent_id"],
+ "id_path": folder_id_path,
+ "show_in_tree": True,
+ "name_path": folder_name_path,
+ "name": o["parent_id"],
+ "type": o["parent_type"],
+ "text": o["parent_text"]
+ }
+ # remove master_parent_type & master_parent_id after use,
+ # as they're there just to help create the dynamic folder
+ o.pop("master_parent_type", True)
+ o.pop("master_parent_id", True)
+ self.set(folder)
+ return True
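
For reference, a hedged sketch of the input shape the extracted
create_parent_folder() expects; every value below is invented for
illustration, and real objects carry additional fields:

    parent = {'environment': 'env1'}
    o = {
        'environment': 'env1',
        'master_parent_type': 'host',   # 'environment' skips the DB lookup
        'master_parent_id': 'node-1.local',
        'parent_id': 'node-1.local-vservices',
        'parent_type': 'vservices_folder',
        'parent_text': 'vServices',
    }
    # inv is assumed to be an InventoryMgr instance; the two
    # master_parent_* keys are popped from o once the folder is saved
    inv.create_parent_folder(o, parent)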
diff --git a/app/utils/logging/console_logger.py b/app/utils/logging/console_logger.py
index bb8b2ed..b1008e4 100644
--- a/app/utils/logging/console_logger.py
+++ b/app/utils/logging/console_logger.py
@@ -18,4 +18,3 @@ class ConsoleLogger(Logger):
super().__init__(logger_name="{}-Console".format(self.PROJECT_NAME),
level=level)
self.add_handler(logging.StreamHandler())
-
diff --git a/app/utils/logging/full_logger.py b/app/utils/logging/full_logger.py
index 411eceb..f6fe5fa 100644
--- a/app/utils/logging/full_logger.py
+++ b/app/utils/logging/full_logger.py
@@ -10,34 +10,62 @@
import logging
import logging.handlers
+from utils.origins import Origin
from utils.logging.logger import Logger
from utils.logging.mongo_logging_handler import MongoLoggingHandler
class FullLogger(Logger):
- def __init__(self, env: str = None, log_file: str = None,
- level: str = Logger.default_level):
+ def __init__(self, env: str = None, origin: Origin = None,
+ log_file: str = None, level: str = Logger.default_level):
super().__init__(logger_name="{}-Full".format(self.PROJECT_NAME),
level=level)
+ self.env = env
+ self.origin = origin
# Console handler
self.add_handler(logging.StreamHandler())
# Message handler
- self.add_handler(MongoLoggingHandler(env, self.level))
+ self.add_handler(MongoLoggingHandler(env=env, origin=origin,
+ level=self.level))
# File handler
if log_file:
self.add_handler(logging.handlers.WatchedFileHandler(log_file))
+ def _get_message_handler(self):
+ defined_handlers = [h for h in self.log.handlers
+ if isinstance(h, MongoLoggingHandler)]
+ return defined_handlers[0] if defined_handlers else None
+
# Make sure we update MessageHandler with new env
def set_env(self, env):
- super().set_env(env)
+ self.env = env
- defined_handler = [h for h in self.log.handlers
- if isinstance(h, MongoLoggingHandler)]
- if defined_handler:
- defined_handler[0].env = env
+ handler = self._get_message_handler()
+ if handler:
+ handler.env = env
else:
self.add_handler(MongoLoggingHandler(env, self.level))
+
+ def set_origin(self, origin: Origin):
+ self.origin = origin
+
+ handler = self._get_message_handler()
+ if handler:
+ handler.origin = origin
+ else:
+ self.add_handler(MongoLoggingHandler(env=self.env,
+ level=self.level,
+ origin=origin))
+
+ def setup(self, **kwargs):
+ env = kwargs.get('env')
+ if env and self.env != env:
+ self.set_env(env)
+
+ origin = kwargs.get('origin')
+ if origin and self.origin != origin:
+ self.set_origin(origin)
\ No newline at end of file
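
The new setup() hook lets callers refresh env and origin in one call; a
brief usage sketch (environment names and file path are illustrative,
and Origin comes from utils.origins, not shown in this diff):

    logger = FullLogger(env='Mirantis-9.0', log_file='/tmp/calipso_full.log')
    logger.setup(env='Mirantis-9.0')   # no-op: env is unchanged
    logger.setup(env='RDO-packstack')  # switches the Mongo handler's env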
diff --git a/app/utils/logging/logger.py b/app/utils/logging/logger.py
index 316d3fd..9628040 100644
--- a/app/utils/logging/logger.py
+++ b/app/utils/logging/logger.py
@@ -34,11 +34,12 @@ class Logger(ABC):
level=level)
self.log.propagate = False
self.set_loglevel(level)
- self.env = None
self.level = level
- def set_env(self, env):
- self.env = env
+ # Subclasses should override this method
+ # to perform runtime changes to handlers, etc.
+ def setup(self, **kwargs):
+ pass
@staticmethod
def check_level(level):
diff --git a/app/utils/logging/message_logger.py b/app/utils/logging/message_logger.py
index 02e098f..d433a0f 100644
--- a/app/utils/logging/message_logger.py
+++ b/app/utils/logging/message_logger.py
@@ -18,4 +18,18 @@ class MessageLogger(Logger):
def __init__(self, env: str = None, level: str = None):
super().__init__(logger_name="{}-Message".format(self.PROJECT_NAME),
level=level)
+ self.env = env
self.add_handler(MongoLoggingHandler(env, self.level))
+
+ def set_env(self, env):
+ self.env = env
+
+ if self.log.handlers:
+ self.log.handlers[0].env = env
+ else:
+ self.add_handler(MongoLoggingHandler(env, self.level))
+
+ def setup(self, **kwargs):
+ env = kwargs.get('env')
+ if env and self.env != env:
+ self.set_env(env)
diff --git a/app/utils/logging/mongo_logging_handler.py b/app/utils/logging/mongo_logging_handler.py
index ffb6f85..3929e02 100644
--- a/app/utils/logging/mongo_logging_handler.py
+++ b/app/utils/logging/mongo_logging_handler.py
@@ -11,9 +11,9 @@ import datetime
import logging
from messages.message import Message
+from utils.origins import Origin
from utils.inventory_mgr import InventoryMgr
from utils.logging.logger import Logger
-from utils.string_utils import stringify_datetime
class MongoLoggingHandler(logging.Handler):
@@ -22,11 +22,12 @@ class MongoLoggingHandler(logging.Handler):
"""
SOURCE_SYSTEM = 'Calipso'
- def __init__(self, env: str, level: str):
+ def __init__(self, env: str, level: str, origin: Origin = None):
super().__init__(Logger.get_numeric_level(level))
self.str_level = level
self.env = env
self.inv = None
+ self.origin = origin
def emit(self, record):
# Try to invoke InventoryMgr for logging
@@ -46,7 +47,22 @@ class MongoLoggingHandler(logging.Handler):
d = now - datetime.datetime(1970, 1, 1)
timestamp_id = '{}.{}.{}'.format(d.days, d.seconds, d.microseconds)
source = self.SOURCE_SYSTEM
+
message = Message(msg_id=timestamp_id, env=self.env, source=source,
msg=Logger.formatter.format(record), ts=now,
level=record.levelname)
+ if self.origin:
+ message.extra['origin_id'] = (
+ str(self.origin.origin_id)
+ if self.origin.origin_id
+ else None
+ )
+ message.extra['origin_type'] = (
+ self.origin.origin_type.value
+ if self.origin.origin_type
+ else None
+ )
+ for extra_field in self.origin.extra:
+ message.extra[extra_field] = getattr(self.origin, extra_field)
+
self.inv.collections['messages'].insert_one(message.get())
\ No newline at end of file
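
To show how the origin block above feeds message.extra, a sketch using
SimpleNamespace as a stand-in for the real Origin class from
utils.origins (field names are illustrative):

    from types import SimpleNamespace

    origin = SimpleNamespace(origin_id='12345', origin_type=None,
                             extra=['scan_id'], scan_id='scan-7')
    # With this origin, emit() would record:
    #   message.extra['origin_id'] == '12345'
    #   message.extra['origin_type'] is None   (origin_type is unset)
    #   message.extra['scan_id'] == 'scan-7'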
diff --git a/app/utils/mongo_access.py b/app/utils/mongo_access.py
index d4599f1..75c265c 100644
--- a/app/utils/mongo_access.py
+++ b/app/utils/mongo_access.py
@@ -36,8 +36,10 @@ class MongoAccess(DictNamingConverter):
def __init__(self):
super().__init__()
- self.log_file = os.path.join(FileLogger.LOG_DIRECTORY,
- MongoAccess.LOG_FILENAME)
+ log_dir = FileLogger.LOG_DIRECTORY \
+ if os.path.isdir(FileLogger.LOG_DIRECTORY) \
+ else os.path.abspath('.')
+ self.log_file = os.path.join(log_dir, MongoAccess.LOG_FILENAME)
try:
self.log = FileLogger(self.log_file)
diff --git a/app/utils/ssh_connection.py b/app/utils/ssh_connection.py
index e9dd39a..b9b1cde 100644
--- a/app/utils/ssh_connection.py
+++ b/app/utils/ssh_connection.py
@@ -22,6 +22,7 @@ class SshConnection(BinaryConverter):
max_call_count_per_con = 100
timeout = 15 # timeout for exec in seconds
+ CONNECT_TIMEOUT = 5
DEFAULT_PORT = 22
@@ -118,7 +119,8 @@ class SshConnection(BinaryConverter):
pkey=k,
port=self.port if self.port is not None
else self.DEFAULT_PORT,
- password=self.pwd, timeout=30)
+ password=self.pwd,
+ timeout=self.CONNECT_TIMEOUT)
else:
port = None
try:
@@ -127,7 +129,7 @@ class SshConnection(BinaryConverter):
username=self.user,
password=self.pwd,
port=port,
- timeout=30)
+ timeout=self.CONNECT_TIMEOUT)
except paramiko.ssh_exception.AuthenticationException:
self.log.error('Failed SSH connect to host {}, port={}'
.format(self.host, port))
diff --git a/app/utils/util.py b/app/utils/util.py
index ae7b518..788eba9 100644
--- a/app/utils/util.py
+++ b/app/utils/util.py
@@ -9,8 +9,6 @@
###############################################################################
import importlib
import signal
-from argparse import Namespace
-from typing import Dict, Callable
import os
import re
@@ -47,7 +45,6 @@ class ClassResolver:
class_name = ''.join(name_parts)
return class_name
-
@staticmethod
def get_fully_qualified_class(class_name: str = None,
package_name: str = "discover",
@@ -58,8 +55,9 @@ class ClassResolver:
module_name = ".".join(module_parts)
try:
class_module = importlib.import_module(module_name)
- except ImportError:
- raise ValueError('could not import module {}'.format(module_name))
+ except ImportError as e:
+ raise ValueError('could not import module {}: {}'
+ .format(module_name, str(e)))
clazz = getattr(class_module, class_name)
return clazz
@@ -74,7 +72,8 @@ class ClassResolver:
class_name = ClassResolver.get_class_name_by_module(module_name)
if class_name in ClassResolver.instances:
return 'instance', ClassResolver.instances[class_name]
- clazz = ClassResolver.get_fully_qualified_class(class_name, package_name,
+ clazz = ClassResolver.get_fully_qualified_class(class_name,
+ package_name,
module_name)
return 'class', clazz
@@ -128,8 +127,8 @@ def generate_object_ids(keys, obj):
# and convert them to dict to enforce uniformity.
# Throws a TypeError if arguments can't be converted to dict.
def setup_args(args: dict,
- defaults: Dict[str, object],
- get_cmd_args: Callable[[], Namespace] = None):
+ defaults,
+ get_cmd_args=None):
if defaults is None:
defaults = {}
@@ -170,3 +169,10 @@ def decode_aci_dn(object_id):
def get_object_path_part(path: str, part_name: str):
match = re.match(".*/{}/(.+?)/.*".format(part_name), path)
return match.group(1) if match else None
+
+
+def merge_dicts(*dicts):
+ result = {}
+ for dictionary in dicts:
+ result.update(dictionary)
+ return result
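
merge_dicts() gives later arguments precedence, since dict.update()
overwrites existing keys; for example:

    merged = merge_dicts({'a': 1, 'b': 2}, {'b': 3}, {'c': 4})
    # merged == {'a': 1, 'b': 3, 'c': 4} - the later 'b' wins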
diff --git a/calipso/tests/functest/smoke_test.py b/calipso/tests/functest/smoke_test.py
index 09b394f..c9bbaf2 100755
--- a/calipso/tests/functest/smoke_test.py
+++ b/calipso/tests/functest/smoke_test.py
@@ -17,7 +17,7 @@ import sys
from utils.binary_converter import BinaryConverter
-IMAGES_TO_SEARCH = ["ui", "sensu", "scan", "api", "ldap", "listen", "mongo"]
+IMAGES_TO_SEARCH = ["ui", "monitor", "scan", "api", "ldap", "listen", "mongo"]
class DockerImageCheck:
diff --git a/docs/_static/favicon.ico b/docs/_static/favicon.ico
deleted file mode 100755
index bbe55ab..0000000
--- a/docs/_static/favicon.ico
+++ /dev/null
Binary files differ
diff --git a/docs/_static/my-styles.css b/docs/_static/my-styles.css
deleted file mode 100644
index 8feb45b..0000000
--- a/docs/_static/my-styles.css
+++ /dev/null
@@ -1,33 +0,0 @@
-body {
- font-family: Helvetica, sans-serif;
- font-size: 16px;
-}
-
-body a {
- color: #27CCC0;
-}
-
-body a:hover {
- color: #676767;
-}
-
-.navbar-brand img {
- height: 200%;
- margin-top: -5%;
-}
-
-.navbar, h1, h2, h3, h4, h5, h6 {
- font-family: Helvetica, sans-serif;
-}
-
-.navbar-text{
- color: #676767;
-}
-
-.navbar-form.navbar-right{
- padding: 0;
-}
-
-.navbar-form .form-control{
- width: 150px;
-}
diff --git a/docs/_static/opnfv-logo.png b/docs/_static/opnfv-logo.png
deleted file mode 100644
index 1519503..0000000
--- a/docs/_static/opnfv-logo.png
+++ /dev/null
Binary files differ
diff --git a/docs/conf.py b/docs/conf.py
index 9d9f281..86ab8c5 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -1,282 +1 @@
-import sphinx_bootstrap_theme
-import os
-
-# If extensions (or modules to document with autodoc) are in another directory,
-# add these directories to sys.path here. If the directory is relative to the
-# documentation root, use os.path.abspath to make it absolute, like shown here.
-# -- General configuration ------------------------------------------------
-
-# If your documentation needs a minimal Sphinx version, state it here.
-# needs_sphinx = '1.0'
-needs_sphinx = '1.3'
-# Add any Sphinx extension module names here, as strings. They can be
-# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
-# ones.
-extensions = ['sphinxcontrib.httpdomain', 'sphinx.ext.autodoc',
- 'sphinx.ext.viewcode', 'sphinx.ext.napoleon']
-# Disable javasphinx generation until we have a solution to long build
-# times. readthedocs timesout after 902 seconds.
-
-# Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
-
-# The suffix(es) of source filenames.
-# You can specify multiple suffix as a list of string:
-# source_suffix = ['.rst', '.md']
-source_suffix = '.rst'
-
-# The encoding of source files.
-# source_encoding = 'utf-8-sig'
-
-# The master toctree document.
-master_doc = 'index'
-
-# General information about the project.
-project = ''
-copyright = '2017, OPNFV. Licensed under CC BY 4.0'
-author = 'Open Platform for NFV'
-
-# The version info for the project you're documenting, acts as replacement for
-# |version| and |release|, also used in various other places throughout the
-# built documents.
-#
-# The short X.Y version.
-version = 'Latest'
-# The full version, including alpha/beta/rc tags.
-release = 'Latest'
-
-# The language for content autogenerated by Sphinx. Refer to documentation
-# for a list of supported languages.
-#
-# This is also used if you do content translation via gettext catalogs.
-# Usually you set "language" from the command line for these cases.
-language = None
-
-# There are two options for replacing |today|: either, you set today to some
-# non-false value, then it is used:
-# today = ''
-# Else, today_fmt is used as the format for a strftime call.
-# today_fmt = '%B %d, %Y'
-
-# List of patterns, relative to source directory, that match files and
-# directories to ignore when looking for source files.
-# This patterns also effect to html_static_path and html_extra_path
-exclude_patterns = []
-
-# The reST default role (used for this markup: `text`) to use for all
-# documents.
-# default_role = None
-
-# If true, '()' will be appended to :func: etc. cross-reference text.
-# add_function_parentheses = True
-
-# If true, the current module name will be prepended to all description
-# unit titles (such as .. function::).
-# add_module_names = True
-
-# If true, sectionauthor and moduleauthor directives will be shown in the
-# output. They are ignored by default.
-# show_authors = False
-
-# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
-
-# A list of ignored prefixes for module index sorting.
-# modindex_common_prefix = []
-
-# If true, keep warnings as "system message" paragraphs in the built documents.
-# keep_warnings = False
-
-# If true, `todo` and `todoList` produce output, else they produce nothing.
-todo_include_todos = False
-
-
-# -- Options for HTML output ----------------------------------------------
-
-# The theme to use for HTML and HTML Help pages. See the documentation for
-# a list of builtin themes.
-html_theme = 'bootstrap'
-
-# Theme options are theme-specific and customize the look and feel of a theme
-# further. For a list of options available for each theme, see the
-# documentation.
-# html_theme_options = {}
-html_theme_options = {
- 'bootswatch_theme': "journal",
- 'navbar_sidebarrel': False,
-}
-
-# Add any paths that contain custom themes here, relative to this directory.
-# html_theme_path = []
-html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
-
-# The name for this set of Sphinx documents.
-# "<project> v<release> documentation" by default.
-# html_title = 'OpenDaylight Documentation v0.3.0'
-
-# A shorter title for the navigation bar. Default is the same as html_title.
-# html_short_title = None
-
-# The name of an image file (relative to this directory) to place at the top
-# of the sidebar.
-html_logo = '_static/opnfv-logo.png'
-
-# The name of an image file (relative to this directory) to use as a favicon of
-# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
-# pixels large.
-html_favicon = '_static/favicon.ico'
-
-# Add any paths that contain custom static files (such as style sheets) here,
-# relative to this directory. They are copied after the builtin static files,
-# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
-
-# Add any extra paths that contain custom files (such as robots.txt or
-# .htaccess) here, relative to this directory. These files are copied
-# directly to the root of the documentation.
-# html_extra_path = []
-
-# If not None, a 'Last updated on:' timestamp is inserted at every page
-# bottom, using the given strftime format.
-# The empty string is equivalent to '%b %d, %Y'.
-# html_last_updated_fmt = None
-
-# If true, SmartyPants will be used to convert quotes and dashes to
-# typographically correct entities.
-# html_use_smartypants = True
-
-# Custom sidebar templates, maps document names to template names.
-# html_sidebars = {}
-
-# Additional templates that should be rendered to pages, maps page names to
-# template names.
-# html_additional_pages = {}
-
-# If false, no module index is generated.
-# html_domain_indices = True
-
-# If false, no index is generated.
-# html_use_index = True
-
-# If true, the index is split into individual pages for each letter.
-# html_split_index = False
-
-# If true, links to the reST sources are added to the pages.
-# html_show_sourcelink = True
-
-# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-# html_show_sphinx = True
-
-# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
-# html_show_copyright = True
-
-# If true, an OpenSearch description file will be output, and all pages will
-# contain a <link> tag referring to it. The value of this option must be the
-# base URL from which the finished HTML is served.
-# html_use_opensearch = ''
-
-# This is the file name suffix for HTML files (e.g. ".xhtml").
-# html_file_suffix = None
-
-# Language to be used for generating the HTML full-text search index.
-# Sphinx supports the following languages:
-# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
-# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr', 'zh'
-# html_search_language = 'en'
-
-# A dictionary with options for the search language support, empty by default.
-# 'ja' uses this config value.
-# 'zh' user can custom change `jieba` dictionary path.
-# html_search_options = {'type': 'default'}
-
-# The name of a javascript file (relative to the configuration directory) that
-# implements a search results scorer. If empty, the default will be used.
-# html_search_scorer = 'scorer.js'
-
-# Output file base name for HTML help builder.
-htmlhelp_basename = 'OPNFV'
-
-# -- Options for LaTeX output ---------------------------------------------
-
-latex_elements = {
- # The paper size ('letterpaper' or 'a4paper').
- #'papersize': 'letterpaper',
-
- # The font size ('10pt', '11pt' or '12pt').
- #'pointsize': '10pt',
-
- # Additional stuff for the LaTeX preamble.
- #'preamble': '',
-
- # Latex figure (float) alignment
- #'figure_align': 'htbp',
-}
-
-# Grouping the document tree into LaTeX files. List of tuples
-# (source start file, target name, title,
-# author, documentclass [howto, manual, or own class]).
-latex_documents = [
- (master_doc, 'OPNFV.tex', 'OPNFV Documentation',
- 'OPNFV Project', 'manual'),
-]
-
-# The name of an image file (relative to this directory) to place at the top of
-# the title page.
-# latex_logo = None
-
-# For "manual" documents, if this is true, then toplevel headings are parts,
-# not chapters.
-# latex_use_parts = False
-
-# If true, show page references after internal links.
-# latex_show_pagerefs = False
-
-# If true, show URL addresses after external links.
-# latex_show_urls = False
-
-# Documents to append as an appendix to all manuals.
-# latex_appendices = []
-
-# If false, no module index is generated.
-# latex_domain_indices = True
-
-
-# -- Options for manual page output ---------------------------------------
-
-# One entry per manual page. List of tuples
-# (source start file, name, description, authors, manual section).
-man_pages = [
- (master_doc, 'OPNFVDocs', 'OPNFV Documentation',
- [author], 1)
-]
-
-# If true, show URL addresses after external links.
-# man_show_urls = False
-
-
-# -- Options for Texinfo output -------------------------------------------
-
-# Grouping the document tree into Texinfo files. List of tuples
-# (source start file, target name, title, author,
-# dir menu entry, description, category)
-texinfo_documents = [
- (master_doc, 'OPNFVDocs', 'OPNFV Documentation',
- author, 'OPNFV', 'One line description of project.',
- 'Miscellaneous'),
-]
-
-html_sidebars = {'**': ['localtoc.html', 'relations.html'],}
-# Documents to append as an appendix to all manuals.
-# texinfo_appendices = []
-
-# If false, no module index is generated.
-# texinfo_domain_indices = True
-
-# How to display URL addresses: 'footnote', 'no', or 'inline'.
-# texinfo_show_urls = 'footnote'
-
-# If true, do not generate a @detailmenu in the "Top" node's menu.
-# texinfo_no_detailmenu = False
-
-# intersphinx_mapping =
-# {'RTD':('http://opnfvdocsdemo.readthedocs.io/projects/', None)}
+from docs_conf.conf import * # flake8: noqa
diff --git a/docs/conf.yaml b/docs/conf.yaml
new file mode 100644
index 0000000..2a91f61
--- /dev/null
+++ b/docs/conf.yaml
@@ -0,0 +1,3 @@
+---
+project_cfg: opnfv
+project: calipso
diff --git a/docs/release/scenarios/os-nosdn-calipso-noha/apex-scenario-guide.rst b/docs/development/apex-scenario-guide.rst
index 50e4c60..50e4c60 100644
--- a/docs/release/scenarios/os-nosdn-calipso-noha/apex-scenario-guide.rst
+++ b/docs/development/apex-scenario-guide.rst
diff --git a/docs/index.rst b/docs/index.rst
index 7e960d8..53018dc 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -1,3 +1,5 @@
+.. _calipso-release-guide:
+
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. http://creativecommons.org/licenses/by/4.0
.. (c) OPNFV and others.
diff --git a/docs/release/Calipso-usage-stories.rst b/docs/release/Calipso-usage-stories.rst
new file mode 100644
index 0000000..4c0c753
--- /dev/null
+++ b/docs/release/Calipso-usage-stories.rst
@@ -0,0 +1,446 @@
+***The following stories are fictional, but they provide real examples
+of real problems faced today by cloud providers and show possible
+resolutions provided by Calipso:***
+
+***Enterprise use-case story (Calipso ‘S’ release):***
+
+Moz is a website publishing and management product. Moz provides
+reputation and popularity tracking, helps with distribution, listings
+and ratings, and provides content distribution for industry marketing.
+
+Moz considers moving their main content distribution application to be
+hosted on https://www.dreamhost.com/, which provides shared and
+dedicated IaaS and PaaS hosting based on OpenStack.
+
+As a major milestone in Moz’s due diligence for choosing Dreamhost, Moz
+acquires a shared hosting facility from Dreamhost that is
+cost-effective and stable. It includes 4 mid-sized Web servers, 4
+large-sized Application servers and 2 large-sized DB servers, connected
+using several networks, with some security services.
+
+Dreamhost executives instruct their infrastructure operations department
+to make sure a proper SLA and monitoring are in place, so that the due
+diligence and final production deployment of Moz’s services in the
+Dreamhost datacenter go well and Moz’s engineers receive an excellent
+service experience.
+
+Moz received the following SLA with their current VPS contract:
+
+- 97-day money back guarantee, in case of a single service down event
+ or any dissatisfaction.
+
+- 99.5 % uptime/availability with a weekly total downtime of 30
+ minutes.
+
+- 24/7/365 on-call service with a total of 6 hours MTTR.
+
+- Full HA for all networking services.
+
+- Managed VPS using own Control Panel IaaS provisioning with overall
+ health visibility.
+
+- Scalable RAM, starts with 1GB can grow per request to 16GB from
+ within control panel.
+
+- Guaranteed usage of SSD or equivalent speeds, storage capacity from
+ 30GB to 240GB.
+
+- Backup service based on cinder-backup and Ceph’s dedicated backup
+ volumes, with restoration time below 4 hours.
+
+Dreamhost’s operations team factored in all requirements and decided to
+include real-time monitoring and analysis of the VPS for Moz.
+
+One of the tools now used for the Moz environment in Dreamhost is
+Calipso, for virtual networking.
+
+Here are some benefits provided by Calipso for Dreamhost operations
+during service cycles:
+
+*Reporting:*
+
+Special handling of virtual networking is in place:
+
+- Dreamhost designed a certain virtual networking setup and
+ connectivity that provides the HA and performance required by the SLA
+ and decided on several physical locations for Moz’s virtual servers
+ in different availability zones.
+
+- Scheduling of discovery has been created, Calipso takes a snapshot of
+ Moz’s environment every Sunday at midnight, reporting on connectivity
+ among all 20 servers (10 main and 10 backups) and overall health of
+ that connectivity.
+
+- Every Sunday morning at 8am, before the week’s automatic
+ snapshotting, the NOC administrator runs a manual discovery and saves
+ that snapshot. She then runs a comparison check against last week’s
+ snapshot and against the initial design to find any gaps or changes
+ that might have happened due to other shared services deployments;
+ virtual instances and their connectivity are analyzed and reported
+ with Calipso’s topology and health monitoring.
+
+- Reports are saved for a bi-weekly report sent to Moz’s networking
+ engineers.
+
+ *Change management:*
+
+ If infrastructure changes need to happen on any virtual service
+ (routers, switches, firewalls etc.) or on any physical server or
+ physical switch, the following special guidelines apply:
+
+- Run a search on Calipso for the name of the virtual service, switch
+ or host. Look up whether the Moz environment is using this object
+ (using the object’s attributes).
+
+- Using Calipso’s impact analysis, file a report stating all of Moz’s
+ affected objects, on which host they run and to which switch they
+ connect, as affected by the planned change.
+
+- Run a clique-type scan, using the specific object as ‘focal-point’,
+ to create a dedicated topology with an accompanying health report
+ before conducting the change itself; use this as a *pre snapshot*.
+
+- Simulate the change, using Moz’s testing environment only; make sure
+ HA services are in place and downtime is confirmed to be within the
+ SLA boundaries.
+
+- Using all reports provided by Calipso, along with application and
+ storage reports, send a detailed change request to NOC and later to
+ the end-customer for review.
+
+- During the change, make sure HA is operational, by running the same
+ clique-type snapshotting every 10 minutes and running a comparison.
+
+- The NOC, while waiting for the change to complete, looks at Calipso’s
+ dashboard focused on Moz’s environment, monitoring results for the
+ service down event (as expected) and the impact on other objects in
+ the service chain - the entire Calipso clique for that object (as
+ expected).
+
+- Once operations has reported back to the NOC that the change is done,
+ run the same snapshotting again as a *post snapshot* and run a
+ comparison to make sure all virtual networking is back to the
+ ‘as designed’ stage and all networking services are back.
+
+**Example snapshot taken at one stage on Calipso for the Moz virtual
+networking:**
+
+|image0|
+
+ *Troubleshooting:*
+
+ Dreamhost NOC uses Calipso dashboards for Moz’s environment for
+ their daily health-check. Troubleshooting starts in these cases:
+
+1. When a failure is detected on Calipso for any of Moz’s objects on
+ their virtual networking topologies,
+
+2. When a service case has been opened by Moz with “High Priority,
+ service down” flag.
+
+3. The networking department needs to know which virtual services are
+ connected to which ACI switch ports.
+
+ The following actions are taken, using Calipso dashboards:
+
+- Kick off a discovery through the Calipso API for all objects related
+ to Moz.
+
+- For a service request with no Calipso error detected: using Calipso’s
+ impact analysis, create all cliques for all objects as focal point.
+
+- For an error detected by Calipso: using Calipso’s impact analysis,
+ create cliques for objects with errors as focal point.
+
+- The resulting cliques are then analyzed using the detailed messaging
+ facility in Calipso (looking deeply into any message generated
+ regarding the related objects).
+
+- Report with ACI ports to virtual services mappings is sent to
+ networking department for further analysis.
+
+ |image1|
+
+- If this is a failure on any physical device (host or switch) and/or
+ on any physical NIC (switch or host side), Calipso immediately points
+ this out and, using the specific set of messages generated, the
+ administrator can figure out the root cause (like optical failure,
+ driver, disconnect etc.).
+
+- For virtual object failures, Calipso saves time by pinpointing the
+ servers where the erroneous objects are running, and their previous
+ and new connectivity details.
+
+- Calipso alerts on dependencies for:
+
+1. All related objects in the clique for that object.
+
+2. Related hosts
+
+3. Related projects and networks
+
+4. Related application (\* in case Murano app has been added)
+
+- Administrators connect directly to the specific servers and, using
+ the specific object attributes, can start their manual
+ troubleshooting (actually fixing the software issues is not
+ currently part of the Calipso features).
+
+- The NOC operators approve closing the service ticket only when all
+ related Calipso cliques show up as healthy and connectivity is back
+ to its original “as designed” stage, as verified using older Calipso
+ snapshots.
+
+**Lookup of message – to – graph object in messaging facility:**
+
+|image2|
+
+**Finding the right object related to a specific logging/monitoring
+message**:
+
+|image3|
+
+***Service Provider use-case story (Calipso ‘P’ release):***
+
+BoingBoing is a specialized video casting service and blogging site. It
+uses several locations to run its service (regional hubs and a central
+corporate campus, some hosted and some private).
+
+BoingBoing contracted AT&T to build an NFV service for them, deployed on
+2 new hosted regional hubs, to be brought up dynamically for special
+sporting, news or cultural events. On each one of the 2 hosted virtual
+environments the following service chain is created:
+
+1. Two Vyatta 5600 virtual routers provide the front-end routing
+ aggregation function.
+
+2. Two Steelhead virtual WAN acceleration appliances, connected to the
+ central campus, for acceleration and caching of video casting
+ services.
+
+3. Two f5 BIG-IP Traffic Management (load balancing) virtual appliances.
+
+4. Two Cisco vASA for virtual firewall and remote-access VPN services.
+
+As a major milestone in BoingBoing’s due diligence for choosing the
+AT&T NFV service, BoingBoing acquires 2 shared hosting facilities and
+automatic service from AT&T, which is cost-effective and stable. This
+NFV service consists of a total of 16 virtual appliances across those 2
+sites, to be created on-demand and maintained with a certain SLA once
+provisioned; all NFV devices are connected using several networks,
+provisioned using the VPP ml2 driver on an OpenStack based environment.
+
+AT&T executives instruct their infrastructure operations department to
+make sure a proper SLA and monitoring are in place, so that the due
+diligence and final production deployment of BoingBoing’s services in
+the AT&T datacenters go well and BoingBoing’s engineers receive an
+excellent service experience.
+
+BoingBoing received the following SLA with their current VPS contract:
+
+- 30-day money back guarantee, in case of a single service down event
+ or any dissatisfaction.
+
+- 99.9 % uptime/availability with a weekly total downtime of 10
+ minutes.
+
+- 24/7/365 on-call service with a total of 2 hours MTTR.
+
+- Full HA for all networking services.
+
+- Managed service using Control Panel IaaS provisioning with overall
+ health visibility.
+
+- Dedicated RAM, from 16GB to 64GB, from within the control panel.
+
+- Guaranteed usage of SSD or equivalent speeds, storage capacity from
+ 10GB to 80GB.
+
+- Backup service based on cinder-backup and Ceph’s dedicated backup
+ volumes, with restoration time below 4 hours.
+
+- End-to-end throughput from central campus to dynamically created
+ regional sites to be always above 2Gbps, including all devices on the
+ service chain and the virtual networking in place.
+
+AT&T’s operations team factored in all requirements and decided to
+include real-time monitoring and analysis for the BoingBoing NFV
+environment.
+
+One of the tools now used for the BoingBoing environment in AT&T is
+Calipso, for virtual networking.
+
+Here are some benefits provided by Calipso for AT&T operations during
+service cycles:
+
+*Reporting:*
+
+Special handling of virtual networking is in place:
+
+- AT&T designed a certain virtual networking (SFC) setup and
+ connectivity that provides the HA and performance required by the SLA
+ and decided on several physical locations for BoingBoing’s virtual
+ appliances in different availability zones.
+
+- Scheduling of discovery has been created, Calipso takes a snapshot of
+ BoingBoing’s environment every Sunday at midnight, reporting on
+ connectivity among all 16 instances (8 per regional site, 4 pairs on
+ each) and overall health of that connectivity.
+
+- Every Sunday morning at 8am, before the week’s automatic
+ snapshotting, the NOC administrator runs a manual discovery and saves
+ that snapshot. She then runs a comparison check against last week’s
+ snapshot and against the initial design to find any gaps or changes
+ that might have happened due to other shared services deployments;
+ virtual instances and their connectivity are analyzed and reported
+ with Calipso’s topology and health monitoring.
+
+- Reports are saved for a bi-weekly report sent to BoingBoing’s
+ networking engineers.
+
+- Throughput is measured by a special traffic sampling technology
+ inside the VPP virtual switches and sent back to Calipso with
+ references to virtual objects and the topological inventory.
+ Dependencies are analyzed so that SFC topologies are visualized
+ across all sites, with a graphing facility on the Calipso UI to
+ visualize the throughput.
+
+ *Change management:*
+
+ If infrastructure changes need to happen on any virtual service
+ (NFV virtual appliances, internal routers, switches, firewalls etc.)
+ or on any physical server or physical switch, the following special
+ guidelines apply:
+
+- Run a lookup on the Calipso search engine for the name of the virtual
+ service, switch or host, including names of NFV appliances as updated
+ in the Calipso inventory by the NFV provisioning application. Look up
+ whether the BoingBoing environment is using this object (using the
+ object’s attributes).
+
+ **Running a lookup on Calipso search-engine**
+
+|image4|
+
+- Using Calipso’s impact analysis, file a report stating all of
+ BoingBoing’s affected objects, on which host they run and to which
+ switch they connect, as affected by the planned change.
+
+- Run a clique-type scan, using the specific object as ‘focal-point’,
+ to create a dedicated topology with an accompanying health report
+ before conducting the change itself; use this as a *pre snapshot*.
+
+- Simulate the change, using BoingBoing’s testing environment only;
+ make sure HA services are in place and downtime is confirmed to be
+ within the SLA boundaries.
+
+- Using all reports provided by Calipso, along with application and
+ storage reports, send a detailed change request to NOC and later to
+ the end-customer for review.
+
+- During the change, make sure HA is operational, by running the same
+ clique-type snapshotting every 10 minutes and running a comparison.
+
+- The NOC, while waiting for the change to complete, looks at Calipso’s
+ dashboard focused on BoingBoing’s environment, monitoring results for
+ the SFC service down event (as expected) and the impact on other
+ objects in the service chain - the entire Calipso clique for that
+ object (as expected).
+
+- Once operations has reported back to the NOC that the change is done,
+ run the same snapshotting again as a *post snapshot* and run a
+ comparison to make sure all virtual networking is back to the
+ ‘as designed’ stage and all networking services are back.
+
+**Example snapshot taken at one stage for the BoingBoing virtual
+networking and SFC:**
+
+|image5|
+
+ *Troubleshooting:*
+
+ AT&T NOC uses Calipso dashboards for BoingBoing’s environment for
+ their daily health-check. Troubleshooting starts in two cases:
+
+1. When a failure is detected on Calipso for any of BoingBoing’s objects
+ on their virtual networking topologies,
+
+2. When a service case has been opened by BoingBoing with “High
+ Priority, SFC down” flag.
+
+ The following actions are taken, using Calipso dashboards:
+
+- Kick off a discovery through the Calipso API for all objects related
+ to BoingBoing.
+
+- For a service request with no Calipso error detected: using Calipso’s
+ impact analysis, create all cliques for all objects as focal point.
+
+- For an error detected by Calipso: using Calipso’s impact analysis,
+ create cliques for objects with errors as focal point.
+
+- The resulting cliques are then analyzed using the detailed messaging
+ facility in Calipso (looking deeply into any message generated
+ regarding the related objects).
+
+- If this is a failure on any physical device (host or switch) and/or
+ on any physical NIC (switch or host side), Calipso immediately points
+ this out and, using the specific set of messages generated, the
+ administrator can figure out the root cause (like optical failure,
+ driver, disconnect etc.).
+
+- For virtual object failures, Calipso saves time by pinpointing the
+ servers where the erroneous objects are running, and their previous
+ and new connectivity details.
+
+- \*Sources of alerts: OpenStack, Calipso and Sensu are built-in
+ sources; other NFV-related monitoring and alerting sources can be
+ added to the Calipso messaging system.
+
+- Calipso alerts on dependencies for:
+
+1. All related objects in the clique for that object.
+
+2. Related hosts
+
+3. Related projects and networks
+
+4. Related NFV service and SFC (\* in case NFV tacker has been added)
+
+- Administrators connect directly to the specific servers and, using
+ the specific object attributes, can start their manual
+ troubleshooting (actually fixing the software issues is not
+ currently part of the Calipso features).
+
+- The NOC operators approve closing the service ticket only when all
+ related Calipso cliques show up as healthy and connectivity is back
+ to its original “as designed” stage, as verified using older Calipso
+ snapshots.
+
+**Calipso’s monitoring dashboard shows virtual services are back to
+operational state:**
+
+|image6|
+
+.. |image0| image:: media/image101.png
+ :width: 7.14372in
+ :height: 2.84375in
+.. |image1| image:: media/image102.png
+ :width: 6.99870in
+ :height: 2.87500in
+.. |image2| image:: media/image103.png
+ :width: 6.50000in
+ :height: 0.49444in
+.. |image3| image:: media/image104.png
+ :width: 6.50000in
+ :height: 5.43472in
+.. |image4| image:: media/image105.png
+ :width: 7.24398in
+ :height: 0.77083in
+.. |image5| image:: media/image106.png
+ :width: 6.50000in
+ :height: 3.58611in
+.. |image6| image:: media/image107.png
+ :width: 7.20996in
+ :height: 2.94792in
diff --git a/docs/release/apex-scenario-guide.rst b/docs/release/apex-scenario-guide.rst
new file mode 100644
index 0000000..c240b0a
--- /dev/null
+++ b/docs/release/apex-scenario-guide.rst
@@ -0,0 +1,282 @@
+| Calipso.io
+| Installation Guide
+
+|image0|
+
+Project “Calipso” tries to illuminate complex virtual networking with
+real time operational state visibility for large and highly distributed
+Virtual Infrastructure Management (VIM).
+
+We believe that Stability is driven by accurate Visibility.
+
+Calipso provides visible insights using smart discovery and virtual
+topological representation in graphs, with monitoring per object in the
+graph inventory to reduce error vectors and troubleshooting, maintenance
+cycles for VIM operators and administrators.
+
+Table of Contents
+
+Calipso.io Installation Guide
+
+1 Pre Requisites
+
+1.1 Pre Requisites for Calipso “all in one” application
+
+1.2 Pre Requisites for Calipso UI application
+
+2 Installation Option used with Apex
+
+2.1 Micro Services App, single line install
+
+3 OPNFV Scenario
+
+3.1 APEX automatic configurator and setup
+
+3.2 Apex scenario
+
+3.3 Calipso functest
+
+TBD
+
+Pre Requisites
+===============
+
+Pre Requisites for Calipso “all in one” application
+----------------------------------------------------
+
+ Calipso’s main application is written in Python 3.5 for Linux
+ servers, tested successfully on CentOS 7.3 and Ubuntu 16.04. When
+ running as micro-services, many of the required software packages
+ and libraries are delivered per micro-service, but for the “all in
+ one” application case there are several dependencies.
+
+ Here is a list of the required software packages, and the official
+ supported steps required to install them:
+
+1. Python3.5.x for Linux :
+ https://docs.python.org/3.5/using/unix.html#on-linux
+
+2. Pip for Python3 : https://docs.python.org/3/installing/index.html
+
+3. Python3 packages to install using pip3 :
+
+ **sudo pip3 install falcon (>1.1.0)**
+
+ **sudo pip3 install pymongo (>3.4.0)**
+
+ **sudo pip3 install gunicorn (>19.6.0)**
+
+ **sudo pip3 install ldap3 (>2.1.1)**
+
+ **sudo pip3 install setuptools (>34.3.2)**
+
+ **sudo pip3 install python3-dateutil (>2.5.3-2)**
+
+ **sudo pip3 install bcrypt (>3.1.1)**
+
+ **sudo pip3 install bson**
+
+ **sudo pip3 install websocket**
+
+ **sudo pip3 install datetime**
+
+ **sudo pip3 install typing**
+
+ **sudo pip3 install kombu**
+
+ **sudo pip3 install boltons**
+
+ **sudo pip3 install paramiko**
+
+ **sudo pip3 install requests**
+
+ **sudo pip3 install httplib2**
+
+ **sudo pip3 install mysql.connector**
+
+ **sudo pip3 install xmltodict**
+
+ **sudo pip3 install cryptography**
+
+ **sudo pip3 install docker**
+
+4. Git : https://git-scm.com/book/en/v2/Getting-Started-Installing-Git
+
+5. Docker : https://docs.docker.com/engine/installation/
+
+Pre Requisites for Calipso UI application
+------------------------------------------
+
+ Calipso UI is developed and maintained using Meteor Framework
+ (https://www.meteor.com/tutorials). For stability and manageability
+ reasons we decided to always build the latest Calipso UI as a Docker
+ container pre-parameterized for stable and supported behavior. The
+ required steps for installing the Calipso UI with several options
+ are listed below.
+
+Installation Option used with Apex
+==================================
+
+Micro Services App, single line install
+---------------------------------------
+
+ For most users, this will be the fastest and most reliable install
+ option. We currently have Calipso divided into 7 major containers,
+ which are installed using a single installer. The Calipso containers
+ are pre-packaged and fully customized per our design needs. Here are
+ the required steps for installation using this option:
+
+1. Follow steps 1-5 per section 1.1 above.
+
+2. Install Docker : https://docs.docker.com/engine/installation/
+
+3. Install the following python3 libraries using pip3 : docker, pymongo
+
+4. Although the Calipso installer can download all needed containers if
+ they don’t already exist locally, we recommend doing a manual
+ download of all 7 containers, which provides better control and
+ logging:
+ **sudo docker login** # use your DockerHub username and password to
+ login.
+
+ **sudo docker pull korenlev/calipso:scan** # scan container used to
+ scan VIM
+
+ **sudo docker pull korenlev/calipso:listen** # listen container to
+ attach to VIM’s BUS.
+
+ **sudo docker pull korenlev/calipso:api** # api container for
+ application integration
+
+ **sudo docker pull korenlev/calipso:sensu** # sensu server container
+ for monitoring
+
+ **sudo docker pull korenlev/calipso:mongo** # calipso mongo DB
+ container
+
+ **sudo docker pull korenlev/calipso:ui** # calipso ui container
+
+ **sudo docker pull korenlev/calipso:ldap** # calipso ldap container
+
+5. Check that all containers were downloaded and registered
+ successfully:
+
+ **sudo docker images**
+
+ Expected results (As of Aug 2017):
+
+ **REPOSITORY TAG IMAGE ID CREATED SIZE**
+
+ **korenlev/calipso listen 12086aaedbc3 6 hours ago 1.05GB**
+
+ **korenlev/calipso api 34c4c6c1b03e 6 hours ago 992MB**
+
+ **korenlev/calipso scan 1ee60c4e61d5 6 hours ago 1.1GB**
+
+ **korenlev/calipso sensu a8a17168197a 6 hours ago 1.65GB**
+
+ **korenlev/calipso mongo 17f2d62f4445 22 hours ago 1.31GB**
+
+ **korenlev/calipso ui ab37b366e812 11 days ago 270MB**
+
+ **korenlev/calipso ldap 316bc94b25ad 2 months ago 269MB**
+
+6. Run the calipso installer using single-line arguments:
+
+ **python3 calipso/app/install/calipso-installer.py --command
+ start-all --copy q**
+
+ This should launch all calipso modules in sequence along with all
+ needed configuration files placed in /home/calipso.
+
+OPNFV Scenario
+===============
+
+Although Calipso is designed for any VIM, and for enterprise use-cases
+too, service providers can use an additional capability to install
+Calipso with Apex for OPNFV.
+
+APEX automatic configurator and setup
+-------------------------------------
+
+ When using Apex to install OPNFV, the Triple-O based OpenStack is
+ installed automatically, and Calipso installation can be initiated
+ automatically after Apex completes the VIM installation process for
+ a certain scenario.
+
+ In this case setup\_apex\_environment.py can be used to create a new
+ environment automatically in Calipso’s MongoDB and UI (instead of
+ using the Calipso UI to do that, as a typical user would). Detailed
+ scanning can then start immediately. The following options are
+ available for setup\_apex\_environment.py:
+
+ **-m [MONGO\_CONFIG], --mongo\_config [MONGO\_CONFIG]**
+
+ **name of config file with MongoDB server access details**
+
+ **(Default: /local\_dir/calipso\_mongo\_access.conf)**
+
+ **-d [CONFIG\_DIR], --config\_dir [CONFIG\_DIR]**
+
+ **path to directory with config data (Default:**
+
+ **/home/calipso/apex\_setup\_files)**
+
+ **-i [INSTALL\_DB\_DIR], --install\_db\_dir [INSTALL\_DB\_DIR]**
+
+ **path to directory with DB data (Default:**
+
+ **/home/calipso/Calipso/app/install/db)**
+
+ **-a [APEX], --apex [APEX]**
+
+ **name of environment to Apex host**
+
+ **-e [ENV], --env [ENV]**
+
+ **name of environment to create(Default: Apex-Euphrates)**
+
+ **-l [LOGLEVEL], --loglevel [LOGLEVEL]**
+
+ **logging level (default: "INFO")**
+
+ **-f [LOGFILE], --logfile [LOGFILE]**
+
+ **log file (default:**
+
+ **"/home/calipso/log/apex\_environment\_fetch.log")**
+
+ **-g [GIT], --git [GIT]**
+
+ **URL to clone Git repository (default:**
+
+ **https://git.opnfv.org/calipso)**
+
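+For example, a run that points at an Apex host and creates the default
+environment, with other options left at their defaults (the host name
+is illustrative)::
+
+    python3 setup_apex_environment.py -a apex-undercloud -e Apex-Euphrates
+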
+Apex scenario
+-------------
+
+ Starting with Euphrates 1.0, the following scenario is added with the
+ Apex installer:
+
+ **os-nosdn-calipso-noha**
+
+ The following CI jobs are defined:
+
+ https://build.opnfv.org/ci/job/calipso-verify-euphrates/
+
+ https://build.opnfv.org/ci/job/apex-testsuite-os-nosdn-calipso-noha-baremetal-euphrates/
+
+ https://build.opnfv.org/ci/job/apex-os-nosdn-calipso-noha-baremetal-euphrates/
+
+ Note: the destination deploy server needs to have the pre-requisites
+ detailed above.
+
+Calipso functest
+----------------
+
+TBD
+----
+
+.. |image0| image:: media/image1.png
+ :width: 6.50000in
+ :height: 4.27153in
diff --git a/docs/release/developer-guide.pdf b/docs/release/developer-guide.pdf
new file mode 100644
index 0000000..2ed302e
--- /dev/null
+++ b/docs/release/developer-guide.pdf
Binary files differ
diff --git a/docs/release/developer-guide.rst b/docs/release/developer-guide.rst
new file mode 100644
index 0000000..0de3f57
--- /dev/null
+++ b/docs/release/developer-guide.rst
@@ -0,0 +1,1338 @@
+| Calipso
+| Developer Guide
+
+|image0|
+
+Project “Calipso” tries to illuminate complex virtual networking with
+real time operational state visibility for large and highly distributed
+Virtual Infrastructure Management (VIM).
+
+We believe that Stability is driven by accurate Visibility.
+
+Calipso provides visible insights using smart discovery and virtual
+topological representation in graphs, with monitoring per object in the
+graph inventory to reduce error vectors and troubleshooting, maintenance
+cycles for VIM operators and administrators.
+
+Project architecture
+====================
+
+Calipso comprises two major parts: application and UI. We’ll focus on
+the former in this developer guide.
+
+Current project structure is as follows:
+
+- root/
+
+ - app/
+
+ - api/
+
+ - responders/
+
+ - auth/
+
+ - resource/
+
+ - *server.py*
+
+ - config/
+
+ - *events.json*
+
+ - *scanners.json*
+
+ - discover/
+
+ - events/
+
+ - listeners/
+
+ - *default\_listener.py*
+
+ - *listener\_base.py*
+
+ - handlers/
+
+ - *event\_base.py*
+
+ - *event\_\*.py*
+
+ - fetchers/
+
+ - aci/
+
+ - api/
+
+ - cli/
+
+ - db/
+
+ - *event\_manager.py*
+
+ - *scan.py*
+
+ - *scan\_manager.py*
+
+ - monitoring/
+
+ - checks/
+
+ - handlers/
+
+ - *monitor.py*
+
+ - setup/
+
+ - *monitoring\_setup\_manager.py*
+
+ - test/
+
+ - api/
+
+ - event\_based\_scan/
+
+ - fetch/
+
+ - scan/
+
+ - utils/
+
+ - ui/
+
+Application structure
+---------------------
+
+‘API’ package
+~~~~~~~~~~~~~
+
+Calipso API is designed to be used by native and third-party
+applications that plan to use the Calipso discovery application.
+
+***api/responders***
+
+This package contains all exposed API endpoint handlers:
+
+*auth* package contains token management handlers,
+
+*resource* package contains resource handlers.
+
+***server.py***
+
+API server startup script. In order for it to work correctly, connection
+arguments for a Mongo database used by a Calipso application instance
+are required:
+
+-m [MONGO\_CONFIG], --mongo\_config [MONGO\_CONFIG]
+
+name of config file with mongo access details
+
+--ldap\_config [LDAP\_CONFIG]
+
+name of the config file with ldap server config details
+
+-l [LOGLEVEL], --loglevel [LOGLEVEL]
+
+logging level (default: 'INFO')
+
+-b [BIND], --bind [BIND]
+
+binding address of the API server (default: 127.0.0.1:8000)
+
+-y [INVENTORY], --inventory [INVENTORY]
+
+name of inventory collection (default: 'inventory')
+
+-t [TOKEN\_LIFETIME], --token-lifetime [TOKEN\_LIFETIME]
+
+lifetime of the token
+
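+A typical invocation might look like this (the config path and values
+are illustrative, not defaults)::
+
+    python3 app/api/server.py -m /etc/calipso/mongo.conf -b 0.0.0.0:8000 -l DEBUG
+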
+For detailed reference and endpoints guide, see the API Guide document.
+
+‘Discover’ package
+~~~~~~~~~~~~~~~~~~
+
+‘Discover’ package contains the core Calipso functionality which
+involves:
+
+- scanning a network topology using a defined suite of scanners (see
+ `Scanning concepts <#scanning-concepts>`__, `Scanners configuration
+ file structure <#the-scanners-configuration-file-structure>`__) that
+ use fetchers to get all needed data on objects of the topology;
+
+- tracking live events that modify the topology in any way (by adding
+ new objects, updating or deleting existing ones) using a suite of
+ event handlers and event listeners;
+
+- managing the aforementioned suites using specialized manager scripts
+ (*scan\_manager.py* and *event\_manager.py*)
+
+‘Tests’ package
+~~~~~~~~~~~~~~~
+
+‘Tests’ package contains unit tests for main Calipso components: API,
+event handlers, fetchers, scanners and utils.
+
+Other packages
+~~~~~~~~~~~~~~
+
+***Install***
+
+Installation and deployment scripts (with initial data for Calipso
+database).
+
+***Monitoring***
+
+Monitoring configurations, checks and handlers (see
+`Monitoring <#monitoring>`__ section and Monitoring Guide document).
+
+***Utils***
+
+Utility modules for app-wide use (inventory manager, mongo access,
+loggers, etc.).
+
+Scanning Guide
+==============
+
+Introduction to scanning
+-------------------------
+
+Architecture overview
+~~~~~~~~~~~~~~~~~~~~~
+
+Calipso backend will scan any OpenStack environment to discover the
+objects that it is made of, and place the objects it discovered in a
+MongoDB database.
+
+Following discovery of objects, Calipso will:
+
+| Find what links exist between these objects, and save these links to
+ MongoDB as well.
+| For example, it will create a pnic-network link between a pNIC
+ (physical NIC) and the network it is connected to.
+
+Based on user definitions, it will create a 'clique' for each object
+using the links it previously found. These cliques are later used to
+present graphs for objects being viewed in the Calipso UI. This is not a
+clique by graph theory definition, but more like the social definition
+of clique: a graph of related, interconnected nodes.
+
+
+OpenStack Scanning is done using the following methods, in order of
+preference:
+
+1. OpenStack API
+
+2. MySQL DB - fetch any extra detail we can from the infrastructure
+ MySQL DB used by OpenStack
+
+3. CLI - connect by SSH to the hosts in the OpenStack environment to run
+ commands, e.g. ifconfig, that will provide the most in-depth details.
+
+
+| *Note*: 'environment' in Calipso means a single deployment of
+ OpenStack, possibly containing multiple tenants (projects), hosts and
+ instances (VMs). A single Calipso instance can handle multiple
+ OpenStack environments. 
+| However, we expect that typically Calipso will reside inside an
+ OpenStack control node and will handle just that node's OpenStack
+ environment.
+
+
+***Environment***
+
+| The Calipso scan script, written in Python, is called scan.py.
+| It uses Python 3, along with the following libraries:
+
+- pymongo - for MongoDB access
+
+- mysql-connector - for MySQL DB access
+
+- paramiko - for SSH access
+
+- requests - for handling HTTP requests and responses to the OpenStack
+ API
+
+- xmltodict - for handling XML output of CLI commands
+
+- cryptography - used by Paramiko
+
+See Calipso installation guide for environment setup instructions.
+
+***Configuration***
+
+The configuration for accessing the OpenStack environment, by API, DB or
+SSH, is saved in the Calipso MongoDB *“environments\_config”*
+collection.
+
+Calipso can work with a remote MongoDB instance, the details of which
+are read from a configuration file (default: */etc/calipso/mongo.conf*).
+
+| The first column is the configuration key and the second is the
+ configuration value; in this case, the value is the server host name
+ or IP address.
+| Other possible keys for MongoDB access:
+
+- port: IP port number
+
+- Other parameters for the PyMongo MongoClient class constructor
+
+Alternate file location can be specified using the CLI -m parameter.
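+
+For illustration, a minimal configuration file might look like this
+(the key names follow the PyMongo MongoClient constructor parameters
+mentioned above; the values are placeholders)::
+
+    host 10.0.0.10
+    port 27017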
+
+Scanning concepts
+~~~~~~~~~~~~~~~~~
+
+***DB Schema***
+
+Objects are stored in the inventory collection, named *“inventory”* by
+default, along with the accompanying collections, named by
+default: \ *"links", "cliques", "clique\_types" and
+"clique\_constraints"*. For development, separate sets of collections
+can be defined per environment (collection names are created by
+appending the default collection name to the alternative inventory
+collection name).
+
+The inventory, links and cliques collections are all designed to work
+with a multi-environment scenario, so documents are marked with an
+*"environment"* attribute.
+
+The clique\_types collection allows Calipso users (typically
+administrators) to define how the “clique” graphs are to be built.
+
+It defines a set of link types to be traversed when an object such as an
+instance is clicked in the UI (therefore referred to as the focal
+point). See "Clique Scanning" below. This definition can differ between
+environments.
+
+Example: for focal point type "instance", the link types are often set
+to
+
+- instance-vnic
+
+- vnic-vconnector
+
+- vconnector-vedge
+
+- vedge-pnic
+
+- pnic-network
+
+| The clique\_constraints collection defines a constraint on links
+ traversed for a specific clique when starting from a given focal
+ point. 
+| For example: instance cliques are constrained to a specific
+ network. If we didn’t have this constraint, the resulting graph
+ would stretch to include objects from neighboring networks that are
+ not really related to the instance.
+
+***Hierarchy of Scanning***
+
+The initial scanning is done hierarchically, starting from the
+environment level and discovering lower levels in turn.
+
+Examples:
+
+- Under environment we scan for regions and projects (tenants).
+
+- Under availability zone we have hosts, and under hosts we have
+ instances and host services
+
+The actual scanning order is not always the same as the logical
+hierarchical order of objects, in order to improve scanning performance.
+
+Some objects are referenced multiple times in the hierarchy. For
+example, hosts are always in an availability zone, but can also be part
+of a host aggregate. Such extra references are saved as references to
+the main object.
+
+***Clique Scanning***
+
+| For creating cliques based on the discovered objects and links, clique
+ types need to be defined for the given environment.
+| A clique type specifies the list of link types used in building a
+ clique for a specific focal point object type.
+
+For example, it can define that for instance objects we want to have the
+following link types:
+
+- instance-vnic
+
+- vnic-vconnector
+
+- vconnector-vedge
+
+- vedge-pnic
+
+- pnic-network
+
+
+Since in many cases the same clique types are used, default clique types
+are provided with a new Calipso deployment.
+
+***Clique creation algorithm***
+
+- For each clique type CT:
+
+ - For each focal point object F of the type specified as the clique
+ type focal point type:
+
+ - Create a new clique C
+
+ - Add F to the list of objects included in the clique
+
+ - For each link type X-Y of the link types in CT:
+
+ - Find all the source objects of type X that are already
+ included in the clique
+
+ - For each such source object S:
+
+ - for all links L of type X-Y that have S as their source
+
+ - Add the object T of type Y that is the target in L to the
+ list of objects included in the clique
+
+ - Add L to the list of links in the clique C
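+
+The same algorithm, as a simplified Python sketch (collection and field
+names follow the DB schema section above; the exact layout of clique
+documents is illustrative, not the actual Calipso implementation)::
+
+    from pymongo import MongoClient
+
+    db = MongoClient("localhost", 27017).calipso
+
+    def build_cliques(env):
+        for ct in db.clique_types.find({"environment": env}):
+            focals = db.inventory.find({"environment": env,
+                                        "type": ct["focal_point_type"]})
+            for focal in focals:
+                objects = {focal["id"]: focal["type"]}  # object id -> type
+                links = []
+                for link_type in ct["link_types"]:
+                    source_type, target_type = link_type.split("-", 1)
+                    # source objects of this type already in the clique
+                    sources = [oid for oid, otype in objects.items()
+                               if otype == source_type]
+                    for source_id in sources:
+                        for link in db.links.find({"environment": env,
+                                                   "link_type": link_type,
+                                                   "source_id": source_id}):
+                            objects[link["target_id"]] = target_type
+                            links.append(link["_id"])
+                db.cliques.insert_one({"environment": env,
+                                       "focal_point": focal["id"],
+                                       "objects": list(objects),
+                                       "links": links})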
+
+How to run scans
+----------------
+
+For running environment scans Calipso uses a specialized daemon script
+called *scan manager*. If the Calipso application is deployed in Docker
+containers, scan manager runs inside the *calipso-scan* container.
+
+Scan manager uses a MongoDB connection to fetch requests for environment
+scans and executes them by running a *scan* script. It also performs
+extra checks and procedures related to scan failure/completion, such
+as marking the *environment* as scanned and reporting errors (see
+`details <#scan-manager>`__).
+
+Scan script workflow:
+
+1. Loads specific scanners definitions from a predefined metadata file
+ (which can be extended in order to support scanning of new object
+ types).
+
+2. Runs the root scanner and then children scanners recursively (see
+ `Hierarchy of scanning <#Hierarchy_of_scanning>`__)
+
+ a. Scanners do all necessary work to insert objects in *inventory*.
+
+3. Finalizes the scan and publishes successful scan completion.
+
+Scan manager
+~~~~~~~~~~~~
+
+Scan manager is a script whose purpose is to manage the full lifecycle
+of scans requested through the API. It runs indefinitely while:
+
+1. Polling the database (*scans* and *scheduled\_scans* collections) for
+ new and scheduled scan requests;
+
+2. Parsing their configurations;
+
+3. Running the scans;
+
+4. Logging the results.
+
+Scan manager can be run in a separate container provided that it has a
+connection to the database and to the topology source system.
+
+Monitoring
+----------
+
+***Monitoring Subsystem Overview***
+
+Calipso monitoring uses Sensu to remotely track the actual state of hosts.
+
+A Sensu server is installed as a Docker image along with the other
+Calipso components.
+
+
+Remote hosts send check events to the Sensu server. 
+
+We filter check events so that the first occurrence of a check is always
+handled; after that, events whose status is unchanged are ignored.
+
+When handling a check event, the Calipso Sensu handlers will find the
+matching Calipso object, and update its status.
+
+We also keep the timestamp of the last status update, along with the
+full check output.
+
+Setup of checks and handlers code on the server and the remote hosts can
+be done by Calipso. It is also possible to have this done using another
+tool, e.g. Ansible or Puppet.
+
+More info is available in Monitoring Guide document.
+
+
+***Package Structure***
+
+The monitoring package is divided as follows:
+
+1. Checks: the actual check scripts that will be run on the hosts;
+
+2. Handlers: the code that handles check events;
+
+3. Setup: code for setting up handlers and checks.
+
+Events Guide
+============
+
+Introduction
+------------
+
+Events
+~~~~~~
+
+Events, in the general sense, are any changes to the monitored topology
+objects that are trackable by Calipso. We currently support subscription
+to Neutron notification queues for several OpenStack distributions as a
+source of events.
+
+The two core concepts of working with events are *listening to events*
+and *event handling*, so the main module groups in play are the *event
+listener* and *event handlers*.
+
+Event listeners
+~~~~~~~~~~~~~~~
+
+An event listener is a module that handles the connection to the event
+source, listens for new events and routes them to the respective event
+handlers.
+
+An event listener class should be designed to run indefinitely in
+foreground or background (daemon) while maintaining a connection to the
+source of events (generally a message queue like RabbitMQ or Apache
+Kafka). Each incoming event is examined and, if it has the correct
+format, is routed to the corresponding event handler class. The routing
+can be accomplished through a dedicated event router class using a
+metadata file and a metadata parser (see `Metadata
+parsers <#metadata-parsers>`__).
+
+Event handlers
+~~~~~~~~~~~~~~
+
+An event handler is a specific class that parses the incoming event
+payload and performs a certain CUD (Create/Update/Delete) operation on
+zero or more database objects. An event handler should be independent of
+the event listener implementation.
+
+Event manager
+~~~~~~~~~~~~~
+
+Event manager is a script whose purpose is to manage event listeners. It
+runs indefinitely and performs the following operations:
+
+1. Starts a process for each valid entry in *environments\_config*
+ collection that is scanned (*scanned == true*) and has the *listen*
+ flag set to *true*;
+
+2. Checks the *operational* statuses of event listeners and updates
+ them in the *environments\_config* collection;
+
+3. Stops the event listeners that no longer qualify for listening (see
+ step 1);
+
+4. Restarts the event listeners that quit unexpectedly;
+
+5. Repeats steps 1-4.
+
+Event manager can be run in a separate container provided that it has a
+connection to the database and to all event source systems that the
+event listeners use.
+
+Contribution
+~~~~~~~~~~~~
+
+You can contribute to Calipso *events* system in several ways:
+
+- create custom event handlers for an existing listener;
+
+- create custom event listeners and reuse existing handlers;
+
+- create custom event handlers and listeners.
+
+See `Creating new event handlers <#creating-new-event-handlers>`__ and
+`Creating new event listeners <#creating-new-event-listeners>`__ for the
+respective guides.
+
+Contribution
+============
+
+This section covers the designed approach to contribution to Calipso.
+
+The main scenario of contribution consists of introducing a new *object*
+type to the discovery engine, defining *links* that connect this new
+object to existing ones, and describing a *clique* (or cliques) that
+makes use of the object and its links. Below we describe how this
+scenario should be implemented, step-by-step.
+
+*Note*: Before writing any new code, you need to create your own
+environment using UI (see User Guide document) or API (see the API guide
+doc). Creating an entry directly in *“environments\_config”* collection
+is not recommended.
+
+Creating new object types
+-------------------------
+
+Before you proceed with the creation of a new object type, you need to
+make sure the following requirements are met:
+
+- New object type has a unique name and purpose
+
+- New object type has an existing parent object type
+
+First of all, you need to create a fetcher that will take care of
+getting info on objects of the new type, processing it and adding new
+entries in the Calipso database.
+
+Creating new fetchers
+~~~~~~~~~~~~~~~~~~~~~
+
+A fetcher is a common name for a class that handles fetching of all
+objects of a certain type that have a common parent object. The source
+of this data may be already implemented in Calipso (like OpenStack API,
+CLI and DB sources) or you may create one yourself.
+
+***Common fetchers***
+
+Fetchers package structure should adhere to the following pattern (where
+*%source\_name%* is a short prefix like *api, cli, db*):
+
+- app
+
+ - discover
+
+ - fetchers
+
+ - *%source\_name%*
+
+ - *%source\_name%\_%fetcher\_name%.py*
+
+If you reuse an existing data source, your new fetcher should subclass
+the class located in the *%source\_name%\_access* module inside the
+*%source\_name%* directory.
+
+The fetcher class name should repeat the module name, but in CamelCase
+instead of snake\_case.
+
+Example: if you are adding a new cli fetcher, you should subclass
+*CliAccess* class found by *app/discover/fetchers/cli/cli\_access.py*
+path. If the module is named *cli\_fetch\_new\_objects.py*, fetcher
+class should be named *CliFetchNewObjects*.
+
+If you are creating a fetcher that uses a new data source, you may
+consider adding an “access” class for this data source to store
+convenience methods. In this case, the “access” class should subclass
+the base Fetcher class (found in *app/discover/fetcher.py*) and the
+fetcher class should subclass the “access” class.
+
+All business logic of a fetcher should be defined inside the overridden
+*get(self, parent\_id)* method of the base Fetcher class. You should use
+the second argument, which is automatically passed by the parent
+scanner, to get the parent entity from the database along with any data
+you may need. This method has to return a list of new objects (dicts)
+that need to be inserted in the Calipso database. Their parent object
+should be passed along with the other fields (see example).
+
+*Note*: the type of the returned objects should match the one their
+fetcher is designed for.
+
+***Example***:
+
+**app/discover/fetchers/cli/cli\_fetch\_new\_objects.py**::
+
+    from discover.fetchers.cli.cli_access import CliAccess
+    from utils.inventory_mgr import InventoryMgr
+
+
+    class CliFetchNewObjects(CliAccess):
+        def __init__(self):
+            super().__init__()
+            self.inv = InventoryMgr()
+
+        def get(self, parent_id):
+            parent = self.inv.get_by_id(self.env, parent_id)
+            # do something to discover the new objects
+            objects = [{"type": "new_type", "id": "1234", "parent": parent},
+                       {"type": "new_type", "id": "2345", "parent": parent}]
+            return objects
+
+This is an example of a fetcher that deals with the objects of type
+*“new\_type”*. It uses the parent id to fetch the parent object, then
+performs some operations in order to fetch the new objects and
+ultimately returns the objects list, at which point it has gathered all
+required information.
+
+***Folder fetcher***
+
+A special type of fetcher is the folder fetcher. It serves as a dummy
+object used to aggregate objects at a specific point in the objects
+hierarchy. If you would like to logically separate children objects from
+their parent, you may use the folder fetcher found at
+*app/discover/fetchers/folder\_fetcher.py*.
+
+Usage is described `here <#Folder_scanner>`__.
+
+The scanners configuration file structure
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+**Scanners.json** (full path *app/config/scanners.json*) is an essential
+configuration file that defines scanners hierarchy. It has a forest
+structure, meaning that it is a set of trees, where each tree has a
+*root* scanner, potentially many levels of *children* scanners and
+pointers from parent scanners to children scanners. Scanning hierarchy
+is described `here <#Hierarchy_of_scanning>`__.
+
+A scanner is essentially a list of fetchers with configuration (we’ll
+call those **Fetch types**). Fetch types can be **Simple** or
+**Folder**, as described below.
+
+***Simple fetch type***
+
+A simple fetch type looks like this::
+
+    {
+        "type": "project",
+        "fetcher": "ApiFetchProjects",
+        "object_id_to_use_in_child": "name",
+        "environment_condition": {
+            "mechanism_drivers": "VPP"
+        },
+        "children_scanner": "ScanProject"
+    }
+
+Supported fields include:
+
+- *“fetcher”* – class name of fetcher that the scanner uses;
+
+- *“type”* – object type that the fetcher works with;
+
+- *“children\_scanner”* – (optional) full name of a scanner that should
+ run after current one finishes;
+
+- *“environment\_condition”* – (optional) specific constraints that
+ should be checked against the environment in *environments\_config*
+ collection before execution;
+
+- *“object\_id\_to\_use\_in\_child”* – (optional) which parent field
+ should be passed as parent id to the fetcher (default: “id”).
+
+***Folder fetch type***
+
+Folder fetch types deal with folder fetchers (described
+`here <#Folder_fetcher>`__) and have a slightly different structure::
+
+    {
+        "type": "aggregates_folder",
+        "fetcher": {
+            "folder": true,
+            "types_name": "aggregates",
+            "parent_type": "region"
+        },
+        "object_id_to_use_in_child": "name",
+        "environment_condition": {
+            "mechanism_drivers": "VPP"
+        },
+        "children_scanner": "ScanAggregatesRoot"
+    }
+
+The only difference is that *“fetcher”* field is now a dictionary with
+the following fields:
+
+- *“folder”* – should always be **true**;
+
+- *“types\_name”* – type name in plural (with added ‘s’) of objects
+ that serve as the folder’s children;
+
+- *“parent\_type”* – folder’s parent type (basically the parent type of
+ folder’s objects).
+
+Updating scanners
+~~~~~~~~~~~~~~~~~
+
+After creating a new fetcher, you should integrate it into scanners
+hierarchy. There are several possible courses of action:
+
+***Add new scanner as a child of an existing one***
+
+If the parent type of your newly added object type already has a
+scanner, you can add your new scanner as a child of an existing one.
+There are two ways to do that:
+
+1. Add new scanner as a *“children\_scanner”* field to parent scanner
+
+ ***Example***
+
+ Before::
+
+     "ScanHost": [
+         {
+             "type": "host",
+             "fetcher": "ApiFetchProjectHosts"
+         }
+     ],
+
+ After::
+
+     "ScanHost": [
+         {
+             "type": "host",
+             "fetcher": "ApiFetchProjectHosts",
+             "children_scanner": "NewTypeScanner"
+         }
+     ],
+     "NewTypeScanner": [
+         {
+             "type": "new_type",
+             "fetcher": "CliFetchNewType"
+         }
+     ]
+
+2. Add a new fetch type to the parent scanner (in case a children
+ scanner already exists)
+
+ ***Example***
+
+ Before::
+
+     "ScanHost": [
+         {
+             "type": "host",
+             "fetcher": "ApiFetchProjectHosts",
+             "children_scanner": "ScanHostPnic"
+         }
+     ],
+
+ After::
+
+     "ScanHost": [
+         {
+             "type": "host",
+             "fetcher": "ApiFetchProjectHosts",
+             "children_scanner": "ScanHostPnic"
+         },
+         {
+             "type": "new_type",
+             "fetcher": "CliFetchNewType"
+         }
+     ],
+
+***Add new scanner and set an existing one as a child***
+
+***Example***
+
+ Before::
+
+     "ScanHost": [
+         {
+             "type": "host",
+             "fetcher": "ApiFetchProjectHosts",
+             "children_scanner": "ScanHostPnic"
+         }
+     ],
+
+ After::
+
+     "NewTypeScanner": [
+         {
+             "type": "new_type",
+             "fetcher": "CliFetchNewType",
+             "children_scanner": "ScanHost"
+         }
+     ],
+
+     "ScanHost": [
+         {
+             "type": "host",
+             "fetcher": "ApiFetchProjectHosts",
+             "children_scanner": "ScanHostPnic"
+         }
+     ],
+
+***Other cases***
+
+You may choose to combine approaches or use none of them and create an
+isolated scanner if needed.
+
+Updating constants collection
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Before testing your new scanner and fetcher you need to add the newly
+created object type to *“constants”* collection in Calipso database:
+
+1. **constants.object\_types** document
+
+ Append a *{“value”: “new\_type”, “label”: “new\_type”}* object to
+ **data** list.
+
+2. **constants.scan\_object\_types** document
+
+ Append a *{“value”: “new\_type”, “label”: “new\_type”}* object to
+ **data** list.
+
+3. **constants.object\_types\_for\_links** document
+
+ If you’re planning to build links using this object type (you
+ probably are), append a *{“value”: “new\_type”, “label”:
+ “new\_type”}* object to **data** list.
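+
+As an illustration, these updates could be applied with a short pymongo
+snippet (document layout assumed per the list above, with each constants
+document selected by its *name* field)::
+
+    from pymongo import MongoClient
+
+    db = MongoClient("localhost", 27017).calipso
+    new_entry = {"value": "new_type", "label": "new_type"}
+    for doc_name in ("object_types", "scan_object_types",
+                     "object_types_for_links"):
+        # $addToSet avoids duplicate entries if the snippet is re-run
+        db.constants.update_one({"name": doc_name},
+                                {"$addToSet": {"data": new_entry}})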
+
+Setting up monitoring
+~~~~~~~~~~~~~~~~~~~~~
+
+In order to setup monitoring for the new object type you have defined,
+you’ll need to add a Sensu check:
+
+1. Add a check script in app/monitoring/checks:
+
+ a. | Checks should return the following values:
+ | 0: **OK**
+ | 1: **Warning**
+ | 2: **Error**
+
+ b. Checks can print the underlying query results to stdout. Do so
+ within reason, as this output is later stored in the DB;
+
+ c. Test your script on a remote host:
+
+ i. Place it in the */etc/sensu/plugins* directory;
+
+ ii. Update the Sensu configuration on the remote host to run this
+ check;
+
+ iii. Add the check in the “checks” section of
+ */etc/sensu/conf.d/client.json*;
+
+ iv. The name under which you save the check will be used by the
+ handler to determine the DB object that it relates to;
+
+ v. Restart the client with the command: *sudo service
+ sensu-client restart*;
+
+ vi. Check the client log file to see that the check runs and
+ produces the expected output (in the */var/log/sensu* directory).
+
+ d. Add the script to the source directory (*app/monitoring/checks*).
+
+2. Add a handler in app/monitoring/handlers:
+
+ a. If you use a standard check naming scheme and check an object, the
+ *BasicCheckHandler* can take care of this, but add the object type
+ in *basic\_handling\_types* list in *get\_handler()*;
+
+ b. If you have a more complex naming scheme, override
+ MonitoringCheckHandler. See HandleOtep for example.
+
+3. If you deploy monitoring using Calipso:
+
+ a. Add the check in the *monitoring\_config\_templates* collection.
+
+*Check Naming*
+
+The check name should start with the type of the related object,
+followed by an underscore (“\_”). For example, the name for a check
+related to an OTEP (type “otep”)  will start with “otep\_“. It should
+then be followed by the object ID.
+
+
+For checks related to links, the check name will have this format:
+link\_<link type>\_<from\_id>\_<to\_id>
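+
+A minimal check script sketch following the conventions above (the
+checked interface and logic are hypothetical; only the exit codes and
+stdout behavior follow the rules listed)::
+
+    #!/usr/bin/env python3
+    # Exit codes per the convention above: 0 = OK, 1 = Warning, 2 = Error.
+    import subprocess
+    import sys
+
+    def main():
+        try:
+            out = subprocess.check_output(["ip", "link", "show", "eth0"],
+                                          stderr=subprocess.STDOUT)
+        except subprocess.CalledProcessError as e:
+            print(e.output.decode())
+            sys.exit(2)  # Error: command failed / interface missing
+        output = out.decode()
+        print(output)  # keep stdout short: it is stored in the DB
+        sys.exit(0 if "state UP" in output else 1)  # Warning if link down
+
+    if __name__ == "__main__":
+        main()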
+
+Creating new link types
+-----------------------
+
+After you’ve added a new object type you may consider adding new link
+types to connect objects of the new type to existing objects in the
+topology. Your new object type may serve as a *source* and/or *target*
+type for the new link type.
+
+The process of new link type creation includes several steps:
+
+1. Write a link finder class;
+
+2. Add the link finder class to the link finders configuration file;
+
+3. Update *“constants”* collection with the new link types.
+
+Writing link finder classes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+A new link finder class should:
+
+1. Subclass the *app.discover.link\_finders.find\_links.FindLinks* class;
+
+2. Be located in the *app.discover.link\_finders* package;
+
+3. Define an instance method called *add\_links(self)* with no
+ additional arguments. This method is the only entry point for link
+ finder classes.
+
+The *FindLinks* class provides its subclasses with access to the
+inventory manager, which they should use to their advantage. It also
+provides a convenience method *create\_link(self, …)* for saving links
+to the database. It is reasonable to call this method at the end of the
+*add\_links* method.
+
+You may opt to add more than one link type at a time in a single link
+finder.
+
+***Example***::
+
+    from discover.link_finders.find_links import FindLinks
+
+
+    class FindLinksForNewType(FindLinks):
+        def add_links(self):
+            new_objects = self.inv.find_items({"environment": self.get_env(),
+                                               "type": "new_type"})
+            for new_object in new_objects:
+                old_object = self.inv.get_by_id(
+                    environment=self.get_env(),
+                    item_id=new_object["old_object_id"])
+                link_type = "old_type-new_type"
+                link_name = "{}-{}".format(old_object["name"],
+                                           new_object["name"])
+                state = "up"  # TBD
+                link_weight = 0  # TBD
+                self.create_link(env=self.get_env(),
+                                 source=old_object["_id"],
+                                 source_id=old_object["id"],
+                                 target=new_object["_id"],
+                                 target_id=new_object["id"],
+                                 link_type=link_type,
+                                 link_name=link_name,
+                                 state=state,
+                                 link_weight=link_weight)
+
+Updating the link finders configuration file
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The default link finders configuration file can be found at
+*app/config/link\_finders.json* and has the following structure::
+
+    {
+        "finders_package": "discover.link_finders",
+        "base_finder": "FindLinks",
+        "link_finders": [
+            "FindLinksForInstanceVnics",
+            "FindLinksForOteps",
+            "FindLinksForPnics",
+            "FindLinksForVconnectors",
+            "FindLinksForVedges",
+            "FindLinksForVserviceVnics"
+        ]
+    }
+
+File contents:
+
+- *finders\_package* – python path to the package that contains link
+ finders (relative to $PYTHONPATH environment variable);
+
+- *base\_finder* – base link finder class name;
+
+- *link\_finders* – class names of actual link finders.
+
+If your new link finder meets the requirements described in the `Writing
+link finder classes <#writing-link-finder-classes>`__ section, you can
+append its name to the *“link\_finders”* list in *link\_finders.json*.
+
+Updating constants collection
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Before testing your new links finder, you need to add the newly created
+link types to *“constants”* collection in Calipso database:
+
+1. **constants.link\_types** document
+
+ Append a *{“value”: “source\_type-target\_type”, “label”:
+ “source\_type-target\_type”}* object to **data** list for each new
+ link type.
+
+Creating custom link finders configuration file
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+If you consider writing a custom link finders configuration file, you
+should also follow the guidelines from the preceding sections while
+designing link finder classes and including them in the new link finders
+source file.
+
+The general approach is the following:
+
+1. Custom configuration file should have the same key structure with the
+ basic one;
+
+2. You should create a *base\_finder* class that subclasses the basic
+ FindLinks class (see `Writing link finder
+ classes <#writing-link-finder-classes>`__);
+
+3. Your link finder classes should be located in the same package with
+ your *base\_finder* class;
+
+4. Your link finder classes should subclass your *base\_finder* class
+ and override the *add\_links(self)* method.
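+
+A minimal sketch of such a custom base finder (the class name is
+illustrative)::
+
+    from discover.link_finders.find_links import FindLinks
+
+
+    class FindLinksCustomBase(FindLinks):
+        """Base class for a custom set of link finders."""
+
+        def add_links(self):
+            # concrete finders override this entry point
+            raise NotImplementedError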
+
+Creating new clique types
+-------------------------
+
+The two steps in creating new clique types and including them in the
+clique finder are:
+
+1. Designing new clique types
+
+2. Updating clique types collection
+
+Designing new clique types
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+A clique type is basically a list of links that will be traversed during
+clique scans (see `Clique creation algorithm <#clique_creation>`__). The
+process of coming up with clique types involves knowledge of general
+networking concepts as well as expertise in the details of the monitored
+system (e.g. OpenStack distribution specifics). In a nutshell, it is not
+a trivial process, so clique design should be considered carefully.
+
+The predefined clique types (in *clique\_types* collection) may give you
+some idea about the rationale behind clique design.
+
+Updating clique types collection
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+After designing the new clique type, you need to update the
+*clique\_types* collection in order for the clique finder to use it. For
+this purpose, you should add a document of the following structure::
+
+    {
+        "environment": "ANY",
+        "link_types": [
+            "instance-vnic",
+            "vnic-vconnector",
+            "vconnector-vedge",
+            "vedge-otep",
+            "otep-vconnector",
+            "vconnector-host_pnic",
+            "host_pnic-network"
+        ],
+        "name": "instance",
+        "focal_point_type": "instance"
+    }
+
+Document fields are:
+
+- *environment* – can either hold the environment name, for which the
+ new clique type is designed, or **“ANY”** if the new clique type
+ should be added to all environments;
+
+- *name* – display name for the new clique type;
+
+- *focal\_point\_type* – the aggregate object type for the new clique
+ type to use as a starting point;
+
+- *link\_types* – a list of links that constitute the new clique type.
+
+Creating new event handlers
+---------------------------
+
+There are three steps to creating a new event handler:
+
+1. Determining *event types* that will be handled by the new handler;
+
+2. Writing the new handler module and class;
+
+3. Adding the (event type -> handler) mapping to the event handlers
+ configuration file.
+
+Writing custom handler classes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Each event handler should adhere to the following design:
+
+1. Event handler class should subclass
+ the app.discover.events.event\_base.EventBase class;
+
+2. Event handler class should override the handle method of EventBase.
+ Business logic of the event handler should be placed inside
+ the handle method;
+
+ a. The handle method accepts two arguments: environment name (str) and
+ notification contents (dict). No other event data will be provided
+ to the method;
+
+ b. The handle method returns an EventResult object, which accepts the
+ following arguments in its constructor:
+
+ i. *result* (mandatory) - determines whether the event handling
+ was successful;
+
+ ii. *retry* (optional) - determines whether the message should be
+ put back in the queue in order to be processed later. This
+ argument is checked only if result was set to False;
+
+ iii. *message* (optional) - (Currently unused) a string comment on
+ handling status;
+
+ iv. *related\_object* (optional) – id of the object related to
+ the handled event;
+
+ v. *display\_context* (optional) – (Calipso UI requirement).
+
+3. The module containing the event handler class should have the same
+ name as the handler class, translated
+ from UpperCamelCase to snake\_case.
+
+ ***Example:***
+
+ **app/discover/events/event\_new\_object\_add.py**::
+
+     from discover.events.event_base import EventBase, EventResult
+
+
+     class EventNewObjectAdd(EventBase):
+         def handle(self, env: str, notification: dict) -> EventResult:
+             obj_id = notification['payload']['new_object']['id']
+             obj = {
+                 'id': obj_id,
+                 'type': 'new_object'
+             }
+             self.inv.set(obj)
+             return EventResult(result=True)
+
+ Modifications in *events.json*::
+
+     <...>
+
+     "event_handlers": {
+         <...>
+         "new_object.create": "EventNewObjectAdd",
+         <...>
+     }
+
+     <...>
+
+After these changes are implemented, any event of type
+new\_object.create will be consumed by the event listener, and the
+payload will be passed to the EventNewObjectAdd handler, which will
+insert a new document in the database.
+
+Event handlers configuration file structure
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+**Events.json** (full path *app/config/events.json*) is a configuration
+file that contains information about events and event handlers,
+including:
+
+- Event subscription details (queues and exchanges for Neutron
+ notifications);
+
+- Location of event handlers package;
+
+- Mappings between event types and respective event handlers.
+
+The structure of *events.json* is as follows::
+
+    {
+        "handlers_package": "discover.events",
+        "queues": [
+            {
+                "queue": "notifications.nova",
+                "exchange": "nova"
+            },
+            <...>
+        ],
+        "event_handlers": {
+            "compute.instance.create.end": "EventInstanceAdd",
+            "compute.instance.update": "EventInstanceUpdate",
+            "compute.instance.delete.end": "EventInstanceDelete",
+            "network.create.end": "EventNetworkAdd",
+            <...>
+        }
+    }
+
+The root object contains the following fields:
+
+- **handlers\_package** - python path to the package that contains
+ event handlers (relative to $PYTHONPATH environment variable)
+
+- **queues –** RabbitMQ queues and exchanges to consume messages from
+ (for Neutron notifications case)
+
+- **event\_handlers** – mappings of event types to the respective
+ handlers. The structure suggests that any event can have only one
+ handler.
+
+In order to add a new event handler to the configuration file, you
+should add another mapping to the *event\_handlers* object, where the
+key is the event type being handled and the value is the handler class
+name (the module name will be determined automatically).
+
+If your event is being published to a queue and/or exchange that the
+listener is not subscribed to, you should add another entry to the
+queues list.
+
+Creating new event listeners
+----------------------------
+
+At the moment, the only guideline for creation of new event listeners is
+that they should subclass the *ListenerBase* class (full path
+*app/discover/events/listeners/listener\_base.py*) and override the
+*listen(self)* method that listens to incoming events indefinitely
+(until terminated by a signal).
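+
+A minimal listener sketch under that single guideline (the event source
+and the *poll\_source* helper are hypothetical)::
+
+    import time
+
+    from discover.events.listeners.listener_base import ListenerBase
+
+
+    def poll_source():
+        """Placeholder for source-specific logic, e.g. reading a queue."""
+        return []
+
+
+    class PollingListener(ListenerBase):
+        def listen(self):
+            while True:
+                for event in poll_source():
+                    # route the event to the matching handler here
+                    print(event)
+                time.sleep(1)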
+
+In future versions, a comprehensive guide to listeners structure is
+planned.
+
+Metadata parsers
+----------------
+
+Metadata parsers are specialized classes that are designed to verify
+metadata files (found in the *app/config* directory), use data from them
+to load instances of implementation classes (e.g. scanners, event
+handlers, link finders) in memory, and supply them by request. The
+scanners and link finders configuration files are used in the scanner;
+the event handlers configuration file is used in the event listener.
+
+In order to create a new metadata parser, you should consider
+subclassing the *MetadataParser* class (found in
+*app/utils/metadata\_parser.py*). *MetadataParser* supports parsing and
+validation of JSON files out of the box. The entry point for the class
+is the *parse\_metadata\_file* method, which requires the abstract
+*get\_required\_fields* method to be overridden in subclasses. This
+method should return a list of keys that the metadata file is required
+to contain.
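+
+A subclass sketch under the contract described above (the class name and
+required keys are illustrative)::
+
+    from utils.metadata_parser import MetadataParser
+
+
+    class NewMetadataParser(MetadataParser):
+        def get_required_fields(self) -> list:
+            # keys the metadata file must contain
+            return ['finders_package', 'base_finder', 'link_finders']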
+
+For different levels of customization you may consider:
+
+1. Overriding *validate\_metadata* method to provide more precise
+ validation of metadata;
+
+2. Overriding *parse\_metadata\_file* to provide custom metadata
+ handling required by your use case.
+
+.. |image0| image:: media/image1.png
+ :width: 6.50000in
+ :height: 4.27153in
diff --git a/docs/release/install-guide.pdf b/docs/release/install-guide.pdf
index 1bc24f5..c6476b8 100644
--- a/docs/release/install-guide.pdf
+++ b/docs/release/install-guide.pdf
Binary files differ
diff --git a/docs/release/install-guide.rst b/docs/release/install-guide.rst
index 28bba01..be0d3da 100644
--- a/docs/release/install-guide.rst
+++ b/docs/release/install-guide.rst
@@ -1,12 +1,6 @@
| Calipso.io
| Installation Guide
-Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) and others
-All rights reserved. This program and the accompanying materials
-are made available under the terms of the Apache License, Version 2.0
-which accompanies this distribution, and is available at
-http://www.apache.org/licenses/LICENSE-2.0
-
|image0|
Project “Calipso” tries to illuminate complex virtual networking with
@@ -46,10 +40,10 @@ Calipso.io Installation Guide 1
3.2 Fuel scenarios 7
-1. Pre Requisites
+Pre Requisites
===============
-1.1 Pre Requisites for Calipso “all in one” application
+Pre Requisites for Calipso “all in one” application
----------------------------------------------------
Calipso’s main application is written with Python3.5 for Linux
@@ -108,11 +102,13 @@ Calipso.io Installation Guide 1
**sudo pip3 install docker**
-4. Git : https://git-scm.com/book/en/v2/Getting-Started-Installing-Git
+ **sudo pip3 install inflect (>0.2.5)**
+
+4. Git : https://git-scm.com/book/en/v2/Getting-Started-Installing-Git
-5. Docker : https://docs.docker.com/engine/installation/
+5. Docker : https://docs.docker.com/engine/installation/
-1.2 Pre Requisites for Calipso UI application
+Pre Requisites for Calipso UI application
------------------------------------------
Calipso UI is developed and maintained using Meteor Framework
@@ -122,10 +118,10 @@ Calipso.io Installation Guide 1
required steps for installing the Calipso UI with several options
are listed below.
-2. Installation Options
+Installation Options
====================
-2.1 Monolithic App
+Monolithic App
---------------
For development use, one might require Calipso to be installed as a
@@ -143,16 +139,16 @@ Calipso.io Installation Guide 1
**git clone https://git.opnfv.org/calipso/**
-4. Move to the default install directory: **cd calipso**
+4. Move to the default install directory: **cd calipso**
-5. Setup Python3 environment for calipso:
+5. Setup Python3 environment for calipso:
**export PYTHONPATH=/home/calipso/calipso/app**
-6. Follow quick-start guide on how to use calipso modules for monolithic
+6. Follow quick-start guide on how to use calipso modules for monolithic
scenario, and run each module manually.
-2.2 Micro Services App, single line install
+Micro Services App, single line install
---------------------------------------
For most users, this will be the fastest and more reliable install
@@ -163,10 +159,12 @@ Calipso.io Installation Guide 1
1. Follow steps 1- 5 per section 2.1 above.
-2. Install the following python3 libraries using pip3 : docker, pymongo
+2. Install Docker : https://docs.docker.com/engine/installation/
+
+3. Install the following python3 libraries using pip3 : docker, pymongo
-3. Although Calipso installer can download all needed containers, if
- they does not exist locally already, we recommend doing a manual
+4. Although Calipso installer can download all needed containers, if
+ they don’t exist locally already, we recommend doing a manual
download of all 7 containers, providing better control and logging:
**sudo docker login** # use your DockerHub username and password to
@@ -181,8 +179,8 @@ Calipso.io Installation Guide 1
**sudo docker pull korenlev/calipso:api** # api container for
application integration
- **sudo docker pull korenlev/calipso:sensu** # sensu server container
- for monitoring
+ **sudo docker pull korenlev/calipso:monitor** # sensu server
+ container for monitoring
**sudo docker pull korenlev/calipso:mongo** # calipso mongo DB
container
@@ -191,7 +189,7 @@ Calipso.io Installation Guide 1
**sudo docker pull korenlev/calipso:ldap** # calipso ldap container
-4. Check that all containers were downloaded and registered
+5. Check that all containers were downloaded and registered
successfully:
**sudo docker images**
@@ -206,7 +204,7 @@ Calipso.io Installation Guide 1
**korenlev/calipso scan 1ee60c4e61d5 6 hours ago 1.1GB**
- **korenlev/calipso sensu a8a17168197a 6 hours ago 1.65GB**
+ **korenlev/calipso monitor a8a17168197a 6 hours ago 1.65GB**
**korenlev/calipso mongo 17f2d62f4445 22 hours ago 1.31GB**
@@ -214,7 +212,7 @@ Calipso.io Installation Guide 1
**korenlev/calipso ldap 316bc94b25ad 2 months ago 269MB**
-5. Run the calipso installer using single line arguments:
+6. Run the calipso installer using single line arguments:
 **python3 calipso/app/install/calipso-installer.py --command
start-all --copy q**
@@ -222,7 +220,7 @@ Calipso.io Installation Guide 1
This should launch all calipso modules in sequence along with all
needed configuration files placed in /home/calipso.
-2.3 Micro Services App, customized single line install
+Micro Services App, customized single line install
--------------------------------------------------
Calipso app includes the following directory in its default
@@ -276,28 +274,38 @@ Calipso.io Installation Guide 1
an optional argument, default ‘calipso’ (calipso-mongo container’s
default) is deployed if not used.
-7. **--dbpassword **
-
- Allows to enter a password to be used for mongoDB access on the host,
- an optional argument, default ‘calipso\_default’ (calipso-mongo
- container’s default) is deployed if not used.
-
-8. **--apiport **
- Allows to enter a TCP port to be used for the Calipso API (default=8000)
-
-9. **--uchiwaport **
- Allows to enter a TCP port to be used for the Sensu UI (default=3000)
-
-10. **--rabbitmport **
- Allows to enter a TCP port to be used for the RabbitMQ mgmt (default=15672)
-
-11. **--sensuport **
- Allows to enter a TCP port to be used for the Sensu API (default=4567)
-
-12. **--rabbitport **
- Allows to enter a TCP port to be used for the RabbitMQ BUS (default=5671)
-
-2.4 Micro Services App, customized interactive install
+7. **--dbpassword**
+
+ Allows to enter a password to be used for mongoDB access on the
+ host, an optional argument, default ‘calipso\_default’
+ (calipso-mongo container’s default) is deployed if not used.
+
+8. **--apiport**
+
+ Allows to enter a TCP port to be used for the Calipso API
+ (default=8000)
+
+9. **--uchiwaport**
+
+ Allows to enter a TCP port to be used for the Sensu UI
+ (default=3000)
+
+10. **--rabbitmport**
+
+ Allows to enter a TCP port to be used for the RabbitMQ mgmt
+ (default=15672)
+
+11. **--sensuport**
+
+ Allows to enter a TCP port to be used for the Sensu API
+ (default=4567)
+
+12. **--rabbitport**
+
+ Allows to enter a TCP port to be used for the RabbitMQ BUS
+ (default=5671)
+
+Micro Services App, customized interactive install
--------------------------------------------------
Calipso’s application containers can be initiated and stopped
@@ -308,7 +316,7 @@ Calipso.io Installation Guide 1
1. **Action? (stop, start, or 'q' to quit):**
2. **Container? (all, calipso-mongo, calipso-scan, calipso-listen,
- calipso-ldap, calipso-api, calipso-sensu, calipso-ui or 'q' to
+ calipso-ldap, calipso-api, calipso-monitor, calipso-ui or 'q' to
quit):**
3. **create initial calipso DB ? (copy json files from 'db' folder to
@@ -320,7 +328,7 @@ automatically creates and place 2 configuration files under
those are mandatory configuration files used by calipso containers to
interact with each other!
-2.5 OPNFV Options
+OPNFV Options
=============
Although calipso is designed for any VIM and for enterprise use-cases
@@ -330,35 +338,58 @@ with Apex for OPNFV.
APEX scenarios
---------------
-When using apex to install OPNFV, the Triple-O based OpenStack is
-installed automatically and calipso installation can be initiated
-automatically after apex completes the VIM installation process for
-a certain scenario.
-
-In this case setup_apex_environment.py can be used for creating a new environment automatically into mongoDB and UI of Calipso,
-instead of using the calipso UI to do that as typical user would do, then detailed scanning can start immediately,
-the following options are available for setup_apex_environment.py:
- -m [MONGO_CONFIG], --mongo_config [MONGO_CONFIG]
- name of config file with MongoDB server access details
- (Default: /local_dir/calipso_mongo_access.conf)
- -d [CONFIG_DIR], --config_dir [CONFIG_DIR]
- path to directory with config data (Default:
- /home/calipso/apex_setup_files)
- -i [INSTALL_DB_DIR], --install_db_dir [INSTALL_DB_DIR]
- path to directory with DB data (Default:
- /home/calipso/Calipso/app/install/db)
- -a [APEX], --apex [APEX]
- name of environment to Apex host
- -e [ENV], --env [ENV]
- name of environment to create(Default: Apex-Euphrates)
- -l [LOGLEVEL], --loglevel [LOGLEVEL]
- logging level (default: "INFO")
- -f [LOGFILE], --logfile [LOGFILE]
- log file (default:
- "/home/calipso/log/apex_environment_fetch.log")
- -g [GIT], --git [GIT]
- URL to clone Git repository (default:
- https://git.opnfv.org/calipso)
+ When using apex to install OPNFV, the Triple-O based OpenStack is
+ installed automatically and calipso installation can be initiated
+ automatically after apex completes the VIM installation process for
+ a certain scenario.
+
+ In this case setup\_apex\_environment.py can be used for creating a
+ new environment automatically in the mongoDB and UI of Calipso
+ (instead of using the calipso UI to do that as a typical user would
+ do); detailed scanning can then start immediately. The following
+ options are available for setup\_apex\_environment.py:
+
+ **-m [MONGO\_CONFIG], --mongo\_config [MONGO\_CONFIG]**
+
+ **name of config file with MongoDB server access details**
+
+ **(Default: /local\_dir/calipso\_mongo\_access.conf)**
+
+ **-d [CONFIG\_DIR], --config\_dir [CONFIG\_DIR]**
+
+ **path to directory with config data (Default:**
+
+ **/home/calipso/apex\_setup\_files)**
+
+ **-i [INSTALL\_DB\_DIR], --install\_db\_dir [INSTALL\_DB\_DIR]**
+
+ **path to directory with DB data (Default:**
+
+ **/home/calipso/Calipso/app/install/db)**
+
+ **-a [APEX], --apex [APEX]**
+
+ **name of environment to Apex host**
+
+ **-e [ENV], --env [ENV]**
+
+ **name of environment to create (Default: Apex-Euphrates)**
+
+ **-l [LOGLEVEL], --loglevel [LOGLEVEL]**
+
+ **logging level (default: "INFO")**
+
+ **-f [LOGFILE], --logfile [LOGFILE]**
+
+ **log file (default:**
+
+ **"/home/calipso/log/apex\_environment\_fetch.log")**
+
+ **-g [GIT], --git [GIT]**
+
+ **URL to clone Git repository (default:**
+
+ **https://git.opnfv.org/calipso)**
Fuel scenarios
---------------
diff --git a/docs/release/media/image101.png b/docs/release/media/image101.png
new file mode 100644
index 0000000..b0a8a5c
--- /dev/null
+++ b/docs/release/media/image101.png
Binary files differ
diff --git a/docs/release/media/image102.png b/docs/release/media/image102.png
new file mode 100644
index 0000000..8c8d413
--- /dev/null
+++ b/docs/release/media/image102.png
Binary files differ
diff --git a/docs/release/media/image103.png b/docs/release/media/image103.png
new file mode 100644
index 0000000..cc65824
--- /dev/null
+++ b/docs/release/media/image103.png
Binary files differ
diff --git a/docs/release/media/image104.png b/docs/release/media/image104.png
new file mode 100644
index 0000000..2418dcf
--- /dev/null
+++ b/docs/release/media/image104.png
Binary files differ
diff --git a/docs/release/media/image105.png b/docs/release/media/image105.png
new file mode 100644
index 0000000..1d7fc26
--- /dev/null
+++ b/docs/release/media/image105.png
Binary files differ
diff --git a/docs/release/media/image106.png b/docs/release/media/image106.png
new file mode 100644
index 0000000..029589a
--- /dev/null
+++ b/docs/release/media/image106.png
Binary files differ
diff --git a/docs/release/media/image107.png b/docs/release/media/image107.png
new file mode 100644
index 0000000..7ac129d
--- /dev/null
+++ b/docs/release/media/image107.png
Binary files differ
diff --git a/docs/release/monitoring-guide.pdf b/docs/release/monitoring-guide.pdf
index 2d075e5..40f5b8b 100644
--- a/docs/release/monitoring-guide.pdf
+++ b/docs/release/monitoring-guide.pdf
Binary files differ
diff --git a/docs/release/monitoring-guide.rst b/docs/release/monitoring-guide.rst
index dd0f047..de87ca0 100644
--- a/docs/release/monitoring-guide.rst
+++ b/docs/release/monitoring-guide.rst
@@ -1,12 +1,6 @@
| Calipso.io
| Monitoring Guide
-Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) and others
-All rights reserved. This program and the accompanying materials
-are made available under the terms of the Apache License, Version 2.0
-which accompanies this distribution, and is available at
-http://www.apache.org/licenses/LICENSE-2.0
-
|image0|
Project “Calipso” tries to illuminate complex virtual networking with
@@ -28,7 +22,7 @@ Calipso.io Monitoring Guide 1
1.1 Calipso monitoring provisioning 3
-1.2 Calipso-sensu container 5
+1.2 Calipso-monitor container 5
2 Monitoring configurations 5
@@ -67,7 +61,7 @@ Monitoring deployment options
config files and all needed plugins and scripts on all clients and
the central server.
-2. Calipso sensu module: listens for the results of the customized
+2. Calipso monitor module: listens for the results of the customized
events and updates the inventories with state and statuses, while
generating all related messages.
@@ -168,8 +162,8 @@ Calipso monitoring provisioning
|image1|
-Calipso-sensu container
------------------------
+Calipso-monitor container
+-------------------------
Once sensu clients and all needed configurations and plugins are
deployed properly, the sensu server should start receiving results
@@ -655,6 +649,6 @@ Calipso Apex monitoring integration
.. |image0| image:: media/image1.png
:width: 6.50000in
:height: 4.27153in
-.. |image1| image:: media/image10.png
+.. |image1| image:: media/image2.png
:width: 6.50000in
:height: 3.62708in
diff --git a/docs/release/scenarios/os-nosdn-calipso-noha/index.rst b/docs/release/scenarios/os-nosdn-calipso-noha/index.rst
deleted file mode 100644
index 0e65a74..0000000
--- a/docs/release/scenarios/os-nosdn-calipso-noha/index.rst
+++ /dev/null
@@ -1,15 +0,0 @@
-.. _os-nosdn-calipso-noha:
-
-.. This work is licensed under a Creative Commons Attribution 4.0 International Licence.
-.. http://creativecommons.org/licenses/by/4.0
-.. (c) <optionally add copywriters name>
-
-==============================================
-os-nosdn-calipso-noha overview and description
-==============================================
-
-.. toctree::
- :numbered:
- :maxdepth: 4
-
- apex-scenario-guide.rst
diff --git a/docs/requirements.txt b/docs/requirements.txt
new file mode 100644
index 0000000..9fde2df
--- /dev/null
+++ b/docs/requirements.txt
@@ -0,0 +1,2 @@
+lfdocs-conf
+sphinx_opnfv_theme
diff --git a/tox.ini b/tox.ini
index c1993d9..dd5076d 100644
--- a/tox.ini
+++ b/tox.ini
@@ -4,12 +4,12 @@ envlist = docs,docs-linkcheck
skipsdist = true
[testenv:docs]
-deps = -r{toxinidir}/etc/requirements.txt
+deps = -rdocs/requirements.txt
commands =
sphinx-build -b html -n -d {envtmpdir}/doctrees ./docs/ {toxinidir}/docs/_build/html
echo "Generated docs available in {toxinidir}/docs/_build/html"
whitelist_externals = echo
[testenv:docs-linkcheck]
-deps = -r{toxinidir}/etc/requirements.txt
-commands = sphinx-build -b linkcheck -d {envtmpdir}/doctrees ./docs/ {toxinidir}/docs/_build/linkcheck \ No newline at end of file
+deps = -rdocs/requirements.txt
+commands = sphinx-build -b linkcheck -d {envtmpdir}/doctrees ./docs/ {toxinidir}/docs/_build/linkcheck