diff options
author | Koren Lev <korenlev@gmail.com> | 2017-12-18 19:16:16 +0200 |
---|---|---|
committer | Koren Lev <korenlev@gmail.com> | 2017-12-18 19:16:16 +0200 |
commit | 98c3ac7c859e34fe60d061b9ca591aba429e4118 (patch) | |
tree | 3ff2629def8938b12c0a0147e463e74e475c9032 /app | |
parent | 4709d96cc240c0c4c5308d40361ca3c3da1152fd (diff) |
release 1.2 + new tagging
Change-Id: I1e876451ec4a330f458dd57adadb15e39969b225
Signed-off-by: Koren Lev <korenlev@gmail.com>
Diffstat (limited to 'app')
63 files changed, 2489 insertions, 889 deletions
diff --git a/app/api/responders/resource/clique_types.py b/app/api/responders/resource/clique_types.py index ff42f8c..e2e9e71 100644 --- a/app/api/responders/resource/clique_types.py +++ b/app/api/responders/resource/clique_types.py @@ -21,31 +21,53 @@ class CliqueTypes(ResponderBase): "focal_point_type": True, "link_types": True, "environment": True, - "name": True + "name": True, + "distribution": True, + "distribution_version": True, + "mechanism_drivers": True, + "type_drivers": True, + "use_implicit_links": True } RESERVED_NAMES = ["ANY"] + def __init__(self): + super().__init__() + self.focal_point_types = self.get_constants_by_name("object_types") + self.link_types = self.get_constants_by_name("link_types") + self.mechanism_drivers = self.get_constants_by_name("mechanism_drivers") + self.type_drivers = self.get_constants_by_name("type_drivers") + def on_get(self, req, resp): self.log.debug("Getting clique types") filters = self.parse_query_params(req) - focal_point_types = self.get_constants_by_name("object_types") - link_types = self.get_constants_by_name("link_types") filters_requirements = { - 'env_name': self.require(str, mandatory=True), + 'env_name': self.require(str), 'id': self.require(ObjectId, convert_to_type=True), + 'distribution': self.require(str), + 'distribution_version': self.require(str), + 'mechanism_drivers': self.require(str, + validate=DataValidate.LIST, + requirement=self.mechanism_drivers), + 'type_drivers': self.require(str, + validate=DataValidate.LIST, + requirement=self.type_drivers), 'focal_point_type': self.require(str, validate=DataValidate.LIST, - requirement=focal_point_types), + requirement=self.focal_point_types), 'link_type': self.require([list, str], validate=DataValidate.LIST, - requirement=link_types), + requirement=self.link_types), 'name': self.require(str), 'page': self.require(int, convert_to_type=True), 'page_size': self.require(int, convert_to_type=True) } self.validate_query_data(filters, filters_requirements) + 
if 'distribution_version' in filters and 'distribution' not in filters: + self.bad_request("Distribution version without distribution " + "is not allowed") + page, page_size = self.get_pagination(filters) query = self.build_query(filters) if self.ID in query: @@ -64,40 +86,46 @@ class CliqueTypes(ResponderBase): error, clique_type = self.get_content_from_request(req) if error: self.bad_request(error) - focal_point_types = self.get_constants_by_name("object_types") - link_types = self.get_constants_by_name("link_types") + clique_type_requirements = { - 'environment': self.require(str, mandatory=True), + 'environment': self.require(str), 'focal_point_type': self.require(str, mandatory=True, validate=DataValidate.LIST, - requirement=focal_point_types), + requirement=self.focal_point_types), 'link_types': self.require(list, mandatory=True, validate=DataValidate.LIST, - requirement=link_types), - 'name': self.require(str, mandatory=True) + requirement=self.link_types), + 'name': self.require(str, mandatory=True), + 'distribution': self.require(str), + 'distribution_version': self.require(str), + 'mechanism_drivers': self.require(str, + validate=DataValidate.LIST, + requirement=self.mechanism_drivers), + 'type_drivers': self.require(str, + validate=DataValidate.LIST, + requirement=self.type_drivers), + 'use_implicit_links': self.require(bool) } self.validate_query_data(clique_type, clique_type_requirements) - - env_name = clique_type['environment'] - if not self.check_environment_name(env_name): - self.bad_request("Unknown environment: {}".format(env_name)) - elif env_name.upper() in self.RESERVED_NAMES: - self.bad_request("Environment name '{}' is reserved".format(env_name)) + self.validate_required_fields(clique_type) + self.validate_focal_point_type(clique_type) + self.validate_duplicate_configuration(clique_type) self.write(clique_type, self.COLLECTION) self.set_successful_response(resp, - {"message": "created a new clique_type " - "for environment {0}" - 
.format(env_name)}, + {"message": "created a new clique_type"}, "201") def build_query(self, filters): query = {} - filters_keys = ['name', 'focal_point_type'] + filters_keys = ['name', 'focal_point_type', + 'distribution', 'distribution_version', + 'mechanism_drivers', 'type_drivers'] self.update_query_with_filters(filters, filters_keys, query) + link_types = filters.get('link_type') if link_types: if type(link_types) != list: @@ -107,5 +135,71 @@ class CliqueTypes(ResponderBase): if _id: query[self.ID] = _id - query['environment'] = filters['env_name'] + env_name = filters.get('env_name') + if env_name: + query['environment'] = filters['env_name'] return query + + def validate_required_fields(self, clique_type): + env_name = clique_type.get('environment') + distribution = clique_type.get('distribution') + distribution_version = clique_type.get('distribution_version') + if distribution_version and not distribution: + self.bad_request("Distribution version without distribution " + "is not allowed") + + configuration_specified = ((distribution and distribution_version) + or clique_type.get('mechanism_drivers') + or clique_type.get('type_drivers')) + if env_name: + if configuration_specified: + self.bad_request("Either environment or configuration " + "should be specified (not both).") + + if not self.check_environment_name(env_name): + self.bad_request("Unknown environment: {}".format(env_name)) + elif env_name.upper() in self.RESERVED_NAMES: + self.bad_request( + "Environment name '{}' is reserved".format(env_name)) + elif not configuration_specified: + self.bad_request("Either environment or configuration " + "should be specified.") + + def validate_focal_point_type(self, clique_type): + focal_point_type = clique_type['focal_point_type'] + environment = clique_type.get('environment') + if environment: + env_match = self.read( + matches={"environment": environment, + "focal_point_type": focal_point_type}, + collection="clique_types" + ) + if env_match: + 
self.bad_request("Clique type with focal point {} " + "is already registered for environment {}" + .format(focal_point_type, environment)) + else: + pass + + def validate_duplicate_configuration(self, clique_type): + if clique_type.get('environment'): + return + + search = {'focal_point_type': clique_type['focal_point_type']} + for field in ['distribution', 'mechanism_drivers', 'type_drivers']: + value = clique_type.get(field) + if value: + search[field] = value + if field == 'distribution': + dv = clique_type.get('distribution_version') + if dv: + search['distribution_version'] = dv + # Got a match with higher score, no need to look further + break + + env_match = self.read(matches=search, + collection="clique_types") + if env_match: + self.bad_request("Clique type with configuration '{}' " + "is already registered" + .format(search)) diff --git a/app/api/responders/resource/environment_configs.py b/app/api/responders/resource/environment_configs.py index c24aec8..76cc8a9 100644 --- a/app/api/responders/resource/environment_configs.py +++ b/app/api/responders/resource/environment_configs.py @@ -13,7 +13,6 @@ from api.responders.responder_base import ResponderBase from bson.objectid import ObjectId from datetime import datetime from utils.constants import EnvironmentFeatures -from utils.inventory_mgr import InventoryMgr class EnvironmentConfigs(ResponderBase): @@ -27,9 +26,13 @@ class EnvironmentConfigs(ResponderBase): "distribution": True } CONFIGURATIONS_NAMES = ["mysql", "OpenStack", "CLI", "AMQP", - "Monitoring", "NFV_provider", "ACI"] - OPTIONAL_CONFIGURATIONS_NAMES = ["AMQP", "Monitoring", - "NFV_provider", "ACI"] + "Monitoring", "NFV_provider", "ACI", + "Kubernetes", "VMware", "Bare-metal"] + REQUIRED_CONFIGURATIONS_NAMES = { + "OpenStack": ["OpenStack", "mysql", "CLI"], + "Kubernetes": ["Kubernetes", "CLI"], + } + DEFAULT_ENV_TYPE = "OpenStack" def __init__(self): super().__init__() @@ -49,6 +52,8 @@ class EnvironmentConfigs(ResponderBase): 
get_constants_by_name("environment_operational_status") self.type_drivers = self.\ get_constants_by_name("type_drivers") + self.environment_types = self.\ + get_constants_by_name("environment_types") self.CONFIGURATIONS_REQUIREMENTS = { "mysql": { @@ -108,6 +113,7 @@ class EnvironmentConfigs(ResponderBase): }, "Monitoring": { "name": self.require(str, mandatory=True), + "install_monitoring_client": self.require(bool), "config_folder": self.require(str, mandatory=True, validate=DataValidate.REGEX, @@ -169,6 +175,20 @@ class EnvironmentConfigs(ResponderBase): requirement=[regex.IP, regex.HOSTNAME]), "user": self.require(str, mandatory=True), "pwd": self.require(str, mandatory=True) + }, + "Kubernetes": { + "name": self.require(str, mandatory=True), + "host": self.require(str, + mandatory=True, + validate=DataValidate.REGEX, + requirement=[regex.IP, regex.HOSTNAME]), + "port": self.require(int, + mandatory=True, + convert_to_type=True, + validate=DataValidate.REGEX, + requirement=regex.PORT), + "user": self.require(str, mandatory=True), + "token": self.require(str, mandatory=True) } } self.AUTH_REQUIREMENTS = { @@ -201,6 +221,9 @@ class EnvironmentConfigs(ResponderBase): "operational": self.require(str, validate=DataValidate.LIST, requirement=self.operational_values), + "environment_type": self.require(str, + validate=DataValidate.LIST, + requirement=self.environment_types), "page": self.require(int, convert_to_type=True), "page_size": self.require(int, convert_to_type=True) } @@ -223,7 +246,8 @@ class EnvironmentConfigs(ResponderBase): query = {} filters_keys = ["name", "distribution", "distribution_version", "type_drivers", "user", "listen", - "monitoring_setup_done", "scanned", "operational"] + "monitoring_setup_done", "scanned", "operational", + "environment_type"] self.update_query_with_filters(filters, filters_keys, query) mechanism_drivers = filters.get("mechanism_drivers") if mechanism_drivers: @@ -272,16 +296,26 @@ class EnvironmentConfigs(ResponderBase): 
"enable_monitoring": self.require(bool, convert_to_type=True), "monitoring_setup_done": self.require(bool, convert_to_type=True), "auth": self.require(dict), - "aci_enabled": self.require(bool, convert_to_type=True) + "aci_enabled": self.require(bool, convert_to_type=True), + "environment_type": self.require(str, + validate=DataValidate.LIST, + requirement=self.environment_types), } self.validate_query_data(env_config, environment_config_requirement, - can_be_empty_keys=["last_scanned"] - ) + can_be_empty_keys=["last_scanned", + "environment_type"]) self.check_and_convert_datetime("last_scanned", env_config) + # validate the configurations + environment_type = env_config.get("environment_type") + if not environment_type: + environment_type = self.DEFAULT_ENV_TYPE configurations = env_config['configuration'] - config_validation = self.validate_environment_config(configurations) + config_validation = ( + self.validate_environment_config(configurations=configurations, + environment_type=environment_type) + ) if not config_validation['passed']: self.bad_request(config_validation['error_message']) @@ -310,12 +344,11 @@ class EnvironmentConfigs(ResponderBase): .format(env_config["name"])}, "201") - def validate_environment_config(self, configurations, + def validate_environment_config(self, configurations, environment_type=None, require_mandatory=True): configurations_of_names = {} validation = {"passed": True} - if [config for config in configurations - if 'name' not in config]: + if any('name' not in config for config in configurations): validation['passed'] = False validation['error_message'] = "configuration must have name" return validation @@ -338,12 +371,19 @@ class EnvironmentConfigs(ResponderBase): "configuration for {0}".format(name) return validation configurations_of_names[name] = configs[0] - elif require_mandatory: - if name not in self.OPTIONAL_CONFIGURATIONS_NAMES: - validation["passed"] = False - validation['error_message'] = "configuration for {0} " \ - 
"is mandatory".format(name) - return validation + + if require_mandatory: + required_list = ( + self.REQUIRED_CONFIGURATIONS_NAMES.get(environment_type, []) + ) + if any(required_conf not in configurations_of_names + for required_conf + in required_list): + validation["passed"] = False + validation['error_message'] = ("configurations for ({}) " + "are mandatory for " + "this environment type" + .format(", ".join(required_list))) for name, config in configurations_of_names.items(): error_message = self.validate_configuration(name, config) diff --git a/app/api/responders/responder_base.py b/app/api/responders/responder_base.py index e59f4cf..0ac08d6 100644 --- a/app/api/responders/responder_base.py +++ b/app/api/responders/responder_base.py @@ -71,7 +71,7 @@ class ResponderBase(DataValidate, DictNamingConverter): def validate_query_data(self, data, data_requirements, additional_key_reg=None, - can_be_empty_keys=[]): + can_be_empty_keys=None): error_message = self.validate_data(data, data_requirements, additional_key_reg, can_be_empty_keys) @@ -197,7 +197,9 @@ class ResponderBase(DataValidate, DictNamingConverter): ': no "value" key for data: ' + str(d)) return consts - def read(self, collection, matches={}, projection=None, skip=0, limit=1000): + def read(self, collection, matches=None, projection=None, skip=0, limit=1000): + if matches is None: + matches = {} collection = self.get_collection_by_name(collection) skip *= limit query = collection.find(matches, projection).skip(skip).limit(limit) diff --git a/app/api/validation/data_validate.py b/app/api/validation/data_validate.py index 6928c4b..4dfb214 100644 --- a/app/api/validation/data_validate.py +++ b/app/api/validation/data_validate.py @@ -75,7 +75,9 @@ class DataValidate: def validate_data(self, data, requirements, additional_key_re=None, - can_be_empty_keys=[]): + can_be_empty_keys=None): + if can_be_empty_keys is None: + can_be_empty_keys = [] illegal_keys = [key for key in data.keys() if key not in
requirements.keys()] diff --git a/app/config/link_finders.json b/app/config/link_finders.json index 55c31f6..b421ee9 100644 --- a/app/config/link_finders.json +++ b/app/config/link_finders.json @@ -7,6 +7,7 @@ "FindLinksForVconnectors", "FindLinksForVedges", "FindLinksForVserviceVnics", - "FindLinksForPnics" + "FindLinksForPnics", + "FindImplicitLinks" ] }
\ No newline at end of file diff --git a/app/config/scanners.json b/app/config/scanners.json index c5efb06..a96029a 100644 --- a/app/config/scanners.json +++ b/app/config/scanners.json @@ -36,7 +36,8 @@ "types_name": "regions", "parent_type": "environment" }, - "children_scanner": "ScanRegionsRoot" + "children_scanner": "ScanRegionsRoot", + "environment_condition": {"environment_type": "OpenStack"} }, { "type": "projects_folder", @@ -45,7 +46,20 @@ "types_name": "projects", "parent_type": "environment" }, - "children_scanner": "ScanProjectsRoot" + "children_scanner": "ScanProjectsRoot", + "environment_condition": {"environment_type": "OpenStack"} + }, + { + "type": "namespaces_folder", + "fetcher": { + "folder": true, + "types_name": "namespaces", + "parent_type": "environment" + }, + "children_scanner": "ScanNamespacesRoot", + "environment_condition": { + "environment_type": "Kubernetes" + } } ], "ScanHostNetworkAgentsRoot": [ @@ -377,6 +391,13 @@ "type": "vservice", "fetcher": "CliFetchHostVservices" } + ], + "ScanNamespacesRoot": [ + { + "type": "namespace", + "fetcher": "KubeFetchNamespaces", + "environment_condition": {"environment_type": "Kubernetes"} + } ] } } diff --git a/app/connection_test/connection_test.py b/app/connection_test/connection_test.py deleted file mode 100644 index d9d6af7..0000000 --- a/app/connection_test/connection_test.py +++ /dev/null @@ -1,283 +0,0 @@ -############################################################################### -# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) # -# and others # -# # -# All rights reserved. 
This program and the accompanying materials # -# are made available under the terms of the Apache License, Version 2.0 # -# which accompanies this distribution, and is available at # -# http://www.apache.org/licenses/LICENSE-2.0 # -############################################################################### -import argparse -import datetime -from kombu import Connection - -import time - -import pymongo -from functools import partial - -from discover.fetchers.api.api_access import ApiAccess -from discover.fetchers.db.db_access import DbAccess -from discover.manager import Manager -from utils.constants import ConnectionTestStatus, ConnectionTestType -from utils.logging.file_logger import FileLogger -from utils.mongo_access import MongoAccess -from utils.ssh_connection import * - - -def test_openstack(config, test_request): - try: - api = ApiAccess(config) - ConnectionTest.report_success(test_request, - ConnectionTestType.OPENSTACK.value) - if api: - pass - except ValueError: - pass - - -def test_mysql(config, test_request): - db_access = DbAccess(config) - ConnectionTest.report_success(test_request, ConnectionTestType.MYSQL.value) - if db_access: - pass - - -def test_ssh_connect(config) -> bool: - ssh = SshConnection(config.get('host', ''), - config.get('user', ''), - _pwd=config.get('pwd'), - _key=config.get('key'), - _port=int(config.get('port', - SshConnection.DEFAULT_PORT))) - ret = ssh.connect() - return ret - - -def test_cli(config, test_request): - ret = test_ssh_connect(config) - ConnectionTest.set_test_result(test_request, - ConnectionTestType.CLI.value, - ret) - - -def test_amqp_connect(config): - connect_url = 'amqp://{user}:{pwd}@{host}:{port}//' \ - .format(user=config.get("user", ''), - pwd=config.get('pwd', ''), - host=config.get('host', ''), - port=int(config.get('port', 5671))) - conn = Connection(connect_url) - conn.connect() - - -def test_amqp(config, test_request): - test_amqp_connect(config) - ConnectionTest.report_success(test_request, 
ConnectionTestType.AMQP.value) - - -def test_monitoring(config, test_request): - # for monitoring configuration test, need to test: - # 1. SSH access - # 2. RabbitMQ access - ssh_config = { - 'host': config.get('server_ip'), - 'user': config.get('ssh_user'), - 'pwd': config.get('ssh_password'), - 'port': int(config.get('ssh_port', 0)) - } - if not test_ssh_connect(ssh_config): - return - amqp_connect_config = { - 'user': config.get('rabbitmq_user', ''), - 'pwd': config.get('rabbitmq_pass', ''), - 'host': config.get('server_ip'), - 'port': int(config.get('rabbitmq_port', 5672)), - } - test_amqp_connect(amqp_connect_config) - ConnectionTest.report_success(test_request, ConnectionTestType.AMQP.value) - - -def test_aci(config, test_request): - pass - - -TEST_HANDLERS = { - ConnectionTestType.OPENSTACK.value: test_openstack, - ConnectionTestType.MYSQL.value: test_mysql, - ConnectionTestType.CLI.value: test_cli, - ConnectionTestType.AMQP.value: test_amqp, - ConnectionTestType.ACI.value: test_aci, - ConnectionTestType.MONITORING.value: test_monitoring -} - - -class ConnectionTest(Manager): - - DEFAULTS = { - 'mongo_config': '', - 'connection_tests': 'connection_tests', - 'environments': 'environments_config', - 'interval': 1, - 'loglevel': 'WARNING' - } - - def __init__(self): - self.args = self.get_args() - super().__init__(log_directory=self.args.log_directory, - mongo_config_file=self.args.mongo_config) - self.db_client = None - self.connection_tests_collection = None - self.environments_collection = None - - @staticmethod - def get_args(): - parser = argparse.ArgumentParser() - parser.add_argument('-m', '--mongo_config', nargs='?', type=str, - default=ConnectionTest.DEFAULTS['mongo_config'], - help='Name of config file ' + - 'with MongoDB server access details') - parser.add_argument('-c', '--connection_tests_collection', nargs='?', - type=str, - default=ConnectionTest.DEFAULTS['connection_tests'], - help='connection_tests collection to read from') - 
parser.add_argument('-e', '--environments_collection', nargs='?', - type=str, - default=ConnectionTest.DEFAULTS['environments'], - help='Environments collection to update ' - 'after tests') - parser.add_argument('-i', '--interval', nargs='?', type=float, - default=ConnectionTest.DEFAULTS['interval'], - help='Interval between collection polls' - '(must be more than {} seconds)' - .format(ConnectionTest.MIN_INTERVAL)) - parser.add_argument('-l', '--loglevel', nargs='?', type=str, - default=ConnectionTest.DEFAULTS['loglevel'], - help='Logging level \n(default: {})' - .format(ConnectionTest.DEFAULTS['loglevel'])) - parser.add_argument('-d', '--log_directory', nargs='?', type=str, - default=FileLogger.LOG_DIRECTORY, - help='File logger directory \n(default: {})' - .format(FileLogger.LOG_DIRECTORY)) - args = parser.parse_args() - return args - - def configure(self): - self.db_client = MongoAccess() - self.connection_tests_collection = \ - self.db_client.db[self.args.connection_tests_collection] - self.environments_collection = \ - self.db_client.db[self.args.environments_collection] - self._update_document = \ - partial(MongoAccess.update_document, - self.connection_tests_collection) - self.interval = max(self.MIN_INTERVAL, self.args.interval) - self.log.set_loglevel(self.args.loglevel) - - self.log.info('Started ConnectionTest with following configuration:\n' - 'Mongo config file path: {0.args.mongo_config}\n' - 'connection_tests collection: ' - '{0.connection_tests_collection.name}\n' - 'Polling interval: {0.interval} second(s)' - .format(self)) - - def _build_test_args(self, test_request: dict): - args = { - 'mongo_config': self.args.mongo_config - } - - def set_arg(name_from: str, name_to: str = None): - if name_to is None: - name_to = name_from - val = test_request.get(name_from) - if val: - args[name_to] = val - - set_arg('object_id', 'id') - set_arg('log_level', 'loglevel') - set_arg('environment', 'env') - set_arg('scan_only_inventory', 'inventory_only') - 
set_arg('scan_only_links', 'links_only') - set_arg('scan_only_cliques', 'cliques_only') - set_arg('inventory') - set_arg('clear') - set_arg('clear_all') - - return args - - def _finalize_test(self, test_request: dict): - # update the status and timestamps. - self.log.info('Request {} has been tested.' - .format(test_request['_id'])) - start_time = test_request['submit_timestamp'] - end_time = datetime.datetime.utcnow() - test_request['response_timestamp'] = end_time - test_request['response_time'] = \ - str(end_time - start_time.replace(tzinfo=None)) - test_request['status'] = ConnectionTestStatus.RESPONSE.value - self._update_document(test_request) - - @staticmethod - def set_test_result(test_request, target, result): - test_request.get('test_results', {})[target] = result - - @staticmethod - def report_success(test_request, target): - ConnectionTest.set_test_result(test_request, target, True) - - @staticmethod - def handle_test_target(target, test_request): - targets_config = test_request.get('targets_configuration', []) - try: - config = next(t for t in targets_config if t['name'] == target) - except StopIteration: - raise ValueError('failed to find {} in targets_configuration' - .format(target)) - handler = TEST_HANDLERS.get(target) - if not handler: - raise ValueError('unknown test target: {}'.format(target)) - handler(config, test_request) - - def do_test(self, test_request): - targets = [t for t in test_request.get('test_targets', [])] - test_request['test_results'] = {t: False for t in targets} - for test_target in test_request.get('test_targets', []): - self.log.info('testing connection to: {}'.format(test_target)) - try: - self.handle_test_target(test_target, test_request) - except Exception as e: - self.log.exception(e) - if 'errors' not in test_request: - test_request['errors'] = {} - test_request['errors'][test_target] = str(e) - self.log.error('Test of target {} failed (id: {}):\n{}' - .format(test_target, - test_request['_id'], - str(e))) - 
self._finalize_test(test_request) - self._set_env_operational(test_request['environment']) - - # if environment_config document for this specific environment exists, - # update the value of the 'operational' field to 'running' - def _set_env_operational(self, env): - self.environments_collection. \ - update_one({'name': env}, {'$set': {'operational': 'running'}}) - - def do_action(self): - while True: - # Find a pending request that is waiting the longest time - results = self.connection_tests_collection \ - .find({'status': ConnectionTestStatus.REQUEST.value, - 'submit_timestamp': {'$ne': None}}) \ - .sort('submit_timestamp', pymongo.ASCENDING) \ - .limit(1) - - # If no connection tests are pending, sleep for some time - if results.count() == 0: - time.sleep(self.interval) - else: - self.do_test(results[0]) - - -if __name__ == '__main__': - ConnectionTest().run() diff --git a/app/discover/clique_finder.py b/app/discover/clique_finder.py index 57b2e3b..4d68eb4 100644 --- a/app/discover/clique_finder.py +++ b/app/discover/clique_finder.py @@ -48,61 +48,53 @@ class CliqueFinder(Fetcher): self.find_cliques_for_type(clique_type) self.log.info("finished scanning for cliques") - # Calculate priority score + # Calculate priority score for clique type per environment and configuration def _get_priority_score(self, clique_type): + # environment-specific clique type takes precedence if self.env == clique_type['environment']: + return 16 + if (self.env_config['distribution'] == clique_type.get('distribution') + and + self.env_config['distribution_version'] == + clique_type.get('distribution_version')): + return 8 + if clique_type.get('mechanism_drivers') \ + in self.env_config['mechanism_drivers']: return 4 - if (self.env_config['distribution'] == clique_type.get('distribution') and - self.env_config['distribution_version'] == clique_type.get('distribution_version')): - return 3 - if clique_type.get('mechanism_drivers') in self.env_config['mechanism_drivers']: - return 2 if 
self.env_config['type_drivers'] == clique_type.get('type_drivers'): + return 2 + if clique_type.get('environment', '') == 'ANY': + # environment=ANY serves as fallback option, but it's not mandatory return 1 else: return 0 # Get clique type with max priority - # for given environment configuration and focal point type - def _get_clique_type(self, focal_point, clique_types): - # If there's no configuration match for the specified environment, - # we use the default clique type definition with environment='ANY' - fallback_type = next( - filter(lambda t: t['environment'] == 'ANY', clique_types), - None - ) - if not fallback_type: - raise ValueError("No fallback clique type (ANY) " - "defined for focal point type '{}'" - .format(focal_point)) - - clique_types.remove(fallback_type) - - priority_scores = [self._get_priority_score(clique_type) - for clique_type - in clique_types] - max_score = max(priority_scores) if priority_scores else 0 - - return (fallback_type - if max_score == 0 - else clique_types[priority_scores.index(max_score)]) + # for given focal point type + def _get_clique_type(self, clique_types): + scored_clique_types = [{'score': self._get_priority_score(clique_type), + 'clique_type': clique_type} + for clique_type in clique_types] + max_score = max(scored_clique_types, key=lambda t: t['score']) + if max_score['score'] == 0: + self.log.warn('No matching clique types for focal point type: {}' + .format(clique_types[0].get('focal_point_type'))) + return None + return max_score.get('clique_type') def get_clique_types(self): if not self.clique_types_by_type: - clique_types_by_focal_point = self.clique_types.aggregate([{ - "$group": { - "_id": "$focal_point_type", - "types": {"$push": "$$ROOT"} - } - }]) - - self.clique_types_by_type = { - cliques['_id']: self._get_clique_type(cliques['_id'], - cliques['types']) - for cliques in - clique_types_by_focal_point - } - + clique_types_candidates = {} + for clique in self.clique_types.find({}): + fp_type = 
clique.get('focal_point_type', '') + if not clique_types_candidates.get(fp_type): + clique_types_candidates[fp_type] = [] + clique_types_candidates[fp_type].append(clique) + for t in clique_types_candidates.keys(): + selected = self._get_clique_type(clique_types_candidates[t]) + if not selected: + continue + self.clique_types_by_type[t] = selected return self.clique_types_by_type def find_cliques_for_type(self, clique_type): @@ -125,11 +117,14 @@ class CliqueFinder(Fetcher): .find_one({"focal_point_type": o['type']}) constraints = [] if not constraint else constraint["constraints"] clique_types = self.get_clique_types() - clique_type = clique_types[o['type']] - new_clique = self.construct_clique_for_focal_point(o, clique_type, - constraints) - if not new_clique: + clique_type = clique_types.get(o['type']) + if not clique_type: self.cliques.delete({'_id': clique['_id']}) + else: + new_clique = self.construct_clique_for_focal_point(o, clique_type, + constraints) + if not new_clique: + self.cliques.delete({'_id': clique['_id']}) def construct_clique_for_focal_point(self, o, clique_type, constraints): # keep a hash of nodes in clique that were visited for each type @@ -146,12 +141,15 @@ class CliqueFinder(Fetcher): for c in constraints: val = o[c] if c in o else None clique["constraints"][c] = val + allow_implicit = clique_type.get('use_implicit_links', False) for link_type in clique_type["link_types"]: - self.check_link_type(clique, link_type, nodes_of_type) + self.check_link_type(clique, link_type, nodes_of_type, + allow_implicit=allow_implicit) # after adding the links to the clique, create/update the clique if not clique["links"]: return None + clique["clique_type"] = clique_type["_id"] focal_point_obj = self.inventory.find({"_id": clique["focal_point"]}) if not focal_point_obj: return None @@ -198,25 +196,32 @@ class CliqueFinder(Fetcher): '-'.join(link_type_parts) return CliqueFinder.link_type_reversed.get(link_type) - def check_link_type(self, clique, link_type, 
nodes_of_type): + def check_link_type(self, clique, link_type, nodes_of_type, + allow_implicit=False): # check if it's backwards link_type_reversed = self.get_link_type_reversed(link_type) # handle case of links like T<-->T self_linked = link_type == link_type_reversed use_reversed = False if not self_linked: - matches = self.links.find_one({ + link_search_condition = { "environment": self.env, "link_type": link_type_reversed - }) + } + if not allow_implicit: + link_search_condition['implicit'] = False + matches = self.links.find_one(link_search_condition) use_reversed = True if matches else False if self_linked or not use_reversed: - self.check_link_type_forward(clique, link_type, nodes_of_type) + self.check_link_type_forward(clique, link_type, nodes_of_type, + allow_implicit=allow_implicit) if self_linked or use_reversed: - self.check_link_type_back(clique, link_type, nodes_of_type) + self.check_link_type_back(clique, link_type, nodes_of_type, + allow_implicit=allow_implicit) def check_link_type_for_direction(self, clique, link_type, nodes_of_type, - is_reversed=False): + is_reversed=False, + allow_implicit=False): if is_reversed: link_type = self.get_link_type_reversed(link_type) from_type = link_type[:link_type.index("-")] @@ -233,7 +238,8 @@ class CliqueFinder(Fetcher): clique, link_type, side_to_match, - other_side) + other_side, + allow_implicit=allow_implicit) nodes_to_add = nodes_to_add | matches if other_side_type not in nodes_of_type: nodes_of_type[other_side_type] = set() @@ -241,13 +247,17 @@ class CliqueFinder(Fetcher): nodes_of_type[other_side_type] | nodes_to_add def find_matches_for_point(self, match_point, clique, link_type, - side_to_match, other_side) -> set: + side_to_match, other_side, + allow_implicit=False) -> set: nodes_to_add = set() - matches = self.links.find({ + link_search_condition = { "environment": self.env, "link_type": link_type, side_to_match: ObjectId(match_point) - }) + } + if not allow_implicit: + 
link_search_condition['implicit'] = False + matches = self.links.find(link_search_condition) for link in matches: link_id = link["_id"] if link_id in clique["links"]: @@ -260,10 +270,14 @@ class CliqueFinder(Fetcher): nodes_to_add.add(other_side_point) return nodes_to_add - def check_link_type_forward(self, clique, link_type, nodes_of_type): + def check_link_type_forward(self, clique, link_type, nodes_of_type, + allow_implicit=False): self.check_link_type_for_direction(clique, link_type, nodes_of_type, - is_reversed=False) + is_reversed=False, + allow_implicit=allow_implicit) - def check_link_type_back(self, clique, link_type, nodes_of_type): + def check_link_type_back(self, clique, link_type, nodes_of_type, + allow_implicit=False): self.check_link_type_for_direction(clique, link_type, nodes_of_type, - is_reversed=True) + is_reversed=True, + allow_implicit=allow_implicit) diff --git a/app/discover/event_manager.py b/app/discover/event_manager.py index 4855acc..c01916c 100644 --- a/app/discover/event_manager.py +++ b/app/discover/event_manager.py @@ -113,8 +113,8 @@ class EventManager(Manager): def get_listener(self, env: str): env_config = self.inv.get_env_config(env) return (self.LISTENERS.get(env_config.get('distribution'), {}) - .get(env_config.get('distribution_version', - DefaultListener))) + .get(env_config.get('distribution_version'), + DefaultListener)) def listen_to_events(self, listener: ListenerBase, env_name: str, process_vars: dict): listener.listen({ diff --git a/app/discover/fetchers/api/api_access.py b/app/discover/fetchers/api/api_access.py index f685faf..1fca202 100644 --- a/app/discover/fetchers/api/api_access.py +++ b/app/discover/fetchers/api/api_access.py @@ -12,21 +12,18 @@ import re import requests
import time
-from discover.configuration import Configuration
-from discover.fetcher import Fetcher
+from utils.api_access_base import ApiAccessBase
from utils.string_utils import jsonify
-class ApiAccess(Fetcher):
+class ApiAccess(ApiAccessBase):
+
+ ADMIN_PORT = "35357"
+
subject_token = None
initialized = False
regions = {}
- config = None
- api_config = None
- host = ""
- base_url = ""
- admin_token = ""
tokens = {}
admin_endpoint = ""
admin_project = None
@@ -38,28 +35,19 @@ class ApiAccess(Fetcher): # identity API v2 version with admin token
def __init__(self, config=None):
- super(ApiAccess, self).__init__()
- if ApiAccess.initialized:
+ super().__init__('OpenStack', config)
+ self.base_url = "http://" + self.host + ":" + self.port
+ if self.initialized:
return
- ApiAccess.config = {'OpenStack': config} if config else Configuration()
- ApiAccess.api_config = ApiAccess.config.get("OpenStack")
- host = ApiAccess.api_config.get("host", "")
- ApiAccess.host = host
- port = ApiAccess.api_config.get("port", "")
- if not (host and port):
- raise ValueError('Missing definition of host or port ' +
- 'for OpenStack API access')
- ApiAccess.base_url = "http://" + host + ":" + port
- ApiAccess.admin_token = ApiAccess.api_config.get("admin_token", "")
- ApiAccess.admin_project = ApiAccess.api_config.get("admin_project",
- "admin")
- ApiAccess.admin_endpoint = "http://" + host + ":" + "35357"
+ ApiAccess.admin_project = self.api_config.get("admin_project", "admin")
+ ApiAccess.admin_endpoint = "http://" + self.host + ":" + self.ADMIN_PORT
token = self.v2_auth_pwd(ApiAccess.admin_project)
if not token:
raise ValueError("Authentication failed. Failed to obtain token")
else:
self.subject_token = token
+ self.initialized = True
@staticmethod
def parse_time(time_str):
@@ -95,9 +83,9 @@ class ApiAccess(Fetcher): subject_token = self.get_existing_token(project_id)
if subject_token:
return subject_token
- req_url = ApiAccess.base_url + "/v2.0/tokens"
+ req_url = self.base_url + "/v2.0/tokens"
response = requests.post(req_url, json=post_body, headers=headers,
- timeout=5)
+ timeout=self.CONNECT_TIMEOUT)
response = response.json()
ApiAccess.auth_response[project_id] = response
if 'error' in response:
@@ -120,8 +108,8 @@ class ApiAccess(Fetcher): return token_details
def v2_auth_pwd(self, project):
- user = ApiAccess.api_config["user"]
- pwd = ApiAccess.api_config["pwd"]
+ user = self.api_config["user"]
+ pwd = self.api_config["pwd"]
post_body = {
"auth": {
"passwordCredentials": {
@@ -148,23 +136,6 @@ class ApiAccess(Fetcher): auth_response = ApiAccess.auth_response.get('admin', {})
return auth_response
- def get_rel_url(self, relative_url, headers):
- req_url = ApiAccess.base_url + relative_url
- return self.get_url(req_url, headers)
-
- def get_url(self, req_url, headers):
- response = requests.get(req_url, headers=headers)
- if response.status_code != requests.codes.ok:
- # some error happened
- if "reason" in response:
- msg = ", reason: {}".format(response.reason)
- else:
- msg = ", response: {}".format(response.text)
- self.log.error("req_url: {} {}".format(req_url, msg))
- return None
- ret = response.json()
- return ret
-
def get_region_url(self, region_name, service):
if region_name not in self.regions:
return None
@@ -174,7 +145,7 @@ class ApiAccess(Fetcher): return None
orig_url = s["adminURL"]
# replace host name with the host found in config
- url = re.sub(r"^([^/]+)//[^:]+", r"\1//" + ApiAccess.host, orig_url)
+ url = re.sub(r"^([^/]+)//[^:]+", r"\1//" + self.host, orig_url)
return url
# like get_region_url(), but remove everything starting from the "/v2"
diff --git a/app/discover/fetchers/api/api_fetch_host_instances.py b/app/discover/fetchers/api/api_fetch_host_instances.py index 56cffda..bf8513a 100644 --- a/app/discover/fetchers/api/api_fetch_host_instances.py +++ b/app/discover/fetchers/api/api_fetch_host_instances.py @@ -18,7 +18,7 @@ class ApiFetchHostInstances(ApiAccess, DbAccess, metaclass=Singleton): def __init__(self): super(ApiFetchHostInstances, self).__init__() self.inv = InventoryMgr() - self.endpoint = ApiAccess.base_url.replace(":5000", ":8774") + self.endpoint = self.base_url.replace(":5000", ":8774") self.projects = None self.db_fetcher = DbFetchInstances() diff --git a/app/discover/fetchers/api/api_fetch_project_hosts.py b/app/discover/fetchers/api/api_fetch_project_hosts.py index 5b911f5..2aeb24f 100644 --- a/app/discover/fetchers/api/api_fetch_project_hosts.py +++ b/app/discover/fetchers/api/api_fetch_project_hosts.py @@ -11,9 +11,11 @@ import json from discover.fetchers.api.api_access import ApiAccess from discover.fetchers.db.db_access import DbAccess +from discover.fetchers.cli.cli_access import CliAccess +from utils.ssh_connection import SshError -class ApiFetchProjectHosts(ApiAccess, DbAccess): +class ApiFetchProjectHosts(ApiAccess, DbAccess, CliAccess): def __init__(self): super(ApiFetchProjectHosts, self).__init__() @@ -107,6 +109,7 @@ class ApiFetchProjectHosts(ApiAccess, DbAccess): s = services["nova-compute"] if s["available"] and s["active"]: self.add_host_type(doc, "Compute", az['zoneName']) + self.fetch_host_os_details(doc) return doc # fetch more details of network nodes from neutron DB agents table @@ -121,7 +124,12 @@ class ApiFetchProjectHosts(ApiAccess, DbAccess): """.format(self.neutron_db) results = self.get_objects_list(query, "") for r in results: - host = hosts[r["host"]] + host = r["host"] + if host not in hosts: + self.log.error("host from agents table not in hosts list: {}" + .format(host)) + continue + host = hosts[host] host["config"] = 
json.loads(r["configurations"]) self.add_host_type(host, "Network", '') @@ -136,9 +144,33 @@ class ApiFetchProjectHosts(ApiAccess, DbAccess): for db_row in results: doc.update(db_row) - def add_host_type(self, doc, type, zone): - if not type in doc["host_type"]: - doc["host_type"].append(type) - if type == 'Compute': + @staticmethod + def add_host_type(doc, host_type, zone): + if host_type not in doc["host_type"]: + doc["host_type"].append(host_type) + if host_type == 'Compute': doc['zone'] = zone doc['parent_id'] = zone + + def fetch_host_os_details(self, doc): + cmd = 'cat /etc/os-release && echo "ARCHITECURE=`arch`"' + try: + lines = self.run_fetch_lines(cmd, ssh_to_host=doc['host']) + except SshError as e: + self.log.error('{}: {}', cmd, str(e)) + os_attributes = {} + attributes_to_fetch = { + 'NAME': 'name', + 'VERSION': 'version', + 'ID': 'ID', + 'ID_LIKE': 'ID_LIKE', + 'ARCHITECURE': 'architecure' + } + for attr in attributes_to_fetch: + matches = [l for l in lines if l.startswith(attr + '=')] + if matches: + line = matches[0] + attr_name = attributes_to_fetch[attr] + os_attributes[attr_name] = line[line.index('=')+1:].strip('"') + if os_attributes: + doc['OS'] = os_attributes diff --git a/app/discover/fetchers/api/api_fetch_regions.py b/app/discover/fetchers/api/api_fetch_regions.py index 23a3736..4e83b01 100644 --- a/app/discover/fetchers/api/api_fetch_regions.py +++ b/app/discover/fetchers/api/api_fetch_regions.py @@ -13,7 +13,7 @@ from discover.fetchers.api.api_access import ApiAccess class ApiFetchRegions(ApiAccess):
def __init__(self):
super(ApiFetchRegions, self).__init__()
- self.endpoint = ApiAccess.base_url
+ self.endpoint = self.base_url
def get(self, regions_folder_id):
token = self.v2_auth_pwd(self.admin_project)
diff --git a/app/discover/fetchers/db/db_access.py b/app/discover/fetchers/db/db_access.py index 090ab84..5ff49d5 100644 --- a/app/discover/fetchers/db/db_access.py +++ b/app/discover/fetchers/db/db_access.py @@ -38,8 +38,7 @@ class DbAccess(Fetcher): conn = None query_count_per_con = 0 - # connection timeout set to 30 seconds, - # due to problems over long connections + # connection timeout set to 5 seconds TIMEOUT = 5 def __init__(self, mysql_config=None): @@ -47,6 +46,9 @@ class DbAccess(Fetcher): self.config = {'mysql': mysql_config} if mysql_config \ else Configuration() self.conf = self.config.get("mysql") + self.connect_timeout = int(self.conf['connect_timeout']) \ + if 'connect_timeout' in self.conf \ + else self.TIMEOUT self.connect_to_db() self.neutron_db = self.get_neutron_db_name() @@ -55,16 +57,18 @@ class DbAccess(Fetcher): return try: connector = mysql.connector - DbAccess.conn = connector.connect(host=_host, port=_port, - connection_timeout=self.TIMEOUT, - user=_user, - password=_pwd, - database=_database, - raise_on_warnings=True) + conn = connector.connect(host=_host, port=_port, + connection_timeout=self.connect_timeout, + user=_user, + password=_pwd, + database=_database, + raise_on_warnings=True) + DbAccess.conn = conn DbAccess.conn.ping(True) # auto-reconnect if necessary except Exception as e: - self.log.critical("failed to connect to MySQL DB: {}" - .format(str(e))) + msg = "failed to connect to MySQL DB: {}".format(str(e)) + self.log.critical(msg) + raise ScanError(msg) return DbAccess.query_count_per_con = 0 @@ -93,8 +97,11 @@ class DbAccess(Fetcher): DbAccess.conn = None self.conf = self.config.get("mysql") cnf = self.conf + pwd = cnf.get('pwd', '') + if not pwd: + raise ScanError('db_access: attribute pwd is missing') self.db_connect(cnf.get('host', ''), cnf.get('port', ''), - cnf.get('user', ''), cnf.get('pwd', ''), + cnf.get('user', ''), pwd, cnf.get('schema', 'nova')) @with_cursor diff --git a/app/discover/fetchers/kube/__init__.py 
b/app/discover/fetchers/kube/__init__.py new file mode 100644 index 0000000..b0637e9 --- /dev/null +++ b/app/discover/fetchers/kube/__init__.py @@ -0,0 +1,9 @@ +############################################################################### +# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) # +# and others # +# # +# All rights reserved. This program and the accompanying materials # +# are made available under the terms of the Apache License, Version 2.0 # +# which accompanies this distribution, and is available at # +# http://www.apache.org/licenses/LICENSE-2.0 # +############################################################################### diff --git a/app/discover/fetchers/kube/kube_access.py b/app/discover/fetchers/kube/kube_access.py new file mode 100644 index 0000000..38bb978 --- /dev/null +++ b/app/discover/fetchers/kube/kube_access.py @@ -0,0 +1,28 @@ +############################################################################### +# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) # +# and others # +# # +# All rights reserved. 
This program and the accompanying materials # +# are made available under the terms of the Apache License, Version 2.0 # +# which accompanies this distribution, and is available at # +# http://www.apache.org/licenses/LICENSE-2.0 # +############################################################################### +from kubernetes.client import Configuration as KubConf, CoreV1Api + +from utils.api_access_base import ApiAccessBase + + +class KubeAccess(ApiAccessBase): + + def __init__(self, config=None): + super().__init__('Kubernetes', config) + self.base_url = 'https://{}:{}'.format(self.host, self.port) + self.bearer_token = self.api_config.get('token', '') + conf = KubConf() + conf.host = self.base_url + conf.user = self.api_config.get('user') + conf.api_key_prefix['authorization'] = 'Bearer' + conf.api_key['authorization'] = self.bearer_token + conf.verify_ssl = False + self.api = CoreV1Api() + diff --git a/app/discover/fetchers/kube/kube_fetch_namespaces.py b/app/discover/fetchers/kube/kube_fetch_namespaces.py new file mode 100644 index 0000000..951ddb8 --- /dev/null +++ b/app/discover/fetchers/kube/kube_fetch_namespaces.py @@ -0,0 +1,32 @@ +############################################################################### +# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) # +# and others # +# # +# All rights reserved. 
This program and the accompanying materials # +# are made available under the terms of the Apache License, Version 2.0 # +# which accompanies this distribution, and is available at # +# http://www.apache.org/licenses/LICENSE-2.0 # +############################################################################### +from discover.fetchers.kube.kube_access import KubeAccess + + +class KubeFetchNamespaces(KubeAccess): + + def __init__(self, config=None): + super().__init__(config) + + def get(self, object_id): + namespaces = self.api.list_namespace() + return [self.get_namespace(i) for i in namespaces.items] + + @staticmethod + def get_namespace(namespace): + attrs = ['creation_timestamp', 'self_link', 'uid'] + namespace_details = { + 'name': namespace.metadata.name, + 'status': namespace.status.phase + } + namespace_details.update({x: getattr(namespace.metadata, x, '') + for x in attrs}) + namespace_details['id'] = namespace_details['uid'] + return namespace_details diff --git a/app/discover/link_finders/find_implicit_links.py b/app/discover/link_finders/find_implicit_links.py new file mode 100644 index 0000000..01eaa7b --- /dev/null +++ b/app/discover/link_finders/find_implicit_links.py @@ -0,0 +1,128 @@ +############################################################################### +# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) # +# and others # +# # +# All rights reserved. 
This program and the accompanying materials # +# are made available under the terms of the Apache License, Version 2.0 # +# which accompanies this distribution, and is available at # +# http://www.apache.org/licenses/LICENSE-2.0 # +############################################################################### +from discover.link_finders.find_links import FindLinks + + +class FindImplicitLinks(FindLinks): + + def __init__(self): + super().__init__() + self.links = [] + self.constraint_attributes = self.get_constraint_attributes() + + def add_links(self): + self.log.info('adding implicit links') + self.get_existing_links() + self.get_transitive_closure() + + def get_constraint_attributes(self) -> list: + attributes = set() + for c in self.inv.find({'environment': self.get_env()}, + collection='clique_constraints'): + for a in c['constraints']: + attributes.add(a) + return list(attributes) + + def get_existing_links(self): + self.log.info('fetching existing links') + existing_links = self.inv.find({'environment': self.get_env()}, + collection='links') + for l in existing_links: + self.links.append({'pass': 0, 'link': l}) + + def constraints_match(self, link1, link2): + if 'attributes' not in link1 or 'attributes' not in link2: + return True + attr1 = link1['attributes'] + attr2 = link2['attributes'] + for a in self.constraint_attributes: + if a in attr1 and a in attr2 and attr1[a] != attr2[a]: + return False + return True + + def links_match(self, start, dest): + if start['link_type'] == dest['link_type']: + return False # obviously we cannot make an implicit link of this + if start['source_id'] == dest['target_id']: + return False # avoid cyclic links + if not self.constraints_match(start, dest): + return False + return start['target_id'] == dest['source_id'] + + def add_matching_links(self, link, pass_no): + self.log.debug('looking for matches for link: {};{}' + .format(link['source_id'], link['target_id'])) + matches = [l for l in self.links + if l['pass'] == 0 # 
take only original links + and self.links_match(link, l['link'])] + for l in matches: + implicit = self.add_implicit_link(link, l['link']) + self.links.append({'pass': pass_no, 'link': implicit}) + return len(matches) + + def get_link_constraint_attributes(self, link1, link2) -> dict: + attributes = {} + for a in self.constraint_attributes: + # constraints_match() verified the attribute values don't conflict + if a in link1.get('attributes', {}): + attributes[a] = link1['attributes'][a] + elif a in link2.get('attributes', {}): + attributes[a] = link2['attributes'][a] + return attributes + + @staticmethod + def get_attr(attr, link1, link2): + if attr not in link1 and attr not in link2: + return None + if attr not in link1: + return link2[attr] + if attr not in link2 or link1[attr] == link2[attr]: + return link1[attr] + return None + + def add_implicit_link(self, link1, link2): + link_type_from = link1['link_type'].split('-')[0] + link_type_to = link2['link_type'].split('-')[1] + link_type = '{}-{}'.format(link_type_from, link_type_to) + link_name = '' + state = 'down' \ + if link1['state'] == 'down' or link2['state'] == 'down' \ + else 'up' + link_weight = 0 # TBD + host = self.get_attr('host', link1, link2) + switch = self.get_attr('switch', link1, link2) + extra_attributes = self.get_link_constraint_attributes(link1, link2) + self.log.debug('adding implicit link: link type: {}, from: {}, to: {}' + .format(link_type, + link1['source_id'], + link2['target_id'])) + implicit = self.create_link(self.get_env(), + link1['source'], link1['source_id'], + link2['target'], link2['target_id'], + link_type, link_name, state, link_weight, + host=host, switch=switch, + implicit=True, + extra_attributes=extra_attributes) + return implicit + + def get_transitive_closure(self): + pass_no = 1 + while True: + match_count = 0 + last_pass_links = [l for l in self.links if l['pass'] == pass_no-1] + for l in last_pass_links: + match_count += self.add_matching_links(l['link'], pass_no) + 
self.log.info('Transitive closure pass #{}: ' + 'found {} implicit links' + .format(pass_no, match_count)) + if match_count == 0: + break + pass_no += 1 + self.log.info('done adding implicit links') diff --git a/app/discover/link_finders/find_links.py b/app/discover/link_finders/find_links.py index d234479..31d39e5 100644 --- a/app/discover/link_finders/find_links.py +++ b/app/discover/link_finders/find_links.py @@ -19,6 +19,7 @@ class FindLinks(Fetcher): def create_link(self, env, source, source_id, target, target_id, link_type, link_name, state, link_weight, host=None, switch=None, + implicit=False, extra_attributes=None): if extra_attributes is None: extra_attributes = {} @@ -27,9 +28,11 @@ class FindLinks(Fetcher): link = self.inv.create_link(env, source, source_id, target, target_id, link_type, link_name, state, link_weight, + implicit=implicit, source_label=source_label, target_label=target_label, host=host, switch=switch, extra_attributes=extra_attributes) if self.inv.monitoring_setup_manager: self.inv.monitoring_setup_manager.create_setup(link) + return link diff --git a/app/discover/link_finders/find_links_for_instance_vnics.py b/app/discover/link_finders/find_links_for_instance_vnics.py index 975ab1a..1dfb818 100644 --- a/app/discover/link_finders/find_links_for_instance_vnics.py +++ b/app/discover/link_finders/find_links_for_instance_vnics.py @@ -49,6 +49,8 @@ class FindLinksForInstanceVnics(FindLinks): network_id = net['network']['id'] v['network'] = network_id self.inv.set(v) + if self.inv.monitoring_setup_manager: + self.inv.monitoring_setup_manager.create_setup(instance) break state = "up" # TBD link_weight = 0 # TBD diff --git a/app/discover/scan_manager.py b/app/discover/scan_manager.py index 6c46d47..91dd06c 100644 --- a/app/discover/scan_manager.py +++ b/app/discover/scan_manager.py @@ -219,71 +219,74 @@ class ScanManager(Manager): for interval in self.INTERVALS.keys(): self._prepare_scheduled_requests_for_interval(interval) + def 
handle_scans(self): + self._prepare_scheduled_requests() + + # Find a pending request that is waiting the longest time + results = self.scans_collection \ + .find({'status': ScanStatus.PENDING.value, + 'submit_timestamp': {'$ne': None}}) \ + .sort("submit_timestamp", pymongo.ASCENDING) \ + .limit(1) + + # If no scans are pending, sleep for some time + if results.count() == 0: + time.sleep(self.interval) + else: + scan_request = results[0] + env = scan_request.get('environment') + scan_feature = EnvironmentFeatures.SCANNING + if not self.inv.is_feature_supported(env, scan_feature): + self.log.error("Scanning is not supported for env '{}'" + .format(scan_request.get('environment'))) + self._fail_scan(scan_request) + return + + scan_request['start_timestamp'] = datetime.datetime.utcnow() + scan_request['status'] = ScanStatus.RUNNING.value + self._update_document(scan_request) + + # Prepare scan arguments and run the scan with them + try: + scan_args = self._build_scan_args(scan_request) + + self.log.info("Starting scan for '{}' environment" + .format(scan_args.get('env'))) + self.log.debug("Scan arguments: {}".format(scan_args)) + result, message = ScanController().run(scan_args) + except ScanArgumentsError as e: + self.log.error("Scan request '{id}' " + "has invalid arguments. " + "Errors:\n{errors}" + .format(id=scan_request['_id'], + errors=e)) + self._fail_scan(scan_request) + except Exception as e: + self.log.exception(e) + self.log.error("Scan request '{}' has failed." + .format(scan_request['_id'])) + self._fail_scan(scan_request) + else: + # Check is scan returned success + if not result: + self.log.error(message) + self.log.error("Scan request '{}' has failed." + .format(scan_request['_id'])) + self._fail_scan(scan_request) + return + + # update the status and timestamps. + self.log.info("Request '{}' has been scanned. 
({})" + .format(scan_request['_id'], message)) + end_time = datetime.datetime.utcnow() + scan_request['end_timestamp'] = end_time + self._complete_scan(scan_request, message) + def do_action(self): self._clean_up() try: while True: - self._prepare_scheduled_requests() - - # Find a pending request that is waiting the longest time - results = self.scans_collection \ - .find({'status': ScanStatus.PENDING.value, - 'submit_timestamp': {'$ne': None}}) \ - .sort("submit_timestamp", pymongo.ASCENDING) \ - .limit(1) - - # If no scans are pending, sleep for some time - if results.count() == 0: - time.sleep(self.interval) - else: - scan_request = results[0] - env = scan_request.get('environment') - scan_feature = EnvironmentFeatures.SCANNING - if not self.inv.is_feature_supported(env, scan_feature): - self.log.error("Scanning is not supported for env '{}'" - .format(scan_request.get('environment'))) - self._fail_scan(scan_request) - continue - - scan_request['start_timestamp'] = datetime.datetime.utcnow() - scan_request['status'] = ScanStatus.RUNNING.value - self._update_document(scan_request) - - # Prepare scan arguments and run the scan with them - try: - scan_args = self._build_scan_args(scan_request) - - self.log.info("Starting scan for '{}' environment" - .format(scan_args.get('env'))) - self.log.debug("Scan arguments: {}".format(scan_args)) - result, message = ScanController().run(scan_args) - except ScanArgumentsError as e: - self.log.error("Scan request '{id}' " - "has invalid arguments. " - "Errors:\n{errors}" - .format(id=scan_request['_id'], - errors=e)) - self._fail_scan(scan_request) - except Exception as e: - self.log.exception(e) - self.log.error("Scan request '{}' has failed." - .format(scan_request['_id'])) - self._fail_scan(scan_request) - else: - # Check is scan returned success - if not result: - self.log.error(message) - self.log.error("Scan request '{}' has failed." 
- .format(scan_request['_id'])) - self._fail_scan(scan_request) - continue - - # update the status and timestamps. - self.log.info("Request '{}' has been scanned. ({})" - .format(scan_request['_id'], message)) - end_time = datetime.datetime.utcnow() - scan_request['end_timestamp'] = end_time - self._complete_scan(scan_request, message) + self.handle_scans() finally: self._clean_up() diff --git a/app/discover/scan_metadata_parser.py b/app/discover/scan_metadata_parser.py index df27e18..8757f79 100644 --- a/app/discover/scan_metadata_parser.py +++ b/app/discover/scan_metadata_parser.py @@ -49,21 +49,28 @@ class ScanMetadataParser(MetadataParser): self.add_error('missing or empty fetcher in scanner {} type #{}' .format(scanner_name, str(type_index))) elif isinstance(fetcher, str): + error_str = None try: - module_name = ClassResolver.get_module_file_by_class_name(fetcher) + get_module = ClassResolver.get_module_file_by_class_name + module_name = get_module(fetcher) fetcher_package = module_name.split("_")[0] if package: fetcher_package = ".".join((package, fetcher_package)) - instance = ClassResolver.get_instance_of_class(package_name=fetcher_package, - module_name=module_name, - class_name=fetcher) - except ValueError: - instance = None - if not instance: + # get the fetcher qualified class but not a class instance + # instances will be created just-in-time (before fetching): + # this avoids init of access classes not needed in some envs + get_class = ClassResolver.get_fully_qualified_class + class_qualified = get_class(fetcher, fetcher_package, + module_name) + except ValueError as e: + class_qualified = None + error_str = str(e) + if not class_qualified: self.add_error('failed to find fetcher class {} in scanner {}' - ' type #{}' - .format(fetcher, scanner_name, type_index)) - scan_type[self.FETCHER] = instance + ' type #{} ({})' + .format(fetcher, scanner_name, type_index, + error_str)) + scan_type[self.FETCHER] = class_qualified elif isinstance(fetcher, dict): 
is_folder = fetcher.get('folder', False) if not is_folder: @@ -81,7 +88,6 @@ class ScanMetadataParser(MetadataParser): def validate_children_scanner(self, scanner_name: str, type_index: int, scanners: dict, scan_type: dict): - scanner = scanners[scanner_name] if 'children_scanner' in scan_type: children_scanner = scan_type.get('children_scanner') if not isinstance(children_scanner, str): diff --git a/app/discover/scanner.py b/app/discover/scanner.py index 1fbcc68..8aac40b 100644 --- a/app/discover/scanner.py +++ b/app/discover/scanner.py @@ -26,6 +26,10 @@ from utils.ssh_connection import SshError class Scanner(Fetcher): + + ENV_TYPE_OPENSTACK = 'OpenStack' + ENV_TYPE_KUBERNETES = 'Kubernetes' + config = None environment = None env = None @@ -82,16 +86,21 @@ class Scanner(Fetcher): def check_type_env(self, type_to_fetch): # check if type is to be run in this environment - if "environment_condition" not in type_to_fetch: - return True - env_cond = type_to_fetch.get("environment_condition", {}) + basic_cond = {'environment_type': self.ENV_TYPE_OPENSTACK} + env_cond = type_to_fetch.get("environment_condition", {}) \ + if "environment_condition" in type_to_fetch \ + else basic_cond if not env_cond: - return True + env_cond = basic_cond + if 'environment_type' not in env_cond: + env_cond.update(basic_cond) if not isinstance(env_cond, dict): self.log.warn('illegal environment_condition given ' 'for type {}'.format(type_to_fetch['type'])) return True conf = self.config.get_env_config() + if 'environment_type' not in conf: + conf.update(basic_cond) for attr, required_val in env_cond.items(): if attr == "mechanism_drivers": if "mechanism_drivers" not in conf: @@ -120,6 +129,9 @@ class Scanner(Fetcher): # get Fetcher instance fetcher = type_to_fetch["fetcher"] + if not isinstance(fetcher, Fetcher): + type_to_fetch['fetcher'] = fetcher() # make it an instance + fetcher = type_to_fetch["fetcher"] fetcher.set_env(self.get_env()) # get children_scanner instance @@ -254,7 +266,6 
@@ class Scanner(Fetcher): def load_link_finders_metadata(self): parser = FindLinksMetadataParser() - conf = self.config.get_env_config() finders_file = os.path.join(self.get_run_app_path(), 'config', FindLinksMetadataParser.FINDERS_FILE) diff --git a/app/install/calipso-installer.py b/app/install/calipso-installer.py index 84b10da..78bb927 100644 --- a/app/install/calipso-installer.py +++ b/app/install/calipso-installer.py @@ -292,6 +292,22 @@ def start_ui(host, dbuser, dbpassword, webport, dbport): environment=[root_url, mongo_url, LDAP_CONFIG]) +def start_test(): + name = "calipso-test" + if container_started(name): + return + print("\nstarting container {}...\n".format(name)) + image_name = "korenlev/calipso:test" + download_image(image_name) + ports = {'22/tcp': 10022} + DockerClient.containers.run(image_name, + detach=True, + name=name, + ports=ports, + restart_policy=RESTART_POLICY, + environment=[PYTHON_PATH, MONGO_CONFIG], + volumes=calipso_volume) + # check and stop a calipso container by given name def container_stop(container_name): if not container_started(container_name, print_message=False): @@ -395,7 +411,7 @@ else: container = "" action = "" -container_names = ["calipso-ui", "calipso-scan", "calipso-listen", +container_names = ["calipso-ui", "calipso-scan", "calipso-test", "calipso-listen", "calipso-ldap", "calipso-api", "calipso-sensu", "calipso-mongo"] container_actions = ["stop", "start"] while action not in container_actions: @@ -460,6 +476,9 @@ if action == "start": if container == "calipso-scan" or container == "all": start_scan() time.sleep(1) + if container == "calipso-test" or container == "all": + start_test() + time.sleep(1) if container == "calipso-sensu" or container == "all": start_sensu(args.uchiwaport, args.sensuport, args.rabbitport, args.rabbitmport) time.sleep(1) diff --git a/app/install/db/clique_types.json b/app/install/db/clique_types.json index 77e2d7d..624de70 100644 --- a/app/install/db/clique_types.json +++ 
b/app/install/db/clique_types.json @@ -26,6 +26,23 @@ ], "name" : "vservice" }, +{ + "environment" : "config_based_example", + "focal_point_type" : "vservice", + "link_types" : [ + "vservice-vnic", + "vnic-vedge", + "vedge-otep", + "otep-vconnector", + "vconnector-host_pnic", + "host_pnic-network" + ], + "name" : "vservice_config_based", + "distribution" : "Mirantis", + "distribution_version" : "6.0", + "mechanism_drivers" : "OVS", + "type_drivers" : "vxlan" +}, { "environment" : "ANY", "focal_point_type" : "network", @@ -135,5 +152,14 @@ "vnic-vservice" ], "name" : "network" +}, +{ + "name" : "instance", + "use_implicit_links" : true, + "link_types" : [ + "instance-network" + ], + "environment" : "implicit-links-ex", + "focal_point_type" : "instance" } ] diff --git a/app/install/db/constants.json b/app/install/db/constants.json index 6912eeb..8ea89e9 100644 --- a/app/install/db/constants.json +++ b/app/install/db/constants.json @@ -58,6 +58,27 @@ ], "name" : "log_levels" }, +{ + "data" : [ + { + "label" : "OpenStack", + "value" : "OpenStack" + }, + { + "label" : "Kubernetes", + "value" : "Kubernetes" + }, + { + "label" : "VMware", + "value" : "VMware" + }, + { + "label" : "Bare-metal", + "value" : "Bare-metal" + } + ], + "name" : "environment_types" +}, { "data" : [ { @@ -530,6 +551,10 @@ "label" : "10239" }, { + "label" : "10307", + "value" : "10307" + }, + { "value" : "10918", "label" : "10918" }, @@ -727,6 +752,14 @@ { "label" : "switch", "value" : "switch" + }, + { + "value" : "namespace", + "label" : "namespace" + }, + { + "value" : "namespaces_folder", + "label" : "namespaces_folder" } ] }, diff --git a/app/install/db/environments_config.json b/app/install/db/environments_config.json index d7157e7..80bc6aa 100644 --- a/app/install/db/environments_config.json +++ b/app/install/db/environments_config.json @@ -37,7 +37,8 @@ "server_name" : "sensu_server", "env_type" : "production", "provision" : "None", - "name" : "Monitoring", + "name" : "Monitoring", + 
"install_monitoring_client": false, "ssh_port" : "20022", "rabbitmq_pass" : "dummy_pwd", "ssh_password" : "dummy_pwd", @@ -55,7 +56,7 @@ } ], "enable_monitoring" : true, - "name" : "DEMO-ENVIRONMENT-SCHEME", + "name" : "DEMO-OpenStack", "distribution" : "Mirantis", "distribution_version" : "8.0", "last_scanned" : "filled-by-scanning", @@ -74,6 +75,93 @@ "wNLeBJxNDyw8G7Ssg" ] }, - "type" : "environment" + "type" : "environment", + "environment_type" : "OpenStack" +}, +{ + "user" : "wNLeBJxNDyw8G7Ssg", + "name" : "DEMO-Kubernetes", + "last_scanned" : "filled-by-scanning", + "auth" : { + "view-env" : [ + "wNLeBJxNDyw8G7Ssg" + ], + "edit-env" : [ + "wNLeBJxNDyw8G7Ssg" + ] + }, + "type_drivers" : "vxlan", + "distribution_version" : "8.0", + "enable_monitoring" : true, + "operational" : "stopped", + "mechanism_drivers" : [ + "OVS" + ], + "type" : "environment", + "distribution" : "Mirantis", + "listen" : true, + "configuration" : [ + { + "user" : "adminuser", + "name" : "OpenStack", + "pwd" : "dummy_pwd", + "host" : "10.0.0.1", + "admin_token" : "dummy_token", + "port" : "5000" + }, + { + "host" : "10.56.20.78", + "name" : "Kubernetes", + "user" : "koren", + "token" : "baba-token-xyz", + "port" : "6443" + }, + { + "user" : "mysqluser", + "name" : "mysql", + "pwd" : "dummy_pwd", + "port" : "3307", + "host" : "10.0.0.1" + }, + { + "user" : "sshuser", + "name" : "CLI", + "pwd" : "dummy_pwd", + "host" : "10.0.0.1" + }, + { + "user" : "rabbitmquser", + "name" : "AMQP", + "pwd" : "dummy_pwd", + "port" : "5673", + "host" : "10.0.0.1" + }, + { + "name" : "Monitoring", + "install_monitoring_client": false, + "api_port" : 4567, + "ssh_port" : "20022", + "rabbitmq_pass" : "dummy_pwd", + "env_type" : "production", + "rabbitmq_port" : "5671", + "server_ip" : "10.0.0.1", + "config_folder" : "/local_dir/sensu_config", + "type" : "Sensu", + "provision" : "None", + "ssh_user" : "root", + "ssh_password" : "dummy_pwd", + "rabbitmq_user" : "sensu", + "server_name" : "sensu_server" + }, + { 
+ "user" : "admin", + "name" : "ACI", + "pwd" : "dummy_pwd", + "host" : "10.1.1.104" + } + ], + "app_path" : "/home/scan/calipso_prod/app", + "scanned" : false, + "environment_type" : "Kubernetes" } ] diff --git a/app/install/db/monitoring_config_templates.json b/app/install/db/monitoring_config_templates.json index 9bddfa2..b5c47df 100644 --- a/app/install/db/monitoring_config_templates.json +++ b/app/install/db/monitoring_config_templates.json @@ -311,6 +311,34 @@ "type" : "client_check_link_vnic-vconnector.json"
},
{
+ "side" : "client",
+ "order" : "1",
+ "condition" : {
+ "mechanism_drivers" : [
+ "OVS"
+ ]
+ },
+ "config" : {
+ "checks" : {
+ "{objtype}_{objid}" : {
+ "interval" : 15,
+ "command" : "check_vconnector_ovs.py {name}",
+ "standalone" : true,
+ "type": "metric",
+ "subscribers" : [
+ "base"
+ ],
+ "handlers" : [
+ "file",
+ "osdna-monitor"
+ ]
+ }
+ }
+ },
+ "monitoring_system" : "sensu",
+ "type" : "client_check_vconnector_ovs.json"
+},
+{
"side" : "client",
"order" : "1",
"condition" : {
@@ -394,5 +422,28 @@ },
"monitoring_system" : "sensu",
"type" : "client_check_vservice.json"
+},
+{
+ "side" : "client",
+ "order" : "1",
+ "config" : {
+ "checks" : {
+ "{objtype}_{objid}" : {
+ "standalone" : true,
+ "interval" : 15,
+ "command" : "PYTHONPATH=/etc/sensu/plugins check_instance_communications.py {services_and_vnics}",
+ "handlers" : [
+ "file",
+ "osdna-monitor"
+ ],
+ "type" : "metric",
+ "subscribers" : [
+ "base"
+ ]
+ }
+ }
+ },
+ "monitoring_system" : "sensu",
+ "type" : "client_check_instance.json"
}
]
diff --git a/app/install/db/supported_environments.json b/app/install/db/supported_environments.json index c2c376b..baa3150 100644 --- a/app/install/db/supported_environments.json +++ b/app/install/db/supported_environments.json @@ -12,6 +12,21 @@ "monitoring" : true } }, + { + "environment" : { + "distribution_version" : [ + "10307" + ], + "distribution" : "Mercury", + "type_drivers" : "vlan", + "mechanism_drivers" : "OVS" + }, + "features" : { + "scanning" : true, + "monitoring" : false, + "listening" : true + } + }, { "environment" : { "distribution" : "Devstack", diff --git a/app/monitoring/checks/check_instance_communictions.py b/app/monitoring/checks/check_instance_communictions.py new file mode 100644 index 0000000..d3a94b7 --- /dev/null +++ b/app/monitoring/checks/check_instance_communictions.py @@ -0,0 +1,85 @@ +#!/usr/bin/env python3 +############################################################################### +# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) # +# and others # +# # +# All rights reserved. 
This program and the accompanying materials # +# are made available under the terms of the Apache License, Version 2.0 # +# which accompanies this distribution, and is available at # +# http://www.apache.org/licenses/LICENSE-2.0 # +############################################################################### + +# find status of instance network +# For each instance vNIC - take the MAC address +# For each vService in the same network as the instance, +# use local_service_id attribute in the following command in the network node: +# "ip netns exec <local_service_id> arp -n" +# look for the instance vNIC's mac_address to appear in the response +# for each mac_address: +# - if Flag 'C' = 'Complete' - mark result OK for that instance, +# - 'I' = 'Incomplete' - mark as 'warn', +# - no mac_address mark as 'error' + +import sys +import subprocess + +from binary_converter import binary2str + + +arp_headers = ['Address', 'HWtype', 'HWaddress', 'Flags', 'Mask', 'Iface'] +arp_mac_pos = arp_headers.index('HWaddress') +arp_flags_pos = arp_headers.index('Flags') + + +def check_vnic_tuple(vnic_and_service: str): + tuple_parts = vnic_and_service.split(',') + local_service_id = tuple_parts[0] + mac_address = tuple_parts[1] + check_output = None + try: + netns_cmd = 'ip netns exec {} arp -n'.format(local_service_id) + check_output = 'MAC={}, local_service_id={}\n'\ + .format(mac_address, local_service_id) + netns_out = subprocess.check_output([netns_cmd], + stderr=subprocess.STDOUT, + shell=True) + netns_out = binary2str(netns_out) + check_output += '{}\n'.format(netns_out) + netns_lines = netns_out.splitlines() + if not netns_lines or \ + netns_lines[0].endswith('No such file or directory'): + check_rc = 2 + else: + mac_found = False + flags = None + for l in netns_lines: + line_parts = l.split() + line_mac = line_parts[arp_mac_pos] + if len(line_parts) > arp_mac_pos and line_mac == mac_address: + mac_found = True + flags = line_parts[arp_flags_pos] + break + if mac_found: + 
check_rc = 1 if flags == 'I' else 0 + else: + check_rc = 2 + except subprocess.CalledProcessError as e: + check_output = str(e) + check_rc = 2 + return check_rc, check_output + + +if len(sys.argv) < 2: + print('usage: ' + sys.argv[0] + + ' <vService local_service_id>,<MAC>[;<>,<>]...') + exit(1) + +rc = 0 +output = '' +vnics = str(sys.argv[1]).split(';') +for vnic_tuple in vnics: + tuple_ret, out = check_vnic_tuple(vnic_tuple) + rc = min(rc, tuple_ret) + output += out +print(output) +exit(rc) diff --git a/app/monitoring/checks/check_vconnector.py b/app/monitoring/checks/check_vconnector.py new file mode 100644 index 0000000..237a195 --- /dev/null +++ b/app/monitoring/checks/check_vconnector.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python3 +############################################################################### +# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) # +# and others # +# # +# All rights reserved. This program and the accompanying materials # +# are made available under the terms of the Apache License, Version 2.0 # +# which accompanies this distribution, and is available at # +# http://www.apache.org/licenses/LICENSE-2.0 # +############################################################################### + +# find status of vconnector +# vconnector object name defines name of bridge +# use "brctl showmacs <bridge>", return ERROR if 'No such device' is returned + +import sys +import subprocess + +from binary_converter import binary2str + + +if len(sys.argv) < 2: + print('usage: ' + sys.argv[0] + ' <bridge>') + exit(1) +bridge_name = str(sys.argv[1]) + +rc = 0 + +cmd = None +out = '' +try: + cmd = "brctl showmacs {}".format(bridge_name) + out = subprocess.check_output([cmd], + stderr=subprocess.STDOUT, + shell=True) + out = binary2str(out) + lines = out.splitlines() + if not lines or lines[0].endswith('No such device'): + rc = 2 + else: + print(out) +except subprocess.CalledProcessError as e: + rc = 2 + out = str(e) + +if rc != 0: 
+ print('Failed to find vConnector {}:\n{}\n' + .format(bridge_name, out)) + +exit(rc) diff --git a/app/monitoring/handlers/handle_vconnector.py b/app/monitoring/handlers/handle_vconnector.py new file mode 100644 index 0000000..85ee05f --- /dev/null +++ b/app/monitoring/handlers/handle_vconnector.py @@ -0,0 +1,28 @@ +############################################################################### +# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) # +# and others # +# # +# All rights reserved. This program and the accompanying materials # +# are made available under the terms of the Apache License, Version 2.0 # +# which accompanies this distribution, and is available at # +# http://www.apache.org/licenses/LICENSE-2.0 # +############################################################################### +# handle monitoring event for pNIC objects + +from monitoring.handlers.monitoring_check_handler import MonitoringCheckHandler +from utils.special_char_converter import SpecialCharConverter + + +class HandleVconnector(MonitoringCheckHandler): + + def handle(self, obj_id, check_result): + object_id = obj_id[:obj_id.index('-')] + mac = obj_id[obj_id.index('-')+1:] + converter = SpecialCharConverter() + mac_address = converter.decode_special_characters(mac) + object_id += '-' + mac_address + doc = self.doc_by_id(object_id) + if not doc: + return 1 + self.keep_result(doc, check_result) + return check_result['status'] diff --git a/app/monitoring/handlers/monitor.py b/app/monitoring/handlers/monitor.py index 9caed74..2495110 100755 --- a/app/monitoring/handlers/monitor.py +++ b/app/monitoring/handlers/monitor.py @@ -12,11 +12,15 @@ # handle monitoring events import argparse +import datetime import json import sys +from discover.configuration import Configuration +from monitoring.handlers.monitoring_check_handler import MonitoringCheckHandler from utils.inventory_mgr import InventoryMgr from utils.mongo_access import MongoAccess +from 
utils.special_char_converter import SpecialCharConverter from utils.util import ClassResolver @@ -32,7 +36,9 @@ class Monitor: MongoAccess.set_config_file(self.args.mongo_config) self.inv = InventoryMgr() self.inv.set_collections(self.args.inventory) + self.configuration = Configuration() self.input_text = None + self.converter = SpecialCharConverter() def get_args(self): parser = argparse.ArgumentParser() @@ -125,13 +131,83 @@ class Monitor: return handler def get_handler(self, check_type, obj_type): - basic_handling_types = ['vedge', 'vservice'] + basic_handling_types = ['instance', 'vedge', 'vservice', 'vconnector'] if obj_type not in basic_handling_types: return self.get_handler_by_type(check_type, obj_type) from monitoring.handlers.basic_check_handler \ import BasicCheckHandler return BasicCheckHandler(self.args) + def check_link_interdependency_for(self, + object_id: str, + from_type: str=None, + to_type: str=None): + if from_type is not None and to_type is not None or \ + from_type is None and to_type is None: + raise ValueError('check_link_interdependency: ' + 'supply one of from_type/to_type') + obj_id = self.converter.decode_special_characters(object_id) + obj = self.inv.get_by_id(environment=self.args.env, item_id=obj_id) + if not obj: + self.inv.log.error('check_link_interdependency: ' + 'failed to find object with ID: {}' + .format(object_id)) + return + if 'status' not in obj: + return + id_attr = 'source_id' if from_type is None else 'target_id' + link_type = '{}-{}'.format( + from_type if from_type is not None else obj['type'], + to_type if to_type is not None else obj['type']) + condition = { + 'environment': self.args.env, + 'link_type': link_type, + id_attr: obj_id + } + link = self.inv.find_one(search=condition, collection='links') + if not link: + self.inv.log.error('check_link_interdependency: ' + 'failed to find {} link with {}: {}' + .format(link_type, id_attr, obj_id)) + return + other_id_attr = '{}_id' \ + .format('source' if from_type is 
not None else 'target') + other_obj = self.inv.get_by_id(environment=self.args.env, + item_id=link[other_id_attr]) + if not other_obj: + self.inv.log.error('check_link_interdependency: ' + 'failed to find {} with ID: {} (link type: {})' + .format(other_id_attr, link[other_id_attr], + link_type)) + return + if 'status' not in other_obj: + return + status = 'Warning' + if obj['status'] == 'OK' and other_obj['status'] == 'OK': + status = 'OK' + elif obj['status'] == 'OK' and other_obj['status'] == 'OK': + status = 'OK' + link['status'] = status + time_format = MonitoringCheckHandler.TIME_FORMAT + timestamp1 = obj['status_timestamp'] + t1 = datetime.datetime.strptime(timestamp1, time_format) + timestamp2 = other_obj['status_timestamp'] + t2 = datetime.datetime.strptime(timestamp2, time_format) + timestamp = max(t1, t2) + link['status_timestamp'] = datetime.datetime.strftime(timestamp, + time_format) + self.inv.set(link, self.inv.collections['links']) + + def check_link_interdependency(self, object_id: str, object_type: str): + conf = self.configuration.get_env_config() + if 'OVS' in conf['mechanism_drivers']: + if object_type == 'vedge': + self.check_link_interdependency_for(object_id, + to_type='host_pnic') + if object_type == 'host_pnic': + self.check_link_interdependency_for(object_id, + from_type='vedge') + def process_input(self): check_result_full = json.loads(self.input_text) check_client = check_result_full['client'] @@ -142,14 +218,19 @@ class Monitor: monitor.find_object_type_and_id(name) if 'environment' in check_client: self.args.env = check_client['environment'] + else: + raise ValueError('Check client should contain environment name') + self.configuration.use_env(self.args.env) check_handler = self.get_handler(check_type, object_type) if check_handler: check_handler.handle(object_id, check_result) + self.check_link_interdependency(object_id, object_type) def process_check_result(self): self.read_input() self.process_input() + monitor = Monitor() 
monitor.process_check_result() diff --git a/app/monitoring/handlers/monitoring_check_handler.py b/app/monitoring/handlers/monitoring_check_handler.py index 1436a46..c1f70fb 100644 --- a/app/monitoring/handlers/monitoring_check_handler.py +++ b/app/monitoring/handlers/monitoring_check_handler.py @@ -21,13 +21,13 @@ from utils.logging.full_logger import FullLogger from utils.special_char_converter import SpecialCharConverter from utils.string_utils import stringify_datetime -TIME_FORMAT = '%Y-%m-%d %H:%M:%S %Z' SOURCE_SYSTEM = 'Sensu' ERROR_LEVEL = ['info', 'warn', 'error'] class MonitoringCheckHandler(SpecialCharConverter): STATUS_LABEL = ['OK', 'Warning', 'Error'] + TIME_FORMAT = '%Y-%m-%d %H:%M:%S %Z' def __init__(self, args): super().__init__() @@ -61,7 +61,7 @@ class MonitoringCheckHandler(SpecialCharConverter): else status if status_text: doc['status_text'] = status_text - doc['status_timestamp'] = strftime(TIME_FORMAT, timestamp) + doc['status_timestamp'] = strftime(self.TIME_FORMAT, timestamp) if 'link_type' in doc: self.inv.write_link(doc) else: diff --git a/app/monitoring/setup/monitoring_check_handler.py b/app/monitoring/setup/monitoring_check_handler.py index c453439..d1b863d 100644 --- a/app/monitoring/setup/monitoring_check_handler.py +++ b/app/monitoring/setup/monitoring_check_handler.py @@ -8,7 +8,6 @@ # http://www.apache.org/licenses/LICENSE-2.0 # ############################################################################### from monitoring.setup.monitoring_handler import MonitoringHandler -from utils.inventory_mgr import InventoryMgr from utils.special_char_converter import SpecialCharConverter @@ -28,14 +27,13 @@ class MonitoringCheckHandler(MonitoringHandler, SpecialCharConverter): type_str = values['check_type'] if 'check_type' in values else \ (o['type'] if 'type' in o else 'link_' + o['link_type']) file_type = 'client_check_' + type_str + '.json' - host = o['host'] + host = values['host'] if 'host' in values else o['host'] sub_dir = '/host/' + 
host content = self.prepare_config_file( file_type, {'side': 'client', 'type': file_type}) # need to put this content inside client.json file client_file = 'client.json' - host = o['host'] client_file_content = self.get_config_from_db(host, client_file) # merge checks attribute from current content into client.json checks = client_file_content['config']['checks'] \ @@ -53,3 +51,14 @@ class MonitoringCheckHandler(MonitoringHandler, SpecialCharConverter): } content = client_file_content self.write_config_file(client_file, sub_dir, host, content) + + def get_check_from_db(self, o, postfix=''): + client_config = self.get_config_from_db(o. get('host', ''), + 'client.json') + if not client_config: + return {} + checks = client_config.get('config', {}).get('checks', {}) + objid = self.encode_special_characters(o.get('id', '')) + object_check_id = '{}_{}{}'.format(o.get('type'), objid, postfix) + check = checks.get(object_check_id, {}) + return check diff --git a/app/monitoring/setup/monitoring_host.py b/app/monitoring/setup/monitoring_host.py index 9450cf6..0b9f420 100644 --- a/app/monitoring/setup/monitoring_host.py +++ b/app/monitoring/setup/monitoring_host.py @@ -12,6 +12,7 @@ import os from os.path import join, sep from monitoring.setup.monitoring_handler import MonitoringHandler +from monitoring.setup.sensu_client_installer import SensuClientInstaller RABBITMQ_CONFIG_FILE = 'rabbitmq.json' RABBITMQ_CONFIG_ATTR = 'rabbitmq' @@ -27,13 +28,14 @@ class MonitoringHost(MonitoringHandler): # add monitoring setup for remote host def create_setup(self, o): + host_id = o.get('host', '') + self.install_sensu_on_host(host_id) sensu_host_files = [ 'transport.json', 'rabbitmq.json', 'client.json' ] server_ip = self.env_monitoring_config['server_ip'] - host_id = o['host'] sub_dir = join('/host', host_id) config = copy.copy(self.env_monitoring_config) env_name = self.configuration.env_name @@ -88,3 +90,10 @@ class MonitoringHost(MonitoringHandler): # this configuration requires SSL 
# keep the path of the files for later use self.fetch_ssl_files.append(path) + + def install_sensu_on_host(self, host_id): + auto_install = self.env_monitoring_config \ + .get('install_monitoring_client', False) + if auto_install: + installer = SensuClientInstaller(self.env, host_id) + installer.install() diff --git a/app/monitoring/setup/monitoring_instance.py b/app/monitoring/setup/monitoring_instance.py new file mode 100644 index 0000000..b376441 --- /dev/null +++ b/app/monitoring/setup/monitoring_instance.py @@ -0,0 +1,67 @@ +############################################################################### +# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) # +# and others # +# # +# All rights reserved. This program and the accompanying materials # +# are made available under the terms of the Apache License, Version 2.0 # +# which accompanies this distribution, and is available at # +# http://www.apache.org/licenses/LICENSE-2.0 # +############################################################################### +from monitoring.setup.monitoring_simple_object import MonitoringSimpleObject + + +class MonitoringInstance(MonitoringSimpleObject): + + def __init__(self, env): + super().__init__(env) + + # monitoring setup for instance can only be done after vNIC is found + # and network for vNIC is set, so the first call will not do anything + def create_setup(self, instance: dict): + vnics = self.inv.find_items({ + 'environment': self.get_env(), + 'type': 'vnic', + 'vnic_type': 'instance_vnic', + 'id_path': {'$regex': '^{}/'.format(instance['id_path'])} + }) + for vnic in vnics: + self.add_instance_communication_monitoring(instance, vnic) + + # for instance we keep list of instance vNICs and services to use in call + # to check_instance_communications.py + # add this vNIC to the list with the corresponding + def add_instance_communication_monitoring(self, instance: dict, vnic: dict): + service = self.get_service_for_vnic(vnic) + if not service: 
+ return + check = self.get_check_from_db(instance) + services_and_vnics = check.get('command', '') + if services_and_vnics: + services_and_vnics = \ + services_and_vnics[services_and_vnics.index('.py')+4:] + services_and_vnics_list = \ + services_and_vnics.split(';') if services_and_vnics \ + else [] + service_and_vnic = '{},{}'.format(service.get('local_service_id', ''), + vnic.get('id')) + if service_and_vnic in services_and_vnics_list: + return # we already have this tuple define + services_and_vnics_list.append(service_and_vnic) + values = { + 'objtype': 'instance', + 'objid': self.encode_special_characters(instance['id']), + 'host': service['host'], + 'services_and_vnics': ';'.join(services_and_vnics_list) + } + self.create_monitoring_for_object(instance, values) + + def get_service_for_vnic(self, vnic: dict) -> dict: + services = self.inv.find_items({'environment': self.get_env(), + 'type': 'vservice', + 'network': vnic.get('network', '')}) + if not services: + return {} + dhcp = next(s for s in services if s.get('service_type') == 'dhcp') + if dhcp: + return dhcp # If we have both DHCP and router, return the DHCP + return services[0] # currently only DHCP and router services diff --git a/app/monitoring/setup/monitoring_setup_manager.py b/app/monitoring/setup/monitoring_setup_manager.py index bc4fe01..8b7693a 100644 --- a/app/monitoring/setup/monitoring_setup_manager.py +++ b/app/monitoring/setup/monitoring_setup_manager.py @@ -11,12 +11,14 @@ from monitoring.setup.monitoring_handler import MonitoringHandler from monitoring.setup.monitoring_host import MonitoringHost +from monitoring.setup.monitoring_instance import MonitoringInstance from monitoring.setup.monitoring_link_vnic_vconnector \ import MonitoringLinkVnicVconnector from monitoring.setup.monitoring_pnic import MonitoringPnic from monitoring.setup.monitoring_otep import MonitoringOtep from monitoring.setup.monitoring_vedge import MonitoringVedge from monitoring.setup.monitoring_vnic import 
MonitoringVnic +from monitoring.setup.monitoring_vconnector import MonitoringVconnector from monitoring.setup.monitoring_vservice import MonitoringVservice @@ -31,7 +33,9 @@ class MonitoringSetupManager(MonitoringHandler): "otep": MonitoringOtep(env), "vedge": MonitoringVedge(env), "host_pnic": MonitoringPnic(env), + "instance": MonitoringInstance(env), "vnic": MonitoringVnic(env), + "vconnector": MonitoringVconnector(env), "vservice": MonitoringVservice(env), "vnic-vconnector": MonitoringLinkVnicVconnector(env)} diff --git a/app/monitoring/setup/monitoring_vconnector.py b/app/monitoring/setup/monitoring_vconnector.py new file mode 100644 index 0000000..9ddc6af --- /dev/null +++ b/app/monitoring/setup/monitoring_vconnector.py @@ -0,0 +1,24 @@ +############################################################################### +# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) # +# and others # +# # +# All rights reserved. This program and the accompanying materials # +# are made available under the terms of the Apache License, Version 2.0 # +# which accompanies this distribution, and is available at # +# http://www.apache.org/licenses/LICENSE-2.0 # +############################################################################### +from monitoring.setup.monitoring_simple_object import MonitoringSimpleObject + + +class MonitoringVconnector(MonitoringSimpleObject): + + # add monitoring setup for remote host + def create_setup(self, o): + type = 'vconnector' + env_config = self.configuration.get_env_config() + vpp_or_ovs = 'vpp' if 'VPP' in env_config['mechanism_drivers'] \ + else 'ovs' + type_str = '{}_{}'.format(type, vpp_or_ovs) + self.setup(type, o, values={'check_type': type_str, + 'name': o['name']}) + diff --git a/app/monitoring/setup/sensu_client_installer.py b/app/monitoring/setup/sensu_client_installer.py new file mode 100644 index 0000000..72a8bbb --- /dev/null +++ b/app/monitoring/setup/sensu_client_installer.py @@ -0,0 +1,158 @@ 
+############################################################################### +# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) # +# and others # +# # +# All rights reserved. This program and the accompanying materials # +# are made available under the terms of the Apache License, Version 2.0 # +# which accompanies this distribution, and is available at # +# http://www.apache.org/licenses/LICENSE-2.0 # +############################################################################### +import os.path +from pkg_resources import parse_version + +from monitoring.setup.monitoring_handler import MonitoringHandler +from utils.inventory_mgr import InventoryMgr + + +class SensuClientInstaller(MonitoringHandler): + + UBUNTU = 'ubuntu' + CENTOS = 'centos' + + INSTALL_CMD = { + UBUNTU: 'dpkg -i {}', + CENTOS: 'rpm -i {}' + } + PERMISSIONS_CMD = { + UBUNTU: '', + CENTOS: 'usermod -aG wheel sensu' + } + SUDOERS_FILE = '/etc/sudoers' + + available_downloads = {} + + def __init__(self, env: str, host_id: str): + super().__init__(env) + self.cli_ssh = self.get_ssh(host_id) + self.inv = InventoryMgr() + self.host = self.inv.get_by_id(env, host_id) + self.server = self.env_monitoring_config.get('server_ip') + self.server_cli_ssh = self.get_ssh(self.server) + self.ubuntu_dist = None + self.required_package = None + + def install(self): + pkg_to_install = self.get_pkg_to_install() + if not pkg_to_install: + return + try: + self.fetch_package(pkg_to_install) + self.install_package(pkg_to_install) + self.set_permissions() + except SystemError as e: + self.log.error('Sensu install on host {} failed: {}' + .format(self.host, str(e))) + return + + @staticmethod + def get_attr_from_output(output_lines: list, attr: str) -> str: + matches = [l for l in output_lines if l.startswith(attr)] + if not matches: + return '' + line = matches[0] + return SensuClientInstaller.get_attr_from_output_line(line) + + @staticmethod + def get_attr_from_output_line(output_line: str): 
+ val = output_line[output_line.index(':')+1:].strip() + return val + + INSTALLED = 'Installed: ' + CANDIDATE = 'Candidate: ' + SENSU_DIR = '/opt/sensu' + SENSU_PKG_DIR = '/etc/sensu/pkg' + SENSU_PKG_DIR_LOCAL = '/tmp/sensu_pkg' + SENSU_VERSION_FILE = '/opt/sensu/version-manifest.txt' + + def find_available_downloads(self): + ls_output = self.server_cli_ssh.exec('ls -R {}' + .format(self.SENSU_PKG_DIR)) + ls_lines = ls_output.splitlines() + last_target_dir = None + for line in ls_lines: + if line[-4:] in ['/32:', '/64:']: + last_target_dir = line.replace(self.SENSU_PKG_DIR, '') + continue + elif last_target_dir: + target_dir = last_target_dir.strip(os.path.sep).strip(':') + self.available_downloads[target_dir] = line + last_target_dir = None + else: + last_target_dir = None + + def find_available_package(self, os_details: dict): + if not self.available_downloads: + self.find_available_downloads() + distribution = os_details['ID'] + version = os_details['version'].split()[-2].lower() + arch = os_details['architecure'][-2:] + download_dir = os.path.join(distribution, version, arch) + download_file = self.available_downloads.get(download_dir) + full_path = '' if not download_file \ + else os.path.join(self.SENSU_PKG_DIR, download_dir, download_file) + return download_file, full_path + + @staticmethod + def find_available_version(download_file: str) -> str: + ver = download_file.replace('sensu', '').strip('-_') + ver = ver[:ver.index('-')] + return ver + + def get_pkg_to_install(self) -> str: + if self.provision == self.provision_levels['none']: + return '' + if not self.host: + return '' + supported_os = [self.UBUNTU, self.CENTOS] + distribution = self.host['OS']['ID'] + if distribution not in [self.UBUNTU, self.CENTOS]: + self.log.error('Sensu client auto-install only supported for: {}' + .format(', '.join(supported_os))) + return '' + cmd = 'if [ -d {} ]; then head -1 {} | sed "s/sensu //"; fi' \ + .format(self.SENSU_DIR, self.SENSU_VERSION_FILE) + installed_version 
= self.cli_ssh.exec(cmd).strip() + os_details = self.host['OS'] + available_pkg, pkg_path = self.find_available_package(os_details) + available_version = self.find_available_version(available_pkg) + if parse_version(available_version) <= parse_version(installed_version): + return '' + return pkg_path + + def get_local_path(self, pkg_to_install: str): + return os.path.join(self.SENSU_PKG_DIR_LOCAL, + os.path.basename(pkg_to_install)) + + def fetch_package(self, pkg_to_install: str): + self.make_directory(self.SENSU_PKG_DIR_LOCAL) + self.get_file(self.server, pkg_to_install, + self.get_local_path(pkg_to_install)) + local_path = self.get_local_path(pkg_to_install) + self.copy_to_remote_host(self.host['host'], + local_path=local_path, + remote_path=local_path) + + def install_package(self, pkg_to_install): + local_path = self.get_local_path(pkg_to_install) + install_cmd = self.INSTALL_CMD[self.host['OS']['ID']] + self.cli_ssh.exec(install_cmd.format(local_path)) + + def set_permissions(self): + cmd = self.PERMISSIONS_CMD[self.host['OS']['ID']] + if cmd: + self.cli_ssh.exec(cmd) + # add to sudoers file + sudoer_permission = 'sensu ALL=(ALL) NOPASSWD: ALL' + sudoer_cmd = 'grep --silent -w sensu {} || echo "{}" >> {}'\ + .format(self.SUDOERS_FILE, sudoer_permission, self.SUDOERS_FILE) + self.cli_ssh.exec(sudoer_cmd) diff --git a/app/test/api/responders_test/resource/test_clique_types.py b/app/test/api/responders_test/resource/test_clique_types.py index f5e331e..5e52cea 100644 --- a/app/test/api/responders_test/resource/test_clique_types.py +++ b/app/test/api/responders_test/resource/test_clique_types.py @@ -17,10 +17,17 @@ from unittest.mock import patch class TestCliqueTypes(TestBase): - def test_get_clique_types_list_without_env_name(self): - self.validate_get_request(clique_types.URL, - params={}, - expected_code=base.BAD_REQUEST_CODE) + @patch(base.RESPONDER_BASE_READ) + def test_get_all_clique_types_list(self, read): + self.validate_get_request( + clique_types.URL, + 
params={}, + mocks={ + read: clique_types.CLIQUE_TYPES + }, + expected_code=base.SUCCESSFUL_CODE, + expected_response=clique_types.CLIQUE_TYPES_RESPONSE + ) def test_get_clique_types_with_invalid_filter(self): self.validate_get_request(clique_types.URL, @@ -53,6 +60,28 @@ class TestCliqueTypes(TestBase): expected_code=base.SUCCESSFUL_CODE ) + def test_get_clique_type_with_insufficient_configuration(self): + self.validate_get_request( + clique_types.URL, + params={ + "distribution_version": base.CORRECT_DIST_VER, + }, + expected_code=base.BAD_REQUEST_CODE + ) + + @patch(base.RESPONDER_BASE_READ) + def test_get_clique_type_with_correct_configuration(self, read): + self.validate_get_request( + clique_types.URL, + params=clique_types.TEST_CONFIGURATION, + mocks={ + read: clique_types.CLIQUE_TYPES_WITH_SPECIFIC_CONFIGURATION + }, + expected_response=clique_types. + CLIQUE_TYPES_WITH_SPECIFIC_CONFIGURATION_RESPONSE, + expected_code=base.SUCCESSFUL_CODE + ) + def test_get_clique_types_list_with_wrong_focal_point_type(self): self.validate_get_request(clique_types.URL, params={ @@ -204,9 +233,53 @@ class TestCliqueTypes(TestBase): body=json.dumps(clique_types.NON_DICT_CLIQUE_TYPE), expected_code=base.BAD_REQUEST_CODE) - def test_post_clique_type_without_env_name(self): + @patch(base.RESPONDER_BASE_CHECK_ENVIRONMENT_NAME) + def test_post_clique_type_with_reserved_env_name(self, check_env_name): + self.validate_post_request( + clique_types.URL, + mocks={ + check_env_name: True + }, + body=json.dumps(clique_types.CLIQUE_TYPE_WITH_RESERVED_NAME), + expected_code=base.BAD_REQUEST_CODE + ) + + def test_post_clique_type_without_env_name_and_configuration(self): + self.validate_post_request( + clique_types.URL, + body=json.dumps(clique_types.CLIQUE_TYPE_WITHOUT_ENV_NAME_AND_CONF), + expected_code=base.BAD_REQUEST_CODE + ) + + def test_post_clique_type_with_both_env_name_and_configuration(self): + self.validate_post_request( + clique_types.URL, + body=json.dumps( + 
clique_types.CLIQUE_TYPE_WITH_BOTH_ENV_AND_CONF), + expected_code=base.BAD_REQUEST_CODE + ) + + @patch(base.RESPONDER_BASE_CHECK_ENVIRONMENT_NAME) + def test_post_clique_type_with_insufficient_configuration(self, check_env_name): + self.validate_post_request( + clique_types.URL, + mocks={ + check_env_name: True + }, + body=json.dumps(clique_types.CLIQUE_TYPE_WITH_INSUFFICIENT_CONF), + expected_code=base.BAD_REQUEST_CODE + ) + + @patch(base.RESPONDER_BASE_READ) + def test_post_clique_type_with_duplicate_configuration(self, read): + data = clique_types.CLIQUE_TYPES_WITH_SPECIFIC_CONFIGURATION[0] + resp = clique_types.CLIQUE_TYPES_WITH_SPECIFIC_CONFIGURATION_RESPONSE + test_data = self.get_updated_data(data, deleted_keys=['id']) self.validate_post_request(clique_types.URL, - body=json.dumps(clique_types.CLIQUE_TYPE_WITHOUT_ENVIRONMENT), + body=json.dumps(test_data), + mocks={ + read: resp, + }, expected_code=base.BAD_REQUEST_CODE) @patch(base.RESPONDER_BASE_CHECK_ENVIRONMENT_NAME) @@ -231,6 +304,17 @@ class TestCliqueTypes(TestBase): CLIQUE_TYPE_WITH_WRONG_FOCAL_POINT_TYPE), expected_code=base.BAD_REQUEST_CODE) + @patch(base.RESPONDER_BASE_READ) + def test_post_clique_type_with_duplicate_focal_point_type(self, read): + test_data = self.get_updated_data(clique_types.CLIQUE_TYPE, + updates={'name': 'test-name'}) + self.validate_post_request(clique_types.URL, + body=json.dumps(test_data), + mocks={ + read: [clique_types.CLIQUE_TYPE], + }, + expected_code=base.BAD_REQUEST_CODE) + def test_post_clique_type_without_link_types(self): self.validate_post_request(clique_types.URL, body=json.dumps( @@ -255,6 +339,18 @@ class TestCliqueTypes(TestBase): body=json.dumps(clique_types.CLIQUE_TYPE_WITHOUT_NAME), expected_code=base.BAD_REQUEST_CODE) + def test_post_clique_type_with_wrong_mechanism_drivers(self): + self.validate_post_request(clique_types.URL, + body=json.dumps(clique_types. 
+ CLIQUE_TYPE_WITH_WRONG_MECH_DRIVERS), + expected_code=base.BAD_REQUEST_CODE) + + def test_post_clique_type_with_wrong_type_drivers(self): + self.validate_post_request(clique_types.URL, + body=json.dumps(clique_types. + CLIQUE_TYPE_WITH_WRONG_TYPE_DRIVERS), + expected_code=base.BAD_REQUEST_CODE) + @patch(base.RESPONDER_BASE_CHECK_ENVIRONMENT_NAME) @patch(base.RESPONDER_BASE_WRITE) def test_post_clique_type(self, write, check_environment_name): diff --git a/app/test/api/responders_test/resource/test_environment_configs.py b/app/test/api/responders_test/resource/test_environment_configs.py index 6356f06..4405f2b 100644 --- a/app/test/api/responders_test/resource/test_environment_configs.py +++ b/app/test/api/responders_test/resource/test_environment_configs.py @@ -9,7 +9,9 @@ ############################################################################### import json +from api.responders.resource.environment_configs import EnvironmentConfigs from test.api.responders_test.test_data import base +from test.api.responders_test.test_data.base import CONSTANTS_BY_NAMES from test.api.test_base import TestBase from test.api.responders_test.test_data import environment_configs from utils.constants import EnvironmentFeatures @@ -23,35 +25,25 @@ class TestEnvironmentConfigs(TestBase): def test_get_environment_configs_list(self, read): self.validate_get_request(environment_configs.URL, params={}, - mocks={ - read: environment_configs.ENV_CONFIGS - }, + mocks={read: environment_configs.ENV_CONFIGS}, expected_code=base.SUCCESSFUL_CODE, expected_response=environment_configs. 
- ENV_CONFIGS_RESPONSE - ) + ENV_CONFIGS_RESPONSE) def test_get_environment_configs_list_with_invalid_filters(self): self.validate_get_request(environment_configs.URL, - params={ - "unknown": "unknown" - }, + params={"unknown": "unknown"}, expected_code=base.BAD_REQUEST_CODE) @patch(base.RESPONDER_BASE_READ) def test_get_environment_configs_list_with_name(self, read): + mocks = {read: environment_configs.ENV_CONFIGS_WITH_SPECIFIC_NAME} self.validate_get_request(environment_configs.URL, - params={ - "name": environment_configs.NAME - }, - mocks={ - read: environment_configs. - ENV_CONFIGS_WITH_SPECIFIC_NAME - }, + params={"name": environment_configs.NAME}, + mocks=mocks, expected_code=base.SUCCESSFUL_CODE, expected_response=environment_configs. - ENV_CONFIGS_WITH_SPECIFIC_NAME[0] - ) + ENV_CONFIGS_WITH_SPECIFIC_NAME[0]) @patch(base.RESPONDER_BASE_READ) def test_get_environment_configs_list_with_unknown_name(self, read): @@ -82,193 +74,151 @@ class TestEnvironmentConfigs(TestBase): @patch(base.RESPONDER_BASE_READ) def test_get_environment_configs_list_with_distribution(self, read): + config = environment_configs.ENV_CONFIGS_WITH_SPECIFIC_DISTRIBUTION + config_response = \ + environment_configs.ENV_CONFIGS_WITH_SPECIFIC_DISTRIBUTION_RESPONSE self.validate_get_request(environment_configs.URL, params={ "distribution": environment_configs. CORRECT_DISTRIBUTION }, - mocks={ - read: environment_configs. - ENV_CONFIGS_WITH_SPECIFIC_DISTRIBUTION - }, + mocks={read: config}, expected_code=base.SUCCESSFUL_CODE, - expected_response=environment_configs. 
- ENV_CONFIGS_WITH_SPECIFIC_DISTRIBUTION_RESPONSE) + expected_response=config_response) def test_get_environment_configs_list_with_wrong_mechanism_driver(self): + config = environment_configs.WRONG_MECHANISM_DRIVER self.validate_get_request(environment_configs.URL, - params={ - "mechanism_drivers": - environment_configs.WRONG_MECHANISM_DRIVER - }, + params={"mechanism_drivers": config}, expected_code=base.BAD_REQUEST_CODE) @patch(base.RESPONDER_BASE_READ) def test_get_environment_configs_list_with_mechanism_driver(self, read): + mechanism = environment_configs.CORRECT_MECHANISM_DRIVER + config = environment_configs.ENV_CONFIGS_WITH_SPECIFIC_MECHANISM_DRIVER + config_response = environment_configs.\ + ENV_CONFIGS_WITH_SPECIFIC_MECHANISM_DRIVER_RESPONSE self.validate_get_request(environment_configs.URL, - params={ - "mechanism_drivers": - environment_configs.CORRECT_MECHANISM_DRIVER - }, - mocks={ - read: environment_configs. - ENV_CONFIGS_WITH_SPECIFIC_MECHANISM_DRIVER - }, + params={"mechanism_drivers": mechanism}, + mocks={read: config}, expected_code=base.SUCCESSFUL_CODE, - expected_response=environment_configs. 
- ENV_CONFIGS_WITH_SPECIFIC_MECHANISM_DRIVER_RESPONSE - ) + expected_response=config_response) def test_get_environment_configs_list_with_wrong_type_driver(self): + driver = environment_configs.WRONG_TYPE_DRIVER self.validate_get_request(environment_configs.URL, - params={ - "type_drivers": - environment_configs.WRONG_TYPE_DRIVER - }, + params={"type_drivers": driver}, expected_code=base.BAD_REQUEST_CODE) @patch(base.RESPONDER_BASE_READ) def test_get_environment_configs_list_with_type_driver(self, read): + driver = environment_configs.CORRECT_TYPE_DRIVER + config = environment_configs.ENV_CONFIGS_WITH_SPECIFIC_TYPE_DRIVER + config_response = environment_configs.\ + ENV_CONFIGS_WITH_SPECIFIC_TYPE_DRIVER_RESPONSE self.validate_get_request(environment_configs.URL, - params={ - "type_drivers": - environment_configs.CORRECT_TYPE_DRIVER - }, - mocks={ - read: environment_configs. - ENV_CONFIGS_WITH_SPECIFIC_TYPE_DRIVER - }, + params={"type_drivers": driver}, + mocks={read: config}, expected_code=base.SUCCESSFUL_CODE, - expected_response=environment_configs. - ENV_CONFIGS_WITH_SPECIFIC_TYPE_DRIVER_RESPONSE + expected_response=config_response ) @patch(base.RESPONDER_BASE_READ) def test_get_environment_configs_list_with_user(self, read): + config = environment_configs.ENV_CONFIGS_WITH_SPECIFIC_USER + config_response = \ + environment_configs.ENV_CONFIGS_WITH_SPECIFIC_USER_RESPONSE self.validate_get_request(environment_configs.URL, - params={ - "user": environment_configs.USER - }, - mocks={ - read: environment_configs. - ENV_CONFIGS_WITH_SPECIFIC_USER - }, + params={"user": environment_configs.USER}, + mocks={read: config}, expected_code=base.SUCCESSFUL_CODE, - expected_response=environment_configs. 
- ENV_CONFIGS_WITH_SPECIFIC_USER_RESPONSE - ) + expected_response=config_response) def test_get_environment_configs_list_with_non_bool_listen(self): self.validate_get_request(environment_configs.URL, - params={ - "listen": environment_configs.NON_BOOL_LISTEN - }, + params={"listen": environment_configs. + NON_BOOL_LISTEN}, expected_code=base.BAD_REQUEST_CODE) @patch(base.RESPONDER_BASE_READ) def test_get_environment_configs_list_with_bool_listen(self, read): + config = environment_configs.ENV_CONFIGS_WITH_SPECIFIC_LISTEN + config_response = \ + environment_configs.ENV_CONFIGS_WITH_SPECIFIC_LISTEN_RESPONSE self.validate_get_request(environment_configs.URL, - params={ - "listen": environment_configs.BOOL_LISTEN - }, - mocks={ - read: environment_configs. - ENV_CONFIGS_WITH_SPECIFIC_LISTEN - }, + params={"listen": environment_configs. + BOOL_LISTEN}, + mocks={read: config}, expected_code=base.SUCCESSFUL_CODE, - expected_response=environment_configs. - ENV_CONFIGS_WITH_SPECIFIC_LISTEN_RESPONSE - ) + expected_response=config_response) def test_get_environment_configs_list_with_non_bool_scanned(self): self.validate_get_request(environment_configs.URL, - params={ - "scanned": environment_configs. - NON_BOOL_SCANNED - }, + params={"scanned": environment_configs. + NON_BOOL_SCANNED}, expected_code=base.BAD_REQUEST_CODE) @patch(base.RESPONDER_BASE_READ) def test_get_environment_configs_list_with_bool_scanned(self, read): + config = environment_configs.ENV_CONFIGS_WITH_SPECIFIC_SCANNED + config_response = \ + environment_configs.ENV_CONFIGS_WITH_SPECIFIC_SCANNED_RESPONSE self.validate_get_request(environment_configs.URL, - params={ - "scanned": environment_configs.BOOL_SCANNED - }, - mocks={ - read: environment_configs. - ENV_CONFIGS_WITH_SPECIFIC_SCANNED - }, + params={"scanned": environment_configs. + BOOL_SCANNED}, + mocks={read: config}, expected_code=base.SUCCESSFUL_CODE, - expected_response=environment_configs. 
- ENV_CONFIGS_WITH_SPECIFIC_SCANNED_RESPONSE + expected_response=config_response ) - def test_get_environment_configs_list_with_non_bool_monitoring_setup_done(self): + def test_get_env_configs_list_with_non_bool_monitoring_setup_done(self): self.validate_get_request(environment_configs.URL, - params={ - "listen": environment_configs. - NON_BOOL_MONITORING_SETUP_DONE - }, + params={"listen": environment_configs. + NON_BOOL_MONITORING_SETUP_DONE}, expected_code=base.BAD_REQUEST_CODE) @patch(base.RESPONDER_BASE_READ) - def test_get_environment_configs_list_with_bool_monitoring_setup_done(self, read): + def test_get_environment_configs_list_with_bool_monitoring_setup_done(self, + read): + config = environment_configs.\ + ENV_CONFIGS_WITH_SPECIFIC_MONITORING_SETUP_DONE + config_response = environment_configs.\ + ENV_CONFIGS_WITH_SPECIFIC_MONITORING_SETUP_DONE_RESPONSE self.validate_get_request(environment_configs.URL, - params={ - "scanned": environment_configs. - BOOL_MONITORING_SETUP_DONE - }, - mocks={ - read: environment_configs. - ENV_CONFIGS_WITH_SPECIFIC_MONITORING_SETUP_DONE - }, + params={"scanned": environment_configs. + BOOL_MONITORING_SETUP_DONE}, + mocks={read: config}, expected_code=base.SUCCESSFUL_CODE, - expected_response=environment_configs. 
- ENV_CONFIGS_WITH_SPECIFIC_MONITORING_SETUP_DONE_RESPONSE - ) + expected_response=config_response) def test_get_environment_configs_list_with_non_int_page(self): self.validate_get_request(environment_configs.URL, - params={ - "page": base.NON_INT_PAGE - }, + params={"page": base.NON_INT_PAGE}, expected_code=base.BAD_REQUEST_CODE) @patch(base.RESPONDER_BASE_READ) def test_get_environment_configs_list_with_int_page(self, read): + config_response = environment_configs.ENV_CONFIGS_RESPONSE self.validate_get_request(environment_configs.URL, - params={ - "page": base.INT_PAGE - }, - mocks={ - read: environment_configs.ENV_CONFIGS - }, + params={"page": base.INT_PAGE}, + mocks={read: environment_configs.ENV_CONFIGS}, expected_code=base.SUCCESSFUL_CODE, - expected_response=environment_configs. - ENV_CONFIGS_RESPONSE - ) + expected_response=config_response) def test_get_environment_configs_list_with_non_int_page_size(self): self.validate_get_request(environment_configs.URL, - params={ - "page_size": base.NON_INT_PAGESIZE - }, + params={"page_size": base.NON_INT_PAGESIZE}, expected_code=base.BAD_REQUEST_CODE) @patch(base.RESPONDER_BASE_READ) def test_get_environment_configs_list_with_int_page_size(self, read): + config_response = environment_configs.ENV_CONFIGS_RESPONSE self.validate_get_request(environment_configs.URL, - params={ - "page_size": base.INT_PAGESIZE - }, - mocks={ - read: environment_configs.ENV_CONFIGS - }, + params={"page_size": base.INT_PAGESIZE}, + mocks={read: environment_configs.ENV_CONFIGS}, expected_code=base.SUCCESSFUL_CODE, - expected_response=environment_configs. 
- ENV_CONFIGS_RESPONSE - ) + expected_response=config_response) def test_post_environment_config_without_app_path(self): test_data = self.get_updated_data(environment_configs.ENV_CONFIG, @@ -292,8 +242,9 @@ class TestEnvironmentConfigs(TestBase): expected_code=base.BAD_REQUEST_CODE) def test_post_environment_config_with_wrong_distribution(self): + dist = environment_configs.WRONG_DISTRIBUTION test_data = self.get_updated_data(environment_configs.ENV_CONFIG, - updates={"distribution": environment_configs.WRONG_DISTRIBUTION}) + updates={"distribution": dist}) self.validate_post_request(environment_configs.URL, body=json.dumps(test_data), expected_code=base.BAD_REQUEST_CODE) @@ -306,8 +257,9 @@ class TestEnvironmentConfigs(TestBase): expected_code=base.BAD_REQUEST_CODE) def test_post_environment_config_with_wrong_listen(self): + listen_val = environment_configs.NON_BOOL_LISTEN test_data = self.get_updated_data(environment_configs.ENV_CONFIG, - updates={"listen": environment_configs.NON_BOOL_LISTEN}) + updates={"listen": listen_val}) self.validate_post_request(environment_configs.URL, body=json.dumps(test_data), expected_code=base.BAD_REQUEST_CODE) @@ -320,10 +272,10 @@ class TestEnvironmentConfigs(TestBase): expected_code=base.BAD_REQUEST_CODE) def test_post_environment_config_with_wrong_mechanism_driver(self): + mechanism = environment_configs.WRONG_MECHANISM_DRIVER test_data = self.get_updated_data(environment_configs.ENV_CONFIG, updates={ - "mechanism_drivers": - [environment_configs.WRONG_MECHANISM_DRIVER] + "mechanism_drivers": [mechanism] }) self.validate_post_request(environment_configs.URL, body=json.dumps(test_data), @@ -344,19 +296,17 @@ class TestEnvironmentConfigs(TestBase): expected_code=base.BAD_REQUEST_CODE) def test_post_environment_config_with_wrong_scanned(self): + scanned_val = environment_configs.NON_BOOL_SCANNED test_data = self.get_updated_data(environment_configs.ENV_CONFIG, - updates={ - "scanned": environment_configs.NON_BOOL_SCANNED - }) + 
updates={"scanned": scanned_val}) self.validate_post_request(environment_configs.URL, body=json.dumps(test_data), expected_code=base.BAD_REQUEST_CODE) def test_post_environment_config_with_wrong_last_scanned(self): + scanned_val = base.WRONG_FORMAT_TIME test_data = self.get_updated_data(environment_configs.ENV_CONFIG, - updates={ - "last_scanned": base.WRONG_FORMAT_TIME - }) + updates={"last_scanned": scanned_val}) self.validate_post_request(environment_configs.URL, body=json.dumps(test_data), expected_code=base.BAD_REQUEST_CODE) @@ -376,16 +326,81 @@ class TestEnvironmentConfigs(TestBase): expected_code=base.BAD_REQUEST_CODE) def test_post_environment_config_with_wrong_type_drivers(self): + driver = environment_configs.WRONG_TYPE_DRIVER + test_data = self.get_updated_data(environment_configs.ENV_CONFIG, + updates={"type_drivers": [driver]}) + self.validate_post_request(environment_configs.URL, + body=json.dumps(test_data), + expected_code=base.BAD_REQUEST_CODE) + + def test_post_environment_config_with_duplicate_configurations(self): + test_data = self.get_updated_data(environment_configs.ENV_CONFIG) + test_data["configuration"].append({ + "name": "OpenStack" + }) + self.validate_post_request(environment_configs.URL, + body=json.dumps(test_data), + expected_code=base.BAD_REQUEST_CODE) + + def test_post_environment_config_with_empty_configuration(self): + test_data = self.get_updated_data(environment_configs.ENV_CONFIG) + test_data["configuration"].append({}) + self.validate_post_request(environment_configs.URL, + body=json.dumps(test_data), + expected_code=base.BAD_REQUEST_CODE) + + def test_post_environment_config_with_unknown_configuration(self): + test_data = self.get_updated_data(environment_configs.ENV_CONFIG) + test_data["configuration"].append({ + "name": "Unknown configuration", + }) + self.validate_post_request(environment_configs.URL, + body=json.dumps(test_data), + expected_code=base.BAD_REQUEST_CODE) + + def 
test_post_environment_config_without_required_configurations(self): + for env_type in CONSTANTS_BY_NAMES["environment_types"]: + required_conf_list = ( + EnvironmentConfigs.REQUIRED_CONFIGURATIONS_NAMES.get(env_type, + []) + ) + if required_conf_list: + test_data = \ + self.get_updated_data(environment_configs.ENV_CONFIG) + test_data['environment_type'] = env_type + test_data['configuration'] = [ + c + for c in test_data['configuration'] + if c['name'] != required_conf_list[0] + ] + + self.validate_post_request(environment_configs.URL, + body=json.dumps(test_data), + expected_code=base.BAD_REQUEST_CODE) + + def test_post_environment_config_with_incomplete_configuration(self): test_data = self.get_updated_data(environment_configs.ENV_CONFIG, updates={ - "type_drivers": [environment_configs.WRONG_TYPE_DRIVER] + "configuration": [{ + "host": "10.56.20.239", + "name": "mysql", + "user": "root" + }, { + "name": "OpenStack", + "host": "10.56.20.239", + }, { + "host": "10.56.20.239", + "name": "CLI", + "user": "root" + }] }) self.validate_post_request(environment_configs.URL, body=json.dumps(test_data), expected_code=base.BAD_REQUEST_CODE) - def mock_validate_env_config_with_supported_envs(self, scanning, - monitoring, listening): + @staticmethod + def mock_validate_env_config_with_supported_envs(scanning, monitoring, + listening): InventoryMgr.is_feature_supported_in_env = \ lambda self, matches, feature: { EnvironmentFeatures.SCANNING: scanning, @@ -396,11 +411,12 @@ class TestEnvironmentConfigs(TestBase): @patch(base.RESPONDER_BASE_WRITE) def test_post_environment_config(self, write): self.mock_validate_env_config_with_supported_envs(True, True, True) + post_body = json.dumps(environment_configs.ENV_CONFIG) self.validate_post_request(environment_configs.URL, mocks={ write: None }, - body=json.dumps(environment_configs.ENV_CONFIG), + body=post_body, expected_code=base.CREATED_CODE) def test_post_unsupported_environment_config(self): @@ -421,10 +437,11 @@ class 
TestEnvironmentConfigs(TestBase): "listening": False } ] + mock_validate = self.mock_validate_env_config_with_supported_envs + config = environment_configs.ENV_CONFIG for test_case in test_cases: - self.mock_validate_env_config_with_supported_envs(test_case["scanning"], - test_case["monitoring"], - test_case["listening"]) + mock_validate(test_case["scanning"], test_case["monitoring"], + test_case["listening"]) self.validate_post_request(environment_configs.URL, - body=json.dumps(environment_configs.ENV_CONFIG), + body=json.dumps(config), expected_code=base.BAD_REQUEST_CODE) diff --git a/app/test/api/responders_test/test_data/base.py b/app/test/api/responders_test/test_data/base.py index b99d5bb..6d2422a 100644 --- a/app/test/api/responders_test/test_data/base.py +++ b/app/test/api/responders_test/test_data/base.py @@ -16,14 +16,14 @@ UNAUTHORIZED_CODE = "401" CREATED_CODE = "201" ENV_NAME = "Mirantis-Liberty-API" -UNKNOWN_ENV = "Unkown-Environment" +UNKNOWN_ENV = "Unknown-Environment" NON_INT_PAGE = 1.4 INT_PAGE = 1 NON_INT_PAGESIZE = 2.4 INT_PAGESIZE = 2 WRONG_LINK_TYPE = "instance-host" -CORRECT_LINK_TYPE= "instance-vnic" +CORRECT_LINK_TYPE = "instance-vnic" WRONG_LINK_STATE = "wrong" CORRECT_LINK_STATE = "up" @@ -41,7 +41,7 @@ WRONG_TYPE_DRIVER = "wrong_type" CORRECT_TYPE_DRIVER = "local" WRONG_MECHANISM_DRIVER = "wrong-mechanism-dirver" -CORRECT_MECHANISM_DRIVER = "ovs" +CORRECT_MECHANISM_DRIVER = "OVS" WRONG_LOG_LEVEL = "wrong-log-level" CORRECT_LOG_LEVEL = "critical" @@ -71,16 +71,32 @@ NON_DICT_OBJ = "" CONSTANTS_BY_NAMES = { "link_types": [ "instance-vnic", - "otep-vconnector", - "otep-host_pnic", + "vnic-instance", + "vnic-vconnector", + "vconnector-vnic", + "vconnector-vedge", + "vedge-vconnector", + "vedge-host_pnic", + "host_pnic-vedge", "host_pnic-network", + "network-host_pnic", "vedge-otep", - "vnic-vconnector", + "otep-vedge", + "otep-vconnector", + "vconnector-otep", + "otep-host_pnic", + "host_pnic-otep", "vconnector-host_pnic", - 
"vconnector-vedge", + "host_pnic-vconnector", "vnic-vedge", - "vedge-host_pnic", - "vservice-vnic" + "vedge-vnic", + "vservice-vnic", + "vnic-vservice", + "switch_pnic-host_pnic", + "host_pnic-switch_pnic", + "switch_pnic-switch_pnic", + "switch_pnic-switch", + "switch-switch_pnic" ], "link_states": [ "up", @@ -117,9 +133,9 @@ CONSTANTS_BY_NAMES = { "flat" ], "mechanism_drivers": [ - "ovs", - "vpp", - "LinuxBridge", + "OVS", + "VPP", + "LXB", "Arista", "Nexus" ], @@ -155,6 +171,10 @@ CONSTANTS_BY_NAMES = { "Mirantis", "RDO" ], + "distribution_versions": [ + "8.0", + "9.0" + ], "environment_operational_status": [ "stopped", "running", @@ -168,6 +188,30 @@ CONSTANTS_BY_NAMES = { ], "environment_monitoring_types": [ "Sensu" + ], + "scans_statuses": [ + "draft", + "pending", + "running", + "completed", + "completed_with_errors", + "failed", + "aborted" + ], + "configuration_targets": [ + "AMQP", + "CLI", + "ACI", + "mysql", + "OpenStack", + "Monitoring", + "Kubernetes" + ], + "environment_types": [ + "OpenStack", + "Kubernetes", + "VMware", + "Bare-metal" ] } @@ -175,7 +219,8 @@ CONSTANTS_BY_NAMES = { RESPONDER_BASE_PATH = "api.responders.responder_base.ResponderBase" RESPONDER_BASE_GET_OBJECTS_LIST = RESPONDER_BASE_PATH + ".get_objects_list" RESPONDER_BASE_GET_OBJECT_BY_ID = RESPONDER_BASE_PATH + ".get_object_by_id" -RESPONDER_BASE_CHECK_ENVIRONMENT_NAME = RESPONDER_BASE_PATH + ".check_environment_name" +RESPONDER_BASE_CHECK_ENVIRONMENT_NAME = \ + RESPONDER_BASE_PATH + ".check_environment_name" RESPONDER_BASE_READ = RESPONDER_BASE_PATH + ".read" RESPONDER_BASE_WRITE = RESPONDER_BASE_PATH + ".write" RESPONDER_BASE_AGGREGATE = RESPONDER_BASE_PATH + ".aggregate" diff --git a/app/test/api/responders_test/test_data/clique_types.py b/app/test/api/responders_test/test_data/clique_types.py index ae962ce..0791bdf 100644 --- a/app/test/api/responders_test/test_data/clique_types.py +++ b/app/test/api/responders_test/test_data/clique_types.py @@ -8,13 +8,18 @@ # 
http://www.apache.org/licenses/LICENSE-2.0 # ############################################################################### from test.api.responders_test.test_data import base - +from test.api.responders_test.test_data.base import WRONG_MECHANISM_DRIVER, \ + CORRECT_MECHANISM_DRIVER, CORRECT_TYPE_DRIVER, WRONG_TYPE_DRIVER, \ + CORRECT_DISTRIBUTION, CORRECT_DIST_VER URL = "/clique_types" WRONG_ID = base.WRONG_OBJECT_ID NONEXISTENT_ID = "58ca73ae3a8a836d10ff3b44" CORRECT_ID = base.CORRECT_OBJECT_ID +SAMPLE_IDS = ['58ca73ae3a8a836d10ff3b80', '58ca73ae3a8a836d10ff3b81'] + +RESERVED_ENV_NAME = 'ANY' WRONG_FOCAL_POINT_TYPE = base.WRONG_OBJECT_TYPE CORRECT_FOCAL_POINT_POINT_TYPE = base.CORRECT_OBJECT_TYPE @@ -23,25 +28,52 @@ WRONG_LINK_TYPE = base.WRONG_LINK_TYPE NONEXISTENT_LINK_TYPE = "otep-host_pnic" CORRECT_LINK_TYPE = base.CORRECT_LINK_TYPE +CLIQUE_TYPE = { + "environment": "Mirantis-Liberty-API", + "name": "instance_vconnector_clique", + "link_types": [ + "instance-vnic", + "vnic-vconnector" + ], + "focal_point_type": "instance" +} + +TEST_CONFIGURATION = { + "distribution": CORRECT_DISTRIBUTION, + "distribution_version": CORRECT_DIST_VER, + "mechanism_drivers": CORRECT_MECHANISM_DRIVER, + "type_drivers": CORRECT_TYPE_DRIVER +} + + +def get_payload(update: dict = None, delete: list = None): + payload = CLIQUE_TYPE.copy() + if update: + payload.update(update) + if delete: + for k in delete: + del payload[k] + return payload + + CLIQUE_TYPES_WITH_SPECIFIC_ID = [ - { - "environment": "Mirantis-Liberty-API", - "focal_point_type": "host_pnic", - "id": CORRECT_ID - } + get_payload(update={'id': CORRECT_ID}) +] + +CLIQUE_TYPES_WITH_SPECIFIC_CONFIGURATION = [ + get_payload(update={'id': SAMPLE_IDS[0], + **TEST_CONFIGURATION}, + delete=['environment']) ] +CLIQUE_TYPES_WITH_SPECIFIC_CONFIGURATION_RESPONSE = { + "clique_types": CLIQUE_TYPES_WITH_SPECIFIC_CONFIGURATION +} + CLIQUE_TYPES_WITH_SPECIFIC_FOCAL_POINT_TYPE = [ - { - "environment": "Mirantis-Liberty-API", - 
"focal_point_type": CORRECT_FOCAL_POINT_POINT_TYPE, - "id": "58ca73ae3a8a836d10ff3b80" - }, - { - "environment": "Mirantis-Liberty-API", - "focal_point_type": CORRECT_FOCAL_POINT_POINT_TYPE, - "id": "58ca73ae3a8a836d10ff3b81" - } + get_payload(update={'id': _id, + 'focal_point_type': CORRECT_FOCAL_POINT_POINT_TYPE}) + for _id in SAMPLE_IDS ] CLIQUE_TYPES_WITH_SPECIFIC_FOCAL_POINT_TYPE_RESPONSE = { @@ -49,20 +81,9 @@ CLIQUE_TYPES_WITH_SPECIFIC_FOCAL_POINT_TYPE_RESPONSE = { } CLIQUE_TYPES_WITH_SPECIFIC_LINK_TYPE = [ - { - "environment": "Mirantis-Liberty-API", - "link_types": [ - CORRECT_LINK_TYPE - ], - "id": "58ca73ae3a8a836d10ff3b80" - }, - { - "environment": "Mirantis-Liberty-API", - "link_types": [ - CORRECT_LINK_TYPE - ], - "id": "58ca73ae3a8a836d10ff3b81" - } + get_payload(update={'id': _id, + 'link_types': [CORRECT_LINK_TYPE]}) + for _id in SAMPLE_IDS ] CLIQUE_TYPES_WITH_SPECIFIC_LINK_TYPE_RESPONSE = { @@ -70,16 +91,7 @@ CLIQUE_TYPES_WITH_SPECIFIC_LINK_TYPE_RESPONSE = { } CLIQUE_TYPES = [ - { - "environment": "Mirantis-Liberty-API", - "focal_point_type": "vnic", - "id": "58ca73ae3a8a836d10ff3b80" - }, - { - "environment": "Mirantis-Liberty-API", - "focal_point_type": "vnic", - "id": "58ca73ae3a8a836d10ff3b81" - } + get_payload(update={'id': _id}) for _id in SAMPLE_IDS ] CLIQUE_TYPES_RESPONSE = { @@ -88,83 +100,48 @@ CLIQUE_TYPES_RESPONSE = { NON_DICT_CLIQUE_TYPE = base.NON_DICT_OBJ -CLIQUE_TYPE_WITHOUT_ENVIRONMENT = { - "name": "instance_vconnector_clique", - "link_types": [ - "instance-vnic", - "vnic-vconnector" - ], - "focal_point_type": "instance" -} +CLIQUE_TYPE_WITH_RESERVED_NAME = get_payload( + update={'environment': RESERVED_ENV_NAME} +) -CLIQUE_TYPE_WITH_UNKNOWN_ENVIRONMENT = { - "environment": base.UNKNOWN_ENV, - "id": "589a3969761b0555a3ef6093", - "name": "instance_vconnector_clique", - "link_types": [ - "instance-vnic", - "vnic-vconnector" - ], - "focal_point_type": "instance" -} +CLIQUE_TYPE_WITHOUT_ENV_NAME_AND_CONF = get_payload( + 
delete=['environment'] +) -CLIQUE_TYPE_WITHOUT_FOCAL_POINT_TYPE = { - "environment": "Mirantis-Liberty-API", - "name": "instance_vconnector_clique", - "link_types": [ - "instance-vnic", - "vnic-vconnector" - ] -} +CLIQUE_TYPE_WITH_BOTH_ENV_AND_CONF = get_payload( + update=TEST_CONFIGURATION +) -CLIQUE_TYPE_WITH_WRONG_FOCAL_POINT_TYPE = { - "environment": "Mirantis-Liberty-API", - "name": "instance_vconnector_clique", - "link_types": [ - "instance-vnic", - "vnic-vconnector" - ], - "focal_point_type": WRONG_FOCAL_POINT_TYPE -} +CLIQUE_TYPE_WITH_INSUFFICIENT_CONF = get_payload( + update={'distribution_version': CORRECT_DIST_VER} +) -CLIQUE_TYPE_WITHOUT_LINK_TYPES = { - "environment": "Mirantis-Liberty-API", - "name": "instance_vconnector_clique", - "focal_point_type": "instance" -} +CLIQUE_TYPE_WITH_UNKNOWN_ENVIRONMENT = get_payload( + update={'environment': base.UNKNOWN_ENV} +) -CLIQUE_TYPE_WITH_NON_LIST_LINK_TYPES = { - "environment": "Mirantis-Liberty-API", - "name": "instance_vconnector_clique", - "link_types": "instance-vnic", - "focal_point_type": "instance" -} +CLIQUE_TYPE_WITHOUT_FOCAL_POINT_TYPE = get_payload(delete=['focal_point_type']) -CLIQUE_TYPE_WITH_WRONG_LINK_TYPE = { - "environment": "Mirantis-Liberty-API", - "name": "instance_vconnector_clique", - "link_types": [ - WRONG_LINK_TYPE, - "vnic-vconnector" - ], - "focal_point_type": "instance" -} +CLIQUE_TYPE_WITH_WRONG_FOCAL_POINT_TYPE = get_payload( + update={'focal_point_type': WRONG_FOCAL_POINT_TYPE} +) -CLIQUE_TYPE_WITHOUT_NAME = { - "environment": "Mirantis-Liberty-API", - "link_types": [ - "instance-vnic", - "vnic-vconnector", - ], - "focal_point_type": "instance" -} +CLIQUE_TYPE_WITHOUT_LINK_TYPES = get_payload(delete=['link_types']) -CLIQUE_TYPE = { - "environment": "Mirantis-Liberty-API", - "name": "instance_vconnector_clique", - "link_types": [ - "instance-vnic", - "vnic-vconnector" - ], - "focal_point_type": "instance" -} +CLIQUE_TYPE_WITH_NON_LIST_LINK_TYPES = get_payload( + 
update={'link_types': "instance-vnic"} +) + +CLIQUE_TYPE_WITH_WRONG_LINK_TYPE = get_payload( + update={'link_types': [WRONG_LINK_TYPE, "vnic-vconnector"]} +) + +CLIQUE_TYPE_WITHOUT_NAME = get_payload(delete=['name']) + +CLIQUE_TYPE_WITH_WRONG_MECH_DRIVERS = get_payload( + update={'mechanism_drivers': WRONG_MECHANISM_DRIVER} +) + +CLIQUE_TYPE_WITH_WRONG_TYPE_DRIVERS = get_payload( + update={'type_drivers': WRONG_TYPE_DRIVER} +)
\ No newline at end of file diff --git a/app/test/api/responders_test/test_data/environment_configs.py b/app/test/api/responders_test/test_data/environment_configs.py index 4cea105..3e976ec 100644 --- a/app/test/api/responders_test/test_data/environment_configs.py +++ b/app/test/api/responders_test/test_data/environment_configs.py @@ -201,6 +201,7 @@ ENV_CONFIG = { "provision": "None", "env_type": "development", "name": "Monitoring", + "install_monitoring_client": True, "api_port": "4567", "rabbitmq_port": "5671", "rabbitmq_pass": "sensuaccess", @@ -218,12 +219,13 @@ ENV_CONFIG = { "last_scanned": "2017-03-16T11:14:54Z", "listen": True, "mechanism_drivers": [ - "ovs" + "OVS" ], "name": "Mirantis-Liberty", "operational": "running", "scanned": True, "type": "environment", "type_drivers": "vxlan", - "user": "WS7j8oTbWPf3LbNne" + "user": "WS7j8oTbWPf3LbNne", + "environment_type": "OpenStack" } diff --git a/app/test/api/test_base.py b/app/test/api/test_base.py index 33185ec..edc59ae 100644 --- a/app/test/api/test_base.py +++ b/app/test/api/test_base.py @@ -34,8 +34,10 @@ class TestBase(TestCase): self.original_auth_method = AuthenticationMiddleware.process_request AuthenticationMiddleware.process_request = mock_auth_method - ResponderBase.get_constants_by_name = MagicMock(side_effect= - lambda name: base.CONSTANTS_BY_NAMES[name]) + ResponderBase.get_constants_by_name = MagicMock( + side_effect=lambda name: base.CONSTANTS_BY_NAMES[name] + ) + # mock mongo access MongoAccess.mongo_connect = MagicMock() MongoAccess.db = MagicMock() @@ -47,8 +49,8 @@ class TestBase(TestCase): log_level = 'debug' self.app = App(log_level=log_level).get_app() - def validate_get_request(self, url, params={}, headers=None, mocks={}, - side_effects={}, + def validate_get_request(self, url, params=None, headers=None, mocks=None, + side_effects=None, expected_code=base.SUCCESSFUL_CODE, expected_response=None): self.validate_request("GET", url, params, headers, "", @@ -59,25 +61,27 @@ class 
TestBase(TestCase): def validate_request(self, action, url, params, headers, body, mocks, side_effects, expected_code, expected_response): - for mock_method, mock_data in mocks.items(): - mock_method.return_value = mock_data + if mocks: + for mock_method, mock_data in mocks.items(): + mock_method.return_value = mock_data - for mock_method, side_effect in side_effects.items(): - mock_method.side_effect = side_effect + if side_effects: + for mock_method, side_effect in side_effects.items(): + mock_method.side_effect = side_effect result = self.simulate_request(action, url, params=params, headers=headers, body=body) self.assertEqual(result.status, expected_code) if expected_response: self.assertEqual(result.json, expected_response) - def validate_post_request(self, url, headers={}, body="", mocks={}, - side_effects={}, + def validate_post_request(self, url, headers=None, body="", mocks=None, + side_effects=None, expected_code=base.CREATED_CODE, expected_response=None): self.validate_request("POST", url, {}, headers, body, mocks, side_effects, expected_code, expected_response) - def validate_delete_request(self, url, params={}, headers={}, mocks={}, - side_effects={}, + def validate_delete_request(self, url, params=None, headers=None, mocks=None, + side_effects=None, expected_code=base.SUCCESSFUL_CODE, expected_response=None): self.validate_request("DELETE", url, params, headers, "", mocks, side_effects, diff --git a/app/test/fetch/api_fetch/test_api_access.py b/app/test/fetch/api_fetch/test_api_access.py index 0effc0e..440b730 100644 --- a/app/test/fetch/api_fetch/test_api_access.py +++ b/app/test/fetch/api_fetch/test_api_access.py @@ -7,9 +7,9 @@ # which accompanies this distribution, and is available at # # http://www.apache.org/licenses/LICENSE-2.0 # ############################################################################### -from unittest.mock import MagicMock, Mock - +import copy import requests +from unittest.mock import MagicMock, Mock from 
discover.fetchers.api.api_access import ApiAccess from test.fetch.api_fetch.test_data.api_access import * @@ -35,38 +35,45 @@ class TestApiAccess(TestFetch): def test_parse_illegal_time(self): time = self.api_access.parse_time(ILLEGAL_TIME) - self.assertEqual(time, None, "Can't get None when the time format is wrong") + self.assertEqual(time, None, + "Can't get None when the time format is wrong") def test_get_existing_token(self): self.api_access.tokens = VALID_TOKENS token = self.api_access.get_existing_token(PROJECT) - self.assertNotEqual(token, VALID_TOKENS[PROJECT], "Can't get existing token") + self.assertNotEqual(token, VALID_TOKENS[PROJECT], + "Can't get existing token") def test_get_nonexistent_token(self): self.api_access.tokens = EMPTY_TOKENS token = self.api_access.get_existing_token(TEST_PROJECT) - self.assertEqual(token, None, "Can't get None when the token doesn't " + - "exist in tokens") + self.assertEqual(token, None, + "Can't get None when the token doesn't exist " + "in tokens") def test_v2_auth(self): self.api_access.get_existing_token = MagicMock(return_value=None) self.response.json = Mock(return_value=CORRECT_AUTH_CONTENT) # mock authentication info from OpenStack Api - token_details = self.api_access.v2_auth(TEST_PROJECT, TEST_HEADER, TEST_BODY) + token_details = self.api_access.v2_auth(TEST_PROJECT, TEST_HEADER, + TEST_BODY) self.assertNotEqual(token_details, None, "Can't get the token details") def test_v2_auth_with_error_content(self): self.api_access.get_existing_token = MagicMock(return_value=None) self.response.json = Mock(return_value=ERROR_AUTH_CONTENT) # authentication content from OpenStack Api will be incorrect - token_details = self.api_access.v2_auth(TEST_PROJECT, TEST_HEADER, TEST_BODY) - self.assertIs(token_details, None, "Can't get None when the content is wrong") + token_details = self.api_access.v2_auth(TEST_PROJECT, TEST_HEADER, + TEST_BODY) + self.assertIs(token_details, None, + "Can't get None when the content is wrong") 
def test_v2_auth_with_error_token(self): self.response.status_code = requests.codes.bad_request self.response.json = Mock(return_value=ERROR_TOKEN_CONTENT) # authentication info from OpenStack Api will not contain token info - token_details = self.api_access.v2_auth(TEST_PROJECT, TEST_HEADER, TEST_BODY) + token_details = self.api_access.v2_auth(TEST_PROJECT, TEST_HEADER, + TEST_BODY) self.assertIs(token_details, None, "Can't get None when the content " + "doesn't contain any token info") @@ -78,12 +85,13 @@ class TestApiAccess(TestFetch): # the time will not be parsed self.api_access.parse_time = MagicMock(return_value=None) - token_details = self.api_access.v2_auth(TEST_PROJECT, TEST_HEADER, TEST_BODY) + token_details = self.api_access.v2_auth(TEST_PROJECT, TEST_HEADER, + TEST_BODY) # reset original parse_time method self.api_access.parse_time = original_method - self.assertIs(token_details, None, "Can't get None when the time in token " + - "can't be parsed") + self.assertIs(token_details, None, + "Can't get None when the time in token can't be parsed") def test_v2_auth_pwd(self): self.response.json = Mock(return_value=CORRECT_AUTH_CONTENT) @@ -92,20 +100,30 @@ class TestApiAccess(TestFetch): self.assertNotEqual(token, None, "Can't get token") def test_get_url(self): - self.response.json = Mock(return_value=GET_CONTENT) + get_response = copy.deepcopy(self.response) + get_response.status_code = requests.codes.ok + self.requests_get = requests.get + requests.get = MagicMock(return_value=get_response) + get_response.json = Mock(return_value=GET_CONTENT) result = self.api_access.get_url(TEST_URL, TEST_HEADER) # check whether it returns content message when the response is correct self.assertNotEqual(result, None, "Can't get content when the " "response is correct") + requests.get = self.requests_get def test_get_url_with_error_response(self): - self.response.status_code = requests.codes.bad_request - self.response.json = Mock(return_value=None) - self.response.text = 
"Bad request" + get_response = copy.deepcopy(self.response) + get_response.status_code = requests.codes.bad_request + get_response.text = "Bad request" + get_response.json = Mock(return_value=GET_CONTENT) + self.requests_get = requests.get + requests.get = MagicMock(return_value=get_response) + # the response will be wrong result = self.api_access.get_url(TEST_URL, TEST_HEADER) self.assertEqual(result, None, "Result returned" + "when the response status is not 200") + requests.get = self.requests_get def test_get_region_url(self): region_url = self.api_access.get_region_url(REGION_NAME, SERVICE_NAME) @@ -120,23 +138,30 @@ class TestApiAccess(TestFetch): def test_get_region_url_without_service_endpoint(self): # error service doesn't exist in region service endpoints - region_url = self.api_access.get_region_url(REGION_NAME, ERROR_SERVICE_NAME) - self.assertIs(region_url, None, "Can't get None with wrong service name") + region_url = self.api_access.get_region_url(REGION_NAME, + ERROR_SERVICE_NAME) + self.assertIs(region_url, None, + "Can't get None with wrong service name") def test_region_url_nover(self): - # mock return value of get_region_url, which has something starting from v2 + # mock return value of get_region_url, + # which has something starting from v2 self.api_access.get_region_url = MagicMock(return_value=REGION_URL) - region_url = self.api_access.get_region_url_nover(REGION_NAME, SERVICE_NAME) + region_url = self.api_access.get_region_url_nover(REGION_NAME, + SERVICE_NAME) # get_region_nover will remove everything from v2 - self.assertNotIn("v2", region_url, "Can't get region url without v2 info") + self.assertNotIn("v2", region_url, + "Can't get region url without v2 info") def test_get_service_region_endpoints(self): region = REGIONS[REGION_NAME] - result = self.api_access.get_service_region_endpoints(region, SERVICE_NAME) + result = self.api_access.get_service_region_endpoints(region, + SERVICE_NAME) self.assertNotEqual(result, None, "Can't get 
service endpoint") def test_get_service_region_endpoints_with_nonexistent_service(self): region = REGIONS[REGION_NAME] - result = self.api_access.get_service_region_endpoints(region, ERROR_SERVICE_NAME) + get_endpoints = self.api_access.get_service_region_endpoints + result = get_endpoints(region, ERROR_SERVICE_NAME) self.assertIs(result, None, "Can't get None when the service name " + "doesn't exist in region's services") diff --git a/app/test/fetch/api_fetch/test_api_fetch_project_hosts.py b/app/test/fetch/api_fetch/test_api_fetch_project_hosts.py index da3df17..784079e 100644 --- a/app/test/fetch/api_fetch/test_api_fetch_project_hosts.py +++ b/app/test/fetch/api_fetch/test_api_fetch_project_hosts.py @@ -7,6 +7,7 @@ # which accompanies this distribution, and is available at # # http://www.apache.org/licenses/LICENSE-2.0 # ############################################################################### +import copy from unittest.mock import MagicMock from discover.fetchers.api.api_fetch_project_hosts import ApiFetchProjectHosts from test.fetch.test_fetch import TestFetch @@ -36,23 +37,28 @@ class TestApiFetchProjectHosts(TestFetch): "type in host_type") def test_add_host_type_with_existent_host_type(self): + fetch_host_os_details = self.fetcher.fetch_host_os_details + self.fetcher.fetch_host_os_details = MagicMock() # add nonexistent host type to host type HOST_DOC["host_type"] = [NONEXISTENT_TYPE] # try to add existing host type self.fetcher.add_host_type(HOST_DOC, NONEXISTENT_TYPE, HOST_ZONE) - self.assertEqual(len(HOST_DOC['host_type']), 1, "Add duplicate host type") + self.assertEqual(len(HOST_DOC['host_type']), 1, + "Add duplicate host type") + self.fetcher.fetch_host_os_details = fetch_host_os_details def test_add_compute_host_type(self): - HOST_DOC['host_type'] = [] + doc = copy.deepcopy(HOST_DOC) + doc['host_type'] = [] # clear zone - HOST_DOC['zone'] = None + doc['zone'] = None # add compute host type - self.fetcher.add_host_type(HOST_DOC, COMPUTE_TYPE, 
HOST_ZONE) + self.fetcher.add_host_type(doc, COMPUTE_TYPE, HOST_ZONE) # for compute host type, zone information will be added - self.assertEqual(HOST_DOC['zone'], HOST_ZONE, "Can't update zone " + - "name for compute node") - self.assertEqual(HOST_DOC['parent_id'], HOST_ZONE, "Can't update parent_id " + - "for compute node") + self.assertEqual(doc['zone'], HOST_ZONE, + "Can't update zone name for compute node") + self.assertEqual(doc['parent_id'], HOST_ZONE, + "Can't update parent_id for compute node") def test_fetch_compute_node_ip_address(self): # mock ip address information fetched from DB @@ -78,16 +84,24 @@ class TestApiFetchProjectHosts(TestFetch): def test_get_host_details(self): # test node have nova-conductor attribute, controller type will be added + fetch_host_os_details = self.fetcher.fetch_host_os_details + self.fetcher.fetch_host_os_details = MagicMock() result = self.fetcher.get_host_details(AVAILABILITY_ZONE, HOST_NAME) self.assertIn("Controller", result['host_type'], "Can't put controller type " + "in the compute node host_type") + self.fetcher.fetch_host_os_details = fetch_host_os_details def test_get_hosts_from_az(self): + fetch_host_os_details = self.fetcher.fetch_host_os_details + self.fetcher.fetch_host_os_details = MagicMock() result = self.fetcher.get_hosts_from_az(AVAILABILITY_ZONE) self.assertNotEqual(result, [], "Can't get hosts information from " "availability zone") + self.fetcher.fetch_host_os_details = fetch_host_os_details def test_get_for_region(self): + fetch_host_os_details = self.fetcher.fetch_host_os_details + self.fetcher.fetch_host_os_details = MagicMock() # mock region url for nova node self.fetcher.get_region_url = MagicMock(return_value=REGION_URL) # mock the response from OpenStack Api @@ -96,6 +110,7 @@ class TestApiFetchProjectHosts(TestFetch): result = self.fetcher.get_for_region(self.region, TOKEN) self.assertNotEqual(result, [], "Can't get hosts information for region") + self.fetcher.fetch_host_os_details = 
fetch_host_os_details def test_get_for_region_without_token(self): self.fetcher.get_region_url = MagicMock(return_value=REGION_URL) @@ -112,6 +127,8 @@ class TestApiFetchProjectHosts(TestFetch): self.assertEqual(result, [], "Can't get [] when the response is wrong") def test_get_for_region_with_error_hypervisors_response(self): + fetch_host_os_details = self.fetcher.fetch_host_os_details + self.fetcher.fetch_host_os_details = MagicMock() self.fetcher.get_region_url = MagicMock(return_value=REGION_URL) # mock error hypervisors response from OpenStack Api side_effect = [AVAILABILITY_ZONE_RESPONSE, HYPERVISORS_ERROR_RESPONSE] @@ -120,6 +137,7 @@ class TestApiFetchProjectHosts(TestFetch): result = self.fetcher.get_for_region(self.region, TOKEN) self.assertNotEqual(result, [], "Can't get hosts information when " + "the hypervisors response is wrong") + self.fetcher.fetch_host_os_details = fetch_host_os_details def test_get(self): original_method = self.fetcher.get_for_region @@ -140,6 +158,15 @@ class TestApiFetchProjectHosts(TestFetch): result = self.fetcher.get(PROJECT_NAME) self.assertEqual(result, [], "Can't get [] when the token is invalid") + def test_fetch_host_os_details(self): + original_method = self.fetcher.run + self.fetcher.run = MagicMock(return_value=OS_DETAILS_INPUT) + doc = {'host': 'host1'} + self.fetcher.fetch_host_os_details(doc) + self.assertEqual(doc.get('OS', {}), OS_DETAILS) + self.fetcher.run = original_method + + def tearDown(self): super().tearDown() ApiFetchProjectHosts.v2_auth_pwd = self._v2_auth_pwd diff --git a/app/test/fetch/api_fetch/test_data/api_fetch_host_project_hosts.py b/app/test/fetch/api_fetch/test_data/api_fetch_host_project_hosts.py index 3ef1ac7..ba42590 100644 --- a/app/test/fetch/api_fetch/test_data/api_fetch_host_project_hosts.py +++ b/app/test/fetch/api_fetch/test_data/api_fetch_host_project_hosts.py @@ -223,3 +223,24 @@ GET_FOR_REGION_INFO = [ "zone": "osdna-zone" } ] + +OS_DETAILS_INPUT = """ +NAME="Ubuntu" 
+VERSION="16.04 LTS (Xenial Xerus)" +ID=ubuntu +ID_LIKE=debian +PRETTY_NAME="Ubuntu 16.04 LTS" +VERSION_ID="16.04" +HOME_URL="http://www.ubuntu.com/" +SUPPORT_URL="http://help.ubuntu.com/" +BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/" +UBUNTU_CODENAME=xenial +ARCHITECURE=x86_64 +""" +OS_DETAILS = { + 'name': 'Ubuntu', + 'version': '16.04 LTS (Xenial Xerus)', + 'ID': 'ubuntu', + 'ID_LIKE': 'debian', + 'architecure': 'x86_64' +} diff --git a/app/test/fetch/link_finders/__init__.py b/app/test/fetch/link_finders/__init__.py new file mode 100644 index 0000000..b0637e9 --- /dev/null +++ b/app/test/fetch/link_finders/__init__.py @@ -0,0 +1,9 @@ +############################################################################### +# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) # +# and others # +# # +# All rights reserved. This program and the accompanying materials # +# are made available under the terms of the Apache License, Version 2.0 # +# which accompanies this distribution, and is available at # +# http://www.apache.org/licenses/LICENSE-2.0 # +############################################################################### diff --git a/app/test/fetch/link_finders/test_data/__init__.py b/app/test/fetch/link_finders/test_data/__init__.py new file mode 100644 index 0000000..b0637e9 --- /dev/null +++ b/app/test/fetch/link_finders/test_data/__init__.py @@ -0,0 +1,9 @@ +############################################################################### +# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) # +# and others # +# # +# All rights reserved. 
This program and the accompanying materials # +# are made available under the terms of the Apache License, Version 2.0 # +# which accompanies this distribution, and is available at # +# http://www.apache.org/licenses/LICENSE-2.0 # +############################################################################### diff --git a/app/test/fetch/link_finders/test_data/test_find_implicit_links.py b/app/test/fetch/link_finders/test_data/test_find_implicit_links.py new file mode 100644 index 0000000..aef20f6 --- /dev/null +++ b/app/test/fetch/link_finders/test_data/test_find_implicit_links.py @@ -0,0 +1,303 @@ +############################################################################### +# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) # +# and others # +# # +# All rights reserved. This program and the accompanying materials # +# are made available under the terms of the Apache License, Version 2.0 # +# which accompanies this distribution, and is available at # +# http://www.apache.org/licenses/LICENSE-2.0 # +############################################################################### +ENV = 'env1' +CLIQUE_CONSTRAINTS = [ + { + 'focal_point_type': 'instance', + 'constraints': ['network'] + }, + { + 'focal_point_type': 'dummy1', + 'constraints': [] + }, + { + 'focal_point_type': 'dummy2', + 'constraints': ['network', 'dummy_constraint'] + }, + { + 'focal_point_type': 'dummy3', + 'constraints': ['dummy_constraint2'] + } +] +CONSTRAINTS = ['network', 'dummy_constraint', 'dummy_constraint2'] + +LINK_ATTRIBUTES_NONE = {} +LINK_ATTRIBUTES_NONE_2 = {} +LINK_ATTRIBUTES_EMPTY = {'attributes': []} +LINK_ATTR_V1 = {'attributes': {'network': 'v1'}} +LINK_ATTR_V1_2 = {'attributes': {'network': 'v1'}} +LINK_ATTR_V2 = {'attributes': {'network': 'v2'}} +LINK_ATTR_V1_AND_A2V2 = {'attributes': {'network': 'v1', 'attr2': 'v2'}} + +LINK_TYPE_1 = { + 'link_type': 'instance-vnic', + 'source_id': 'instance1', + 'target_id': 'vnic1' +} +LINK_TYPE_1_REVERSED = { + 
'link_type': 'instance-vnic', + 'source_id': 'vnic1', + 'target_id': 'instance1' +} +LINK_TYPE_1_2 = { + 'link_type': 'instance-vnic', + 'source_id': 'instance1', + 'target_id': 'vnic2' +} +LINK_TYPE_2 = { + 'link_type': 'vnic-vconnector', + 'source_id': 'vnic1', + 'target_id': 'vconnector1' +} +LINK_TYPE_3 = { + 'implicit': True, + 'link_type': 'instance-vconnector', + 'source_id': 'instance1', + 'target_id': 'vconnector1' +} +LINK_TYPE_4_NET1 = { + 'environment': ENV, + 'implicit': True, + 'link_type': 'instance-host_pnic', + 'source': 'instance1_dbid', + 'source_id': 'instance1', + 'target': 'host_pnic1_dbid', + 'target_id': 'host_pnic1', + 'host': 'host1', + 'link_name': '', + 'state': 'up', + 'source_label': '', + 'target_label': '', + 'link_weight': 0, + 'attributes': {'network': 'netID1'} +} +LINK_TYPE_5_NET2 = { + 'environment': ENV, + 'link_type': 'host_pnic-switch', + 'source_id': 'host_pnic1', + 'target': 'switch1_dbid', + 'target_id': 'switch1', + 'host': 'host2', + 'link_name': '', + 'state': 'up', + 'source_label': '', + 'target_label': '', + 'link_weight': 0, + 'attributes': {'network': 'netID2'} +} +LINK_TYPE_6_NET1 = { + 'environment': ENV, + 'link_type': 'host_pnic-switch', + 'source': 'host_pnic1_dbid', + 'source_id': 'host_pnic1', + 'target': 'switch2_dbid', + 'target_id': 'switch2', + 'host': 'host1', + 'link_name': '', + 'state': 'up', + 'source_label': '', + 'target_label': '', + 'link_weight': 0, + 'attributes': {'network': 'netID1'} +} +LINK_TYPE_7_NET1 = { + 'environment': ENV, + 'implicit': True, + 'link_type': 'instance-switch', + 'source': 'instance1_dbid', + 'source_id': 'instance1', + 'target': 'switch2_dbid', + 'target_id': 'switch2', + 'host': 'host1', + 'link_name': '', + 'state': 'up', + 'source_label': '', + 'target_label': '', + 'link_weight': 0, + 'attributes': {'network': 'netID1'} +} + +LINK_FULL_A2B = { + 'environment': ENV, + 'link_type': 'instance-vnic', + 'source': 'instance1_dbid', + 'source_id': 'instance1', + 'target': 
'vnic1_dbid', + 'target_id': 'vnic1', + 'host': 'host1', + 'link_name': '', + 'state': 'up', + 'source_label': '', + 'target_label': '', + 'link_weight': 0, + 'attributes': {'network': 'netID1'} +} +LINK_FULL_B2C = { + 'environment': ENV, + 'link_type': 'vnic-vconnector', + 'source': 'vnic1_dbid', + 'source_id': 'vnic1', + 'target': 'vconnector1_dbid', + 'target_id': 'vconnector1', + 'host': 'host1', + 'link_name': '', + 'state': 'up', + 'source_label': '', + 'target_label': '', + 'link_weight': 0, + 'attributes': {'network': 'netID1'} +} +LINK_FULL_C2D = { + 'environment': ENV, + 'link_type': 'vconnector-vedge', + 'source': 'vconnector1_dbid', + 'source_id': 'vconnector1', + 'target': 'vedge1_dbid', + 'target_id': 'vedge1', + 'host': 'host1', + 'link_name': '', + 'state': 'up', + 'source_label': '', + 'target_label': '', + 'link_weight': 0, + 'attributes': {'network': 'netID1'} +} +LINK_FULL_D2E = { + 'environment': ENV, + 'link_type': 'vedge-otep', + 'source': 'vedge1_dbid', + 'source_id': 'vedge1', + 'target': 'otep1_dbid', + 'target_id': 'otep1', + 'host': 'host1', + 'link_name': '', + 'state': 'up', + 'source_label': '', + 'target_label': '', + 'link_weight': 0, + 'attributes': {'network': 'netID1'} +} +LINK_FULL_A2C = { + 'environment': ENV, + 'implicit': True, + 'link_type': 'instance-vconnector', + 'source': 'instance1_dbid', + 'source_id': 'instance1', + 'target': 'vconnector1_dbid', + 'target_id': 'vconnector1', + 'host': 'host1', + 'link_name': '', + 'state': 'up', + 'source_label': '', + 'target_label': '', + 'link_weight': 0, + 'attributes': {'network': 'netID1'} +} +LINK_FULL_B2D = { + 'environment': ENV, + 'implicit': True, + 'link_type': 'vnic-vedge', + 'source': 'vnic1_dbid', + 'source_id': 'vnic1', + 'target': 'vedge1_dbid', + 'target_id': 'vedge1', + 'host': 'host1', + 'link_name': '', + 'state': 'up', + 'source_label': '', + 'target_label': '', + 'link_weight': 0, + 'attributes': {'network': 'netID1'} +} +LINK_FULL_C2E = { + 'environment': ENV, 
+ 'implicit': True, + 'link_type': 'vconnector-otep', + 'source': 'vconnector1_dbid', + 'source_id': 'vconnector1', + 'target': 'otep1_dbid', + 'target_id': 'otep1', + 'host': 'host1', + 'link_name': '', + 'state': 'up', + 'source_label': '', + 'target_label': '', + 'link_weight': 0, + 'attributes': {'network': 'netID1'} +} +LINK_FULL_A2D = { + 'environment': ENV, + 'implicit': True, + 'link_type': 'instance-vedge', + 'source': 'instance1_dbid', + 'source_id': 'instance1', + 'target': 'vedge1_dbid', + 'target_id': 'vedge1', + 'host': 'host1', + 'link_name': '', + 'state': 'up', + 'source_label': '', + 'target_label': '', + 'link_weight': 0, + 'attributes': {'network': 'netID1'} +} +LINK_FULL_B2E = { + 'environment': ENV, + 'implicit': True, + 'link_type': 'vnic-otep', + 'source': 'vnic1_dbid', + 'source_id': 'vnic1', + 'target': 'otep1_dbid', + 'target_id': 'otep1', + 'host': 'host1', + 'link_name': '', + 'state': 'up', + 'source_label': '', + 'target_label': '', + 'link_weight': 0, + 'attributes': {'network': 'netID1'} +} +LINK_FULL_A2E = { + 'environment': ENV, + 'implicit': True, + 'link_type': 'instance-otep', + 'source': 'instance1_dbid', + 'source_id': 'instance1', + 'target': 'otep1_dbid', + 'target_id': 'otep1', + 'host': 'host1', + 'link_name': '', + 'state': 'up', + 'source_label': '', + 'target_label': '', + 'link_weight': 0, + 'attributes': {'network': 'netID1'} +} +BASE_LINKS = [ + {'pass': 0, 'link': LINK_FULL_A2B}, + {'pass': 0, 'link': LINK_FULL_B2C}, + {'pass': 0, 'link': LINK_FULL_C2D}, + {'pass': 0, 'link': LINK_FULL_D2E}, +] +IMPLICIT_LINKS = [ + [ + {'pass': 1, 'link': LINK_FULL_A2C}, + {'pass': 1, 'link': LINK_FULL_B2D}, + {'pass': 1, 'link': LINK_FULL_C2E}, + ], + [ + {'pass': 2, 'link': LINK_FULL_A2D}, + {'pass': 2, 'link': LINK_FULL_B2E}, + ], + [ + {'pass': 3, 'link': LINK_FULL_A2E}, + ], + [] +] diff --git a/app/test/fetch/link_finders/test_find_implicit_links.py b/app/test/fetch/link_finders/test_find_implicit_links.py new file mode 
100644 index 0000000..9931688 --- /dev/null +++ b/app/test/fetch/link_finders/test_find_implicit_links.py @@ -0,0 +1,107 @@ +############################################################################### +# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) # +# and others # +# # +# All rights reserved. This program and the accompanying materials # +# are made available under the terms of the Apache License, Version 2.0 # +# which accompanies this distribution, and is available at # +# http://www.apache.org/licenses/LICENSE-2.0 # +############################################################################### +import bson + +from discover.link_finders.find_implicit_links import FindImplicitLinks +from test.fetch.test_fetch import TestFetch +from unittest.mock import MagicMock +from test.fetch.link_finders.test_data.test_find_implicit_links import * + +from utils.inventory_mgr import InventoryMgr + + +class TestFindImplicitLinks(TestFetch): + + def setUp(self): + super().setUp() + self.configure_environment() + self.fetcher = FindImplicitLinks() + self.fetcher.set_env(ENV) + self.fetcher.constraint_attributes = ['network'] + self.original_write_link = self.inv.write_link + self.inv.write_link = lambda x: x + self.original_objectid = bson.ObjectId + bson.ObjectId = lambda x: x + + def tearDown(self): + super().tearDown() + bson.ObjectId = self.original_objectid + self.inv.write_link = self.original_write_link + + def test_get_constraint_attributes(self): + original_find = InventoryMgr.find + InventoryMgr.find = MagicMock(return_value=CLIQUE_CONSTRAINTS) + constraint_types = self.fetcher.get_constraint_attributes() + self.assertEqual(sorted(constraint_types), sorted(CONSTRAINTS)) + InventoryMgr.find = original_find + + def test_constraints_match(self): + matcher = self.fetcher.constraints_match + self.assertTrue(matcher(LINK_ATTRIBUTES_NONE, LINK_ATTRIBUTES_NONE_2)) + self.assertTrue(matcher(LINK_ATTRIBUTES_NONE, LINK_ATTRIBUTES_EMPTY)) + 
self.assertTrue(matcher(LINK_ATTRIBUTES_NONE, LINK_ATTR_V1)) + self.assertTrue(matcher(LINK_ATTRIBUTES_EMPTY, LINK_ATTR_V1)) + self.assertTrue(matcher(LINK_ATTR_V1, LINK_ATTR_V1_2)) + self.assertTrue(matcher(LINK_ATTR_V1, + LINK_ATTR_V1_AND_A2V2)) + self.assertFalse(matcher(LINK_ATTR_V1, LINK_ATTR_V2)) + + def test_links_match(self): + matcher = self.fetcher.links_match + self.assertFalse(matcher(LINK_TYPE_1, LINK_TYPE_1_2)) + self.assertFalse(matcher(LINK_TYPE_1, LINK_TYPE_1_REVERSED)) + self.assertFalse(matcher(LINK_TYPE_4_NET1, LINK_TYPE_5_NET2)) + self.assertFalse(matcher(LINK_TYPE_1_2, LINK_TYPE_2)) + self.assertTrue(matcher(LINK_TYPE_1, LINK_TYPE_2)) + + def test_get_link_constraint_attributes(self): + getter = self.fetcher.get_link_constraint_attributes + self.assertEqual(getter(LINK_TYPE_1, LINK_TYPE_1_2), {}) + self.assertEqual(getter(LINK_TYPE_1, LINK_TYPE_4_NET1), + LINK_TYPE_4_NET1.get('attributes')) + self.assertEqual(getter(LINK_TYPE_4_NET1, LINK_TYPE_1), + LINK_TYPE_4_NET1.get('attributes')) + self.assertEqual(getter(LINK_TYPE_1, LINK_TYPE_5_NET2), + LINK_TYPE_5_NET2.get('attributes')) + self.assertEqual(getter(LINK_TYPE_4_NET1, LINK_TYPE_6_NET1), + LINK_TYPE_4_NET1.get('attributes')) + + def test_get_attr(self): + getter = self.fetcher.get_attr + self.assertIsNone(getter('host', {}, {})) + self.assertIsNone(getter('host', {'host': 'v1'}, {'host': 'v2'})) + self.assertEqual(getter('host', {'host': 'v1'}, {}), 'v1') + self.assertEqual(getter('host', {}, {'host': 'v2'}), 'v2') + self.assertEqual(getter('host', {'host': 'v1'}, {'host': 'v1'}), 'v1') + + def test_add_implicit_link(self): + original_write_link = self.inv.write_link + self.inv.write_link = lambda x: x + original_objectid = bson.ObjectId + bson.ObjectId = lambda x: x + add_func = self.fetcher.add_implicit_link + self.assertEqual(add_func(LINK_TYPE_4_NET1, LINK_TYPE_6_NET1), + LINK_TYPE_7_NET1) + bson.ObjectId = original_objectid + self.inv.write_link = original_write_link + + def 
test_get_transitive_closure(self): + self.fetcher.links = [ + {'pass': 0, 'link': LINK_FULL_A2B}, + {'pass': 0, 'link': LINK_FULL_B2C}, + {'pass': 0, 'link': LINK_FULL_C2D}, + {'pass': 0, 'link': LINK_FULL_D2E}, + ] + self.fetcher.get_transitive_closure() + for pass_no in range(1, len(IMPLICIT_LINKS)): + implicit_links = [l for l in self.fetcher.links + if l['pass'] == pass_no] + self.assertEqual(implicit_links, IMPLICIT_LINKS[pass_no-1], + 'incorrect links for pass #{}'.format(pass_no)) diff --git a/app/test/scan/test_data/configurations.py b/app/test/scan/test_data/configurations.py index 96dbc23..044ff0b 100644 --- a/app/test/scan/test_data/configurations.py +++ b/app/test/scan/test_data/configurations.py @@ -47,6 +47,7 @@ CONFIGURATIONS = { "provision": "Deploy", "env_type": "development", "name": "Monitoring", + "install_monitoring_client": True, "rabbitmq_port": "5672", "rabbitmq_pass": "osdna", "rabbitmq_user": "sensu", diff --git a/app/test/scan/test_data/scanner.py b/app/test/scan/test_data/scanner.py index 23838aa..500021d 100644 --- a/app/test/scan/test_data/scanner.py +++ b/app/test/scan/test_data/scanner.py @@ -17,6 +17,19 @@ METADATA = { "scanners_package": "discover", "scanners": {} } +LINK_FINDERS_METADATA = { + "finders_package": "discover.link_finders", + "base_finder": "FindLinks", + "link_finders": [ + "FindLinksForInstanceVnics", + "FindLinksForOteps", + "FindLinksForVconnectors", + "FindLinksForVedges", + "FindLinksForVserviceVnics", + "FindLinksForPnics", + "FindImplicitLinks" + ] +} TYPE_TO_FETCH = { "type": "host_pnic", diff --git a/app/test/scan/test_scan_metadata_parser.py b/app/test/scan/test_scan_metadata_parser.py index 91c11ef..5d91306 100644 --- a/app/test/scan/test_scan_metadata_parser.py +++ b/app/test/scan/test_scan_metadata_parser.py @@ -104,6 +104,8 @@ class TestScanMetadataParser(TestScan): 'input': METADATA_SCANNER_INCORRECT_FETCHER, 'msg': 'failed to find fetcher class f1 ' 'in scanner ScanAggregate type #1' + ' (could not 
import module discover.fetchers.f1.f1: ' + 'No module named \'discover.fetchers.f1\')' }, { 'input': METADATA_SCANNER_WITH_INCORRECT_CHILD, diff --git a/app/test/scan/test_scanner.py b/app/test/scan/test_scanner.py index 4a7536e..e93a35b 100644 --- a/app/test/scan/test_scanner.py +++ b/app/test/scan/test_scanner.py @@ -10,6 +10,9 @@ from discover.scanner import Scanner from test.scan.test_scan import TestScan from unittest.mock import MagicMock, patch + +from discover.link_finders.find_links_metadata_parser \ + import FindLinksMetadataParser from discover.scan_metadata_parser import ScanMetadataParser from test.scan.test_data.scanner import * from monitoring.setup.monitoring_setup_manager import MonitoringSetupManager @@ -19,7 +22,10 @@ class TestScanner(TestScan): def setUp(self): super().setUp() - ScanMetadataParser.parse_metadata_file = MagicMock(return_value=METADATA) + ScanMetadataParser.parse_metadata_file = \ + MagicMock(return_value=METADATA) + FindLinksMetadataParser.parse_metadata_file = \ + MagicMock(return_value=LINK_FINDERS_METADATA) self.scanner = Scanner() self.scanner.set_env(self.env) MonitoringSetupManager.create_setup = MagicMock() diff --git a/app/test/verify.sh b/app/test/verify.sh index a7ac9a2..681b5ed 100755 --- a/app/test/verify.sh +++ b/app/test/verify.sh @@ -11,4 +11,8 @@ set -o errexit set -o nounset set -o pipefail +PYTHONPATH=$PWD/app python3 -m unittest discover -s app/test/api +PYTHONPATH=$PWD/app python3 -m unittest discover -s app/test/event_based_scan PYTHONPATH=$PWD/app python3 -m unittest discover -s app/test/fetch +PYTHONPATH=$PWD/app python3 -m unittest discover -s app/test/scan +PYTHONPATH=$PWD/app python3 -m unittest discover -s app/test/utils diff --git a/app/utils/api_access_base.py b/app/utils/api_access_base.py new file mode 100644 index 0000000..31f50b4 --- /dev/null +++ b/app/utils/api_access_base.py @@ -0,0 +1,51 @@ +############################################################################### +# Copyright (c) 2017 
Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) # +# and others # +# # +# All rights reserved. This program and the accompanying materials # +# are made available under the terms of the Apache License, Version 2.0 # +# which accompanies this distribution, and is available at # +# http://www.apache.org/licenses/LICENSE-2.0 # +############################################################################### +import requests + +from discover.configuration import Configuration +from discover.fetcher import Fetcher + + +class ApiAccessBase(Fetcher): + + CONNECT_TIMEOUT = 5 + + def __init__(self, api_name=None, config=None): + super().__init__() + if api_name is None: + raise ValueError('ApiAccessBase: api_name must be defined') + self.config = {api_name: config} if config else Configuration() + self.api_config = self.config.get(api_name) + if self.api_config is None: + raise ValueError('ApiAccessBase: section "{}" missing in config' + .format(api_name)) + self.host = self.api_config.get('host', '') + self.port = self.api_config.get('port', '') + if not (self.host and self.port): + raise ValueError('Missing definition of host or port ' + + 'for {} API access' + .format(api_name)) + + def get_rel_url(self, relative_url, headers): + req_url = self.base_url + relative_url + return self.get_url(req_url, headers) + + def get_url(self, req_url, headers): + response = requests.get(req_url, headers=headers) + if response.status_code != requests.codes.ok: + # some error happened + if 'reason' in response: + msg = ', reason: {}'.format(response.reason) + else: + msg = ', response: {}'.format(response.text) + self.log.error('req_url: {} {}'.format(req_url, msg)) + return None + ret = response.json() + return ret diff --git a/app/utils/inventory_mgr.py b/app/utils/inventory_mgr.py index 722d0aa..bbc5542 100644 --- a/app/utils/inventory_mgr.py +++ b/app/utils/inventory_mgr.py @@ -265,6 +265,7 @@ class InventoryMgr(MongoAccess, metaclass=Singleton): # source_label, target_label: 
labels for the ends of the link (optional) def create_link(self, env, src, source_id, target, target_id, link_type, link_name, state, link_weight, + implicit=False, source_label="", target_label="", host=None, switch=None, extra_attributes=None): @@ -282,6 +283,7 @@ class InventoryMgr(MongoAccess, metaclass=Singleton): "link_weight": link_weight, "source_label": source_label, "target_label": target_label, + "implicit": implicit, "attributes": extra_attributes if extra_attributes else {} } if host: @@ -347,16 +349,18 @@ class InventoryMgr(MongoAccess, metaclass=Singleton): if not env_config: return False - # Workaround for mechanism_drivers field type - mechanism_driver = env_config['mechanism_drivers'][0] \ - if isinstance(env_config['mechanism_drivers'], list) \ - else env_config['mechanism_drivers'] + # Workarounds for mechanism_drivers and distribution_version field types + mechanism_driver = env_config.get('mechanism_drivers') + if isinstance(mechanism_driver, list): + mechanism_driver = mechanism_driver[0] + env_distribution_version = env_config.get('distribution_version') + if isinstance(env_distribution_version, list): + env_distribution_version = env_distribution_version[0] full_env = { - 'environment.distribution': env_config['distribution'], - 'environment.distribution_version': - {"$in": [env_config['distribution_version']]}, - 'environment.type_drivers': env_config['type_drivers'], + 'environment.distribution': env_config.get('distribution'), + 'environment.distribution_version': env_distribution_version, + 'environment.type_drivers': env_config.get('type_drivers'), 'environment.mechanism_drivers': mechanism_driver } return self.is_feature_supported_in_env(full_env, feature) @@ -394,8 +398,10 @@ class InventoryMgr(MongoAccess, metaclass=Singleton): self.log.error("failed to find master parent " + master_parent_id) return False - folder_id_path = "/".join((master_parent["id_path"], o["parent_id"])) - folder_name_path = 
"/".join((master_parent["name_path"], o["parent_text"])) + folder_id_path = "/".join((master_parent["id_path"], + o["parent_id"])) + folder_name_path = "/".join((master_parent["name_path"], + o["parent_text"])) folder = { "environment": parent["environment"], "parent_id": master_parent_id, diff --git a/app/utils/mongo_access.py b/app/utils/mongo_access.py index d4599f1..75c265c 100644 --- a/app/utils/mongo_access.py +++ b/app/utils/mongo_access.py @@ -36,8 +36,10 @@ class MongoAccess(DictNamingConverter): def __init__(self): super().__init__() - self.log_file = os.path.join(FileLogger.LOG_DIRECTORY, - MongoAccess.LOG_FILENAME) + log_dir = FileLogger.LOG_DIRECTORY \ + if os.path.isdir(FileLogger.LOG_DIRECTORY) \ + else os.path.abspath('.') + self.log_file = os.path.join(log_dir, MongoAccess.LOG_FILENAME) try: self.log = FileLogger(self.log_file) diff --git a/app/utils/ssh_connection.py b/app/utils/ssh_connection.py index e9dd39a..b9b1cde 100644 --- a/app/utils/ssh_connection.py +++ b/app/utils/ssh_connection.py @@ -22,6 +22,7 @@ class SshConnection(BinaryConverter): max_call_count_per_con = 100 timeout = 15 # timeout for exec in seconds + CONNECT_TIMEOUT = 5 DEFAULT_PORT = 22 @@ -118,7 +119,8 @@ class SshConnection(BinaryConverter): pkey=k, port=self.port if self.port is not None else self.DEFAULT_PORT, - password=self.pwd, timeout=30) + password=self.pwd, + timeout=self.CONNECT_TIMEOUT) else: port = None try: @@ -127,7 +129,7 @@ class SshConnection(BinaryConverter): username=self.user, password=self.pwd, port=port, - timeout=30) + timeout=self.CONNECT_TIMEOUT) except paramiko.ssh_exception.AuthenticationException: self.log.error('Failed SSH connect to host {}, port={}' .format(self.host, port)) diff --git a/app/utils/util.py b/app/utils/util.py index ae7b518..17a31c0 100644 --- a/app/utils/util.py +++ b/app/utils/util.py @@ -47,7 +47,6 @@ class ClassResolver: class_name = ''.join(name_parts) return class_name - @staticmethod def 
get_fully_qualified_class(class_name: str = None, package_name: str = "discover", @@ -58,8 +57,9 @@ class ClassResolver: module_name = ".".join(module_parts) try: class_module = importlib.import_module(module_name) - except ImportError: - raise ValueError('could not import module {}'.format(module_name)) + except ImportError as e: + raise ValueError('could not import module {}: {}' + .format(module_name, str(e))) clazz = getattr(class_module, class_name) return clazz @@ -74,7 +74,8 @@ class ClassResolver: class_name = ClassResolver.get_class_name_by_module(module_name) if class_name in ClassResolver.instances: return 'instance', ClassResolver.instances[class_name] - clazz = ClassResolver.get_fully_qualified_class(class_name, package_name, + clazz = ClassResolver.get_fully_qualified_class(class_name, + package_name, module_name) return 'class', clazz |