author    Koren Lev <korenlev@gmail.com>    2017-09-29 01:38:18 +0300
committer    Yaron Yogev <yaronyogev@gmail.com>    2017-10-03 09:46:58 +0000
commit    d0adff06bed72f9a0edd7adccfa6a1111784bc8b (patch)
tree    4b5eaf1107e6973b1eac636309a99c83074acbfc /app
parent    fbbaf20912c79bd99a5c3696850d70c11965f56b (diff)
release 1.0 calipso for opnfv apex
Change-Id: I3e63cd27c5f4d3756e67a07c749863a68e84dde2
Signed-off-by: Koren Lev <korenlev@gmail.com>
(cherry picked from commit d32f75145676bacefde0d08a14680a5984623451)
Diffstat (limited to 'app')
-rw-r--r-- app/api/auth/auth.py | 4
-rw-r--r-- app/api/responders/resource/environment_configs.py | 6
-rw-r--r-- app/config/events.json | 16
-rw-r--r-- app/config/link_finders.json | 4
-rw-r--r-- app/config/scanners.json | 5
-rw-r--r-- app/discover/event_manager.py | 21
-rw-r--r-- app/discover/events/event_interface_add.py | 7
-rw-r--r-- app/discover/events/event_interface_delete.py | 3
-rw-r--r-- app/discover/events/event_router_add.py | 2
-rw-r--r-- app/discover/events/event_router_delete.py | 3
-rw-r--r-- app/discover/events/event_router_update.py | 2
-rw-r--r-- app/discover/events/event_subnet_add.py | 4
-rw-r--r-- app/discover/events/event_subnet_update.py | 5
-rwxr-xr-x app/discover/events/listeners/default_listener.py | 16
-rw-r--r-- app/discover/events/listeners/listener_base.py | 14
-rw-r--r-- app/discover/fetcher_new.py | 30
-rw-r--r-- app/discover/fetchers/cli/cli_access.py | 62
-rw-r--r-- app/discover/fetchers/cli/cli_fetch_bond_host_pnics.py | 134
-rw-r--r-- app/discover/fetchers/cli/cli_fetch_host_pnics.py | 26
-rw-r--r-- app/discover/fetchers/cli/cli_fetch_host_vservice.py | 22
-rw-r--r-- app/discover/fetchers/cli/cli_fetch_host_vservices.py | 2
-rw-r--r-- app/discover/fetchers/cli/cli_fetch_instance_vnics_base.py | 2
-rw-r--r-- app/discover/fetchers/cli/cli_fetch_vservice_vnics.py | 8
-rw-r--r-- app/discover/fetchers/db/db_access.py | 43
-rw-r--r-- app/discover/fetchers/db/db_fetch_host_network_agents.py | 3
-rw-r--r-- app/discover/fetchers/db/db_fetch_oteps.py | 4
-rw-r--r-- app/discover/fetchers/db/db_fetch_vedges_ovs.py | 63
-rw-r--r-- app/discover/link_finders/__init__.py | 10
-rw-r--r-- app/discover/link_finders/find_links_for_pnics.py | 12
-rw-r--r-- app/discover/link_finders/find_links_for_vconnectors.py | 3
-rw-r--r-- app/discover/link_finders/find_links_for_vservice_vnics.py | 16
-rw-r--r-- app/discover/scanner.py | 2
-rw-r--r-- app/install/calipso-installer.py | 55
-rw-r--r-- app/install/configure/setup_apex_environment.py | 568
-rw-r--r-- app/install/db/apex_environment_config.json | 3
-rw-r--r-- app/install/db/api_tokens.json | 7
-rw-r--r-- app/install/db/connection_tests.json | 156
-rw-r--r-- app/install/db/constants.json | 33
-rw-r--r-- app/install/db/environments_config.json | 10
-rw-r--r-- app/install/db/supported_environments.json | 428
-rw-r--r-- app/install/db/user_settings.json | 4
-rw-r--r-- app/install/ldap.conf.example | 2
-rw-r--r-- app/messages/message.py | 7
-rw-r--r-- app/monitoring/handlers/monitoring_check_handler.py | 3
-rw-r--r-- app/test/api/responders_test/resource/test_environment_configs.py | 22
-rw-r--r-- app/test/api/responders_test/test_data/base.py | 11
-rw-r--r-- app/test/api/responders_test/test_data/environment_configs.py | 16
-rw-r--r-- app/test/api/test_base.py | 6
-rw-r--r-- app/test/event_based_scan/test_interface_add.py | 2
-rw-r--r-- app/test/event_based_scan/test_interface_delete.py | 3
-rw-r--r-- app/test/event_based_scan/test_router_add.py | 2
-rw-r--r-- app/test/event_based_scan/test_router_update.py | 2
-rw-r--r-- app/test/fetch/api_fetch/test_data/configurations.py | 3
-rw-r--r-- app/test/fetch/cli_fetch/test_cli_access.py | 13
-rw-r--r-- app/test/fetch/db_fetch/mock_cursor.py | 24
-rw-r--r-- app/test/fetch/db_fetch/test_data/db_fetch_host_network_agents.py | 8
-rw-r--r-- app/test/fetch/db_fetch/test_data/db_fetch_oteps.py | 6
-rw-r--r-- app/test/fetch/db_fetch/test_data/db_fetch_vedges_ovs.py | 55
-rw-r--r-- app/test/fetch/db_fetch/test_db_fetch_oteps.py | 2
-rw-r--r-- app/test/fetch/db_fetch/test_db_fetch_vedges_ovs.py | 90
-rw-r--r-- app/test/scan/test_data/configurations.py | 3
-rw-r--r-- app/test/scan/test_data/scanner.py | 6
-rw-r--r-- app/test/utils/test_cli_dist_translator.py | 38
-rw-r--r-- app/utils/cli_dist_translator.py | 59
-rw-r--r-- app/utils/inventory_mgr.py | 19
-rw-r--r-- app/utils/logging/mongo_logging_handler.py | 3
-rw-r--r-- app/utils/mongo_access.py | 2
-rw-r--r-- app/utils/util.py | 4
68 files changed, 1730 insertions, 499 deletions
diff --git a/app/api/auth/auth.py b/app/api/auth/auth.py
index 04fc4b9..b7139f4 100644
--- a/app/api/auth/auth.py
+++ b/app/api/auth/auth.py
@@ -19,7 +19,7 @@ class Auth:
super().__init__()
self.inv = InventoryMgr()
self.log = FullLogger()
- self.tokens_coll = self.inv.client['tokens']['api_tokens']
+ self.tokens_coll = self.inv.collections['api_tokens']
self.ldap_access = LDAPAccess()
def get_token(self, token):
@@ -37,7 +37,7 @@ class Auth:
self.tokens_coll.insert_one(token)
except Exception as e:
self.log.error("Failed to write new token {0} to database for {1}"
- .format(token[token], str(e)))
+ .format(token['token'], str(e)))
error = 'Failed to create new token'
return error
diff --git a/app/api/responders/resource/environment_configs.py b/app/api/responders/resource/environment_configs.py
index 32e70ad..90a1adf 100644
--- a/app/api/responders/resource/environment_configs.py
+++ b/app/api/responders/resource/environment_configs.py
@@ -239,6 +239,7 @@ class EnvironmentConfigs(ResponderBase):
"configuration": self.require(list, mandatory=True),
"distribution": self.require(str, False, DataValidate.LIST,
self.distributions, True),
+ "distribution_version": self.require(str, mandatory=True),
"listen": self.require(bool, True, mandatory=True),
"user": self.require(str),
"mechanism_drivers": self.require(list, False, DataValidate.LIST,
@@ -343,8 +344,11 @@ class EnvironmentConfigs(ResponderBase):
# validate the environment config with supported environments
matches = {
'environment.distribution': env_config['distribution'],
+ 'environment.distribution_version':
+ env_config['distribution_version'],
'environment.type_drivers': env_config['type_drivers'],
- 'environment.mechanism_drivers': {'$in': env_config['mechanism_drivers']}
+ 'environment.mechanism_drivers':
+ {'$in': env_config['mechanism_drivers']}
}
err_prefix = 'configuration not accepted: '
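
The added distribution_version field feeds directly into the supported-environments validation. A minimal sketch (illustrative only, with hypothetical values) of the match document the responder now builds:

env_config = {
    'distribution': 'Apex',
    'distribution_version': 'Euphrates',
    'type_drivers': 'vxlan',
    'mechanism_drivers': ['OVS'],
}
matches = {
    'environment.distribution': env_config['distribution'],
    'environment.distribution_version': env_config['distribution_version'],
    'environment.type_drivers': env_config['type_drivers'],
    'environment.mechanism_drivers': {'$in': env_config['mechanism_drivers']},
}
# 'matches' is then used as the filter against the supported_environments
# collection; no hit means the configuration is rejected with the
# 'configuration not accepted' error prefix above.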
diff --git a/app/config/events.json b/app/config/events.json
index c067754..e09ebb6 100644
--- a/app/config/events.json
+++ b/app/config/events.json
@@ -23,36 +23,32 @@
"compute.instance.rebuild.end": "EventInstanceAdd",
"compute.instance.update": "EventInstanceUpdate",
"compute.instance.delete.end": "EventInstanceDelete",
- "network.create.start": "EventNetworkAdd",
"network.create.end": "EventNetworkAdd",
"network.update": "EventNetworkUpdate",
- "network.update.start": "EventNetworkUpdate",
"network.update.end": "EventNetworkUpdate",
"network.delete": "EventNetworkDelete",
- "network.delete.start": "EventNetworkDelete",
"network.delete.end": "EventNetworkDelete",
"subnet.create": "EventSubnetAdd",
- "subnet.create.start": "EventSubnetAdd",
"subnet.create.end": "EventSubnetAdd",
"subnet.update": "EventSubnetUpdate",
- "subnet.update.start": "EventSubnetUpdate",
"subnet.update.end": "EventSubnetUpdate",
"subnet.delete": "EventSubnetDelete",
- "subnet.delete.start": "EventSubnetDelete",
"subnet.delete.end": "EventSubnetDelete",
+ "port.create": "EventPortAdd",
"port.create.end": "EventPortAdd",
+ "port.update": "EventPortUpdate",
"port.update.end": "EventPortUpdate",
+ "port.delete": "EventPortDelete",
"port.delete.end": "EventPortDelete",
"router.create": "EventRouterAdd",
- "router.create.start": "EventRouterAdd",
"router.create.end": "EventRouterAdd",
"router.update": "EventRouterUpdate",
- "router.update.start": "EventRouterUpdate",
"router.update.end": "EventRouterUpdate",
"router.delete": "EventRouterDelete",
- "router.delete.start": "EventRouterDelete",
"router.delete.end": "EventRouterDelete",
"router.interface.create": "EventInterfaceAdd",
- "router.interface.delete": "EventInterfaceDelete"
+ "router.interface.create.end": "EventInterfaceAdd",
+ "router.interface.delete": "EventInterfaceDelete",
+ "router.interface.delete.end": "EventInterfaceDelete"
}
}
\ No newline at end of file
diff --git a/app/config/link_finders.json b/app/config/link_finders.json
index 2368333..55c31f6 100644
--- a/app/config/link_finders.json
+++ b/app/config/link_finders.json
@@ -4,9 +4,9 @@
"link_finders": [
"FindLinksForInstanceVnics",
"FindLinksForOteps",
- "FindLinksForPnics",
"FindLinksForVconnectors",
"FindLinksForVedges",
- "FindLinksForVserviceVnics"
+ "FindLinksForVserviceVnics",
+ "FindLinksForPnics"
]
}
\ No newline at end of file
diff --git a/app/config/scanners.json b/app/config/scanners.json
index ae856a2..c5efb06 100644
--- a/app/config/scanners.json
+++ b/app/config/scanners.json
@@ -220,6 +220,11 @@
},
"fetcher": "CliFetchHostPnics",
"children_scanner": "ScanHostPnic"
+ },
+ {
+ "type": "host_pnic",
+ "fetcher": "CliFetchBondHostPnics",
+ "children_scanner": "ScanHostPnic"
}
],
"ScanHostPnic": [
diff --git a/app/discover/event_manager.py b/app/discover/event_manager.py
index 6a56912..e2f8282 100644
--- a/app/discover/event_manager.py
+++ b/app/discover/event_manager.py
@@ -40,12 +40,18 @@ class EventManager(Manager):
}
LISTENERS = {
- 'Mirantis-6.0': DefaultListener,
- 'Mirantis-7.0': DefaultListener,
- 'Mirantis-8.0': DefaultListener,
- 'RDO-Mitaka': DefaultListener,
- 'RDO-Liberty': DefaultListener,
- 'Apex-Euphrates': DefaultListener,
+ 'Mirantis': {
+ '6.0': DefaultListener,
+ '7.0': DefaultListener,
+ '8.0': DefaultListener,
+ },
+ 'RDO': {
+ 'Mitaka': DefaultListener,
+ 'Liberty': DefaultListener,
+ },
+ 'Apex': {
+ 'Euphrates': DefaultListener,
+ },
}
def __init__(self):
@@ -105,7 +111,8 @@ class EventManager(Manager):
def get_listener(self, env: str):
env_config = self.inv.get_env_config(env)
- return self.LISTENERS.get(env_config.get('distribution'))
+ return (self.LISTENERS.get(env_config.get('distribution'), {})
+ .get(env_config.get('distribution_version')))
def listen_to_events(self, listener: ListenerBase, env_name: str, process_vars: dict):
listener.listen({
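
With LISTENERS now nested by distribution and then distribution_version, get_listener() resolves the listener through a two-level dict lookup that simply yields None when either key is missing. A minimal, self-contained sketch of that lookup (DefaultListener stubbed as a plain class):

class DefaultListener:
    pass

LISTENERS = {
    'Mirantis': {'6.0': DefaultListener, '7.0': DefaultListener, '8.0': DefaultListener},
    'RDO': {'Mitaka': DefaultListener, 'Liberty': DefaultListener},
    'Apex': {'Euphrates': DefaultListener},
}

def get_listener(env_config: dict):
    # outer key: distribution, inner key: distribution_version
    return (LISTENERS.get(env_config.get('distribution'), {})
            .get(env_config.get('distribution_version')))

print(get_listener({'distribution': 'Apex', 'distribution_version': 'Euphrates'}))  # DefaultListener
print(get_listener({'distribution': 'Apex', 'distribution_version': 'Danube'}))     # None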
diff --git a/app/discover/events/event_interface_add.py b/app/discover/events/event_interface_add.py
index 698559c..e54bedb 100644
--- a/app/discover/events/event_interface_add.py
+++ b/app/discover/events/event_interface_add.py
@@ -83,11 +83,12 @@ class EventInterfaceAdd(EventBase):
def handle(self, env, values):
interface = values['payload']['router_interface']
+ project_id = values['_context_project_id']
project = values['_context_project_name']
host_id = values["publisher_id"].replace("network.", "", 1)
port_id = interface['port_id']
subnet_id = interface['subnet_id']
- router_id = encode_router_id(host_id, interface['id'])
+ router_id = encode_router_id(interface['id'])
network_document = self.inv.get_by_field(env, "network", "subnet_ids",
subnet_id, get_single=True)
@@ -98,10 +99,10 @@ class EventInterfaceAdd(EventBase):
network_id = network_document['id']
# add router-interface port document.
- if len(ApiAccess.regions) == 0:
+ if not ApiAccess.regions:
fetcher = ApiFetchRegions()
fetcher.set_env(env)
- fetcher.get(None)
+ fetcher.get(project_id)
port_doc = EventSubnetAdd().add_port_document(env, port_id,
network_name=network_name)
diff --git a/app/discover/events/event_interface_delete.py b/app/discover/events/event_interface_delete.py
index b1df978..f4ec400 100644
--- a/app/discover/events/event_interface_delete.py
+++ b/app/discover/events/event_interface_delete.py
@@ -18,8 +18,7 @@ class EventInterfaceDelete(EventDeleteBase):
def handle(self, env, values):
interface = values['payload']['router_interface']
port_id = interface['port_id']
- host_id = values["publisher_id"].replace("network.", "", 1)
- router_id = encode_router_id(host_id, interface['id'])
+ router_id = encode_router_id(interface['id'])
# update router document
port_doc = self.inv.get_by_id(env, port_id)
diff --git a/app/discover/events/event_router_add.py b/app/discover/events/event_router_add.py
index 3c1c9e2..1fb2244 100644
--- a/app/discover/events/event_router_add.py
+++ b/app/discover/events/event_router_add.py
@@ -96,7 +96,7 @@ class EventRouterAdd(EventBase):
router = values['payload']['router']
host_id = values["publisher_id"].replace("network.", "", 1)
project_id = values['_context_project_id']
- router_id = encode_router_id(host_id, router['id'])
+ router_id = encode_router_id(router['id'])
host = self.inv.get_by_id(env, host_id)
fetcher = CliFetchHostVservice()
diff --git a/app/discover/events/event_router_delete.py b/app/discover/events/event_router_delete.py
index 65072d6..d0bd645 100644
--- a/app/discover/events/event_router_delete.py
+++ b/app/discover/events/event_router_delete.py
@@ -21,7 +21,6 @@ class EventRouterDelete(EventDeleteBase):
self.log.error("Publisher_id is not in event values. Aborting router delete")
return EventResult(result=False, retry=False)
- host_id = values['publisher_id'].replace('network.', '', 1)
if 'router_id' in payload:
router_id = payload['router_id']
elif 'id' in payload:
@@ -33,5 +32,5 @@ class EventRouterDelete(EventDeleteBase):
self.log.error("Router id is not in payload. Aborting router delete")
return EventResult(result=False, retry=False)
- router_full_id = encode_router_id(host_id, router_id)
+ router_full_id = encode_router_id(router_id)
return self.delete_handler(env, router_full_id, "vservice")
diff --git a/app/discover/events/event_router_update.py b/app/discover/events/event_router_update.py
index cfbbf58..b63b224 100644
--- a/app/discover/events/event_router_update.py
+++ b/app/discover/events/event_router_update.py
@@ -26,7 +26,7 @@ class EventRouterUpdate(EventBase):
host_id = values["publisher_id"].replace("network.", "", 1)
router_id = payload['id'] if 'id' in payload else router['id']
- router_full_id = encode_router_id(host_id, router_id)
+ router_full_id = encode_router_id(router_id)
router_doc = self.inv.get_by_id(env, router_full_id)
if not router_doc:
self.log.info("Router document not found, aborting router updating")
diff --git a/app/discover/events/event_subnet_add.py b/app/discover/events/event_subnet_add.py
index fcae5fd..4126e0c 100644
--- a/app/discover/events/event_subnet_add.py
+++ b/app/discover/events/event_subnet_add.py
@@ -131,10 +131,10 @@ class EventSubnetAdd(EventBase):
# Check DHCP enable, if true, scan network.
if subnet['enable_dhcp'] is True:
# update network
- if len(ApiAccess.regions) == 0:
+ if not ApiAccess.regions:
fetcher = ApiFetchRegions()
fetcher.set_env(env)
- fetcher.get(None)
+ fetcher.get(project_id)
self.log.info("add new subnet.")
host_id = notification["publisher_id"].replace("network.", "", 1)
diff --git a/app/discover/events/event_subnet_update.py b/app/discover/events/event_subnet_update.py
index 3529f0d..59b0afb 100644
--- a/app/discover/events/event_subnet_update.py
+++ b/app/discover/events/event_subnet_update.py
@@ -23,6 +23,7 @@ class EventSubnetUpdate(EventBase):
def handle(self, env, notification):
# check for network document.
subnet = notification['payload']['subnet']
+ project_id = notification['_context_project_id']
project = notification['_context_project_name']
host_id = notification['publisher_id'].replace('network.', '', 1)
subnet_id = subnet['id']
@@ -47,10 +48,10 @@ class EventSubnetUpdate(EventBase):
network_document['name'])
# make sure that self.regions is not empty.
- if len(ApiAccess.regions) == 0:
+ if not ApiAccess.regions:
fetcher = ApiFetchRegions()
fetcher.set_env(env)
- fetcher.get(None)
+ fetcher.get(project_id)
self.log.info("add port binding to DHCP server.")
port_id = DbFetchPort(). \
diff --git a/app/discover/events/listeners/default_listener.py b/app/discover/events/listeners/default_listener.py
index 54453a7..273f3e3 100755
--- a/app/discover/events/listeners/default_listener.py
+++ b/app/discover/events/listeners/default_listener.py
@@ -30,17 +30,19 @@ from monitoring.setup.monitoring_setup_manager import MonitoringSetupManager
from utils.constants import OperationalStatus, EnvironmentFeatures
from utils.inventory_mgr import InventoryMgr
from utils.logging.full_logger import FullLogger
+from utils.logging.logger import Logger
from utils.mongo_access import MongoAccess
-from utils.string_utils import stringify_datetime
from utils.util import SignalHandler, setup_args
class DefaultListener(ListenerBase, ConsumerMixin):
SOURCE_SYSTEM = "OpenStack"
-
COMMON_METADATA_FILE = "events.json"
+ LOG_FILENAME = "default_listener.log"
+ LOG_LEVEL = Logger.INFO
+
DEFAULTS = {
"env": "Mirantis-Liberty",
"mongo_config": "",
@@ -92,7 +94,7 @@ class DefaultListener(ListenerBase, ConsumerMixin):
return False, None
def process_task(self, body, message):
- received_timestamp = stringify_datetime(datetime.datetime.now())
+ received_timestamp = datetime.datetime.now()
processable, event_data = self._extract_event_data(body)
# If env listener can't process the message
# or it's not intended for env listener to handle,
@@ -100,7 +102,7 @@ class DefaultListener(ListenerBase, ConsumerMixin):
if processable and event_data["event_type"] in self.handler.handlers:
event_result = self.handle_event(event_data["event_type"],
event_data)
- finished_timestamp = stringify_datetime(datetime.datetime.now())
+ finished_timestamp = datetime.datetime.now()
self.save_message(message_body=event_data,
result=event_result,
started=received_timestamp,
@@ -143,8 +145,8 @@ class DefaultListener(ListenerBase, ConsumerMixin):
# 'Retry' flag specifies if the error is recoverable or not
# 'Retry' flag is checked only is 'result' is False
def handle_event(self, event_type: str, notification: dict) -> EventResult:
- print("Got notification.\nEvent_type: {}\nNotification:\n{}".
- format(event_type, notification))
+ self.log.error("Got notification.\nEvent_type: {}\nNotification:\n{}".
+ format(event_type, notification))
try:
result = self.handler.handle(event_name=event_type,
notification=notification)
@@ -154,7 +156,7 @@ class DefaultListener(ListenerBase, ConsumerMixin):
return EventResult(result=False, retry=False)
def save_message(self, message_body: dict, result: EventResult,
- started: str, finished: str):
+ started: datetime, finished: datetime):
try:
message = Message(
msg_id=message_body.get('message_id'),
diff --git a/app/discover/events/listeners/listener_base.py b/app/discover/events/listeners/listener_base.py
index 7052dc9..4ff4e57 100644
--- a/app/discover/events/listeners/listener_base.py
+++ b/app/discover/events/listeners/listener_base.py
@@ -7,11 +7,25 @@
# which accompanies this distribution, and is available at #
# http://www.apache.org/licenses/LICENSE-2.0 #
###############################################################################
+import os
from abc import ABC, abstractmethod
+from utils.logging.console_logger import ConsoleLogger
+from utils.logging.file_logger import FileLogger
+from utils.logging.logger import Logger
+
class ListenerBase(ABC):
+ LOG_FILENAME = "listener_base.log"
+ LOG_LEVEL = Logger.WARNING
+
+ def __init__(self):
+ super().__init__()
+ self.log_file = os.path.join(FileLogger.LOG_DIRECTORY,
+ self.LOG_FILENAME)
+ self.log = ConsoleLogger(level=Logger.INFO)
+
@staticmethod
@abstractmethod
def listen(self):
diff --git a/app/discover/fetcher_new.py b/app/discover/fetcher_new.py
deleted file mode 100644
index f545554..0000000
--- a/app/discover/fetcher_new.py
+++ /dev/null
@@ -1,30 +0,0 @@
-###############################################################################
-# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
-# and others #
-# #
-# All rights reserved. This program and the accompanying materials #
-# are made available under the terms of the Apache License, Version 2.0 #
-# which accompanies this distribution, and is available at #
-# http://www.apache.org/licenses/LICENSE-2.0 #
-###############################################################################
-from discover.fetcher import Fetcher
-##old stuff
-class FetchHostObjectTypes(Fetcher):
-
-
- def get(self, parent):
- ret = {
- "type": "host_object_type",
- "id": "",
- "parent": parent,
- "rows": [
- {"id": "instances_root", "text": "Instances", "descendants": 1},
- {"id": "networks_root", "text": "Networks", "descendants": 1},
- {"id": "pnics_root", "text": "pNICs", "descendants": 1},
- {"id": "vservices_root", "text": "vServices", "descendants": 1}
- ]
- }
- return ret
-
- ## old/moved
-
diff --git a/app/discover/fetchers/cli/cli_access.py b/app/discover/fetchers/cli/cli_access.py
index 275a3e8..c77b22a 100644
--- a/app/discover/fetchers/cli/cli_access.py
+++ b/app/discover/fetchers/cli/cli_access.py
@@ -12,6 +12,7 @@ import time
from discover.fetcher import Fetcher
from utils.binary_converter import BinaryConverter
+from utils.cli_dist_translator import CliDistTranslator
from utils.logging.console_logger import ConsoleLogger
from utils.ssh_conn import SshConn
@@ -41,11 +42,16 @@ class CliAccess(BinaryConverter, Fetcher):
def run(self, cmd, ssh_to_host="", enable_cache=True, on_gateway=False,
ssh=None, use_sudo=True):
ssh_conn = ssh if ssh else SshConn(ssh_to_host)
- if use_sudo and not cmd.strip().startswith("sudo "):
- cmd = "sudo " + cmd
- if not on_gateway and ssh_to_host \
- and not ssh_conn.is_gateway_host(ssh_to_host):
- cmd = self.ssh_cmd + ssh_to_host + " " + cmd
+ commands = self.adapt_cmd_to_env(ssh_conn, cmd, use_sudo, on_gateway,
+ ssh_to_host)
+ out = ''
+ for c in commands:
+ out += self.run_single_command(c, ssh_conn, ssh_to_host,
+ enable_cache=enable_cache)
+ return out
+
+ def run_single_command(self, cmd, ssh_conn, ssh_to_host="",
+ enable_cache=True):
curr_time = time.time()
cmd_path = ssh_to_host + ',' + cmd
if enable_cache and cmd_path in self.cached_commands:
@@ -73,9 +79,44 @@ class CliAccess(BinaryConverter, Fetcher):
ret = out.splitlines()
# if split by whitespace did not work, try splitting by "\\n"
if len(ret) == 1:
- ret = [l for l in out.split("\\n") if l != ""]
+ ret = [line for line in out.split("\\n") if line != ""]
return ret
+ MULTI_COMMAND_SEPARATOR = ';;;'
+
+ @staticmethod
+ def handle_split_cmd(cmd: str):
+ if CliAccess.MULTI_COMMAND_SEPARATOR in cmd:
+ return cmd.split(CliAccess.MULTI_COMMAND_SEPARATOR)
+ return [cmd]
+
+ def adapt_cmd_to_env(self, ssh_conn, cmd, use_sudo, on_gateway,
+ ssh_to_host):
+ cmd = self.adapt_cmd_to_dist(cmd)
+ commands = self.handle_split_cmd(cmd)
+ return [self.adapt_cmd_to_environment(c, use_sudo, on_gateway,
+ ssh_to_host, ssh_conn)
+ for c in commands]
+
+ def adapt_cmd_to_environment(self, cmd, use_sudo, on_gateway, ssh_to_host,
+ ssh_conn):
+ if self.configuration.environment["distribution"] == "Mercury":
+ use_sudo = False
+ if use_sudo and not cmd.strip().startswith("sudo "):
+ cmd = "sudo " + cmd
+ if not on_gateway and ssh_to_host \
+ and not ssh_conn.is_gateway_host(ssh_to_host):
+ cmd = self.ssh_cmd + ssh_to_host + " " + cmd
+ return cmd
+
+ def adapt_cmd_to_dist(self, cmd):
+ env_conf = self.configuration.get_env_config()
+ dist = env_conf.get('distribution')
+ dist_version = env_conf.get('distribution_version')
+ translator = CliDistTranslator(dist, dist_version=dist_version)
+ cmd = translator.translate(cmd)
+ return cmd
+
# parse command output columns separated by whitespace
# since headers can contain whitespace themselves,
# it is the caller's responsibility to provide the headers
@@ -126,7 +167,8 @@ class CliAccess(BinaryConverter, Fetcher):
content[headers[i]] = content_parts[i]
return content
- def merge_ws_spillover_lines(self, lines):
+ @staticmethod
+ def merge_ws_spillover_lines(lines):
# with WS-separated output, extra output sometimes spills to next line
# detect that and add to the end of the previous line for our processing
pending_line = None
@@ -156,7 +198,8 @@ class CliAccess(BinaryConverter, Fetcher):
- header_regexp: regexp marking the start of the section
- end_regexp: regexp marking the end of the section
"""
- def get_section_lines(self, lines, header_regexp, end_regexp):
+ @staticmethod
+ def get_section_lines(lines, header_regexp, end_regexp):
if not lines:
return []
header_re = re.compile(header_regexp)
@@ -196,7 +239,8 @@ class CliAccess(BinaryConverter, Fetcher):
if 'name' not in o and 'default' in regexp_tuple:
o[name] = regexp_tuple['default']
- def find_matching_regexps(self, o, line, regexps):
+ @staticmethod
+ def find_matching_regexps(o, line, regexps):
for regexp_tuple in regexps:
name = regexp_tuple['name']
regex = regexp_tuple['re']
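
run() now passes every command through two new steps before execution: adapt_cmd_to_dist() rewrites it for the target distribution via CliDistTranslator, and handle_split_cmd() splits it on the ';;;' separator so one logical command can expand into several shell commands whose outputs are concatenated. A minimal sketch of the split-and-adapt flow, with the translator and the ssh gateway logic stubbed out (hypothetical host name):

MULTI_COMMAND_SEPARATOR = ';;;'

def handle_split_cmd(cmd: str):
    # one logical command may expand to several commands
    if MULTI_COMMAND_SEPARATOR in cmd:
        return cmd.split(MULTI_COMMAND_SEPARATOR)
    return [cmd]

def adapt(cmd: str, ssh_to_host: str = '', use_sudo: bool = True) -> str:
    # simplified stand-in for adapt_cmd_to_environment(): sudo prefix plus
    # ssh wrapping; the real method also honors the gateway-host check
    if use_sudo and not cmd.strip().startswith('sudo '):
        cmd = 'sudo ' + cmd
    if ssh_to_host:
        cmd = 'ssh ' + ssh_to_host + ' ' + cmd
    return cmd

commands = [adapt(c, ssh_to_host='node-1')
            for c in handle_split_cmd('ovs-vsctl show;;;ip -d link show')]
print(commands)
# ['ssh node-1 sudo ovs-vsctl show', 'ssh node-1 sudo ip -d link show']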
diff --git a/app/discover/fetchers/cli/cli_fetch_bond_host_pnics.py b/app/discover/fetchers/cli/cli_fetch_bond_host_pnics.py
new file mode 100644
index 0000000..77f149f
--- /dev/null
+++ b/app/discover/fetchers/cli/cli_fetch_bond_host_pnics.py
@@ -0,0 +1,134 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from collections import deque
+
+from discover.fetchers.cli.cli_access import CliAccess
+from utils.inventory_mgr import InventoryMgr
+
+
+class CliFetchBondHostPnics(CliAccess):
+ BOND_DIR = '/proc/net/bonding/'
+ SLAVE_INTERFACE_HEADER = 'Slave Interface: '
+
+ def __init__(self):
+ super().__init__()
+ self.inv = InventoryMgr()
+
+ def get(self, parent_id: str):
+ self.log.info('CliFetchBondHostPnics: checking under {}'
+ .format(parent_id))
+ host_id = parent_id[:parent_id.rindex('-')]
+ cmd = 'ls -1 {} 2>&1'.format(self.BOND_DIR)
+ host = self.inv.get_by_id(self.get_env(), host_id)
+ if not host:
+ self.log.error('CliFetchBondHostPnics: host not found: ' + host_id)
+ return []
+ host_types = host['host_type']
+ if 'Network' not in host_types and 'Compute' not in host_types:
+ return []
+ lines = self.run_fetch_lines(cmd, host_id)
+ if lines and 'No such file or directory' in lines[0]:
+ return [] # no bonds so directory does not exist
+ bonds = []
+ for line in lines:
+ bond = self.get_bond_details(host_id, line)
+ if bond:
+ bonds.append(bond)
+ return bonds
+
+ def get_bond_details(self, host_id: str, interface_name: str) -> dict:
+ lines = self.run_fetch_lines('cat {}{}'
+ .format(self.BOND_DIR, interface_name),
+ host_id)
+ status, mac_address = \
+ self.get_bond_status_and_mac_address(host_id, interface_name)
+ interface_id = '{}-{}'.format(interface_name, mac_address)
+ interface = {
+ 'host': host_id,
+ 'name': interface_name,
+ 'id': interface_id,
+ 'local_name': interface_name,
+ 'mac_address': mac_address,
+ 'Link detected': 'yes' if status == 'up' else 'no',
+ 'EtherChannel': True,
+ 'EtherChannel Master': '',
+ 'members': {}
+ }
+ # keep stack of info objects to support multi-level info
+ info_objects = deque([interface])
+ for line in [line for line in lines if line != '']:
+ if line.startswith(self.SLAVE_INTERFACE_HEADER):
+ name = line[line.index(':')+1:].strip()
+ slave = {
+ 'name': name,
+ 'EtherChannel Master': interface_id
+ }
+ # remove any pending info objects, keep only interface
+ info_objects = deque([interface])
+ info_objects.append(slave)
+ interface['members'][name] = slave
+ elif line.rstrip(':').lower().endswith('info'):
+ # move to lower level info object
+ info_name = line.rstrip(':')
+ upper_info_obj = info_objects[-1]
+ info_obj = {}
+ upper_info_obj[info_name] = info_obj
+ info_objects.append(info_obj)
+ else:
+ self.get_attribute_from_line(info_objects[-1], line)
+ for slave in list(interface['members'].values()):
+ self.set_slave_host_pnic_bond_attributes(host_id, slave,
+ interface_id)
+ return interface
+
+ def get_bond_status_and_mac_address(self, host_id: str, name: str):
+ output = self.run_fetch_lines('ip link show {}'.format(name), host_id)
+ status_line = output[0]
+ status = status_line[status_line.index(' state ') + len(' state '):]
+ status = status[:status.index(' ')]
+ matches = [line.strip() for line in output if 'link/ether' in line]
+ if not matches:
+ self.log.error('Failed to find line with MAC address '
+ 'for bond {} (host: {})'
+ .format(name, host_id))
+ tokens = matches[0].split()
+ if len(tokens) < 2:
+ self.log.error('Failed to find MAC address in line: {}'
+ .format(matches[0]))
+ mac_address = tokens[1]
+ return status.lower(), mac_address
+
+ def get_attribute_from_line(self, obj: dict, line: str):
+ if ':' not in line:
+ self.log.error('object {}: failed to find ":" in line: {}'
+ .format(obj['name'], line))
+ return
+ attr = line[:line.index(':')]
+ value = line[len(attr)+1:]
+ obj[attr.strip()] = value.strip()
+
+ def set_slave_host_pnic_bond_attributes(self, host, slave, interface_id):
+ pnic = self.inv.find_one({
+ 'environment': self.get_env(),
+ 'host': host,
+ 'type': 'host_pnic',
+ 'name': slave['name']
+ })
+ if not pnic:
+ self.log.error('unable to find slave pNIC {} under bond {}'
+ .format(slave['name'], interface_id))
+ return
+ mac_address = pnic['mac_address']
+ slave_id = '{}-{}'.format(slave.get('name', ''), mac_address)
+ slave['mac_address'] = mac_address
+ slave['id'] = slave_id
+ pnic['EtherChannel'] = True
+ pnic['EtherChannel Master'] = interface_id
+ self.inv.set(pnic)
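
The new bond fetcher reads /proc/net/bonding/<bond> and folds its 'key: value' lines into the interface document: a 'Slave Interface:' line opens a new member dict, a line ending in 'info' (omitted here) nests a child dict, and every other line lands on the innermost object. A minimal sketch of that folding logic over hypothetical bond-file lines:

from collections import deque

sample_lines = [                      # hypothetical excerpt of /proc/net/bonding/bond0
    "MII Status: up",
    "Slave Interface: ens785f0",
    "MII Status: up",
    "Link Failure Count: 0",
]

interface = {'name': 'bond0', 'members': {}}
info_objects = deque([interface])     # stack of objects receiving attributes
for line in sample_lines:
    if line.startswith('Slave Interface: '):
        name = line[line.index(':') + 1:].strip()
        slave = {'name': name}
        info_objects = deque([interface, slave])
        interface['members'][name] = slave
    else:
        attr, value = line.split(':', 1)
        info_objects[-1][attr.strip()] = value.strip()

print(interface)
# {'name': 'bond0', 'MII Status': 'up',
#  'members': {'ens785f0': {'name': 'ens785f0', 'MII Status': 'up',
#                           'Link Failure Count': '0'}}}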
diff --git a/app/discover/fetchers/cli/cli_fetch_host_pnics.py b/app/discover/fetchers/cli/cli_fetch_host_pnics.py
index 5df4d3b..4af3ebc 100644
--- a/app/discover/fetchers/cli/cli_fetch_host_pnics.py
+++ b/app/discover/fetchers/cli/cli_fetch_host_pnics.py
@@ -67,21 +67,17 @@ class CliFetchHostPnics(CliAccess):
tokens = None
if interface is None:
tokens = line.split()
- name = tokens[0].strip('- :')
- name = name.strip()
- if name == interface_name:
- line_remainder = line.strip('-')[len(interface_name)+2:]
- line_remainder = line_remainder.strip(' :')
- id = interface_name
- interface = {
- "host": host_id,
- "name": id,
- "local_name": interface_name,
- "lines": []
- }
- self.handle_line(interface, line_remainder)
- if '<UP,' in line:
- status_up = True
+ line_remainder = line.strip('-')[len(interface_name)+2:]
+ line_remainder = line_remainder.strip(' :')
+ interface = {
+ "host": host_id,
+ "name": interface_name,
+ "local_name": interface_name,
+ "lines": []
+ }
+ self.handle_line(interface, line_remainder)
+ if '<UP,' in line:
+ status_up = True
if status_up is None:
if tokens is None:
tokens = line.split()
diff --git a/app/discover/fetchers/cli/cli_fetch_host_vservice.py b/app/discover/fetchers/cli/cli_fetch_host_vservice.py
index 9f8173f..ae7c656 100644
--- a/app/discover/fetchers/cli/cli_fetch_host_vservice.py
+++ b/app/discover/fetchers/cli/cli_fetch_host_vservice.py
@@ -31,35 +31,37 @@ class CliFetchHostVservice(CliAccess, DbAccess):
def set_details(self, host_id, r):
# keep the index without prefix
id_full = r["local_service_id"].strip()
- prefix = id_full[1:id_full.index('-')]
- id_clean = id_full[id_full.index('-') + 1:]
- r["service_type"] = prefix
- name = self.get_router_name(r, id_clean) if prefix == "router" \
+ prefix = id_full[:id_full.index('-')]
+ id_clean = id_full[len(prefix)+1:]
+ r["service_type"] = prefix[1:]
+ name = self.get_router_name(r, id_clean) \
+ if r["service_type"] == "router" \
else self.get_network_name(id_clean)
r["name"] = prefix + "-" + name
r["host"] = host_id
- r["id"] = host_id + "-" + id_full
+ r["id"] = "{}-{}".format(host_id, id_full)
self.set_agent_type(r)
- def get_network_name(self, id):
+ def get_network_name(self, network_id):
query = """
SELECT name
FROM {}.networks
WHERE id = %s
""".format(self.neutron_db)
- results = self.get_objects_list_for_id(query, "router", id)
+ results = self.get_objects_list_for_id(query, "router", network_id)
if not list(results):
- return id
+ return network_id
for db_row in results:
return db_row["name"]
- def get_router_name(self, r, id):
+ def get_router_name(self, r, router_id):
query = """
SELECT *
FROM {}.routers
WHERE id = %s
""".format(self.neutron_db)
- results = self.get_objects_list_for_id(query, "router", id.strip())
+ results = self.get_objects_list_for_id(query, "router",
+ router_id.strip())
for db_row in results:
r.update(db_row)
return r["name"]
diff --git a/app/discover/fetchers/cli/cli_fetch_host_vservices.py b/app/discover/fetchers/cli/cli_fetch_host_vservices.py
index 9b62dcb..b9496bc 100644
--- a/app/discover/fetchers/cli/cli_fetch_host_vservices.py
+++ b/app/discover/fetchers/cli/cli_fetch_host_vservices.py
@@ -19,7 +19,7 @@ class CliFetchHostVservices(CliFetchHostVservice):
if "Network" not in host["host_type"]:
return []
services_ids = [l[:l.index(' ')] if ' ' in l else l
- for l in self.run_fetch_lines("ip netns", host_id)]
+ for l in self.run_fetch_lines("ip netns list", host_id)]
results = [{"local_service_id": s} for s in services_ids if self.type_re.match(s)]
for r in results:
self.set_details(host_id, r)
diff --git a/app/discover/fetchers/cli/cli_fetch_instance_vnics_base.py b/app/discover/fetchers/cli/cli_fetch_instance_vnics_base.py
index 4de1840..bb1e7fc 100644
--- a/app/discover/fetchers/cli/cli_fetch_instance_vnics_base.py
+++ b/app/discover/fetchers/cli/cli_fetch_instance_vnics_base.py
@@ -58,7 +58,7 @@ class CliFetchInstanceVnicsBase(CliAccess):
def set_vnic_properties(self, v, instance):
v["name"] = self.get_vnic_name(v, instance)
- v["id"] = v["name"]
+ v["id"] = "{}-{}".format(instance["host"], v["name"])
v["vnic_type"] = "instance_vnic"
v["host"] = instance["host"]
v["instance_id"] = instance["id"]
diff --git a/app/discover/fetchers/cli/cli_fetch_vservice_vnics.py b/app/discover/fetchers/cli/cli_fetch_vservice_vnics.py
index d10d99e..239ecd7 100644
--- a/app/discover/fetchers/cli/cli_fetch_vservice_vnics.py
+++ b/app/discover/fetchers/cli/cli_fetch_vservice_vnics.py
@@ -42,7 +42,7 @@ class CliFetchVserviceVnics(CliAccess):
return []
if "Network" not in host["host_type"]:
return []
- lines = self.run_fetch_lines("ip netns", host_id)
+ lines = self.run_fetch_lines("ip netns list", host_id)
ret = []
for l in [l for l in lines
if l.startswith("qdhcp") or l.startswith("qrouter")]:
@@ -68,7 +68,7 @@ class CliFetchVserviceVnics(CliAccess):
current = None
else:
line_remainder = matches.group(2)
- vservice_id = host + "-" + service
+ master_parent_id = "{}-{}".format(host, service)
current = {
"id": host + "-" + name,
"type": "vnic",
@@ -76,9 +76,9 @@ class CliFetchVserviceVnics(CliAccess):
"host": host,
"name": name,
"master_parent_type": "vservice",
- "master_parent_id": vservice_id,
+ "master_parent_id": master_parent_id,
"parent_type": "vnics_folder",
- "parent_id": vservice_id + "-vnics",
+ "parent_id": "{}-vnics".format(master_parent_id),
"parent_text": "vNICs",
"lines": []
}
diff --git a/app/discover/fetchers/db/db_access.py b/app/discover/fetchers/db/db_access.py
index 49fdb5e..64d7372 100644
--- a/app/discover/fetchers/db/db_access.py
+++ b/app/discover/fetchers/db/db_access.py
@@ -7,6 +7,7 @@
# which accompanies this distribution, and is available at #
# http://www.apache.org/licenses/LICENSE-2.0 #
###############################################################################
+import functools
import mysql.connector
from discover.configuration import Configuration
@@ -15,6 +16,24 @@ from discover.scan_error import ScanError
from utils.string_utils import jsonify
+def with_cursor(method):
+ @functools.wraps(method)
+ def wrap(self, *args, **kwargs):
+ self.connect_to_db(DbAccess.query_count_per_con >= 25)
+ DbAccess.query_count_per_con += 1
+ cursor = DbAccess.conn.cursor(dictionary=True)
+ try:
+ res = method(self, *args, cursor=cursor, **kwargs)
+ DbAccess.conn.commit()
+ return res
+ except:
+ DbAccess.conn.rollback()
+ raise
+ finally:
+ cursor.close()
+ return wrap
+
+
class DbAccess(Fetcher):
conn = None
query_count_per_con = 0
@@ -47,10 +66,9 @@ class DbAccess(Fetcher):
return
DbAccess.query_count_per_con = 0
- @staticmethod
- def get_neutron_db_name():
+ @with_cursor
+ def get_neutron_db_name(self, cursor=None):
# check if DB schema 'neutron' exists
- cursor = DbAccess.conn.cursor(dictionary=True)
cursor.execute('SHOW DATABASES')
matches = [row.get('Database', '') for row in cursor
if 'neutron' in row.get('Database', '')]
@@ -68,6 +86,8 @@ class DbAccess(Fetcher):
self.log.info("DbAccess: ****** forcing reconnect, " +
"query count: %s ******",
DbAccess.query_count_per_con)
+ DbAccess.conn.commit()
+ DbAccess.conn.close()
DbAccess.conn = None
self.conf = self.config.get("mysql")
cnf = self.conf
@@ -76,16 +96,15 @@ class DbAccess(Fetcher):
cnf["user"], cnf["pwd"],
cnf["schema"])
- def get_objects_list_for_id(self, query, object_type, id):
- self.connect_to_db(DbAccess.query_count_per_con >= 25)
- DbAccess.query_count_per_con += 1
+ @with_cursor
+ def get_objects_list_for_id(self, query, object_type, object_id,
+ cursor=None):
self.log.debug("query count: %s, running query:\n%s\n",
str(DbAccess.query_count_per_con), query)
- cursor = DbAccess.conn.cursor(dictionary=True)
try:
- if id:
- cursor.execute(query, [str(id)])
+ if object_id:
+ cursor.execute(query, [str(object_id)])
else:
cursor.execute(query)
except (AttributeError, mysql.connector.errors.OperationalError) as e:
@@ -93,13 +112,13 @@ class DbAccess(Fetcher):
self.connect_to_db(True)
# try again to run the query
cursor = DbAccess.conn.cursor(dictionary=True)
- if id:
- cursor.execute(query, [str(id)])
+ if object_id:
+ cursor.execute(query, [str(object_id)])
else:
cursor.execute(query)
rows = []
- for row in cursor:
+ for row in cursor.fetchall():
rows.append(row)
return rows
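
The with_cursor decorator centralizes cursor and transaction handling for DbAccess query methods: it hands a dictionary cursor in through the cursor keyword, commits on success, rolls back on exception, and always closes the cursor. A minimal sketch of how a query method is written against it (stand-in class, not the real DbAccess, and without the reconnect and query-count logic shown above):

import functools

def with_cursor(method):
    @functools.wraps(method)
    def wrap(self, *args, **kwargs):
        # assumes a mysql.connector-style connection object on self.conn
        cursor = self.conn.cursor(dictionary=True)
        try:
            res = method(self, *args, cursor=cursor, **kwargs)
            self.conn.commit()
            return res
        except Exception:
            self.conn.rollback()
            raise
        finally:
            cursor.close()
    return wrap

class SomeFetcher:
    def __init__(self, conn):
        self.conn = conn

    @with_cursor
    def get_names(self, query, cursor=None):
        # the decorator supplies 'cursor'; the method only runs the query
        cursor.execute(query)
        return [row['name'] for row in cursor.fetchall()]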
diff --git a/app/discover/fetchers/db/db_fetch_host_network_agents.py b/app/discover/fetchers/db/db_fetch_host_network_agents.py
index c323573..7d415f2 100644
--- a/app/discover/fetchers/db/db_fetch_host_network_agents.py
+++ b/app/discover/fetchers/db/db_fetch_host_network_agents.py
@@ -27,9 +27,8 @@ class DbFetchHostNetworkAgents(DbAccess):
host_id = id[:-1 * len("-network_agents")]
results = self.get_objects_list_for_id(query, "network_agent", host_id)
mechanism_drivers = self.env_config['mechanism_drivers']
- id_prefix = mechanism_drivers[0] if mechanism_drivers else 'network_agent'
for o in results:
o["configurations"] = json.loads(o["configurations"])
o["name"] = o["binary"]
- o['id'] = id_prefix + '-' + o['id']
+ o['id'] = o['name'] + '-' + o['id']
return results
diff --git a/app/discover/fetchers/db/db_fetch_oteps.py b/app/discover/fetchers/db/db_fetch_oteps.py
index 3e3f4e1..f7eb8bd 100644
--- a/app/discover/fetchers/db/db_fetch_oteps.py
+++ b/app/discover/fetchers/db/db_fetch_oteps.py
@@ -35,7 +35,9 @@ class DbFetchOteps(DbAccess, CliAccess, metaclass=Singleton):
table_name = "{}.ml2_{}_endpoints".format(self.neutron_db, tunnel_type)
env_config = self.config.get_env_config()
distribution = env_config["distribution"]
- if distribution == "Canonical-icehouse":
+ distribution_version = env_config["distribution_version"]
+ dist_ver = "{}-{}".format(distribution, distribution_version)
+ if dist_ver == "Canonical-icehouse":
# for Icehouse, we only get IP address from the DB, so take the
# host IP address and from the host data in Mongo
host = self.inv.get_by_id(self.get_env(), host_id)
diff --git a/app/discover/fetchers/db/db_fetch_vedges_ovs.py b/app/discover/fetchers/db/db_fetch_vedges_ovs.py
index 838ccb9..f516d10 100644
--- a/app/discover/fetchers/db/db_fetch_vedges_ovs.py
+++ b/app/discover/fetchers/db/db_fetch_vedges_ovs.py
@@ -24,8 +24,8 @@ class DbFetchVedgesOvs(DbAccess, CliAccess, metaclass=Singleton):
self.port_re = re.compile("^\s*port (\d+): ([^(]+)( \(internal\))?$")
self.port_line_header_prefix = " " * 8 + "Port "
- def get(self, id):
- host_id = id[:id.rindex('-')]
+ def get(self, parent_id):
+ host_id = parent_id[:parent_id.rindex('-')]
results = self.get_objects_list_for_id(
"""
SELECT *
@@ -66,11 +66,11 @@ class DbFetchVedgesOvs(DbAccess, CliAccess, metaclass=Singleton):
if not port_matches:
continue
port = {}
- id = port_matches.group(1)
+ port_id = port_matches.group(1)
name = port_matches.group(2)
is_internal = port_matches.group(3) == " (internal)"
port["internal"] = is_internal
- port["id"] = id
+ port["id"] = port_id
port["name"] = name
ports[name] = port
return ports
@@ -106,7 +106,7 @@ class DbFetchVedgesOvs(DbAccess, CliAccess, metaclass=Singleton):
if "tunneling_ip" not in doc["configurations"]:
return {}
if not doc["configurations"]["tunneling_ip"]:
- self.get_bridge_pnic(doc)
+ self.get_pnics(doc)
return {}
# read the 'br-tun' interface ports
@@ -148,31 +148,48 @@ class DbFetchVedgesOvs(DbAccess, CliAccess, metaclass=Singleton):
tunnel_ports[port["name"]] = port
return tunnel_ports
- def get_bridge_pnic(self, doc):
- conf = doc["configurations"]
- if "bridge_mappings" not in conf or not conf["bridge_mappings"]:
- return
- for v in conf["bridge_mappings"].values(): br = v
- ifaces_list_lines = self.run_fetch_lines("ovs-vsctl list-ifaces " + br,
- doc["host"])
- br_pnic_postfix = br + "--br-"
- interface = ""
+ def get_pnics(self, vedge) -> dict:
+ bridges = vedge["configurations"].get("bridge_mappings", {})
+ pnics = {}
+ for bridge in bridges.values():
+ self.get_bridge_pnic(pnics, vedge, bridge)
+ return pnics
+
+ MIRANTIS_DIST = "Mirantis"
+
+ def get_bridge_pnic(self, pnics: dict, vedge: dict, bridge: dict):
+ cmd = "ovs-vsctl list-ifaces {}".format(bridge)
+ ifaces_list_lines = self.run_fetch_lines(cmd, vedge["host"])
+ env_config = self.configuration.get_env_config()
+ distribution = env_config.get("distribution")
+ dist_version = env_config.get("distribution_version")
+ use_br_postfix = distribution == self.MIRANTIS_DIST and \
+ dist_version in ["6.0", "7.0", "8.0"]
for l in ifaces_list_lines:
- if l.startswith(br_pnic_postfix):
- interface = l[len(br_pnic_postfix):]
- break
- if not interface:
- return
- doc["pnic"] = interface
+ if use_br_postfix:
+ br_pnic_postfix = "{}--br-".format(bridge)
+ interface = l[len(br_pnic_postfix):] \
+ if l.startswith(br_pnic_postfix) \
+ else ""
+ else:
+ interface = l
+ if interface:
+ pnic = self.find_pnic_for_interface(vedge, interface)
+ if pnic:
+ pnics[pnic["name"]] = pnic
+
+ def find_pnic_for_interface(self, vedge, interface):
# add port ID to pNIC
pnic = self.inv.find_items({
"environment": self.get_env(),
"type": "host_pnic",
- "host": doc["host"],
+ "host": vedge["host"],
"name": interface
}, get_single=True)
if not pnic:
return
- port = doc["ports"][interface]
- pnic["port_id"] = port["id"]
+ vedge["pnic"] = interface
+ port = vedge["ports"].get(interface, {})
+ pnic["port_id"] = port.get("id", "")
self.inv.set(pnic)
+ return pnic
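
get_pnics()/get_bridge_pnic() replace the old single-bridge logic: every bridge in bridge_mappings is listed with 'ovs-vsctl list-ifaces', and only on Mirantis 6.0/7.0/8.0 is a '<bridge>--br-' prefix expected and stripped; other distributions take each listed interface name as-is. A minimal sketch of just that name extraction, over hypothetical sample output:

def extract_interfaces(bridge, listed_ifaces, distribution, dist_version):
    # mirrors the use_br_postfix decision in get_bridge_pnic()
    use_br_postfix = distribution == "Mirantis" and \
        dist_version in ["6.0", "7.0", "8.0"]
    result = []
    for line in listed_ifaces:
        if use_br_postfix:
            prefix = "{}--br-".format(bridge)
            iface = line[len(prefix):] if line.startswith(prefix) else ""
        else:
            iface = line
        if iface:
            result.append(iface)
    return result

print(extract_interfaces("br-prv", ["br-prv--br-eth1"], "Mirantis", "7.0"))  # ['eth1']
print(extract_interfaces("br-ex", ["ens785f1"], "Apex", "Euphrates"))        # ['ens785f1']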
diff --git a/app/discover/link_finders/__init__.py b/app/discover/link_finders/__init__.py
index e69de29..1e85a2a 100644
--- a/app/discover/link_finders/__init__.py
+++ b/app/discover/link_finders/__init__.py
@@ -0,0 +1,10 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
diff --git a/app/discover/link_finders/find_links_for_pnics.py b/app/discover/link_finders/find_links_for_pnics.py
index 1f02426..94eba7b 100644
--- a/app/discover/link_finders/find_links_for_pnics.py
+++ b/app/discover/link_finders/find_links_for_pnics.py
@@ -41,6 +41,18 @@ class FindLinksForPnics(FindLinks):
def add_pnic_network_links(self, pnic):
host = pnic["host"]
+ if self.configuration.get_env_config()['type_drivers'] == "vlan":
+ # take this pnic only if we can find matching vedge-pnic links
+ matches = self.inv.find({
+ "environment": self.get_env(),
+ "link_type": "vedge-host_pnic",
+ "host": host,
+ "target_id": pnic["id"]},
+ projection={"_id": 1},
+ collection="links",
+ get_single=True)
+ if not matches:
+ return
# find ports for that host, and fetch just the network ID
ports = self.inv.find_items({
"environment": self.get_env(),
diff --git a/app/discover/link_finders/find_links_for_vconnectors.py b/app/discover/link_finders/find_links_for_vconnectors.py
index edb351a..0703cd8 100644
--- a/app/discover/link_finders/find_links_for_vconnectors.py
+++ b/app/discover/link_finders/find_links_for_vconnectors.py
@@ -31,7 +31,8 @@ class FindLinksForVconnectors(FindLinks):
is_ovs = mechanism_drivers and mechanism_drivers[0] == 'OVS'
if is_ovs:
# interface ID for OVS
- vnic = self.inv.get_by_id(self.get_env(), interface_name)
+ vnic_id = "{}-{}".format(vconnector["host"], interface_name)
+ vnic = self.inv.get_by_id(self.get_env(), vnic_id)
else:
# interface ID for VPP - match interface MAC address to vNIC MAC
interface = vconnector['interfaces'][interface_name]
diff --git a/app/discover/link_finders/find_links_for_vservice_vnics.py b/app/discover/link_finders/find_links_for_vservice_vnics.py
index ca9bc4a..f975c92 100644
--- a/app/discover/link_finders/find_links_for_vservice_vnics.py
+++ b/app/discover/link_finders/find_links_for_vservice_vnics.py
@@ -33,11 +33,6 @@ class FindLinksForVserviceVnics(FindLinks):
host = self.inv.get_by_id(self.get_env(), v["host"])
if "Network" not in host["host_type"]:
return
- if "network" not in v:
- return
- network = self.inv.get_by_id(self.get_env(), v["network"])
- if network == []:
- return
vservice_id = v["parent_id"]
vservice_id = vservice_id[:vservice_id.rindex('-')]
vservice = self.inv.get_by_id(self.get_env(), vservice_id)
@@ -46,7 +41,14 @@ class FindLinksForVserviceVnics(FindLinks):
target = v["_id"]
target_id = v["id"]
link_type = "vservice-vnic"
- link_name = network["name"]
+ extra_attributes = None
+ if "network" in v:
+ network = self.inv.get_by_id(self.get_env(), v["network"])
+ link_name = network["name"]
+ extra_attributes = {'network': v['network']}
+ else:
+ link_name = "{}-{}".format(vservice["object_name"],
+ v["object_name"])
state = "up" # TBD
link_weight = 0 # TBD
self.create_link(self.get_env(),
@@ -54,4 +56,4 @@ class FindLinksForVserviceVnics(FindLinks):
target, target_id,
link_type, link_name, state, link_weight,
host=v["host"],
- extra_attributes={'network': v['network']})
+ extra_attributes=extra_attributes)
diff --git a/app/discover/scanner.py b/app/discover/scanner.py
index d1323bd..1fbcc68 100644
--- a/app/discover/scanner.py
+++ b/app/discover/scanner.py
@@ -240,7 +240,7 @@ class Scanner(Fetcher):
run_app_path = conf.get('run_app_path', '')
if not run_app_path:
run_app_path = conf.get('app_path', '/etc/calipso')
- return run_app_path
+ return run_app_path
def load_scanners_metadata(self):
parser = ScanMetadataParser(self.inv)
diff --git a/app/install/calipso-installer.py b/app/install/calipso-installer.py
index 523a838..c2b8579 100644
--- a/app/install/calipso-installer.py
+++ b/app/install/calipso-installer.py
@@ -176,6 +176,10 @@ def start_mongo(dbport, copy):
copy_file("scheduled_scans")
copy_file("statistics")
copy_file("supported_environments")
+ copy_file("connection_tests")
+ copy_file("api_tokens")
+ copy_file("user_settings")
+ copy_file("apex_environment_config")
# note : 'messages', 'roles', 'users' and some of the 'constants'
# are filled by calipso-ui at runtime
@@ -216,14 +220,14 @@ def start_ldap():
volumes=calipso_volume)
-def start_api():
+def start_api(apiport):
name = "calipso-api"
if container_started(name):
return
print("\nstarting container {}...\n".format(name))
image_name = "korenlev/calipso:api"
download_image(image_name)
- api_ports = {'8000/tcp': 8000, '22/tcp': 40022}
+ api_ports = {'8000/tcp': apiport, '22/tcp': 40022}
DockerClient.containers.run(image_name,
detach=True,
name=name,
@@ -252,15 +256,15 @@ def start_scan():
volumes=calipso_volume)
-def start_sensu():
+def start_sensu(uchiwaport, sensuport, rabbitport, rabbitmport):
name = "calipso-sensu"
if container_started(name):
return
print("\nstarting container {}...\n".format(name))
image_name = "korenlev/calipso:sensu"
download_image(image_name)
- sensu_ports = {'22/tcp': 20022, '3000/tcp': 3000, '4567/tcp': 4567,
- '5671/tcp': 5671, '15672/tcp': 15672}
+ sensu_ports = {'22/tcp': 20022, '3000/tcp': uchiwaport, '4567/tcp': sensuport,
+ '5671/tcp': rabbitport, '15672/tcp': rabbitmport}
DockerClient.containers.run(image_name,
detach=True,
name=name,
@@ -326,6 +330,36 @@ parser.add_argument("--dbport",
type=int,
default="27017",
required=False)
+parser.add_argument("--apiport",
+ help="Port for the Calipso API "
+ "(default=8000)",
+ type=int,
+ default="8000",
+ required=False)
+parser.add_argument("--uchiwaport",
+ help="Port for the Calipso Uchiwa "
+ "(default=3000)",
+ type=int,
+ default="3000",
+ required=False)
+parser.add_argument("--rabbitmport",
+ help="Port for the Calipso Sensu RabbitMQ Managment "
+ "(default=15672)",
+ type=int,
+ default="15672",
+ required=False)
+parser.add_argument("--sensuport",
+ help="Port for the Calipso Sensu-api "
+ "(default=4567)",
+ type=int,
+ default="4567",
+ required=False)
+parser.add_argument("--rabbitport",
+ help="Port for the Calipso Sensu RabbitMQ "
+ "(default=5671)",
+ type=int,
+ default="5671",
+ required=False)
parser.add_argument("--dbuser",
help="User for the Calipso MongoDB "
"(default=calipso)",
@@ -339,14 +373,14 @@ parser.add_argument("--dbpassword",
default="calipso_default",
required=False)
parser.add_argument("--command",
- help="'start-all' or 'stop-all' the calipso containers "
+ help="'start-all' or 'stop-all' the Calipso containers "
"(default=None)",
type=str,
default=None,
required=False)
parser.add_argument("--copy",
help="'c' to copy json files from 'db' folder to mongoDB, 'q' to skip copy of files "
- "(default=q)",
+ "(default=None)",
type=str,
default=None,
required=False)
@@ -382,9 +416,10 @@ if action == "start":
calipso_mongo_access_text = \
"server {}\n" \
"user {}\n" \
+ "port {}\n" \
"pwd {}\n" \
"auth_db calipso" \
- .format(args.hostname, args.dbuser, args.dbpassword)
+ .format(args.hostname, args.dbuser, args.dbport, args.dbpassword)
LDAP_PWD_ATTRIBUTE = "password password"
LDAP_USER_PWD_ATTRIBUTE = "userpassword"
ldap_text = \
@@ -421,13 +456,13 @@ if action == "start":
start_ldap()
time.sleep(1)
if container == "calipso-api" or container == "all":
- start_api()
+ start_api(args.apiport)
time.sleep(1)
if container == "calipso-scan" or container == "all":
start_scan()
time.sleep(1)
if container == "calipso-sensu" or container == "all":
- start_sensu()
+ start_sensu(args.uchiwaport, args.sensuport, args.rabbitport, args.rabbitmport)
time.sleep(1)
if container == "calipso-ui" or container == "all":
start_ui(args.hostname, args.dbuser, args.dbpassword, args.webport,
diff --git a/app/install/configure/setup_apex_environment.py b/app/install/configure/setup_apex_environment.py
new file mode 100644
index 0000000..7dc49c5
--- /dev/null
+++ b/app/install/configure/setup_apex_environment.py
@@ -0,0 +1,568 @@
+#!/usr/bin/env python3
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from abc import ABC
+from logging.handlers import WatchedFileHandler
+import argparse
+import json
+import logging
+import re
+import shlex
+import subprocess
+import sys
+
+
+def run_command(cmd, raise_on_error=False) -> str:
+ try:
+ output = subprocess.check_output([cmd], shell=True)
+ return output.decode('utf-8')
+ except subprocess.CalledProcessError as e:
+ error_msg = 'Error running command: {}, output: {}'\
+ .format(cmd, e.output.decode('utf-8'))
+ if raise_on_error:
+ raise RuntimeError(error_msg)
+ return error_msg
+
+
+class Logger(ABC):
+ DEBUG = 'DEBUG'
+ INFO = 'INFO'
+ WARNING = 'WARNING'
+ ERROR = 'ERROR'
+ CRITICAL = 'CRITICAL'
+
+ PROJECT_NAME = 'Calipso'
+
+ levels = [DEBUG, INFO, WARNING, ERROR, CRITICAL]
+ log_format = '%(asctime)s %(levelname)s: %(message)s'
+ formatter = logging.Formatter(log_format)
+ default_level = INFO
+
+ def __init__(self, logger_name: str = PROJECT_NAME,
+ level: str = default_level):
+ super().__init__()
+ self.check_level(level)
+ self.log = logging.getLogger(logger_name)
+ logging.basicConfig(format=self.log_format,
+ level=level)
+ self.log.propagate = False
+ self.set_loglevel(level)
+ self.env = None
+ self.level = level
+
+ def set_env(self, env):
+ self.env = env
+
+ @staticmethod
+ def check_level(level):
+ if level.upper() not in Logger.levels:
+ raise ValueError('Invalid log level: {}. Supported levels: ({})'
+ .format(level, ", ".join(Logger.levels)))
+
+ @staticmethod
+ def get_numeric_level(loglevel):
+ Logger.check_level(loglevel)
+ numeric_level = getattr(logging, loglevel.upper(), Logger.default_level)
+ if not isinstance(numeric_level, int):
+ raise ValueError('Invalid log level: {}'.format(loglevel))
+ return numeric_level
+
+ def set_loglevel(self, loglevel):
+ # assuming loglevel is bound to the string value obtained from the
+ # command line argument. Convert to upper case to allow the user to
+ # specify --log=DEBUG or --log=debug
+ numeric_level = self.get_numeric_level(loglevel)
+
+ for handler in self.log.handlers:
+ handler.setLevel(numeric_level)
+ self.log.setLevel(numeric_level)
+ self.level = loglevel
+
+ def _log(self, level, message, *args, exc_info=False, **kwargs):
+ self.log.log(level, message, *args, exc_info=exc_info, **kwargs)
+
+ def debug(self, message, *args, **kwargs):
+ self._log(logging.DEBUG, message, *args, **kwargs)
+
+ def info(self, message, *args, **kwargs):
+ self._log(logging.INFO, message, *args, **kwargs)
+
+ def warning(self, message, *args, **kwargs):
+ self._log(logging.WARNING, message, *args, **kwargs)
+
+ def warn(self, message, *args, **kwargs):
+ self.warning(message, *args, **kwargs)
+
+ def error(self, message, *args, **kwargs):
+ self._log(logging.ERROR, message, *args, **kwargs)
+
+ def exception(self, message, *args, **kwargs):
+ self._log(logging.ERROR, message, exc_info=True, *args, **kwargs)
+
+ def critical(self, message, *args, **kwargs):
+ self._log(logging.CRITICAL, message, *args, **kwargs)
+
+ def add_handler(self, handler):
+ handler_defined = handler.__class__ in map(lambda h: h.__class__,
+ self.log.handlers)
+
+ if not handler_defined:
+ handler.setLevel(self.level)
+ handler.setFormatter(self.formatter)
+ self.log.addHandler(handler)
+
+
+class FileLogger(Logger):
+
+ def __init__(self, log_file: str, level: str = Logger.default_level):
+ super().__init__(logger_name="{}-File".format(self.PROJECT_NAME),
+ level=level)
+ self.add_handler(WatchedFileHandler(log_file))
+
+
+class ApexEnvironmentFetcher:
+
+ DEFAULTS = {
+ 'logfile': '/home/calipso/log/apex_environment_fetch.log',
+ 'mongo_config': '/local_dir/calipso_mongo_access.conf',
+ 'config_dir': '/home/calipso/Calipso/app/install/db',
+ 'env': 'Apex-Euphrates',
+ 'loglevel': 'INFO',
+ 'git_repo': 'https://git.opnfv.org/calipso',
+ 'root': False
+ }
+
+ USER_NAME = 'calipso'
+ USER_PWD = 'calipso_default'
+ REPO_LOCAL_NAME = 'Calipso'
+ INSTALLER = 'python3 app/install/calipso-installer.py --command start-all'
+ CONFIG_FILE_NAME = 'apex-configuration.conf'
+ ENV_CONFIG_FILE_NAME = 'apex_environment_config.json'
+ OVERCLOUDRC_FILE = 'overcloudrc.v3'
+ SSH_DIR = '/home/calipso/.ssh'
+ SSH_OPTIONS = '-q -o StrictHostKeyChecking=no'
+ UNDERCLOUD_KEY_FILE = 'uc-id_rsa'
+ UNDERCLOUD_PUBLIC_KEY_FILE = '{}/uc-id_rsa.pub'.format(SSH_DIR)
+ OVERCLOUD_USER = 'heat-admin'
+ OVERCLOUD_KEY_FILE = 'oc-id_rsa'
+ MOUNT_SSH_DIR = '/local_dir/.ssh'
+ OVERCLOUD_KEYSTONE_CONF = 'oc-keystone.conf'
+ OVERCLOUD_ML2_CONF = 'overcloud_ml2_conf.ini'
+ OVERCLOUD_RABBITMQ_CONF = 'overcloud_rabbitmq_conf.ini'
+
+ def __init__(self):
+ self.args = self.get_args()
+ self.log = None
+ self.config_file = '{}/{}'.format(self.args.config_dir,
+ self.CONFIG_FILE_NAME)
+ self.env_config_file = '{}/{}'.format(self.args.config_dir,
+ self.ENV_CONFIG_FILE_NAME)
+ self.undercloud_user = 'root'
+ self.undercloud_host = '192.0.2.1'
+ self.undercloud_key = '{}/{}'.format(self.SSH_DIR,
+ self.UNDERCLOUD_KEY_FILE)
+ self.overcloud_config_file = '{}/{}'\
+ .format(self.args.config_dir, self.OVERCLOUDRC_FILE)
+ self.overcloud_key = '{}/{}'.format(self.SSH_DIR,
+ self.OVERCLOUD_KEY_FILE)
+ self.overcloud_key_container = '{}/{}'.format(self.MOUNT_SSH_DIR,
+ self.OVERCLOUD_KEY_FILE)
+ self.undercloud_ip = None
+ self.overcloud_ip = None
+ self.conf_lines = {}
+ self.env_config = None
+
+ def get_args(self):
+ # try to read scan plan from command line parameters
+ parser = argparse.ArgumentParser()
+ parser.add_argument('-m', '--mongo_config', nargs='?', type=str,
+ default=self.DEFAULTS['mongo_config'],
+ help='name of config file ' +
+ 'with MongoDB server access details\n'
+ '(Default: {})'
+ .format(self.DEFAULTS['mongo_config']))
+ parser.add_argument('-d', '--config_dir', nargs='?', type=str,
+ default=self.DEFAULTS['config_dir'],
+ help='path to directory with config data\n'
+ '(Default: {})'
+ .format(self.DEFAULTS['config_dir']))
+ parser.add_argument('-a', '--apex', nargs='?', type=str,
+                            help='name or address of the Apex host')
+ parser.add_argument('-e', '--env', nargs='?', type=str,
+ default=self.DEFAULTS['env'],
+                            help='name of environment to create\n'
+ '(Default: {})'
+ .format(self.DEFAULTS['env']))
+ parser.add_argument('-l', '--loglevel', nargs='?', type=str,
+ default=self.DEFAULTS['loglevel'],
+ help='logging level \n(default: "{}")'
+ .format(self.DEFAULTS['loglevel']))
+ parser.add_argument('-f', '--logfile', nargs='?', type=str,
+ default=self.DEFAULTS['logfile'],
+ help='log file \n(default: "{}")'
+ .format(self.DEFAULTS['logfile']))
+ parser.add_argument('-g', '--git', nargs='?', type=str,
+ help='URL to clone Git repository\n(default: {})'
+ .format(self.DEFAULTS['git_repo']),
+ default=self.DEFAULTS['git_repo'])
+ parser.add_argument('--root', dest='root', action='store_true')
+ parser.add_argument('--no-root', dest='root', action='store_false')
+ parser.set_defaults(root=False)
+ return parser.parse_args()
+
+ @staticmethod
+    def run_cmd(cmd: str = '', use_sudo=True, as_user=None):
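+        # e.g. run_cmd('ifconfig br-admin') executes "sudo ifconfig br-admin",
+        # while run_cmd('ls /tmp', use_sudo=False) would run it unprefixed
+        # (illustrative calls; this module only uses the sudo form)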
+        sudo_prefix = '' if not use_sudo \
+            else 'sudo {}'.format('-u {} '.format(as_user) if as_user else '')
+ command = '{}{}'.format(sudo_prefix, cmd)
+ output = run_command(cmd=command, raise_on_error=True)
+ return output
+
+ def get_undercloud_ip(self):
+ output = self.run_cmd('ifconfig br-admin')
+ lines = output.splitlines()
+ if not lines or len(lines) < 2:
+            self.log.error('Unable to fetch inet address, output: {}'
+ .format(output))
+ return
+ inet_parts = lines[1].split()
+ inet_address = inet_parts[1]
+ return inet_address
+
+ def get_overcloud_ip(self):
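+        # the overcloud admin IP is taken as the last comma-separated entry of
+        # the no_proxy list in overcloudrc.v3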
+        with open(self.overcloud_config_file) as rc_file:
+ lines = rc_file.readlines()
+ no_proxy_line = [l for l in lines if 'no_proxy=' in l]
+ no_proxy_line = no_proxy_line[0]
+ value = no_proxy_line[no_proxy_line.index('=')+2:]
+ parts = value.strip().split(',')
+ inet_address = parts[-1]
+ return inet_address
+
+ def set_ssh_dir(self):
+ self.run_cmd('mkdir -p {}'.format(self.SSH_DIR))
+ # will be used to access undercloud VM
+ self.run_cmd('cp /root/.ssh/id_rsa {}'.format(self.undercloud_key))
+ self.run_cmd('cp /root/.ssh/id_rsa.pub {}'
+ .format(self.UNDERCLOUD_PUBLIC_KEY_FILE))
+ self.run_cmd('chown calipso.calipso {}/uc-id_rsa*'.format(self.SSH_DIR))
+ self.copy_undercloud_file('/home/stack/.ssh/id_rsa',
+ local_dir=self.SSH_DIR,
+ local_name=self.OVERCLOUD_KEY_FILE)
+ self.copy_undercloud_file('/home/stack/.ssh/id_rsa.pub',
+ local_dir=self.SSH_DIR,
+ local_name='oc-id_rsa.pub')
+ self.run_cmd('chown calipso.calipso {}/oc-id_rsa*'.format(self.SSH_DIR))
+
+ def copy_undercloud_file(self, file_path, local_dir=None, local_name=None):
+ cmd = 'scp {} -i {} {}@{}:{} {}/{}' \
+ .format(self.SSH_OPTIONS,
+ self.undercloud_key,
+ self.undercloud_user, self.undercloud_host,
+ file_path,
+ local_dir if local_dir else self.args.config_dir,
+ local_name if local_name else '')
+ self.run_cmd(cmd)
+
+ def copy_undercloud_conf_file(self, file_name, local_name=None):
+ self.copy_undercloud_file('/home/stack/{}'.format(file_name),
+                                  local_name=local_name)
+
+ def get_undercloud_setup(self):
+ self.copy_undercloud_conf_file('undercloud.conf')
+ self.copy_undercloud_conf_file('opnfv-environment.yaml')
+ self.copy_undercloud_conf_file('overcloudrc')
+ self.copy_undercloud_conf_file('stackrc')
+ self.copy_undercloud_conf_file('overcloudrc.v3')
+ self.copy_undercloud_conf_file('deploy_command')
+ self.copy_undercloud_conf_file('apex-undercloud-install.log')
+ self.copy_undercloud_conf_file('undercloud-passwords.conf')
+ self.copy_undercloud_file('/etc/keystone/keystone.conf',
+ local_name='uc-keystone.conf')
+ self.run_cmd('mkdir -p {}/deploy_logs'.format(self.args.config_dir))
+ self.copy_undercloud_file('/home/stack/deploy_logs/*',
+ local_name='deploy_logs/')
+
+ def fetch_conf_file(self, file_name, target_file, lines_property=None):
+ conf = \
+ self.run_cmd('ssh -i {} {} {}@{} '
+ 'sudo grep -v "^#" {}'
+ .format(self.overcloud_key,
+ self.SSH_OPTIONS,
+ self.OVERCLOUD_USER,
+ self.overcloud_ip,
+ file_name))
+ conf_file_path = '{}/{}'.format(self.args.config_dir, target_file)
+ if lines_property:
+ self.conf_lines[lines_property] = conf.splitlines()
+ with open(conf_file_path, 'w') as conf_file:
+ conf_file.write(conf)
+
+ def fetch_keystone_conf(self):
+ self.fetch_conf_file('/etc/keystone/keystone.conf',
+ self.OVERCLOUD_KEYSTONE_CONF,
+ lines_property='keystone_conf')
+
+ def fetch_ml2_conf(self):
+ self.fetch_conf_file('/etc/neutron/plugins/ml2/ml2_conf.ini',
+ self.OVERCLOUD_ML2_CONF,
+ lines_property='ml2_conf')
+
+ def fetch_rabbitmq_conf(self):
+ self.fetch_conf_file('/etc/rabbitmq/rabbitmq.config',
+ self.OVERCLOUD_RABBITMQ_CONF,
+ lines_property='rabbitmq_conf')
+
+ def copy_local_file_to_overcloud(self, local_file, remote_file_path,
+ local_dir=None):
+ source_dir = local_dir if local_dir else self.args.config_dir
+ local_file_path = '{}/{}'.format(source_dir, local_file)
+ cmd = 'scp {} -i {} {} {}@{}:{}' \
+ .format(self.SSH_OPTIONS,
+ self.overcloud_key,
+ local_file_path,
+ self.OVERCLOUD_USER, self.overcloud_ip,
+ remote_file_path)
+ self.run_cmd(cmd)
+
+ def get_overcloud_keys(self):
+ remote_ssh_dir = '/home/{}/.ssh'.format(self.OVERCLOUD_USER)
+ remote_private_key = '{}/id_rsa'.format(remote_ssh_dir)
+ self.copy_local_file_to_overcloud(self.OVERCLOUD_KEY_FILE,
+ remote_private_key,
+ local_dir=self.SSH_DIR)
+ public_key = '{}.pub'.format(self.OVERCLOUD_KEY_FILE)
+ remote_public_key = '{}/id_rsa.pub'.format(remote_ssh_dir)
+ self.copy_local_file_to_overcloud(public_key, remote_public_key,
+ local_dir=self.SSH_DIR)
+
+ def get_overcloud_setup(self):
+ self.get_overcloud_keys()
+ self.fetch_keystone_conf()
+ self.fetch_ml2_conf()
+ self.fetch_rabbitmq_conf()
+
+ def get_value_from_file(self, file_attr, attr, regex=None, separator='='):
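+        # Illustrative sketch (sample values are assumptions, not taken from a
+        # real deployment): with conf_lines['overcloudrc'] holding the line
+        # 'export OS_AUTH_URL=http://10.0.0.1:5000/v3', calling
+        # get_value_from_file('overcloudrc', 'OS_AUTH_URL', regex=r':(\d+)/')
+        # returns '5000'; without a regex the whole value after the separator
+        # is returned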
+ line_prefix = 'export ' if separator == '=' else ''
+ prefix = '{}{}{}'.format(line_prefix, attr, separator)
+        lines = self.conf_lines.get(file_attr, [])
+ matches = [l for l in lines if l.startswith(prefix)]
+ if not matches:
+ self.log.error('failed to find attribute {}'.format(attr))
+ return ''
+ line = matches[0].strip()
+ value = line[line.index(separator)+len(separator):]
+ if not regex:
+ return value
+ matches = re.search(regex, value)
+ if not matches:
+ return ''
+ match = matches.group(1)
+ return match
+
+    def get_value_from_rc_file(self, file_attr, attr, regex=None):
+        return self.get_value_from_file(file_attr, attr, regex=regex)
+
+ def get_api_config(self):
+        with open(self.overcloud_config_file) as rc_file:
+ self.conf_lines['overcloudrc'] = rc_file.readlines()
+ api_config = {
+ 'name': 'OpenStack',
+ 'host': self.overcloud_ip,
+ 'port': self.get_value_from_rc_file('overcloudrc',
+ 'OS_AUTH_URL',
+                                                regex=r':(\d+)/'),
+ 'user': self.get_value_from_rc_file('overcloudrc', 'OS_USERNAME'),
+ 'pwd': self.get_value_from_rc_file('overcloudrc', 'OS_PASSWORD'),
+ 'admin_token': self.get_value_from_file('keystone_conf',
+ 'admin_token',
+ separator=' = ')
+ }
+ return api_config
+
+ def run_command_on_overcloud(self, cmd):
+ output = \
+ self.run_cmd('ssh -i {} {} {}@{} {}'
+ .format(self.overcloud_key,
+ self.SSH_OPTIONS,
+ self.OVERCLOUD_USER,
+ self.overcloud_ip,
+ shlex.quote(cmd)))
+ return output
+
+ def create_mysql_user(self, host, pwd):
+ mysql_file_name = '/tmp/create_user.sql'
+ # create calipso MySQL user with access from jump host to all tables
+ echo_cmd = "echo \"GRANT ALL PRIVILEGES ON *.* " \
+ "TO 'calipso'@'{}' " \
+ "IDENTIFIED BY '{}'; " \
+ "FLUSH PRIVILEGES;\" > {}"\
+ .format(host, pwd, mysql_file_name)
+ self.run_command_on_overcloud(echo_cmd)
+ run_mysql_cmd = 'sudo mysql < {}'.format(mysql_file_name)
+ self.run_command_on_overcloud(run_mysql_cmd)
+ remove_file_cmd = 'rm {}'.format(mysql_file_name)
+ self.run_command_on_overcloud(remove_file_cmd)
+ return pwd
+
+ def get_mysql_config(self):
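+        # a random password is generated locally, then a 'calipso' MySQL user
+        # is granted access from both the undercloud and the overcloud hosts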
+ pwd = self.run_cmd('openssl rand -base64 18').strip()
+ self.create_mysql_user(self.undercloud_ip, pwd)
+ pwd = self.create_mysql_user(self.overcloud_ip, pwd)
+ mysql_config = {
+ 'name': 'mysql',
+ 'host': self.overcloud_ip,
+ 'port': '3306',
+ 'user': 'calipso',
+ 'pwd': pwd
+ }
+ return mysql_config
+
+ def get_cli_config(self):
+ return {
+ 'name': 'CLI',
+ 'host': self.overcloud_ip,
+ 'user': self.OVERCLOUD_USER,
+ 'key': self.overcloud_key_container
+ }
+
+ def get_amqp_config(self):
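+        # rabbitmq.config is an Erlang terms file; the lookup keys keep their
+        # leading space so they match the indented '{default_user, ...}' lines,
+        # and the user/password strings and port number are extracted via regex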
+ user = self.get_value_from_file('rabbitmq_conf',
+ ' {default_user',
+ separator=',',
+ regex='"(.+)"')
+ pwd = self.get_value_from_file('rabbitmq_conf',
+ ' {default_pass',
+ separator=',',
+ regex='"(.+)"')
+ port = self.get_value_from_file('rabbitmq_conf',
+ ' {tcp_listeners',
+ separator=',',
+                                        regex=r', (\d+)')
+ port = int(port)
+ return {
+ 'name': 'AMQP',
+ 'host': self.overcloud_ip,
+ 'port': port,
+ 'user': user,
+ 'pwd': pwd
+ }
+
+ def get_monitoring_config(self):
+ return {
+ 'name': 'Monitoring',
+ 'config_folder': '/local_dir/sensu_config',
+ 'env_type': 'production',
+ 'rabbitmq_port': '5671',
+ 'rabbitmq_user': 'sensu',
+ 'server_ip': self.undercloud_ip,
+ 'server_name': 'sensu_server',
+ 'type': 'Sensu',
+ 'provision': 'None',
+ 'ssh_port': '20022',
+ 'ssh_user': 'root',
+ 'ssh_password': 'osdna',
+ 'api_port': 4567,
+ 'rabbitmq_pass': 'osdna'
+ }
+
+ def prepare_env_configuration_array(self):
+ config_array = [
+ self.get_api_config(),
+ self.get_mysql_config(),
+ self.get_cli_config(),
+ self.get_amqp_config(),
+ self.get_monitoring_config()
+ ]
+ self.env_config['configuration'] = config_array
+
+ UI_USER = 'wNLeBJxNDyw8G7Ssg'
+
+ def add_env_ui_conf(self):
+ self.env_config.update({
+ 'user': self.UI_USER,
+ 'auth': {
+ 'view-env': [self.UI_USER],
+ 'edit-env': [self.UI_USER]
+ }
+ })
+
+ def get_mechanism_driver(self):
+ driver = self.get_value_from_file('ml2_conf', 'mechanism_drivers',
+ separator=' =')
+ return 'OVS' if driver == 'openvswitch' else driver
+
+ def set_env_level_attributes(self):
+ self.env_config.update({
+ 'distribution': 'Apex',
+ 'distribution_version': 'Euphrates',
+ 'type_drivers': self.get_value_from_file('ml2_conf',
+ 'tenant_network_types',
+ separator=' = '),
+ 'mechanism_drivers': [self.get_mechanism_driver()],
+ "operational": "running",
+ "scanned": False,
+ "type": "environment",
+ "app_path": "/home/scan/calipso_prod/app",
+ "listen": True,
+ "enable_monitoring": True,
+ "aci_enabled": False,
+ "last_scanned": "",
+ "monitoring_setup_done": False
+ })
+
+ def prepare_env_config(self):
+ self.prepare_env_configuration_array()
+ self.set_env_level_attributes()
+ self.add_env_ui_conf()
+ config_dump = json.dumps(self.env_config, sort_keys=True, indent=4,
+ separators=(',', ': '))
+ with open(self.env_config_file, 'w') as config_file:
+ config_file.write(config_dump)
+
+ def setup_environment_config(self, config_file):
+ self.run_cmd('mkdir -p {}'.format(self.args.config_dir))
+ self.env_config = {'name': self.args.env}
+ self.undercloud_ip = self.get_undercloud_ip()
+ config_file.write('jumphost_admin_ip {}\n'.format(self.undercloud_ip))
+ self.set_ssh_dir()
+ self.get_undercloud_setup()
+ self.overcloud_ip = self.get_overcloud_ip()
+ config_file.write('overcloud_admin_ip {}\n'.format(self.overcloud_ip))
+ self.get_overcloud_setup()
+ # now get correct IP of overcloud from RabbitMQ setup
+ self.overcloud_ip = self.get_value_from_file('rabbitmq_conf',
+ ' {tcp_listeners',
+ regex='"(.*)"',
+ separator=',')
+ self.prepare_env_config()
+
+ def get(self):
+ try:
+ print('Fetching Apex environment settings')
+ self.log = FileLogger(self.args.logfile)
+ self.run_cmd('mkdir -p {}'.format(self.args.config_dir))
+ with open(self.config_file, 'w') as config_file:
+ self.setup_environment_config(config_file)
+ print('Finished fetching Apex environment settings')
+ return True, 'Environment setup finished successfully'
+ except RuntimeError as e:
+ return False, str(e)
+
+
+if __name__ == '__main__':
+ fetcher = ApexEnvironmentFetcher()
+ ret, msg = fetcher.get()
+ if not ret:
+ if fetcher.log:
+ fetcher.log.error(msg)
+ else:
+ print(msg)
+ sys.exit(0 if ret else 1)
diff --git a/app/install/db/apex_environment_config.json b/app/install/db/apex_environment_config.json
new file mode 100644
index 0000000..918cd01
--- /dev/null
+++ b/app/install/db/apex_environment_config.json
@@ -0,0 +1,3 @@
+{
+ "_id" : "apex_environment_config_temp_id"
+} \ No newline at end of file
diff --git a/app/install/db/api_tokens.json b/app/install/db/api_tokens.json
new file mode 100644
index 0000000..94cc63a
--- /dev/null
+++ b/app/install/db/api_tokens.json
@@ -0,0 +1,7 @@
+{
+ "token" : "ec56537a-a448-43f9-b36e-3e2bee44f018",
+ "issued_at" : "2017-04-06T14:32:17.893797Z",
+ "expires_at" : "2017-11-11T15:32:17.893769Z",
+ "lifetime" : "86400",
+ "method" : "credentials"
+}
diff --git a/app/install/db/connection_tests.json b/app/install/db/connection_tests.json
new file mode 100644
index 0000000..9965ea3
--- /dev/null
+++ b/app/install/db/connection_tests.json
@@ -0,0 +1,156 @@
+[
+{
+ "environment" : "My-Environment",
+ "test_targets" : [
+ "AMQP",
+ "CLI",
+ "ACI",
+ "mysql",
+ "OpenStack",
+ "Monitoring"
+ ],
+ "test_results" : {
+ "AMQP" : false,
+ "CLI" : true,
+ "ACI" : false,
+ "mysql" : false,
+ "OpenStack" : false,
+ "Monitoring" : false
+ },
+ "targets_configuration" :
+ [
+ {
+ "name" : "OpenStack",
+ "host" : "1.1.1.1",
+ "port" : "5000",
+ "user" : "admin",
+ "pwd" : "admin",
+ "admin_token" : "tokentoken"
+ },
+ {
+ "name" : "ACI",
+ "host" : "2.2.2.2",
+ "user" : "admin",
+ "pwd" : "password"
+ },
+ {
+ "name" : "mysql",
+ "host" : "1.1.1.1",
+ "port" : "3307",
+ "user" : "root",
+ "pwd" : "password"
+ },
+ {
+ "name" : "CLI",
+ "host" : "1.1.1.7",
+ "user" : "root",
+ "key" : "/local_dir/ACI-id_rsa"
+ },
+ {
+ "name" : "AMQP",
+ "host" : "1.1.1.1",
+ "port" : "5673",
+ "user" : "nova",
+ "pwd" : "password"
+ },
+ {
+ "name" : "Monitoring",
+ "config_folder" : "/local_dir/sensu_config",
+ "env_type" : "production",
+ "rabbitmq_port" : "5671",
+ "rabbitmq_user" : "sensu",
+ "server_ip" : "the_sensu_server",
+ "server_name" : "sensu_server",
+ "type" : "Sensu",
+ "provision" : "None",
+ "ssh_port" : "20022",
+ "ssh_user" : "root",
+ "ssh_password" : "osdna",
+ "api_port" : 4567,
+ "rabbitmq_pass" : "osdna"
+ }
+ ],
+ "submit_timestamp" : "2017-05-17T07:53:09.194+0000",
+ "response_time" : "78ms",
+ "response_timestamp" : "2017-03-17T11:00:17.939+0000",
+ "status" : "response",
+ "last_response_message" : "cli says yes i am alive"
+},
+{
+ "environment" : "Mirantis-Liberty",
+ "test_targets" : [
+ "AMQP",
+ "CLI",
+ "ACI",
+ "mysql",
+ "OpenStack",
+ "Monitoring"
+ ],
+ "targets_configuration" :
+ [
+ {
+ "name" : "OpenStack",
+ "host" : "1.1.1.1",
+ "port" : "5000",
+ "user" : "admin",
+ "pwd" : "admin",
+ "admin_token" : "tokentoken"
+ },
+ {
+ "name" : "ACI",
+ "host" : "2.2.2.2",
+ "user" : "admin",
+ "pwd" : "password"
+ },
+ {
+ "name" : "mysql",
+ "host" : "1.1.1.1",
+ "port" : "3307",
+ "user" : "root",
+ "pwd" : "password"
+ },
+ {
+ "name" : "CLI",
+ "host" : "1.1.1.7",
+ "user" : "root",
+ "key" : "/local_dir/ACI-id_rsa"
+ },
+ {
+ "name" : "AMQP",
+ "host" : "1.1.1.1",
+ "port" : "5673",
+ "user" : "nova",
+ "pwd" : "password"
+ },
+ {
+ "name" : "Monitoring",
+ "config_folder" : "/local_dir/sensu_config",
+ "env_type" : "production",
+ "rabbitmq_port" : "5671",
+ "rabbitmq_user" : "sensu",
+ "server_ip" : "the_sensu_server",
+ "server_name" : "sensu_server",
+ "type" : "Sensu",
+ "provision" : "None",
+ "ssh_port" : "20022",
+ "ssh_user" : "root",
+ "ssh_password" : "osdna",
+ "api_port" : 4567,
+ "rabbitmq_pass" : "osdna"
+ }
+ ],
+ "test_results" : {
+ "AMQP" : false,
+ "CLI" : false,
+ "ACI" : false,
+ "mysql" : true,
+ "OpenStack" : false,
+ "Monitoring" : false
+ },
+ "submit_timestamp" : "2017-05-17T07:53:09.194+0000",
+ "response_time" : "56ms",
+ "response_timestamp" : "2017-05-17T11:00:17.939+0000",
+ "status" : "response",
+ "last_response_message" : "mysql says yes i am alive"
+}
+]
diff --git a/app/install/db/constants.json b/app/install/db/constants.json
index 2ad8921..15522b7 100644
--- a/app/install/db/constants.json
+++ b/app/install/db/constants.json
@@ -528,6 +528,10 @@
{
"value" : "10239",
"label" : "10239"
+ },
+ {
+ "value" : "10918",
+ "label" : "10918"
}
]
},
@@ -721,5 +725,34 @@
"value" : "switch"
}
]
+},
+{
+ "name" : "configuration_targets",
+ "data" : [
+ {
+ "label" : "AMQP",
+ "value" : "AMQP"
+ },
+ {
+ "label" : "CLI",
+ "value" : "CLI"
+ },
+ {
+ "label" : "ACI",
+ "value" : "ACI"
+ },
+ {
+ "label" : "mysql",
+ "value" : "mysql"
+ },
+ {
+ "label" : "OpenStack",
+ "value" : "OpenStack"
+ },
+ {
+ "label" : "Monitoring",
+ "value" : "Monitoring"
+ }
+ ]
}
]
diff --git a/app/install/db/environments_config.json b/app/install/db/environments_config.json
index 93971a2..d7157e7 100644
--- a/app/install/db/environments_config.json
+++ b/app/install/db/environments_config.json
@@ -7,14 +7,14 @@
"name" : "OpenStack",
"admin_token" : "dummy_token",
"user" : "adminuser",
- "port" : 5000,
+ "port" : "5000",
"pwd" : "dummy_pwd",
"host" : "10.0.0.1"
},
{
"name" : "mysql",
"pwd" : "dummy_pwd",
- "port" : 3307,
+ "port" : "3307",
"user" : "mysqluser",
"host" : "10.0.0.1"
},
@@ -27,18 +27,18 @@
{
"name" : "AMQP",
"pwd" : "dummy_pwd",
- "port" : 5673,
+ "port" : "5673",
"user" : "rabbitmquser",
"host" : "10.0.0.1"
},
{
- "rabbitmq_port" : 5671,
+ "rabbitmq_port" : "5671",
"ssh_user" : "root",
"server_name" : "sensu_server",
"env_type" : "production",
"provision" : "None",
"name" : "Monitoring",
- "ssh_port" : 20022,
+ "ssh_port" : "20022",
"rabbitmq_pass" : "dummy_pwd",
"ssh_password" : "dummy_pwd",
"rabbitmq_user" : "sensu",
diff --git a/app/install/db/supported_environments.json b/app/install/db/supported_environments.json
index 9a9ddcb..0d507c3 100644
--- a/app/install/db/supported_environments.json
+++ b/app/install/db/supported_environments.json
@@ -1,288 +1,178 @@
[
-{
+ {
"environment" : {
- "distribution" : "Mirantis",
- "distribution_version" : "10.0",
- "mechanism_drivers" : "OVS",
- "type_drivers" : "vlan"
- },
- "features" : {
- "listening" : true,
- "scanning" : true,
- "monitoring" : true
- }
-},
-{
- "environment" : {
- "distribution" : "Mercury",
- "distribution_version" : "10239",
- "mechanism_drivers" : "OVS",
- "type_drivers" : "vlan"
- },
- "features" : {
- "listening" : true,
- "scanning" : true,
- "monitoring" : false
- }
-},
-{
- "environment" : {
- "distribution" : "Apex",
- "distribution_version" : "Euphrates",
- "mechanism_drivers" : "OVS",
- "type_drivers" : "vxlan"
- },
- "features" : {
- "listening" : true,
- "scanning" : true,
- "monitoring" : true
- }
-},
-{
- "environment" : {
- "distribution" : "Stratoscale",
- "distribution_version" : "2.1.6",
- "mechanism_drivers" : "OVS",
- "type_drivers" : "vlan"
- },
- "features" : {
- "listening" : false,
- "scanning" : true,
- "monitoring" : true
- }
-},
-{
- "environment" : {
- "distribution" : "Mirantis",
- "distribution_version" : "6.0",
- "mechanism_drivers" : "OVS",
- "type_drivers" : "vxlan"
- },
- "features" : {
- "listening" : true,
- "scanning" : true,
- "monitoring" : true
- }
-},
-{
- "environment" : {
- "distribution" : "Mirantis",
- "distribution_version" : "7.0",
- "mechanism_drivers" : "OVS",
- "type_drivers" : "vxlan"
- },
- "features" : {
- "listening" : true,
- "scanning" : true,
- "monitoring" : true
- }
-},
-{
- "environment" : {
- "distribution" : "Mirantis",
- "distribution_version" : "8.0",
- "mechanism_drivers" : "OVS",
- "type_drivers" : "vxlan"
- },
+ "distribution" : "Apex",
+ "distribution_version" : ["Euphrates"],
+ "mechanism_drivers" : "OVS",
+ "type_drivers" : "vxlan"
+ },
"features" : {
- "listening" : true,
- "scanning" : true,
- "monitoring" : true
+ "listening" : true,
+ "scanning" : true,
+ "monitoring" : true
}
-},
-{
+ },
+ {
"environment" : {
- "distribution" : "Mirantis",
- "distribution_version" : "9.1",
- "mechanism_drivers" : "OVS",
- "type_drivers" : "vxlan"
- },
+ "distribution" : "Devstack",
+ "distribution_version" : ["Mitaka"],
+ "mechanism_drivers" : "VPP",
+ "type_drivers" : "vlan"
+ },
"features" : {
- "listening" : false,
- "scanning" : true,
- "monitoring" : true
+ "listening" : true,
+ "scanning" : true,
+ "monitoring" : true
}
-},
-{
+ },
+ {
"environment" : {
- "distribution" : "RDO",
- "distribution_version" : "Mitaka",
- "mechanism_drivers" : "OVS",
- "type_drivers" : "vxlan"
- },
+ "distribution" : "Devstack",
+ "distribution_version" : ["Mitaka"],
+ "mechanism_drivers" : "VPP",
+ "type_drivers" : "vxlan"
+ },
"features" : {
- "listening" : true,
- "scanning" : true,
- "monitoring" : true
+ "listening" : true,
+ "scanning" : true,
+ "monitoring" : true
}
-},
-{
+ },
+ {
"environment" : {
- "distribution" : "RDO",
- "distribution_version" : "Liberty",
- "mechanism_drivers" : "OVS",
- "type_drivers" : "vxlan"
- },
+ "distribution" : "Mercury",
+ "distribution_version" : ["10239"],
+ "mechanism_drivers" : "OVS",
+ "type_drivers" : "vlan"
+ },
"features" : {
- "listening" : true,
- "scanning" : true,
- "monitoring" : true
+ "listening" : true,
+ "scanning" : true,
+ "monitoring" : false
}
-},
-{
- "environment" : {
- "distribution" : "Mirantis",
- "distribution_version" : "9.0",
- "mechanism_drivers" : "OVS",
- "type_drivers" : "vxlan"
- },
+ },
+ {
"features" : {
"listening" : true,
- "scanning" : true,
- "monitoring" : true
- }
-},
-{
- "environment" : {
- "distribution" : "Mirantis",
- "distribution_version" : "9.0",
- "mechanism_drivers" : "OVS",
- "type_drivers" : "vlan"
+ "monitoring" : true,
+ "scanning" : true
},
- "features" : {
- "listening" : true,
- "scanning" : true,
- "monitoring" : true
- }
-},
-{
"environment" : {
- "distribution" : "Mirantis",
- "distribution_version" : "8.0",
"mechanism_drivers" : "OVS",
- "type_drivers" : "vlan"
- },
- "features" : {
- "listening" : true,
- "scanning" : true,
- "monitoring" : true
- }
-},
-{
- "environment" : {
- "distribution" : "Mirantis",
- "distribution_version" : "6.0",
- "mechanism_drivers" : "OVS",
- "type_drivers" : "vlan"
- },
- "features" : {
- "listening" : true,
- "scanning" : true,
- "monitoring" : true
- }
-},
-{
- "environment" : {
- "distribution" : "Mirantis",
- "distribution_version" : "7.0",
- "mechanism_drivers" : "OVS",
- "type_drivers" : "vlan"
- },
- "features" : {
- "listening" : true,
- "scanning" : true,
- "monitoring" : true
- }
-},
-{
- "environment" : {
- "distribution" : "Mirantis",
- "distribution_version" : "9.1",
- "mechanism_drivers" : "OVS",
- "type_drivers" : "vlan"
- },
- "features" : {
- "listening" : true,
- "scanning" : true,
- "monitoring" : true
- }
-},
-{
- "environment" : {
- "distribution" : "RDO",
- "distribution_version" : "Mitaka",
- "mechanism_drivers" : "VPP",
- "type_drivers" : "vxlan"
- },
- "features" : {
- "listening" : true,
- "scanning" : true,
- "monitoring" : true
- }
-},
-{
- "environment" : {
- "distribution" : "RDO",
- "distribution_version" : "Mitaka",
- "mechanism_drivers" : "VPP",
- "type_drivers" : "vlan"
- },
- "features" : {
- "listening" : true,
- "scanning" : true,
- "monitoring" : true
- }
-},
-{
- "environment" : {
- "distribution" : "Devstack",
- "distribution_version" : "Mitaka",
- "mechanism_drivers" : "VPP",
- "type_drivers" : "vlan"
- },
- "features" : {
- "listening" : true,
- "scanning" : true,
- "monitoring" : true
- }
-},
-{
- "environment" : {
- "distribution" : "Devstack",
- "distribution_version" : "Mitaka",
- "mechanism_drivers" : "VPP",
- "type_drivers" : "vxlan"
- },
- "features" : {
- "listening" : true,
- "scanning" : true,
- "monitoring" : true
- }
-},
-{
- "environment" : {
- "distribution" : "RDO",
- "distribution_version" : "Mitaka",
- "mechanism_drivers" : "OVS",
- "type_drivers" : "vlan"
- },
- "features" : {
- "listening" : true,
- "scanning" : true,
- "monitoring" : true
- }
-},
-{
- "environment" : {
- "distribution" : "RDO",
- "distribution_version" : "Liberty",
- "mechanism_drivers" : "OVS",
- "type_drivers" : "vlan"
- },
- "features" : {
- "listening" : true,
- "scanning" : true,
- "monitoring" : true
- }
-}
+ "type_drivers" : "vlan",
+ "distribution" : "Mercury",
+ "distribution_version" : ["10918"]
+ }
+ },
+ {
+ "environment" : {
+ "distribution" : "Mirantis",
+ "distribution_version" : [
+ "6.0",
+ "7.0",
+ "8.0",
+ "9.0",
+ "9.1",
+ "10.0"
+ ],
+ "mechanism_drivers" : "OVS",
+ "type_drivers" : "vlan"
+ },
+ "features" : {
+ "listening" : true,
+ "scanning" : true,
+ "monitoring" : true
+ }
+ },
+ {
+ "environment" : {
+ "distribution" : "Mirantis",
+ "distribution_version" : [
+ "6.0",
+ "7.0",
+ "8.0",
+ "9.0",
+ "9.1",
+ "10.0"
+ ],
+ "mechanism_drivers" : "OVS",
+ "type_drivers" : "vxlan"
+ },
+ "features" : {
+ "listening" : true,
+ "scanning" : true,
+ "monitoring" : true
+ }
+ },
+ {
+ "environment" : {
+ "distribution" : "RDO",
+ "distribution_version" : [
+ "Liberty",
+ "Mitaka"
+ ],
+ "mechanism_drivers" : "OVS",
+ "type_drivers" : "vlan"
+ },
+ "features" : {
+ "listening" : true,
+ "scanning" : true,
+ "monitoring" : true
+ }
+ },
+ {
+ "environment" : {
+ "distribution" : "RDO",
+ "distribution_version" : [
+ "Liberty",
+ "Mitaka"
+ ],
+ "mechanism_drivers" : "OVS",
+ "type_drivers" : "vxlan"
+ },
+ "features" : {
+ "listening" : true,
+ "scanning" : true,
+ "monitoring" : true
+ }
+ },
+ {
+ "environment" : {
+ "distribution" : "RDO",
+ "distribution_version" : ["Mitaka"],
+ "mechanism_drivers" : "VPP",
+ "type_drivers" : "vxlan"
+ },
+ "features" : {
+ "listening" : true,
+ "scanning" : true,
+ "monitoring" : true
+ }
+ },
+ {
+ "environment" : {
+ "distribution" : "RDO",
+ "distribution_version" : ["Mitaka"],
+ "mechanism_drivers" : "VPP",
+ "type_drivers" : "vlan"
+ },
+ "features" : {
+ "listening" : true,
+ "scanning" : true,
+ "monitoring" : true
+ }
+ },
+ {
+ "environment" : {
+ "distribution" : "Stratoscale",
+ "distribution_version" : ["2.1.6"],
+ "mechanism_drivers" : "OVS",
+ "type_drivers" : "vlan"
+ },
+ "features" : {
+ "listening" : false,
+ "scanning" : true,
+ "monitoring" : true
+ }
+ }
]
diff --git a/app/install/db/user_settings.json b/app/install/db/user_settings.json
new file mode 100644
index 0000000..ad0e868
--- /dev/null
+++ b/app/install/db/user_settings.json
@@ -0,0 +1,4 @@
+{
+ "messages_view_backward_delta" : 1209600000,
+ "user_id" : "wNLeBJxNDyw8G7Ssg"
+}
diff --git a/app/install/ldap.conf.example b/app/install/ldap.conf.example
index b1798f7..6a4f926 100644
--- a/app/install/ldap.conf.example
+++ b/app/install/ldap.conf.example
@@ -1,6 +1,6 @@
user admin
password password
-url ldap://korlev-calipso-dev.cisco.com:389
+url ldap://your-server.cisco.com:389
user_id_attribute CN
user_pass_attribute userpassword
user_objectclass inetOrgPerson
diff --git a/app/messages/message.py b/app/messages/message.py
index 03c9069..e940054 100644
--- a/app/messages/message.py
+++ b/app/messages/message.py
@@ -7,6 +7,7 @@
# which accompanies this distribution, and is available at #
# http://www.apache.org/licenses/LICENSE-2.0 #
###############################################################################
+import datetime
from typing import Union
from bson import ObjectId
@@ -26,9 +27,9 @@ class Message:
display_context: Union[str, ObjectId] = None,
level: str = DEFAULT_LEVEL,
object_type: str = None,
- ts: str = None,
- received_ts: str = None,
- finished_ts: str = None):
+ ts: datetime = None,
+ received_ts: datetime = None,
+ finished_ts: datetime = None):
super().__init__()
if level and level.lower() in self.LEVELS:
diff --git a/app/monitoring/handlers/monitoring_check_handler.py b/app/monitoring/handlers/monitoring_check_handler.py
index a299076..1436a46 100644
--- a/app/monitoring/handlers/monitoring_check_handler.py
+++ b/app/monitoring/handlers/monitoring_check_handler.py
@@ -89,10 +89,9 @@ class MonitoringCheckHandler(SpecialCharConverter):
level = error_level if error_level\
else ERROR_LEVEL[check_result['status']]
dt = datetime.datetime.utcfromtimestamp(check_result['executed'])
- ts = stringify_datetime(dt)
message = Message(msg_id=msg_id, env=self.env, source=SOURCE_SYSTEM,
object_id=obj_id, object_type=obj_type,
display_context=display_context, level=level,
- msg=check_result, ts=ts)
+ msg=check_result, ts=dt)
collection = self.inv.collections['messages']
collection.insert_one(message.get())
diff --git a/app/test/api/responders_test/resource/test_environment_configs.py b/app/test/api/responders_test/resource/test_environment_configs.py
index 7002ed7..6356f06 100644
--- a/app/test/api/responders_test/resource/test_environment_configs.py
+++ b/app/test/api/responders_test/resource/test_environment_configs.py
@@ -72,12 +72,21 @@ class TestEnvironmentConfigs(TestBase):
},
expected_code=base.BAD_REQUEST_CODE)
+ def test_get_environment_configs_list_with_wrong_distribution_version(self):
+ self.validate_get_request(environment_configs.URL,
+ params={
+ "distribution_version":
+ environment_configs.WRONG_DIST_VER
+ },
+ expected_code=base.BAD_REQUEST_CODE)
+
@patch(base.RESPONDER_BASE_READ)
def test_get_environment_configs_list_with_distribution(self, read):
self.validate_get_request(environment_configs.URL,
params={
"distribution":
- environment_configs.CORRECT_DISTRIBUTION
+ environment_configs.
+ CORRECT_DISTRIBUTION
},
mocks={
read: environment_configs.
@@ -377,11 +386,12 @@ class TestEnvironmentConfigs(TestBase):
def mock_validate_env_config_with_supported_envs(self, scanning,
monitoring, listening):
- InventoryMgr.is_feature_supported_in_env = lambda self, matches, feature: {
- EnvironmentFeatures.SCANNING: scanning,
- EnvironmentFeatures.MONITORING: monitoring,
- EnvironmentFeatures.LISTENING: listening
- }[feature]
+ InventoryMgr.is_feature_supported_in_env = \
+ lambda self, matches, feature: {
+ EnvironmentFeatures.SCANNING: scanning,
+ EnvironmentFeatures.MONITORING: monitoring,
+ EnvironmentFeatures.LISTENING: listening
+ }[feature]
@patch(base.RESPONDER_BASE_WRITE)
def test_post_environment_config(self, write):
diff --git a/app/test/api/responders_test/test_data/base.py b/app/test/api/responders_test/test_data/base.py
index d320340..b99d5bb 100644
--- a/app/test/api/responders_test/test_data/base.py
+++ b/app/test/api/responders_test/test_data/base.py
@@ -53,7 +53,9 @@ WRONG_ENV_TYPE = ""
CORRECT_ENV_TYPE = "development"
WRONG_DISTRIBUTION = "wrong-environment"
-CORRECT_DISTRIBUTION = "Mirantis-6.0"
+WRONG_DIST_VER = "wrong-environment"
+CORRECT_DISTRIBUTION = "Mirantis"
+CORRECT_DIST_VER = "6.0"
WRONG_OBJECT_ID = "58a2406e6a283a8bee15d43"
CORRECT_OBJECT_ID = "58a2406e6a283a8bee15d43f"
@@ -150,11 +152,8 @@ CONSTANTS_BY_NAMES = {
"production"
],
"distributions": [
- "Mirantis-6.0",
- "Mirantis-7.0",
- "Mirantis-8.0",
- "Mirantis-9.0",
- "RDO-Juno"
+ "Mirantis",
+ "RDO"
],
"environment_operational_status": [
"stopped",
diff --git a/app/test/api/responders_test/test_data/environment_configs.py b/app/test/api/responders_test/test_data/environment_configs.py
index a9e8885..4cea105 100644
--- a/app/test/api/responders_test/test_data/environment_configs.py
+++ b/app/test/api/responders_test/test_data/environment_configs.py
@@ -15,7 +15,9 @@ URL = "/environment_configs"
NAME = "Mirantis-Liberty-API"
UNKNOWN_NAME = "UNKNOWN NAME"
WRONG_DISTRIBUTION = base.WRONG_DISTRIBUTION
+WRONG_DIST_VER = base.WRONG_DIST_VER
CORRECT_DISTRIBUTION = base.CORRECT_DISTRIBUTION
+CORRECT_DIST_VER = base.CORRECT_DIST_VER
WRONG_MECHANISM_DRIVER = base.WRONG_MECHANISM_DRIVER
CORRECT_MECHANISM_DRIVER = base.CORRECT_MECHANISM_DRIVER
WRONG_TYPE_DRIVER = base.WRONG_TYPE_DRIVER
@@ -29,11 +31,13 @@ BOOL_LISTEN = BOOL_SCANNED = \
ENV_CONFIGS = [
{
- "distribution": "Mirantis-8.0",
+ "distribution": "Mirantis",
+ "distribution_version": "8.0",
"name": "Mirantis-Liberty-API"
},
{
- "distribution": "Mirantis-9.0",
+ "distribution": "Mirantis",
+ "distribution_version": "9.0",
"name": "Mirantis-Liberty"
}
]
@@ -44,7 +48,8 @@ ENV_CONFIGS_RESPONSE = {
ENV_CONFIGS_WITH_SPECIFIC_NAME = [
{
- "distribution": "Mirantis-8.0",
+ "distribution": "Mirantis",
+ "distribution_version": "8.0",
"name": NAME
}
]
@@ -52,10 +57,12 @@ ENV_CONFIGS_WITH_SPECIFIC_NAME = [
ENV_CONFIGS_WITH_SPECIFIC_DISTRIBUTION = [
{
"distribution": CORRECT_DISTRIBUTION,
+ "distribution_version": CORRECT_DIST_VER,
"name": "Mirantis-Liberty-API",
},
{
"distribution": CORRECT_DISTRIBUTION,
+ "distribution_version": CORRECT_DIST_VER,
"name": "Mirantis-Liberty"
}
]
@@ -206,7 +213,8 @@ ENV_CONFIG = {
"type": "Sensu"
}
],
- "distribution": "Mirantis-8.0",
+ "distribution": "Mirantis",
+ "distribution_version": "8.0",
"last_scanned": "2017-03-16T11:14:54Z",
"listen": True,
"mechanism_drivers": [
diff --git a/app/test/api/test_base.py b/app/test/api/test_base.py
index c126b2b..33185ec 100644
--- a/app/test/api/test_base.py
+++ b/app/test/api/test_base.py
@@ -84,8 +84,12 @@ class TestBase(TestCase):
expected_code,
expected_response)
- def get_updated_data(self, original_data, deleted_keys=[], updates={}):
+ def get_updated_data(self, original_data, deleted_keys=None, updates=None):
copy_data = copy.deepcopy(original_data)
+ if deleted_keys is None:
+ deleted_keys = []
+ if updates is None:
+ updates = {}
for key in deleted_keys:
del copy_data[key]
diff --git a/app/test/event_based_scan/test_interface_add.py b/app/test/event_based_scan/test_interface_add.py
index 04a1982..542f84e 100644
--- a/app/test/event_based_scan/test_interface_add.py
+++ b/app/test/event_based_scan/test_interface_add.py
@@ -25,7 +25,7 @@ class TestInterfaceAdd(TestEvent):
def get_by_id(self, env, object_id):
interface = self.values["payload"]["router_interface"]
host_id = self.values["publisher_id"].replace("network.", "", 1)
- router_id = encode_router_id(host_id, interface['id'])
+ router_id = encode_router_id(interface['id'])
if object_id == host_id:
return HOST
diff --git a/app/test/event_based_scan/test_interface_delete.py b/app/test/event_based_scan/test_interface_delete.py
index e416be4..7c3684a 100644
--- a/app/test/event_based_scan/test_interface_delete.py
+++ b/app/test/event_based_scan/test_interface_delete.py
@@ -34,8 +34,7 @@ class TestInterfaceDelete(TestEvent):
self.payload = self.values['payload']
self.interface = self.payload['router_interface']
self.port_id = self.interface['port_id']
- self.host_id = self.values["publisher_id"].replace("network.", "", 1)
- self.router_id = encode_router_id(self.host_id, self.interface['id'])
+ self.router_id = encode_router_id(self.interface['id'])
port_delete_mock = port_delete_class_mock.return_value
port_delete_mock.delete_port.return_value = EventResult(result=True)
diff --git a/app/test/event_based_scan/test_router_add.py b/app/test/event_based_scan/test_router_add.py
index 03be8df..b450cf5 100644
--- a/app/test/event_based_scan/test_router_add.py
+++ b/app/test/event_based_scan/test_router_add.py
@@ -45,7 +45,7 @@ class TestRouterAdd(TestEvent):
self.router = self.payload['router']
self.network_id = self.router['external_gateway_info']['network_id']
self.host_id = self.values["publisher_id"].replace("network.", "", 1)
- self.router_id = encode_router_id(self.host_id, self.router['id'])
+ self.router_id = encode_router_id(self.router['id'])
self.inv.get_by_id.side_effect = self.get_by_id
diff --git a/app/test/event_based_scan/test_router_update.py b/app/test/event_based_scan/test_router_update.py
index 390bd6e..93f44a3 100644
--- a/app/test/event_based_scan/test_router_update.py
+++ b/app/test/event_based_scan/test_router_update.py
@@ -36,7 +36,7 @@ class TestRouterUpdate(TestEvent):
self.payload = self.values['payload']
self.router = self.payload['router']
self.host_id = self.values['publisher_id'].replace("network.", "", 1)
- self.router_id = encode_router_id(self.host_id, self.router['id'])
+ self.router_id = encode_router_id(self.router['id'])
self.gw_port_id = ROUTER_DOCUMENT['gw_port_id']
scanner_mock = scanner_class_mock.return_value
diff --git a/app/test/fetch/api_fetch/test_data/configurations.py b/app/test/fetch/api_fetch/test_data/configurations.py
index cca43be..8e7eb5d 100644
--- a/app/test/fetch/api_fetch/test_data/configurations.py
+++ b/app/test/fetch/api_fetch/test_data/configurations.py
@@ -41,7 +41,8 @@ CONFIGURATIONS = {
"pwd": "NF2nSv3SisooxPkCTr8fbfOa"
}
],
- "distribution": "Mirantis-8.0",
+ "distribution": "Mirantis",
+ "distribution_version": "8.0",
"last_scanned:": "5/8/16",
"name": "Mirantis-Liberty-Xiaocong",
"network_plugins": [
diff --git a/app/test/fetch/cli_fetch/test_cli_access.py b/app/test/fetch/cli_fetch/test_cli_access.py
index d32e1ed..1d14450 100644
--- a/app/test/fetch/cli_fetch/test_cli_access.py
+++ b/app/test/fetch/cli_fetch/test_cli_access.py
@@ -10,10 +10,11 @@
import time
from discover.fetchers.cli.cli_access import CliAccess
+from discover.configuration import Configuration
+from test.fetch.api_fetch.test_data.configurations import CONFIGURATIONS
from test.fetch.cli_fetch.test_data.cli_access import *
from test.fetch.test_fetch import TestFetch
-from unittest.mock import MagicMock, patch
-from utils.ssh_conn import SshConn
+from unittest.mock import MagicMock
class TestCliAccess(TestFetch):
@@ -22,6 +23,11 @@ class TestCliAccess(TestFetch):
super().setUp()
self.configure_environment()
self.cli_access = CliAccess()
+ self.conf = Configuration()
+ self.cli_access.configuration = self.conf
+ self.conf.use_env = MagicMock()
+ self.conf.environment = CONFIGURATIONS
+ self.conf.configuration = CONFIGURATIONS["configuration"]
def check_run_result(self, is_gateway_host,
enable_cache,
@@ -40,7 +46,8 @@ class TestCliAccess(TestFetch):
self.ssh_conn.exec.return_value = exec_result
self.ssh_conn.is_gateway_host.return_value = is_gateway_host
result = self.cli_access.run(COMMAND, COMPUTE_HOST_ID,
- on_gateway=False, enable_cache=enable_cache)
+ on_gateway=False,
+ enable_cache=enable_cache)
self.assertEqual(result, expected_result, err_msg)
# reset the cached commands after testing
diff --git a/app/test/fetch/db_fetch/mock_cursor.py b/app/test/fetch/db_fetch/mock_cursor.py
index 71efd3b..10c67e1 100644
--- a/app/test/fetch/db_fetch/mock_cursor.py
+++ b/app/test/fetch/db_fetch/mock_cursor.py
@@ -7,19 +7,39 @@
# which accompanies this distribution, and is available at #
# http://www.apache.org/licenses/LICENSE-2.0 #
###############################################################################
+
+
+def require_open(method):
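+    # decorator for MockCursor methods: raise ValueError once close() has been
+    # called, mimicking the behaviour of a real database cursor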
+ def wrapped(self, *args, **kwargs):
+ if self.closed:
+ raise ValueError("Cursor is closed")
+ return method(self, *args, **kwargs)
+ return wrapped
+
+
class MockCursor:
def __init__(self, result):
self.result = result
self.current = 0
+ self.closed = False
+ @require_open
def __next__(self):
if self.current < len(self.result):
- next = self.result[self.current]
+ nxt = self.result[self.current]
self.current += 1
- return next
+ return nxt
else:
raise StopIteration
+ @require_open
def __iter__(self):
return self
+
+ @require_open
+ def fetchall(self):
+ return self.result
+
+ def close(self):
+ self.closed = True
diff --git a/app/test/fetch/db_fetch/test_data/db_fetch_host_network_agents.py b/app/test/fetch/db_fetch/test_data/db_fetch_host_network_agents.py
index 6188ddf..b6d344c 100644
--- a/app/test/fetch/db_fetch/test_data/db_fetch_host_network_agents.py
+++ b/app/test/fetch/db_fetch/test_data/db_fetch_host_network_agents.py
@@ -37,13 +37,13 @@ NETWORK_AGENT = [
NETWORK_AGENT_WITH_MECHANISM_DRIVERS_IN_CONFIG_RESULTS = [
{
'configurations': {},
- 'id': 'OVS-1764430c-c09e-4717-86fa-c04350b1fcbb',
+ 'id': 'neutron-openvswitch-agent-1764430c-c09e-4717-86fa-c04350b1fcbb',
'binary': 'neutron-openvswitch-agent',
'name': 'neutron-openvswitch-agent'
},
{
'configurations': {},
- 'id': 'OVS-2c2ddfee-91f9-47da-bd65-aceecd998b7c',
+ 'id': 'neutron-dhcp-agent-2c2ddfee-91f9-47da-bd65-aceecd998b7c',
'binary': 'neutron-dhcp-agent',
'name': 'neutron-dhcp-agent'
}
@@ -52,13 +52,13 @@ NETWORK_AGENT_WITH_MECHANISM_DRIVERS_IN_CONFIG_RESULTS = [
NETWORK_AGENT_WITHOUT_MECHANISM_DRIVERS_IN_CONFIG_RESULTS = [
{
'configurations': {},
- 'id': 'network_agent-1764430c-c09e-4717-86fa-c04350b1fcbb',
+ 'id': 'neutron-openvswitch-agent-1764430c-c09e-4717-86fa-c04350b1fcbb',
'binary': 'neutron-openvswitch-agent',
'name': 'neutron-openvswitch-agent'
},
{
'configurations': {},
- 'id': 'network_agent-2c2ddfee-91f9-47da-bd65-aceecd998b7c',
+ 'id': 'neutron-dhcp-agent-2c2ddfee-91f9-47da-bd65-aceecd998b7c',
'binary': 'neutron-dhcp-agent',
'name': 'neutron-dhcp-agent'
}
diff --git a/app/test/fetch/db_fetch/test_data/db_fetch_oteps.py b/app/test/fetch/db_fetch/test_data/db_fetch_oteps.py
index a5bc63d..2bd1784 100644
--- a/app/test/fetch/db_fetch/test_data/db_fetch_oteps.py
+++ b/app/test/fetch/db_fetch/test_data/db_fetch_oteps.py
@@ -34,10 +34,12 @@ VEDGE_WITHOUT_TUNNEL_TYPES = {
}
}
NON_ICEHOUSE_CONFIGS = {
- "distribution": "Mirantis-8.0"
+ "distribution": "Mirantis",
+ "distribution_version": "8.0"
}
ICEHOUSE_CONFIGS = {
- "distribution": "Canonical-icehouse"
+ "distribution": "Canonical",
+ "distribution_version": "icehouse"
}
HOST = {
"host": "node-5.cisco.com",
diff --git a/app/test/fetch/db_fetch/test_data/db_fetch_vedges_ovs.py b/app/test/fetch/db_fetch/test_data/db_fetch_vedges_ovs.py
index 818704c..c1f9d4f 100644
--- a/app/test/fetch/db_fetch/test_data/db_fetch_vedges_ovs.py
+++ b/app/test/fetch/db_fetch/test_data/db_fetch_vedges_ovs.py
@@ -166,3 +166,58 @@ DOC_TO_GET_OVERLAY = {
"agent_type": "Open vSwitch agent",
"configurations": {"tunneling_ip": "192.168.2.3"},
}
+
+LIST_IFACES_LINES = [
+ "eth0",
+ "p",
+ "t"
+]
+LIST_IFACES_NAMES = LIST_IFACES_LINES
+LIST_IFACES_LINES_MIRANTIS = [
+    "eth0--br-eth0",
+    "phy-eth0"
+]
+LIST_IFACES_NAMES_MIRANTIS = ["eth0"]
+
+VEDGE_CONFIGURATIONS_MIRANTIS = {
+ "bridge_mappings": {
+ "br-prv": "eth0"
+ }
+}
+VEDGE_CONFIGURATIONS = {
+ "bridge_mappings": {
+ "physnet1": "eth0",
+ "physnet2": "p",
+ "physnet3": "t",
+ "physnet4": "p",
+ "physnet5": "p"
+ }
+}
+
+VEDGE_MIRANTIS = {
+ 'host': HOST['host'],
+ 'ports': {
+ "eth0": {"name": "eth0", "id": "eth0-port_id"}
+ },
+ 'configurations': VEDGE_CONFIGURATIONS_MIRANTIS
+}
+VEDGE = {
+ 'host': HOST['host'],
+ 'ports': {
+ "eth0": {"name": "eth0", "id": "eth0-port_id"},
+ "p": {"name": "p", "id": "p-port_id"},
+ "t": {"name": "t", "id": "t-port_id"}
+ },
+ 'configurations': VEDGE_CONFIGURATIONS
+}
+
+ANOTHER_DIST = "another distribution"
+
+PNICS_MIRANTIS = {
+ "eth0": {"name": "eth0", "mac_address": "eth0 mac_address"}
+}
+PNICS = {
+ "eth0": {"name": "eth0", "mac_address": "eth0 mac_address"},
+ "p": {"name": "p", "mac_address": "p mac_address"},
+ "t": {"name": "t", "mac_address": "t mac_address"}
+}
diff --git a/app/test/fetch/db_fetch/test_db_fetch_oteps.py b/app/test/fetch/db_fetch/test_db_fetch_oteps.py
index 7d29622..a161e03 100644
--- a/app/test/fetch/db_fetch/test_db_fetch_oteps.py
+++ b/app/test/fetch/db_fetch/test_db_fetch_oteps.py
@@ -32,11 +32,13 @@ class TestDbFetchOteps(TestFetch):
original_get_vconnector = self.fetcher.get_vconnector
self.fetcher.get_vconnector = MagicMock()
self.fetcher.inv.get_by_id = MagicMock(side_effect=[vedge, host])
+ original_get_env_config = self.fetcher.config.get_env_config
self.fetcher.config.get_env_config = MagicMock(return_value=config)
self.fetcher.get_objects_list_for_id = MagicMock(return_value=oteps_from_db)
results = self.fetcher.get(VEDGE_ID)
self.assertEqual(results, expected_results, err_msg)
self.fetcher.get_vconnector = original_get_vconnector
+ self.fetcher.config.get_env_config = original_get_env_config
def test_get(self):
test_cases = [
diff --git a/app/test/fetch/db_fetch/test_db_fetch_vedges_ovs.py b/app/test/fetch/db_fetch/test_db_fetch_vedges_ovs.py
index 0cfb500..9916e5d 100644
--- a/app/test/fetch/db_fetch/test_db_fetch_vedges_ovs.py
+++ b/app/test/fetch/db_fetch/test_db_fetch_vedges_ovs.py
@@ -7,6 +7,8 @@
# which accompanies this distribution, and is available at #
# http://www.apache.org/licenses/LICENSE-2.0 #
###############################################################################
+import copy
+
from discover.fetchers.db.db_fetch_vedges_ovs import DbFetchVedgesOvs
from test.fetch.test_fetch import TestFetch
from test.fetch.db_fetch.test_data.db_fetch_vedges_ovs import *
@@ -20,6 +22,12 @@ class TestDbFetchVedgesOvs(TestFetch):
self.configure_environment()
self.fetcher = DbFetchVedgesOvs()
self.fetcher.set_env(self.env)
+ self.original_inv_set = self.fetcher.inv.set
+ self.fetcher.inv.set = MagicMock()
+
+ def tearDown(self):
+ super().tearDown()
+ self.fetcher.inv.set = self.original_inv_set
def check_get_result(self,
objects_from_db, host,
@@ -32,7 +40,8 @@ class TestDbFetchVedgesOvs(TestFetch):
original_fetch_ports = self.fetcher.fetch_ports
original_get_overlay_tunnels = self.fetcher.get_overlay_tunnels
- self.fetcher.get_objects_list_for_id = MagicMock(return_value=objects_from_db)
+ self.fetcher.get_objects_list_for_id = \
+ MagicMock(return_value=objects_from_db)
self.fetcher.inv.get_by_id = MagicMock(return_value=host)
self.fetcher.run_fetch_lines = MagicMock(return_value=vsctl_lines)
self.fetcher.fetch_ports = MagicMock(return_value=ports)
@@ -96,7 +105,7 @@ class TestDbFetchVedgesOvs(TestFetch):
results = self.fetcher.fetch_ports_from_dpctl(HOST['id'])
self.fetcher.run_fetch_lines = original_run_fetch_lines
self.assertEqual(results, DPCTL_RESULTS,
- "Can' t get correct ports info from dpctl lines")
+ "Can't get correct ports info from dpctl lines")
def test_fetch_port_tags_from_vsctl(self):
ports = self.fetcher.fetch_port_tags_from_vsctl(VSCTL_LINES,
@@ -108,3 +117,80 @@ class TestDbFetchVedgesOvs(TestFetch):
results = self.fetcher.get_overlay_tunnels(DOC_TO_GET_OVERLAY,
VSCTL_LINES)
self.assertEqual(results, TUNNEL_PORTS)
+
+ @staticmethod
+ def get_test_pnic_for_interface_mirantis(search: dict,
+ get_single: bool=True):
+ if not get_single:
+ # we're only supposed to get calls with get_single == True
+ return []
+        return PNICS_MIRANTIS.get(search.get('name'), {})
+
+ @staticmethod
+ def get_test_pnic_for_interface(search: dict,
+ get_single: bool=True):
+ if not get_single:
+ # we're only supposed to get calls with get_single == True
+ return []
+ return PNICS.get(search.get('name'), {})
+
+ @staticmethod
+ def get_expected_results_for_get_pnics(test_pnics: dict, ports: dict,
+ ifaces_names: list) -> dict:
+ expected_results = {}
+ for p in test_pnics.values():
+ if p.get("name") not in ifaces_names:
+ continue
+ p1 = copy.deepcopy(p)
+ name = p1["name"]
+ port = ports[name]
+ p1["port_id"] = port["id"]
+ expected_results[name] = p1
+ return expected_results
+
+ def test_get_pnics(self):
+ expected_results = \
+            self.get_expected_results_for_get_pnics(PNICS_MIRANTIS,
+ VEDGE_MIRANTIS["ports"],
+ LIST_IFACES_NAMES_MIRANTIS)
+ self.check_get_pnics_for_dist(VEDGE_MIRANTIS,
+ LIST_IFACES_LINES_MIRANTIS,
+ LIST_IFACES_NAMES_MIRANTIS,
+ expected_results,
+ self.get_test_pnic_for_interface_mirantis,
+ self.fetcher.MIRANTIS_DIST,
+ ver="6.0",
+ msg="Incorrect get_pnics result "
+ "(Mirantis)")
+ expected_results = \
+ self.get_expected_results_for_get_pnics(PNICS,
+ VEDGE["ports"],
+ LIST_IFACES_NAMES)
+ self.check_get_pnics_for_dist(VEDGE,
+ LIST_IFACES_LINES,
+ LIST_IFACES_NAMES,
+ expected_results,
+ self.get_test_pnic_for_interface,
+ ANOTHER_DIST,
+ msg="Incorrect get_pnics result")
+
+ def check_get_pnics_for_dist(self, test_vedge,
+ ifaces_list_output, ifaces_list_clear,
+ expected_results,
+ pnic_find_func,
+ dist, ver=None, msg=None):
+ self.fetcher.configuration.environment = {
+ "distribution": dist,
+ "distribution_version": ver
+ }
+ original_run_fetch_lines = self.fetcher.run_fetch_lines
+ self.fetcher.run_fetch_lines = \
+ MagicMock(return_value=ifaces_list_output)
+ original_find_items = self.fetcher.inv.find_items
+ self.fetcher.inv.find_items = pnic_find_func
+ vedge = copy.deepcopy(test_vedge)
+ results = self.fetcher.get_pnics(vedge)
+ self.fetcher.run_fetch_lines = original_run_fetch_lines
+ self.fetcher.inv.find_items = original_find_items
+ self.assertTrue(vedge.get("pnic") in ifaces_list_clear)
+ self.assertEqual(results, expected_results, msg)
diff --git a/app/test/scan/test_data/configurations.py b/app/test/scan/test_data/configurations.py
index 59ad649..96dbc23 100644
--- a/app/test/scan/test_data/configurations.py
+++ b/app/test/scan/test_data/configurations.py
@@ -58,7 +58,8 @@ CONFIGURATIONS = {
"type": "Sensu"
}
],
- "distribution": "Mirantis-8.0",
+ "distribution": "Mirantis",
+ "distribution_version": "8.0",
"last_scanned:": "5/8/16",
"name": "Mirantis-Liberty-Nvn",
"mechanism_drivers": [
diff --git a/app/test/scan/test_data/scanner.py b/app/test/scan/test_data/scanner.py
index 36c2033..23838aa 100644
--- a/app/test/scan/test_data/scanner.py
+++ b/app/test/scan/test_data/scanner.py
@@ -120,7 +120,8 @@ CONFIGURATIONS = {
"pwd": "NF2nSv3SisooxPkCTr8fbfOa"
}
],
- "distribution": "Mirantis-8.0",
+ "distribution": "Mirantis",
+ "distribution_version": "8.0",
"last_scanned:": "5/8/16",
"name": "Mirantis-Liberty-Nvn",
"mechanism_drivers": [
@@ -330,7 +331,8 @@ CONFIGURATIONS_WITHOUT_MECHANISM_DRIVERS = {
"pwd": "NF2nSv3SisooxPkCTr8fbfOa"
}
],
- "distribution": "Mirantis-8.0",
+ "distribution": "Mirantis",
+ "distribution_version": "8.0",
"last_scanned:": "5/8/16",
"name": "Mirantis-Liberty-Nvn",
"operational": "yes",
diff --git a/app/test/utils/test_cli_dist_translator.py b/app/test/utils/test_cli_dist_translator.py
new file mode 100644
index 0000000..e6a8080
--- /dev/null
+++ b/app/test/utils/test_cli_dist_translator.py
@@ -0,0 +1,38 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import unittest
+
+from utils.cli_dist_translator import CliDistTranslator
+
+
+class TestCliDistTranslator(unittest.TestCase):
+
+ MERCURY_DIST = 'Mercury'
+ MERCURY_VER = '10239'
+
+ SOURCE_TEXT = 'some text'
+ IP_LINK_TEXT = 'ip link show'
+ IP_LINK_TRANSLATED_MERCURY = \
+ 'docker exec --user root ovs_vswitch_10239 ip link show'
+
+ def test_unknown_dist(self):
+ translator = CliDistTranslator('UNKNOWN')
+ result = translator.translate(self.SOURCE_TEXT)
+ self.assertEqual(result, self.SOURCE_TEXT,
+ 'unknown dist should not cause translation')
+
+ def test_mercury_dist(self):
+ translator = CliDistTranslator(self.MERCURY_DIST, self.MERCURY_VER)
+ result = translator.translate(self.SOURCE_TEXT)
+ self.assertEqual(result, self.SOURCE_TEXT,
+ 'known dist should not translate unrelated texts')
+ result = translator.translate(self.IP_LINK_TEXT)
+ self.assertEqual(result, self.IP_LINK_TRANSLATED_MERCURY,
+ 'incorrect translation of command for mercury dist')
diff --git a/app/utils/cli_dist_translator.py b/app/utils/cli_dist_translator.py
new file mode 100644
index 0000000..4073bb2
--- /dev/null
+++ b/app/utils/cli_dist_translator.py
@@ -0,0 +1,59 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
+class CliDistTranslator:
+
+ DOCKER_CALL = 'docker exec --user root'
+
+ TRANSLATIONS = {
+ # special handling of cli commands in Mercury environments
+ 'Mercury': {
+ 'ip netns list':
+ '{docker_call} neutron_l3_agent_{version} {cmd};;;'
+ '{docker_call} neutron_dhcp_agent_{version} {cmd}',
+ 'ip netns exec qdhcp': \
+ '{docker_call} neutron_dhcp_agent_{version} {cmd}',
+ 'ip netns exec qrouter': \
+ '{docker_call} neutron_l3_agent_{version} {cmd}',
+ 'virsh': '{docker_call} novalibvirt_{version} {cmd}',
+ 'ip link': '{docker_call} ovs_vswitch_{version} {cmd}',
+ 'ip -d link': '{docker_call} ovs_vswitch_{version} {cmd}',
+ 'bridge fdb show': '{docker_call} ovs_vswitch_{version} {cmd}',
+ 'brctl': '{docker_call} ovs_vswitch_{version} {cmd}',
+ 'ovs-vsctl': '{docker_call} ovs_vswitch_{version} {cmd}',
+ 'ovs-dpctl': '{docker_call} ovs_vswitch_{version} {cmd}'
+ }
+ }
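+
+    # Example (matching the unit test added in this change): for ('Mercury', '10239'),
+    # translate('ip link show') expands to
+    # 'docker exec --user root ovs_vswitch_10239 ip link show'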
+
+ def __init__(self, dist: str, dist_version: str=''):
+ self.translation = self.TRANSLATIONS.get(dist, {})
+ self.dist_version = dist_version
+
+ def translate(self, command_to_translate: str) -> str:
+ for command in self.translation.keys():
+ if command in command_to_translate:
+ return self.command_translation(command_to_translate,
+ command)
+ return command_to_translate
+
+ def command_translation(self, command_to_translate: str,
+ translation_key: str) -> str:
+ cmd_translation = self.translation.get(translation_key)
+ if not cmd_translation:
+ return command_to_translate
+ translation_dict = {
+ 'docker_call': self.DOCKER_CALL,
+ 'version': self.dist_version,
+ 'cmd': translation_key
+ }
+ cmd_translation = cmd_translation.format(**translation_dict)
+ cmd_translation = command_to_translate.replace(translation_key,
+ cmd_translation)
+ return cmd_translation
diff --git a/app/utils/inventory_mgr.py b/app/utils/inventory_mgr.py
index 257b0e3..77c1165 100644
--- a/app/utils/inventory_mgr.py
+++ b/app/utils/inventory_mgr.py
@@ -77,13 +77,16 @@ class InventoryMgr(MongoAccess, metaclass=Singleton):
self.set_collection("clique_constraints")
self.set_collection("cliques")
self.set_collection("monitoring_config")
- self.set_collection("constants", use_default_name=True)
self.set_collection("scans")
self.set_collection("messages")
- self.set_collection("monitoring_config_templates",
- use_default_name=True)
self.set_collection("environments_config")
self.set_collection("supported_environments")
+ self.set_collection("constants",
+ use_default_name=True)
+ self.set_collection("monitoring_config_templates",
+ use_default_name=True)
+ self.set_collection("api_tokens",
+ use_default_name=True)
def clear(self, scan_plan):
if scan_plan.inventory_only:
@@ -348,9 +351,13 @@ class InventoryMgr(MongoAccess, metaclass=Singleton):
if isinstance(env_config['mechanism_drivers'], list) \
else env_config['mechanism_drivers']
- full_env = {'environment.distribution': env_config['distribution'],
- 'environment.type_drivers': env_config['type_drivers'],
- 'environment.mechanism_drivers': mechanism_driver}
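+        # distribution_version is stored as a list in supported_environments,
+        # hence the "$in" query against the environment's single version value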
+ full_env = {
+ 'environment.distribution': env_config['distribution'],
+ 'environment.distribution_version':
+ {"$in": [env_config['distribution_version']]},
+ 'environment.type_drivers': env_config['type_drivers'],
+ 'environment.mechanism_drivers': mechanism_driver
+ }
return self.is_feature_supported_in_env(full_env, feature)
def is_feature_supported_in_env(self, env_def: dict,
diff --git a/app/utils/logging/mongo_logging_handler.py b/app/utils/logging/mongo_logging_handler.py
index b69270e..ffb6f85 100644
--- a/app/utils/logging/mongo_logging_handler.py
+++ b/app/utils/logging/mongo_logging_handler.py
@@ -44,10 +44,9 @@ class MongoLoggingHandler(logging.Handler):
# make ID from current timestamp
now = datetime.datetime.utcnow()
d = now - datetime.datetime(1970, 1, 1)
- ts = stringify_datetime(now)
timestamp_id = '{}.{}.{}'.format(d.days, d.seconds, d.microseconds)
source = self.SOURCE_SYSTEM
message = Message(msg_id=timestamp_id, env=self.env, source=source,
- msg=Logger.formatter.format(record), ts=ts,
+ msg=Logger.formatter.format(record), ts=now,
level=record.levelname)
self.inv.collections['messages'].insert_one(message.get()) \ No newline at end of file
diff --git a/app/utils/mongo_access.py b/app/utils/mongo_access.py
index d39794f..d4599f1 100644
--- a/app/utils/mongo_access.py
+++ b/app/utils/mongo_access.py
@@ -86,7 +86,7 @@ class MongoAccess(DictNamingConverter):
self.prepare_connect_uri()
MongoAccess.client = MongoClient(
self.connect_params["server"],
- self.connect_params["port"]
+ int(self.connect_params["port"])
)
MongoAccess.db = getattr(MongoAccess.client,
config_params.get('auth_db', self.DB_NAME))
diff --git a/app/utils/util.py b/app/utils/util.py
index 385dea7..ae7b518 100644
--- a/app/utils/util.py
+++ b/app/utils/util.py
@@ -147,8 +147,8 @@ def setup_args(args: dict,
return dict(defaults, **args)
-def encode_router_id(host_id: str, uuid: str):
- return '-'.join([host_id, 'qrouter', uuid])
+def encode_router_id(uuid: str):
+ return '-'.join(['qrouter', uuid])
def decode_router_id(router_id: str):