aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorYaron Yogev <yaronyogev@gmail.com>2017-07-27 09:02:54 +0300
committerYaron Yogev <yaronyogev@gmail.com>2017-07-27 14:56:25 +0300
commit7e83d0876ddb84a45e130eeba28bc40ef53c074b (patch)
tree47d76239ae7658d87c66abd142df92709427e7dd
parent378ecbd8947589b9cbb39013a0c2e2aa201e03bd (diff)
Calipso initial release for OPNFV
Change-Id: I7210c244b0c10fa80bfa8c77cb86c9d6ddf8bc88 Signed-off-by: Yaron Yogev <yaronyogev@gmail.com>
-rw-r--r--LICENSE2
-rw-r--r--README.md50
-rw-r--r--app/api/__init__.py10
-rw-r--r--app/api/app.py71
-rw-r--r--app/api/auth/__init__.py10
-rw-r--r--app/api/auth/auth.py71
-rw-r--r--app/api/auth/token.py39
-rw-r--r--app/api/backends/__init__.py10
-rw-r--r--app/api/backends/ldap_access.py89
-rw-r--r--app/api/exceptions/__init__.py10
-rw-r--r--app/api/exceptions/exceptions.py26
-rw-r--r--app/api/middleware/__init__.py10
-rw-r--r--app/api/middleware/authentication.py63
-rw-r--r--app/api/responders/__init__.py10
-rw-r--r--app/api/responders/auth/__init__.py10
-rw-r--r--app/api/responders/auth/tokens.py117
-rw-r--r--app/api/responders/resource/__init__.py10
-rw-r--r--app/api/responders/resource/aggregates.py157
-rw-r--r--app/api/responders/resource/clique_constraints.py67
-rw-r--r--app/api/responders/resource/clique_types.py103
-rw-r--r--app/api/responders/resource/cliques.py73
-rw-r--r--app/api/responders/resource/constants.py30
-rw-r--r--app/api/responders/resource/environment_configs.py381
-rw-r--r--app/api/responders/resource/inventory.py65
-rw-r--r--app/api/responders/resource/links.py76
-rw-r--r--app/api/responders/resource/messages.py78
-rw-r--r--app/api/responders/resource/monitoring_config_templates.py65
-rw-r--r--app/api/responders/resource/scans.py111
-rw-r--r--app/api/responders/resource/scheduled_scans.py113
-rw-r--r--app/api/responders/responder_base.py223
-rwxr-xr-xapp/api/server.py74
-rw-r--r--app/api/validation/__init__.py10
-rw-r--r--app/api/validation/data_validate.py185
-rw-r--r--app/api/validation/regex.py57
-rw-r--r--app/config/events.json58
-rw-r--r--app/config/scanners.json370
-rw-r--r--app/discover/__init__.py10
-rw-r--r--app/discover/clique_finder.py174
-rw-r--r--app/discover/configuration.py70
-rw-r--r--app/discover/event_handler.py45
-rw-r--r--app/discover/event_manager.py265
-rw-r--r--app/discover/events/__init__.py10
-rw-r--r--app/discover/events/event_base.py36
-rw-r--r--app/discover/events/event_delete_base.py60
-rw-r--r--app/discover/events/event_instance_add.py45
-rw-r--r--app/discover/events/event_instance_delete.py18
-rw-r--r--app/discover/events/event_instance_update.py55
-rw-r--r--app/discover/events/event_interface_add.py139
-rw-r--r--app/discover/events/event_interface_delete.py40
-rw-r--r--app/discover/events/event_metadata_parser.py75
-rw-r--r--app/discover/events/event_network_add.py50
-rw-r--r--app/discover/events/event_network_delete.py17
-rw-r--r--app/discover/events/event_network_update.py44
-rw-r--r--app/discover/events/event_port_add.py309
-rw-r--r--app/discover/events/event_port_delete.py80
-rw-r--r--app/discover/events/event_port_update.py38
-rw-r--r--app/discover/events/event_router_add.py123
-rw-r--r--app/discover/events/event_router_delete.py37
-rw-r--r--app/discover/events/event_router_update.py82
-rw-r--r--app/discover/events/event_subnet_add.py154
-rw-r--r--app/discover/events/event_subnet_delete.py57
-rw-r--r--app/discover/events/event_subnet_update.py102
-rw-r--r--app/discover/events/listeners/__init__.py10
-rwxr-xr-xapp/discover/events/listeners/default_listener.py314
-rw-r--r--app/discover/events/listeners/listener_base.py18
-rw-r--r--app/discover/fetch_host_object_types.py37
-rw-r--r--app/discover/fetch_region_object_types.py37
-rw-r--r--app/discover/fetcher.py35
-rw-r--r--app/discover/fetcher_new.py30
-rw-r--r--app/discover/fetchers/__init__.py9
-rw-r--r--app/discover/fetchers/aci/__init__.py9
-rw-r--r--app/discover/fetchers/aci/aci_access.py200
-rw-r--r--app/discover/fetchers/aci/aci_fetch_switch_pnic.py91
-rw-r--r--app/discover/fetchers/api/__init__.py9
-rw-r--r--app/discover/fetchers/api/api_access.py195
-rw-r--r--app/discover/fetchers/api/api_fetch_availability_zones.py56
-rw-r--r--app/discover/fetchers/api/api_fetch_end_points.py35
-rw-r--r--app/discover/fetchers/api/api_fetch_host_instances.py59
-rw-r--r--app/discover/fetchers/api/api_fetch_network.py76
-rw-r--r--app/discover/fetchers/api/api_fetch_networks.py86
-rw-r--r--app/discover/fetchers/api/api_fetch_port.py60
-rw-r--r--app/discover/fetchers/api/api_fetch_ports.py55
-rw-r--r--app/discover/fetchers/api/api_fetch_project_hosts.py144
-rw-r--r--app/discover/fetchers/api/api_fetch_projects.py66
-rw-r--r--app/discover/fetchers/api/api_fetch_regions.py51
-rw-r--r--app/discover/fetchers/cli/__init__.py9
-rw-r--r--app/discover/fetchers/cli/cli_access.py206
-rw-r--r--app/discover/fetchers/cli/cli_fetch_host_pnics.py122
-rw-r--r--app/discover/fetchers/cli/cli_fetch_host_pnics_vpp.py44
-rw-r--r--app/discover/fetchers/cli/cli_fetch_host_vservice.py80
-rw-r--r--app/discover/fetchers/cli/cli_fetch_host_vservices.py27
-rw-r--r--app/discover/fetchers/cli/cli_fetch_instance_vnics.py22
-rw-r--r--app/discover/fetchers/cli/cli_fetch_instance_vnics_base.py68
-rw-r--r--app/discover/fetchers/cli/cli_fetch_instance_vnics_vpp.py18
-rw-r--r--app/discover/fetchers/cli/cli_fetch_oteps_lxb.py86
-rw-r--r--app/discover/fetchers/cli/cli_fetch_vconnectors.py40
-rw-r--r--app/discover/fetchers/cli/cli_fetch_vconnectors_lxb.py35
-rw-r--r--app/discover/fetchers/cli/cli_fetch_vconnectors_ovs.py56
-rw-r--r--app/discover/fetchers/cli/cli_fetch_vconnectors_vpp.py64
-rw-r--r--app/discover/fetchers/cli/cli_fetch_vpp_vedges.py58
-rw-r--r--app/discover/fetchers/cli/cli_fetch_vservice_vnics.py140
-rw-r--r--app/discover/fetchers/db/__init__.py9
-rw-r--r--app/discover/fetchers/db/db_access.py142
-rw-r--r--app/discover/fetchers/db/db_fetch_aggregate_hosts.py36
-rw-r--r--app/discover/fetchers/db/db_fetch_aggregates.py21
-rw-r--r--app/discover/fetchers/db/db_fetch_availability_zones.py22
-rw-r--r--app/discover/fetchers/db/db_fetch_az_network_hosts.py31
-rw-r--r--app/discover/fetchers/db/db_fetch_host_instances.py15
-rw-r--r--app/discover/fetchers/db/db_fetch_host_network_agents.py35
-rw-r--r--app/discover/fetchers/db/db_fetch_instances.py60
-rw-r--r--app/discover/fetchers/db/db_fetch_oteps.py81
-rw-r--r--app/discover/fetchers/db/db_fetch_port.py34
-rw-r--r--app/discover/fetchers/db/db_fetch_vedges_ovs.py178
-rw-r--r--app/discover/fetchers/db/db_fetch_vedges_vpp.py56
-rw-r--r--app/discover/fetchers/folder_fetcher.py36
-rw-r--r--app/discover/find_links.py30
-rw-r--r--app/discover/find_links_for_instance_vnics.py59
-rw-r--r--app/discover/find_links_for_oteps.py85
-rw-r--r--app/discover/find_links_for_pnics.py58
-rw-r--r--app/discover/find_links_for_vconnectors.py88
-rw-r--r--app/discover/find_links_for_vedges.py124
-rw-r--r--app/discover/find_links_for_vservice_vnics.py56
-rw-r--r--app/discover/manager.py45
-rw-r--r--app/discover/monitoring_mgr.py10
-rw-r--r--app/discover/network_agents_list.py23
-rw-r--r--app/discover/plugins/__init__.py10
-rwxr-xr-xapp/discover/scan.py324
-rw-r--r--app/discover/scan_error.py11
-rw-r--r--app/discover/scan_manager.py294
-rw-r--r--app/discover/scan_metadata_parser.py202
-rw-r--r--app/discover/scanner.py253
-rw-r--r--app/install/calipso-installer.py380
-rw-r--r--app/install/calipso_mongo_access.conf.example4
-rw-r--r--app/install/db/attributes_for_hover_on_data.json89
-rw-r--r--app/install/db/clique_constraints.json20
-rw-r--r--app/install/db/clique_types.json56
-rw-r--r--app/install/db/cliques.json3
-rw-r--r--app/install/db/constants.json668
-rw-r--r--app/install/db/environments_config.json78
-rw-r--r--app/install/db/inventory.json3
-rw-r--r--app/install/db/link_types.json184
-rw-r--r--app/install/db/links.json3
-rw-r--r--app/install/db/messages.json44
-rw-r--r--app/install/db/meteor_accounts_loginServiceConfiguration.json3
-rw-r--r--app/install/db/monitoring_config.json3
-rw-r--r--app/install/db/monitoring_config_templates.json378
-rw-r--r--app/install/db/network_agent_types.json52
-rw-r--r--app/install/db/roles.json26
-rw-r--r--app/install/db/scans.json24
-rw-r--r--app/install/db/scheduled_scans.json43
-rw-r--r--app/install/db/statistics.json23
-rw-r--r--app/install/db/supported_environments.json230
-rw-r--r--app/install/db/users.json51
-rw-r--r--app/install/ldap.conf.example10
-rw-r--r--app/messages/message.py65
-rw-r--r--app/monitoring/__init__.py10
-rw-r--r--app/monitoring/checks/binary_converter.py17
-rwxr-xr-xapp/monitoring/checks/check_interface.py50
-rwxr-xr-xapp/monitoring/checks/check_ping.py121
-rwxr-xr-xapp/monitoring/checks/check_pnic_vpp.py53
-rwxr-xr-xapp/monitoring/checks/check_vedge_ovs.py43
-rwxr-xr-xapp/monitoring/checks/check_vedge_vpp.py50
-rwxr-xr-xapp/monitoring/checks/check_vnic_vconnector.py72
-rwxr-xr-xapp/monitoring/checks/check_vnic_vpp.py48
-rw-r--r--app/monitoring/checks/check_vservice.py82
-rw-r--r--app/monitoring/handlers/__init__.py10
-rw-r--r--app/monitoring/handlers/basic_check_handler.py25
-rw-r--r--app/monitoring/handlers/handle_link.py36
-rw-r--r--app/monitoring/handlers/handle_otep.py48
-rw-r--r--app/monitoring/handlers/handle_pnic.py29
-rw-r--r--app/monitoring/handlers/handle_pnic_vpp.py28
-rw-r--r--app/monitoring/handlers/handle_vnic_vpp.py28
-rwxr-xr-xapp/monitoring/handlers/monitor.py92
-rw-r--r--app/monitoring/handlers/monitoring_check_handler.py94
-rw-r--r--app/monitoring/setup/__init__.py10
-rw-r--r--app/monitoring/setup/monitoring_check_handler.py54
-rw-r--r--app/monitoring/setup/monitoring_handler.py485
-rw-r--r--app/monitoring/setup/monitoring_host.py91
-rw-r--r--app/monitoring/setup/monitoring_link_vnic_vconnector.py37
-rw-r--r--app/monitoring/setup/monitoring_otep.py34
-rw-r--r--app/monitoring/setup/monitoring_pnic.py21
-rw-r--r--app/monitoring/setup/monitoring_setup_manager.py84
-rw-r--r--app/monitoring/setup/monitoring_simple_object.py25
-rw-r--r--app/monitoring/setup/monitoring_vedge.py19
-rw-r--r--app/monitoring/setup/monitoring_vnic.py20
-rw-r--r--app/monitoring/setup/monitoring_vservice.py23
-rwxr-xr-xapp/statistics/stats_consumer.py134
-rw-r--r--app/test/__init__.py10
-rw-r--r--app/test/api/__init__.py10
-rw-r--r--app/test/api/responders_test/__init__.py10
-rw-r--r--app/test/api/responders_test/auth/__init__.py10
-rw-r--r--app/test/api/responders_test/auth/test_tokens.py105
-rw-r--r--app/test/api/responders_test/resource/__init__.py10
-rw-r--r--app/test/api/responders_test/resource/test_aggregates.py103
-rw-r--r--app/test/api/responders_test/resource/test_clique_constraints.py138
-rw-r--r--app/test/api/responders_test/resource/test_clique_types.py267
-rw-r--r--app/test/api/responders_test/resource/test_cliques.py240
-rw-r--r--app/test/api/responders_test/resource/test_constants.py53
-rw-r--r--app/test/api/responders_test/resource/test_environment_configs.py420
-rw-r--r--app/test/api/responders_test/resource/test_inventory.py162
-rw-r--r--app/test/api/responders_test/resource/test_links.py193
-rw-r--r--app/test/api/responders_test/resource/test_messages.py236
-rw-r--r--app/test/api/responders_test/resource/test_monitoring_config_templates.py156
-rw-r--r--app/test/api/responders_test/resource/test_scans.py239
-rw-r--r--app/test/api/responders_test/resource/test_scheduled_scans.py247
-rw-r--r--app/test/api/responders_test/test_data/__init__.py10
-rw-r--r--app/test/api/responders_test/test_data/aggregates.py67
-rw-r--r--app/test/api/responders_test/test_data/base.py179
-rw-r--r--app/test/api/responders_test/test_data/clique_constraints.py74
-rw-r--r--app/test/api/responders_test/test_data/clique_types.py170
-rw-r--r--app/test/api/responders_test/test_data/cliques.py171
-rw-r--r--app/test/api/responders_test/test_data/constants.py23
-rw-r--r--app/test/api/responders_test/test_data/environment_configs.py221
-rw-r--r--app/test/api/responders_test/test_data/inventory.py37
-rw-r--r--app/test/api/responders_test/test_data/links.py90
-rw-r--r--app/test/api/responders_test/test_data/messages.py108
-rw-r--r--app/test/api/responders_test/test_data/monitoring_config_templates.py98
-rw-r--r--app/test/api/responders_test/test_data/scans.py187
-rw-r--r--app/test/api/responders_test/test_data/scheduled_scans.py138
-rw-r--r--app/test/api/responders_test/test_data/tokens.py83
-rw-r--r--app/test/api/test_base.py101
-rw-r--r--app/test/event_based_scan/__init__.py10
-rw-r--r--app/test/event_based_scan/config/__init__.py10
-rw-r--r--app/test/event_based_scan/config/test_config.py17
-rw-r--r--app/test/event_based_scan/test_data/__init__.py10
-rw-r--r--app/test/event_based_scan/test_data/event_payload_instance_add.py122
-rw-r--r--app/test/event_based_scan/test_data/event_payload_instance_delete.py97
-rw-r--r--app/test/event_based_scan/test_data/event_payload_instance_update.py99
-rw-r--r--app/test/event_based_scan/test_data/event_payload_interface_add.py350
-rw-r--r--app/test/event_based_scan/test_data/event_payload_interface_delete.py350
-rw-r--r--app/test/event_based_scan/test_data/event_payload_network_add.py32
-rw-r--r--app/test/event_based_scan/test_data/event_payload_network_delete.py88
-rw-r--r--app/test/event_based_scan/test_data/event_payload_network_update.py65
-rw-r--r--app/test/event_based_scan/test_data/event_payload_port_add.py314
-rw-r--r--app/test/event_based_scan/test_data/event_payload_port_delete.py290
-rw-r--r--app/test/event_based_scan/test_data/event_payload_port_update.py103
-rw-r--r--app/test/event_based_scan/test_data/event_payload_router_add.py176
-rw-r--r--app/test/event_based_scan/test_data/event_payload_router_delete.py59
-rw-r--r--app/test/event_based_scan/test_data/event_payload_router_update.py271
-rw-r--r--app/test/event_based_scan/test_data/event_payload_subnet_add.py124
-rw-r--r--app/test/event_based_scan/test_data/event_payload_subnet_delete.py95
-rw-r--r--app/test/event_based_scan/test_data/event_payload_subnet_update.py76
-rw-r--r--app/test/event_based_scan/test_event.py55
-rw-r--r--app/test/event_based_scan/test_event_delete_base.py64
-rw-r--r--app/test/event_based_scan/test_instance_add.py61
-rw-r--r--app/test/event_based_scan/test_instance_delete.py24
-rw-r--r--app/test/event_based_scan/test_instance_update.py46
-rw-r--r--app/test/event_based_scan/test_interface_add.py74
-rw-r--r--app/test/event_based_scan/test_interface_delete.py44
-rw-r--r--app/test/event_based_scan/test_network_add.py47
-rw-r--r--app/test/event_based_scan/test_network_delete.py24
-rw-r--r--app/test/event_based_scan/test_network_update.py33
-rw-r--r--app/test/event_based_scan/test_port_add.py75
-rw-r--r--app/test/event_based_scan/test_port_delete.py47
-rw-r--r--app/test/event_based_scan/test_port_update.py34
-rw-r--r--app/test/event_based_scan/test_router_add.py79
-rw-r--r--app/test/event_based_scan/test_router_delete.py23
-rw-r--r--app/test/event_based_scan/test_router_update.py62
-rw-r--r--app/test/event_based_scan/test_subnet_add.py68
-rw-r--r--app/test/event_based_scan/test_subnet_delete.py54
-rw-r--r--app/test/event_based_scan/test_subnet_update.py45
-rw-r--r--app/test/fetch/__init__.py9
-rw-r--r--app/test/fetch/api_fetch/__init__.py9
-rw-r--r--app/test/fetch/api_fetch/test_api_access.py142
-rw-r--r--app/test/fetch/api_fetch/test_api_fetch_availability_zone.py72
-rw-r--r--app/test/fetch/api_fetch/test_api_fetch_host_instances.py83
-rw-r--r--app/test/fetch/api_fetch/test_api_fetch_networks.py65
-rw-r--r--app/test/fetch/api_fetch/test_api_fetch_ports.py89
-rw-r--r--app/test/fetch/api_fetch/test_api_fetch_project_hosts.py137
-rw-r--r--app/test/fetch/api_fetch/test_api_fetch_projects.py120
-rw-r--r--app/test/fetch/api_fetch/test_api_fetch_regions.py41
-rw-r--r--app/test/fetch/api_fetch/test_data/__init__.py9
-rw-r--r--app/test/fetch/api_fetch/test_data/api_access.py55
-rw-r--r--app/test/fetch/api_fetch/test_data/api_fetch_availability_zones.py71
-rw-r--r--app/test/fetch/api_fetch/test_data/api_fetch_host_instances.py85
-rw-r--r--app/test/fetch/api_fetch/test_data/api_fetch_host_project_hosts.py225
-rw-r--r--app/test/fetch/api_fetch/test_data/api_fetch_networks.py72
-rw-r--r--app/test/fetch/api_fetch/test_data/api_fetch_ports.py72
-rw-r--r--app/test/fetch/api_fetch/test_data/api_fetch_projects.py88
-rw-r--r--app/test/fetch/api_fetch/test_data/api_fetch_regions.py50
-rw-r--r--app/test/fetch/api_fetch/test_data/configurations.py52
-rw-r--r--app/test/fetch/api_fetch/test_data/regions.py110
-rw-r--r--app/test/fetch/api_fetch/test_data/token.py23
-rw-r--r--app/test/fetch/cli_fetch/__init__.py9
-rw-r--r--app/test/fetch/cli_fetch/test_cli_access.py159
-rw-r--r--app/test/fetch/cli_fetch/test_cli_fetch_host_pnics.py135
-rw-r--r--app/test/fetch/cli_fetch/test_cli_fetch_host_pnics_vpp.py34
-rw-r--r--app/test/fetch/cli_fetch/test_cli_fetch_host_vservices.py132
-rw-r--r--app/test/fetch/cli_fetch/test_cli_fetch_instance_vnics.py111
-rw-r--r--app/test/fetch/cli_fetch/test_cli_fetch_instance_vnics_ovs.py36
-rw-r--r--app/test/fetch/cli_fetch/test_cli_fetch_instance_vnics_vpp.py23
-rw-r--r--app/test/fetch/cli_fetch/test_cli_fetch_vconnectors.py66
-rw-r--r--app/test/fetch/cli_fetch/test_cli_fetch_vconnectors_ovs.py38
-rw-r--r--app/test/fetch/cli_fetch/test_cli_fetch_vconnectors_vpp.py50
-rw-r--r--app/test/fetch/cli_fetch/test_cli_fetch_vservice_vnics.py124
-rw-r--r--app/test/fetch/cli_fetch/test_data/__init__.py9
-rw-r--r--app/test/fetch/cli_fetch/test_data/cli_access.py58
-rw-r--r--app/test/fetch/cli_fetch/test_data/cli_fetch_host_pnics.py147
-rw-r--r--app/test/fetch/cli_fetch/test_data/cli_fetch_host_pnics_vpp.py204
-rw-r--r--app/test/fetch/cli_fetch/test_data/cli_fetch_host_verservices.py276
-rw-r--r--app/test/fetch/cli_fetch/test_data/cli_fetch_instance_vnics.py288
-rw-r--r--app/test/fetch/cli_fetch/test_data/cli_fetch_vconnectors.py103
-rw-r--r--app/test/fetch/cli_fetch/test_data/cli_fetch_vconnectors_ovs.py234
-rw-r--r--app/test/fetch/cli_fetch/test_data/cli_fetch_vconnectors_vpp.py137
-rw-r--r--app/test/fetch/cli_fetch/test_data/cli_fetch_vservice_vnics.py616
-rw-r--r--app/test/fetch/config/__init__.py9
-rw-r--r--app/test/fetch/config/test_config.py17
-rw-r--r--app/test/fetch/db_fetch/__init__.py9
-rw-r--r--app/test/fetch/db_fetch/mock_cursor.py25
-rw-r--r--app/test/fetch/db_fetch/test_data/__init__.py9
-rw-r--r--app/test/fetch/db_fetch/test_data/db_access.py40
-rw-r--r--app/test/fetch/db_fetch/test_data/db_fetch_aggregate_hosts.py34
-rw-r--r--app/test/fetch/db_fetch/test_data/db_fetch_aggregates.py17
-rw-r--r--app/test/fetch/db_fetch/test_data/db_fetch_host_network_agents.py65
-rw-r--r--app/test/fetch/db_fetch/test_data/db_fetch_instances.py91
-rw-r--r--app/test/fetch/db_fetch/test_data/db_fetch_oteps.py131
-rw-r--r--app/test/fetch/db_fetch/test_data/db_fetch_vedges_ovs.py168
-rw-r--r--app/test/fetch/db_fetch/test_data/db_fetch_vedges_vpp.py89
-rw-r--r--app/test/fetch/db_fetch/test_db_access.py108
-rw-r--r--app/test/fetch/db_fetch/test_db_fetch_aggregate_hosts.py60
-rw-r--r--app/test/fetch/db_fetch/test_db_fetch_aggregates.py26
-rw-r--r--app/test/fetch/db_fetch/test_db_fetch_instances.py37
-rw-r--r--app/test/fetch/db_fetch/test_db_fetch_oteps.py92
-rw-r--r--app/test/fetch/db_fetch/test_db_fetch_vedges_ovs.py109
-rw-r--r--app/test/fetch/db_fetch/test_db_fetch_vedges_vpp.py82
-rw-r--r--app/test/fetch/db_fetch/test_fetch_host_network_agents.py66
-rw-r--r--app/test/fetch/test_fetch.py46
-rw-r--r--app/test/scan/__init__.py10
-rw-r--r--app/test/scan/config/__init__.py9
-rw-r--r--app/test/scan/config/test_config.py17
-rw-r--r--app/test/scan/main.py17
-rw-r--r--app/test/scan/mock_module.py37
-rw-r--r--app/test/scan/test_data/__init__.py9
-rw-r--r--app/test/scan/test_data/configurations.py69
-rw-r--r--app/test/scan/test_data/metadata.py318
-rw-r--r--app/test/scan/test_data/scan.py435
-rw-r--r--app/test/scan/test_data/scanner.py355
-rw-r--r--app/test/scan/test_scan.py46
-rw-r--r--app/test/scan/test_scan_controller.py215
-rw-r--r--app/test/scan/test_scan_metadata_parser.py152
-rw-r--r--app/test/scan/test_scanner.py355
-rw-r--r--app/test/test_suite.py25
-rw-r--r--app/utils/__init__.py10
-rw-r--r--app/utils/binary_converter.py27
-rw-r--r--app/utils/config_file.py48
-rw-r--r--app/utils/constants.py37
-rw-r--r--app/utils/deep_merge.py77
-rw-r--r--app/utils/dict_naming_converter.py40
-rw-r--r--app/utils/exceptions.py13
-rw-r--r--app/utils/inventory_mgr.py445
-rw-r--r--app/utils/logging/__init__.py10
-rw-r--r--app/utils/logging/console_logger.py21
-rw-r--r--app/utils/logging/file_logger.py23
-rw-r--r--app/utils/logging/full_logger.py47
-rw-r--r--app/utils/logging/logger.py99
-rw-r--r--app/utils/logging/message_logger.py21
-rw-r--r--app/utils/logging/mongo_logging_handler.py53
-rw-r--r--app/utils/metadata_parser.py83
-rw-r--r--app/utils/mongo_access.py137
-rw-r--r--app/utils/singleton.py16
-rw-r--r--app/utils/special_char_converter.py32
-rw-r--r--app/utils/ssh_conn.py94
-rw-r--r--app/utils/ssh_connection.py217
-rw-r--r--app/utils/string_utils.py59
-rw-r--r--app/utils/util.py172
365 files changed, 32568 insertions, 0 deletions
diff --git a/LICENSE b/LICENSE
index f4346f8..cf46283 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,3 +1,5 @@
+Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems)
+and others
Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..2a9e986
--- /dev/null
+++ b/README.md
@@ -0,0 +1,50 @@
+This work is licensed under a Creative Commons Attribution 4.0
+International License.
+http://creativecommons.org/licenses/by/4.0
+
+Calipso - OpenStack Network Discovery and Assurance
+==================================================
+### About
+We are going to enhance the way Cloud Network Administrators(CNA) and Tenant Network Administrators(TNA)
+Understands, Monitors and Troubleshoot highly distributed OpenStack and other virtual Environments.
+
+We are following Domain-Driven-Design process and procedures:
+ref: http://www.methodsandtools.com/archive/archive.php?id=97
+<br>
+WIKI Central:<br>
+http://wikicentral.cisco.com/display/OSDNA/Home
+<br>
+JIVE:<br>
+https://cisco.jiveon.com/people/korlev/blog/2015/12/13/os-dna-project
+<br>
+Rally:<br>
+https://rally1.rallydev.com/#/48530212030d/dashboard
+<br>
+
+### Prototype Intent:
+
+Provide CNA and TNA with support for:
+<br>
+1. Building virtual Network inventory and visualizing all inter-connections in real-time
+<br>
+2. Monitor virtual network objects state and health
+<br>
+3. Troubleshoot failures in virtual networks
+<br>
+4. Assess impact of failure in virtual networks
+<br>
+
+### High Level Functionality Tree:
+<br>
+http://korlev-calipso.cisco.com/ <br>
+Code: https://cto-github.cisco.com/korlev/Calipso
+
+### Proto (mockups, updated Nov 15th)
+korlev-calipso-be.cisco.com:8081
+
+#### High Level Architecture: <br>
+see under /Architecture (outdated)
+
+### Contacts
+* Koren Lev (korlev)
+* Yaron Yogev (yayogev)
diff --git a/app/api/__init__.py b/app/api/__init__.py
new file mode 100644
index 0000000..1e85a2a
--- /dev/null
+++ b/app/api/__init__.py
@@ -0,0 +1,10 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
diff --git a/app/api/app.py b/app/api/app.py
new file mode 100644
index 0000000..5fa3da9
--- /dev/null
+++ b/app/api/app.py
@@ -0,0 +1,71 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import importlib
+
+import falcon
+
+from api.auth.token import Token
+from api.backends.ldap_access import LDAPAccess
+from api.exceptions.exceptions import CalipsoApiException
+from api.middleware.authentication import AuthenticationMiddleware
+from utils.inventory_mgr import InventoryMgr
+from utils.logging.full_logger import FullLogger
+from utils.mongo_access import MongoAccess
+
+
+class App:
+
+ ROUTE_DECLARATIONS = {
+ "/inventory": "resource.inventory.Inventory",
+ "/links": "resource.links.Links",
+ "/messages": "resource.messages.Messages",
+ "/cliques": "resource.cliques.Cliques",
+ "/clique_types": "resource.clique_types.CliqueTypes",
+ "/clique_constraints": "resource.clique_constraints.CliqueConstraints",
+ "/scans": "resource.scans.Scans",
+ "/scheduled_scans": "resource.scheduled_scans.ScheduledScans",
+ "/constants": "resource.constants.Constants",
+ "/monitoring_config_templates":
+ "resource.monitoring_config_templates.MonitoringConfigTemplates",
+ "/aggregates": "resource.aggregates.Aggregates",
+ "/environment_configs":
+ "resource.environment_configs.EnvironmentConfigs",
+ "/auth/tokens": "auth.tokens.Tokens"
+ }
+
+ responders_path = "api.responders"
+
+ def __init__(self, mongo_config="", ldap_config="",
+ log_level="", inventory="", token_lifetime=86400):
+ MongoAccess.set_config_file(mongo_config)
+ self.inv = InventoryMgr()
+ self.inv.set_collections(inventory)
+ self.log = FullLogger()
+ self.log.set_loglevel(log_level)
+ self.ldap_access = LDAPAccess(ldap_config)
+ Token.set_token_lifetime(token_lifetime)
+ self.middleware = AuthenticationMiddleware()
+ self.app = falcon.API(middleware=[self.middleware])
+ self.app.add_error_handler(CalipsoApiException)
+ self.set_routes(self.app)
+
+ def get_app(self):
+ return self.app
+
+ def set_routes(self, app):
+ for url in self.ROUTE_DECLARATIONS.keys():
+ class_path = self.ROUTE_DECLARATIONS.get(url)
+ module = self.responders_path + "." + \
+ class_path[:class_path.rindex(".")]
+ class_name = class_path.split('.')[-1]
+ module = importlib.import_module(module)
+ class_ = getattr(module, class_name)
+ resource = class_()
+ app.add_route(url, resource)
diff --git a/app/api/auth/__init__.py b/app/api/auth/__init__.py
new file mode 100644
index 0000000..1e85a2a
--- /dev/null
+++ b/app/api/auth/__init__.py
@@ -0,0 +1,10 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
diff --git a/app/api/auth/auth.py b/app/api/auth/auth.py
new file mode 100644
index 0000000..04fc4b9
--- /dev/null
+++ b/app/api/auth/auth.py
@@ -0,0 +1,71 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from api.auth.token import Token
+from api.backends.ldap_access import LDAPAccess
+from utils.inventory_mgr import InventoryMgr
+from utils.logging.full_logger import FullLogger
+
+
+class Auth:
+
+ def __init__(self):
+ super().__init__()
+ self.inv = InventoryMgr()
+ self.log = FullLogger()
+ self.tokens_coll = self.inv.client['tokens']['api_tokens']
+ self.ldap_access = LDAPAccess()
+
+ def get_token(self, token):
+ tokens = None
+ try:
+ tokens = list(self.tokens_coll.find({'token': token}))
+ except Exception as e:
+ self.log.error('Failed to get token for ', str(e))
+
+ return tokens
+
+ def write_token(self, token):
+ error = None
+ try:
+ self.tokens_coll.insert_one(token)
+ except Exception as e:
+ self.log.error("Failed to write new token {0} to database for {1}"
+ .format(token[token], str(e)))
+ error = 'Failed to create new token'
+
+ return error
+
+ def delete_token(self, token):
+ error = None
+ try:
+ self.tokens_coll.delete_one({'token': token})
+ except Exception as e:
+ self.log.error('Failed to delete token {0} for {1}'.
+ format(token, str(e)))
+ error = 'Failed to delete token {0}'.format(token)
+
+ return error
+
+ def validate_credentials(self, username, pwd):
+ return self.ldap_access.authenticate_user(username, pwd)
+
+ def validate_token(self, token):
+ error = None
+ tokens = self.get_token(token)
+ if not tokens:
+ error = "Token {0} doesn't exist".format(token)
+ elif len(tokens) > 1:
+ self.log.error('Multiple tokens found for {0}'.format(token))
+ error = "Multiple tokens found"
+ else:
+ t = tokens[0]
+ error = Token.validate_token(t)
+
+ return error
diff --git a/app/api/auth/token.py b/app/api/auth/token.py
new file mode 100644
index 0000000..d057d22
--- /dev/null
+++ b/app/api/auth/token.py
@@ -0,0 +1,39 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import datetime
+import uuid
+
+
+class Token:
+ token_lifetime = 86400
+ FIELD = 'X-AUTH-TOKEN'
+
+ @classmethod
+ def set_token_lifetime(cls, lifetime):
+ Token.token_lifetime = lifetime
+
+ @classmethod
+ def new_uuid_token(cls, method):
+ token = {}
+ token['issued_at'] = datetime.datetime.now()
+ token['expires_at'] = token['issued_at'] +\
+ datetime.timedelta(seconds=Token.token_lifetime)
+ token['token'] = uuid.uuid4().hex
+ token['method'] = method
+ return token
+
+ @classmethod
+ def validate_token(cls, token):
+ error = None
+ now = datetime.datetime.now()
+ if now > token['expires_at']:
+ error = 'Token {0} has expired'.format(token['token'])
+
+ return error
diff --git a/app/api/backends/__init__.py b/app/api/backends/__init__.py
new file mode 100644
index 0000000..1e85a2a
--- /dev/null
+++ b/app/api/backends/__init__.py
@@ -0,0 +1,10 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
diff --git a/app/api/backends/ldap_access.py b/app/api/backends/ldap_access.py
new file mode 100644
index 0000000..a998656
--- /dev/null
+++ b/app/api/backends/ldap_access.py
@@ -0,0 +1,89 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import ssl
+
+from ldap3 import Server, Connection, Tls
+
+from utils.config_file import ConfigFile
+from utils.logging.full_logger import FullLogger
+from utils.singleton import Singleton
+
+
class LDAPAccess(metaclass=Singleton):
    """Singleton LDAP backend that authenticates API users by binding."""

    default_config_file = "ldap.conf"
    # Maps the 'tls_req_cert' config value onto ssl certificate requirements.
    TLS_REQUEST_CERTS = {
        "demand": ssl.CERT_REQUIRED,
        "allow": ssl.CERT_OPTIONAL,
        "never": ssl.CERT_NONE,
        "default": ssl.CERT_NONE
    }
    user_ssl = True

    def __init__(self, config_file_path=""):
        super().__init__()
        self.log = FullLogger()
        self.ldap_params = self.get_ldap_params(config_file_path)
        self.server = self.connect_ldap_server()

    def get_ldap_params(self, config_file_path):
        """Load LDAP settings from the config file over built-in defaults.

        Raises ValueError when 'user_tree_dn' or 'user_id_attribute' is
        missing, since a user DN cannot be built without them.
        """
        ldap_params = {
            "url": "ldap://localhost:389"
        }
        if not config_file_path:
            config_file_path = ConfigFile.get(self.default_config_file)
        if config_file_path:
            try:
                config_file = ConfigFile(config_file_path)
                params = config_file.read_config()
                ldap_params.update(params)
            except Exception as e:
                self.log.error(str(e))
                raise
        for required in ("user_tree_dn", "user_id_attribute"):
            if required not in ldap_params:
                raise ValueError("{0} must be specified in {1}"
                                 .format(required, config_file_path))
        return ldap_params

    def connect_ldap_server(self):
        """Create the ldap3 Server object, enabling TLS when a CA file is set."""
        ca_certificate_file = self.ldap_params.get('tls_cacertfile')
        req_cert = self.ldap_params.get('tls_req_cert')
        ldap_url = self.ldap_params.get('url')

        if ca_certificate_file:
            # Unknown/missing values fall back to the 'default' policy.
            if req_cert not in self.TLS_REQUEST_CERTS:
                req_cert = 'default'
            # NOTE(review): in ldap3, the CA bundle parameter is
            # ca_certs_file; local_certificate_file is the *client*
            # certificate. Confirm which is intended here.
            tls = Tls(local_certificate_file=ca_certificate_file,
                      validate=self.TLS_REQUEST_CERTS[req_cert])
            return Server(ldap_url, use_ssl=self.user_ssl, tls=tls)

        return Server(ldap_url, use_ssl=self.user_ssl)

    def authenticate_user(self, username, pwd):
        """Return True if binding to LDAP as *username* with *pwd* succeeds."""
        if not self.server:
            self.server = self.connect_ldap_server()

        # Fixed: the original expression was split across two lines with no
        # continuation or parentheses, which is a SyntaxError.
        user_dn = (self.ldap_params['user_id_attribute'] + "=" +
                   username + "," + self.ldap_params['user_tree_dn'])
        connection = Connection(self.server, user=user_dn, password=pwd)
        # Binding validates the user; bound is True iff the bind succeeded.
        bound = False
        try:
            bound = connection.bind()
            connection.unbind()
        except Exception as e:
            self.log.error('Failed to bind the server for {0}'.format(str(e)))

        return bound
diff --git a/app/api/exceptions/__init__.py b/app/api/exceptions/__init__.py
new file mode 100644
index 0000000..1e85a2a
--- /dev/null
+++ b/app/api/exceptions/__init__.py
@@ -0,0 +1,10 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
diff --git a/app/api/exceptions/exceptions.py b/app/api/exceptions/exceptions.py
new file mode 100644
index 0000000..f0a1d9f
--- /dev/null
+++ b/app/api/exceptions/exceptions.py
@@ -0,0 +1,26 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from utils.logging.console_logger import ConsoleLogger
+
+
class CalipsoApiException(Exception):
    """API error carrying an HTTP status and response body.

    The static handle() hook logs the error and copies status/body onto
    the outgoing response.
    """

    log = ConsoleLogger()

    def __init__(self, status, body="", message=""):
        """Record the HTTP status, response body and log message."""
        super().__init__(message)
        self.status = status
        self.body = body
        self.message = message

    @staticmethod
    def handle(ex, req, resp, params):
        """Error hook: log the failure and populate the response from *ex*."""
        CalipsoApiException.log.error(ex.message)
        resp.status = ex.status
        resp.body = ex.body
diff --git a/app/api/middleware/__init__.py b/app/api/middleware/__init__.py
new file mode 100644
index 0000000..1e85a2a
--- /dev/null
+++ b/app/api/middleware/__init__.py
@@ -0,0 +1,10 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
diff --git a/app/api/middleware/authentication.py b/app/api/middleware/authentication.py
new file mode 100644
index 0000000..bc62fa8
--- /dev/null
+++ b/app/api/middleware/authentication.py
@@ -0,0 +1,63 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import base64
+
+from api.responders.responder_base import ResponderBase
+from api.auth.auth import Auth
+from api.auth.token import Token
+
+
class AuthenticationMiddleware(ResponderBase):
    """Falcon middleware enforcing basic- or token-authentication."""

    def __init__(self):
        super().__init__()
        self.auth = Auth()
        # Header carrying HTTP Basic credentials (names upper-cased below).
        self.BASIC_AUTH = "AUTHORIZATION"
        # Routes that must stay reachable without authentication.
        self.EXCEPTION_ROUTES = ['/auth/tokens']

    def process_request(self, req, resp):
        """Authenticate every incoming request except exempted routes."""
        if req.path in self.EXCEPTION_ROUTES:
            return

        self.log.debug("Authentication middleware is processing the request")
        # Normalize header names so lookups are case-insensitive.
        headers = self.change_dict_naming_convention(req.headers,
                                                     lambda s: s.upper())
        if self.BASIC_AUTH in headers:
            # basic authentication
            self.log.debug("Authenticating the basic credentials")
            auth_error = self.authenticate_with_basic_auth(
                headers[self.BASIC_AUTH])
        elif Token.FIELD in headers:
            # token authentication
            self.log.debug("Authenticating token")
            auth_error = self.auth.validate_token(headers[Token.FIELD])
        else:
            auth_error = "Authentication required"

        if auth_error:
            self.unauthorized(auth_error)

    def authenticate_with_basic_auth(self, basic):
        """Validate an HTTP Basic 'Authorization' header value.

        Returns None on success, otherwise an error message string.
        """
        if not basic or not basic.startswith("Basic"):
            return "Credentials not provided"

        # Strip the "Basic" scheme prefix by length. The original used
        # str.lstrip("Basic"), which removes any of the characters
        # B/a/s/i/c and can eat the start of the base64 credential.
        credential = basic[len("Basic"):].strip()
        try:
            username_password = base64.b64decode(credential).decode("utf-8")
        except Exception:
            # Malformed base64/UTF-8 should yield a 401, not a server error.
            return "Credentials not provided"

        # Split on the first ':' only, so passwords may contain colons;
        # also guards against an IndexError when no colon is present.
        username, sep, password = username_password.partition(":")
        if not sep:
            return "Credentials not provided"

        if not self.auth.validate_credentials(username, password):
            self.log.info("Authentication for {0} failed".format(username))
            return "Authentication failed"

        self.log.info("Authentication for {0} succeeded".format(username))
        return None
diff --git a/app/api/responders/__init__.py b/app/api/responders/__init__.py
new file mode 100644
index 0000000..1e85a2a
--- /dev/null
+++ b/app/api/responders/__init__.py
@@ -0,0 +1,10 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
diff --git a/app/api/responders/auth/__init__.py b/app/api/responders/auth/__init__.py
new file mode 100644
index 0000000..1e85a2a
--- /dev/null
+++ b/app/api/responders/auth/__init__.py
@@ -0,0 +1,10 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
diff --git a/app/api/responders/auth/tokens.py b/app/api/responders/auth/tokens.py
new file mode 100644
index 0000000..0b3a22f
--- /dev/null
+++ b/app/api/responders/auth/tokens.py
@@ -0,0 +1,117 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from datetime import datetime
+
+from bson.objectid import ObjectId
+
+from api.auth.auth import Auth
+from api.auth.token import Token
+from api.responders.responder_base import ResponderBase
+from api.validation.data_validate import DataValidate
+from utils.string_utils import stringify_object_values_by_types
+
+
class Tokens(ResponderBase):
    """Responder for /auth/tokens: issues (POST) and revokes (DELETE) tokens."""

    def __init__(self):
        super().__init__()
        # Validation schema for the top-level "auth" object of a POST body.
        self.auth_requirements = {
            'methods': self.require(list, False,
                                    DataValidate.LIST,
                                    ['credentials', 'token'],
                                    True),
            'credentials': self.require(dict, True),
            'token': self.require(str)
        }
        # Validation schema for the nested "credentials" object.
        self.credential_requirements = {
            'username': self.require(str, mandatory=True),
            'password': self.require(str, mandatory=True)
        }
        self.auth = Auth()

    def on_post(self, req, resp):
        """Create a new token after authenticating by credentials or token."""
        self.log.debug('creating new token')
        error, data = self.get_content_from_request(req)
        if error:
            self.bad_request(error)

        if 'auth' not in data:
            self.bad_request('Request must contain auth object')

        auth = data['auth']

        self.validate_query_data(auth, self.auth_requirements)

        if 'credentials' in auth:
            self.validate_query_data(auth['credentials'],
                                     self.credential_requirements)

        auth_error = self.authenticate(auth)
        if auth_error:
            self.unauthorized(auth_error)

        new_token = Token.new_uuid_token(auth['method'])
        write_error = self.auth.write_token(new_token)

        if write_error:
            # An unpersisted token could never be validated later, so reject
            # the request rather than hand out an unusable token.
            # TODO: consider 500 instead of 400 for a database write failure.
            self.bad_request(write_error)

        # datetime/ObjectId values are not JSON serializable as-is.
        stringify_object_values_by_types(new_token, [datetime, ObjectId])
        self.set_successful_response(resp, new_token, '201')

    def authenticate(self, auth):
        """Try the requested auth methods; return an error string or None.

        On success the method actually used is recorded in auth['method'].
        """
        error = None
        methods = auth['methods']
        credentials = auth.get('credentials')
        token = auth.get('token')

        if not token and not credentials:
            return 'must provide credentials or token'

        if 'credentials' in methods:
            if not credentials:
                return 'credentials must be provided for credentials method'
            if not self.auth.validate_credentials(credentials['username'],
                                                  credentials['password']):
                # Fall through: a token method may still succeed below.
                error = 'authentication failed'
            else:
                auth['method'] = "credentials"
                return None

        if 'token' in methods:
            if not token:
                return 'token must be provided for token method'
            error = self.auth.validate_token(token)
            if not error:
                auth['method'] = 'token'

        # Guard: with an empty methods list the original returned None
        # without setting auth['method'], causing a KeyError (HTTP 500)
        # in on_post instead of a clean 401.
        if not error and 'method' not in auth:
            error = 'must provide credentials or token'

        return error

    def on_delete(self, req, resp):
        """Revoke the token supplied in the X-AUTH-TOKEN header."""
        headers = self.change_dict_naming_convention(req.headers,
                                                     lambda s: s.upper())
        if Token.FIELD not in headers:
            self.unauthorized('Authentication failed')

        token = headers[Token.FIELD]
        error = self.auth.validate_token(token)
        if error:
            self.unauthorized(error)

        delete_error = self.auth.delete_token(token)

        if delete_error:
            self.bad_request(delete_error)

        self.set_successful_response(resp)
diff --git a/app/api/responders/resource/__init__.py b/app/api/responders/resource/__init__.py
new file mode 100644
index 0000000..1e85a2a
--- /dev/null
+++ b/app/api/responders/resource/__init__.py
@@ -0,0 +1,10 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
diff --git a/app/api/responders/resource/aggregates.py b/app/api/responders/resource/aggregates.py
new file mode 100644
index 0000000..36fcfa4
--- /dev/null
+++ b/app/api/responders/resource/aggregates.py
@@ -0,0 +1,157 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from api.responders.responder_base import ResponderBase
+from api.validation.data_validate import DataValidate
+
+
class Aggregates(ResponderBase):
    """Responder returning aggregate counts by environment, message or constant."""

    def __init__(self):
        super().__init__()
        self.AGGREGATE_TYPES = ["environment", "message", "constant"]
        # Dispatch table: aggregate type -> handler.
        self.AGGREGATES_MAP = {
            "environment": self.get_environments_aggregates,
            "message": self.get_messages_aggregates,
            "constant": self.get_constants_aggregates
        }

    def on_get(self, req, resp):
        """Return the aggregate counts for the requested aggregate type."""
        self.log.debug("Getting aggregates information")

        filters = self.parse_query_params(req)
        mandatory_error = {"mandatory":
                           "type must be specified: " +
                           "environment/" +
                           " message/" +
                           "constant"}
        filters_requirements = {
            "env_name": self.require(str),
            "type": self.require(str, validate=DataValidate.LIST,
                                 requirement=self.AGGREGATE_TYPES,
                                 mandatory=True,
                                 error_messages=mandatory_error)
        }
        self.validate_query_data(filters, filters_requirements)
        query = self.build_query(filters)
        query_type = query["type"]
        if query_type == "environment":
            # env_name is only required (and checked) for environment queries.
            env_name = query.get("env_name")
            if not env_name:
                self.bad_request("env_name must be specified")
            if not self.check_environment_name(env_name):
                self.bad_request("unknown environment: " + env_name)

        self.set_successful_response(resp,
                                     self.AGGREGATES_MAP[query_type](query))

    def build_query(self, filters):
        """Build the aggregation query dict from validated filters."""
        query = {"type": filters["type"]}
        if query["type"] == "environment" and filters.get("env_name"):
            query["env_name"] = filters["env_name"]
        return query

    def _count_by(self, group_field, collection):
        """Group *collection* documents by *group_field*, counting each group."""
        pipeline = [
            {
                '$group': {
                    '_id': '$' + group_field,
                    'total': {
                        '$sum': 1
                    }
                }
            }
        ]
        return self.aggregate(pipeline, collection)

    def get_environments_aggregates(self, query):
        """Count inventory objects of each type within one environment."""
        env_name = query['env_name']
        pipeline = [
            {
                '$match': {
                    'environment': env_name
                }
            },
            {
                '$group': {
                    '_id': '$type',
                    'total': {
                        '$sum': 1
                    }
                }
            }
        ]
        object_types = {group['_id']: group['total']
                        for group in self.aggregate(pipeline, "inventory")}
        return {
            "type": query["type"],
            "env_name": env_name,
            "aggregates": {
                "object_types": object_types
            }
        }

    def get_messages_aggregates(self, query):
        """Count messages per environment and per severity level."""
        environments = {env['_id']: env['total']
                        for env in self._count_by('environment', 'messages')}
        levels = {lvl['_id']: lvl['total']
                  for lvl in self._count_by('level', 'messages')}
        return {
            "type": query['type'],
            "aggregates": {
                "levels": levels,
                "environments": environments
            }
        }

    def get_constants_aggregates(self, query):
        """Report the number of entries in each named constants list."""
        pipeline = [
            {
                '$project': {
                    '_id': 0,
                    'name': 1,
                    'total': {
                        '$size': '$data'
                    }
                }
            }
        ]
        names = {constant['name']: constant['total']
                 for constant in self.aggregate(pipeline, "constants")}
        return {
            "type": query['type'],
            "aggregates": {
                "names": names
            }
        }
diff --git a/app/api/responders/resource/clique_constraints.py b/app/api/responders/resource/clique_constraints.py
new file mode 100644
index 0000000..eddead9
--- /dev/null
+++ b/app/api/responders/resource/clique_constraints.py
@@ -0,0 +1,67 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from api.responders.responder_base import ResponderBase
+from api.validation.data_validate import DataValidate
+from bson.objectid import ObjectId
+
+
class CliqueConstraints(ResponderBase):
    """Responder for GET /clique_constraints."""

    def __init__(self):
        super().__init__()
        self.ID = '_id'
        # Projection for list queries: return only the document ids.
        self.PROJECTION = {
            self.ID: True
        }
        self.COLLECTION = 'clique_constraints'

    def on_get(self, req, resp):
        """Return one clique constraint by id, or a paginated id list."""
        self.log.debug("Getting clique_constraints")
        filters = self.parse_query_params(req)
        focal_point_types = self.get_constants_by_name("object_types")
        filters_requirements = {
            'id': self.require(ObjectId, True),
            'focal_point_type': self.require(str, False, DataValidate.LIST,
                                             focal_point_types),
            'constraint': self.require([list, str]),
            'page': self.require(int, True),
            'page_size': self.require(int, True)
        }
        self.validate_query_data(filters, filters_requirements)
        page, page_size = self.get_pagination(filters)
        query = self.build_query(filters)
        if self.ID in query:
            clique_constraint = self.get_object_by_id(self.COLLECTION,
                                                      query,
                                                      [ObjectId], self.ID)
            self.set_successful_response(resp, clique_constraint)
        else:
            clique_constraints_ids = \
                self.get_objects_list(self.COLLECTION, query,
                                      page, page_size, self.PROJECTION)
            self.set_successful_response(
                resp, {"clique_constraints": clique_constraints_ids}
            )

    def build_query(self, filters):
        """Translate validated filters into a MongoDB query dict."""
        query = {}
        filters_keys = ['focal_point_type']
        self.update_query_with_filters(filters, filters_keys, query)
        constraints = filters.get('constraint')
        if constraints:
            # A single constraint may arrive as a bare string;
            # isinstance replaces the non-idiomatic type(...) != list.
            if not isinstance(constraints, list):
                constraints = [constraints]
            query['constraints'] = {
                '$all': constraints
            }
        _id = filters.get('id')
        if _id:
            query[self.ID] = _id
        return query
diff --git a/app/api/responders/resource/clique_types.py b/app/api/responders/resource/clique_types.py
new file mode 100644
index 0000000..9a39dc8
--- /dev/null
+++ b/app/api/responders/resource/clique_types.py
@@ -0,0 +1,103 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from api.responders.responder_base import ResponderBase
+from api.validation.data_validate import DataValidate
+from bson.objectid import ObjectId
+
+
class CliqueTypes(ResponderBase):
    """Responder for GET/POST /clique_types."""

    def __init__(self):
        super().__init__()
        self.COLLECTION = "clique_types"
        self.ID = "_id"
        # Fields returned for list queries.
        self.PROJECTION = {
            self.ID: True,
            "focal_point_type": True,
            "link_types": True,
            "environment": True
        }

    def on_get(self, req, resp):
        """Return one clique type by id, or a paginated id list."""
        self.log.debug("Getting clique types")

        filters = self.parse_query_params(req)
        focal_point_types = self.get_constants_by_name("object_types")
        link_types = self.get_constants_by_name("link_types")
        filters_requirements = {
            'env_name': self.require(str, mandatory=True),
            'id': self.require(ObjectId, True),
            'focal_point_type': self.require(str,
                                             validate=DataValidate.LIST,
                                             requirement=focal_point_types),
            'link_type': self.require([list, str],
                                      validate=DataValidate.LIST,
                                      requirement=link_types),
            'page': self.require(int, True),
            'page_size': self.require(int, True)
        }

        self.validate_query_data(filters, filters_requirements)
        page, page_size = self.get_pagination(filters)
        query = self.build_query(filters)
        if self.ID in query:
            clique_type = self.get_object_by_id(self.COLLECTION, query,
                                                [ObjectId], self.ID)
            self.set_successful_response(resp, clique_type)
        else:
            clique_types_ids = self.get_objects_list(self.COLLECTION,
                                                     query,
                                                     page, page_size,
                                                     self.PROJECTION)
            self.set_successful_response(resp,
                                         {"clique_types": clique_types_ids})

    def on_post(self, req, resp):
        """Create a new clique type for an existing environment."""
        self.log.debug("Posting new clique_type")
        error, clique_type = self.get_content_from_request(req)
        if error:
            self.bad_request(error)
        focal_point_types = self.get_constants_by_name("object_types")
        link_types = self.get_constants_by_name("link_types")
        clique_type_requirements = {
            'environment': self.require(str, mandatory=True),
            'focal_point_type': self.require(str, False, DataValidate.LIST,
                                             focal_point_types, True),
            'link_types': self.require(list, False, DataValidate.LIST,
                                       link_types, True),
            'name': self.require(str, mandatory=True)
        }

        self.validate_query_data(clique_type, clique_type_requirements)

        env_name = clique_type['environment']
        if not self.check_environment_name(env_name):
            # fixed user-facing message typo: was "unkown environment"
            self.bad_request("unknown environment: " + env_name)

        self.write(clique_type, self.COLLECTION)
        self.set_successful_response(resp,
                                     {"message": "created a new clique_type "
                                                 "for environment {0}"
                                                 .format(env_name)},
                                     "201")

    def build_query(self, filters):
        """Translate validated filters into a MongoDB query dict."""
        query = {}
        filters_keys = ['focal_point_type']
        self.update_query_with_filters(filters, filters_keys, query)
        link_types = filters.get('link_type')
        if link_types:
            # A single link type may arrive as a bare string;
            # isinstance replaces the non-idiomatic type(...) != list.
            if not isinstance(link_types, list):
                link_types = [link_types]
            query['link_types'] = {'$all': link_types}
        _id = filters.get('id')
        if _id:
            query[self.ID] = _id

        query['environment'] = filters['env_name']
        return query
diff --git a/app/api/responders/resource/cliques.py b/app/api/responders/resource/cliques.py
new file mode 100644
index 0000000..ece347a
--- /dev/null
+++ b/app/api/responders/resource/cliques.py
@@ -0,0 +1,73 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from api.validation.data_validate import DataValidate
+from api.responders.responder_base import ResponderBase
+from bson.objectid import ObjectId
+
+from utils.util import generate_object_ids
+
+
class Cliques(ResponderBase):
    """Responder for GET /cliques."""

    def __init__(self):
        super().__init__()
        self.COLLECTION = "cliques"
        self.ID = '_id'
        # Fields returned for list queries.
        self.PROJECTION = {
            self.ID: True,
            "focal_point_type": True,
            "environment": True
        }

    def on_get(self, req, resp):
        """Return a single clique by id, or a paginated list of cliques."""
        self.log.debug("Getting cliques")

        filters = self.parse_query_params(req)
        focal_point_types = self.get_constants_by_name("object_types")
        link_types = self.get_constants_by_name("link_types")
        requirements = {
            'env_name': self.require(str, mandatory=True),
            'id': self.require(ObjectId, True),
            'focal_point': self.require(ObjectId, True),
            'focal_point_type': self.require(str, validate=DataValidate.LIST,
                                             requirement=focal_point_types),
            'link_type': self.require(str, validate=DataValidate.LIST,
                                      requirement=link_types),
            'link_id': self.require(ObjectId, True),
            'page': self.require(int, True),
            'page_size': self.require(int, True)
        }
        self.validate_query_data(filters, requirements)
        page, page_size = self.get_pagination(filters)
        query = self.build_query(filters)

        if self.ID not in query:
            cliques_ids = self.get_objects_list(self.COLLECTION, query,
                                                page, page_size,
                                                self.PROJECTION)
            self.set_successful_response(resp, {"cliques": cliques_ids})
        else:
            clique = self.get_object_by_id(self.COLLECTION, query,
                                           [ObjectId], self.ID)
            self.set_successful_response(resp, clique)

    def build_query(self, filters):
        """Translate validated filters into a MongoDB query dict."""
        query = {}
        self.update_query_with_filters(filters,
                                       ['focal_point', 'focal_point_type'],
                                       query)
        # Filters matching members of the embedded links_detailed array.
        if filters.get('link_type'):
            query['links_detailed.link_type'] = filters['link_type']
        if filters.get('link_id'):
            query['links_detailed._id'] = filters['link_id']
        if filters.get('id'):
            query[self.ID] = filters['id']
        query['environment'] = filters['env_name']
        return query
diff --git a/app/api/responders/resource/constants.py b/app/api/responders/resource/constants.py
new file mode 100644
index 0000000..be71b5d
--- /dev/null
+++ b/app/api/responders/resource/constants.py
@@ -0,0 +1,30 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from api.responders.responder_base import ResponderBase
+from bson.objectid import ObjectId
+
+
class Constants(ResponderBase):
    """Responder for GET /constants?name=<constants list name>."""

    def __init__(self):
        super().__init__()
        self.ID = '_id'
        self.COLLECTION = 'constants'

    def on_get(self, req, resp):
        """Return the constants document matching the requested name."""
        self.log.debug("Getting constants with name")
        filters = self.parse_query_params(req)
        self.validate_query_data(filters, {
            "name": self.require(str, mandatory=True),
        })
        constant = self.get_object_by_id(self.COLLECTION,
                                         {"name": filters['name']},
                                         [ObjectId], self.ID)
        self.set_successful_response(resp, constant)
diff --git a/app/api/responders/resource/environment_configs.py b/app/api/responders/resource/environment_configs.py
new file mode 100644
index 0000000..bee6a4d
--- /dev/null
+++ b/app/api/responders/resource/environment_configs.py
@@ -0,0 +1,381 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from api.validation import regex
+from api.validation.data_validate import DataValidate
+from api.responders.responder_base import ResponderBase
+from bson.objectid import ObjectId
+from datetime import datetime
+from utils.constants import EnvironmentFeatures
+from utils.inventory_mgr import InventoryMgr
+
+
class EnvironmentConfigs(ResponderBase):
    """REST responder for environment configuration documents.

    GET returns a single environment config (looked up by name) or a
    paginated projection list. POST validates a new environment config
    (including its per-section configurations) and stores it.
    """

    def __init__(self):
        super().__init__()
        self.inv = InventoryMgr()
        # environments are keyed by name, not by Mongo _id
        self.ID = "name"
        self.PROJECTION = {
            self.ID: True,
            "_id": False,
            "name": True,
            "distribution": True
        }
        self.COLLECTION = "environments_config"
        self.CONFIGURATIONS_NAMES = ["mysql", "OpenStack",
                                     "CLI", "AMQP", "Monitoring",
                                     "NFV_provider", "ACI"]
        self.OPTIONAL_CONFIGURATIONS_NAMES = ["AMQP", "Monitoring",
                                              "NFV_provider", "ACI"]

        # allowed values for the various enumerated fields, fetched from
        # the constants collection
        self.provision_types = self.\
            get_constants_by_name("environment_provision_types")
        self.env_types = self.get_constants_by_name("env_types")
        self.monitoring_types = self.\
            get_constants_by_name("environment_monitoring_types")
        self.distributions = self.\
            get_constants_by_name("distributions")
        self.mechanism_drivers = self.\
            get_constants_by_name("mechanism_drivers")
        self.operational_values = self.\
            get_constants_by_name("environment_operational_status")
        self.type_drivers = self.\
            get_constants_by_name("type_drivers")

        # validation schema for each named configuration section
        self.CONFIGURATIONS_REQUIREMENTS = {
            "mysql": {
                "name": self.require(str, mandatory=True),
                "host": self.require(str,
                                     validate=DataValidate.REGEX,
                                     requirement=[regex.IP, regex.HOSTNAME],
                                     mandatory=True),
                "password": self.require(str, mandatory=True),
                "port": self.require(int,
                                     True,
                                     DataValidate.REGEX,
                                     regex.PORT,
                                     mandatory=True),
                "user": self.require(str, mandatory=True)
            },
            "OpenStack": {
                "name": self.require(str, mandatory=True),
                "admin_token": self.require(str, mandatory=True),
                "host": self.require(str,
                                     validate=DataValidate.REGEX,
                                     requirement=[regex.IP, regex.HOSTNAME],
                                     mandatory=True),
                "port": self.require(int,
                                     True,
                                     validate=DataValidate.REGEX,
                                     requirement=regex.PORT,
                                     mandatory=True),
                "pwd": self.require(str, mandatory=True),
                "user": self.require(str, mandatory=True)
            },
            "CLI": {
                "name": self.require(str, mandatory=True),
                "host": self.require(str,
                                     validate=DataValidate.REGEX,
                                     requirement=[regex.IP, regex.HOSTNAME],
                                     mandatory=True),
                "user": self.require(str, mandatory=True),
                # either 'pwd' or 'key' must be supplied; enforced in
                # validate_environment_config
                "pwd": self.require(str),
                "key": self.require(str,
                                    validate=DataValidate.REGEX,
                                    requirement=regex.PATH)
            },
            "AMQP": {
                "name": self.require(str, mandatory=True),
                "host": self.require(str,
                                     validate=DataValidate.REGEX,
                                     requirement=[regex.IP, regex.HOSTNAME],
                                     mandatory=True),
                "password": self.require(str, mandatory=True),
                "port": self.require(int,
                                     True,
                                     validate=DataValidate.REGEX,
                                     requirement=regex.PORT,
                                     mandatory=True),
                "user": self.require(str, mandatory=True)
            },
            "Monitoring": {
                "name": self.require(str, mandatory=True),
                "config_folder": self.require(str,
                                              validate=DataValidate.REGEX,
                                              requirement=regex.PATH,
                                              mandatory=True),
                "provision": self.require(str,
                                          validate=DataValidate.LIST,
                                          requirement=self.provision_types,
                                          mandatory=True),
                "env_type": self.require(str,
                                         validate=DataValidate.LIST,
                                         requirement=self.env_types,
                                         mandatory=True),
                "api_port": self.require(int, True, mandatory=True),
                "rabbitmq_pass": self.require(str, mandatory=True),
                "rabbitmq_user": self.require(str, mandatory=True),
                "rabbitmq_port": self.require(int,
                                              True,
                                              validate=DataValidate.REGEX,
                                              requirement=regex.PORT,
                                              mandatory=True),
                "ssh_port": self.require(int,
                                         True,
                                         validate=DataValidate.REGEX,
                                         requirement=regex.PORT),
                "ssh_user": self.require(str),
                "ssh_password": self.require(str),
                "server_ip": self.require(str,
                                          validate=DataValidate.REGEX,
                                          requirement=[regex.IP, regex.HOSTNAME],
                                          mandatory=True),
                "server_name": self.require(str, mandatory=True),
                "type": self.require(str,
                                     validate=DataValidate.LIST,
                                     requirement=self.monitoring_types,
                                     mandatory=True)
            },
            "NFV_provider": {
                "name": self.require(str, mandatory=True),
                "host": self.require(str,
                                     validate=DataValidate.REGEX,
                                     requirement=[regex.IP, regex.HOSTNAME],
                                     mandatory=True),
                "nfv_token": self.require(str, mandatory=True),
                "port": self.require(int,
                                     True,
                                     DataValidate.REGEX,
                                     regex.PORT,
                                     True),
                "user": self.require(str, mandatory=True),
                "pwd": self.require(str, mandatory=True)
            },
            "ACI": {
                "name": self.require(str, mandatory=True),
                "host": self.require(str,
                                     validate=DataValidate.REGEX,
                                     requirement=[regex.IP, regex.HOSTNAME],
                                     mandatory=True),
                "user": self.require(str, mandatory=True),
                "pwd": self.require(str, mandatory=True)
            }
        }
        self.AUTH_REQUIREMENTS = {
            "view-env": self.require(list, mandatory=True),
            "edit-env": self.require(list, mandatory=True)
        }

    def on_get(self, req, resp):
        """Return one environment config by name, or a paginated list."""
        self.log.debug("Getting environment config")
        filters = self.parse_query_params(req)

        filters_requirements = {
            "name": self.require(str),
            "distribution": self.require(str, False,
                                         DataValidate.LIST,
                                         self.distributions),
            "mechanism_drivers": self.require([str, list],
                                              False,
                                              DataValidate.LIST,
                                              self.mechanism_drivers),
            "type_drivers": self.require(str, False,
                                         DataValidate.LIST,
                                         self.type_drivers),
            "user": self.require(str),
            "listen": self.require(bool, True),
            "scanned": self.require(bool, True),
            "monitoring_setup_done": self.require(bool, True),
            "operational": self.require(str, False,
                                        DataValidate.LIST,
                                        self.operational_values),
            "page": self.require(int, True),
            "page_size": self.require(int, True)
        }

        self.validate_query_data(filters, filters_requirements)
        page, page_size = self.get_pagination(filters)

        query = self.build_query(filters)

        if self.ID in query:
            environment_config = self.get_object_by_id(self.COLLECTION, query,
                                                       [ObjectId, datetime], self.ID)
            self.set_successful_response(resp, environment_config)
        else:
            objects_ids = self.get_objects_list(self.COLLECTION, query,
                                                page, page_size, self.PROJECTION)
            self.set_successful_response(resp, {'environment_configs': objects_ids})

    def build_query(self, filters):
        """Translate validated GET filters into a Mongo query dict."""
        query = {}
        filters_keys = ["name", "distribution", "type_drivers", "user",
                        "listen", "monitoring_setup_done", "scanned",
                        "operational"]
        self.update_query_with_filters(filters, filters_keys, query)
        mechanism_drivers = filters.get("mechanism_drivers")
        if mechanism_drivers:
            # a single value is accepted as shorthand for a one-element list
            if not isinstance(mechanism_drivers, list):
                mechanism_drivers = [mechanism_drivers]
            query['mechanism_drivers'] = {'$all': mechanism_drivers}

        return query

    def on_post(self, req, resp):
        """Validate and store a new environment configuration document."""
        self.log.debug("Creating a new environment config")

        error, env_config = self.get_content_from_request(req)
        if error:
            self.bad_request(error)

        environment_config_requirement = {
            "app_path": self.require(str, mandatory=True),
            "configuration": self.require(list, mandatory=True),
            "distribution": self.require(str, False, DataValidate.LIST,
                                         self.distributions, True),
            "listen": self.require(bool, True, mandatory=True),
            "user": self.require(str),
            "mechanism_drivers": self.require(list, False, DataValidate.LIST,
                                              self.mechanism_drivers, True),
            "name": self.require(str, mandatory=True),
            "operational": self.require(str, True, DataValidate.LIST,
                                        self.operational_values, mandatory=True),
            "scanned": self.require(bool, True),
            "last_scanned": self.require(str),
            "type": self.require(str, mandatory=True),
            "type_drivers": self.require(str, False, DataValidate.LIST,
                                         self.type_drivers, True),
            "enable_monitoring": self.require(bool, True),
            "monitoring_setup_done": self.require(bool, True),
            "auth": self.require(dict)
        }
        self.validate_query_data(env_config,
                                 environment_config_requirement,
                                 can_be_empty_keys=["last_scanned"]
                                 )
        self.check_and_convert_datetime("last_scanned", env_config)
        # validate the per-section configurations
        configurations = env_config['configuration']
        config_validation = self.validate_environment_config(configurations)

        if not config_validation['passed']:
            self.bad_request(config_validation['error_message'])

        err_msg = self.validate_env_config_with_supported_envs(env_config)
        if err_msg:
            self.bad_request(err_msg)

        err_msg = self.validate_env_config_with_constraints(env_config)
        if err_msg:
            self.bad_request(err_msg)

        if "auth" in env_config:
            err_msg = self.validate_data(env_config.get("auth"),
                                         self.AUTH_REQUIREMENTS)
            if err_msg:
                self.bad_request("auth error: " + err_msg)

        if "scanned" not in env_config:
            env_config["scanned"] = False

        self.write(env_config, self.COLLECTION)
        self.set_successful_response(resp,
                                     {"message": "created environment_config "
                                                 "for {0}"
                                                 .format(env_config["name"])},
                                     "201")

    def validate_environment_config(self, configurations):
        """Check the 'configuration' list of an environment config.

        Returns a dict with 'passed' (bool) and, on failure,
        'error_message' describing the first problem found.
        """
        configurations_of_names = {}
        validation = {"passed": True}
        if [config for config in configurations
                if 'name' not in config]:
            validation['passed'] = False
            validation['error_message'] = "configuration must have name"
            return validation

        unknown_configs = [config['name'] for config in configurations
                           if config['name'] not in self.CONFIGURATIONS_NAMES]
        if unknown_configs:
            validation['passed'] = False
            validation['error_message'] = 'Unknown configurations: {0}'. \
                format(' and '.join(unknown_configs))
            return validation

        for name in self.CONFIGURATIONS_NAMES:
            configs = self.get_configuration_by_name(name, configurations)
            if configs:
                if len(configs) > 1:
                    validation["passed"] = False
                    validation["error_message"] = "environment configurations can " \
                                                  "only contain one " \
                                                  "configuration for {0}".format(name)
                    return validation
                configurations_of_names[name] = configs[0]
            else:
                if name not in self.OPTIONAL_CONFIGURATIONS_NAMES:
                    validation["passed"] = False
                    validation['error_message'] = "configuration for {0} " \
                                                  "is mandatory".format(name)
                    return validation

        for name, config in configurations_of_names.items():
            error_message = self.validate_configuration(name, config)
            if error_message:
                validation['passed'] = False
                validation['error_message'] = "{0} error: {1}".\
                    format(name, error_message)
                break
            # was 'name is "CLI"': identity comparison with a str literal is
            # implementation-dependent; use equality
            if name == 'CLI':
                if 'key' not in config and 'pwd' not in config:
                    validation['passed'] = False
                    validation['error_message'] = 'CLI error: either key ' \
                                                  'or pwd must be provided'
        return validation

    def validate_env_config_with_supported_envs(self, env_config):
        """Reject configs whose features the target environment can't support.

        Returns an error string, or None if the config is acceptable.
        """
        matches = {
            'environment.distribution': env_config['distribution'],
            'environment.type_drivers': env_config['type_drivers'],
            'environment.mechanism_drivers': {'$in': env_config['mechanism_drivers']}
        }

        err_prefix = 'configuration not accepted: '
        if not self.inv.is_feature_supported_in_env(matches,
                                                    EnvironmentFeatures.SCANNING):
            return err_prefix + 'scanning is not supported in this environment'

        configs = env_config['configuration']
        if not self.inv.is_feature_supported_in_env(matches,
                                                    EnvironmentFeatures.MONITORING) \
                and self.get_configuration_by_name('Monitoring', configs):
            return err_prefix + 'monitoring is not supported in this environment, ' \
                                'please remove the Monitoring configuration'

        if not self.inv.is_feature_supported_in_env(matches,
                                                    EnvironmentFeatures.LISTENING) \
                and self.get_configuration_by_name('AMQP', configs):
            return err_prefix + 'listening is not supported in this environment, ' \
                                'please remove the AMQP configuration'

        return None

    def validate_env_config_with_constraints(self, env_config):
        """Cross-field constraints; returns an error string or None."""
        if env_config['listen'] and \
                not self.get_configuration_by_name('AMQP', env_config['configuration']):
            return 'configuration not accepted: ' \
                   'must provide AMQP configuration to listen the environment'
        return None

    def get_configuration_by_name(self, name, configurations):
        """Return all configuration sections with the given name (0, 1 or more)."""
        return [config for config in configurations if config['name'] == name]

    def validate_configuration(self, name, configuration):
        """Validate one configuration section against its schema."""
        return self.validate_data(configuration,
                                  self.CONFIGURATIONS_REQUIREMENTS[name])
diff --git a/app/api/responders/resource/inventory.py b/app/api/responders/resource/inventory.py
new file mode 100644
index 0000000..02bc486
--- /dev/null
+++ b/app/api/responders/resource/inventory.py
@@ -0,0 +1,65 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from api.responders.responder_base import ResponderBase
+from bson.objectid import ObjectId
+from datetime import datetime
+
+
class Inventory(ResponderBase):
    """Expose the inventory collection: one object by id, or a paged list."""

    def __init__(self):
        super().__init__()
        self.COLLECTION = 'inventory'
        self.ID = 'id'
        self.PROJECTION = {self.ID: True,
                           "name": True,
                           "name_path": True}

    def on_get(self, req, resp):
        """Serve GET: validate filters, then fetch one object or a page."""
        self.log.debug("Getting objects from inventory")

        filters = self.parse_query_params(req)
        requirements = {
            'env_name': self.require(str, mandatory=True),
            'id': self.require(str),
            'id_path': self.require(str),
            'parent_id': self.require(str),
            'parent_path': self.require(str),
            'sub_tree': self.require(bool, True),
            'page': self.require(int, True),
            'page_size': self.require(int, True)
        }
        self.validate_query_data(filters, requirements)
        page, page_size = self.get_pagination(filters)
        query = self.build_query(filters)
        if self.ID not in query:
            listed = self.get_objects_list(self.COLLECTION, query,
                                           page, page_size, self.PROJECTION)
            self.set_successful_response(resp, {"objects": listed})
            return
        obj = self.get_object_by_id(self.COLLECTION, query,
                                    [ObjectId, datetime], self.ID)
        self.set_successful_response(resp, obj)

    def build_query(self, filters):
        """Translate validated filters into a Mongo query dict."""
        query = {}
        self.update_query_with_filters(filters,
                                       ['parent_id', 'id_path', 'id'],
                                       query)
        parent_path = filters.get('parent_path')
        if parent_path:
            # sub_tree matches all descendants; otherwise direct children only
            suffix = "[/]?" if filters.get('sub_tree', False) else "/[^/]+$"
            query['id_path'] = {"$regex": parent_path + suffix}
        query['environment'] = filters['env_name']
        return query
diff --git a/app/api/responders/resource/links.py b/app/api/responders/resource/links.py
new file mode 100644
index 0000000..33fd432
--- /dev/null
+++ b/app/api/responders/resource/links.py
@@ -0,0 +1,76 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from api.responders.responder_base import ResponderBase
+from api.validation.data_validate import DataValidate
+from bson.objectid import ObjectId
+
+
class Links(ResponderBase):
    """Expose the links collection: one link by id, or a paged list."""

    def __init__(self):
        super().__init__()
        self.COLLECTION = 'links'
        self.ID = '_id'
        self.PROJECTION = {self.ID: True,
                           "link_name": True,
                           "link_type": True,
                           "environment": True,
                           "host": True}

    def on_get(self, req, resp):
        """Serve GET: validate filters (incl. attributes:*), fetch link(s)."""
        self.log.debug("Getting links from links")

        filters = self.parse_query_params(req)

        link_types = self.get_constants_by_name("link_types")
        link_states = self.get_constants_by_name("link_states")
        requirements = {
            'env_name': self.require(str, mandatory=True),
            'id': self.require(ObjectId, True),
            'host': self.require(str),
            'link_type': self.require(str, validate=DataValidate.LIST,
                                      requirement=link_types),
            'link_name': self.require(str),
            'source_id': self.require(str),
            'target_id': self.require(str),
            'state': self.require(str, validate=DataValidate.LIST,
                                  requirement=link_states),
            'page': self.require(int, True),
            'page_size': self.require(int, True)
        }

        # extra params of the form attributes:<name> are allowed
        self.validate_query_data(filters, requirements, r'^attributes\:\w+$')
        filters = self.change_dict_naming_convention(filters,
                                                     self.replace_colon_with_dot)
        page, page_size = self.get_pagination(filters)
        query = self.build_query(filters)
        if self.ID not in query:
            listed = self.get_objects_list(self.COLLECTION, query,
                                           page, page_size, self.PROJECTION)
            self.set_successful_response(resp, {"links": listed})
            return
        link = self.get_object_by_id(self.COLLECTION, query,
                                     [ObjectId], self.ID)
        self.set_successful_response(resp, link)

    def build_query(self, filters):
        """Translate validated filters into a Mongo query dict."""
        query = {}
        self.update_query_with_filters(filters,
                                       ['host', 'link_type', 'link_name',
                                        'source_id', 'target_id', 'state'],
                                       query)
        # copy through the dynamic attributes.* filters
        for key, value in filters.items():
            if key.startswith("attributes."):
                query[key] = value
        link_id = filters.get('id')
        if link_id:
            query[self.ID] = link_id
        query['environment'] = filters['env_name']
        return query
diff --git a/app/api/responders/resource/messages.py b/app/api/responders/resource/messages.py
new file mode 100644
index 0000000..0dda31b
--- /dev/null
+++ b/app/api/responders/resource/messages.py
@@ -0,0 +1,78 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from datetime import datetime
+
+from api.responders.responder_base import ResponderBase
+from api.validation.data_validate import DataValidate
+from bson.objectid import ObjectId
+
+
class Messages(ResponderBase):
    """Expose the messages collection: one message by id, or a paged list."""

    def __init__(self):
        super().__init__()
        self.ID = "id"
        self.COLLECTION = 'messages'
        self.PROJECTION = {self.ID: True,
                           "environment": True,
                           "source_system": True,
                           "level": True}

    def on_get(self, req, resp):
        """Serve GET: validate filters (incl. time range), fetch message(s)."""
        self.log.debug("Getting messages from messages")
        filters = self.parse_query_params(req)
        severities = self.get_constants_by_name("messages_severity")
        object_types = self.get_constants_by_name("object_types")
        requirements = {
            'env_name': self.require(str, mandatory=True),
            'source_system': self.require(str),
            'id': self.require(str),
            'level': self.require(str, validate=DataValidate.LIST,
                                  requirement=severities),
            'related_object': self.require(str),
            'related_object_type': self.require(str, validate=DataValidate.LIST,
                                                requirement=object_types),
            'start_time': self.require(str),
            'end_time': self.require(str),
            'page': self.require(int, True),
            'page_size': self.require(int, True)
        }
        self.validate_query_data(filters, requirements)
        page, page_size = self.get_pagination(filters)
        self.check_and_convert_datetime('start_time', filters)
        self.check_and_convert_datetime('end_time', filters)

        query = self.build_query(filters)
        if self.ID not in query:
            listed = self.get_objects_list(self.COLLECTION, query,
                                           page, page_size, self.PROJECTION)
            self.set_successful_response(resp, {'messages': listed})
            return
        message = self.get_object_by_id(self.COLLECTION, query,
                                        [ObjectId, datetime], self.ID)
        self.set_successful_response(resp, message)

    def build_query(self, filters):
        """Translate validated filters into a Mongo query dict."""
        query = {}
        self.update_query_with_filters(filters,
                                       ['source_system', 'id', 'level',
                                        'related_object',
                                        'related_object_type'],
                                       query)
        # build the timestamp range from whichever bounds were supplied
        time_range = {}
        start_time = filters.get('start_time')
        if start_time:
            time_range["$gte"] = start_time
        end_time = filters.get('end_time')
        if end_time:
            time_range["$lte"] = end_time
        if time_range:
            query['timestamp'] = time_range
        query['environment'] = filters['env_name']
        return query
diff --git a/app/api/responders/resource/monitoring_config_templates.py b/app/api/responders/resource/monitoring_config_templates.py
new file mode 100644
index 0000000..42d3973
--- /dev/null
+++ b/app/api/responders/resource/monitoring_config_templates.py
@@ -0,0 +1,65 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from api.validation.data_validate import DataValidate
+from api.responders.responder_base import ResponderBase
+from bson.objectid import ObjectId
+
+
class MonitoringConfigTemplates(ResponderBase):
    """Expose monitoring config templates: one by id, or a paged list."""

    def __init__(self):
        super().__init__()
        self.ID = "_id"
        self.COLLECTION = "monitoring_config_templates"
        self.PROJECTION = {self.ID: True,
                           "side": True,
                           "type": True}

    def on_get(self, req, resp):
        """Serve GET: validate filters, fetch one template or a page."""
        self.log.debug("Getting monitoring config template")

        filters = self.parse_query_params(req)

        sides = self.get_constants_by_name("monitoring_sides")
        requirements = {
            "id": self.require(ObjectId, True),
            "order": self.require(int, True),
            "side": self.require(str, validate=DataValidate.LIST,
                                 requirement=sides),
            "type": self.require(str),
            "page": self.require(int, True),
            "page_size": self.require(int, True)
        }

        self.validate_query_data(filters, requirements)

        page, page_size = self.get_pagination(filters)
        query = self.build_query(filters)
        if self.ID not in query:
            templates = self.get_objects_list(self.COLLECTION, query,
                                              page, page_size,
                                              self.PROJECTION)
            self.set_successful_response(
                resp,
                {"monitoring_config_templates": templates}
            )
            return
        template = self.get_object_by_id(self.COLLECTION, query,
                                         [ObjectId], self.ID)
        self.set_successful_response(resp, template)

    def build_query(self, filters):
        """Translate validated filters into a Mongo query dict."""
        query = {}
        self.update_query_with_filters(filters,
                                       ["order", "side", "type"],
                                       query)
        template_id = filters.get('id')
        if template_id:
            query[self.ID] = template_id
        return query
diff --git a/app/api/responders/resource/scans.py b/app/api/responders/resource/scans.py
new file mode 100644
index 0000000..c9ad2e2
--- /dev/null
+++ b/app/api/responders/resource/scans.py
@@ -0,0 +1,111 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from api.validation.data_validate import DataValidate
+from api.responders.responder_base import ResponderBase
+from bson.objectid import ObjectId
+from datetime import datetime
+
+
class Scans(ResponderBase):
    """Responder for environment scans.

    GET returns a single scan by id or a paginated list; POST submits a
    new scan request for an existing environment.
    """

    def __init__(self):
        super().__init__()
        self.COLLECTION = "scans"
        self.ID = "_id"
        self.PROJECTION = {
            self.ID: True,
            "environment": True,
            "status": True,
            "scan_completed": True
        }

    def on_get(self, req, resp):
        """Serve GET: validate filters, then fetch one scan or a page."""
        self.log.debug("Getting scans")
        filters = self.parse_query_params(req)

        scan_statuses = self.get_constants_by_name("scan_statuses")
        filters_requirements = {
            "env_name": self.require(str, mandatory=True),
            "id": self.require(ObjectId, True),
            "base_object": self.require(str),
            "status": self.require(str, False, DataValidate.LIST, scan_statuses),
            "page": self.require(int, True),
            "page_size": self.require(int, True)
        }

        self.validate_query_data(filters, filters_requirements)
        page, page_size = self.get_pagination(filters)

        query = self.build_query(filters)
        # use self.ID consistently instead of the literal "_id"
        if self.ID in query:
            scan = self.get_object_by_id(self.COLLECTION, query,
                                         [ObjectId, datetime], self.ID)
            self.set_successful_response(resp, scan)
        else:
            scans_ids = self.get_objects_list(self.COLLECTION, query,
                                              page, page_size, self.PROJECTION)
            self.set_successful_response(resp, {"scans": scans_ids})

    def on_post(self, req, resp):
        """Serve POST: validate and store a new scan request."""
        self.log.debug("Posting new scan")
        error, scan = self.get_content_from_request(req)
        if error:
            self.bad_request(error)

        scan_statuses = self.get_constants_by_name("scan_statuses")
        log_levels = self.get_constants_by_name("log_levels")

        scan_requirements = {
            "status": self.require(str,
                                   validate=DataValidate.LIST,
                                   requirement=scan_statuses,
                                   mandatory=True),
            "log_level": self.require(str,
                                      validate=DataValidate.LIST,
                                      requirement=log_levels),
            "clear": self.require(bool, True),
            "scan_only_inventory": self.require(bool, True),
            "scan_only_links": self.require(bool, True),
            "scan_only_cliques": self.require(bool, True),
            "environment": self.require(str, mandatory=True),
            "inventory": self.require(str),
            "object_id": self.require(str)
        }
        self.validate_query_data(scan, scan_requirements)
        # at most one scan_only_* flag may be set per request
        scan_only_keys = [k for k in scan if k.startswith("scan_only_")]
        if len(scan_only_keys) > 1:
            self.bad_request("multiple scan_only_* flags found: {0}. "
                             "only one of them can be set."
                             .format(", ".join(scan_only_keys)))

        env_name = scan["environment"]
        if not self.check_environment_name(env_name):
            # fixed typo in user-facing error message ("unkown")
            self.bad_request("unknown environment: " + env_name)

        scan["scan_completed"] = False
        scan["submit_timestamp"] = datetime.now()
        self.write(scan, self.COLLECTION)
        self.set_successful_response(resp,
                                     {"message": "created a new scan for "
                                                 "environment {0}"
                                                 .format(env_name)},
                                     "201")

    def build_query(self, filters):
        """Translate validated GET filters into a Mongo query dict."""
        query = {}
        filters_keys = ["status"]
        self.update_query_with_filters(filters, filters_keys, query)
        base_object = filters.get("base_object")
        if base_object:
            query['object_id'] = base_object
        _id = filters.get("id")
        if _id:
            query[self.ID] = _id
        query['environment'] = filters['env_name']
        return query
diff --git a/app/api/responders/resource/scheduled_scans.py b/app/api/responders/resource/scheduled_scans.py
new file mode 100644
index 0000000..0588cd0
--- /dev/null
+++ b/app/api/responders/resource/scheduled_scans.py
@@ -0,0 +1,113 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from api.validation.data_validate import DataValidate
+from api.responders.responder_base import ResponderBase
+from bson.objectid import ObjectId
+from datetime import datetime
+
+
class ScheduledScans(ResponderBase):
    """Responder for recurring (scheduled) environment scans.

    GET returns a single scheduled scan by id or a paginated list;
    POST submits a new scheduled scan for an existing environment.
    """

    def __init__(self):
        super().__init__()
        self.COLLECTION = "scheduled_scans"
        self.ID = "_id"
        self.PROJECTION = {
            self.ID: True,
            "environment": True,
            "scheduled_timestamp": True,
            "freq": True
        }
        # allowed recurrence frequencies
        self.SCAN_FREQ = [
            "YEARLY",
            "MONTHLY",
            "WEEKLY",
            "DAILY",
            "HOURLY"
        ]

    def on_get(self, req, resp):
        """Serve GET: validate filters, fetch one scheduled scan or a page."""
        self.log.debug("Getting scheduled scans")
        filters = self.parse_query_params(req)

        filters_requirements = {
            "environment": self.require(str, mandatory=True),
            "id": self.require(ObjectId, True),
            "freq": self.require(str, False,
                                 DataValidate.LIST, self.SCAN_FREQ),
            "page": self.require(int, True),
            "page_size": self.require(int, True)
        }

        self.validate_query_data(filters, filters_requirements)
        page, page_size = self.get_pagination(filters)

        query = self.build_query(filters)
        if self.ID in query:
            scheduled_scan = self.get_object_by_id(self.COLLECTION, query,
                                                   [ObjectId, datetime],
                                                   self.ID)
            self.set_successful_response(resp, scheduled_scan)
        else:
            scheduled_scan_ids = self.get_objects_list(self.COLLECTION, query,
                                                       page, page_size,
                                                       self.PROJECTION,
                                                       [datetime])
            self.set_successful_response(resp,
                                         {"scheduled_scans": scheduled_scan_ids})

    def on_post(self, req, resp):
        """Serve POST: validate and store a new scheduled scan."""
        self.log.debug("Posting new scheduled scan")
        error, scheduled_scan = self.get_content_from_request(req)
        if error:
            self.bad_request(error)

        log_levels = self.get_constants_by_name("log_levels")
        scheduled_scan_requirements = {
            "environment": self.require(str, mandatory=True),
            "scan_only_links": self.require(bool, True),
            "scan_only_cliques": self.require(bool, True),
            "scan_only_inventory": self.require(bool, True),
            "freq": self.require(str, validate=DataValidate.LIST,
                                 requirement=self.SCAN_FREQ,
                                 mandatory=True),
            "log_level": self.require(str,
                                      validate=DataValidate.LIST,
                                      requirement=log_levels),
            "clear": self.require(bool, True),
            "submit_timestamp": self.require(str, mandatory=True)
        }
        self.validate_query_data(scheduled_scan, scheduled_scan_requirements)
        self.check_and_convert_datetime("submit_timestamp", scheduled_scan)
        # at most one scan_only_* flag may be set per request
        scan_only_keys = [k for k in scheduled_scan if k.startswith("scan_only_")]
        if len(scan_only_keys) > 1:
            self.bad_request("multiple scan_only_* flags found: {0}. "
                             "only one of them can be set."
                             .format(", ".join(scan_only_keys)))

        env_name = scheduled_scan["environment"]
        if not self.check_environment_name(env_name):
            # fixed typo in user-facing error message ("unkown")
            self.bad_request("unknown environment: " + env_name)

        self.write(scheduled_scan, self.COLLECTION)
        self.set_successful_response(resp,
                                     {"message": "created a new scheduled scan for "
                                                 "environment {0}"
                                                 .format(env_name)},
                                     "201")

    def build_query(self, filters):
        """Translate validated GET filters into a Mongo query dict."""
        query = {}
        filters_keys = ["freq", "environment"]
        self.update_query_with_filters(filters, filters_keys, query)

        _id = filters.get("id")
        if _id:
            # use self.ID consistently instead of the literal "_id"
            query[self.ID] = _id
        return query
diff --git a/app/api/responders/responder_base.py b/app/api/responders/responder_base.py
new file mode 100644
index 0000000..479a897
--- /dev/null
+++ b/app/api/responders/responder_base.py
@@ -0,0 +1,223 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import json
+import re
+from urllib import parse
+
+from dateutil import parser
+from pymongo import errors
+
+from api.exceptions import exceptions
+from api.validation.data_validate import DataValidate
+from utils.dict_naming_converter import DictNamingConverter
+from utils.inventory_mgr import InventoryMgr
+from utils.logging.full_logger import FullLogger
+from utils.string_utils import jsonify, stringify_object_values_by_types
+
+
+class ResponderBase(DataValidate, DictNamingConverter):
+ UNCHANGED_COLLECTIONS = ["monitoring_config_templates",
+ "environments_config",
+ "messages",
+ "scheduled_scans"]
+
+ def __init__(self):
+ super().__init__()
+ self.log = FullLogger()
+ self.inv = InventoryMgr()
+
+ def set_successful_response(self, resp, body="", status="200"):
+ if not isinstance(body, str):
+ try:
+ body = jsonify(body)
+ except Exception as e:
+ self.log.exception(e)
+ raise ValueError("The response body should be a string")
+ resp.status = status
+ resp.body = body
+
+ def set_error_response(self, title="", code="", message="", body=""):
+ if body:
+ raise exceptions.CalipsoApiException(code, body, message)
+ body = {
+ "error": {
+ "message": message,
+ "code": code,
+ "title": title
+ }
+ }
+ body = jsonify(body)
+ raise exceptions.CalipsoApiException(code, body, message)
+
+ def not_found(self, message="Requested resource not found"):
+ self.set_error_response("Not Found", "404", message)
+
+ def conflict(self,
+ message="The posted data conflicts with the existing data"):
+ self.set_error_response("Conflict", "409", message)
+
+ def bad_request(self, message="Invalid request content"):
+ self.set_error_response("Bad Request", "400", message)
+
+ def unauthorized(self, message="Request requires authorization"):
+ self.set_error_response("Unauthorized", "401", message)
+
+ def validate_query_data(self, data, data_requirements,
+ additional_key_reg=None,
+ can_be_empty_keys=[]):
+ error_message = self.validate_data(data, data_requirements,
+ additional_key_reg,
+ can_be_empty_keys)
+ if error_message:
+ self.bad_request(error_message)
+
+ def check_and_convert_datetime(self, time_key, data):
+ time = data.get(time_key)
+
+ if time:
+ time = time.replace(' ', '+')
+ try:
+ data[time_key] = parser.parse(time)
+ except Exception:
+ self.bad_request("{0} must follow ISO 8610 date and time format,"
+ "YYYY-MM-DDThh:mm:ss.sss+hhmm".format(time_key))
+
+ def check_environment_name(self, env_name):
+ query = {"name": env_name}
+ objects = self.read("environments_config", query)
+ if not objects:
+ return False
+ return True
+
+ def get_object_by_id(self, collection, query, stringify_types, id):
+ objs = self.read(collection, query)
+ if not objs:
+ env_name = query.get("environment")
+ if env_name and \
+ not self.check_environment_name(env_name):
+ self.bad_request("unkown environment: " + env_name)
+ self.not_found()
+ obj = objs[0]
+ stringify_object_values_by_types(obj, stringify_types)
+ if id is "_id":
+ obj['id'] = obj.get('_id')
+ return obj
+
+ def get_objects_list(self, collection, query, page, page_size,
+ projection, stringify_types=None):
+ objects = self.read(collection, query, projection, page, page_size)
+ if not objects:
+ env_name = query.get("environment")
+ if env_name and \
+ not self.check_environment_name(env_name):
+ self.bad_request("unkown environment: " + env_name)
+ self.not_found()
+ for obj in objects:
+ if "id" not in obj and "_id" in obj:
+ obj["id"] = str(obj["_id"])
+ if "_id" in obj:
+ del obj["_id"]
+ if stringify_types:
+ stringify_object_values_by_types(objects, stringify_types)
+
+ return objects
+
+ def parse_query_params(self, req):
+ query_string = req.query_string
+ if not query_string:
+ return {}
+ try:
+ query_params = dict((k, v if len(v) > 1 else v[0])
+ for k, v in
+ parse.parse_qs(query_string,
+ keep_blank_values=True,
+ strict_parsing=True).items())
+ return query_params
+ except ValueError as e:
+ self.bad_request(str("Invalid query string: {0}".format(str(e))))
+
+ def replace_colon_with_dot(self, s):
+ return s.replace(':', '.')
+
+ def get_pagination(self, filters):
+ page_size = filters.get('page_size', 1000)
+ page = filters.get('page', 0)
+ return page, page_size
+
+ def update_query_with_filters(self, filters, filters_keys, query):
+ for filter_key in filters_keys:
+ filter = filters.get(filter_key)
+ if filter is not None:
+ query.update({filter_key: filter})
+
+ def get_content_from_request(self, req):
+ error = ""
+ content = ""
+ if not req.content_length:
+ error = "No data found in the request body"
+ return error, content
+
+ data = req.stream.read()
+ content_string = data.decode()
+ try:
+ content = json.loads(content_string)
+ if not isinstance(content, dict):
+ error = "The data in the request body must be an object"
+ except Exception:
+ error = "The request can not be fulfilled due to bad syntax"
+
+ return error, content
+
+ def get_collection_by_name(self, name):
+ if name in self.UNCHANGED_COLLECTIONS:
+ return self.inv.db[name]
+ return self.inv.collections[name]
+
+ def get_constants_by_name(self, name):
+ constants = self.get_collection_by_name("constants").\
+ find_one({"name": name})
+ # consts = [d['value'] for d in constants['data']]
+ consts = []
+ if not constants:
+ self.log.error('constant type: ' + name +
+ 'no constants exists')
+ return consts
+ for d in constants['data']:
+ try:
+ consts.append(d['value'])
+ except KeyError:
+ self.log.error('constant type: ' + name +
+ ': no "value" key for data: ' + str(d))
+ return consts
+
+ def read(self, collection, matches={}, projection=None, skip=0, limit=1000):
+ collection = self.get_collection_by_name(collection)
+ skip *= limit
+ query = collection.find(matches, projection).skip(skip).limit(limit)
+ return list(query)
+
+ def write(self, document, collection="inventory"):
+ try:
+ self.get_collection_by_name(collection).\
+ insert_one(document)
+ except errors.DuplicateKeyError as e:
+ self.conflict("The key value ({0}) already exists".
+ format(', '.
+ join(self.get_duplicate_key_values(e.details['errmsg']))))
+ except errors.WriteError as e:
+ self.bad_request('Failed to create resource for {0}'.format(str(e)))
+
+ def get_duplicate_key_values(self, err_msg):
+ return ["'{0}'".format(key) for key in re.findall(r'"([^",]+)"', err_msg)]
+
+ def aggregate(self, pipeline, collection):
+ collection = self.get_collection_by_name(collection)
+ data = collection.aggregate(pipeline)
+ return list(data)
diff --git a/app/api/server.py b/app/api/server.py
new file mode 100755
index 0000000..3fef46e
--- /dev/null
+++ b/app/api/server.py
@@ -0,0 +1,74 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import argparse
+
+from gunicorn.app.base import BaseApplication
+from gunicorn.six import iteritems
+
+from api.app import App
+
+
+# This class is used to integrate Gunicorn with falcon application
+class StandaloneApplication(BaseApplication):
+ def __init__(self, app, options=None):
+ self.options = options
+ self.application = app
+ super().__init__()
+
+ def load_config(self):
+ config = dict([(key, value) for key, value in iteritems(self.options)
+ if key in self.cfg.settings and value is not None])
+ for key, value in iteritems(config):
+ self.cfg.set(key.lower(), value)
+
+ def load(self):
+ return self.application
+
+
+def get_args():
+ parser = argparse.ArgumentParser(description="Parameters for Calipso API")
+ parser.add_argument("-m", "--mongo_config", nargs="?", type=str,
+ default="",
+ help="name of config file with mongo access "
+ "details")
+ parser.add_argument("--ldap_config", nargs="?", type=str,
+ default="",
+ help="name of the config file with ldap server "
+ "config details")
+ parser.add_argument("-l", "--loglevel", nargs="?", type=str,
+ default="INFO",
+ help="logging level \n(default: 'INFO')")
+ parser.add_argument("-b", "--bind", nargs="?", type=str,
+ default="127.0.0.1:8000",
+ help="binding address of the API server\n"
+ "(default 127.0.0.1:8000)")
+ parser.add_argument("-y", "--inventory", nargs="?", type=str,
+ default="inventory",
+ help="name of inventory collection \n" +
+ "(default: 'inventory')")
+ parser.add_argument("-t", "--token-lifetime", nargs="?", type=int,
+ default=86400,
+ help="lifetime of the token")
+ args = parser.parse_args()
+ return args
+
+
+if __name__ == "__main__":
+ args = get_args()
+ # Gunicorn configuration
+ options = {
+ "bind": args.bind
+ }
+ app = App(args.mongo_config,
+ args.ldap_config,
+ args.loglevel,
+ args.inventory,
+ args.token_lifetime).get_app()
+ StandaloneApplication(app, options).run()
diff --git a/app/api/validation/__init__.py b/app/api/validation/__init__.py
new file mode 100644
index 0000000..1e85a2a
--- /dev/null
+++ b/app/api/validation/__init__.py
@@ -0,0 +1,10 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
diff --git a/app/api/validation/data_validate.py b/app/api/validation/data_validate.py
new file mode 100644
index 0000000..6928c4b
--- /dev/null
+++ b/app/api/validation/data_validate.py
@@ -0,0 +1,185 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import re
+
+
+from api.validation import regex
+
+
+class DataValidate:
+ LIST = "list"
+ REGEX = "regex"
+
+ def __init__(self):
+ super().__init__()
+ self.BOOL_CONVERSION = {
+ "true": True,
+ "1": True,
+ 1: True,
+ "false": False,
+ "0": False,
+ 0: False
+ }
+ self.TYPES_CUSTOMIZED_NAMES = {
+ 'str': 'string',
+ 'bool': 'boolean',
+ 'int': 'integer',
+ 'ObjectId': 'MongoDB ObjectId'
+ }
+ self.VALIDATE_SWITCHER = {
+ self.LIST: self.validate_value_in_list,
+ self.REGEX: regex.validate
+ }
+
+ def validate_type(self, obj, t, convert_to_type):
+ if convert_to_type:
+ # user may input different values for the
+ # boolean True or False, convert the number or
+ # the string to corresponding python bool values
+ if t == bool:
+ if isinstance(obj, str):
+ obj = obj.lower()
+ if obj in self.BOOL_CONVERSION:
+ return self.BOOL_CONVERSION[obj]
+ return None
+ try:
+ obj = t(obj)
+ except Exception:
+ return None
+ return obj
+ else:
+ return obj if isinstance(obj, t) else None
+
+ # get the requirement for validation
+ # this requirement object will be used in validate_data method
+ @staticmethod
+ def require(types, convert_to_type=False, validate=None,
+ requirement=None, mandatory=False, error_messages=None):
+ if error_messages is None:
+ error_messages = {}
+ return {
+ "types": types,
+ "convert_to_type": convert_to_type,
+ "validate": validate,
+ "requirement": requirement,
+ "mandatory": mandatory,
+ "error_messages": error_messages
+ }
+
+ def validate_data(self, data, requirements,
+ additional_key_re=None,
+ can_be_empty_keys=[]):
+
+ illegal_keys = [key for key in data.keys()
+ if key not in requirements.keys()]
+
+ if additional_key_re:
+ illegal_keys = [key for key in illegal_keys
+ if not re.match(additional_key_re, key)]
+
+ if illegal_keys:
+ return 'Invalid key(s): {0}'.format(' and '.join(illegal_keys))
+
+ for key, requirement in requirements.items():
+ value = data.get(key)
+ error_messages = requirement['error_messages']
+
+ if not value and value is not False and value is not 0:
+ if key in data and key not in can_be_empty_keys:
+ return "Invalid data: value of {0} key doesn't exist ".format(key)
+ # check if the key is mandatory
+ mandatory_error = error_messages.get('mandatory')
+ error_message = self.mandatory_check(key,
+ requirement['mandatory'],
+ mandatory_error)
+ if error_message:
+ return error_message
+ continue
+
+ # check the required types
+ error_message = self.types_check(requirement["types"],
+ requirement["convert_to_type"],
+ key,
+ value, data,
+ error_messages.get('types'))
+ if error_message:
+ return error_message
+
+ # after the types check, the value of the key may be changed
+ # get the value again
+ value = data[key]
+ validate = requirement.get('validate')
+ if not validate:
+ continue
+ requirement_value = requirement.get('requirement')
+ # validate the data against the requirement
+ req_error = error_messages.get("requirement")
+ error_message = self.requirement_check(key, value, validate,
+ requirement_value,
+ req_error)
+ if error_message:
+ return error_message
+ return None
+
+ @staticmethod
+ def mandatory_check(key, mandatory, error_message):
+ if mandatory:
+ return error_message if error_message \
+ else "{} must be specified".format(key)
+ return None
+
+ def types_check(self, requirement_types, convert_to_type, key,
+ value, data, error_message):
+ if not isinstance(requirement_types, list):
+ requirement_types = [requirement_types]
+ for requirement_type in requirement_types:
+ converted_val = self.validate_type(
+ value, requirement_type, convert_to_type
+ )
+ if converted_val is not None:
+ if convert_to_type:
+ # value has been converted, update the data
+ data[key] = converted_val
+ return None
+ required_types = self.get_type_names(requirement_types)
+ return error_message if error_message else \
+ "{0} must be {1}".format(key, " or ".join(required_types))
+
+ def requirement_check(self, key, value, validate,
+ requirement, error_message):
+ return self.VALIDATE_SWITCHER[validate](key, value, requirement,
+ error_message)
+
+ @staticmethod
+ def validate_value_in_list(key, value,
+ required_list, error_message):
+ if not isinstance(value, list):
+ value = [value]
+
+ if [v for v in value if v not in required_list]:
+ return error_message if error_message else\
+ "The possible value of {0} is {1}".\
+ format(key, " or ".join(required_list))
+ return None
+
+ # get customized type names from type names array
+ def get_type_names(self, types):
+ return [self.get_type_name(t) for t in types]
+
+ # get customized type name from string <class 'type'>
+ def get_type_name(self, t):
+ t = str(t)
+ a = t.split(" ")[1]
+ type_name = a.rstrip(">").strip("'")
+ # strip the former module names
+ type_name = type_name.split('.')[-1]
+ if type_name in self.TYPES_CUSTOMIZED_NAMES.keys():
+ type_name = self.TYPES_CUSTOMIZED_NAMES[type_name]
+ return type_name
diff --git a/app/api/validation/regex.py b/app/api/validation/regex.py
new file mode 100644
index 0000000..2684636
--- /dev/null
+++ b/app/api/validation/regex.py
@@ -0,0 +1,57 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import re
+
+PORT = "port number"
+IP = "ipv4/ipv6 address"
+HOSTNAME = "host name"
+PATH = "path"
+
+_PORT_REGEX = re.compile('^0*(?:6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|'
+ '6[0-4][0-9]{3}|[1-5][0-9]{4}|[1-9][0-9]{1,3}|[0-9])$')
+
+_HOSTNAME_REGEX = re.compile('^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])'
+ '(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$')
+
+_PATH_REGEX = re.compile('^(\/){1}([^\/\0]+(\/)?)+$')
+
+_IPV4_REGEX = re.compile('^(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}$')
+_IPV6_REGEX = re.compile('^(((?=.*(::))(?!.*\3.+\3))\3?|[\dA-F]{1,4}:)([\dA-F]{1,4}(\3|:\b)|\2){5}(([\dA-F]{1,4}'
+ '(\3|:\b|$)|\2){2}|(((2[0-4]|1\d|[1-9])?\d|25[0-5])\.?\b){4})$')
+
+_REGEX_MAP = {
+ PORT: _PORT_REGEX,
+ HOSTNAME: _HOSTNAME_REGEX,
+ PATH: _PATH_REGEX,
+ IP: [_IPV4_REGEX, _IPV6_REGEX]
+}
+
+
+def validate(key, value, regex_names, error_message=None):
+ if not isinstance(regex_names, list):
+ regex_names = [regex_names]
+
+ for regex_name in regex_names:
+ regexes = _REGEX_MAP[regex_name]
+
+ if not isinstance(regexes, list):
+ regexes = [regexes]
+
+ try:
+ value = str(value)
+ match_regexes = [regex for regex in regexes
+ if regex.match(value)]
+ if match_regexes:
+ return None
+ except:
+ pass
+
+ return error_message if error_message else \
+ '{0} must be a valid {1}'.format(key, " or ".join(regex_names))
diff --git a/app/config/events.json b/app/config/events.json
new file mode 100644
index 0000000..c067754
--- /dev/null
+++ b/app/config/events.json
@@ -0,0 +1,58 @@
+{
+ "handlers_package": "discover.events",
+ "queues": [
+ {
+ "queue": "notifications.nova",
+ "exchange": "nova"
+ },
+ {
+ "queue": "notifications.neutron",
+ "exchange": "neutron"
+ },
+ {
+ "queue": "notifications.neutron",
+ "exchange": "dhcp_agent"
+ },
+ {
+ "queue": "notifications.info",
+ "exchange": "info"
+ }
+ ],
+ "event_handlers": {
+ "compute.instance.create.end": "EventInstanceAdd",
+ "compute.instance.rebuild.end": "EventInstanceAdd",
+ "compute.instance.update": "EventInstanceUpdate",
+ "compute.instance.delete.end": "EventInstanceDelete",
+ "network.create.start": "EventNetworkAdd",
+ "network.create.end": "EventNetworkAdd",
+ "network.update": "EventNetworkUpdate",
+ "network.update.start": "EventNetworkUpdate",
+ "network.update.end": "EventNetworkUpdate",
+ "network.delete": "EventNetworkDelete",
+ "network.delete.start": "EventNetworkDelete",
+ "network.delete.end": "EventNetworkDelete",
+ "subnet.create": "EventSubnetAdd",
+ "subnet.create.start": "EventSubnetAdd",
+ "subnet.create.end": "EventSubnetAdd",
+ "subnet.update": "EventSubnetUpdate",
+ "subnet.update.start": "EventSubnetUpdate",
+ "subnet.update.end": "EventSubnetUpdate",
+ "subnet.delete": "EventSubnetDelete",
+ "subnet.delete.start": "EventSubnetDelete",
+ "subnet.delete.end": "EventSubnetDelete",
+ "port.create.end": "EventPortAdd",
+ "port.update.end": "EventPortUpdate",
+ "port.delete.end": "EventPortDelete",
+ "router.create": "EventRouterAdd",
+ "router.create.start": "EventRouterAdd",
+ "router.create.end": "EventRouterAdd",
+ "router.update": "EventRouterUpdate",
+ "router.update.start": "EventRouterUpdate",
+ "router.update.end": "EventRouterUpdate",
+ "router.delete": "EventRouterDelete",
+ "router.delete.start": "EventRouterDelete",
+ "router.delete.end": "EventRouterDelete",
+ "router.interface.create": "EventInterfaceAdd",
+ "router.interface.delete": "EventInterfaceDelete"
+ }
+} \ No newline at end of file
diff --git a/app/config/scanners.json b/app/config/scanners.json
new file mode 100644
index 0000000..3c17918
--- /dev/null
+++ b/app/config/scanners.json
@@ -0,0 +1,370 @@
+{
+ "scanners_package": "discover.fetchers",
+ "scanners": {
+ "ScanAggregate": [
+ {
+ "type": "host_ref",
+ "fetcher": "DbFetchAggregateHosts"
+ }
+ ],
+ "ScanAggregatesRoot": [
+ {
+ "type": "aggregate",
+ "fetcher": "DbFetchAggregates",
+ "children_scanner": "ScanAggregate"
+ }
+ ],
+ "ScanAvailabilityZone": [
+ {
+ "type": "host",
+ "fetcher": "DbFetchAZNetworkHosts",
+ "children_scanner": "ScanHost"
+ }
+ ],
+ "ScanAvailabilityZonesRoot": [
+ {
+ "type": "availability_zone",
+ "fetcher": "DbFetchAvailabilityZones",
+ "children_scanner": "ScanAvailabilityZone"
+ }
+ ],
+ "ScanEnvironment": [
+ {
+ "type": "regions_folder",
+ "fetcher": {
+ "folder": true,
+ "types_name": "regions",
+ "parent_type": "environment"
+ },
+ "children_scanner": "ScanRegionsRoot"
+ },
+ {
+ "type": "projects_folder",
+ "fetcher": {
+ "folder": true,
+ "types_name": "projects",
+ "parent_type": "environment"
+ },
+ "children_scanner": "ScanProjectsRoot"
+ }
+ ],
+ "ScanHostNetworkAgentsRoot": [
+ {
+ "type": "network_agent",
+ "fetcher": "DbFetchHostNetworkAgents"
+ }
+ ],
+ "ScanHost": [
+ {
+ "_comment": "creating only top folder for vServices",
+ "type": "vservices_folder",
+ "fetcher": {
+ "folder": true,
+ "types_name": "vservices",
+ "parent_type": "host",
+ "text": "vServices"
+ },
+ "children_scanner": "ScanInstancesRoot"
+ },
+ {
+ "type": "vservice",
+ "fetcher": "CliFetchHostVservices"
+ },
+ {
+ "_comment": "fetch vService vNICs from host for efficiency",
+ "type": "vnic",
+ "fetcher": "CliFetchVserviceVnics"
+ },
+ {
+ "type": "instances_folder",
+ "fetcher": {
+ "folder": true,
+ "types_name": "instances",
+ "parent_type": "host"
+ },
+ "children_scanner": "ScanInstancesRoot"
+ },
+ {
+ "type": "pnics_folder",
+ "fetcher": {
+ "folder": true,
+ "types_name": "pnics",
+ "parent_type": "host",
+ "text": "pNICs"
+ },
+ "environment_condition": {
+ "mechanism_drivers": [
+ "OVS",
+ "LXB"
+ ]
+ },
+ "children_scanner": "ScanPnicsRoot"
+ },
+ {
+ "type": "vconnectors_folder",
+ "fetcher": {
+ "folder": true,
+ "types_name": "vconnectors",
+ "parent_type": "host",
+ "text": "vConnectors"
+ },
+ "children_scanner": "ScanVconnectorsRoot"
+ },
+ {
+ "type": "vedges_folder",
+ "fetcher": {
+ "folder": true,
+ "types_name": "vedges",
+ "parent_type": "host",
+ "text": "vEdges"
+ },
+ "children_scanner": "ScanVedgesRoot"
+ },
+ {
+ "type": "network_agents_folder",
+ "fetcher": {
+ "folder": true,
+ "types_name": "network_agents",
+ "parent_type": "host",
+ "text": "Network agents"
+ },
+ "children_scanner": "ScanHostNetworkAgentsRoot"
+ }
+ ],
+ "ScanInstance": [
+ {
+ "type": "vnics_folder",
+ "fetcher": {
+ "folder": true,
+ "types_name": "vnics",
+ "parent_type": "instance",
+ "text": "vNICs"
+ },
+ "children_scanner": "ScanVnicsRoot"
+ }
+ ],
+ "ScanInstancesRoot": [
+ {
+ "type": "instance",
+ "fetcher": "ApiFetchHostInstances",
+ "children_scanner": "ScanInstance"
+ }
+ ],
+ "ScanNetworkAgentsRoot": [
+ {
+ "type": "network_agent",
+ "fetcher": "DbFetchHostNetworkAgents"
+ }
+ ],
+ "ScanNetwork": [
+ {
+ "type": "ports_folder",
+ "fetcher": {
+ "folder": true,
+ "types_name": "ports",
+ "parent_type": "network"
+ }
+ },
+ {
+ "type": "network_services_folder",
+ "fetcher": {
+ "folder": true,
+ "types_name": "network_services",
+ "parent_type": "network",
+ "text": "Network vServices"
+ }
+ }
+ ],
+ "ScanNetworksRoot": [
+ {
+ "type": "network",
+ "fetcher": "ApiFetchNetworks",
+ "children_scanner": "ScanNetwork"
+ },
+ {
+ "type": "port",
+ "fetcher": "ApiFetchPorts"
+ }
+ ],
+ "ScanOteps": [
+ {
+ "type": "otep",
+ "environment_condition": {
+ "mechanism_drivers": "OVS"
+ },
+ "fetcher": "DbFetchOteps"
+ },
+ {
+ "type": "otep",
+ "environment_condition": {
+ "mechanism_drivers": "VPP"
+ },
+ "fetcher": "DbFetchOteps"
+ },
+ {
+ "type": "otep",
+ "environment_condition": {
+ "mechanism_drivers": "LXB"
+ },
+ "fetcher": "CliFetchOtepsLxb"
+ }
+ ],
+ "ScanPnicsRoot": [
+ {
+ "type": "pnic",
+ "environment_condition": {
+ "mechanism_drivers": [
+ "OVS",
+ "LXB"
+ ]
+ },
+ "fetcher": "CliFetchHostPnics",
+ "children_scanner": "ScanHostPnic"
+ }
+ ],
+ "ScanHostPnic": [
+ {
+ "type": "pnic",
+ "fetcher": "AciFetchSwitchPnic"
+ }
+ ],
+ "ScanProject": [
+ {
+ "type": "availability_zone",
+ "fetcher": "ApiFetchAvailabilityZones"
+ },
+ {
+ "type": "host",
+ "fetcher": "ApiFetchProjectHosts",
+ "children_scanner": "ScanHost"
+ }
+ ],
+ "ScanProjectsRoot": [
+ {
+ "type": "project",
+ "fetcher": "ApiFetchProjects",
+ "object_id_to_use_in_child": "name",
+ "children_scanner": "ScanProject"
+ }
+ ],
+ "ScanRegion": [
+ {
+ "type": "aggregates_folder",
+ "fetcher": {
+ "folder": true,
+ "types_name": "aggregates",
+ "parent_type": "region"
+ },
+ "children_scanner": "ScanAggregatesRoot"
+ },
+ {
+ "type": "network",
+ "fetcher": "ApiFetchNetworks",
+ "children_scanner": "ScanNetwork"
+ },
+ {
+ "type": "port",
+ "fetcher": "ApiFetchPorts"
+ }
+ ],
+ "ScanRegionsRoot": [
+ {
+ "type": "region",
+ "fetcher": "ApiFetchRegions",
+ "children_scanner": "ScanRegion"
+ }
+ ],
+ "ScanVconnectorsRoot": [
+ {
+ "type": "vconnector",
+ "environment_condition": {
+ "mechanism_drivers": "OVS"
+ },
+ "fetcher": "CliFetchVconnectorsOvs"
+ },
+ {
+ "type": "vconnector",
+ "environment_condition": {
+ "mechanism_drivers": "LXB"
+ },
+ "fetcher": "CliFetchVconnectorsLxb",
+ "children_scanner": "ScanOteps"
+ },
+ {
+ "type": "vconnector",
+ "environment_condition": {
+ "mechanism_drivers": "VPP"
+ },
+ "fetcher": "CliFetchVconnectorsVpp"
+ }
+ ],
+ "ScanVedgePnicsRoot": [
+ {
+ "type": "pnics_folder",
+ "fetcher": {
+ "folder": true,
+ "types_name": "pnics",
+ "parent_type": "vedge",
+ "text": "pNICs"
+ },
+ "environment_condition": {
+ "mechanism_drivers": "VPP"
+ },
+ "children_scanner": "ScanVppPnicsRoot"
+ }
+ ],
+ "ScanVedgesRoot": [
+ {
+ "type": "vedge",
+ "fetcher": "DbFetchVedgesOvs",
+ "environment_condition": {
+ "mechanism_drivers": "OVS"
+ },
+ "children_scanner": "ScanOteps"
+ },
+ {
+ "type": "vedge",
+ "fetcher": "DbFetchVedgesVpp",
+ "environment_condition": {
+ "mechanism_drivers": "VPP"
+ },
+ "children_scanner": "ScanVedgePnicsRoot"
+ }
+ ],
+ "ScanVnicsRoot": [
+ {
+ "type": "vnic",
+ "environment_condition": {
+ "mechanism_drivers": [
+ "OVS",
+ "LXB"
+ ]
+ },
+ "fetcher": "CliFetchInstanceVnics"
+ },
+ {
+ "type": "vnic",
+ "environment_condition": {
+ "mechanism_drivers": "VPP"
+ },
+ "fetcher": "CliFetchInstanceVnicsVpp"
+ }
+ ],
+ "ScanVppPnicsRoot": [
+ {
+ "type": "pnic",
+ "fetcher": "CliFetchHostPnicsVpp",
+ "environment_condition": {
+ "mechanism_drivers": "VPP"
+ },
+ "children_scanner": "ScanOteps"
+ }
+ ],
+ "ScanVservicesRoot": [
+ {
+ "type": "vservice",
+ "fetcher": "CliFetchHostVservices"
+ }
+ ]
+ }
+}
diff --git a/app/discover/__init__.py b/app/discover/__init__.py
new file mode 100644
index 0000000..1e85a2a
--- /dev/null
+++ b/app/discover/__init__.py
@@ -0,0 +1,10 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
diff --git a/app/discover/clique_finder.py b/app/discover/clique_finder.py
new file mode 100644
index 0000000..9b5aad2
--- /dev/null
+++ b/app/discover/clique_finder.py
@@ -0,0 +1,174 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from bson.objectid import ObjectId
+
+from discover.fetcher import Fetcher
+from utils.inventory_mgr import InventoryMgr
+
+
+class CliqueFinder(Fetcher):
+ def __init__(self):
+ super().__init__()
+ self.inv = InventoryMgr()
+ self.inventory = self.inv.inventory_collection
+ self.links = self.inv.collections["links"]
+ self.clique_types = self.inv.collections["clique_types"]
+ self.clique_types_by_type = {}
+ self.clique_constraints = self.inv.collections["clique_constraints"]
+ self.cliques = self.inv.collections["cliques"]
+
+ def find_cliques_by_link(self, links_list):
+ return self.links.find({'links': {'$in': links_list}})
+
+ def find_links_by_source(self, db_id):
+ return self.links.find({'source': db_id})
+
+ def find_links_by_target(self, db_id):
+ return self.links.find({'target': db_id})
+
+ def find_cliques(self):
+ self.log.info("scanning for cliques")
+ clique_types = self.get_clique_types().values()
+ for clique_type in clique_types:
+ self.find_cliques_for_type(clique_type)
+ self.log.info("finished scanning for cliques")
+
+ def get_clique_types(self):
+ if not self.clique_types_by_type:
+ clique_types = self.clique_types.find({"environment": self.get_env()})
+ default_clique_types = \
+ self.clique_types.find({'environment': 'ANY'})
+ for clique_type in clique_types:
+ focal_point_type = clique_type['focal_point_type']
+ self.clique_types_by_type[focal_point_type] = clique_type
+ # if some focal point type does not have an explicit definition in
+ # clique_types for this specific environment, use the default
+ # clique type definition with environment=ANY
+ for clique_type in default_clique_types:
+ focal_point_type = clique_type['focal_point_type']
+ if focal_point_type not in clique_types:
+ self.clique_types_by_type[focal_point_type] = clique_type
+ return self.clique_types_by_type
+
+ def find_cliques_for_type(self, clique_type):
+ type = clique_type["focal_point_type"]
+ constraint = self.clique_constraints.find_one({"focal_point_type": type})
+ constraints = [] if not constraint else constraint["constraints"]
+ object_type = clique_type["focal_point_type"]
+ objects_for_focal_point_type = self.inventory.find({
+ "environment": self.get_env(),
+ "type": object_type
+ })
+ for o in objects_for_focal_point_type:
+ self.construct_clique_for_focal_point(o, clique_type, constraints)
+
+ def rebuild_clique(self, clique):
+ focal_point_db_id = clique['focal_point']
+ constraint = self.clique_constraints.find_one({"focal_point_type": type})
+ constraints = [] if not constraint else constraint["constraints"]
+ clique_types = self.get_clique_types()
+ o = self.inventory.find_one({'_id': focal_point_db_id})
+ clique_type = clique_types[o['type']]
+ new_clique = self.construct_clique_for_focal_point(o, clique_type, constraints)
+ if not new_clique:
+ self.cliques.delete({'_id': clique['_id']})
+
+ def construct_clique_for_focal_point(self, o, clique_type, constraints):
+ # keep a hash of nodes in clique that were visited for each type
+ # start from the focal point
+ nodes_of_type = {o["type"]: {str(o["_id"]): 1}}
+ clique = {
+ "environment": self.env,
+ "focal_point": o["_id"],
+ "focal_point_type": o["type"],
+ "links": [],
+ "links_detailed": [],
+ "constraints": {}
+ }
+ for c in constraints:
+ val = o[c] if c in o else None
+ clique["constraints"][c] = val
+ for link_type in clique_type["link_types"]:
+ # check if it's backwards
+ link_type_parts = link_type.split('-')
+ link_type_parts.reverse()
+ link_type_reversed = '-'.join(link_type_parts)
+ matches = self.links.find_one({
+ "environment": self.env,
+ "link_type": link_type_reversed
+ })
+ reversed = True if matches else False
+ if reversed:
+ link_type = link_type_reversed
+ from_type = link_type[:link_type.index("-")]
+ to_type = link_type[link_type.index("-") + 1:]
+ side_to_match = 'target' if reversed else 'source'
+ other_side = 'target' if not reversed else 'source'
+ match_type = to_type if reversed else from_type
+ if match_type not in nodes_of_type.keys():
+ continue
+ other_side_type = to_type if not reversed else from_type
+ for match_point in nodes_of_type[match_type].keys():
+ matches = self.links.find({
+ "environment": self.env,
+ "link_type": link_type,
+ side_to_match: ObjectId(match_point)
+ })
+ for link in matches:
+ id = link["_id"]
+ if id in clique["links"]:
+ continue
+ if not self.check_constraints(clique, link):
+ continue
+ clique["links"].append(id)
+ clique["links_detailed"].append(link)
+ other_side_point = str(link[other_side])
+ if other_side_type not in nodes_of_type:
+ nodes_of_type[other_side_type] = {}
+ nodes_of_type[other_side_type][other_side_point] = 1
+
+ # after adding the links to the clique, create/update the clique
+ if not clique["links"]:
+ return None
+ focal_point_obj = self.inventory.find({"_id": clique["focal_point"]})
+ if not focal_point_obj:
+ return None
+ focal_point_obj = focal_point_obj[0]
+ focal_point_obj["clique"] = True
+ focal_point_obj.pop("_id", None)
+ self.cliques.update_one(
+ {
+ "environment": self.get_env(),
+ "focal_point": clique["focal_point"]
+ },
+ {'$set': clique},
+ upsert=True)
+ clique_document = self.inventory.update_one(
+ {"_id": clique["focal_point"]},
+ {'$set': focal_point_obj},
+ upsert=True)
+ return clique_document
+
+ def check_constraints(self, clique, link):
+ if "attributes" not in link:
+ return True
+ attributes = link["attributes"]
+ constraints = clique["constraints"]
+ for c in constraints:
+ if c not in attributes:
+ continue # constraint not applicable to this link
+ constr_values = constraints[c]
+ link_val = attributes[c]
+ if isinstance(constr_values, list):
+ if link_val not in constr_values:
+ return False
+ elif link_val != constraints[c]:
+ return False
+ return True
diff --git a/app/discover/configuration.py b/app/discover/configuration.py
new file mode 100644
index 0000000..c7bc0c0
--- /dev/null
+++ b/app/discover/configuration.py
@@ -0,0 +1,70 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from utils.inventory_mgr import InventoryMgr
+from utils.logging.full_logger import FullLogger
+from utils.mongo_access import MongoAccess
+from utils.singleton import Singleton
+
+
class Configuration(metaclass=Singleton):
    """Access to the per-environment configuration stored in MongoDB.

    An environment must be selected via use_env() before get_env_config(),
    get_configuration(), get() or has_network_plugin() return meaningful
    values.
    """

    def __init__(self, environments_collection="environments_config"):
        super().__init__()
        self.db_client = MongoAccess()
        self.db = MongoAccess.db
        self.inv = InventoryMgr()
        # Collection holding one document per environment
        self.collection = self.inv.collections.get(environments_collection)
        self.env_name = None
        self.environment = None
        self.configuration = None
        self.log = FullLogger()

    def use_env(self, env_name):
        """Select the named environment as the active one.

        :raises ValueError: if zero or multiple environments match env_name
        """
        self.log.info("Configuration taken from environment: {}".format(env_name))
        self.env_name = env_name

        envs = self.collection.find({"name": env_name})
        if envs.count() == 0:
            raise ValueError("use_env: could not find matching environment")
        if envs.count() > 1:
            raise ValueError("use_env: found multiple matching environments")

        self.environment = envs[0]
        self.configuration = self.environment["configuration"]

    def get_env_config(self):
        """Return the full environment document (None before use_env())."""
        return self.environment

    def get_configuration(self):
        """Return the 'configuration' list of the active environment."""
        return self.configuration

    def get_env_name(self):
        """Return the name of the active environment (None before use_env())."""
        return self.env_name

    def update_env(self, values):
        """Merge the given values into the active environment's DB document."""
        self.collection.update_one({"name": self.env_name},
                                   {'$set': MongoAccess.encode_mongo_keys(values)})

    def get(self, component):
        """Return the configuration entry whose 'name' equals component.

        :raises ValueError: if no environment was selected yet
        :raises IndexError: if zero or multiple entries match
        """
        try:
            matches = [c for c in self.configuration if c["name"] == component]
        except AttributeError:
            raise ValueError("Configuration: environment not set")
        if len(matches) == 0:
            raise IndexError("No matches for configuration component: " + component)
        if len(matches) > 1:
            raise IndexError("Found multiple matches for configuration component: " + component)
        return matches[0]

    def has_network_plugin(self, name):
        """Return True if the environment's mechanism_drivers contains name.

        Bug fix: when 'mechanism_drivers' was missing, the original logged an
        error and then fell through to a guaranteed KeyError; now it returns
        False after logging.
        """
        if 'mechanism_drivers' not in self.environment:
            self.log.error('Environment missing mechanism_drivers definition: ' +
                           self.environment['name'])
            return False
        mechanism_drivers = self.environment['mechanism_drivers']
        return name in mechanism_drivers
diff --git a/app/discover/event_handler.py b/app/discover/event_handler.py
new file mode 100644
index 0000000..12199f8
--- /dev/null
+++ b/app/discover/event_handler.py
@@ -0,0 +1,45 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.events.event_base import EventBase, EventResult
+from utils.inventory_mgr import InventoryMgr
+from utils.logging.full_logger import FullLogger
+from utils.util import ClassResolver
+
+
class EventHandler:
    """Dispatches notification events to handlers registered per event type.

    Handler instances are resolved dynamically by class name (see
    discover_handlers) and stored keyed by event type name.
    """

    def __init__(self, env: str, inventory_collection: str):
        super().__init__()
        self.inv = InventoryMgr()
        self.inv.set_collections(inventory_collection)
        self.env = env
        self.log = FullLogger(env=env)
        # event type name -> EventBase instance
        self.handlers = {}

    def discover_handlers(self, handlers_package: str, event_handlers: dict):
        """Instantiate and register the given event handlers.

        :param handlers_package: package in which handler classes are resolved
        :param event_handlers: mapping of event type name -> handler class name
        :raises TypeError: if the mapping is empty or a resolved handler is
            not an EventBase subclass
        """
        if not event_handlers:
            raise TypeError("Event handlers list is empty")

        for event_name, handler_name in event_handlers.items():
            handler = ClassResolver.get_instance_of_class(handler_name, handlers_package)
            if not issubclass(handler.__class__, EventBase):
                raise TypeError("Event handler '{}' is not a subclass of EventBase"
                                .format(handler_name))
            if event_name in self.handlers:
                self.log.warning("A handler is already registered for event type '{}'. Overwriting"
                                 .format(event_name))
            self.handlers[event_name] = handler

    def handle(self, event_name: str, notification: dict) -> EventResult:
        """Route a notification to the handler registered for event_name.

        Bug fix: the original logged "no handler" but then still indexed
        self.handlers[event_name], raising KeyError; now a failed EventResult
        is returned instead.
        """
        if event_name not in self.handlers:
            self.log.info("No handler is able to process event of type '{}'"
                          .format(event_name))
            return EventResult(result=False, retry=False)
        return self.handlers[event_name].handle(self.env, notification)
+
diff --git a/app/discover/event_manager.py b/app/discover/event_manager.py
new file mode 100644
index 0000000..ce40ce4
--- /dev/null
+++ b/app/discover/event_manager.py
@@ -0,0 +1,265 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import argparse
+import signal
+import time
+from multiprocessing import Process, Manager as SharedManager
+
+import os
+
+from discover.events.listeners.default_listener import DefaultListener
+from discover.events.listeners.listener_base import ListenerBase
+from discover.manager import Manager
+from utils.constants import OperationalStatus, EnvironmentFeatures
+from utils.inventory_mgr import InventoryMgr
+from utils.logging.file_logger import FileLogger
+from utils.mongo_access import MongoAccess
+
+
class EventManager(Manager):
    """Supervisor that runs one event-listener subprocess per environment.

    Periodically polls the environments collection, starts listeners for
    environments marked scanned+listen, stops listeners for environments that
    no longer qualify, and mirrors each listener's 'operational' status back
    into the database.
    """

    # After EventManager receives a SIGTERM,
    # it will try to terminate all listeners.
    # After this delay, a SIGKILL will be sent
    # to each listener that is still alive.
    SIGKILL_DELAY = 5  # in seconds

    DEFAULTS = {
        "mongo_config": "",
        "collection": "environments_config",
        "inventory": "inventory",
        "interval": 5,
        "loglevel": "INFO"
    }

    # Maps an environment's 'distribution' value to its listener class
    LISTENERS = {
        'Mirantis-6.0': DefaultListener,
        'Mirantis-7.0': DefaultListener,
        'Mirantis-8.0': DefaultListener,
        'RDO-Mitaka': DefaultListener,
        'RDO-Liberty': DefaultListener,
    }

    def __init__(self):
        self.args = self.get_args()
        super().__init__(log_directory=self.args.log_directory,
                         mongo_config_file=self.args.mongo_config)
        self.db_client = None
        self.interval = None
        # Each entry: {"process": Process, "name": env name, "vars": shared dict}
        self.processes = []

    @staticmethod
    def get_args():
        """Define and parse the command-line arguments."""
        parser = argparse.ArgumentParser()
        parser.add_argument("-m", "--mongo_config", nargs="?", type=str,
                            default=EventManager.DEFAULTS["mongo_config"],
                            help="Name of config file with MongoDB server access details")
        parser.add_argument("-c", "--collection", nargs="?", type=str,
                            default=EventManager.DEFAULTS["collection"],
                            help="Environments collection to read from "
                                 "(default: '{}')"
                            .format(EventManager.DEFAULTS["collection"]))
        parser.add_argument("-y", "--inventory", nargs="?", type=str,
                            default=EventManager.DEFAULTS["inventory"],
                            help="name of inventory collection "
                                 "(default: '{}')"
                            .format(EventManager.DEFAULTS["inventory"]))
        parser.add_argument("-i", "--interval", nargs="?", type=float,
                            default=EventManager.DEFAULTS["interval"],
                            help="Interval between collection polls "
                                 "(must be more than {} seconds. Default: {})"
                            .format(EventManager.MIN_INTERVAL,
                                    EventManager.DEFAULTS["interval"]))
        parser.add_argument("-l", "--loglevel", nargs="?", type=str,
                            default=EventManager.DEFAULTS["loglevel"],
                            help="Logging level \n(default: '{}')"
                            .format(EventManager.DEFAULTS["loglevel"]))
        parser.add_argument("-d", "--log_directory", nargs="?", type=str,
                            default=FileLogger.LOG_DIRECTORY,
                            help="File logger directory \n(default: '{}')"
                            .format(FileLogger.LOG_DIRECTORY))
        args = parser.parse_args()
        return args

    def configure(self):
        """Connect to MongoDB and apply parsed arguments (called by Manager)."""
        self.db_client = MongoAccess()
        self.inv = InventoryMgr()
        self.inv.set_collections(self.args.inventory)
        self.collection = self.db_client.db[self.args.collection]
        # Enforce the minimal supported polling interval
        self.interval = max(self.MIN_INTERVAL, self.args.interval)
        self.log.set_loglevel(self.args.loglevel)

        self.log.info("Started EventManager with following configuration:\n"
                      "Mongo config file path: {0}\n"
                      "Collection: {1}\n"
                      "Polling interval: {2} second(s)"
                      .format(self.args.mongo_config, self.collection.name, self.interval))

    def get_listener(self, env: str):
        """Return the listener class for the env's distribution, or None."""
        env_config = self.inv.get_env_config(env)
        return self.LISTENERS.get(env_config.get('distribution'))

    def listen_to_events(self, listener: ListenerBase, env_name: str, process_vars: dict):
        """Subprocess entry point: run the listener for one environment."""
        listener.listen({
            'env': env_name,
            'mongo_config': self.args.mongo_config,
            'inventory': self.args.inventory,
            'loglevel': self.args.loglevel,
            'environments_collection': self.args.collection,
            'process_vars': process_vars
        })

    def _get_alive_processes(self):
        """Return the tracked processes that are still running."""
        return [p for p in self.processes
                if p['process'].is_alive()]

    # Get all processes that should be terminated
    def _get_stuck_processes(self, stopped_processes: list):
        # Fix: the original used "map(lambda p: ...)" whose parameter shadowed
        # the comprehension variable 'p' and rebuilt the name list on every
        # iteration; build the set of names once instead (same result).
        stopped_names = {stopped.get("name") for stopped in stopped_processes}
        return [p for p in self._get_alive_processes()
                if p.get("name") in stopped_names]

    # Give processes time to finish and kill them if they are stuck
    def _kill_stuck_processes(self, process_list: list):
        if self._get_stuck_processes(process_list):
            time.sleep(self.SIGKILL_DELAY)
        for process in self._get_stuck_processes(process_list):
            self.log.info("Killing event listener '{0}'".format(process.get("name")))
            os.kill(process.get("process").pid, signal.SIGKILL)

    def _get_operational(self, process: dict) -> OperationalStatus:
        """Read a listener's shared 'operational' variable.

        Access to the shared dict may raise if the child process (and its
        SharedManager) already died; report such listeners as STOPPED.
        """
        try:
            return process.get("vars", {})\
                .get("operational")
        except Exception:
            # Fix: was a bare 'except:', which would also swallow
            # KeyboardInterrupt/SystemExit.
            self.log.error("Event listener '{0}' is unreachable".format(process.get("name")))
            return OperationalStatus.STOPPED

    def _update_operational_status(self, status: OperationalStatus):
        """Persist the given status for all listeners currently reporting it."""
        self.collection.update_many(
            {"name": {"$in": [process.get("name")
                              for process
                              in self.processes
                              if self._get_operational(process) == status]}},
            {"$set": {"operational": status.value}}
        )

    def update_operational_statuses(self):
        """Sync every listener's operational status to the database."""
        self._update_operational_status(OperationalStatus.RUNNING)
        self._update_operational_status(OperationalStatus.ERROR)
        self._update_operational_status(OperationalStatus.STOPPED)

    def cleanup_processes(self):
        """Stop listeners of no-longer-eligible envs; drop dead processes."""
        # Query for envs that are no longer eligible for listening
        # (scanned == false and/or listen == false)
        dropped_envs = [env['name']
                        for env
                        in self.collection
                            .find(filter={'$or': [{'scanned': False},
                                                  {'listen': False}]},
                                  projection=['name'])]

        live_processes = []
        stopped_processes = []
        # Drop already terminated processes
        # and for all others perform filtering
        for process in self._get_alive_processes():
            # If env no longer qualifies for listening,
            # stop the listener.
            # Otherwise, keep the process
            if process['name'] in dropped_envs:
                self.log.info("Stopping event listener '{0}'".format(process.get("name")))
                process['process'].terminate()
                stopped_processes.append(process)
            else:
                live_processes.append(process)

        self._kill_stuck_processes(stopped_processes)

        # Update all 'operational' statuses
        # for processes stopped on the previous step
        self.collection.update_many(
            {"name": {"$in": [process.get("name")
                              for process
                              in stopped_processes]}},
            {"$set": {"operational": OperationalStatus.STOPPED.value}}
        )

        # Keep the living processes
        self.processes = live_processes

    def do_action(self):
        """Main loop: keep listener subprocesses in sync with eligible envs."""
        try:
            while True:
                # Update "operational" field in db before removing dead
                # processes, so we keep the last statuses of env listeners
                # before they were terminated
                self.update_operational_statuses()

                # Perform a cleanup that filters out all processes
                # that are no longer eligible for listening
                self.cleanup_processes()

                envs = self.collection.find({'scanned': True, 'listen': True})

                # Iterate over environments without an attached event listener
                listened_envs = {process["name"] for process in self.processes}
                for env in (e for e in envs if e['name'] not in listened_envs):
                    env_name = env['name']

                    if not self.inv.is_feature_supported(env_name, EnvironmentFeatures.LISTENING):
                        self.log.error("Listening is not supported for env '{}'".format(env_name))
                        # Fix: pymongo Collection.update() is deprecated;
                        # update_one with $set is the equivalent call.
                        self.collection.update_one(
                            {"name": env_name},
                            {"$set": {"operational": OperationalStatus.ERROR.value}})
                        continue

                    listener = self.get_listener(env_name)
                    if not listener:
                        self.log.error("No listener is defined for env '{}'".format(env_name))
                        self.collection.update_one(
                            {"name": env_name},
                            {"$set": {"operational": OperationalStatus.ERROR.value}})
                        continue

                    # A dict shared between the event manager and the newly
                    # created env listener
                    process_vars = SharedManager().dict()
                    p = Process(target=self.listen_to_events,
                                args=(listener, env_name, process_vars,),
                                name=env_name)
                    self.processes.append({"process": p, "name": env_name, "vars": process_vars})
                    self.log.info("Starting event listener '{0}'".format(env_name))
                    p.start()

                # Make sure statuses are up-to-date before going to sleep
                self.update_operational_statuses()
                time.sleep(self.interval)
        finally:
            # Fetch operational statuses before terminating listeners.
            # Shared variables won't be available after termination.
            stopping_processes = [process.get("name")
                                  for process
                                  in self.processes
                                  if self._get_operational(process) != OperationalStatus.ERROR]
            self._update_operational_status(OperationalStatus.ERROR)

            # Gracefully stop processes
            for process in self._get_alive_processes():
                self.log.info("Stopping event listener '{0}'".format(process.get("name")))
                process.get("process").terminate()

            # Kill all remaining processes
            self._kill_stuck_processes(self.processes)

            # Updating operational statuses for stopped processes
            self.collection.update_many(
                {"name": {"$in": stopping_processes}},
                {"$set": {"operational": OperationalStatus.STOPPED.value}}
            )
+
# Script entry point: run the event manager's main loop (Manager.run).
if __name__ == "__main__":
    EventManager().run()
diff --git a/app/discover/events/__init__.py b/app/discover/events/__init__.py
new file mode 100644
index 0000000..1e85a2a
--- /dev/null
+++ b/app/discover/events/__init__.py
@@ -0,0 +1,10 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
diff --git a/app/discover/events/event_base.py b/app/discover/events/event_base.py
new file mode 100644
index 0000000..6b3b290
--- /dev/null
+++ b/app/discover/events/event_base.py
@@ -0,0 +1,36 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from abc import abstractmethod, ABC
+
+from discover.fetcher import Fetcher
+from utils.inventory_mgr import InventoryMgr
+
+
class EventResult:
    """Outcome of handling one event notification."""

    def __init__(self,
                 result: bool, retry: bool = False, message: str = None,
                 related_object: str = None,
                 display_context: str = None):
        # Whether the event was processed successfully
        self.result = result
        # Whether the caller should retry this event later
        self.retry = retry
        # Optional human-readable details
        self.message = message
        # Id of the object the event relates to, if any
        self.related_object = related_object
        # Context id used for display/UI purposes, if any
        self.display_context = display_context
+
+
class EventBase(Fetcher, ABC):
    """Abstract base class for all event handlers.

    Provides access to the shared inventory manager; subclasses implement
    handle() and return an EventResult.
    """

    def __init__(self):
        super().__init__()
        # Shared inventory manager used by all handlers
        self.inv = InventoryMgr()

    @abstractmethod
    def handle(self, env, values) -> EventResult:
        """Process one event notification for environment *env*."""
diff --git a/app/discover/events/event_delete_base.py b/app/discover/events/event_delete_base.py
new file mode 100644
index 0000000..1cf94c3
--- /dev/null
+++ b/app/discover/events/event_delete_base.py
@@ -0,0 +1,60 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import re
+
+from bson.objectid import ObjectId
+
+from discover.clique_finder import CliqueFinder
+from discover.events.event_base import EventBase, EventResult
+
+
class EventDeleteBase(EventBase):
    """Common deletion logic shared by all 'delete' event handlers.

    Removes an object document together with its clique, its links, and its
    child documents, and rebuilds cliques that used the removed links.
    """

    def delete_handler(self, env, object_id, object_type) -> EventResult:
        """Delete the object with the given id and all related documents.

        :param env: environment name
        :param object_id: inventory id of the object to remove
        :param object_type: object type name (used only for log messages)
        """
        item = self.inv.get_by_id(env, object_id)
        if not item:
            self.log.info('{0} document is not found, aborting {0} delete'.format(object_type))
            return EventResult(result=False, retry=False)

        db_id = ObjectId(item['_id'])
        id_path = item['id_path'] + '/'

        # remove related clique
        clique_finder = CliqueFinder()
        self.inv.delete('cliques', {'focal_point': db_id})

        # keep related links to do rebuild of cliques using them
        matched_links_source = clique_finder.find_links_by_source(db_id)
        matched_links_target = clique_finder.find_links_by_target(db_id)

        # (renamed 'l' -> 'link': single-letter 'l' is ambiguous, PEP 8 E741)
        links_using_object = []
        links_using_object.extend([link['_id'] for link in matched_links_source])
        links_using_object.extend([link['_id'] for link in matched_links_target])

        # find cliques using these links and rebuild them
        if links_using_object:
            matched_cliques = clique_finder.find_cliques_by_link(links_using_object)
            for clique in matched_cliques:
                clique_finder.rebuild_clique(clique)

        # remove all related links
        self.inv.delete('links', {'source': db_id})
        self.inv.delete('links', {'target': db_id})

        # remove object itself
        self.inv.delete('inventory', {'_id': db_id})

        # remove children: everything whose id_path starts with this object's
        # NOTE(review): id_path is not regex-escaped — assumed to contain no
        # regex metacharacters; confirm against id_path generation.
        regexp = re.compile('^' + id_path)
        self.inv.delete('inventory', {'id_path': {'$regex': regexp}})
        return EventResult(result=True,
                           related_object=object_id,
                           display_context=object_id)
diff --git a/app/discover/events/event_instance_add.py b/app/discover/events/event_instance_add.py
new file mode 100644
index 0000000..4dd2b20
--- /dev/null
+++ b/app/discover/events/event_instance_add.py
@@ -0,0 +1,45 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.events.event_base import EventBase, EventResult
+from discover.scanner import Scanner
+
+
class EventInstanceAdd(EventBase):
    """Handle notification of a newly created instance (VM)."""

    def handle(self, env, values):
        """Scan the new instance, then refresh the host's links and cliques.

        Aborts with retry=True when the host's instances folder is not in
        the inventory yet.
        """
        payload = values['payload']
        instance_id = payload['instance_id']
        host_id = payload['host']

        # The host's instances folder serves as the parent document.
        instances_root = self.inv.get_by_id(env, host_id + '-instances')
        if not instances_root:
            self.log.info('instances root not found, aborting instance add')
            return EventResult(result=False, retry=True)

        # Scan only the newly added instance under the instances folder.
        scanner = Scanner()
        scanner.set_env(env)
        scanner.scan("ScanInstancesRoot", instances_root,
                     limit_to_child_id=instance_id,
                     limit_to_child_type='instance')
        scanner.scan_from_queue()

        # Re-scan the host's vconnectors/vedges, then rebuild links/cliques.
        host = self.inv.get_by_id(env, host_id)
        scanner.scan('ScanHost', host,
                     limit_to_child_type=['vconnectors_folder',
                                          'vedges_folder'])
        scanner.scan_from_queue()
        scanner.scan_links()
        scanner.scan_cliques()

        return EventResult(result=True,
                           related_object=instance_id,
                           display_context=instance_id)
diff --git a/app/discover/events/event_instance_delete.py b/app/discover/events/event_instance_delete.py
new file mode 100644
index 0000000..714d0c7
--- /dev/null
+++ b/app/discover/events/event_instance_delete.py
@@ -0,0 +1,18 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.events.event_delete_base import EventDeleteBase
+
+
class EventInstanceDelete(EventDeleteBase):
    """Handle notification of an instance (VM) deletion."""

    def handle(self, env, values):
        """Delegate to the generic delete handler for the instance document."""
        return self.delete_handler(env,
                                   values['payload']['instance_id'],
                                   "instance")
diff --git a/app/discover/events/event_instance_update.py b/app/discover/events/event_instance_update.py
new file mode 100644
index 0000000..6231c30
--- /dev/null
+++ b/app/discover/events/event_instance_update.py
@@ -0,0 +1,55 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import re
+
+from discover.events.event_base import EventBase, EventResult
+from discover.events.event_instance_add import EventInstanceAdd
+from discover.events.event_instance_delete import EventInstanceDelete
+
+
class EventInstanceUpdate(EventBase):
    """Handle instance update events (renames, or delegated add/delete)."""

    def handle(self, env, values):
        """Update the instance's name, or delegate to add/delete handlers.

        State transitions 'building'->'active' and 'active'->'deleted' are
        delegated to the add/delete handlers respectively; a 'building'
        state is ignored.
        """
        payload = values['payload']
        instance_id = payload['instance_id']
        state = payload['state']
        old_state = payload['old_state']

        # Still building: nothing to update yet.
        if state == 'building':
            return EventResult(result=False, retry=False)

        # Build finished: treat as an instance-add event.
        if state == 'active' and old_state == 'building':
            return EventInstanceAdd().handle(env, values)

        # Instance removed: treat as an instance-delete event.
        if state == 'deleted' and old_state == 'active':
            return EventInstanceDelete().handle(env, values)

        new_name = payload['display_name']
        instance = self.inv.get_by_id(env, instance_id)
        if not instance:
            self.log.info('instance document not found, aborting instance update')
            return EventResult(result=False, retry=True)

        old_name_path = instance['name_path']
        instance['name'] = new_name
        instance['object_name'] = new_name
        # Replace the last path component with the new name.
        instance['name_path'] = \
            old_name_path[:old_name_path.rindex('/') + 1] + new_name

        # TBD: fix name_path for descendants
        if old_name_path != instance['name_path']:
            self.inv.values_replace(
                {"environment": env,
                 "name_path": {"$regex": r"^" + re.escape(old_name_path + '/')}},
                {"name_path": {"from": old_name_path,
                               "to": instance['name_path']}})
        self.inv.set(instance)
        return EventResult(result=True,
                           related_object=instance_id,
                           display_context=instance_id)
diff --git a/app/discover/events/event_interface_add.py b/app/discover/events/event_interface_add.py
new file mode 100644
index 0000000..a06ad14
--- /dev/null
+++ b/app/discover/events/event_interface_add.py
@@ -0,0 +1,139 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import time
+
+from functools import partial
+
+from discover.events.event_base import EventBase, EventResult
+from discover.events.event_port_add import EventPortAdd
+from discover.events.event_subnet_add import EventSubnetAdd
+from discover.fetchers.api.api_access import ApiAccess
+from discover.fetchers.api.api_fetch_regions import ApiFetchRegions
+from discover.fetchers.cli.cli_fetch_host_vservice import CliFetchHostVservice
+from discover.find_links_for_vservice_vnics import FindLinksForVserviceVnics
+from discover.scanner import Scanner
+from utils.util import decode_router_id, encode_router_id
+
+
class EventInterfaceAdd(EventBase):
    """Handle addition of a router interface: create the port and vnic
    documents, update the router document, and rebuild links/cliques."""

    def __init__(self):
        super().__init__()
        # Seconds to wait before retrying a failed vnic fetch (the vnic may
        # appear on the host slightly after the notification arrives)
        self.delay = 2

    def add_gateway_port(self, env, project, network_name, router_doc, host_id):
        """Fetch the router's gateway port and add its port + vnic documents.

        Mutates router_doc in place by setting 'gw_port_id'.
        """
        fetcher = CliFetchHostVservice()
        fetcher.set_env(env)
        router_id = router_doc['id']
        router = fetcher.get_vservice(host_id, router_id)
        device_id = decode_router_id(router_id)
        router_doc['gw_port_id'] = router['gw_port_id']

        # add gateway port documents.
        port_doc = EventSubnetAdd().add_port_document(env, router_doc['gw_port_id'], project_name=project)

        mac_address = port_doc['mac_address'] if port_doc else None

        # add vnic document
        host = self.inv.get_by_id(env, host_id)

        add_vnic_document = partial(EventPortAdd().add_vnic_document,
                                    env=env,
                                    host=host,
                                    object_id=device_id,
                                    object_type='router',
                                    network_name=network_name,
                                    router_name=router_doc['name'],
                                    mac_address=mac_address)

        ret = add_vnic_document()
        if not ret:
            # The vnic may not exist yet on the host; wait and retry once.
            time.sleep(self.delay)
            self.log.info("Wait %s second, and then fetch vnic document again." % self.delay)
            add_vnic_document()

    def update_router(self, env, project, network_id, network_name, router_doc, host_id):
        """Attach network_id to the router document and ensure its gateway
        port document exists, then persist the router document."""
        if router_doc:
            if 'network' in router_doc:
                if network_id not in router_doc['network']:
                    router_doc['network'].append(network_id)
            else:
                router_doc['network'] = [network_id]

            # if gw_port_id is None, add gateway port first.
            if not router_doc.get('gw_port_id'):
                self.add_gateway_port(env, project, network_name, router_doc, host_id)
            else:
                # check the gateway port document, add it if document does not exist.
                port = self.inv.get_by_id(env, router_doc['gw_port_id'])
                if not port:
                    self.add_gateway_port(env, project, network_name, router_doc, host_id)
            self.inv.set(router_doc)
        else:
            self.log.info("router document not found, aborting interface adding")

    def handle(self, env, values):
        """Process a router-interface-add notification.

        Aborts with retry=True when the network document is not found.
        """
        interface = values['payload']['router_interface']
        project = values['_context_project_name']
        # publisher_id has the form "network.<host>"; strip the prefix
        host_id = values["publisher_id"].replace("network.", "", 1)
        port_id = interface['port_id']
        subnet_id = interface['subnet_id']
        router_id = encode_router_id(host_id, interface['id'])

        network_document = self.inv.get_by_field(env, "network", "subnet_ids", subnet_id, get_single=True)
        if not network_document:
            self.log.info("network document not found, aborting interface adding")
            return EventResult(result=False, retry=True)
        network_name = network_document['name']
        network_id = network_document['id']

        # add router-interface port document.
        # Region data is needed by the API fetchers; fetch it once if absent.
        if len(ApiAccess.regions) == 0:
            fetcher = ApiFetchRegions()
            fetcher.set_env(env)
            fetcher.get(None)
        port_doc = EventSubnetAdd().add_port_document(env, port_id, network_name=network_name)

        mac_address = port_doc['mac_address'] if port_doc else None

        # add vnic document
        host = self.inv.get_by_id(env, host_id)
        router_doc = self.inv.get_by_id(env, router_id)

        # NOTE(review): router_doc['name'] is read here without a None check,
        # although update_router() below guards against a missing router
        # document — confirm the router document always exists at this point.
        add_vnic_document = partial(EventPortAdd().add_vnic_document,
                                    env=env,
                                    host=host,
                                    object_id=interface['id'],
                                    object_type='router',
                                    network_name=network_name,
                                    router_name=router_doc['name'],
                                    mac_address=mac_address)

        ret = add_vnic_document()
        if ret is False:
            # try it again to fetch vnic document, vnic will be created a little bit late before CLI fetch.
            time.sleep(self.delay)
            self.log.info("Wait {} seconds, and then fetch vnic document again.".format(self.delay))
            add_vnic_document()

        # update the router document: gw_port_id, network.
        self.update_router(env, project, network_id, network_name, router_doc, host_id)

        # update vservice-vnic, vnic-network,
        FindLinksForVserviceVnics().add_links(search={"parent_id": router_id})
        scanner = Scanner()
        scanner.set_env(env)

        scanner.scan_cliques()
        self.log.info("Finished router-interface added.")

        return EventResult(result=True,
                           related_object=interface['id'],
                           display_context=network_id)
diff --git a/app/discover/events/event_interface_delete.py b/app/discover/events/event_interface_delete.py
new file mode 100644
index 0000000..b1df978
--- /dev/null
+++ b/app/discover/events/event_interface_delete.py
@@ -0,0 +1,40 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.events.event_base import EventResult
+from discover.events.event_delete_base import EventDeleteBase
+from discover.events.event_port_delete import EventPortDelete
+from utils.util import encode_router_id
+
+
class EventInterfaceDelete(EventDeleteBase):
    """Handle removal of a router interface."""

    def handle(self, env, values):
        """Detach the network from the router and delete the port document."""
        interface = values['payload']['router_interface']
        port_id = interface['port_id']
        host_id = values["publisher_id"].replace("network.", "", 1)
        router_id = encode_router_id(host_id, interface['id'])

        port_doc = self.inv.get_by_id(env, port_id)
        if not port_doc:
            self.log.info("Interface deleting handler: port document not found.")
            return EventResult(result=False, retry=False)
        network_id = port_doc['network_id']

        # Detach the network from the router document, if still attached.
        router_doc = self.inv.get_by_id(env, router_id)
        if router_doc and network_id in router_doc.get('network', []):
            router_doc['network'].remove(network_id)
            self.inv.set(router_doc)

        # Delete the port document itself and report the interface as the
        # related object.
        result = EventPortDelete().delete_port(env, port_id)
        result.related_object = interface['id']
        result.display_context = network_id
        return result
diff --git a/app/discover/events/event_metadata_parser.py b/app/discover/events/event_metadata_parser.py
new file mode 100644
index 0000000..5d09376
--- /dev/null
+++ b/app/discover/events/event_metadata_parser.py
@@ -0,0 +1,75 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from typing import List, Tuple
+
+from utils.metadata_parser import MetadataParser
+
+
+class EventMetadataParser(MetadataParser):
+    """Parse and validate event-handler metadata files.
+
+    Extracts the handlers package name, the AMQP queues list and the
+    event-name -> handler-class mapping used by the event manager.
+    """
+
+    # metadata file keys
+    HANDLERS_PACKAGE = 'handlers_package'
+    QUEUES = 'queues'
+    EVENT_HANDLERS = 'event_handlers'
+
+    # keys that must be present in the metadata file (queues is optional)
+    REQUIRED_EXPORTS = [HANDLERS_PACKAGE, EVENT_HANDLERS]
+
+    def __init__(self):
+        super().__init__()
+        # parsed results, populated by _finalize_parsing()
+        self.handlers_package = None
+        self.queues = []
+        self.event_handlers = []
+
+    def get_required_fields(self) -> list:
+        """Return the list of metadata keys the base parser must find."""
+        return self.REQUIRED_EXPORTS
+
+    def validate_metadata(self, metadata: dict) -> bool:
+        """Validate metadata contents; accumulate errors via add_error().
+
+        :return: True when no errors were recorded (by this call or before)
+        """
+        super().validate_metadata(metadata)
+
+        package = metadata.get(self.HANDLERS_PACKAGE)
+        if not package or not isinstance(package, str):
+            self.add_error("Handlers package '{}' is invalid".format(package))
+
+        event_handlers = metadata.get(self.EVENT_HANDLERS)
+        if not event_handlers or not isinstance(event_handlers, dict):
+            # NOTE(review): the two string fragments concatenate without a
+            # space ("empty(should") -- message text looks unintended; confirm
+            self.add_error("Event handlers attribute is invalid or empty"
+                           "(should be a non-empty dict)")
+
+        return len(self.errors) == 0
+
+    def _finalize_parsing(self, metadata):
+        """Copy validated metadata values into instance attributes."""
+        handlers_package = metadata[self.HANDLERS_PACKAGE]
+        queues = metadata.get(self.QUEUES, None)
+        event_handlers = metadata[self.EVENT_HANDLERS]
+
+        # Convert variables to EventHandler-friendly format
+        self.handlers_package = handlers_package
+
+        # NOTE(review): only KeyError is caught; a queue entry that is not a
+        # dict would raise TypeError instead -- confirm input is pre-validated
+        try:
+            if queues and isinstance(queues, list):
+                self.queues = [{"queue": q["queue"],
+                                "exchange": q["exchange"]}
+                               for q in queues]
+        except KeyError:
+            self.add_error("Queues variable has invalid format")
+            return
+
+        self.event_handlers = event_handlers
+
+    def parse_metadata_file(self, file_path: str) -> dict:
+        """Parse the metadata file at *file_path*, then finalize and
+        raise/report accumulated errors via the base class check_errors().
+
+        :return: the raw metadata dict as parsed by the base class
+        """
+        metadata = super().parse_metadata_file(file_path)
+        self._finalize_parsing(metadata)
+        super().check_errors()
+        return metadata
+
+
+def parse_metadata_file(file_path: str):
+    """Convenience wrapper: parse *file_path* with EventMetadataParser.
+
+    :return: tuple of (handlers_package, queues, event_handlers)
+    """
+    parser = EventMetadataParser()
+    parser.parse_metadata_file(file_path)
+    return parser.handlers_package, parser.queues, parser.event_handlers
diff --git a/app/discover/events/event_network_add.py b/app/discover/events/event_network_add.py
new file mode 100644
index 0000000..41fafd4
--- /dev/null
+++ b/app/discover/events/event_network_add.py
@@ -0,0 +1,50 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.events.event_base import EventBase, EventResult
+
+
+class EventNetworkAdd(EventBase):
+    """Handle a Neutron network-create notification by adding a network
+    document to the inventory."""
+
+    def handle(self, env, notification):
+        """Build and store the inventory document for the new network.
+
+        :param env: environment name
+        :param notification: raw notification dict
+        :return: EventResult; result=False (no retry) if the network
+                 already exists in the inventory
+        """
+        network = notification['payload']['network']
+        network_id = network['id']
+        network_document = self.inv.get_by_id(env, network_id)
+        if network_document:
+            self.log.info('network already existed, aborting network add')
+            return EventResult(result=False, retry=False)
+
+        # build network document for adding network
+        project_name = notification['_context_project_name']
+        project_id = notification['_context_project_id']
+        # networks folder id under the project subtree
+        parent_id = project_id + '-networks'
+        network_name = network['name']
+
+        network['environment'] = env
+        network['type'] = 'network'
+        network['id_path'] = "/%s/%s-projects/%s/%s/%s" \
+                             % (env, env, project_id, parent_id, network_id)
+        # cidrs/subnet_ids start empty; filled when subnets are added
+        network['cidrs'] = []
+        network['subnet_ids'] = []
+        network['last_scanned'] = notification['timestamp']
+        network['name_path'] = "/%s/Projects/%s/Networks/%s" \
+                               % (env, project_name, network_name)
+        network['network'] = network_id
+        network['object_name'] = network_name
+        network['parent_id'] = parent_id
+        network['parent_text'] = "Networks"
+        network['parent_type'] = "networks_folder"
+        network['project'] = project_name
+        network["show_in_tree"] = True
+        network['subnets'] = {}
+
+        self.inv.set(network)
+        return EventResult(result=True,
+                           related_object=network_id,
+                           display_context=network_id)
diff --git a/app/discover/events/event_network_delete.py b/app/discover/events/event_network_delete.py
new file mode 100644
index 0000000..b3277da
--- /dev/null
+++ b/app/discover/events/event_network_delete.py
@@ -0,0 +1,17 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.events.event_delete_base import EventDeleteBase
+
+
+class EventNetworkDelete(EventDeleteBase):
+    """Handle a Neutron network-delete notification."""
+
+    def handle(self, env, notification):
+        """Delegate to the generic delete handler for 'network' objects."""
+        network_id = notification['payload']['network_id']
+        return self.delete_handler(env, network_id, "network")
diff --git a/app/discover/events/event_network_update.py b/app/discover/events/event_network_update.py
new file mode 100644
index 0000000..3e1432e
--- /dev/null
+++ b/app/discover/events/event_network_update.py
@@ -0,0 +1,44 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import re
+
+from discover.events.event_base import EventBase, EventResult
+
+
+class EventNetworkUpdate(EventBase):
+    """Handle a Neutron network-update notification: sync name and
+    admin state into the network inventory document."""
+
+    def handle(self, env, notification):
+        """Update the existing network document.
+
+        :return: EventResult; result=False with retry=True when the
+                 network document is not yet in the inventory
+        """
+        network = notification['payload']['network']
+        network_id = network['id']
+
+        network_document = self.inv.get_by_id(env, network_id)
+        if not network_document:
+            self.log.info('Network document not found, aborting network update')
+            return EventResult(result=False, retry=True)
+
+        # update network document
+        name = network['name']
+        if name != network_document['name']:
+            network_document['name'] = name
+            network_document['object_name'] = name
+
+            # replace only the last path segment with the new name
+            name_path = network_document['name_path']
+            network_document['name_path'] = name_path[:name_path.rindex('/') + 1] + name
+
+            # TBD: fix name_path for descendants
+            # bulk-rewrite name_path of all documents under the old path
+            self.inv.values_replace({"environment": env,
+                                     "name_path": {"$regex": r"^" + re.escape(name_path + '/')}},
+                                    {"name_path": {"from": name_path, "to": network_document['name_path']}})
+
+        network_document['admin_state_up'] = network['admin_state_up']
+        self.inv.set(network_document)
+        return EventResult(result=True,
+                           related_object=network_id,
+                           display_context=network_id)
diff --git a/app/discover/events/event_port_add.py b/app/discover/events/event_port_add.py
new file mode 100644
index 0000000..63a5e80
--- /dev/null
+++ b/app/discover/events/event_port_add.py
@@ -0,0 +1,309 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import datetime
+
+from discover.events.event_base import EventBase, EventResult
+from discover.fetchers.api.api_fetch_host_instances import ApiFetchHostInstances
+from discover.fetchers.cli.cli_fetch_instance_vnics import CliFetchInstanceVnics
+from discover.fetchers.cli.cli_fetch_instance_vnics_vpp import CliFetchInstanceVnicsVpp
+from discover.fetchers.cli.cli_fetch_vservice_vnics import CliFetchVserviceVnics
+from discover.find_links_for_instance_vnics import FindLinksForInstanceVnics
+from discover.find_links_for_vedges import FindLinksForVedges
+from discover.scanner import Scanner
+
+
+class EventPortAdd(EventBase):
+    """Handle a Neutron port-create notification.
+
+    Adds port / folder / vservice / vnic documents to the inventory and,
+    for compute-owned ports, refreshes the owning instance document and
+    rescans links and cliques.
+    """
+
+    def get_name_by_id(self, object_id):
+        """Return the 'name' of the inventory document with *object_id*,
+        or None when no such document exists (uses self.env)."""
+        item = self.inv.get_by_id(self.env, object_id)
+        if item:
+            return item['name']
+        return None
+
+    def add_port_document(self, env, project_name, project_id, network_name, network_id, port):
+        """Decorate the raw *port* payload with inventory fields and store it."""
+        # add other data for port document
+        port['type'] = 'port'
+        port['environment'] = env
+
+        port['parent_id'] = port['network_id'] + '-ports'
+        port['parent_text'] = 'Ports'
+        port['parent_type'] = 'ports_folder'
+
+        # ports are displayed by their MAC address
+        port['name'] = port['mac_address']
+        port['object'] = port['name']
+        port['project'] = project_name
+
+        port['id_path'] = "{}/{}-projects/{}/{}-networks/{}/{}-ports/{}" \
+            .format(env, env,
+                    project_id, project_id,
+                    network_id, network_id, port['id'])
+        port['name_path'] = "/{}/Projects/{}/Networks/{}/Ports/{}" \
+            .format(env, project_name, network_name, port['id'])
+
+        port['show_in_tree'] = True
+        port['last_scanned'] = datetime.datetime.utcnow()
+        self.inv.set(port)
+        self.log.info("add port document for port: {}".format(port['id']))
+
+    def add_ports_folder(self, env, project_id, network_id, network_name):
+        """Create the 'Ports' folder document under the given network."""
+        port_folder = {
+            "id": network_id + "-ports",
+            "create_object": True,
+            "name": "Ports",
+            "text": "Ports",
+            "type": "ports_folder",
+            "parent_id": network_id,
+            "parent_type": "network",
+            'environment': env,
+            'id_path': "{}/{}-projects/{}/{}-networks/{}/{}-ports/"
+                .format(env, env, project_id, project_id,
+                        network_id, network_id),
+            # NOTE(review): name_path uses project_id where sibling folders
+            # use the project name -- confirm whether intentional
+            'name_path': "/{}/Projects/{}/Networks/{}/Ports"
+                .format(env, project_id, network_name),
+            "show_in_tree": True,
+            "last_scanned": datetime.datetime.utcnow(),
+            "object_name": "Ports",
+        }
+        self.inv.set(port_folder)
+        self.log.info("add ports_folder document for network: {}.".format(network_id))
+
+    def add_network_services_folder(self, env, project_id, network_id, network_name):
+        """Create the 'Network vServices' folder document under the network."""
+        network_services_folder = {
+            "create_object": True,
+            "environment": env,
+            "id": network_id + "-network_services",
+            "id_path": "{}/{}-projects/{}/{}-networks/{}/{}-network_services/"
+                .format(env, env, project_id, project_id,
+                        network_id, network_id),
+            "last_scanned": datetime.datetime.utcnow(),
+            "name": "Network vServices",
+            "name_path": "/{}/Projects/{}/Networks/{}/Network vServices"
+                .format(env, project_id, network_name),
+            "object_name": "Network vServices",
+            "parent_id": network_id,
+            "parent_type": "network",
+            "show_in_tree": True,
+            "text": "Network vServices",
+            "type": "network_services_folder"
+        }
+        self.inv.set(network_services_folder)
+        self.log.info("add network services folder for network:{}".format(network_id))
+
+    def add_dhcp_document(self, env, host, network_id, network_name):
+        """Create the DHCP vservice document for *network_id* on *host*."""
+        dhcp_document = {
+            "environment": env,
+            "host": host['id'],
+            "id": "qdhcp-" + network_id,
+            "id_path": "{}/{}-vservices/{}-vservices-dhcps/qdhcp-{}"
+                .format(host['id_path'], host['id'],
+                        host['id'], network_id),
+            "last_scanned": datetime.datetime.utcnow(),
+            "local_service_id": "qdhcp-" + network_id,
+            "name": "dhcp-" + network_name,
+            "name_path": host['name_path'] + "/Vservices/DHCP servers/dhcp-" + network_name,
+            "network": [network_id],
+            "object_name": "dhcp-" + network_name,
+            "parent_id": host['id'] + "-vservices-dhcps",
+            "parent_text": "DHCP servers",
+            "parent_type": "vservice_dhcps_folder",
+            "service_type": "dhcp",
+            "show_in_tree": True,
+            "type": "vservice"
+        }
+        self.inv.set(dhcp_document)
+        self.log.info("add DHCP document for network: {}.".format(network_id))
+
+    # This method has dynamic usages, take caution when changing its signature
+    def add_vnics_folder(self,
+                         env, host,
+                         object_id, network_name='',
+                         object_type="dhcp", router_name=''):
+        """Create the vNICs folder document for a dhcp/router vservice.
+
+        :param object_id: network_id for dhcp, router_id for router
+        :param object_type: 'dhcp' or 'router' (keys of type_map)
+        """
+        # when vservice is DHCP, id = network_id,
+        # when vservice is router, id = router_id
+        type_map = {"dhcp": ('DHCP servers', 'dhcp-' + network_name),
+                    "router": ('Gateways', router_name)}
+
+        vnics_folder = {
+            "environment": env,
+            "id": "q{}-{}-vnics".format(object_type, object_id),
+            "id_path": "{}/{}-vservices/{}-vservices-{}s/q{}-{}/q{}-{}-vnics"
+                .format(host['id_path'], host['id'], host['id'],
+                        object_type, object_type, object_id,
+                        object_type, object_id),
+            "last_scanned": datetime.datetime.utcnow(),
+            "name": "q{}-{}-vnics".format(object_type, object_id),
+            "name_path": "{}/Vservices/{}/{}/vNICs"
+                .format(host['name_path'],
+                        type_map[object_type][0],
+                        type_map[object_type][1]),
+            "object_name": "vNICs",
+            "parent_id": "q{}-{}".format(object_type, object_id),
+            "parent_type": "vservice",
+            "show_in_tree": True,
+            "text": "vNICs",
+            "type": "vnics_folder"
+        }
+        self.inv.set(vnics_folder)
+        self.log.info("add vnics_folder document for q{}-{}-vnics"
+                      .format(object_type, object_id))
+
+    # This method has dynamic usages, take caution when changing its signature
+    def add_vnic_document(self,
+                          env, host,
+                          object_id, network_name='',
+                          object_type='dhcp', router_name='',
+                          mac_address=None):
+        """Fetch vnic data from the vservice namespace and store document(s).
+
+        With *mac_address* set, stores only the matching vnic; otherwise
+        stores every vnic found in the namespace.
+        :return: True on success, False when nothing could be added
+        """
+        # when vservice is DHCP, id = network_id,
+        # when vservice is router, id = router_id
+        type_map = {"dhcp": ('DHCP servers', 'dhcp-' + network_name),
+                    "router": ('Gateways', router_name)}
+
+        fetcher = CliFetchVserviceVnics()
+        fetcher.set_env(env)
+        # namespace on the host, e.g. 'qdhcp-<net>' or 'qrouter-<id>'
+        namespace = 'q{}-{}'.format(object_type, object_id)
+        vnic_documents = fetcher.handle_service(host['id'], namespace, enable_cache=False)
+        if not vnic_documents:
+            self.log.info("Vnic document not found in namespace.")
+            return False
+
+        if mac_address is not None:
+            for doc in vnic_documents:
+                if doc['mac_address'] == mac_address:
+                    # add a specific vnic document.
+                    doc["environment"] = env
+                    doc["id_path"] = "{}/{}-vservices/{}-vservices-{}s/{}/{}-vnics/{}"\
+                        .format(host['id_path'], host['id'],
+                                host['id'], object_type, namespace,
+                                namespace, doc["id"])
+                    doc["name_path"] = "{}/Vservices/{}/{}/vNICs/{}" \
+                        .format(host['name_path'],
+                                type_map[object_type][0],
+                                type_map[object_type][1],
+                                doc["id"])
+                    self.inv.set(doc)
+                    self.log.info("add vnic document with mac_address: {}."
+                                  .format(mac_address))
+                    return True
+
+            self.log.info("Can not find vnic document by mac_address: {}"
+                          .format(mac_address))
+            return False
+        else:
+            for doc in vnic_documents:
+                # add all vnic documents.
+                doc["environment"] = env
+                doc["id_path"] = "{}/{}-vservices/{}-vservices-{}s/{}/{}-vnics/{}" \
+                    .format(host['id_path'], host['id'],
+                            host['id'], object_type,
+                            namespace, namespace, doc["id"])
+                doc["name_path"] = "{}/Vservices/{}/{}/vNICs/{}" \
+                    .format(host['name_path'],
+                            type_map[object_type][0],
+                            type_map[object_type][1],
+                            doc["id"])
+                self.inv.set(doc)
+                self.log.info("add vnic document with mac_address: {}."
+                              .format(doc["mac_address"]))
+            return True
+
+    def handle_dhcp_device(self, env, notification, network_id, network_name, mac_address=None):
+        """Add the DHCP vservice, its vNICs folder and vnic document(s)."""
+        # add dhcp vservice document.
+        host_id = notification["publisher_id"].replace("network.", "", 1)
+        host = self.inv.get_by_id(env, host_id)
+
+        self.add_dhcp_document(env, host, network_id, network_name)
+
+        # add vnics folder.
+        self.add_vnics_folder(env, host, network_id, network_name)
+
+        # add vnic document.
+        self.add_vnic_document(env, host, network_id, network_name, mac_address=mac_address)
+
+    def handle(self, env, notification):
+        """Process a port-create notification end to end.
+
+        :return: EventResult; retry=True when prerequisite documents are
+                 missing or the port document failed to be stored
+        """
+        project = notification['_context_project_name']
+        project_id = notification['_context_project_id']
+        payload = notification['payload']
+        port = payload['port']
+        network_id = port['network_id']
+        network_name = self.get_name_by_id(network_id)
+        mac_address = port['mac_address']
+
+        # check ports folder document.
+        ports_folder = self.inv.get_by_id(env, network_id + '-ports')
+        if not ports_folder:
+            self.log.info("ports folder not found, add ports folder first.")
+            self.add_ports_folder(env, project_id, network_id, network_name)
+        self.add_port_document(env, project, project_id, network_name, network_id, port)
+
+        # update the port related documents.
+        if 'compute' in port['device_owner']:
+            # update the instance related document.
+            host_id = port['binding:host_id']
+            instance_id = port['device_id']
+            # NOTE(review): old_instance_doc may be None here but is
+            # dereferenced below when 'instance' is found -- confirm the
+            # instance document always exists when instances_root does
+            old_instance_doc = self.inv.get_by_id(env, instance_id)
+            instances_root_id = host_id + '-instances'
+            instances_root = self.inv.get_by_id(env, instances_root_id)
+            if not instances_root:
+                self.log.info('instance document not found, aborting port adding')
+                return EventResult(result=False, retry=True)
+
+            # update instance
+            instance_fetcher = ApiFetchHostInstances()
+            instance_fetcher.set_env(env)
+            instance_docs = instance_fetcher.get(host_id + '-')
+            instance = next(filter(lambda i: i['id'] == instance_id, instance_docs), None)
+
+            if instance:
+                old_instance_doc['network_info'] = instance['network_info']
+                old_instance_doc['network'] = instance['network']
+                if old_instance_doc.get('mac_address') is None:
+                    old_instance_doc['mac_address'] = mac_address
+
+                self.inv.set(old_instance_doc)
+                self.log.info("update instance document")
+
+            # add vnic document.
+            if port['binding:vif_type'] == 'vpp':
+                vnic_fetcher = CliFetchInstanceVnicsVpp()
+            else:
+                # set ovs as default type.
+                vnic_fetcher = CliFetchInstanceVnics()
+
+            vnic_fetcher.set_env(env)
+            vnic_docs = vnic_fetcher.get(instance_id + '-')
+            vnic = next(filter(lambda vnic: vnic['mac_address'] == mac_address, vnic_docs), None)
+
+            if vnic:
+                vnic['environment'] = env
+                vnic['type'] = 'vnic'
+                vnic['name_path'] = old_instance_doc['name_path'] + '/vNICs/' + vnic['name']
+                vnic['id_path'] = '{}/{}/{}'.format(old_instance_doc['id_path'],
+                                                    old_instance_doc['id'],
+                                                    vnic['name'])
+                self.inv.set(vnic)
+                self.log.info("add instance-vnic document, mac_address: {}"
+                              .format(mac_address))
+
+            # rebuild links and cliques affected by the new vnic
+            self.log.info("scanning for links")
+            fetchers_implementing_add_links = [FindLinksForInstanceVnics(), FindLinksForVedges()]
+            for fetcher in fetchers_implementing_add_links:
+                fetcher.add_links()
+            scanner = Scanner()
+            scanner.set_env(env)
+            scanner.scan_cliques()
+
+        # verify the port document was actually stored
+        port_document = self.inv.get_by_id(env, port['id'])
+        if not port_document:
+            self.log.error("Port {} failed to add".format(port['id']))
+            return EventResult(result=False, retry=True)
+
+        return EventResult(result=True,
+                           related_object=port['id'],
+                           display_context=network_id)
diff --git a/app/discover/events/event_port_delete.py b/app/discover/events/event_port_delete.py
new file mode 100644
index 0000000..1e55870
--- /dev/null
+++ b/app/discover/events/event_port_delete.py
@@ -0,0 +1,80 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.events.event_base import EventResult
+from discover.events.event_delete_base import EventDeleteBase
+from discover.fetchers.api.api_fetch_host_instances import ApiFetchHostInstances
+
+
+class EventPortDelete(EventDeleteBase):
+    """Handle a Neutron port-delete notification: remove the port document,
+    update the owning instance, and delete the related vnic."""
+
+    def delete_port(self, env, port_id):
+        """Delete the port document and its related vnic document.
+
+        :return: EventResult; result=False (no retry) when the port or
+                 vnic document is missing
+        """
+        port_doc = self.inv.get_by_id(env, port_id)
+        if not port_doc:
+            self.log.info("Port document not found, aborting port deleting.")
+            return EventResult(result=False, retry=False)
+
+        # if port is binding to a instance, instance document needs to be updated.
+        if 'compute' in port_doc['device_owner']:
+            self.log.info("update instance document to which port is binding.")
+            self.update_instance(env, port_doc)
+
+        # delete port document
+        self.inv.delete('inventory', {'id': port_id})
+
+        # delete vnic and related document
+        vnic_doc = self.inv.get_by_field(env, 'vnic', 'mac_address', port_doc['mac_address'], get_single=True)
+        if not vnic_doc:
+            # NOTE(review): the port was already deleted above, yet this
+            # reports result=False -- confirm whether this is intended
+            self.log.info("Vnic document not found, aborting vnic deleting.")
+            return EventResult(result=False, retry=False)
+
+        result = self.delete_handler(env, vnic_doc['id'], 'vnic')
+        result.related_object = port_id
+        result.display_context = port_doc.get('network_id')
+        self.log.info('Finished port deleting')
+        return result
+
+    def update_instance(self, env, port_doc):
+        """Remove the deleted port from its instance's network_info and,
+        when needed, fix the instance's network list and mac_address."""
+        # update instance document if port
+        network_id = port_doc['network_id']
+        instance_doc = self.inv.get_by_field(env, 'instance', 'network_info.id', port_doc['id'], get_single=True)
+        if instance_doc:
+            # count the instance's ports on this network
+            port_num = 0
+
+            for port in instance_doc['network_info']:
+                if port['network']['id'] == network_id:
+                    port_num += 1
+                if port['id'] == port_doc['id']:
+                    instance_doc['network_info'].remove(port)
+                    self.log.info("update network information of instance document.")
+
+            if port_num == 1:
+                # remove network information only when last port in network will be deleted.
+                instance_doc['network'].remove(network_id)
+
+            # update instance mac address.
+            if port_doc['mac_address'] == instance_doc['mac_address']:
+                instance_fetcher = ApiFetchHostInstances()
+                instance_fetcher.set_env(env)
+                host_id = port_doc['binding:host_id']
+                instance_id = port_doc['device_id']
+                instance_docs = instance_fetcher.get(host_id + '-')
+                instance = next(filter(lambda i: i['id'] == instance_id, instance_docs), None)
+                if instance:
+                    # API no longer reports a mac for the instance
+                    if 'mac_address' not in instance:
+                        instance_doc['mac_address'] = None
+                    self.log.info("update mac_address:%s of instance document." % instance_doc['mac_address'])
+
+            self.inv.set(instance_doc)
+        else:
+            self.log.info("No instance document binding to network:%s." % network_id)
+
+    def handle(self, env, notification):
+        """Entry point: delete the port named in the notification payload."""
+        port_id = notification['payload']['port_id']
+        return self.delete_port(env, port_id)
diff --git a/app/discover/events/event_port_update.py b/app/discover/events/event_port_update.py
new file mode 100644
index 0000000..298b565
--- /dev/null
+++ b/app/discover/events/event_port_update.py
@@ -0,0 +1,38 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.events.event_base import EventBase, EventResult
+
+
+class EventPortUpdate(EventBase):
+    """Handle a Neutron port-update notification: sync name, admin state
+    and vnic type into the port inventory document."""
+
+    def handle(self, env, notification):
+        """Update the existing port document.
+
+        :return: EventResult; result=False with retry=True when the port
+                 document is not yet in the inventory
+        """
+        # check port document.
+        port = notification['payload']['port']
+        port_id = port['id']
+        port_document = self.inv.get_by_id(env, port_id)
+        if not port_document:
+            self.log.info('port document does not exist, aborting port update')
+            return EventResult(result=False, retry=True)
+
+        # build port document
+        port_document['name'] = port['name']
+        port_document['admin_state_up'] = port['admin_state_up']
+        # status is derived from the admin state, not taken from the payload
+        if port_document['admin_state_up']:
+            port_document['status'] = 'ACTIVE'
+        else:
+            port_document['status'] = 'DOWN'
+
+        port_document['binding:vnic_type'] = port['binding:vnic_type']
+
+        # update port document.
+        self.inv.set(port_document)
+        return EventResult(result=True,
+                           related_object=port_id,
+                           display_context=port_document.get('network_id'))
diff --git a/app/discover/events/event_router_add.py b/app/discover/events/event_router_add.py
new file mode 100644
index 0000000..20e07e5
--- /dev/null
+++ b/app/discover/events/event_router_add.py
@@ -0,0 +1,123 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import datetime
+
+from functools import partial
+
+from discover.events.event_base import EventBase, EventResult
+from discover.events.event_port_add import EventPortAdd
+from discover.events.event_subnet_add import EventSubnetAdd
+from discover.fetchers.cli.cli_fetch_host_vservice import CliFetchHostVservice
+from discover.find_links_for_vservice_vnics import FindLinksForVserviceVnics
+from discover.scanner import Scanner
+from utils.util import decode_router_id, encode_router_id
+
+
+class EventRouterAdd(EventBase):
+    """Handle a Neutron router-create notification: add the router
+    vservice document, its child documents, and rescan links/cliques."""
+
+    def add_router_document(self, env, network_id, router_doc, host):
+        """Decorate *router_doc* (fetched vservice data) with inventory
+        fields and store it; *network_id* may be None (no gateway)."""
+        router_doc["environment"] = env
+        router_doc["id_path"] = "{}/{}-vservices/{}-vservices-routers/{}"\
+            .format(host['id_path'], host['id'],
+                    host['id'], router_doc['id'])
+        router_doc['last_scanned'] = datetime.datetime.utcnow()
+        router_doc['name_path'] = "{}/Vservices/Gateways/{}"\
+            .format(host['name_path'],
+                    router_doc['name'])
+        router_doc['network'] = []
+        if network_id:
+            router_doc['network'] = [network_id]
+
+        router_doc['object_name'] = router_doc['name']
+        router_doc['parent_id'] = host['id'] + "-vservices-routers"
+        router_doc['show_in_tree'] = True
+        router_doc['type'] = "vservice"
+
+        self.inv.set(router_doc)
+
+    def add_children_documents(self, env, project_id, network_id, host, router_doc):
+        """Add the gateway port, vNICs folder and vnic documents that
+        belong to the new router on its external network."""
+
+        network_document = self.inv.get_by_id(env, network_id)
+        network_name = network_document['name']
+        # router_doc['id'] is the host-encoded id; recover the plain one
+        router_id = decode_router_id(router_doc['id'])
+
+        # add port for binding to vservice:router
+        subnet_handler = EventSubnetAdd()
+        ports_folder = self.inv.get_by_id(env, network_id + '-ports')
+        if not ports_folder:
+            self.log.info("Ports_folder not found.")
+            subnet_handler.add_ports_folder(env, project_id, network_id, network_name)
+        add_port_return = subnet_handler.add_port_document(env,
+                                                           router_doc['gw_port_id'],
+                                                           network_name=network_name)
+
+        # add vnics folder and vnic document
+        port_handler = EventPortAdd()
+        # bind shared arguments once; called possibly more than once below
+        add_vnic_folder = partial(port_handler.add_vnics_folder,
+                                  env=env,
+                                  host=host,
+                                  object_id=router_id,
+                                  object_type='router',
+                                  network_name=network_name,
+                                  router_name=router_doc['name'])
+        add_vnic_document = partial(port_handler.add_vnic_document,
+                                    env=env,
+                                    host=host,
+                                    object_id=router_id,
+                                    object_type='router',
+                                    network_name=network_name,
+                                    router_name=router_doc['name'])
+
+        add_vnic_folder()
+        if add_port_return:
+            add_vnic_return = add_vnic_document()
+            if not add_vnic_return:
+                self.log.info("Try to add vnic document again.")
+                add_vnic_document()
+        else:
+            # in some cases, port has been created,
+            # but port doc cannot be fetched by OpenStack API
+            self.log.info("Try to add port document again.")
+            # TODO: #AskCheng - this never returns anything!
+            add_port_return = add_vnic_folder()
+            # TODO: #AskCheng - this will never evaluate to True!
+            if add_port_return is False:
+                self.log.info("Try to add vnic document again.")
+                add_vnic_document()
+
+    def handle(self, env, values):
+        """Process a router-create notification end to end.
+
+        :return: EventResult with the encoded router id as related object
+        """
+        router = values['payload']['router']
+        host_id = values["publisher_id"].replace("network.", "", 1)
+        project_id = values['_context_project_id']
+        router_id = encode_router_id(host_id, router['id'])
+        host = self.inv.get_by_id(env, host_id)
+
+        # fetch live vservice data for the new router from the host CLI
+        fetcher = CliFetchHostVservice()
+        fetcher.set_env(env)
+        router_doc = fetcher.get_vservice(host_id, router_id)
+        gateway_info = router['external_gateway_info']
+
+        if gateway_info:
+            network_id = gateway_info['network_id']
+            self.add_router_document(env, network_id, router_doc, host)
+            self.add_children_documents(env, project_id, network_id, host, router_doc)
+        else:
+            # router without an external gateway: no child documents
+            self.add_router_document(env, None, router_doc, host)
+
+        # scan links and cliques
+        FindLinksForVserviceVnics().add_links(search={"parent_id": router_id})
+        scanner = Scanner()
+        scanner.set_env(env)
+        scanner.scan_cliques()
+        self.log.info("Finished router added.")
+
+        return EventResult(result=True,
+                           related_object=router_id,
+                           display_context=router_id)
diff --git a/app/discover/events/event_router_delete.py b/app/discover/events/event_router_delete.py
new file mode 100644
index 0000000..65072d6
--- /dev/null
+++ b/app/discover/events/event_router_delete.py
@@ -0,0 +1,37 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.events.event_base import EventResult
+from discover.events.event_delete_base import EventDeleteBase
+from utils.util import encode_router_id
+
+
+class EventRouterDelete(EventDeleteBase):
+    """Handle a Neutron router-delete notification; tolerant of the
+    several payload shapes in which the router id may arrive."""
+
+    def handle(self, env, values):
+        """Locate the router id in the payload and delete the vservice.
+
+        :return: EventResult; result=False (no retry) when publisher_id
+                 or the router id cannot be determined
+        """
+        payload = values['payload']
+
+        if 'publisher_id' not in values:
+            self.log.error("Publisher_id is not in event values. Aborting router delete")
+            return EventResult(result=False, retry=False)
+
+        host_id = values['publisher_id'].replace('network.', '', 1)
+        # the id key varies between notification versions/sources
+        if 'router_id' in payload:
+            router_id = payload['router_id']
+        elif 'id' in payload:
+            router_id = payload['id']
+        else:
+            router_id = payload.get('router', {}).get('id')
+
+        if not router_id:
+            self.log.error("Router id is not in payload. Aborting router delete")
+            return EventResult(result=False, retry=False)
+
+        router_full_id = encode_router_id(host_id, router_id)
+        return self.delete_handler(env, router_full_id, "vservice")
diff --git a/app/discover/events/event_router_update.py b/app/discover/events/event_router_update.py
new file mode 100644
index 0000000..8dd53f0
--- /dev/null
+++ b/app/discover/events/event_router_update.py
@@ -0,0 +1,82 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.events.event_base import EventBase, EventResult
+from discover.events.event_port_delete import EventPortDelete
+from discover.events.event_router_add import EventRouterAdd
+from discover.fetchers.cli.cli_fetch_host_vservice import CliFetchHostVservice
+from discover.find_links_for_vservice_vnics import FindLinksForVserviceVnics
+from discover.scanner import Scanner
+from utils.util import encode_router_id
+
+
class EventRouterUpdate(EventBase):
    """Handle a Neutron router.update notification."""

    def handle(self, env, values):
        """Update the router's vservice document and its related documents.

        When the gateway was removed, the gateway port document and the
        router's links are deleted; when a gateway is present, the gateway
        network, static routes and child documents are refreshed. Finally
        vnic links and cliques are rescanned.
        """
        payload = values['payload']
        router = payload['router']

        project_id = values['_context_project_id']
        host_id = values["publisher_id"].replace("network.", "", 1)
        router_id = payload['id'] if 'id' in payload else router['id']

        router_full_id = encode_router_id(host_id, router_id)
        router_doc = self.inv.get_by_id(env, router_full_id)
        if not router_doc:
            self.log.info("Router document not found, aborting router updating")
            return EventResult(result=False, retry=True)

        router_doc['admin_state_up'] = router['admin_state_up']
        router_doc['name'] = router['name']
        gateway_info = router.get('external_gateway_info')
        if gateway_info is None:
            # gateway removed: delete the related gateway port document
            port_doc = {}
            if router_doc.get('gw_port_id'):
                port_doc = self.inv.get_by_id(env, router_doc['gw_port_id'])
                EventPortDelete().delete_port(env, router_doc['gw_port_id'])

            if router_doc.get('network'):
                if port_doc:
                    router_doc['network'].remove(port_doc['network_id'])
                # NOTE(review): gw_port_id is only cleared when the router
                # document carries a 'network' list — confirm this is intended.
                router_doc['gw_port_id'] = None

            # remove related links
            self.inv.delete('links', {'source_id': router_full_id})
        else:
            # gateway present: make sure its network is tracked
            if 'network' in router_doc:
                if gateway_info['network_id'] not in router_doc['network']:
                    router_doc['network'].append(gateway_info['network_id'])
            else:
                router_doc['network'] = [gateway_info['network_id']]
            # update static route
            router_doc['routes'] = router['routes']

            # refresh gw_port_id from the live vservice
            fetcher = CliFetchHostVservice()
            fetcher.set_env(env)
            router_vservice = fetcher.get_vservice(host_id, router_full_id)
            if router_vservice.get('gw_port_id'):
                router_doc['gw_port_id'] = router_vservice['gw_port_id']

            host = self.inv.get_by_id(env, host_id)
            EventRouterAdd().add_children_documents(env, project_id,
                                                    gateway_info['network_id'],
                                                    host, router_doc)

        # rescan the vnic links.
        FindLinksForVserviceVnics().add_links(search={'parent_id': router_full_id + '-vnics'})
        self.inv.set(router_doc)

        # update the cliques.
        scanner = Scanner()
        scanner.set_env(env)
        scanner.scan_cliques()
        self.log.info("Finished router update.")
        return EventResult(result=True,
                           related_object=router_full_id,
                           display_context=router_full_id)
diff --git a/app/discover/events/event_subnet_add.py b/app/discover/events/event_subnet_add.py
new file mode 100644
index 0000000..b519b1c
--- /dev/null
+++ b/app/discover/events/event_subnet_add.py
@@ -0,0 +1,154 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import datetime
+
+from discover.events.event_base import EventBase, EventResult
+from discover.events.event_port_add import EventPortAdd
+from discover.fetchers.api.api_access import ApiAccess
+from discover.fetchers.api.api_fetch_port import ApiFetchPort
+from discover.fetchers.api.api_fetch_regions import ApiFetchRegions
+from discover.fetchers.db.db_fetch_port import DbFetchPort
+from discover.find_links_for_pnics import FindLinksForPnics
+from discover.find_links_for_vservice_vnics import FindLinksForVserviceVnics
+from discover.scanner import Scanner
+
+
class EventSubnetAdd(EventBase):
    """Handle a Neutron subnet.create notification.

    Merges the new subnet into its network document and, when DHCP is
    enabled, creates the DHCP-related child documents and rescans links
    and cliques.
    """

    def add_port_document(self, env, port_id, network_name=None, project_name=''):
        """Fetch a port from the API and store it as an inventory document.

        network_name: supply it when adding a router-interface port to skip
            an extra inventory lookup.
        project_name: supply it when adding a gateway port, because that
            kind of port document has no project attribute; in that case
            network_name should not be provided.
        Returns the stored port document, or False if the API returned none.
        """
        fetcher = ApiFetchPort()
        fetcher.set_env(env)
        ports = fetcher.get(port_id)
        if not ports:
            return False

        port = ports[0]
        project_id = port['tenant_id']
        network_id = port['network_id']

        if not network_name:
            network = self.inv.get_by_id(env, network_id)
            network_name = network['name']

        port['type'] = "port"
        port['environment'] = env
        port_id = port['id']
        port['id_path'] = "%s/%s-projects/%s/%s-networks/%s/%s-ports/%s" % \
                          (env, env, project_id, project_id, network_id, network_id, port_id)
        port['last_scanned'] = datetime.datetime.utcnow()
        if 'project' in port:
            project_name = port['project']
        port['name_path'] = "/%s/Projects/%s/Networks/%s/Ports/%s" % \
                            (env, project_name, network_name, port_id)
        self.inv.set(port)
        self.log.info("add port document for port:%s" % port_id)
        return port

    def add_ports_folder(self, env, project_id, network_id, network_name):
        """Store the 'Ports' folder document under the given network."""
        # NOTE(review): name_path embeds project_id where similar paths use
        # a project name — confirm this is intended.
        self.inv.set({
            "id": network_id + "-ports",
            "create_object": True,
            "name": "Ports",
            "text": "Ports",
            "type": "ports_folder",
            "parent_id": network_id,
            "parent_type": "network",
            'environment': env,
            'id_path': "%s/%s-projects/%s/%s-networks/%s/%s-ports/" % (env, env, project_id, project_id,
                                                                       network_id, network_id),
            'name_path': "/%s/Projects/%s/Networks/%s/Ports" % (env, project_id, network_name),
            "show_in_tree": True,
            "last_scanned": datetime.datetime.utcnow(),
            "object_name": "Ports",
        })

    def add_children_documents(self, env, project_id, network_id, network_name, host_id):
        """Create the child documents of a network whose subnet got DHCP."""
        # ports folder for the network
        self.add_ports_folder(env, project_id, network_id, network_name)

        # the network's port document
        port_id = DbFetchPort().get_id(network_id)
        self.add_port_document(env, port_id, network_name=network_name)

        port_handler = EventPortAdd()

        # network-services folder
        port_handler.add_network_services_folder(env, project_id, network_id, network_name)

        # DHCP vservice document on the publishing host
        host = self.inv.get_by_id(env, host_id)
        port_handler.add_dhcp_document(env, host, network_id, network_name)

        # vnics folder and vnic document
        port_handler.add_vnics_folder(env, host, network_id, network_name)
        port_handler.add_vnic_document(env, host, network_id, network_name)

    def handle(self, env, notification):
        """Apply a subnet.create event to the inventory."""
        subnet = notification['payload']['subnet']
        project_id = subnet['tenant_id']
        network_id = subnet['network_id']
        if 'id' not in subnet:
            self.log.info('Subnet payload doesn\'t have id, aborting subnet add')
            return EventResult(result=False, retry=False)

        network_document = self.inv.get_by_id(env, network_id)
        if not network_document:
            self.log.info('network document does not exist, aborting subnet add')
            return EventResult(result=False, retry=True)
        network_name = network_document['name']

        # merge the new subnet into the network document
        if subnet['cidr'] not in network_document['cidrs']:
            network_document['cidrs'].append(subnet['cidr'])
        if not network_document.get('subnets'):
            network_document['subnets'] = {}

        network_document['subnets'][subnet['name']] = subnet
        if subnet['id'] not in network_document['subnet_ids']:
            network_document['subnet_ids'].append(subnet['id'])
        self.inv.set(network_document)

        # with DHCP enabled, the network gets child documents and a rescan
        if subnet['enable_dhcp'] is True:
            # make sure the region cache is populated
            # TODO: #AskCheng - why is this necessary?
            if len(ApiAccess.regions) == 0:
                fetcher = ApiFetchRegions()
                fetcher.set_env(env)
                fetcher.get(None)

            self.log.info("add new subnet.")
            host_id = notification["publisher_id"].replace("network.", "", 1)
            self.add_children_documents(env, project_id, network_id, network_name, host_id)

            # scan links and cliques
            self.log.info("scanning for links")
            FindLinksForPnics().add_links()
            FindLinksForVserviceVnics().add_links(search={"parent_id": "qdhcp-%s-vnics" % network_id})

            scanner = Scanner()
            scanner.set_env(env)
            scanner.scan_cliques()
        self.log.info("Finished subnet added.")
        return EventResult(result=True,
                           related_object=subnet['id'],
                           display_context=network_id)
diff --git a/app/discover/events/event_subnet_delete.py b/app/discover/events/event_subnet_delete.py
new file mode 100644
index 0000000..900e701
--- /dev/null
+++ b/app/discover/events/event_subnet_delete.py
@@ -0,0 +1,57 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.events.event_base import EventResult
+from discover.events.event_delete_base import EventDeleteBase
+
+
class EventSubnetDelete(EventDeleteBase):
    """Handle a Neutron subnet.delete notification."""

    def delete_children_documents(self, env, vservice_id):
        """Delete the DHCP vservice document and its port/vnic children.

        The port and the vnic share a MAC address, so both are removed with
        a single inventory delete keyed by mac_address.
        """
        vnic_parent_id = vservice_id + '-vnics'
        vnic = self.inv.get_by_field(env, 'vnic', 'parent_id', vnic_parent_id, get_single=True)
        if not vnic:
            self.log.info("Vnic document not found.")
            return EventResult(result=False, retry=False)

        # delete port and vnic together by mac address.
        self.inv.delete('inventory', {"mac_address": vnic.get("mac_address")})
        return self.delete_handler(env, vservice_id, 'vservice')

    def handle(self, env, notification):
        """Remove a deleted subnet from its network document.

        Also removes the DHCP vservice (and its children) once the network
        has no subnets left.
        """
        subnet_id = notification['payload']['subnet_id']
        network_document = self.inv.get_by_field(env, "network", "subnet_ids", subnet_id, get_single=True)
        if not network_document:
            self.log.info("network document not found, aborting subnet deleting")
            return EventResult(result=False, retry=False)

        # remove subnet_id from subnet_ids array
        network_document["subnet_ids"].remove(subnet_id)

        # find the subnet entry (keyed by name) matching this id
        subnet = next(
            filter(lambda s: s['id'] == subnet_id,
                   network_document['subnets'].values()),
            None)

        # remove cidr from cidrs and delete subnet document.
        if subnet:
            network_document['cidrs'].remove(subnet['cidr'])
            del network_document['subnets'][subnet['name']]

        self.inv.set(network_document)

        # when network does not have any subnet, delete vservice DHCP,
        # port and vnic documents.
        if not network_document["subnet_ids"]:
            vservice_dhcp_id = 'qdhcp-{}'.format(network_document['id'])
            self.delete_children_documents(env, vservice_dhcp_id)

        # BUGFIX: the original returned subnet['id'] here, which raised
        # TypeError when the subnet entry was absent from the network
        # document (subnet is None); subnet_id is the same value and is
        # always available.
        return EventResult(result=True,
                           related_object=subnet_id,
                           display_context=network_document.get('id'))
diff --git a/app/discover/events/event_subnet_update.py b/app/discover/events/event_subnet_update.py
new file mode 100644
index 0000000..9d3c48b
--- /dev/null
+++ b/app/discover/events/event_subnet_update.py
@@ -0,0 +1,102 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.events.event_base import EventBase, EventResult
+from discover.events.event_port_add import EventPortAdd
+from discover.events.event_port_delete import EventPortDelete
+from discover.events.event_subnet_add import EventSubnetAdd
+from discover.fetchers.api.api_access import ApiAccess
+from discover.fetchers.api.api_fetch_regions import ApiFetchRegions
+from discover.fetchers.db.db_fetch_port import DbFetchPort
+from discover.find_links_for_vservice_vnics import FindLinksForVserviceVnics
+from discover.scanner import Scanner
+
+
class EventSubnetUpdate(EventBase):
    """Handle a Neutron subnet.update notification."""

    def handle(self, env, notification):
        """Apply a subnet update to the network document.

        Reacts to DHCP being switched on (create DHCP documents and rescan
        links/cliques) or off (delete the DHCP documents), then stores the
        updated subnet entry in the network document.
        """
        subnet = notification['payload']['subnet']
        project = notification['_context_project_name']
        host_id = notification['publisher_id'].replace('network.', '', 1)
        subnet_id = subnet['id']
        network_id = subnet['network_id']
        network_document = self.inv.get_by_id(env, network_id)
        if not network_document:
            self.log.info('network document does not exist, aborting subnet update')
            return EventResult(result=False, retry=True)

        # locate the stored subnet entry (keyed by name) matching this id
        subnets = network_document['subnets']
        key = next(filter(lambda k: subnets[k]['id'] == subnet_id, subnets),
                   None)

        if key:
            if subnet['enable_dhcp'] and subnets[key]['enable_dhcp'] is False:
                # DHCP switched on: scan the DHCP namespace and add the
                # related documents.
                host = self.inv.get_by_id(env, host_id)
                port_handler = EventPortAdd()
                port_handler.add_dhcp_document(env, host, network_id,
                                               network_document['name'])

                # make sure that the region cache is not empty.
                if len(ApiAccess.regions) == 0:
                    fetcher = ApiFetchRegions()
                    fetcher.set_env(env)
                    fetcher.get(None)

                self.log.info("add port binding to DHCP server.")
                port_id = DbFetchPort(). \
                    get_id_by_field(network_id,
                                    """device_owner LIKE "%dhcp" """)
                port = EventSubnetAdd(). \
                    add_port_document(env, port_id,
                                      network_name=network_document['name'],
                                      project_name=project)
                if port:
                    port_handler. \
                        add_vnic_document(env, host, network_id,
                                          network_name=network_document['name'],
                                          mac_address=port['mac_address'])
                # add link for vservice - vnic
                # BUGFIX: the original repeated this identical rescan block
                # twice in a row (copy-paste); one pass is sufficient.
                FindLinksForVserviceVnics(). \
                    add_links(search={"id": "qdhcp-%s" % network_id})
                scanner = Scanner()
                scanner.set_env(env)
                scanner.scan_cliques()

            if subnet['enable_dhcp'] is False and subnets[key]['enable_dhcp']:
                # DHCP switched off: delete the related DHCP documents.
                self.inv.delete("inventory",
                                {'id': "qdhcp-%s" % subnet['network_id']})
                self.log.info("delete DHCP document: qdhcp-%s" %
                              subnet['network_id'])

                port = self.inv.find_items({'network_id': subnet['network_id'],
                                            'device_owner': 'network:dhcp'},
                                           get_single=True)
                # BUGFIX: guard against a missing port; the original
                # evaluated "'id' in port", which raises TypeError when
                # find_items(..., get_single=True) yields no document.
                if port and 'id' in port:
                    EventPortDelete().delete_port(env, port['id'])
                    self.log.info("delete port binding to DHCP server.")

            if subnet['name'] == subnets[key]['name']:
                subnets[key] = subnet
            else:
                # TODO: #AskCheng shouldn't we remove the old one?
                subnets[subnet['name']] = subnet

        self.inv.set(network_document)
        return EventResult(result=True,
                           related_object=subnet['id'],
                           display_context=network_id)
diff --git a/app/discover/events/listeners/__init__.py b/app/discover/events/listeners/__init__.py
new file mode 100644
index 0000000..1e85a2a
--- /dev/null
+++ b/app/discover/events/listeners/__init__.py
@@ -0,0 +1,10 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
diff --git a/app/discover/events/listeners/default_listener.py b/app/discover/events/listeners/default_listener.py
new file mode 100755
index 0000000..a135673
--- /dev/null
+++ b/app/discover/events/listeners/default_listener.py
@@ -0,0 +1,314 @@
+#!/usr/bin/env python3
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
+import argparse
+import datetime
+import json
+import os
+import time
+from collections import defaultdict
+from typing import List
+
+from kombu import Connection, Queue, Exchange
+from kombu.mixins import ConsumerMixin
+
+from discover.configuration import Configuration
+from discover.event_handler import EventHandler
+from discover.events.event_base import EventResult
+from discover.events.event_metadata_parser import parse_metadata_file
+from discover.events.listeners.listener_base import ListenerBase
+from messages.message import Message
+from monitoring.setup.monitoring_setup_manager import MonitoringSetupManager
+from utils.constants import OperationalStatus, EnvironmentFeatures
+from utils.inventory_mgr import InventoryMgr
+from utils.logging.full_logger import FullLogger
+from utils.mongo_access import MongoAccess
+from utils.string_utils import stringify_datetime
+from utils.util import SignalHandler, setup_args
+
+
class DefaultListener(ListenerBase, ConsumerMixin):
    """AMQP consumer that dispatches OpenStack notifications to handlers.

    Consumes JSON messages from the configured event queues, routes each
    known event_type to its registered handler, saves an audit Message
    document per handled event, and acks / requeues / rejects messages
    according to the handler result and the retry limit.
    """

    SOURCE_SYSTEM = "OpenStack"

    # name of the metadata file (queues + handlers) shipped with the app
    COMMON_METADATA_FILE = "events.json"

    DEFAULTS = {
        "env": "Mirantis-Liberty",
        "mongo_config": "",
        "metadata_file": "",
        "inventory": "inventory",
        "loglevel": "INFO",
        "environments_collection": "environments_config",
        "retry_limit": 10,
        "consume_all": False
    }

    def __init__(self, connection: Connection,
                 event_handler: EventHandler,
                 event_queues: List,
                 env_name: str = DEFAULTS["env"],
                 inventory_collection: str = DEFAULTS["inventory"],
                 retry_limit: int = DEFAULTS["retry_limit"],
                 consume_all: bool = DEFAULTS["consume_all"]):
        super().__init__()

        self.connection = connection
        self.retry_limit = retry_limit
        self.env_name = env_name
        self.consume_all = consume_all
        self.handler = event_handler
        self.event_queues = event_queues
        # retry counter per message_id, compared against retry_limit
        self.failing_messages = defaultdict(int)

        self.inv = InventoryMgr()
        self.inv.set_collections(inventory_collection)
        # start monitoring setup only when the environment supports it
        if self.inv.is_feature_supported(self.env_name, EnvironmentFeatures.MONITORING):
            self.inv.monitoring_setup_manager = \
                MonitoringSetupManager(self.env_name)
            self.inv.monitoring_setup_manager.server_setup()

    def get_consumers(self, consumer, channel):
        # ConsumerMixin hook: a single consumer over all event queues
        return [consumer(queues=self.event_queues,
                         accept=['json'],
                         callbacks=[self.process_task])]

    # Determines if message should be processed by a handler
    # and extracts message body if yes.
    @staticmethod
    def _extract_event_data(body):
        if "event_type" in body:
            return True, body
        elif "event_type" in body.get("oslo.message", ""):
            # oslo wraps the real notification in a JSON string
            return True, json.loads(body["oslo.message"])
        else:
            return False, None

    def process_task(self, body, message):
        """Consumer callback: handle one AMQP message end-to-end."""
        received_timestamp = stringify_datetime(datetime.datetime.now())
        processable, event_data = self._extract_event_data(body)
        # If env listener can't process the message
        # or it's not intended for env listener to handle,
        # leave the message in the queue unless "consume_all" flag is set
        if processable and event_data["event_type"] in self.handler.handlers:
            # NOTE(review): looks like leftover debug tracing — confirm
            # whether appending every event to /tmp/listener.log is intended.
            with open("/tmp/listener.log", "a") as f:
                f.write("{}\n".format(event_data))
            event_result = self.handle_event(event_data["event_type"],
                                             event_data)
            finished_timestamp = stringify_datetime(datetime.datetime.now())
            self.save_message(message_body=event_data,
                              result=event_result,
                              started=received_timestamp,
                              finished=finished_timestamp)

            # Check whether the event was fully handled
            # and, if not, whether it should be retried later
            if event_result.result:
                message.ack()
            elif event_result.retry:
                if 'message_id' not in event_data:
                    message.reject()
                else:
                    # Track message retry count
                    message_id = event_data['message_id']
                    self.failing_messages[message_id] += 1

                    # Retry handling the message
                    if self.failing_messages[message_id] <= self.retry_limit:
                        self.inv.log.info("Retrying handling message " +
                                          "with id '{}'".format(message_id))
                        message.requeue()
                    # Discard the message if it's not accepted
                    # after specified number of trials
                    else:
                        self.inv.log.warn("Discarding message with id '{}' ".
                                          format(message_id) +
                                          "as it's exceeded the retry limit")
                        message.reject()
                        del self.failing_messages[message_id]
            else:
                message.reject()
        elif self.consume_all:
            message.reject()

    # This method passes the event to its handler.
    # Returns a (result, retry) tuple:
    # 'Result' flag is True if handler has finished successfully,
    # False otherwise
    # 'Retry' flag specifies if the error is recoverable or not
    # 'Retry' flag is checked only if 'result' is False
    def handle_event(self, event_type: str, notification: dict) -> EventResult:
        print("Got notification.\nEvent_type: {}\nNotification:\n{}".
              format(event_type, notification))
        try:
            result = self.handler.handle(event_name=event_type,
                                         notification=notification)
            return result if result else EventResult(result=False, retry=False)
        except Exception as e:
            # any handler failure is logged and treated as non-retryable
            self.inv.log.exception(e)
            return EventResult(result=False, retry=False)

    def save_message(self, message_body: dict, result: EventResult,
                     started: str, finished: str):
        """Persist an audit Message document for one handled event.

        Returns True on success, False when the insert failed; a failure is
        logged but never propagated into the consumer loop.
        """
        try:
            message = Message(
                msg_id=message_body.get('message_id'),
                env=self.env_name,
                source=self.SOURCE_SYSTEM,
                object_id=result.related_object,
                display_context=result.display_context,
                level=message_body.get('priority'),
                msg=message_body,
                ts=message_body.get('timestamp'),
                received_ts=started,
                finished_ts=finished
            )
            self.inv.collections['messages'].insert_one(message.get())
            return True
        except Exception as e:
            self.inv.log.error("Failed to save message")
            self.inv.log.exception(e)
            return False

    @staticmethod
    def listen(args: dict = None):
        """Set up configuration, queues and handlers, then consume events.

        Blocks until interrupted; reports operational status back to the
        caller through args['process_vars'].
        """
        args = setup_args(args, DefaultListener.DEFAULTS, get_args)
        if 'process_vars' not in args:
            args['process_vars'] = {}

        env_name = args["env"]
        inventory_collection = args["inventory"]

        MongoAccess.set_config_file(args["mongo_config"])
        conf = Configuration(args["environments_collection"])
        conf.use_env(env_name)

        event_handler = EventHandler(env_name, inventory_collection)
        event_queues = []

        env_config = conf.get_env_config()
        common_metadata_file = os.path.join(env_config.get('app_path', '/etc/calipso'),
                                            'config',
                                            DefaultListener.COMMON_METADATA_FILE)

        # import common metadata
        import_metadata(event_handler, event_queues, common_metadata_file)

        # import custom metadata if supplied
        if args["metadata_file"]:
            import_metadata(event_handler, event_queues, args["metadata_file"])

        inv = InventoryMgr()
        inv.set_collections(inventory_collection)
        logger = FullLogger()
        logger.set_loglevel(args["loglevel"])

        amqp_config = conf.get("AMQP")
        connect_url = 'amqp://{user}:{pwd}@{host}:{port}//' \
            .format(user=amqp_config["user"],
                    pwd=amqp_config["password"],
                    host=amqp_config["host"],
                    port=amqp_config["port"])

        with Connection(connect_url) as conn:
            try:
                print(conn)
                conn.connect()
                args['process_vars']['operational'] = OperationalStatus.RUNNING
                terminator = SignalHandler()
                worker = \
                    DefaultListener(connection=conn,
                                    event_handler=event_handler,
                                    event_queues=event_queues,
                                    retry_limit=args["retry_limit"],
                                    consume_all=args["consume_all"],
                                    inventory_collection=inventory_collection,
                                    env_name=env_name)
                worker.run()
                if terminator.terminated:
                    args.get('process_vars', {})['operational'] = \
                        OperationalStatus.STOPPED
            except KeyboardInterrupt:
                print('Stopped')
                args['process_vars']['operational'] = OperationalStatus.STOPPED
            except Exception as e:
                logger.log.exception(e)
                args['process_vars']['operational'] = OperationalStatus.ERROR
            finally:
                # This should enable safe saving of shared variables
                time.sleep(0.1)
+
+
def get_args():
    """Build and parse the command-line arguments for the listener.

    Returns the argparse.Namespace carrying the listener configuration;
    defaults come from DefaultListener.DEFAULTS.
    """
    # Read listener config from command line args
    parser = argparse.ArgumentParser()
    parser.add_argument("-m", "--mongo_config", nargs="?", type=str,
                        default=DefaultListener.DEFAULTS["mongo_config"],
                        help="Name of config file with MongoDB access details")
    parser.add_argument("--metadata_file", nargs="?", type=str,
                        default=DefaultListener.DEFAULTS["metadata_file"],
                        help="Name of custom configuration metadata file")
    def_env_collection = DefaultListener.DEFAULTS["environments_collection"]
    parser.add_argument("-c", "--environments_collection", nargs="?", type=str,
                        default=def_env_collection,
                        help="Name of collection where selected environment " +
                             "is taken from \n(default: {})"
                             .format(def_env_collection))
    parser.add_argument("-e", "--env", nargs="?", type=str,
                        default=DefaultListener.DEFAULTS["env"],
                        help="Name of target listener environment \n" +
                             "(default: {})"
                             .format(DefaultListener.DEFAULTS["env"]))
    # BUGFIX: the original help literal was '...collection \n"" +"', which
    # concatenated a stray ' +' into the displayed help text instead of
    # joining the two strings.
    parser.add_argument("-y", "--inventory", nargs="?", type=str,
                        default=DefaultListener.DEFAULTS["inventory"],
                        help="Name of inventory collection \n" +
                             "(default: '{}')"
                             .format(DefaultListener.DEFAULTS["inventory"]))
    parser.add_argument("-l", "--loglevel", nargs="?", type=str,
                        default=DefaultListener.DEFAULTS["loglevel"],
                        help="Logging level \n(default: '{}')"
                             .format(DefaultListener.DEFAULTS["loglevel"]))
    parser.add_argument("-r", "--retry_limit", nargs="?", type=int,
                        default=DefaultListener.DEFAULTS["retry_limit"],
                        help="Maximum number of times the OpenStack message "
                             "should be requeued before being discarded \n" +
                             "(default: {})"
                             .format(DefaultListener.DEFAULTS["retry_limit"]))
    # BUGFIX: added the missing separating spaces between the adjacent
    # string literals ("consume" + "all", "messages." + "Otherwise") so the
    # help text reads correctly.
    parser.add_argument("--consume_all", action="store_true",
                        help="If this flag is set, " +
                             "environment listener will try to consume "
                             "all messages from OpenStack event queue "
                             "and reject incompatible messages. "
                             "Otherwise they'll just be ignored.",
                        default=DefaultListener.DEFAULTS["consume_all"])
    args = parser.parse_args()
    return args
+
+
# Imports metadata from file,
# updates event handler with new handlers
# and event queues with new queues
def import_metadata(event_handler: EventHandler,
                    event_queues: List[Queue],
                    metadata_file_path: str) -> None:
    """Load an event-metadata file and register its handlers and queues.

    Handlers are discovered inside the file's handlers package; each queue
    entry becomes a non-durable kombu Queue bound to a non-durable topic
    exchange with a catch-all ('#') routing key. event_queues is extended
    in place.
    """
    handlers_package, queues, event_handlers = \
        parse_metadata_file(metadata_file_path)
    event_handler.discover_handlers(handlers_package, event_handlers)
    event_queues.extend([
        Queue(q['queue'],
              Exchange(q['exchange'], 'topic', durable=False),
              durable=False, routing_key='#') for q in queues
    ])
+
+
# Script entry point: run the listener with configuration taken from argv.
if __name__ == '__main__':
    DefaultListener.listen()
diff --git a/app/discover/events/listeners/listener_base.py b/app/discover/events/listeners/listener_base.py
new file mode 100644
index 0000000..7052dc9
--- /dev/null
+++ b/app/discover/events/listeners/listener_base.py
@@ -0,0 +1,18 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from abc import ABC, abstractmethod
+
+
class ListenerBase(ABC):
    """Abstract base class for event listeners."""

    # BUGFIX: the original declared this @staticmethod with a 'self'
    # parameter; a static method receives no instance, and the concrete
    # implementation (DefaultListener.listen) takes an optional args dict.
    @staticmethod
    @abstractmethod
    def listen(args: dict = None):
        """Start consuming events; must be implemented by subclasses."""
        pass
diff --git a/app/discover/fetch_host_object_types.py b/app/discover/fetch_host_object_types.py
new file mode 100644
index 0000000..da38af7
--- /dev/null
+++ b/app/discover/fetch_host_object_types.py
@@ -0,0 +1,37 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetcher import Fetcher
+
+
class FetchHostObjectTypes(Fetcher):
    """Return the static child-object categories shown under a host node."""

    # (id, type, text) triplets for the folders displayed under a host
    _FOLDERS = (
        ("instances_root", "instances_folder", "Instances"),
        ("networks_root", "networks_folder", "Networks"),
        ("vservices_root", "vservices_folder", "vServices"),
    )

    def get(self, parent):
        """Build the tree fragment listing the host's object-type folders."""
        rows = [{"id": folder_id, "type": folder_type, "text": label}
                for folder_id, folder_type, label in self._FOLDERS]
        return {
            "id": "",
            "parent": parent,
            "rows": rows
        }
diff --git a/app/discover/fetch_region_object_types.py b/app/discover/fetch_region_object_types.py
new file mode 100644
index 0000000..047c84c
--- /dev/null
+++ b/app/discover/fetch_region_object_types.py
@@ -0,0 +1,37 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetcher import Fetcher
+
+
class FetchRegionObjectTypes(Fetcher):
    """Return the static child-object categories shown under a region node."""

    # (id, type, text) triplets for the folders displayed under a region;
    # the "network Agents" label keeps the original casing on purpose.
    _FOLDERS = (
        ("aggregates_root", "aggregates_folder", "Aggregates"),
        ("availability_zones_root", "availability_zones_folder",
         "Availability Zones"),
        ("network_agents_root", "network_agents_folder", "network Agents"),
    )

    def get(self, parent):
        """Build the tree fragment listing the region's object-type folders."""
        rows = [{"id": folder_id, "type": folder_type, "text": label}
                for folder_id, folder_type, label in self._FOLDERS]
        return {
            "id": "",
            "parent": parent,
            "rows": rows
        }
diff --git a/app/discover/fetcher.py b/app/discover/fetcher.py
new file mode 100644
index 0000000..8d7fdbb
--- /dev/null
+++ b/app/discover/fetcher.py
@@ -0,0 +1,35 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.configuration import Configuration
+from utils.logging.full_logger import FullLogger
+
+
class Fetcher:
    """Base class for all discovery fetchers.

    Holds the environment name, a logger and (after set_env) the shared
    configuration accessor used by concrete fetchers.
    """

    def __init__(self):
        super().__init__()
        self.env = None            # environment name, assigned by set_env()
        self.log = FullLogger()
        self.configuration = None  # created lazily in set_env()

    @staticmethod
    def escape(string):
        # default implementation performs no escaping; subclasses override
        return string

    def set_env(self, env):
        """Bind this fetcher (and its logger) to the given environment."""
        self.env = env
        self.log.set_env(env)
        self.configuration = Configuration()

    def get_env(self):
        """Return the environment name this fetcher is bound to."""
        return self.env

    def get(self, object_id):
        """Fetch data for object_id; base implementation returns None."""
        return None
diff --git a/app/discover/fetcher_new.py b/app/discover/fetcher_new.py
new file mode 100644
index 0000000..f545554
--- /dev/null
+++ b/app/discover/fetcher_new.py
@@ -0,0 +1,30 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetcher import Fetcher
+##old stuff
+class FetchHostObjectTypes(Fetcher):
+
+
+ def get(self, parent):
+ ret = {
+ "type": "host_object_type",
+ "id": "",
+ "parent": parent,
+ "rows": [
+ {"id": "instances_root", "text": "Instances", "descendants": 1},
+ {"id": "networks_root", "text": "Networks", "descendants": 1},
+ {"id": "pnics_root", "text": "pNICs", "descendants": 1},
+ {"id": "vservices_root", "text": "vServices", "descendants": 1}
+ ]
+ }
+ return ret
+
+ ## old/moved
+
diff --git a/app/discover/fetchers/__init__.py b/app/discover/fetchers/__init__.py
new file mode 100644
index 0000000..b0637e9
--- /dev/null
+++ b/app/discover/fetchers/__init__.py
@@ -0,0 +1,9 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
diff --git a/app/discover/fetchers/aci/__init__.py b/app/discover/fetchers/aci/__init__.py
new file mode 100644
index 0000000..b0637e9
--- /dev/null
+++ b/app/discover/fetchers/aci/__init__.py
@@ -0,0 +1,9 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
diff --git a/app/discover/fetchers/aci/aci_access.py b/app/discover/fetchers/aci/aci_access.py
new file mode 100644
index 0000000..836e45d
--- /dev/null
+++ b/app/discover/fetchers/aci/aci_access.py
@@ -0,0 +1,200 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import json
+
+import requests
+
+from discover.configuration import Configuration
+from discover.fetcher import Fetcher
+
+
+def aci_config_required(default=None):
+ def decorator(func):
+ def wrapper(self, *args, **kwargs):
+ if not self.aci_enabled:
+ return default
+ return func(self, *args, **kwargs)
+ return wrapper
+ return decorator
+
+
+class AciAccess(Fetcher):
+
+ RESPONSE_FORMAT = "json"
+ cookie_token = None
+
+ def __init__(self):
+ super().__init__()
+ self.configuration = Configuration()
+ self.aci_enabled = self.configuration.get_env_config() \
+ .get('aci_enabled', False)
+ self.aci_configuration = None
+ self.host = None
+ if self.aci_enabled:
+ self.aci_configuration = self.configuration.get("ACI")
+ self.host = self.aci_configuration["host"]
+
+ def get_base_url(self):
+ return "https://{}/api".format(self.host)
+
+ # Unwrap ACI response payload
+ # and return an array of desired fields' values.
+ #
+ # Parameters
+ # ----------
+ #
+ # payload: dict
+ # Full json response payload returned by ACI
+ # *field_names: Tuple[str]
+ # Enumeration of fields that are used to traverse ACI "imdata" array
+ # (order is important)
+ #
+ # Returns
+ # ----------
+ # list
+ # List of unwrapped dictionaries (or primitives)
+ #
+ # Example
+ # ----------
+ # Given payload:
+ #
+ # {
+ # "totalCount": "2",
+ # "imdata": [
+ # {
+ # "aaa": {
+ # "bbb": {
+ # "ccc": "value1"
+ # }
+ # }
+ # },
+ # {
+ # "aaa": {
+ # "bbb": {
+ # "ccc": "value2"
+ # }
+ # }
+ # }
+ # ]
+ # }
+ #
+ # Executing get_objects_by_field_names(payload, "aaa", "bbb")
+ # will yield the following result:
+ #
+ # >>> [{"ccc": "value1"}, {"ccc": "value2"}]
+ #
+ # Executing get_objects_by_field_names(payload, "aaa", "bbb", "ccc")
+ # will yield the following result:
+ #
+ # >>> ["value1", "value2"]
+ #
+ @staticmethod
+ def get_objects_by_field_names(payload, *field_names):
+ results = payload.get("imdata", [])
+ if not results:
+ return []
+
+ for field in field_names:
+ results = [entry[field] for entry in results]
+ return results
+
+ # Set auth tokens in request headers and cookies
+ @staticmethod
+ def _insert_token_into_request(cookies):
+ return dict(cookies, **AciAccess.cookie_token) \
+ if cookies \
+ else AciAccess.cookie_token
+
+ @staticmethod
+ def _set_token(response):
+ tokens = AciAccess.get_objects_by_field_names(response.json(), "aaaLogin", "attributes", "token")
+ token = tokens[0]
+
+ AciAccess.cookie_token = {"APIC-Cookie": token}
+
+ @aci_config_required()
+ def login(self):
+ url = "/".join((self.get_base_url(), "aaaLogin.json"))
+ payload = {
+ "aaaUser": {
+ "attributes": {
+ "name": self.aci_configuration["user"],
+ "pwd": self.aci_configuration["pwd"]
+ }
+ }
+ }
+
+ response = requests.post(url, json=payload, verify=False)
+ response.raise_for_status()
+
+ AciAccess._set_token(response)
+
+ # Refresh token or login if token has expired
+ @aci_config_required()
+ def refresh_token(self):
+ # First time login
+ if not AciAccess.cookie_token:
+ self.login()
+ return
+
+ url = "/".join((self.get_base_url(), "aaaRefresh.json"))
+
+ response = requests.get(url, verify=False)
+
+ # Login again if the token has expired
+ if response.status_code == requests.codes.forbidden:
+ self.login()
+ return
+ # Propagate any other error
+ elif response.status_code != requests.codes.ok:
+ response.raise_for_status()
+
+ AciAccess._set_token(response)
+
+ @aci_config_required(default={})
+ def send_get(self, url, params, headers, cookies):
+ self.refresh_token()
+
+ cookies = self._insert_token_into_request(cookies)
+
+ response = requests.get(url, params=params, headers=headers,
+ cookies=cookies, verify=False)
+ # Let client handle HTTP errors
+ response.raise_for_status()
+
+ return response.json()
+
+ # Search ACI for Managed Objects (MOs) of a specific class
+ @aci_config_required(default=[])
+ def fetch_objects_by_class(self,
+ class_name: str,
+ params: dict = None,
+ headers: dict = None,
+ cookies: dict = None,
+ response_format: str = RESPONSE_FORMAT):
+ url = "/".join((self.get_base_url(),
+ "class", "{cn}.{f}".format(cn=class_name, f=response_format)))
+
+ response_json = self.send_get(url, params, headers, cookies)
+ return self.get_objects_by_field_names(response_json, class_name)
+
+ # Fetch data for a specific Managed Object (MO)
+ @aci_config_required(default=[])
+ def fetch_mo_data(self,
+ dn: str,
+ params: dict = None,
+ headers: dict = None,
+ cookies: dict = None,
+ response_format: str = RESPONSE_FORMAT):
+ url = "/".join((self.get_base_url(), "mo", "topology",
+ "{dn}.{f}".format(dn=dn, f=response_format)))
+
+ response_json = self.send_get(url, params, headers, cookies)
+ return response_json
diff --git a/app/discover/fetchers/aci/aci_fetch_switch_pnic.py b/app/discover/fetchers/aci/aci_fetch_switch_pnic.py
new file mode 100644
index 0000000..a4216ea
--- /dev/null
+++ b/app/discover/fetchers/aci/aci_fetch_switch_pnic.py
@@ -0,0 +1,91 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import re
+
+from discover.fetchers.aci.aci_access import AciAccess, aci_config_required
+from utils.inventory_mgr import InventoryMgr
+from utils.util import encode_aci_dn, get_object_path_part
+
+
+class AciFetchSwitchPnic(AciAccess):
+
+ def __init__(self):
+ super().__init__()
+ self.inv = InventoryMgr()
+
+ def fetch_pnics_by_mac_address(self, mac_address):
+ mac_filter = "eq(epmMacEp.addr,\"{}\")".format(mac_address)
+ pnic_filter = "wcard(epmMacEp.ifId, \"eth\")"
+ query_filter = "and({},{})".format(mac_filter, pnic_filter)
+
+ pnics = self.fetch_objects_by_class("epmMacEp",
+ {"query-target-filter": query_filter})
+
+ return [pnic["attributes"] for pnic in pnics]
+
+ def fetch_switch_by_id(self, switch_id):
+ dn = "/".join((switch_id, "sys"))
+ response = self.fetch_mo_data(dn)
+ switch_data = self.get_objects_by_field_names(response, "topSystem", "attributes")
+ return switch_data[0] if switch_data else None
+
+ @aci_config_required(default=[])
+ def get(self, pnic_id):
+ environment = self.get_env()
+ pnic = self.inv.get_by_id(environment=environment, item_id=pnic_id)
+ if not pnic:
+ return []
+ mac_address = pnic.get("mac_address")
+ if not mac_address:
+ return []
+
+ switch_pnics = self.fetch_pnics_by_mac_address(mac_address)
+ if not switch_pnics:
+ return []
+ switch_pnic = switch_pnics[0]
+
+ # Prepare and save switch data in inventory
+ aci_id_match = re.match("topology/(.+)/sys", switch_pnic["dn"])
+ if not aci_id_match:
+ raise ValueError("Failed to fetch switch id from pnic dn: {}"
+ .format(switch_pnic["dn"]))
+
+ aci_switch_id = aci_id_match.group(1)
+ db_switch_id = encode_aci_dn(aci_switch_id)
+ if not self.inv.get_by_id(environment, db_switch_id):
+ switch_data = self.fetch_switch_by_id(aci_switch_id)
+ if not switch_data:
+ self.log.warning("No switch found for switch pnic dn: {}"
+ .format(switch_pnic["dn"]))
+ return []
+
+ switch_json = {
+ "id": db_switch_id,
+ "ip_address": switch_data["address"],
+ "type": "switch",
+ "aci_document": switch_data
+ }
+ # Region name is the same as region id
+ region_id = get_object_path_part(pnic["name_path"], "Regions")
+ region = self.inv.get_by_id(environment, region_id)
+ self.inv.save_inventory_object(o=switch_json, parent=region, environment=environment)
+
+ db_pnic_id = "-".join((db_switch_id,
+ encode_aci_dn(switch_pnic["ifId"]),
+ mac_address))
+ pnic_json = {
+ "id": db_pnic_id,
+ "type": "pnic",
+ "pnic_type": "switch",
+ "mac_address": mac_address,
+ "aci_document": switch_pnic
+ }
+ return [pnic_json]
+
diff --git a/app/discover/fetchers/api/__init__.py b/app/discover/fetchers/api/__init__.py
new file mode 100644
index 0000000..b0637e9
--- /dev/null
+++ b/app/discover/fetchers/api/__init__.py
@@ -0,0 +1,9 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
diff --git a/app/discover/fetchers/api/api_access.py b/app/discover/fetchers/api/api_access.py
new file mode 100644
index 0000000..89eeb34
--- /dev/null
+++ b/app/discover/fetchers/api/api_access.py
@@ -0,0 +1,195 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import calendar
+import re
+import requests
+import time
+
+from discover.configuration import Configuration
+from discover.fetcher import Fetcher
+from utils.string_utils import jsonify
+
+
+class ApiAccess(Fetcher):
+ subject_token = None
+ initialized = False
+ regions = {}
+ config = None
+ api_config = None
+
+ host = ""
+ base_url = ""
+ admin_token = ""
+ tokens = {}
+ admin_endpoint = ""
+ admin_project = None
+ auth_response = None
+
+ alternative_services = {
+ "neutron": ["quantum"]
+ }
+
+    # identity API v2 version with admin token
+ def __init__(self):
+ super(ApiAccess, self).__init__()
+ if ApiAccess.initialized:
+ return
+ ApiAccess.config = Configuration()
+ ApiAccess.api_config = ApiAccess.config.get("OpenStack")
+ host = ApiAccess.api_config["host"]
+ ApiAccess.host = host
+ port = ApiAccess.api_config["port"]
+ if not (host and port):
+ raise ValueError('Missing definition of host or port ' +
+ 'for OpenStack API access')
+ ApiAccess.base_url = "http://" + host + ":" + port
+ ApiAccess.admin_token = ApiAccess.api_config["admin_token"]
+ ApiAccess.admin_project = ApiAccess.api_config["admin_project"] \
+ if "admin_project" in ApiAccess.api_config \
+ else 'admin'
+ ApiAccess.admin_endpoint = "http://" + host + ":" + "35357"
+
+ token = self.v2_auth_pwd(ApiAccess.admin_project)
+ if not token:
+ raise ValueError("Authentication failed. Failed to obtain token")
+ else:
+ self.subject_token = token
+
+ @staticmethod
+ def parse_time(time_str):
+ try:
+ time_struct = time.strptime(time_str, "%Y-%m-%dT%H:%M:%SZ")
+ except ValueError:
+ try:
+ time_struct = time.strptime(time_str,
+ "%Y-%m-%dT%H:%M:%S.%fZ")
+ except ValueError:
+ return None
+ return time_struct
+
+ # try to use existing token, if it did not expire
+ def get_existing_token(self, project_id):
+ try:
+ token_details = ApiAccess.tokens[project_id]
+ except KeyError:
+ return None
+ token_expiry = token_details["expires"]
+ token_expiry_time_struct = self.parse_time(token_expiry)
+ if not token_expiry_time_struct:
+ return None
+ token_expiry_time = token_details["token_expiry_time"]
+ now = time.time()
+ if now > token_expiry_time:
+ # token has expired
+ ApiAccess.tokens.pop(project_id)
+ return None
+ return token_details
+
+ def v2_auth(self, project_id, headers, post_body):
+ subject_token = self.get_existing_token(project_id)
+ if subject_token:
+ return subject_token
+ req_url = ApiAccess.base_url + "/v2.0/tokens"
+ response = requests.post(req_url, json=post_body, headers=headers)
+ ApiAccess.auth_response = response.json()
+ if 'error' in self.auth_response:
+ e = self.auth_response['error']
+ self.log.error(str(e['code']) + ' ' + e['title'] + ': ' +
+ e['message'] + ", URL: " + req_url)
+ return None
+ try:
+ token_details = ApiAccess.auth_response["access"]["token"]
+ except KeyError:
+ # assume authentication failed
+ return None
+ token_expiry = token_details["expires"]
+ token_expiry_time_struct = self.parse_time(token_expiry)
+ if not token_expiry_time_struct:
+ return None
+ token_expiry_time = calendar.timegm(token_expiry_time_struct)
+ token_details["token_expiry_time"] = token_expiry_time
+ ApiAccess.tokens[project_id] = token_details
+ return token_details
+
+ def v2_auth_pwd(self, project):
+ user = ApiAccess.api_config["user"]
+ pwd = ApiAccess.api_config["pwd"]
+ post_body = {
+ "auth": {
+ "passwordCredentials": {
+ "username": user,
+ "password": pwd
+ }
+ }
+ }
+ if project is not None:
+ post_body["auth"]["tenantName"] = project
+ project_id = project
+ else:
+ project_id = ""
+ headers = {
+ 'Accept': 'application/json',
+ 'Content-Type': 'application/json; charset=UTF-8'
+ }
+ return self.v2_auth(project_id, headers, post_body)
+
+ def get_rel_url(self, relative_url, headers):
+ req_url = ApiAccess.base_url + relative_url
+ return self.get_url(req_url, headers)
+
+ def get_url(self, req_url, headers):
+ response = requests.get(req_url, headers=headers)
+ if response.status_code != requests.codes.ok:
+ # some error happened
+ if "reason" in response:
+ msg = ", reason: {}".format(response.reason)
+ else:
+ msg = ", response: {}".format(response.text)
+ self.log.error("req_url: {} {}".format(req_url, msg))
+ return response
+ ret = response.json()
+ return ret
+
+ def get_region_url(self, region_name, service):
+ if region_name not in self.regions:
+ return None
+ region = self.regions[region_name]
+ s = self.get_service_region_endpoints(region, service)
+ if not s:
+ return None
+ orig_url = s["adminURL"]
+ # replace host name with the host found in config
+ url = re.sub(r"^([^/]+)//[^:]+", r"\1//" + ApiAccess.host, orig_url)
+ return url
+
+ # like get_region_url(), but remove everything starting from the "/v2"
+ def get_region_url_nover(self, region, service):
+ full_url = self.get_region_url(region, service)
+ if not full_url:
+ self.log.error("could not find region URL for region: " + region)
+ exit()
+ url = re.sub(r":([0-9]+)/v[2-9].*", r":\1", full_url)
+ return url
+
+ def get_catalog(self, pretty):
+ return jsonify(self.regions, pretty)
+
+ # find the endpoints for a given service name,
+ # considering also alternative service names
+ def get_service_region_endpoints(self, region, service):
+ alternatives = [service]
+ endpoints = region["endpoints"]
+ if service in self.alternative_services:
+ alternatives.extend(self.alternative_services[service])
+ for sname in alternatives:
+ if sname in endpoints:
+ return endpoints[sname]
+ return None
+
diff --git a/app/discover/fetchers/api/api_fetch_availability_zones.py b/app/discover/fetchers/api/api_fetch_availability_zones.py
new file mode 100644
index 0000000..196893b
--- /dev/null
+++ b/app/discover/fetchers/api/api_fetch_availability_zones.py
@@ -0,0 +1,56 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.api.api_access import ApiAccess
+
+
+class ApiFetchAvailabilityZones(ApiAccess):
+ def __init__(self):
+ super(ApiFetchAvailabilityZones, self).__init__()
+
+ def get(self, project_id):
+ token = self.v2_auth_pwd(project_id)
+ if not token:
+ return []
+ ret = []
+ for region in self.regions:
+ ret.extend(self.get_for_region(project_id, region, token))
+ return ret
+
+ def get_for_region(self, project, region, token):
+ # we use os-availability-zone/detail rather than os-availability-zone,
+        # because the latter does not include the "internal" zone in the results
+ endpoint = self.get_region_url_nover(region, "nova")
+ req_url = endpoint + "/v2/" + token["tenant"]["id"] + \
+ "/os-availability-zone/detail"
+ headers = {
+ "X-Auth-Project-Id": project,
+ "X-Auth-Token": token["id"]
+ }
+ response = self.get_url(req_url, headers)
+ if "status" in response and int(response["status"]) != 200:
+ return []
+ ret = []
+ if "availabilityZoneInfo" not in response:
+ return []
+ azs = response["availabilityZoneInfo"]
+ if not azs:
+ return []
+ for doc in azs:
+ doc["id"] = doc["zoneName"]
+ doc["name"] = doc.pop("zoneName")
+ doc["master_parent_type"] = "region"
+ doc["master_parent_id"] = region
+ doc["parent_type"] = "availability_zones_folder"
+ doc["parent_id"] = region + "-availability_zones"
+ doc["parent_text"] = "Availability Zones"
+ doc["available"] = doc["zoneState"]["available"]
+ doc.pop("zoneState")
+ ret.append(doc)
+ return ret
diff --git a/app/discover/fetchers/api/api_fetch_end_points.py b/app/discover/fetchers/api/api_fetch_end_points.py
new file mode 100644
index 0000000..9471c7e
--- /dev/null
+++ b/app/discover/fetchers/api/api_fetch_end_points.py
@@ -0,0 +1,35 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+# fetch the end points for a given project (tenant)
+# return list of regions, to allow further recursive scanning
+
+from discover.fetchers.api.api_access import ApiAccess
+
+
+class ApiFetchEndPoints(ApiAccess):
+
+ def get(self, project_id):
+ if project_id != "admin":
+ return [] # XXX currently having problems authenticating to other tenants
+ self.v2_auth_pwd(project_id)
+
+ environment = ApiAccess.config.get_env_name()
+ regions = []
+ services = ApiAccess.auth_response['access']['serviceCatalog']
+ endpoints = []
+ for s in services:
+ if s["type"] != "identity":
+ continue
+ e = s["endpoints"][0]
+ e["environment"] = environment
+ e["project"] = project_id
+ e["type"] = "endpoint"
+ endpoints.append(e)
+ return endpoints
diff --git a/app/discover/fetchers/api/api_fetch_host_instances.py b/app/discover/fetchers/api/api_fetch_host_instances.py
new file mode 100644
index 0000000..56cffda
--- /dev/null
+++ b/app/discover/fetchers/api/api_fetch_host_instances.py
@@ -0,0 +1,59 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.api.api_access import ApiAccess
+from discover.fetchers.db.db_access import DbAccess
+from discover.fetchers.db.db_fetch_instances import DbFetchInstances
+from utils.inventory_mgr import InventoryMgr
+from utils.singleton import Singleton
+
+
+class ApiFetchHostInstances(ApiAccess, DbAccess, metaclass=Singleton):
+ def __init__(self):
+ super(ApiFetchHostInstances, self).__init__()
+ self.inv = InventoryMgr()
+ self.endpoint = ApiAccess.base_url.replace(":5000", ":8774")
+ self.projects = None
+ self.db_fetcher = DbFetchInstances()
+
+ def get_projects(self):
+ if not self.projects:
+ projects_list = self.inv.get(self.get_env(), "project", None)
+ self.projects = [p["name"] for p in projects_list]
+
+ def get(self, id):
+ self.get_projects()
+ host_id = id[:id.rindex("-")]
+ host = self.inv.get_by_id(self.get_env(), host_id)
+ if not host or "Compute" not in host.get("host_type", ""):
+ return []
+ instances_found = self.get_instances_from_api(host_id)
+ self.db_fetcher.get_instance_data(instances_found)
+ return instances_found
+
+ def get_instances_from_api(self, host_name):
+ token = self.v2_auth_pwd(self.admin_project)
+ if not token:
+ return []
+ tenant_id = token["tenant"]["id"]
+ req_url = self.endpoint + "/v2/" + tenant_id + \
+ "/os-hypervisors/" + host_name + "/servers"
+ response = self.get_url(req_url, {"X-Auth-Token": token["id"]})
+ ret = []
+ if not "hypervisors" in response:
+ return []
+ if not "servers" in response["hypervisors"][0]:
+ return []
+ for doc in response["hypervisors"][0]["servers"]:
+ doc["id"] = doc["uuid"]
+ doc["host"] = host_name
+ doc["local_name"] = doc.pop("name")
+ ret.append(doc)
+ self.log.info("found %s instances for host: %s", str(len(ret)), host_name)
+ return ret
diff --git a/app/discover/fetchers/api/api_fetch_network.py b/app/discover/fetchers/api/api_fetch_network.py
new file mode 100644
index 0000000..889b8a5
--- /dev/null
+++ b/app/discover/fetchers/api/api_fetch_network.py
@@ -0,0 +1,76 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.api.api_access import ApiAccess
+from utils.inventory_mgr import InventoryMgr
+
+
+class ApiFetchNetwork(ApiAccess):
+ def __init__(self):
+ super(ApiFetchNetwork, self).__init__()
+ self.inv = InventoryMgr()
+
+ def get(self, project_id):
+ # use project admin credentials, to be able to fetch all networks
+ token = self.v2_auth_pwd(self.admin_project)
+ if not token:
+ return []
+ ret = []
+ for region in self.regions:
+ # TODO: refactor legacy code (Unresolved reference - self.get_for_region)
+ ret.extend(self.get_for_region(region, token, project_id))
+ return ret
+
+ def get_network(self, region, token, subnet_id):
+ endpoint = self.get_region_url_nover(region, "neutron")
+
+        # get the target network document
+ req_url = endpoint + "/v2.0/networks/" + subnet_id
+ headers = {
+ "X-Auth-Project-Id": self.admin_project,
+ "X-Auth-Token": token["id"]
+ }
+ response = self.get_url(req_url, headers)
+ if not "network" in response:
+ return []
+ network = response["network"]
+ subnets = network['subnets']
+
+ # get subnets documents.
+ subnets_hash = {}
+ cidrs = []
+ subnet_ids = []
+ for subnet_id in subnets:
+ req_url = endpoint + "/v2.0/subnets/" + subnet_id
+ response = self.get_url(req_url, headers)
+ if "subnet" in response:
+ # create a hash subnets, to allow easy locating of subnets
+ subnet = response["subnet"]
+ subnets_hash[subnet["name"]] = subnet
+ cidrs.append(subnet["cidr"])
+ subnet_ids.append(subnet["id"])
+
+ network["subnets"] = subnets_hash
+ network["cidrs"] = cidrs
+ network["subnet_ids"] = subnet_ids
+
+ network["master_parent_type"] = "project"
+ network["master_parent_id"] = network["tenant_id"]
+ network["parent_type"] = "networks_folder"
+ network["parent_id"] = network["tenant_id"] + "-networks"
+ network["parent_text"] = "Networks"
+ # set the 'network' attribute for network objects to the name of network,
+ # to allow setting constraint on network when creating network clique
+ network['network'] = network["id"]
+ # get the project name
+ project = self.inv.get_by_id(self.get_env(), network["tenant_id"])
+ if project:
+ network["project"] = project["name"]
+
+ return network
diff --git a/app/discover/fetchers/api/api_fetch_networks.py b/app/discover/fetchers/api/api_fetch_networks.py
new file mode 100644
index 0000000..4b70f65
--- /dev/null
+++ b/app/discover/fetchers/api/api_fetch_networks.py
@@ -0,0 +1,86 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.api.api_access import ApiAccess
+from utils.inventory_mgr import InventoryMgr
+
+
+class ApiFetchNetworks(ApiAccess):
+ def __init__(self):
+ super(ApiFetchNetworks, self).__init__()
+ self.inv = InventoryMgr()
+
+ def get(self, project_id=None):
+ # use project admin credentials, to be able to fetch all networks
+ token = self.v2_auth_pwd(self.admin_project)
+ if not token:
+ return []
+ ret = []
+ for region in self.regions:
+ ret.extend(self.get_networks(region, token))
+ return ret
+
+ def get_networks(self, region, token):
+ endpoint = self.get_region_url_nover(region, "neutron")
+ req_url = endpoint + "/v2.0/networks"
+ headers = {
+ "X-Auth-Project-Id": self.admin_project,
+ "X-Auth-Token": token["id"]
+ }
+ response = self.get_url(req_url, headers)
+ if not "networks" in response:
+ return []
+ networks = response["networks"]
+ req_url = endpoint + "/v2.0/subnets"
+ response = self.get_url(req_url, headers)
+ subnets_hash = {}
+ if "subnets" in response:
+ # create a hash subnets, to allow easy locating of subnets
+ subnets = response["subnets"]
+ for s in subnets:
+ subnets_hash[s["id"]] = s
+ for doc in networks:
+ doc["master_parent_type"] = "project"
+ project_id = doc["tenant_id"]
+ if not project_id:
+ # find project ID of admin project
+ project = self.inv.get_by_field(self.get_env(),
+ "project", "name",
+ self.admin_project,
+ get_single=True)
+ if not project:
+ self.log.error("failed to find admin project in DB")
+ project_id = project["id"]
+ doc["master_parent_id"] = project_id
+ doc["parent_type"] = "networks_folder"
+ doc["parent_id"] = project_id + "-networks"
+ doc["parent_text"] = "Networks"
+ # set the 'network' attribute for network objects to the name of network,
+ # to allow setting constraint on network when creating network clique
+ doc['network'] = doc["id"]
+ # get the project name
+ project = self.inv.get_by_id(self.get_env(), project_id)
+ if project:
+ doc["project"] = project["name"]
+ subnets_details = {}
+ cidrs = []
+ subnet_ids = []
+ for s in doc["subnets"]:
+ try:
+ subnet = subnets_hash[s]
+ cidrs.append(subnet["cidr"])
+ subnet_ids.append(subnet["id"])
+ subnets_details[subnet["name"]] = subnet
+ except KeyError:
+ pass
+
+ doc["subnets"] = subnets_details
+ doc["cidrs"] = cidrs
+ doc["subnet_ids"] = subnet_ids
+ return networks
diff --git a/app/discover/fetchers/api/api_fetch_port.py b/app/discover/fetchers/api/api_fetch_port.py
new file mode 100644
index 0000000..f8d9eeb
--- /dev/null
+++ b/app/discover/fetchers/api/api_fetch_port.py
@@ -0,0 +1,60 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.api.api_access import ApiAccess
+from utils.inventory_mgr import InventoryMgr
+
+
+class ApiFetchPort(ApiAccess):
+ def __init__(self):
+ super(ApiFetchPort, self).__init__()
+ self.inv = InventoryMgr()
+
+ def get(self, project_id):
+ if not project_id:
+ self.log.info("Get method needs ID parameter")
+ return []
+ # use project admin credentials, to be able to fetch all ports
+ token = self.v2_auth_pwd(self.admin_project)
+ if not token:
+ return []
+ ret = []
+ for region in self.regions:
+ ret.append(self.get_port(region, token, project_id))
+ if ret == []:
+ self.log.info("ApiFetchPort: Port not found.")
+ return ret
+
+ def get_port(self, region, token, id):
+ endpoint = self.get_region_url_nover(region, "neutron")
+ req_url = endpoint + "/v2.0/ports/" + id
+ headers = {
+ "X-Auth-Project-Id": self.admin_project,
+ "X-Auth-Token": token["id"]
+ }
+ response = self.get_url(req_url, headers)
+ if not "port" in response:
+ return []
+
+ doc = response["port"]
+ doc["master_parent_type"] = "network"
+ doc["master_parent_id"] = doc["network_id"]
+ doc["parent_type"] = "ports_folder"
+ doc["parent_id"] = doc["network_id"] + "-ports"
+ doc["parent_text"] = "Ports"
+ # get the project name
+ net = self.inv.get_by_id(self.get_env(), doc["network_id"])
+ if net:
+ doc["name"] = doc["mac_address"]
+ else:
+ doc["name"] = doc["id"]
+ project = self.inv.get_by_id(self.get_env(), doc["tenant_id"])
+ if project:
+ doc["project"] = project["name"]
+ return doc
diff --git a/app/discover/fetchers/api/api_fetch_ports.py b/app/discover/fetchers/api/api_fetch_ports.py
new file mode 100644
index 0000000..f4c54a6
--- /dev/null
+++ b/app/discover/fetchers/api/api_fetch_ports.py
@@ -0,0 +1,55 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.api.api_access import ApiAccess
+from utils.inventory_mgr import InventoryMgr
+
+
class ApiFetchPorts(ApiAccess):
    """Fetch all Neutron ports in all regions via the OpenStack API."""

    def __init__(self):
        super(ApiFetchPorts, self).__init__()
        self.inv = InventoryMgr()

    def get(self, project_id):
        """Return enriched port documents from every known region."""
        # use project admin credentials, to be able to fetch all ports
        token = self.v2_auth_pwd(self.admin_project)
        if not token:
            return []
        ret = []
        for region in self.regions:
            ret.extend(self.get_ports_for_region(region, token))
        return ret

    def get_ports_for_region(self, region, token):
        """Fetch and enrich all ports from Neutron in one region."""
        endpoint = self.get_region_url_nover(region, "neutron")
        req_url = endpoint + "/v2.0/ports"
        headers = {
            "X-Auth-Project-Id": self.admin_project,
            "X-Auth-Token": token["id"]
        }
        response = self.get_url(req_url, headers)
        # idiom fix: "x not in y" instead of "not x in y"
        if "ports" not in response:
            return []
        ports = response["ports"]
        for doc in ports:
            doc["master_parent_type"] = "network"
            doc["master_parent_id"] = doc["network_id"]
            doc["parent_type"] = "ports_folder"
            doc["parent_id"] = doc["network_id"] + "-ports"
            doc["parent_text"] = "Ports"
            # name the port by MAC when its network is known, else by UUID
            net = self.inv.get_by_id(self.get_env(), doc["network_id"])
            if net:
                doc["name"] = doc["mac_address"]
            else:
                doc["name"] = doc["id"]
            # get the project name
            project = self.inv.get_by_id(self.get_env(), doc["tenant_id"])
            if project:
                doc["project"] = project["name"]
        return ports
diff --git a/app/discover/fetchers/api/api_fetch_project_hosts.py b/app/discover/fetchers/api/api_fetch_project_hosts.py
new file mode 100644
index 0000000..7dc262e
--- /dev/null
+++ b/app/discover/fetchers/api/api_fetch_project_hosts.py
@@ -0,0 +1,144 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import json
+
+from discover.fetchers.api.api_access import ApiAccess
+from discover.fetchers.db.db_access import DbAccess
+
+
class ApiFetchProjectHosts(ApiAccess, DbAccess):
    """Fetch the hosts of the admin project, combining the Nova
    availability-zone / hypervisor APIs with the Neutron and Nova DBs."""

    def __init__(self):
        super(ApiFetchProjectHosts, self).__init__()

    def get(self, project_id):
        """Return host documents for all regions (admin project only)."""
        if project_id != self.admin_project:
            # do not scan hosts except under project 'admin'
            return []
        token = self.v2_auth_pwd(self.admin_project)
        if not token:
            return []
        ret = []
        for region in self.regions:
            ret.extend(self.get_for_region(region, token))
        return ret

    def get_for_region(self, region, token):
        """Return host documents for one region.

        Merges availability-zone data with hypervisor IDs and then
        enriches network-node documents from the Neutron DB.
        """
        endpoint = self.get_region_url(region, "nova")
        ret = []
        if not token:
            return []
        req_url = endpoint + "/os-availability-zone/detail"
        headers = {
            "X-Auth-Project-Id": self.admin_project,
            "X-Auth-Token": token["id"]
        }
        response = self.get_url(req_url, headers)
        if "status" in response and int(response["status"]) != 200:
            return []
        az_info = response["availabilityZoneInfo"]
        hosts = {}
        for doc in az_info:
            az_hosts = self.get_hosts_from_az(doc)
            for h in az_hosts:
                if h["name"] in hosts:
                    # merge host_type data between AZs
                    existing_entry = hosts[h["name"]]
                    for t in h["host_type"]:
                        self.add_host_type(existing_entry, t, doc['zoneName'])
                else:
                    hosts[h["name"]] = h
                    ret.append(h)
        # get os_id for hosts using the os-hypervisors API call
        req_url = endpoint + "/os-hypervisors"
        response = self.get_url(req_url, headers)
        if "status" in response and int(response["status"]) != 200:
            return ret
        if "hypervisors" not in response:
            return ret
        for h in response["hypervisors"]:
            hvname = h["hypervisor_hostname"]
            if '.' in hvname and hvname not in hosts:
                # hypervisor name looks like an FQDN - strip the domain part
                hostname = hvname[:hvname.index('.')]
            else:
                hostname = hvname
            doc = hosts.get(hostname)
            if doc is None:
                # hypervisor not among the AZ hosts - log it and move on
                # (resolves the original "TBD - add error output")
                self.log.error("hypervisor host not in availability zones: " +
                               hostname)
                continue
            doc["os_id"] = str(h["id"])
            self.fetch_compute_node_ip_address(doc, hvname)
        # get more network nodes details
        self.fetch_network_node_details(ret)
        return ret

    def get_hosts_from_az(self, az):
        """Return host documents for all hosts of one availability zone."""
        return [self.get_host_details(az, h) for h in az["hosts"]]

    def get_host_details(self, az, h):
        """Build a host document from its availability-zone entry.

        Host type (Controller/Compute) is derived from the services the
        AZ reports as available and active on the host.
        """
        # for hosts we use the name
        services = az["hosts"][h]
        doc = {
            "id": h,
            "host": h,
            "name": h,
            "zone": az["zoneName"],
            "parent_type": "availability_zone",
            "parent_id": az["zoneName"],
            "services": services,
            "host_type": []
        }
        if "nova-conductor" in services:
            s = services["nova-conductor"]
            if s["available"] and s["active"]:
                self.add_host_type(doc, "Controller", az['zoneName'])
        if "nova-compute" in services:
            s = services["nova-compute"]
            if s["available"] and s["active"]:
                self.add_host_type(doc, "Compute", az['zoneName'])
        return doc

    # fetch more details of network nodes from neutron.agents table
    def fetch_network_node_details(self, docs):
        hosts = {doc["host"]: doc for doc in docs}
        query = """
            SELECT DISTINCT host, host AS id, configurations
            FROM {}.agents
            WHERE agent_type IN ('Metadata agent', 'DHCP agent', 'L3 agent')
        """.format(self.neutron_db)
        results = self.get_objects_list(query, "")
        for r in results:
            host = hosts.get(r["host"])
            if host is None:
                # robustness fix: an agent may run on a host Nova did not
                # report; previously this raised an unhandled KeyError
                continue
            host["config"] = json.loads(r["configurations"])
            self.add_host_type(host, "Network", '')

    # fetch ip_address from nova.compute_nodes table if possible
    def fetch_compute_node_ip_address(self, doc, h):
        query = """
            SELECT host_ip AS ip_address
            FROM nova.compute_nodes
            WHERE hypervisor_hostname = %s
        """
        results = self.get_objects_list_for_id(query, "", h)
        for db_row in results:
            doc.update(db_row)

    def add_host_type(self, doc, type, zone):
        """Add a host type to doc["host_type"] if not already present.

        For Compute hosts, also record the zone as the document's parent.
        (Parameter name "type" shadows the builtin but is kept for
        interface compatibility.)
        """
        if type not in doc["host_type"]:
            doc["host_type"].append(type)
            if type == 'Compute':
                doc['zone'] = zone
                doc['parent_id'] = zone
diff --git a/app/discover/fetchers/api/api_fetch_projects.py b/app/discover/fetchers/api/api_fetch_projects.py
new file mode 100644
index 0000000..4ef8083
--- /dev/null
+++ b/app/discover/fetchers/api/api_fetch_projects.py
@@ -0,0 +1,66 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.api.api_access import ApiAccess
+
+
class ApiFetchProjects(ApiAccess):
    """Fetch the project (tenant) list, filtered to projects visible to
    the API user when that list can be obtained."""

    def __init__(self):
        super(ApiFetchProjects, self).__init__()

    def get(self, project_id):
        """Return project documents from all regions."""
        token = self.v2_auth_pwd(self.admin_project)
        if not token:
            return []
        if not self.regions:
            self.log.error('No regions found')
            return []
        ret = []
        for region in self.regions:
            ret.extend(self.get_for_region(region, token))
        # NOTE(review): 'region' here is the last region from the loop
        # above, so only that region's user-project list is used for
        # filtering - confirm whether per-region filtering was intended
        projects_for_user = self.get_projects_for_api_user(region, token)
        return [p for p in ret if p['name'] in projects_for_user] \
            if projects_for_user else ret

    def get_projects_for_api_user(self, region, token):
        """Return the names of projects accessible by the API user.

        Returns None when the list could not be fetched (callers then
        skip filtering), or [] when authentication fails.
        """
        if not token:
            token = self.v2_auth_pwd(self.admin_project)
            if not token:
                return []
        endpoint = self.get_region_url_nover(region, "keystone")
        headers = {
            'X-Auth-Project-Id': self.admin_project,
            'X-Auth-Token': token['id']
        }
        # get the list of projects accessible by the admin user
        req_url = endpoint + '/v3/projects'
        response = self.get_url(req_url, headers)
        if not response or 'projects' not in response:
            return None
        return [p['name'] for p in response['projects']]

    def get_for_region(self, region, token):
        """Return tenant documents for one region, minus 'services'."""
        endpoint = self.get_region_url_nover(region, "keystone")
        req_url = endpoint + "/v2.0/tenants"
        headers = {
            "X-Auth-Project-Id": self.admin_project,
            "X-Auth-Token": token["id"]
        }
        response = self.get_url(req_url, headers)
        if not isinstance(response, dict):
            self.log.error('invalid response to /tenants request: not dict')
            return []
        tenants_list = response.get("tenants", [])
        if not isinstance(tenants_list, list):
            # bug fix: corrected typo in the error message ("n ot" -> "not")
            self.log.error('invalid response to /tenants request: '
                           'tenants value is not a list')
            return []
        return [t for t in tenants_list if t.get("name", "") != "services"]
diff --git a/app/discover/fetchers/api/api_fetch_regions.py b/app/discover/fetchers/api/api_fetch_regions.py
new file mode 100644
index 0000000..dcc558f
--- /dev/null
+++ b/app/discover/fetchers/api/api_fetch_regions.py
@@ -0,0 +1,51 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.api.api_access import ApiAccess
+
+
class ApiFetchRegions(ApiAccess):
    """Build region documents from the service catalog contained in the
    Keystone v2 authentication response."""

    def __init__(self):
        super(ApiFetchRegions, self).__init__()
        self.endpoint = ApiAccess.base_url

    def get(self, project_id):
        """Return all regions found in the service catalog.

        Updates the class-level ApiAccess.regions registry as a side
        effect, keyed by region name.
        """
        token = self.v2_auth_pwd(self.admin_project)
        if not token:
            return []
        # the returned authentication response contains the list of end points
        # and regions
        service_catalog = ApiAccess.auth_response \
            .get('access', {}).get('serviceCatalog')
        if not service_catalog:
            return []
        env = self.get_env()
        # placeholder name for endpoints that carry no region attribute
        NULL_REGION = "No-Region"
        for service in service_catalog:
            for e in service["endpoints"]:
                if "region" in e:
                    region_name = e.pop("region")
                    region_name = region_name if region_name else NULL_REGION
                else:
                    region_name = NULL_REGION
                # idiom fix: membership test on the dict, not on .keys()
                if region_name in self.regions:
                    region = self.regions[region_name]
                else:
                    region = {
                        "id": region_name,
                        "name": region_name,
                        "endpoints": {}
                    }
                    ApiAccess.regions[region_name] = region
                region["parent_type"] = "regions_folder"
                region["parent_id"] = env + "-regions"
                e["service_type"] = service["type"]
                region["endpoints"][service["name"]] = e
        return list(ApiAccess.regions.values())
diff --git a/app/discover/fetchers/cli/__init__.py b/app/discover/fetchers/cli/__init__.py
new file mode 100644
index 0000000..b0637e9
--- /dev/null
+++ b/app/discover/fetchers/cli/__init__.py
@@ -0,0 +1,9 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
diff --git a/app/discover/fetchers/cli/cli_access.py b/app/discover/fetchers/cli/cli_access.py
new file mode 100644
index 0000000..1db84ea
--- /dev/null
+++ b/app/discover/fetchers/cli/cli_access.py
@@ -0,0 +1,206 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import re
+import time
+
+from discover.fetcher import Fetcher
+from utils.binary_converter import BinaryConverter
+from utils.logging.console_logger import ConsoleLogger
+from utils.ssh_conn import SshConn
+
+
class CliAccess(BinaryConverter, Fetcher):
    """Run CLI commands on remote hosts over SSH and parse their output.

    Provides per-(host, command) result caching plus helpers for parsing
    whitespace-separated and "|"-separated tabular output, sectioned
    output, and regexp-based attribute extraction.
    """

    connections = {}
    ssh_cmd = "ssh -o StrictHostKeyChecking=no "
    call_count_per_con = {}
    max_call_count_per_con = 100
    cache_lifetime = 60  # no. of seconds to cache results
    cached_commands = {}

    def __init__(self):
        super().__init__()
        self.log = ConsoleLogger()

    @staticmethod
    def is_gateway_host(ssh_to_host):
        """Return True if the given host is the SSH gateway host itself."""
        ssh_conn = SshConn(ssh_to_host)
        return ssh_conn.is_gateway_host(ssh_to_host)

    def run_on_gateway(self, cmd, ssh_to_host="", enable_cache=True,
                       use_sudo=True):
        """Run a command on the gateway host and return its output.

        Bug fix: the result of run() was previously discarded, so callers
        always received None; it is now returned.
        """
        return self.run(cmd, ssh_to_host=ssh_to_host,
                        enable_cache=enable_cache,
                        on_gateway=True, use_sudo=use_sudo)

    def run(self, cmd, ssh_to_host="", enable_cache=True, on_gateway=False,
            ssh=None, use_sudo=True):
        """Run a command over SSH (optionally via sudo) and return output.

        When enable_cache is True, results are re-used for up to
        cache_lifetime seconds per (host, command) pair.
        """
        ssh_conn = ssh if ssh else SshConn(ssh_to_host)
        if use_sudo and not cmd.strip().startswith("sudo "):
            cmd = "sudo " + cmd
        if not on_gateway and ssh_to_host \
                and not ssh_conn.is_gateway_host(ssh_to_host):
            # target is behind the gateway - wrap in a nested SSH call
            cmd = self.ssh_cmd + ssh_to_host + " " + cmd
        curr_time = time.time()
        cmd_path = ssh_to_host + ',' + cmd
        if enable_cache and cmd_path in self.cached_commands:
            # try to re-use output from last call
            cached = self.cached_commands[cmd_path]
            if cached["timestamp"] + self.cache_lifetime < curr_time:
                # result expired
                self.cached_commands.pop(cmd_path, None)
            else:
                # result is good to use - skip the SSH call
                self.log.info('CliAccess: ****** using cached result, ' +
                              'host: ' + ssh_to_host + ', cmd: %s ******', cmd)
                return cached["result"]

        self.log.info('CliAccess: host: %s, cmd: %s', ssh_to_host, cmd)
        ret = ssh_conn.exec(cmd)
        self.cached_commands[cmd_path] = {"timestamp": curr_time, "result": ret}
        return ret

    def run_fetch_lines(self, cmd, ssh_to_host="", enable_cache=True):
        """Run a command and return its output as a list of lines."""
        out = self.run(cmd, ssh_to_host, enable_cache)
        if not out:
            return []
        # first try to split lines by whitespace
        ret = out.splitlines()
        # if split by whitespace did not work, try splitting by "\\n"
        if len(ret) == 1:
            ret = [l for l in out.split("\\n") if l != ""]
        return ret

    # parse command output columns separated by whitespace
    # since headers can contain whitespace themselves,
    # it is the caller's responsibility to provide the headers
    def parse_cmd_result_with_whitespace(self, lines, headers, remove_first):
        if remove_first:
            # remove headers line
            del lines[:1]
        results = [self.parse_line_with_ws(line, headers)
                   for line in lines]
        return results

    # parse command output with "|" column separators and "-" row separators
    def parse_cmd_result_with_separators(self, lines):
        headers = self.parse_headers_line_with_separators(lines[1])
        # remove line with headers and formatting lines above it and below it
        del lines[:3]
        # remove formatting line in the end
        lines.pop()
        results = [self.parse_content_line_with_separators(line, headers)
                   for line in lines]
        return results

    # parse a line with columns separated by whitespace
    def parse_line_with_ws(self, line, headers):
        s = line if isinstance(line, str) else self.binary2str(line)
        parts = [word.strip() for word in s.split() if word.strip()]
        return {headers[i]: p for i, p in enumerate(parts)}

    # parse a line with "|" column separators
    def parse_line_with_separators(self, line):
        s = self.binary2str(line)
        parts = [word.strip() for word in s.split("|") if word.strip()]
        # remove the ID field
        del parts[:1]
        return parts

    def parse_headers_line_with_separators(self, line):
        """Parse the header line of a "|"-separated table."""
        return self.parse_line_with_separators(line)

    def parse_content_line_with_separators(self, line, headers):
        """Parse one content line of a "|"-separated table into a dict."""
        content_parts = self.parse_line_with_separators(line)
        content = {}
        for i in range(0, len(content_parts)):
            content[headers[i]] = content_parts[i]
        return content

    def merge_ws_spillover_lines(self, lines):
        # with WS-separated output, extra output sometimes spills to next line
        # detect that and add to the end of the previous line for our
        # processing
        pending_line = None
        fixed_lines = []
        for l in lines:
            # robustness fix: startswith() also handles empty lines,
            # where l[0] would raise an IndexError
            if l.startswith('\t'):
                # this is a spill-over line
                if pending_line:
                    # add this line to the end of the previous line
                    pending_line = pending_line.strip() + "," + l.strip()
            else:
                # add the previous pending line to the fixed lines list
                if pending_line:
                    fixed_lines.append(pending_line)
                # make current line the pending line
                pending_line = l
        if pending_line:
            fixed_lines.append(pending_line)
        return fixed_lines

    """
    given output lines from CLI command like 'ip -d link show',
    find lines belonging to section describing a specific interface
    parameters:
    - lines: list of strings, output of command
    - header_regexp: regexp marking the start of the section
    - end_regexp: regexp marking the end of the section
    """
    def get_section_lines(self, lines, header_regexp, end_regexp):
        if not lines:
            return []
        header_re = re.compile(header_regexp)
        start_pos = None
        line_count = len(lines)
        # find start_pos of section
        # bug fix: the range previously ended at line_count-1, so the
        # last line was never tested against the header regexp
        for line_num in range(line_count):
            if header_re.match(lines[line_num]):
                start_pos = line_num
                break
        # bug fix: "if not start_pos" also rejected a match at line 0
        if start_pos is None:
            return []
        # find end of section
        end_pos = line_count
        end_re = re.compile(end_regexp)
        # bug fix: same off-by-one - include the last line in the scan
        for line_num in range(start_pos + 1, line_count):
            if end_re.match(lines[line_num]):
                end_pos = line_num
                break
        return lines[start_pos:end_pos]

    def get_object_data(self, o, lines, regexps):
        """
        find object data in output lines from CLI command
        parameters:
        - o: object (dict), to which we'll add attributes with the data found
        - lines: list of strings
        - regexps: dict, keys are attribute names, values are regexp to match
        for finding the value of the attribute
        """
        for line in lines:
            self.find_matching_regexps(o, line, regexps)
        for regexp_tuple in regexps:
            name = regexp_tuple['name']
            # bug fix: the original tested the literal key 'name' instead
            # of the attribute being defaulted, so defaults were applied
            # (or skipped) based on the wrong key
            if name not in o and 'default' in regexp_tuple:
                o[name] = regexp_tuple['default']

    def find_matching_regexps(self, o, line, regexps):
        """Set o[name] from the first capture group of each matching re."""
        for regexp_tuple in regexps:
            name = regexp_tuple['name']
            regex = re.compile(regexp_tuple['re'])
            matches = regex.search(line)
            if matches:
                o[name] = matches.group(1)
diff --git a/app/discover/fetchers/cli/cli_fetch_host_pnics.py b/app/discover/fetchers/cli/cli_fetch_host_pnics.py
new file mode 100644
index 0000000..3516e25
--- /dev/null
+++ b/app/discover/fetchers/cli/cli_fetch_host_pnics.py
@@ -0,0 +1,122 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import re
+
+from discover.fetchers.cli.cli_access import CliAccess
+from utils.inventory_mgr import InventoryMgr
+
+
class CliFetchHostPnics(CliAccess):
    """Fetch a host's physical NICs by listing /sys/class/net and then
    querying ifconfig/ethtool for each interface found."""

    def __init__(self):
        super().__init__()
        self.inv = InventoryMgr()
        # raw strings avoid invalid escape sequences in the regexps
        # (a SyntaxWarning / future error in modern Python)
        self.ethtool_attr = re.compile(r'^\s+([^:]+):\s(.*)$')
        self.regexps = [
            {'name': 'mac_address', 're': r'^.*\sHWaddr\s(\S+)(\s.*)?$'},
            {'name': 'mac_address', 're': r'^.*\sether\s(\S+)(\s.*)?$'},
            {'name': 'IP Address', 're': r'^\s*inet addr:?(\S+)\s.*$'},
            {'name': 'IP Address', 're': r'^\s*inet ([0-9.]+)\s.*$'},
            {'name': 'IPv6 Address', 're': r'^\s*inet6 addr:\s*(\S+)(\s.*)?$'},
            {'name': 'IPv6 Address', 're': r'^\s*inet6 \s*(\S+)(\s.*)?$'}
        ]

    def get(self, id):
        """Return pNIC documents for the host encoded in the given ID
        (expected form: "<host_id>-<suffix>")."""
        host_id = id[:id.rindex("-")]
        cmd = 'ls -l /sys/class/net | grep ^l | grep -v "/virtual/"'
        host = self.inv.get_by_id(self.get_env(), host_id)
        if not host:
            self.log.error("CliFetchHostPnics: host not found: " + host_id)
            return []
        if "host_type" not in host:
            self.log.error("host does not have host_type: " + host_id +
                           ", host: " + str(host))
            return []
        host_types = host["host_type"]
        # only network and compute nodes carry pNICs we care about
        if "Network" not in host_types and "Compute" not in host_types:
            return []
        interface_lines = self.run_fetch_lines(cmd, host_id)
        interfaces = []
        for line in interface_lines:
            interface_name = line[line.rindex('/') + 1:]
            interface_name = interface_name.strip()
            # run ifconfig with specific interface name,
            # since running it with no name yields a list without
            # inactive pNICs
            interface = self.find_interface_details(host_id, interface_name)
            if interface:
                interfaces.append(interface)
        return interfaces

    def find_interface_details(self, host_id, interface_name):
        """Parse 'ifconfig <name>' output into an interface document.

        Returns None when the interface is not found in the output.
        """
        lines = self.run_fetch_lines("ifconfig " + interface_name, host_id)
        interface = None
        status_up = None
        for line in [l for l in lines if l != '']:
            tokens = None
            if interface is None:
                tokens = line.split()
                name = tokens[0].strip('- :')
                name = name.strip()
                if name == interface_name:
                    line_remainder = line.strip('-')[len(interface_name)+2:]
                    line_remainder = line_remainder.strip(' :')
                    interface = {
                        "host": host_id,
                        "name": interface_name,
                        "local_name": interface_name,
                        "lines": []
                    }
                    self.handle_line(interface, line_remainder)
                    if '<UP,' in line:
                        status_up = True
            if status_up is None:
                if tokens is None:
                    tokens = line.split()
                # older ifconfig format: state appears as UP among flags
                if 'BROADCAST' in tokens:
                    status_up = 'UP' in tokens
        if interface:
            # NOTE(review): this block runs after the loop, so only the
            # final output line is fed to handle_line here - confirm the
            # indentation is as intended (each line may have been meant
            # to be handled inside the loop)
            self.handle_line(interface, line)
            self.set_interface_data(interface)
            interface['state'] = 'UP' if status_up else 'DOWN'
            if 'id' not in interface:
                interface['id'] = interface_name + '-unknown_mac'
        return interface

    def handle_line(self, interface, line):
        """Extract known attributes from one output line and record it."""
        self.find_matching_regexps(interface, line, self.regexps)
        if 'mac_address' in interface:
            interface["id"] = interface["name"] + "-" + interface["mac_address"]
        interface["lines"].append(line.strip())

    def set_interface_data(self, interface):
        """Join collected lines into "data" and add ethtool attributes."""
        if not interface:
            return
        interface["data"] = "\n".join(interface["lines"])
        interface.pop("lines", None)
        # for VLAN-style names (e.g. "eth0.10@eth0") query the parent
        ethtool_ifname = interface["local_name"]
        if "@" in interface["local_name"]:
            pos = interface["local_name"].index("@")
            ethtool_ifname = ethtool_ifname[pos + 1:]
        cmd = "ethtool " + ethtool_ifname
        lines = self.run_fetch_lines(cmd, interface["host"])
        attr = None
        for line in lines[1:]:
            matches = self.ethtool_attr.match(line)
            if matches:
                # add this attribute to the interface
                attr = matches.group(1)
                value = matches.group(2)
                interface[attr] = value.strip()
            elif attr is not None:
                # continuation line: collect extra values for the current
                # attribute as a list
                if isinstance(interface[attr], str):
                    interface[attr] = [interface[attr], line.strip()]
                else:
                    interface[attr].append(line.strip())
            # robustness fix: a continuation line before any attribute is
            # now ignored (previously raised KeyError on interface[None])
diff --git a/app/discover/fetchers/cli/cli_fetch_host_pnics_vpp.py b/app/discover/fetchers/cli/cli_fetch_host_pnics_vpp.py
new file mode 100644
index 0000000..69b6413
--- /dev/null
+++ b/app/discover/fetchers/cli/cli_fetch_host_pnics_vpp.py
@@ -0,0 +1,44 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import re
+
+from discover.fetcher import Fetcher
+from utils.inventory_mgr import InventoryMgr
+
+NAME_RE = '^[a-zA-Z]*GigabitEthernet'
+
class CliFetchHostPnicsVpp(Fetcher):
    """Fetch the pNICs of a VPP host from its vEdge inventory documents."""

    def __init__(self):
        super().__init__()
        self.inv = InventoryMgr()
        self.name_re = re.compile(NAME_RE)

    def get(self, id):
        """Return pNIC documents for the host encoded in the given ID."""
        # strip the trailing "-..." suffixes to recover the host ID
        host_id = id[:id.rindex("-")]
        host_id = id[:host_id.rindex("-")]
        vedges = self.inv.find_items({
            "environment": self.get_env(),
            "type": "vedge",
            "host": host_id
        })
        pnics = []
        for vedge in vedges:
            for port_name, port in vedge['ports'].items():
                # only physical (GigabitEthernet-style) ports qualify
                if not self.name_re.search(port_name):
                    continue
                port['host'] = host_id
                port['id'] = host_id + "-pnic-" + port_name
                port['type'] = 'pnic'
                port['object_name'] = port_name
                port['Link detected'] = \
                    'yes' if port['state'] == 'up' else 'no'
                pnics.append(port)
        return pnics
diff --git a/app/discover/fetchers/cli/cli_fetch_host_vservice.py b/app/discover/fetchers/cli/cli_fetch_host_vservice.py
new file mode 100644
index 0000000..9f8173f
--- /dev/null
+++ b/app/discover/fetchers/cli/cli_fetch_host_vservice.py
@@ -0,0 +1,80 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import re
+
+from discover.fetchers.cli.cli_access import CliAccess
+from discover.fetchers.db.db_access import DbAccess
+from discover.network_agents_list import NetworkAgentsList
+from utils.inventory_mgr import InventoryMgr
+
+
class CliFetchHostVservice(CliAccess, DbAccess):
    """Fetch details of a vService (DHCP agent / L3 router namespace)."""

    def __init__(self):
        super(CliFetchHostVservice, self).__init__()
        # match only DHCP agent and router (L3 agent)
        self.type_re = re.compile("^q(dhcp|router)-")
        self.inv = InventoryMgr()
        self.agents_list = NetworkAgentsList()

    def get_vservice(self, host_id, name_space):
        """Build and return a vService document for the given namespace."""
        result = {"local_service_id": name_space}
        self.set_details(host_id, result)
        return result

    def set_details(self, host_id, r):
        """Fill in type, name, host, id and parent data for a vService.

        r["local_service_id"] is expected as "q<type>-<uuid>".
        """
        # keep the index without prefix
        id_full = r["local_service_id"].strip()
        prefix = id_full[1:id_full.index('-')]
        id_clean = id_full[id_full.index('-') + 1:]
        r["service_type"] = prefix
        name = self.get_router_name(r, id_clean) if prefix == "router" \
            else self.get_network_name(id_clean)
        r["name"] = prefix + "-" + name
        r["host"] = host_id
        r["id"] = host_id + "-" + id_full
        self.set_agent_type(r)

    def get_network_name(self, id):
        """Return the network name for a network ID.

        Falls back to the ID itself when the network is not in the DB.
        """
        query = """
            SELECT name
            FROM {}.networks
            WHERE id = %s
        """.format(self.neutron_db)
        # bug fix: materialize the results once. The original called
        # list(results) for the emptiness check and then iterated
        # `results` again, which yields nothing if the DB access layer
        # returns a one-shot iterator/cursor.
        results = list(self.get_objects_list_for_id(query, "router", id))
        if not results:
            return id
        return results[0]["name"]

    def get_router_name(self, r, id):
        """Merge the router's DB row into r and return the router name.

        Returns None when the router is not found in the DB.
        """
        query = """
            SELECT *
            FROM {}.routers
            WHERE id = %s
        """.format(self.neutron_db)
        results = self.get_objects_list_for_id(query, "router", id.strip())
        for db_row in results:
            r.update(db_row)
            return r["name"]

    # dynamically create sub-folder for vService by type
    def set_agent_type(self, o):
        o["master_parent_id"] = o["host"] + "-vservices"
        o["master_parent_type"] = "vservices_folder"
        atype = o["service_type"]
        agent = self.agents_list.get_type(atype)
        try:
            o["parent_id"] = o["master_parent_id"] + "-" + agent["type"] + "s"
            o["parent_type"] = "vservice_" + agent["type"] + "s_folder"
            o["parent_text"] = agent["folder_text"]
        except KeyError:
            # NOTE(review): "miscellenaous" is misspelled, but the string
            # is used as a document key/type - fix only in a coordinated
            # change across consumers
            o["parent_id"] = o["master_parent_id"] + "-" + "miscellenaous"
            o["parent_type"] = "vservice_miscellenaous_folder"
            o["parent_text"] = "Misc. services"
diff --git a/app/discover/fetchers/cli/cli_fetch_host_vservices.py b/app/discover/fetchers/cli/cli_fetch_host_vservices.py
new file mode 100644
index 0000000..9b62dcb
--- /dev/null
+++ b/app/discover/fetchers/cli/cli_fetch_host_vservices.py
@@ -0,0 +1,27 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.cli.cli_fetch_host_vservice import CliFetchHostVservice
+
+
class CliFetchHostVservices(CliFetchHostVservice):
    """List the vServices (DHCP/router namespaces) on a network host."""

    def __init__(self):
        super(CliFetchHostVservices, self).__init__()

    def get(self, host_id):
        """Return vService documents for each matching namespace."""
        host = self.inv.get_single(self.get_env(), "host", host_id)
        # only network nodes run the DHCP/L3 agent namespaces
        if "Network" not in host["host_type"]:
            return []
        results = []
        for line in self.run_fetch_lines("ip netns", host_id):
            # keep only the namespace name (first whitespace token)
            namespace = line[:line.index(' ')] if ' ' in line else line
            if not self.type_re.match(namespace):
                continue
            doc = {"local_service_id": namespace}
            self.set_details(host_id, doc)
            results.append(doc)
        return results
+
diff --git a/app/discover/fetchers/cli/cli_fetch_instance_vnics.py b/app/discover/fetchers/cli/cli_fetch_instance_vnics.py
new file mode 100644
index 0000000..22ac573
--- /dev/null
+++ b/app/discover/fetchers/cli/cli_fetch_instance_vnics.py
@@ -0,0 +1,22 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.cli.cli_fetch_instance_vnics_base import CliFetchInstanceVnicsBase
+
+
class CliFetchInstanceVnics(CliFetchInstanceVnicsBase):
    """Fetch instance vNICs, adding the libvirt source bridge."""

    def __init__(self):
        super().__init__()

    def set_vnic_properties(self, v, instance):
        """Augment the base vNIC properties with the source bridge."""
        super().set_vnic_properties(v, instance)
        source = v["source"]
        v["source_bridge"] = source["@bridge"]

    def get_vnic_name(self, v, instance):
        """The guest-side target device name identifies the vNIC."""
        target = v["target"]
        return target["@dev"]
diff --git a/app/discover/fetchers/cli/cli_fetch_instance_vnics_base.py b/app/discover/fetchers/cli/cli_fetch_instance_vnics_base.py
new file mode 100644
index 0000000..4de1840
--- /dev/null
+++ b/app/discover/fetchers/cli/cli_fetch_instance_vnics_base.py
@@ -0,0 +1,68 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import xmltodict
+
+from discover.fetchers.cli.cli_access import CliAccess
+from utils.inventory_mgr import InventoryMgr
+
+
class CliFetchInstanceVnicsBase(CliAccess):
    """Base fetcher for instance vNICs, read via 'virsh dumpxml' on the host."""

    def __init__(self):
        super().__init__()
        self.inv = InventoryMgr()

    def get(self, id):
        """Return vNIC documents for the instance whose folder id is given.

        `id` is expected to look like '<instance-uuid>-<suffix>'; everything
        after the last '-' is stripped to recover the instance UUID.
        Returns [] when the instance or its (compute) host is not found.
        """
        instance_uuid = id[:id.rindex('-')]
        instance = self.inv.get_by_id(self.get_env(), instance_uuid)
        if not instance:
            return []
        host = self.inv.get_by_id(self.get_env(), instance["host"])
        if not host or "Compute" not in host["host_type"]:
            return []
        lines = self.run_fetch_lines("virsh list", instance["host"])
        del lines[:2]  # remove header
        # first whitespace-separated token of each non-empty line is the id
        virsh_ids = [l.split()[0] for l in lines if l > ""]
        results = []
        # Note: there are 2 ids here of instances with local names, which are
        # not connected to the data we have thus far for the instance
        # therefore, we will decide whether the instance is the correct one
        # based on comparison of the uuid in the dumpxml output
        for id in virsh_ids:
            results.extend(self.get_vnics_from_dumpxml(id, instance))
        return results

    def get_vnics_from_dumpxml(self, id, instance):
        """Parse 'virsh dumpxml <id>' and return the instance's vNIC dicts.

        Returns [] when the domain XML is empty, belongs to a different
        instance (UUID mismatch), or has no interface devices.
        """
        xml_string = self.run("virsh dumpxml " + id, instance["host"])
        if not xml_string.strip():
            return []
        response = xmltodict.parse(xml_string)
        if instance["uuid"] != response["domain"]["uuid"]:
            # this is the wrong instance - skip it
            return []
        try:
            vnics = response["domain"]["devices"]["interface"]
        except KeyError:
            return []
        if isinstance(vnics, dict):
            # xmltodict returns a dict (not a list) for a single interface
            vnics = [vnics]
        for v in vnics:
            self.set_vnic_properties(v, instance)
        return vnics

    def set_vnic_properties(self, v, instance):
        """Decorate a vNIC dict with identifying fields.

        Side effect: also stores the vNIC's MAC address on the instance
        document and writes the instance back to the inventory.
        """
        v["name"] = self.get_vnic_name(v, instance)
        v["id"] = v["name"]
        v["vnic_type"] = "instance_vnic"
        v["host"] = instance["host"]
        v["instance_id"] = instance["id"]
        v["instance_db_id"] = instance["_id"]
        v["mac_address"] = v["mac"]["@address"]
        instance["mac_address"] = v["mac_address"]
        self.inv.set(instance)
diff --git a/app/discover/fetchers/cli/cli_fetch_instance_vnics_vpp.py b/app/discover/fetchers/cli/cli_fetch_instance_vnics_vpp.py
new file mode 100644
index 0000000..58facd2
--- /dev/null
+++ b/app/discover/fetchers/cli/cli_fetch_instance_vnics_vpp.py
@@ -0,0 +1,18 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.cli.cli_fetch_instance_vnics_base import CliFetchInstanceVnicsBase
+
+
class CliFetchInstanceVnicsVpp(CliFetchInstanceVnicsBase):
    """Instance vNIC fetcher for VPP-based deployments."""

    def __init__(self):
        super().__init__()

    def get_vnic_name(self, v, instance):
        """Build a unique vNIC name from instance name, vNIC type and MAC."""
        mac_address = v["mac"]["@address"]
        return "-".join((instance["name"], v["@type"], mac_address))
diff --git a/app/discover/fetchers/cli/cli_fetch_oteps_lxb.py b/app/discover/fetchers/cli/cli_fetch_oteps_lxb.py
new file mode 100644
index 0000000..1e65a14
--- /dev/null
+++ b/app/discover/fetchers/cli/cli_fetch_oteps_lxb.py
@@ -0,0 +1,86 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.cli.cli_access import CliAccess
+from discover.fetchers.db.db_access import DbAccess
+from utils.inventory_mgr import InventoryMgr
+
+
class CliFetchOtepsLxb(CliAccess, DbAccess):
    """Fetch overlay tunnel endpoints (OTEPs) of a Linux-bridge vconnector."""

    def __init__(self):
        super().__init__()
        self.inv = InventoryMgr()

    def get(self, parent_id):
        """Return OTEP documents for the vconnector with the given id.

        Only interfaces whose name starts with the tunnel type (e.g.
        'vxlan-') are considered tunnel endpoints.
        """
        vconnector = self.inv.get_by_id(self.get_env(), parent_id)
        if not vconnector:
            return []
        configurations = vconnector['configurations']
        tunneling_ip = configurations['tunneling_ip']
        tunnel_types_used = configurations['tunnel_types']
        if not tunnel_types_used:
            return []
        tunnel_type = tunnel_types_used[0]
        if not tunnel_type:
            return []
        # check only interfaces with name matching tunnel type
        ret = [i for i in vconnector['interfaces'].values()
               if i['name'].startswith(tunnel_type + '-')]
        for otep in ret:
            otep['ip_address'] = tunneling_ip
            otep['host'] = vconnector['host']
            self.get_otep_ports(otep)
            otep['id'] = otep['host'] + '-otep-' + otep['name']
            otep['name'] = otep['id']
            otep['vconnector'] = vconnector['name']
            otep['overlay_type'] = tunnel_type
            self.get_udp_port(otep)
        return ret

    def get_otep_ports(self, otep):
        """Fetch OTEP link data from CLI command 'ip -d link show'."""
        cmd = 'ip -d link show'
        lines = self.run_fetch_lines(cmd, otep['host'])
        header_format = '[0-9]+: ' + otep['name'] + ':'
        interface_lines = self.get_section_lines(lines, header_format, '\S')
        otep['data'] = '\n'.join(interface_lines)
        regexps = [
            {'name': 'state', 're': ',UP,', 'default': 'DOWN'},
            {'name': 'mac_address', 're': '.*\slink/ether\s(\S+)\s'},
            {'name': 'mtu', 're': '.*\smtu\s(\S+)\s'},
        ]
        self.get_object_data(otep, interface_lines, regexps)
        # find the tunnel peer address in the bridge forwarding table
        cmd = 'bridge fdb show'
        dst_line_format = ' dev ' + otep['name'] + ' dst '
        lines = self.run_fetch_lines(cmd, otep['host'])
        lines = [l for l in lines if dst_line_format in l]
        if lines:
            l = lines[0]
            otep['bridge dst'] = l[l.index(' dst ')+5:]
        return otep

    def get_udp_port(self, otep):
        """Fetch the tunnel UDP port for this OTEP from the Neutron DB.

        'udp_port' defaults to 0 and stays 0 when the query fails or
        returns no rows.
        """
        table_name = "neutron.ml2_" + otep['overlay_type'] + "_endpoints"
        otep['udp_port'] = 0
        try:
            results = self.get_objects_list_for_id(
                """
                SELECT udp_port
                FROM {}
                WHERE host = %s
                """.format(table_name),
                "vedge", otep['host'])
        except Exception as e:
            self.log.error('failed to fetch UDP port for OTEP: ' + str(e))
            # bug fix: the original fell through after the error and
            # iterated over results=None, raising TypeError
            return
        for result in results:
            otep['udp_port'] = result['udp_port']
diff --git a/app/discover/fetchers/cli/cli_fetch_vconnectors.py b/app/discover/fetchers/cli/cli_fetch_vconnectors.py
new file mode 100644
index 0000000..78b767a
--- /dev/null
+++ b/app/discover/fetchers/cli/cli_fetch_vconnectors.py
@@ -0,0 +1,40 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from abc import abstractmethod, ABCMeta
+
+from discover.fetchers.cli.cli_access import CliAccess
+from utils.inventory_mgr import InventoryMgr
+from utils.singleton import Singleton
+
+
class ABCSingleton(ABCMeta, Singleton):
    """Metaclass combining ABCMeta and Singleton.

    Lets a class declare abstract methods while also being instantiated
    as a singleton, without a metaclass conflict.
    """
    pass
+
+
class CliFetchVconnectors(CliAccess, metaclass=ABCSingleton):
    """Abstract base for fetchers of vconnectors (virtual bridges) on a host."""

    def __init__(self):
        super().__init__()
        self.inv = InventoryMgr()

    @abstractmethod
    def get_vconnectors(self, host):
        """Return vconnector documents for the given host document."""
        raise NotImplementedError("Subclass must override get_vconnectors()")

    def get(self, id):
        """Resolve the host from the folder id, then delegate to the subclass."""
        host_id = id[:id.rindex('-')]
        host = self.inv.get_by_id(self.get_env(), host_id)
        if not host:
            self.log.error("CliFetchVconnectors: host not found: " + host_id)
            return []
        if "host_type" in host:
            return self.get_vconnectors(host)
        self.log.error("host does not have host_type: " + host_id +
                       ", host: " + str(host))
        return []
diff --git a/app/discover/fetchers/cli/cli_fetch_vconnectors_lxb.py b/app/discover/fetchers/cli/cli_fetch_vconnectors_lxb.py
new file mode 100644
index 0000000..648dc63
--- /dev/null
+++ b/app/discover/fetchers/cli/cli_fetch_vconnectors_lxb.py
@@ -0,0 +1,35 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import json
+
+from discover.fetchers.cli.cli_fetch_vconnectors_ovs import CliFetchVconnectorsOvs
+from discover.fetchers.db.db_access import DbAccess
+
+
class CliFetchVconnectorsLxb(CliFetchVconnectorsOvs, DbAccess):
    """Linux-bridge vconnector fetcher: adds agent configurations from the DB."""

    def __init__(self):
        super().__init__()

    def get(self, id):
        """Fetch bridges via the OVS parent, then attach each host's
        Linux bridge agent configurations from the Neutron DB."""
        vconnectors = super().get(id)
        # the query is invariant across the loop - build it once
        query = """
            SELECT configurations
            FROM {}.agents
            WHERE agent_type="Linux bridge agent" AND host = %s
            """.format(self.neutron_db)
        for vconnector in vconnectors:
            host = vconnector['host']
            matches = self.get_objects_list_for_id(query, '', host)
            if not matches:
                raise ValueError('No Linux bridge agent in DB for host: {}'.format(host))
            agent = matches[0]
            vconnector['configurations'] = json.loads(agent['configurations'])
        return vconnectors
diff --git a/app/discover/fetchers/cli/cli_fetch_vconnectors_ovs.py b/app/discover/fetchers/cli/cli_fetch_vconnectors_ovs.py
new file mode 100644
index 0000000..ff37569
--- /dev/null
+++ b/app/discover/fetchers/cli/cli_fetch_vconnectors_ovs.py
@@ -0,0 +1,56 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import re
+
+from discover.fetchers.cli.cli_fetch_vconnectors import CliFetchVconnectors
+
+
class CliFetchVconnectorsOvs(CliFetchVconnectors):
    """Fetch vconnectors (Linux bridges) on a host via 'brctl show'."""

    def __init__(self):
        super().__init__()

    def get_vconnectors(self, host):
        """Parse 'brctl show' output into vconnector documents.

        Returns one document per bridge, with an 'interfaces' mapping of
        interface name -> {name, mac_address} (MAC looked up from the
        inventory's port documents).
        """
        host_id = host['id']
        lines = self.run_fetch_lines("brctl show", host_id)
        headers = ["bridge_name", "bridge_id", "stp_enabled", "interfaces"]
        headers_count = len(headers)
        # since we hard-coded the headers list, remove the headers line
        del lines[:1]

        # interfaces can spill to the next line - need to detect that and add
        # them to the end of the previous line for our processing
        fixed_lines = self.merge_ws_spillover_lines(lines)

        results = self.parse_cmd_result_with_whitespace(fixed_lines, headers, False)
        ret = []
        for doc in results:
            doc["name"] = doc.pop("bridge_name")
            doc["id"] = doc["name"] + "-" + doc.pop("bridge_id")
            doc["host"] = host_id
            doc["connector_type"] = "bridge"
            if "interfaces" in doc:
                interfaces = {}
                interface_names = doc["interfaces"].split(",")
                for interface_name in interface_names:
                    # find MAC address for this interface from ports list
                    # NOTE(review): [3:] presumably strips a 3-char device
                    # prefix (e.g. 'tap') to recover the port id prefix -
                    # confirm against the port naming convention
                    port_id_prefix = interface_name[3:]
                    port = self.inv.find_items({
                        "environment": self.get_env(),
                        "type": "port",
                        "binding:host_id": host_id,
                        "id": {"$regex": r"^" + re.escape(port_id_prefix)}
                    }, get_single=True)
                    mac_address = '' if not port else port['mac_address']
                    interface = {'name': interface_name, 'mac_address': mac_address}
                    interfaces[interface_name] = interface
                doc["interfaces"] = interfaces
                doc['interfaces_names'] = list(interfaces.keys())
            ret.append(doc)
        return ret
diff --git a/app/discover/fetchers/cli/cli_fetch_vconnectors_vpp.py b/app/discover/fetchers/cli/cli_fetch_vconnectors_vpp.py
new file mode 100644
index 0000000..479e1db
--- /dev/null
+++ b/app/discover/fetchers/cli/cli_fetch_vconnectors_vpp.py
@@ -0,0 +1,64 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.cli.cli_fetch_vconnectors import CliFetchVconnectors
+
+
class CliFetchVconnectorsVpp(CliFetchVconnectors):
    """Fetch vconnectors (VPP bridge domains) on a host via vppctl."""

    def __init__(self):
        super().__init__()

    def get_vconnectors(self, host):
        """Build one vconnector document per VPP L2 bridge domain.

        Parses 'vppctl show mode' lines of the form
        'l2 bridge <if-name> bd_id <id> ...' and groups interfaces by
        bridge domain id.
        """
        lines = self.run_fetch_lines("vppctl show mode", host['id'])
        vconnectors = {}
        for l in lines:
            if not l.startswith('l2 bridge'):
                continue
            line_parts = l.split(' ')
            name = line_parts[2]
            bd_id = line_parts[4]
            if bd_id in vconnectors:
                vconnector = vconnectors[bd_id]
            else:
                vconnector = {
                    'host': host['id'],
                    'id': host['id'] + '-vconnector-' + bd_id,
                    'bd_id': bd_id,
                    'name': "bridge-domain-" + bd_id,
                    'interfaces': {},
                    'interfaces_names': []
                }
                vconnectors[bd_id] = vconnector
            interface = self.get_interface_details(host, name)
            if interface:
                vconnector['interfaces'][name] = interface
                vconnector['interfaces_names'].append(name)
        return list(vconnectors.values())

    def get_interface_details(self, host, name):
        """Fetch one interface's details via 'vppctl show hardware-int'.

        Returns a dict with name/hardware/state/id (plus mac_address when
        an 'Ethernet address' line is present), or None when the output
        contains no interface line.
        """
        # find vconnector interfaces
        cmd = "vppctl show hardware-int " + name
        interface_lines = self.run_fetch_lines(cmd, host['id'])
        # remove header line
        interface_lines.pop(0)
        interface = None
        for l in interface_lines:
            if not l.strip():
                continue  # ignore empty lines
            if not l.startswith(' '):
                details = l.split()
                interface = {
                    "name": details[0],
                    "hardware": details[3],
                    "state": details[2],
                    "id": details[1],
                }
            elif l.startswith(' Ethernet address ') and interface is not None:
                # bug fix: guard against an address line appearing before any
                # interface header line (would raise TypeError on None)
                interface['mac_address'] = l[l.rindex(' ') + 1:]
        return interface
diff --git a/app/discover/fetchers/cli/cli_fetch_vpp_vedges.py b/app/discover/fetchers/cli/cli_fetch_vpp_vedges.py
new file mode 100644
index 0000000..f9c622d
--- /dev/null
+++ b/app/discover/fetchers/cli/cli_fetch_vpp_vedges.py
@@ -0,0 +1,58 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+# Copyright 2016 cisco Corporation
+#oslo related message handling
+
+from oslo_serialization import jsonutils
+from oslo_utils import uuidutils
+import yaml
+
+from neutronclient.tests.functional import base
+
+
# NOTE(review): as the in-file comment below says, this content is unrelated
# to VPP vedges - it is a neutronclient CLI output-formatter functional test,
# apparently committed under the wrong filename (cli_fetch_vpp_vedges.py).
class TestCLIFormatter(base.ClientTestBase):
    """Functional tests for 'neutron net-create' output formatters."""

## old stuff ..not related to vpp..disregard
    def setUp(self):
        super(TestCLIFormatter, self).setUp()
        # unique network name per test run; cleaned up automatically
        self.net_name = 'net-%s' % uuidutils.generate_uuid()
        self.addCleanup(self.neutron, 'net-delete %s' % self.net_name)

    def _create_net(self, fmt, col_attrs):
        # build 'net-create -c <attr> ... -f <fmt> <name>' parameters
        params = ['-c %s' % attr for attr in col_attrs]
        params.append('-f %s' % fmt)
        params.append(self.net_name)
        param_string = ' '.join(params)
        return self.neutron('net-create', params=param_string)

    def test_net_create_with_json_formatter(self):
        result = self._create_net('json', ['name', 'admin_state_up'])
        self.assertDictEqual({'name': self.net_name,
                              'admin_state_up': True},
                             jsonutils.loads(result))

    def test_net_create_with_yaml_formatter(self):
        result = self._create_net('yaml', ['name', 'admin_state_up'])
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted input; yaml.safe_load would suffice here
        self.assertDictEqual({'name': self.net_name,
                              'admin_state_up': True},
                             yaml.load(result))

    def test_net_create_with_value_formatter(self):
        # NOTE(amotoki): In 'value' formatter, there is no guarantee
        # in the order of attribute, so we use one attribute in this test.
        result = self._create_net('value', ['name'])
        self.assertEqual(self.net_name, result.strip())

    def test_net_create_with_shell_formatter(self):
        result = self._create_net('shell', ['name', 'admin_state_up'])
        result_lines = set(result.strip().split('\n'))
        self.assertSetEqual(set(['name="%s"' % self.net_name,
                                 'admin_state_up="True"']),
                            result_lines)
diff --git a/app/discover/fetchers/cli/cli_fetch_vservice_vnics.py b/app/discover/fetchers/cli/cli_fetch_vservice_vnics.py
new file mode 100644
index 0000000..44ac8d6
--- /dev/null
+++ b/app/discover/fetchers/cli/cli_fetch_vservice_vnics.py
@@ -0,0 +1,140 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import re
+
+from discover.fetchers.cli.cli_access import CliAccess
+from utils.inventory_mgr import InventoryMgr
+
+
class CliFetchVserviceVnics(CliAccess):
    """Fetch vNICs of vservices (qdhcp/qrouter namespaces) on a network node.

    Runs 'ifconfig' inside each namespace and parses the per-interface
    sections into vNIC documents.
    """

    def __init__(self):
        super().__init__()
        self.inv = InventoryMgr()
        # matches an interface section header line: '<name> <rest-of-line>'
        self.if_header = re.compile('^[-]?(\S+)\s+(.*)$')
        # attribute extractors; pairs of patterns cover both the older
        # net-tools ifconfig format and the newer one
        self.regexps = [
            {'name': 'mac_address', 're': '^.*\sHWaddr\s(\S+)(\s.*)?$'},
            {'name': 'mac_address', 're': '^.*\sether\s(\S+)(\s.*)?$'},
            {'name': 'netmask', 're': '^.*\sMask:\s?([0-9.]+)(\s.*)?$'},
            {'name': 'netmask', 're': '^.*\snetmask\s([0-9.]+)(\s.*)?$'},
            {'name': 'IP Address', 're': '^\s*inet addr:(\S+)\s.*$'},
            {'name': 'IP Address', 're': '^\s*inet ([0-9.]+)\s.*$'},
            {'name': 'IPv6 Address',
             're': '^\s*inet6 addr: ?\s*([0-9a-f:/]+)(\s.*)?$'},
            {'name': 'IPv6 Address',
             're': '^\s*inet6 \s*([0-9a-f:/]+)(\s.*)?$'}
        ]

    def get(self, host_id):
        """Return vNIC documents for all vservices on the given host."""
        host = self.inv.get_by_id(self.get_env(), host_id)
        if not host:
            self.log.error("host not found: " + host_id)
            return []
        if "host_type" not in host:
            self.log.error("host does not have host_type: " + host_id +
                           ", host: " + str(host))
            return []
        if "Network" not in host["host_type"]:
            # only network nodes run vservice namespaces
            return []
        lines = self.run_fetch_lines("ip netns", host_id)
        ret = []
        for l in [l for l in lines
                  if l.startswith("qdhcp") or l.startswith("qrouter")]:
            service = l.strip()
            # keep only the namespace name - drop anything after a space
            service = service if ' ' not in service \
                else service[:service.index(' ')]
            ret.extend(self.handle_service(host_id, service))
        return ret

    def handle_service(self, host, service, enable_cache=True):
        """Parse 'ifconfig' output inside one namespace into vNIC dicts."""
        cmd = "ip netns exec " + service + " ifconfig"
        lines = self.run_fetch_lines(cmd, host, enable_cache)
        interfaces = []
        current = None  # interface section currently being accumulated
        for line in lines:
            matches = self.if_header.match(line)
            if matches:
                if current:
                    # a new section starts - finalize the previous one
                    self.set_interface_data(current)
                name = matches.group(1).strip(":")
                # ignore 'lo' interface
                if name == 'lo':
                    current = None
                else:
                    line_remainder = matches.group(2)
                    vservice_id = host + "-" + service
                    current = {
                        "id": host + "-" + name,
                        "type": "vnic",
                        "vnic_type": "vservice_vnic",
                        "host": host,
                        "name": name,
                        "master_parent_type": "vservice",
                        "master_parent_id": vservice_id,
                        "parent_type": "vnics_folder",
                        "parent_id": vservice_id + "-vnics",
                        "parent_text": "vNICs",
                        "lines": []
                    }
                    interfaces.append(current)
                    self.handle_line(current, line_remainder)
            else:
                if current:
                    self.handle_line(current, line)
        if current:
            self.set_interface_data(current)
        return interfaces

    def handle_line(self, interface, line):
        """Feed one raw line to the regex extractors and keep its text."""
        self.find_matching_regexps(interface, line, self.regexps)
        interface["lines"].append(line.strip())

    def set_interface_data(self, interface):
        """Finalize an interface: compute its CIDR and link it (and its
        parent vservice) to the matching network document."""
        if not interface or 'IP Address' not in interface or 'netmask' not in interface:
            return

        interface["data"] = "\n".join(interface.pop("lines", None))
        interface["cidr"] = self.get_cidr_for_vnic(interface)
        network = self.inv.get_by_field(self.get_env(), "network", "cidrs",
                                        interface["cidr"], get_single=True)
        if not network:
            return
        interface["network"] = network["id"]
        # set network for the vservice, to check network on clique creation
        vservice = self.inv.get_by_id(self.get_env(),
                                      interface["master_parent_id"])
        network_id = network["id"]
        if "network" not in vservice:
            vservice["network"] = list()
        if network_id not in vservice["network"]:
            vservice["network"].append(network_id)
        self.inv.set(vservice)

    # find CIDR string by IP address and netmask
    def get_cidr_for_vnic(self, vnic):
        """Return '<network-address>/<prefix-len>' for the vNIC's IP/netmask."""
        if "IP Address" not in vnic:
            vnic["IP Address"] = "No IP Address"
            return "No IP Address"
        ipaddr = vnic["IP Address"].split('.')
        netmask = vnic["netmask"].split('.')

        # calculate network start
        net_start = []
        for pos in range(0, 4):
            net_start.append(str(int(ipaddr[pos]) & int(netmask[pos])))

        cidr_string = '.'.join(net_start) + '/'
        cidr_string = cidr_string + self.get_net_size(netmask)
        return cidr_string

    def get_net_size(self, netmask):
        """Return the CIDR prefix length of a dotted-quad netmask, as a string."""
        binary_str = ''
        for octet in netmask:
            binary_str += bin(int(octet))[2:].zfill(8)
        # count of leading 1-bits = prefix length
        return str(len(binary_str.rstrip('0')))
diff --git a/app/discover/fetchers/db/__init__.py b/app/discover/fetchers/db/__init__.py
new file mode 100644
index 0000000..b0637e9
--- /dev/null
+++ b/app/discover/fetchers/db/__init__.py
@@ -0,0 +1,9 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
diff --git a/app/discover/fetchers/db/db_access.py b/app/discover/fetchers/db/db_access.py
new file mode 100644
index 0000000..00bd776
--- /dev/null
+++ b/app/discover/fetchers/db/db_access.py
@@ -0,0 +1,142 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import mysql.connector
+
+from discover.configuration import Configuration
+from discover.fetcher import Fetcher
+from utils.string_utils import jsonify
+
+
class DbAccess(Fetcher):
    """Fetcher base class giving access to the OpenStack MySQL database.

    The connection is shared by all DbAccess instances (class attribute)
    and periodically re-created to work around problems seen with
    long-lived connections.
    """

    # shared connection, lazily created on first use
    conn = None
    # queries run on the current connection (drives periodic reconnect)
    query_count_per_con = 0

    # connection timeout set to 30 seconds,
    # due to problems over long connections
    TIMEOUT = 30

    def __init__(self):
        super().__init__()
        self.config = Configuration()
        self.conf = self.config.get("mysql")
        self.connect_to_db()
        cursor = DbAccess.conn.cursor(dictionary=True)
        try:
            # check if DB schema 'neutron' exists
            cursor.execute("SELECT COUNT(*) FROM neutron.agents")
            for row in cursor:
                pass
            self.neutron_db = "neutron"
        except (AttributeError, mysql.connector.errors.ProgrammingError):
            # schema not found - fall back to the alternate schema name
            self.neutron_db = "ml2_neutron"

    def db_connect(self, _host, _port, _user, _password, _database):
        """Open the shared MySQL connection if it does not exist yet."""
        if DbAccess.conn:
            return
        try:
            connector = mysql.connector
            DbAccess.conn = connector.connect(host=_host, port=_port,
                                              connection_timeout=self.TIMEOUT,
                                              user=_user,
                                              password=_password,
                                              database=_database,
                                              raise_on_warnings=True)
            DbAccess.conn.ping(True)  # auto-reconnect if necessary
        except:
            # NOTE(review): bare except hides the actual connection error;
            # consider catching mysql.connector.Error and logging details
            self.log.critical("failed to connect to MySQL DB")
            return
        DbAccess.query_count_per_con = 0

    def connect_to_db(self, force=False):
        """Ensure the shared connection exists; re-create it when forced."""
        if DbAccess.conn:
            if not force:
                return
            self.log.info("DbAccess: ****** forcing reconnect, " +
                          "query count: %s ******",
                          DbAccess.query_count_per_con)
            DbAccess.conn = None
        self.conf = self.config.get("mysql")
        cnf = self.conf
        # default schema is 'nova' when none is configured
        cnf['schema'] = cnf['schema'] if 'schema' in cnf else 'nova'
        self.db_connect(cnf["host"], cnf["port"],
                        cnf["user"], cnf["password"],
                        cnf["schema"])

    def get_objects_list_for_id(self, query, object_type, id):
        """Run a query (optionally with a single id parameter) and return
        the rows as a list of dicts.

        Forces a reconnect every 25 queries, and retries once after an
        operational error.
        """
        self.connect_to_db(DbAccess.query_count_per_con >= 25)
        DbAccess.query_count_per_con += 1
        self.log.debug("query count: %s, running query:\n%s\n",
                       str(DbAccess.query_count_per_con), query)

        cursor = DbAccess.conn.cursor(dictionary=True)
        try:
            if id:
                cursor.execute(query, [str(id)])
            else:
                cursor.execute(query)
        except (AttributeError, mysql.connector.errors.OperationalError) as e:
            self.log.error(e)
            self.connect_to_db(True)
            # try again to run the query
            cursor = DbAccess.conn.cursor(dictionary=True)
            if id:
                cursor.execute(query, [str(id)])
            else:
                cursor.execute(query)

        rows = []
        for row in cursor:
            rows.append(row)
        return rows

    def get_objects_list(self, query, object_type):
        """Run a query with no parameters; return the rows as dicts."""
        return self.get_objects_list_for_id(query, object_type, None)

    def get_objects(self, qry, type, id):
        """Run a query and return its rows JSON-encoded."""
        return jsonify(self.get_objects_list(qry, type))

    def get(self, id):
        # return list of available fetch types
        ret = {
            "description": "List of available fetch calls for this interface",
            "types": {
                "regions": "Regions of this environment",
                "projects": "Projects (tenants) of this environment",
                "availability_zones": "Availability zones",
                "aggregates": "Host aggregates",
                "aggregate_hosts": "Hosts in aggregate X (parameter: id)",
                "az_hosts": "Host in availability_zone X (parameter: id)"
            }
        }
        return jsonify(ret)

    def exec(self, query, table, field, values):
        """Execute a parameterized statement, retrying once after an
        operational error; return any produced rows."""
        try:
            cursor = DbAccess.conn.cursor(dictionary=True)
            cursor.execute(query, [table, field, values])
        except (AttributeError, mysql.connector.errors.OperationalError) as e:
            self.log.error(e)
            self.connect_to_db(True)
            # try again to run the query
            cursor = DbAccess.conn.cursor(dictionary=True)
            cursor.execute(query, [table, field, values])

        rows = []
        for row in cursor:
            rows.append(row)
        return rows

    def set(self, table, field, values):
        # NOTE(review): MySQL cannot bind identifiers (table/column names)
        # through %s placeholders - they are quoted as string literals, so
        # this statement is unlikely to work as intended; verify usage
        query = """INSERT INTO %s %s VALUES %s"""
        return self.exec(query, table, field, values)

    def delete(self, table, field, values):
        # NOTE(review): same placeholder-for-identifier concern as in set()
        query = """DELETE FROM %s WHERE %s=%s"""
        return self.exec(query, table, field, values)
diff --git a/app/discover/fetchers/db/db_fetch_aggregate_hosts.py b/app/discover/fetchers/db/db_fetch_aggregate_hosts.py
new file mode 100644
index 0000000..59ba5d0
--- /dev/null
+++ b/app/discover/fetchers/db/db_fetch_aggregate_hosts.py
@@ -0,0 +1,36 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import bson
+
+from discover.fetchers.db.db_access import DbAccess
+from utils.inventory_mgr import InventoryMgr
+
+
class DbFetchAggregateHosts(DbAccess):
    """Fetch the hosts belonging to a nova host aggregate."""

    def get(self, id):
        """Return host records of aggregate `id`, each linked by ref_id to
        the matching host document in the inventory."""
        query = """
            SELECT CONCAT('aggregate-', a.name, '-', host) AS id, host AS name
            FROM nova.aggregate_hosts ah
            JOIN nova.aggregates a ON a.id = ah.aggregate_id
            WHERE ah.deleted = 0 AND aggregate_id = %s
            """
        hosts = self.get_objects_list_for_id(query, "host", id)
        if not hosts:
            return hosts
        inv = InventoryMgr()
        for host_rec in hosts:
            host_id = host_rec['name']
            host = inv.get_by_id(self.get_env(), host_id)
            if host:
                host_rec['ref_id'] = bson.ObjectId(host['_id'])
            else:
                self.log.error('unable to find host {} '
                               'from aggregate {} in inventory'
                               .format(host_id, id))
        return hosts
diff --git a/app/discover/fetchers/db/db_fetch_aggregates.py b/app/discover/fetchers/db/db_fetch_aggregates.py
new file mode 100644
index 0000000..da0720b
--- /dev/null
+++ b/app/discover/fetchers/db/db_fetch_aggregates.py
@@ -0,0 +1,21 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.db.db_access import DbAccess
+
+
class DbFetchAggregates(DbAccess):
    """Fetch all non-deleted nova host aggregates."""

    def get(self, id):
        """Return the id and name of every live aggregate (id is unused)."""
        query = """
            SELECT id, name
            FROM nova.aggregates
            WHERE deleted = 0
            """
        return self.get_objects_list(query, "host aggregate")
diff --git a/app/discover/fetchers/db/db_fetch_availability_zones.py b/app/discover/fetchers/db/db_fetch_availability_zones.py
new file mode 100644
index 0000000..763d777
--- /dev/null
+++ b/app/discover/fetchers/db/db_fetch_availability_zones.py
@@ -0,0 +1,22 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.db.db_access import DbAccess
+
class DbFetchAvailabilityZones(DbAccess):
    """Fetch availability zones along with a count of their hosts."""

    def get(self, id):
        """Return one record per availability zone (id is unused)."""
        return self.get_objects_list(
            """
            SELECT DISTINCT availability_zone,
            availability_zone AS id, COUNT(DISTINCT host) AS descendants
            FROM nova.instances
            WHERE availability_zone IS NOT NULL
            GROUP BY availability_zone
            """,
            "availability zone")
diff --git a/app/discover/fetchers/db/db_fetch_az_network_hosts.py b/app/discover/fetchers/db/db_fetch_az_network_hosts.py
new file mode 100644
index 0000000..09043ea
--- /dev/null
+++ b/app/discover/fetchers/db/db_fetch_az_network_hosts.py
@@ -0,0 +1,31 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import json
+
+from discover.fetchers.db.db_access import DbAccess
+
+
class DbFetchAZNetworkHosts(DbAccess):
    """Fetch network nodes: hosts running the Neutron Metadata agent."""

    def get(self, id):
        """Return one record per network host, decorated with host details."""
        hosts = self.get_objects_list(
            """
            SELECT DISTINCT host, host AS id, configurations
            FROM neutron.agents
            WHERE agent_type = 'Metadata agent'
            """,
            "host")
        for host in hosts:
            self.set_host_details(host)
        return hosts

    def set_host_details(self, r):
        """Derive host attributes from the agent's JSON configurations."""
        config = json.loads(r["configurations"])
        r["ip_address"] = config["nova_metadata_ip"]
        r["host_type"] = "Network Node"
diff --git a/app/discover/fetchers/db/db_fetch_host_instances.py b/app/discover/fetchers/db/db_fetch_host_instances.py
new file mode 100644
index 0000000..2245c4a
--- /dev/null
+++ b/app/discover/fetchers/db/db_fetch_host_instances.py
@@ -0,0 +1,15 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.db.db_fetch_instances import DbFetchInstances
+
+class DbFetchHostInstances(DbFetchInstances):
+ # Thin wrapper: fetch the instances that run on a specific host.
+
+ def get(self, id):
+ # Delegate to the parent's generic fetch, filtering by host == id.
+ return self.get_instances("host", id)
diff --git a/app/discover/fetchers/db/db_fetch_host_network_agents.py b/app/discover/fetchers/db/db_fetch_host_network_agents.py
new file mode 100644
index 0000000..c323573
--- /dev/null
+++ b/app/discover/fetchers/db/db_fetch_host_network_agents.py
@@ -0,0 +1,35 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import json
+
+from discover.fetchers.db.db_access import DbAccess
+from utils.inventory_mgr import InventoryMgr
+
+
+class DbFetchHostNetworkAgents(DbAccess):
+ # Fetcher for the Neutron agents running on a given host.
+ def __init__(self):
+ super().__init__()
+ self.inv = InventoryMgr()
+ self.env_config = self.config.get_env_config()
+
+ def get(self, id):
+ # %s placeholder is filled by get_objects_list_for_id (parameterized
+ # query); only the schema name is formatted in directly.
+ query = """
+ SELECT * FROM {}.agents
+ WHERE host = %s
+ """.format(self.neutron_db)
+ # 'id' is the folder id "<host>-network_agents"; strip the suffix
+ # to recover the bare host name.
+ host_id = id[:-1 * len("-network_agents")]
+ results = self.get_objects_list_for_id(query, "network_agent", host_id)
+ # Prefix agent ids with the primary mechanism driver (e.g. 'OVS')
+ # so ids are unique per driver; fall back to a generic prefix.
+ mechanism_drivers = self.env_config['mechanism_drivers']
+ id_prefix = mechanism_drivers[0] if mechanism_drivers else 'network_agent'
+ for o in results:
+ # 'configurations' column holds a JSON string - decode in place
+ o["configurations"] = json.loads(o["configurations"])
+ o["name"] = o["binary"]
+ o['id'] = id_prefix + '-' + o['id']
+ return results
diff --git a/app/discover/fetchers/db/db_fetch_instances.py b/app/discover/fetchers/db/db_fetch_instances.py
new file mode 100644
index 0000000..54c4114
--- /dev/null
+++ b/app/discover/fetchers/db/db_fetch_instances.py
@@ -0,0 +1,60 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import json
+
+from discover.fetchers.db.db_access import DbAccess
+
+
+class DbFetchInstances(DbAccess):
+ # Enriches already-discovered instance documents with DB-side details
+ # (name, host IP, network info, project).
+ def get_instance_data(self, instances):
+ # Index the given documents by instance id for O(1) matching below.
+ instances_hash = {}
+ for doc in instances:
+ instances_hash[doc["id"]] = doc
+
+ # The IF(...) guarantees 'project' is never NULL ("Unknown" instead),
+ # which build_instance_details relies on when it pops "project".
+ query = """
+ SELECT DISTINCT i.uuid AS id, i.display_name AS name,
+ i.host AS host, host_ip AS ip_address,
+ network_info, project_id,
+ IF(p.name IS NULL, "Unknown", p.name) AS project
+ FROM nova.instances i
+ LEFT JOIN keystone.project p ON p.id = i.project_id
+ JOIN nova.instance_info_caches ic ON i.uuid = ic.instance_uuid
+ JOIN nova.compute_nodes cn ON i.node = cn.hypervisor_hostname
+ WHERE i.deleted = 0
+ """
+ results = self.get_objects_list(query, "instance")
+ # Merge DB rows into the matching documents in place; rows for
+ # instances not in the given list are skipped.
+ for result in results:
+ id = result["id"]
+ if id not in instances_hash:
+ continue
+ self.build_instance_details(result)
+ doc = instances_hash[id]
+ doc.update(result)
+
+ def build_instance_details(self, result):
+ # 'network_info' column is a JSON string - decode it.
+ network_info_str = result.pop("network_info", None)
+ result["network_info"] = json.loads(network_info_str)
+
+ # add network as an array to allow constraint checking
+ # when building clique
+ networks = []
+ for net in result["network_info"]:
+ if "network" not in net or "id" not in net["network"]:
+ continue
+ network_id = net["network"]["id"]
+ if network_id in networks:
+ continue
+ networks.append(network_id)
+ result["network"] = networks
+
+ result["type"] = "instance"
+ result["parent_type"] = "instances_folder"
+ result["parent_id"] = result["host"] + "-instances"
+ # 'project' is always set by the SQL alias above, so the pop value
+ # is never None here (would raise TypeError on concatenation if so).
+ result["in_project-" + result.pop("project", None)] = "1"
diff --git a/app/discover/fetchers/db/db_fetch_oteps.py b/app/discover/fetchers/db/db_fetch_oteps.py
new file mode 100644
index 0000000..9055c11
--- /dev/null
+++ b/app/discover/fetchers/db/db_fetch_oteps.py
@@ -0,0 +1,81 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import re
+
+from discover.fetchers.cli.cli_access import CliAccess
+from discover.fetchers.db.db_access import DbAccess
+from utils.inventory_mgr import InventoryMgr
+from utils.singleton import Singleton
+
+
+class DbFetchOteps(DbAccess, CliAccess, metaclass=Singleton):
+ # Fetcher for overlay tunnel endpoints (OTEPs) of a vEdge, combining
+ # DB rows (ml2_*_endpoints) with CLI output (ifconfig).
+ def __init__(self):
+ super().__init__()
+ self.inv = InventoryMgr()
+ self.port_re = re.compile("^\s*port (\d+): ([^(]+)( \(internal\))?$")
+
+ def get(self, id):
+ # 'id' is the vEdge inventory id; bail out early if it has no
+ # tunnel configuration.
+ vedge = self.inv.get_by_id(self.get_env(), id)
+ tunnel_type = None
+ if "configurations" not in vedge:
+ return []
+ if "tunnel_types" not in vedge["configurations"]:
+ return []
+ if not vedge["configurations"]["tunnel_types"]:
+ return []
+ # only the first tunnel type is considered
+ tunnel_type = vedge["configurations"]["tunnel_types"][0]
+ host_id = vedge["host"]
+ table_name = "neutron.ml2_" + tunnel_type + "_endpoints"
+ env_config = self.config.get_env_config()
+ distribution = env_config["distribution"]
+ if distribution == "Canonical-icehouse":
+ # for Icehouse, we only get IP address from the DB, so take the
+ # host IP address and from the host data in Mongo
+ host = self.inv.get_by_id(self.get_env(), host_id)
+ results = [{"host": host_id, "ip_address": host["ip_address"]}]
+ else:
+ results = self.get_objects_list_for_id(
+ """
+ SELECT *
+ FROM {}
+ WHERE host = %s
+ """.format(table_name),
+ "vedge", host_id)
+ for doc in results:
+ doc["id"] = host_id + "-otep"
+ doc["name"] = doc["id"]
+ doc["host"] = host_id
+ doc["overlay_type"] = tunnel_type
+ doc["ports"] = vedge["tunnel_ports"] if "tunnel_ports" in vedge else []
+ # NOTE(review): default "67" is the DHCP/BOOTP port, not a usual
+ # VXLAN/GRE port - confirm this fallback is intentional.
+ if "udp_port" not in doc:
+ doc["udp_port"] = "67"
+ self.get_vconnector(doc, host_id, vedge)
+
+ return results
+
+ # find matching vConnector by tunneling_ip of vEdge
+ # look for that IP address in ifconfig for the host
+ def get_vconnector(self, doc, host_id, vedge):
+ tunneling_ip = vedge["configurations"]["tunneling_ip"]
+ ifconfig_lines = self.run_fetch_lines("ifconfig", host_id)
+ interface = None
+ # ifconfig detail lines are indented; interface names start at col 0
+ ip_string = " " * 10 + "inet addr:" + tunneling_ip + " "
+ vconnector = None
+ for l in ifconfig_lines:
+ if l.startswith(" "):
+ # detail line: does it carry the tunneling IP of the
+ # interface we last saw?
+ if interface and l.startswith(ip_string):
+ vconnector = interface
+ break
+ else:
+ # header line: remember the interface name
+ if " " in l:
+ interface = l[:l.index(" ")]
+
+ if vconnector:
+ doc["vconnector"] = vconnector
diff --git a/app/discover/fetchers/db/db_fetch_port.py b/app/discover/fetchers/db/db_fetch_port.py
new file mode 100644
index 0000000..2cb814a
--- /dev/null
+++ b/app/discover/fetchers/db/db_fetch_port.py
@@ -0,0 +1,34 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.db.db_access import DbAccess
+from utils.inventory_mgr import InventoryMgr
+
+
+class DbFetchPort(DbAccess):
+ # Fetcher for Neutron ports belonging to a given network.
+ def __init__(self):
+ super().__init__()
+ self.inv = InventoryMgr()
+ self.env_config = self.config.get_env_config()
+
+ def get(self, id=None):
+ # 'id' is a network id; %s is bound as a query parameter downstream.
+ query = """SELECT * FROM {}.ports where network_id = %s""" \
+ .format(self.neutron_db)
+ return self.get_objects_list_for_id(query, "port", id)
+
+ def get_id(self, id=None):
+ # Return the id of the first port on the network, or None.
+ query = """SELECT id FROM {}.ports where network_id = %s""" \
+ .format(self.neutron_db)
+ result = self.get_objects_list_for_id(query, "port", id)
+ return result[0]['id'] if result != [] else None
+
+ def get_id_by_field(self, id, search=''):
+ # WARNING: 'search' is concatenated into the SQL text unescaped -
+ # callers must never pass untrusted input here (injection risk).
+ # Also note the hard-coded 'neutron' schema, unlike the methods
+ # above which use self.neutron_db.
+ query = """SELECT id FROM neutron.ports where network_id = %s AND """ + search
+ result = self.get_objects_list_for_id(query, "port", id)
+ return result[0]['id'] if result != [] else None \ No newline at end of file
diff --git a/app/discover/fetchers/db/db_fetch_vedges_ovs.py b/app/discover/fetchers/db/db_fetch_vedges_ovs.py
new file mode 100644
index 0000000..24cc9f8
--- /dev/null
+++ b/app/discover/fetchers/db/db_fetch_vedges_ovs.py
@@ -0,0 +1,178 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import json
+
+import re
+
+from discover.fetchers.cli.cli_access import CliAccess
+from discover.fetchers.db.db_access import DbAccess
+from utils.inventory_mgr import InventoryMgr
+from utils.singleton import Singleton
+
+
+class DbFetchVedgesOvs(DbAccess, CliAccess, metaclass=Singleton):
+ # Fetcher for OVS vEdges: combines the Neutron agents table with
+ # 'ovs-vsctl show' / 'ovs-dpctl show' CLI output from the host.
+ def __init__(self):
+ super().__init__()
+ self.inv = InventoryMgr()
+ self.port_re = re.compile("^\s*port (\d+): ([^(]+)( \(internal\))?$")
+ # ovs-vsctl indents "Port ..." lines by exactly 8 spaces
+ self.port_line_header_prefix = " " * 8 + "Port "
+
+ def get(self, id):
+ # 'id' is "<host>-vedges"-style; everything before the last '-' is
+ # the host name.
+ host_id = id[:id.rindex('-')]
+ results = self.get_objects_list_for_id(
+ """
+ SELECT *
+ FROM {}.agents
+ WHERE host = %s AND agent_type = 'Open vSwitch agent'
+ """.format(self.neutron_db),
+ "vedge", host_id)
+ host = self.inv.get_by_id(self.get_env(), host_id)
+ if not host:
+ self.log.error("unable to find host in inventory: %s", host_id)
+ return []
+ # only Network/Compute nodes run OVS agents of interest
+ host_types = host["host_type"]
+ if "Network" not in host_types and "Compute" not in host_types:
+ return []
+ vsctl_lines = self.run_fetch_lines("ovs-vsctl show", host["id"])
+ ports = self.fetch_ports(host, vsctl_lines)
+ for doc in results:
+ doc["name"] = doc["host"] + "-OVS"
+ # 'configurations' column holds a JSON string - decode in place
+ doc["configurations"] = json.loads(doc["configurations"])
+ doc["ports"] = ports
+ doc["tunnel_ports"] = self.get_overlay_tunnels(doc, vsctl_lines)
+ return results
+
+ def fetch_ports(self, host, vsctl_lines):
+ # Return {port_name: port_dict}; dpctl gives ids/names, vsctl adds
+ # VLAN tags.
+ host_types = host["host_type"]
+ if "Network" not in host_types and "Compute" not in host_types:
+ return {}
+ ports = self.fetch_ports_from_dpctl(host["id"])
+ self.fetch_port_tags_from_vsctl(vsctl_lines, ports)
+ return ports
+
+ def fetch_ports_from_dpctl(self, host_id):
+ # Parse 'ovs-dpctl show' lines of the form
+ # "port <N>: <name> (internal)" using self.port_re.
+ cmd = "ovs-dpctl show"
+ lines = self.run_fetch_lines(cmd, host_id)
+ ports = {}
+ for l in lines:
+ port_matches = self.port_re.match(l)
+ if not port_matches:
+ continue
+ port = {}
+ id = port_matches.group(1)
+ name = port_matches.group(2)
+ is_internal = port_matches.group(3) == " (internal)"
+ port["internal"] = is_internal
+ port["id"] = id
+ port["name"] = name
+ ports[name] = port
+ return ports
+
+ # from ovs-vsctl, fetch tags of ports
+ # example format of ovs-vsctl output for a specific port:
+ # Port "tap9f94d28e-7b"
+ # tag: 5
+ # Interface "tap9f94d28e-7b"
+ # type: internal
+ def fetch_port_tags_from_vsctl(self, vsctl_lines, ports):
+ # 'port' tracks the port whose detail lines we are currently in;
+ # only ports already known (from dpctl) are tagged.
+ port = None
+ for l in vsctl_lines:
+ if l.startswith(self.port_line_header_prefix):
+ port = None
+ port_name = l[len(self.port_line_header_prefix):]
+ # remove quotes from port name
+ if '"' in port_name:
+ port_name = port_name[1:][:-1]
+ if port_name in ports:
+ port = ports[port_name]
+ continue
+ if not port:
+ continue
+ if l.startswith(" " * 12 + "tag: "):
+ port["tag"] = l[l.index(":") + 2:]
+ ports[port["name"]] = port
+ return ports
+
+ def get_overlay_tunnels(self, doc, vsctl_lines):
+ # Return {port_name: port} for the 'br-tun' bridge, or {} when the
+ # agent has no tunneling configured (in which case the provider
+ # bridge pNIC is resolved instead).
+ if doc["agent_type"] != "Open vSwitch agent":
+ return {}
+ if "tunneling_ip" not in doc["configurations"]:
+ return {}
+ if not doc["configurations"]["tunneling_ip"]:
+ self.get_bridge_pnic(doc)
+ return {}
+
+ # read the 'br-tun' interface ports
+ # this will be used later in the OTEP
+ tunnel_bridge_header = " " * 4 + "Bridge br-tun"
+ try:
+ br_tun_loc = vsctl_lines.index(tunnel_bridge_header)
+ except ValueError:
+ # NOTE(review): returns [] here but {} elsewhere - callers that
+ # treat the result as a dict should confirm this is harmless.
+ return []
+ lines = vsctl_lines[br_tun_loc + 1:]
+ tunnel_ports = {}
+ port = None
+ for l in lines:
+ # if we have only 4 or less spaces in the beginng,
+ # the br-tun section ended so return
+ if not l.startswith(" " * 5):
+ break
+ if l.startswith(self.port_line_header_prefix):
+ # flush the previous port before starting a new one
+ if port:
+ tunnel_ports[port["name"]] = port
+ name = l[len(self.port_line_header_prefix):].strip('" ')
+ port = {"name": name}
+ elif port and l.startswith(" " * 12 + "Interface "):
+ interface = l[10 + len("Interface ") + 1:].strip('" ')
+ port["interface"] = interface
+ elif port and l.startswith(" " * 16):
+ # attribute line, e.g. 'type: vxlan' or
+ # 'options: {in_key=flow, remote_ip=...}'
+ colon_pos = l.index(":")
+ attr = l[:colon_pos].strip()
+ val = l[colon_pos + 2:].strip('" ')
+ if attr == "options":
+ # parse '{k=v, k=v}' into a dict
+ opts = val.strip('{}')
+ val = {}
+ for opt in opts.split(", "):
+ opt_name = opt[:opt.index("=")]
+ opt_val = opt[opt.index("=") + 1:].strip('" ')
+ val[opt_name] = opt_val
+ port[attr] = val
+ # flush the last port seen
+ if port:
+ tunnel_ports[port["name"]] = port
+ return tunnel_ports
+
+ def get_bridge_pnic(self, doc):
+ # For non-tunneling agents: find the pNIC behind the provider
+ # bridge and cross-link it with its OVS port id.
+ conf = doc["configurations"]
+ if "bridge_mappings" not in conf or not conf["bridge_mappings"]:
+ return
+ # NOTE(review): if there are several bridge mappings only the last
+ # value is kept - confirm a single mapping is assumed here.
+ for v in conf["bridge_mappings"].values(): br = v
+ ifaces_list_lines = self.run_fetch_lines("ovs-vsctl list-ifaces " + br,
+ doc["host"])
+ br_pnic_postfix = br + "--br-"
+ interface = ""
+ for l in ifaces_list_lines:
+ if l.startswith(br_pnic_postfix):
+ interface = l[len(br_pnic_postfix):]
+ break
+ if not interface:
+ return
+ doc["pnic"] = interface
+ # add port ID to pNIC
+ pnic = self.inv.find_items({
+ "environment": self.get_env(),
+ "type": "pnic",
+ "host": doc["host"],
+ "name": interface
+ }, get_single=True)
+ if not pnic:
+ return
+ port = doc["ports"][interface]
+ pnic["port_id"] = port["id"]
+ self.inv.set(pnic)
diff --git a/app/discover/fetchers/db/db_fetch_vedges_vpp.py b/app/discover/fetchers/db/db_fetch_vedges_vpp.py
new file mode 100644
index 0000000..a1c659e
--- /dev/null
+++ b/app/discover/fetchers/db/db_fetch_vedges_vpp.py
@@ -0,0 +1,56 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.cli.cli_access import CliAccess
+from discover.fetchers.db.db_access import DbAccess
+from utils.inventory_mgr import InventoryMgr
+from utils.singleton import Singleton
+
+
+class DbFetchVedgesVpp(DbAccess, CliAccess, metaclass=Singleton):
+ # Fetcher for VPP vEdges; all data comes from 'vppctl' CLI output
+ # (VPP has no Neutron agents table entry to query).
+ def __init__(self):
+ super().__init__()
+ self.inv = InventoryMgr()
+
+ def get(self, id):
+ # 'id' is "<host>-vedges"-style; strip the trailing component.
+ host_id = id[:id.rindex('-')]
+ vedge = {
+ 'host': host_id,
+ 'id': host_id + '-VPP',
+ 'name': 'VPP-' + host_id,
+ 'agent_type': 'VPP'
+ }
+ ver = self.run_fetch_lines('vppctl show ver', host_id)
+ if ver:
+ ver = ver[0]
+ # keep the first two whitespace-separated tokens, e.g. "vpp v17.04"
+ vedge['binary'] = ver[:ver.index(' ', ver.index(' ') + 1)]
+ host = self.inv.get_by_id(self.get_env(), host_id)
+ if not host:
+ self.log.error("unable to find host in inventory: %s", host_id)
+ return []
+ # only Network/Compute nodes are expected to run VPP
+ host_types = host["host_type"]
+ if "Network" not in host_types and "Compute" not in host_types:
+ return []
+ interfaces = self.run_fetch_lines('vppctl show int', host_id)
+ vedge['ports'] = self.fetch_ports(interfaces)
+ return [vedge]
+
+ def fetch_ports(self, interfaces):
+ # Parse 'vppctl show int' output: header/continuation lines are
+ # indented, interface rows are '<name> <id> <state> ...'.
+ ports = {}
+ for i in interfaces:
+ if not i or i.startswith(' '):
+ continue
+ parts = i.split()
+ port = {
+ 'id': parts[1],
+ 'state': parts[2],
+ 'name': parts[0]
+ }
+ ports[port['name']] = port
+ return ports
diff --git a/app/discover/fetchers/folder_fetcher.py b/app/discover/fetchers/folder_fetcher.py
new file mode 100644
index 0000000..e7bb1fa
--- /dev/null
+++ b/app/discover/fetchers/folder_fetcher.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python3
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetcher import Fetcher
+from utils.string_utils import jsonify
+
+
+class FolderFetcher(Fetcher):
+ # Generic fetcher that produces a single folder object grouping
+ # children of a given type under a parent object.
+ def __init__(self, types_name, parent_type, text="", create_folder=True):
+ super(FolderFetcher, self).__init__()
+ self.types_name = types_name
+ self.parent_type = parent_type
+ self.text = text
+ self.create_folder = create_folder
+ # default display text is the capitalized type name
+ if not self.text:
+ self.text = self.types_name.capitalize()
+
+ def get(self, id):
+ # Folder id is "<parent id>-<types name>", e.g. "host1-instances".
+ oid = id + "-" + self.types_name
+ root_obj = {
+ "id": oid,
+ "create_object": self.create_folder,
+ "name": oid,
+ "text": self.text,
+ "type": self.types_name + "_folder",
+ "parent_id": id,
+ "parent_type": self.parent_type
+ }
+ # returned as a JSON string, unlike most fetchers returning lists
+ return jsonify([root_obj])
diff --git a/app/discover/find_links.py b/app/discover/find_links.py
new file mode 100644
index 0000000..0967a60
--- /dev/null
+++ b/app/discover/find_links.py
@@ -0,0 +1,30 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetcher import Fetcher
+from utils.inventory_mgr import InventoryMgr
+
+
+class FindLinks(Fetcher):
+ # Base class for link-discovery fetchers; wraps inventory link
+ # creation and hooks newly created links into monitoring setup.
+ def __init__(self):
+ super().__init__()
+ self.inv = InventoryMgr()
+
+ def create_link(self, env, host, source, source_id, target, target_id,
+ link_type, link_name, state, link_weight,
+ source_label="", target_label="",
+ extra_attributes=None):
+ # avoid a mutable default argument
+ if extra_attributes is None:
+ extra_attributes = {}
+ # NOTE: source_label/target_label are accepted but not forwarded
+ # to inv.create_link here.
+ link = self.inv.create_link(env, host,
+ source, source_id, target, target_id,
+ link_type, link_name, state, link_weight,
+ extra_attributes=extra_attributes)
+ # register the link for monitoring if monitoring is configured
+ if self.inv.monitoring_setup_manager:
+ self.inv.monitoring_setup_manager.create_setup(link)
diff --git a/app/discover/find_links_for_instance_vnics.py b/app/discover/find_links_for_instance_vnics.py
new file mode 100644
index 0000000..7e081fc
--- /dev/null
+++ b/app/discover/find_links_for_instance_vnics.py
@@ -0,0 +1,59 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.find_links import FindLinks
+
+
+class FindLinksForInstanceVnics(FindLinks):
+ # Creates 'instance-vnic' links between instances and their vNICs.
+ def __init__(self):
+ super().__init__()
+
+ def add_links(self):
+ # iterate all instance vNICs in this environment
+ self.log.info("adding links of type: instance-vnic")
+ vnics = self.inv.find_items({
+ "environment": self.get_env(),
+ "type": "vnic",
+ "vnic_type": "instance_vnic"
+ })
+ for v in vnics:
+ self.add_link_for_vnic(v)
+
+ def add_link_for_vnic(self, v):
+ instance = self.inv.get_by_id(self.get_env(), v["instance_id"])
+ if "network_info" not in instance:
+ self.log.warn("add_link_for_vnic: " +
+ "network_info missing in instance: %s ",
+ instance["id"])
+ return
+ host = self.inv.get_by_id(self.get_env(), instance["host"])
+ # links are only meaningful on Network/Compute nodes
+ host_types = host["host_type"]
+ if "Network" not in host_types and "Compute" not in host_types:
+ return []
+ source = instance["_id"]
+ source_id = instance["id"]
+ target = v["_id"]
+ target_id = v["id"]
+ link_type = "instance-vnic"
+ # find related network
+ # match the vNIC device name against the instance's network_info
+ # entries; also persist the network id back onto the vNIC doc.
+ network_name = None
+ network_id = None
+ for net in instance["network_info"]:
+ if net["devname"] == v["id"]:
+ network_name = net["network"]["label"]
+ network_id = net['network']['id']
+ v['network'] = network_id
+ self.inv.set(v)
+ break
+ state = "up" # TBD
+ link_weight = 0 # TBD
+ attributes = {} if not network_id else {'network': network_id}
+ self.create_link(self.get_env(), host["name"],
+ source, source_id, target, target_id,
+ link_type, network_name, state, link_weight,
+ extra_attributes=attributes)
diff --git a/app/discover/find_links_for_oteps.py b/app/discover/find_links_for_oteps.py
new file mode 100644
index 0000000..84373a4
--- /dev/null
+++ b/app/discover/find_links_for_oteps.py
@@ -0,0 +1,85 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.find_links import FindLinks
+
+
+class FindLinksForOteps(FindLinks):
+ # Creates links around overlay tunnel endpoints:
+ # vedge-otep, otep-vconnector and otep-pnic.
+ def __init__(self):
+ super().__init__()
+
+ def add_links(self):
+ self.log.info("adding link types: " +
+ "vedge-otep, otep-vconnector, otep-pnic")
+ oteps = self.inv.find_items({
+ "environment": self.get_env(),
+ "type": "otep"
+ })
+ for otep in oteps:
+ self.add_vedge_otep_link(otep)
+ self.add_otep_vconnector_link(otep)
+ self.add_otep_pnic_link(otep)
+
+ def add_vedge_otep_link(self, otep):
+ # the OTEP's parent is the vEdge it belongs to
+ vedge = self.inv.get_by_id(self.get_env(), otep["parent_id"])
+ source = vedge["_id"]
+ source_id = vedge["id"]
+ target = otep["_id"]
+ target_id = otep["id"]
+ link_type = "vedge-otep"
+ link_name = vedge["name"] + "-otep"
+ state = "up" # TBD
+ link_weight = 0 # TBD
+ self.create_link(self.get_env(), vedge["host"],
+ source, source_id, target, target_id,
+ link_type, link_name, state, link_weight)
+
+ def add_otep_vconnector_link(self, otep):
+ # 'vconnector' is set by DbFetchOteps only when one was matched
+ if "vconnector" not in otep:
+ return
+ vconnector = self.inv.find_items({
+ "environment": self.get_env(),
+ "type": "vconnector",
+ "host": otep["host"],
+ "name": otep["vconnector"]
+ }, get_single=True)
+ if not vconnector:
+ return
+ source = otep["_id"]
+ source_id = otep["id"]
+ target = vconnector["_id"]
+ target_id = vconnector["id"]
+ link_type = "otep-vconnector"
+ link_name = otep["name"] + "-" + otep["vconnector"]
+ state = "up" # TBD
+ link_weight = 0 # TBD
+ self.create_link(self.get_env(), otep["host"],
+ source, source_id, target, target_id,
+ link_type, link_name, state, link_weight)
+
+ def add_otep_pnic_link(self, otep):
+ # match by the pNIC carrying the OTEP's IP address on this host
+ pnic = self.inv.find_items({
+ "environment": self.get_env(),
+ "type": "pnic",
+ "host": otep["host"],
+ "IP Address": otep["ip_address"]
+ }, get_single=True)
+ if not pnic:
+ return
+ source = otep["_id"]
+ source_id = otep["id"]
+ target = pnic["_id"]
+ target_id = pnic["id"]
+ link_type = "otep-pnic"
+ link_name = otep["host"] + "pnic" + pnic["name"]
+ state = "up" # TBD
+ link_weight = 0 # TBD
+ self.create_link(self.get_env(), otep["host"],
+ source, source_id, target, target_id,
+ link_type, link_name, state, link_weight)
diff --git a/app/discover/find_links_for_pnics.py b/app/discover/find_links_for_pnics.py
new file mode 100644
index 0000000..19828d0
--- /dev/null
+++ b/app/discover/find_links_for_pnics.py
@@ -0,0 +1,58 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.find_links import FindLinks
+
+
+class FindLinksForPnics(FindLinks):
+ # Creates 'pnic-network' links between physical NICs and the networks
+ # whose ports are bound to the pNIC's host.
+ def __init__(self):
+ super().__init__()
+
+ def add_links(self):
+ pnics = self.inv.find_items({
+ "environment": self.get_env(),
+ "type": "pnic",
+ "pnic_type": {"$ne": "switch"} # TODO: make a more educated guess
+ })
+ for pnic in pnics:
+ self.add_pnic_network_links(pnic)
+
+ def add_pnic_network_links(self, pnic):
+ self.log.info("adding links of type: pnic-network")
+ host = pnic["host"]
+ # find ports for that host, and fetch just the network ID
+ ports = self.inv.find_items({
+ "environment": self.get_env(),
+ "type": "port",
+ "binding:host_id": host
+ }, {"network_id": 1, "id": 1})
+ # de-duplicate network ids via dict keys
+ networks = {}
+ for port in ports:
+ networks[port["network_id"]] = 1
+ for network_id in networks.keys():
+ network = self.inv.get_by_id(self.get_env(), network_id)
+ # NOTE(review): 'return' here aborts ALL remaining networks when
+ # one lookup comes back empty - 'continue' looks intended; confirm.
+ if network == []:
+ return
+ source = pnic["_id"]
+ source_id = pnic["id"]
+ target = network["_id"]
+ target_id = network["id"]
+ link_type = "pnic-network"
+ link_name = "Segment-" + str(network["provider:segmentation_id"]) \
+ if "provider:segmentation_id" in network \
+ else "Segment-None"
+ # link state mirrors the pNIC's ethtool 'Link detected' value
+ state = "up" if pnic["Link detected"] == "yes" else "down"
+ link_weight = 0 # TBD
+ source_label = "port-" + pnic["port_id"] if "port_id" in pnic \
+ else ""
+ self.create_link(self.get_env(), host,
+ source, source_id, target, target_id,
+ link_type, link_name, state, link_weight,
+ source_label,
+ extra_attributes={"network": target_id})
diff --git a/app/discover/find_links_for_vconnectors.py b/app/discover/find_links_for_vconnectors.py
new file mode 100644
index 0000000..3d5cdb0
--- /dev/null
+++ b/app/discover/find_links_for_vconnectors.py
@@ -0,0 +1,88 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.find_links import FindLinks
+
+
+class FindLinksForVconnectors(FindLinks):
+ # Creates 'vnic-vconnector' and 'vconnector-pnic' links for every
+ # interface of every vConnector in the environment.
+ def __init__(self):
+ super().__init__()
+
+ def add_links(self):
+ vconnectors = self.inv.find_items({
+ "environment": self.get_env(),
+ "type": "vconnector"
+ })
+ self.log.info("adding links of type: vnic-vconnector, vconnector-pnic")
+ for vconnector in vconnectors:
+ for interface in vconnector["interfaces_names"]:
+ self.add_vnic_vconnector_link(vconnector, interface)
+ self.add_vconnector_pnic_link(vconnector, interface)
+
+ def add_vnic_vconnector_link(self, vconnector, interface_name):
+ # vNIC lookup differs by mechanism driver (OVS vs VPP)
+ mechanism_drivers = self.configuration.environment['mechanism_drivers']
+ is_ovs = mechanism_drivers and mechanism_drivers[0] == 'OVS'
+ if is_ovs:
+ # interface ID for OVS
+ vnic = self.inv.get_by_id(self.get_env(), interface_name)
+ else:
+ # interface ID for VPP - match interface MAC address to vNIC MAC
+ interface = vconnector['interfaces'][interface_name]
+ if not interface or 'mac_address' not in interface:
+ return
+ vnic_mac = interface['mac_address']
+ vnic = self.inv.get_by_field(self.get_env(), 'vnic',
+ 'mac_address', vnic_mac,
+ get_single=True)
+ if not vnic:
+ return
+ host = vnic["host"]
+ source = vnic["_id"]
+ source_id = vnic["id"]
+ target = vconnector["_id"]
+ target_id = vconnector["id"]
+ link_type = "vnic-vconnector"
+ link_name = vnic["mac_address"]
+ state = "up" # TBD
+ link_weight = 0 # TBD
+ attributes = {}
+ # propagate the vNIC's network onto the link and the vConnector
+ if 'network' in vnic:
+ attributes = {'network': vnic['network']}
+ vconnector['network'] = vnic['network']
+ self.inv.set(vconnector)
+ self.create_link(self.get_env(), host,
+ source, source_id, target, target_id,
+ link_type, link_name, state, link_weight,
+ extra_attributes=attributes)
+
+ def add_vconnector_pnic_link(self, vconnector, interface):
+ # interface may be a dict (with 'name') or a bare name string
+ ifname = interface['name'] if isinstance(interface, dict) else interface
+ # strip VLAN suffix, e.g. 'eth0.100' -> 'eth0'
+ if "." in ifname:
+ ifname = ifname[:ifname.index(".")]
+ host = vconnector["host"]
+ pnic = self.inv.find_items({
+ "environment": self.get_env(),
+ "type": "pnic",
+ "host": vconnector["host"],
+ "name": ifname
+ }, get_single=True)
+ if not pnic:
+ return
+ source = vconnector["_id"]
+ source_id = vconnector["id"]
+ target = pnic["_id"]
+ target_id = pnic["id"]
+ link_type = "vconnector-pnic"
+ link_name = pnic["name"]
+ state = "up" # TBD
+ link_weight = 0 # TBD
+ self.create_link(self.get_env(), host,
+ source, source_id,
+ target, target_id,
+ link_type, link_name, state, link_weight)
diff --git a/app/discover/find_links_for_vedges.py b/app/discover/find_links_for_vedges.py
new file mode 100644
index 0000000..1235074
--- /dev/null
+++ b/app/discover/find_links_for_vedges.py
@@ -0,0 +1,124 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.find_links import FindLinks
+
+
class FindLinksForVedges(FindLinks):
    """Create links around vEdge objects:
    vnic-vedge, vconnector-vedge and vedge-pnic.
    """

    def __init__(self):
        super().__init__()

    def add_links(self):
        """Scan all vEdges in the environment and link each of their ports."""
        self.log.info("adding link types: " +
                      "vnic-vedge, vconnector-vedge, vedge-pnic")
        vedges = self.inv.find_items({
            "environment": self.get_env(),
            "type": "vedge"
        })
        for vedge in vedges:
            ports = vedge["ports"]
            for p in ports.values():
                self.add_link_for_vedge(vedge, p)

    def add_link_for_vedge(self, vedge, port):
        """Link a vEdge port to its vNIC, or - when no vNIC matches -
        try to link it to a vConnector or a pNIC instead.
        """
        # vNIC documents are keyed by "<host>-<port name>"
        vnic = self.inv.get_by_id(self.get_env(),
                                  vedge['host'] + '-' + port["name"])
        if not vnic:
            self.find_matching_vconnector(vedge, port)
            self.find_matching_pnic(vedge, port)
            return
        source = vnic["_id"]
        source_id = vnic["id"]
        target = vedge["_id"]
        target_id = vedge["id"]
        link_type = "vnic-vedge"
        link_name = vnic["name"] + "-" + vedge["name"]
        if "tag" in port:
            link_name += "-" + port["tag"]
        state = "up"  # TBD
        link_weight = 0  # TBD
        source_label = vnic["mac_address"]
        target_label = port["id"]
        self.create_link(self.get_env(), vedge["host"],
                         source, source_id, target, target_id,
                         link_type, link_name, state, link_weight,
                         source_label, target_label)

    def find_matching_vconnector(self, vedge, port):
        """Create a vconnector-vedge link for the given port, if a
        matching vConnector exists on the same host.
        """
        if self.configuration.has_network_plugin('VPP'):
            # VPP: the vConnector interface carries the port name itself
            vconnector_interface_name = port['name']
        else:
            # OVS: "qv*"-prefixed ports pair with a "qvb<base>" vConnector
            # interface (assumes 3-char prefix, e.g. "qvo" - TODO confirm)
            if not port["name"].startswith("qv"):
                return
            base_id = port["name"][3:]
            vconnector_interface_name = "qvb" + base_id
        vconnector = self.inv.find_items({
            "environment": self.get_env(),
            "type": "vconnector",
            "host": vedge['host'],
            'interfaces_names': vconnector_interface_name},
            get_single=True)
        if not vconnector:
            return
        source = vconnector["_id"]
        source_id = vconnector["id"]
        target = vedge["_id"]
        target_id = vedge["id"]
        link_type = "vconnector-vedge"
        link_name = "port-" + port["id"]
        if "tag" in port:
            link_name += "-" + port["tag"]
        state = "up"  # TBD
        link_weight = 0  # TBD
        source_label = vconnector_interface_name
        target_label = port["name"]
        mac_address = "Unknown"
        attributes = {'mac_address': mac_address}
        # take the MAC address from the matching vConnector interface
        for interface in vconnector['interfaces'].values():
            if vconnector_interface_name != interface['name']:
                continue
            if 'mac_address' not in interface:
                continue
            mac_address = interface['mac_address']
            attributes['mac_address'] = mac_address
            break
        if 'network' in vconnector:
            attributes['network'] = vconnector['network']
        self.create_link(self.get_env(), vedge["host"],
                         source, source_id, target, target_id,
                         link_type, link_name, state, link_weight,
                         source_label, target_label,
                         attributes)

    def find_matching_pnic(self, vedge, port):
        """Create a vedge-pnic link if the port maps to a known pNIC."""
        pname = port["name"]
        if "pnic" in vedge:
            # only the designated pNIC port of the vEdge may link to a pNIC
            if pname != vedge["pnic"]:
                return
        elif self.configuration.has_network_plugin('VPP'):
            # NOTE(review): intentionally empty branch - VPP ports fall
            # through to the name-based lookup below; confirm this is wanted
            pass
        pnic = self.inv.find_items({
            "environment": self.get_env(),
            "type": "pnic",
            "host": vedge["host"],
            "name": pname
        }, get_single=True)
        if not pnic:
            return
        source = vedge["_id"]
        source_id = vedge["id"]
        target = pnic["_id"]
        target_id = pnic["id"]
        link_type = "vedge-pnic"
        link_name = "Port-" + port["id"]
        # pNIC documents carry a "Link detected" attribute ("yes"/"no")
        state = "up" if pnic["Link detected"] == "yes" else "down"
        link_weight = 0  # TBD
        self.create_link(self.get_env(), vedge["host"],
                         source, source_id, target, target_id,
                         link_type, link_name, state, link_weight)
diff --git a/app/discover/find_links_for_vservice_vnics.py b/app/discover/find_links_for_vservice_vnics.py
new file mode 100644
index 0000000..e8a91c8
--- /dev/null
+++ b/app/discover/find_links_for_vservice_vnics.py
@@ -0,0 +1,56 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.find_links import FindLinks
+
+
class FindLinksForVserviceVnics(FindLinks):
    """Create "vservice-vnic" links between vServices and their vNICs."""

    def __init__(self):
        super().__init__()

    def add_links(self, search=None):
        """Find all vService vNICs (optionally narrowed by 'search')
        and create a link for each one.

        :param search: optional extra Mongo query terms; the environment,
            type and vnic_type terms are always enforced on top of it.
        """
        self.log.info("adding links of type: vservice-vnic")

        if search is None:
            search = {}

        search.update({"environment": self.get_env(),
                       "type": "vnic",
                       "vnic_type": "vservice_vnic"})

        vnics = self.inv.find_items(search)

        for v in vnics:
            self.add_link_for_vnic(v)

    def add_link_for_vnic(self, v):
        """Create a single vservice-vnic link for vNIC document 'v'.

        Skips vNICs on non-network hosts, vNICs without a network, and
        vNICs whose network or parent vService is missing from inventory.
        """
        host = self.inv.get_by_id(self.get_env(), v["host"])
        # links are only relevant for vNICs hosted on network nodes
        if "Network" not in host["host_type"]:
            return
        if "network" not in v:
            return
        network = self.inv.get_by_id(self.get_env(), v["network"])
        # get_by_id() returns an empty value when no document matches;
        # use truthiness rather than a brittle '== []' comparison
        if not network:
            return
        # parent_id has the form "<vservice id>-<suffix>"; strip the suffix
        vservice_id = v["parent_id"]
        vservice_id = vservice_id[:vservice_id.rindex('-')]
        vservice = self.inv.get_by_id(self.get_env(), vservice_id)
        if not vservice:
            # parent vService missing from inventory - nothing to link to
            return
        source = vservice["_id"]
        source_id = vservice_id
        target = v["_id"]
        target_id = v["id"]
        link_type = "vservice-vnic"
        link_name = network["name"]
        state = "up"  # TBD
        link_weight = 0  # TBD
        self.create_link(self.get_env(), v["host"],
                         source, source_id,
                         target, target_id,
                         link_type, link_name, state, link_weight,
                         extra_attributes={'network': v['network']})
diff --git a/app/discover/manager.py b/app/discover/manager.py
new file mode 100644
index 0000000..e37bb31
--- /dev/null
+++ b/app/discover/manager.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python3
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from abc import ABC, abstractmethod
+
+from utils.logging.file_logger import FileLogger
+from utils.logging.full_logger import FullLogger
+from utils.mongo_access import MongoAccess
+
+
class Manager(ABC):
    """Abstract base for long-running manager processes.

    Performs common setup (file logger directory, Mongo config file,
    full logger) and defines the configure()/do_action() lifecycle
    that run() drives.
    """

    MIN_INTERVAL = 0.1  # To prevent needlessly frequent scans

    def __init__(self, log_directory: str = None,
                 mongo_config_file: str = None):
        super().__init__()
        if log_directory:
            FileLogger.LOG_DIRECTORY = log_directory
        MongoAccess.config_file = mongo_config_file
        self.log = FullLogger()
        # the following are populated by concrete subclasses in configure()
        self.conf = None
        self.inv = None
        self.collection = None
        self._update_document = None
        self.interval = self.MIN_INTERVAL

    @abstractmethod
    def configure(self):
        """Prepare the manager (DB connections, collections, logging)."""
        pass

    @abstractmethod
    def do_action(self):
        """Perform the manager's main work."""
        pass

    def run(self):
        """Execute the lifecycle: configure, then perform the action."""
        self.configure()
        self.do_action()
diff --git a/app/discover/monitoring_mgr.py b/app/discover/monitoring_mgr.py
new file mode 100644
index 0000000..f3f737f
--- /dev/null
+++ b/app/discover/monitoring_mgr.py
@@ -0,0 +1,10 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+# Moved: this module's functionality was relocated elsewhere; file kept as a placeholder.
diff --git a/app/discover/network_agents_list.py b/app/discover/network_agents_list.py
new file mode 100644
index 0000000..c1c1b36
--- /dev/null
+++ b/app/discover/network_agents_list.py
@@ -0,0 +1,23 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from utils.mongo_access import MongoAccess
+
+
class NetworkAgentsList(MongoAccess):
    """Accessor for the "network_agent_types" collection."""

    def __init__(self):
        super().__init__()
        self.list = MongoAccess.db["network_agent_types"]

    def get_type(self, type):
        """Return the first network agent document of the given type,
        with its ObjectId stringified, or {} if none exists.

        The parameter name 'type' shadows the builtin but is kept
        for backward compatibility with keyword callers.
        """
        # find_one() replaces the original first-iteration-return loop
        doc = self.list.find_one({"type": type})
        if doc is None:
            return {}
        doc["_id"] = str(doc["_id"])
        return doc
diff --git a/app/discover/plugins/__init__.py b/app/discover/plugins/__init__.py
new file mode 100644
index 0000000..1e85a2a
--- /dev/null
+++ b/app/discover/plugins/__init__.py
@@ -0,0 +1,10 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
diff --git a/app/discover/scan.py b/app/discover/scan.py
new file mode 100755
index 0000000..72184ec
--- /dev/null
+++ b/app/discover/scan.py
@@ -0,0 +1,324 @@
+#!/usr/bin/env python3
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
+# Scan an object and insert/update in the inventory
+
+# phase 2: either scan default environment, or scan specific object
+
+import argparse
+import sys
+
+from discover.configuration import Configuration
+from discover.fetcher import Fetcher
+from discover.scan_error import ScanError
+from discover.scanner import Scanner
+from monitoring.setup.monitoring_setup_manager import MonitoringSetupManager
+from utils.constants import EnvironmentFeatures
+from utils.mongo_access import MongoAccess
+from utils.exceptions import ScanArgumentsError
+from utils.inventory_mgr import InventoryMgr
+from utils.ssh_connection import SshConnection
+from utils.util import setup_args
+
+
class ScanPlan:
    """Holds all parameters of a single scan request, populated either
    from an argparse namespace or from a plain dict.

    @DynamicAttrs
    """

    # Each tuple of COMMON_ATTRIBUTES consists of:
    # attr_name, arg_name and def_key
    #
    # attr_name - name of class attribute to be set
    # arg_name - corresponding name of argument (equal to attr_name if not set)
    # def_key - corresponding key in DEFAULTS (equal to attr_name if not set)
    COMMON_ATTRIBUTES = (("loglevel",),
                         ("inventory_only",),
                         ("links_only",),
                         ("cliques_only",),
                         ("monitoring_setup_only",),
                         ("clear",),
                         ("clear_all",),
                         ("object_type", "type", "type"),
                         ("env",),
                         ("object_id", "id", "env"),
                         ("parent_id",),
                         ("type_to_scan", "parent_type", "parent_type"),
                         ("id_field",),
                         ("scan_self",),
                         ("child_type", "type", "type"))

    def __init__(self, args=None):
        self.obj = None
        self.scanner_type = None
        self.args = args
        for attribute in self.COMMON_ATTRIBUTES:
            setattr(self, attribute[0], None)

        # dict args come from the scan manager; otherwise argparse namespace
        if isinstance(args, dict):
            self._init_from_dict()
        else:
            self._init_from_args()
        self._validate_args()

    def _validate_args(self):
        """Raise ScanArgumentsError if mutually exclusive flags are set."""
        errors = []
        if (self.inventory_only and self.links_only) \
                or (self.inventory_only and self.cliques_only) \
                or (self.links_only and self.cliques_only):
            errors.append("Only one of (inventory_only, links_only, "
                          "cliques_only) can be True.")
        if errors:
            raise ScanArgumentsError("\n".join(errors))

    def _set_arg_from_dict(self, attribute_name, arg_name=None,
                           default_key=None):
        # missing dict keys fall back to ScanController.DEFAULTS
        default_attr = default_key if default_key else attribute_name
        setattr(self, attribute_name,
                self.args.get(arg_name if arg_name else attribute_name,
                              ScanController.DEFAULTS[default_attr]))

    def _set_arg_from_cmd(self, attribute_name, arg_name=None):
        # argparse already applied defaults, so read the attribute directly
        setattr(self,
                attribute_name,
                getattr(self.args, arg_name if arg_name else attribute_name))

    def _set_arg_from_form(self, attribute_name, arg_name=None,
                           default_key=None):
        # for form-like args objects exposing a getvalue() interface
        default_attr = default_key if default_key else attribute_name
        setattr(self,
                attribute_name,
                self.args.getvalue(arg_name if arg_name else attribute_name,
                                   ScanController.DEFAULTS[default_attr]))

    def _init_from_dict(self):
        for arg in self.COMMON_ATTRIBUTES:
            self._set_arg_from_dict(*arg)
        self.child_id = None

    def _init_from_args(self):
        for arg in self.COMMON_ATTRIBUTES:
            self._set_arg_from_cmd(*arg[:2])
        self.child_id = None
+
+
class ScanController(Fetcher):
    """Drives a single scan: parses arguments, builds a ScanPlan and
    runs the inventory/links/cliques scanning stages.
    """

    # fallback values for all supported arguments
    DEFAULTS = {
        "env": "",
        "mongo_config": "",
        "type": "",
        "inventory": "inventory",
        "scan_self": False,
        "parent_id": "",
        "parent_type": "",
        "id_field": "id",
        "loglevel": "INFO",
        "inventory_only": False,
        "links_only": False,
        "cliques_only": False,
        "monitoring_setup_only": False,
        "clear": False,
        "clear_all": False
    }

    def __init__(self):
        super().__init__()
        # set up in run() once the Mongo configuration is known
        self.conf = None
        self.inv = None

    def get_args(self):
        """Build the command-line parser and return parsed arguments."""
        # try to read scan plan from command line parameters
        parser = argparse.ArgumentParser()
        parser.add_argument("-m", "--mongo_config", nargs="?", type=str,
                            default=self.DEFAULTS["mongo_config"],
                            help="name of config file " +
                                 "with MongoDB server access details")
        parser.add_argument("-e", "--env", nargs="?", type=str,
                            default=self.DEFAULTS["env"],
                            help="name of environment to scan \n"
                                 "(default: " + self.DEFAULTS["env"] + ")")
        parser.add_argument("-t", "--type", nargs="?", type=str,
                            default=self.DEFAULTS["type"],
                            help="type of object to scan \n"
                                 "(default: environment)")
        parser.add_argument("-y", "--inventory", nargs="?", type=str,
                            default=self.DEFAULTS["inventory"],
                            help="name of inventory collection \n"
                                 "(default: 'inventory')")
        parser.add_argument("-s", "--scan_self", action="store_true",
                            help="scan changes to a specific object \n"
                                 "(default: False)")
        # NOTE(review): default reuses DEFAULTS["env"] (the empty string);
        # presumably intentional since both default to "" - confirm
        parser.add_argument("-i", "--id", nargs="?", type=str,
                            default=self.DEFAULTS["env"],
                            help="ID of object to scan (when scan_self=true)")
        parser.add_argument("-p", "--parent_id", nargs="?", type=str,
                            default=self.DEFAULTS["parent_id"],
                            help="ID of parent object (when scan_self=true)")
        parser.add_argument("-a", "--parent_type", nargs="?", type=str,
                            default=self.DEFAULTS["parent_type"],
                            help="type of parent object (when scan_self=true)")
        parser.add_argument("-f", "--id_field", nargs="?", type=str,
                            default=self.DEFAULTS["id_field"],
                            help="name of ID field (when scan_self=true) \n"
                                 "(default: 'id', use 'name' for projects)")
        parser.add_argument("-l", "--loglevel", nargs="?", type=str,
                            default=self.DEFAULTS["loglevel"],
                            help="logging level \n(default: '{}')"
                            .format(self.DEFAULTS["loglevel"]))
        parser.add_argument("--clear", action="store_true",
                            help="clear all data related to "
                                 "the specified environment prior to scanning\n"
                                 "(default: False)")
        parser.add_argument("--clear_all", action="store_true",
                            help="clear all data prior to scanning\n"
                                 "(default: False)")
        parser.add_argument("--monitoring_setup_only", action="store_true",
                            help="do only monitoring setup deployment \n"
                                 "(default: False)")

        # At most one of these arguments may be present
        scan_only_group = parser.add_mutually_exclusive_group()
        scan_only_group.add_argument("--inventory_only", action="store_true",
                                     help="do only scan to inventory\n" +
                                          "(default: False)")
        scan_only_group.add_argument("--links_only", action="store_true",
                                     help="do only links creation \n" +
                                          "(default: False)")
        scan_only_group.add_argument("--cliques_only", action="store_true",
                                     help="do only cliques creation \n" +
                                          "(default: False)")

        return parser.parse_args()

    def get_scan_plan(self, args):
        """Create a ScanPlan from 'args' and resolve its scanner details."""
        # PyCharm type checker can't reliably check types of document
        # noinspection PyTypeChecker
        return self.prepare_scan_plan(ScanPlan(args))

    def prepare_scan_plan(self, plan):
        """Resolve the object type, scanner class name and target object
        for the given plan, fetching documents from inventory as needed.

        Raises ScanArgumentsError/ValueError when a referenced object is
        not found in the inventory.
        """
        # Find out object type if not specified in arguments
        if not plan.object_type:
            if not plan.object_id:
                plan.object_type = "environment"
            else:
                # If we scan a specific object, it has to exist in db
                scanned_object = self.inv.get_by_id(plan.env, plan.object_id)
                if not scanned_object:
                    exc_msg = "No object found with specified id: '{}'" \
                        .format(plan.object_id)
                    raise ScanArgumentsError(exc_msg)
                plan.object_type = scanned_object["type"]
                plan.parent_id = scanned_object["parent_id"]
                plan.type_to_scan = scanned_object["parent_type"]

        class_module = plan.object_type
        if not plan.scan_self:
            plan.scan_self = plan.object_type != "environment"

        # scanner class names are CamelCase versions of the object type
        plan.object_type = plan.object_type.title().replace("_", "")

        if not plan.scan_self:
            plan.child_type = None
        else:
            # scanning a specific object: scan its parent with this child
            plan.child_id = plan.object_id
            plan.object_id = plan.parent_id
            if plan.type_to_scan.endswith("_folder"):
                class_module = plan.child_type + "s_root"
            else:
                class_module = plan.type_to_scan
            plan.object_type = class_module.title().replace("_", "")

        if class_module == "environment":
            plan.obj = {"id": plan.env}
        else:
            # fetch object from inventory
            obj = self.inv.get_by_id(plan.env, plan.object_id)
            if not obj:
                raise ValueError("No match for object ID: {}"
                                 .format(plan.object_id))
            plan.obj = obj

        plan.scanner_type = "Scan" + plan.object_type
        return plan

    def run(self, args: dict = None):
        """Run the scan described by 'args' (or by the command line).

        Returns a (success, message) tuple instead of raising, so both
        CLI and scan-manager callers can report the outcome.
        """
        args = setup_args(args, self.DEFAULTS, self.get_args)
        # After this setup we assume args dictionary has all keys
        # defined in self.DEFAULTS

        try:
            MongoAccess.set_config_file(args['mongo_config'])
            self.inv = InventoryMgr()
            self.inv.set_collections(args['inventory'])
            self.conf = Configuration()
        except FileNotFoundError as e:
            return False, 'Mongo configuration file not found: {}'\
                .format(str(e))

        scan_plan = self.get_scan_plan(args)
        if scan_plan.clear or scan_plan.clear_all:
            self.inv.clear(scan_plan)
        self.conf.log.set_loglevel(scan_plan.loglevel)

        env_name = scan_plan.env
        self.conf.use_env(env_name)

        # generate ScanObject Class and instance.
        scanner = Scanner()
        scanner.set_env(env_name)

        # decide what scanning operations to do
        inventory_only = scan_plan.inventory_only
        links_only = scan_plan.links_only
        cliques_only = scan_plan.cliques_only
        monitoring_setup_only = scan_plan.monitoring_setup_only
        run_all = False if inventory_only or links_only or cliques_only \
            or monitoring_setup_only else True

        # setup monitoring server
        monitoring = \
            self.inv.is_feature_supported(env_name,
                                          EnvironmentFeatures.MONITORING)
        if monitoring:
            self.inv.monitoring_setup_manager = \
                MonitoringSetupManager(env_name)
            self.inv.monitoring_setup_manager.server_setup()

        # do the actual scanning
        try:
            if inventory_only or run_all:
                scanner.run_scan(
                    scan_plan.scanner_type,
                    scan_plan.obj,
                    scan_plan.id_field,
                    scan_plan.child_id,
                    scan_plan.child_type)
            if links_only or run_all:
                scanner.scan_links()
            if cliques_only or run_all:
                scanner.scan_cliques()
            if monitoring:
                if monitoring_setup_only:
                    self.inv.monitoring_setup_manager.simulate_track_changes()
                if not (inventory_only or links_only or cliques_only):
                    scanner.deploy_monitoring_setup()
        except ScanError as e:
            return False, "scan error: " + str(e)
        SshConnection.disconnect_all()
        return True, 'ok'
+
+
if __name__ == '__main__':
    # CLI entry point: run a scan and exit non-zero on failure
    controller = ScanController()
    success, message = controller.run()
    if not success:
        controller.log.error(message)
    sys.exit(0 if success else 1)
diff --git a/app/discover/scan_error.py b/app/discover/scan_error.py
new file mode 100644
index 0000000..2e04275
--- /dev/null
+++ b/app/discover/scan_error.py
@@ -0,0 +1,11 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
class ScanError(Exception):
    """Raised when a scan operation fails and should be aborted."""
diff --git a/app/discover/scan_manager.py b/app/discover/scan_manager.py
new file mode 100644
index 0000000..b6ad782
--- /dev/null
+++ b/app/discover/scan_manager.py
@@ -0,0 +1,294 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+import argparse
+import datetime
+
+import time
+
+import pymongo
+from functools import partial
+
+from discover.manager import Manager
+from utils.constants import ScanStatus, EnvironmentFeatures
+from utils.exceptions import ScanArgumentsError
+from utils.inventory_mgr import InventoryMgr
+from utils.logging.file_logger import FileLogger
+from utils.mongo_access import MongoAccess
+from discover.scan import ScanController
+
+
class ScanManager(Manager):
    """Polls the scans collection and executes pending scan requests,
    also submitting scan requests for due scheduled scans.
    """

    DEFAULTS = {
        "mongo_config": "",
        "scans": "scans",
        "scheduled_scans": "scheduled_scans",
        "environments": "environments_config",
        "interval": 1,
        "loglevel": "INFO"
    }

    def __init__(self):
        self.args = self.get_args()
        super().__init__(log_directory=self.args.log_directory,
                         mongo_config_file=self.args.mongo_config)
        # populated in configure()
        self.db_client = None
        self.environments_collection = None

    @staticmethod
    def get_args():
        """Build and parse the manager's command-line arguments."""
        parser = argparse.ArgumentParser()
        parser.add_argument("-m", "--mongo_config", nargs="?", type=str,
                            default=ScanManager.DEFAULTS["mongo_config"],
                            help="Name of config file " +
                                 "with MongoDB server access details")
        parser.add_argument("-c", "--scans_collection", nargs="?", type=str,
                            default=ScanManager.DEFAULTS["scans"],
                            help="Scans collection to read from")
        parser.add_argument("-s", "--scheduled_scans_collection", nargs="?",
                            type=str,
                            default=ScanManager.DEFAULTS["scheduled_scans"],
                            help="Scans collection to read from")
        parser.add_argument("-e", "--environments_collection", nargs="?",
                            type=str,
                            default=ScanManager.DEFAULTS["environments"],
                            help="Environments collection to update "
                                 "after scans")
        parser.add_argument("-i", "--interval", nargs="?", type=float,
                            default=ScanManager.DEFAULTS["interval"],
                            help="Interval between collection polls"
                                 "(must be more than {} seconds)"
                            .format(ScanManager.MIN_INTERVAL))
        parser.add_argument("-l", "--loglevel", nargs="?", type=str,
                            default=ScanManager.DEFAULTS["loglevel"],
                            help="Logging level \n(default: '{}')"
                            .format(ScanManager.DEFAULTS["loglevel"]))
        parser.add_argument("-d", "--log_directory", nargs="?", type=str,
                            default=FileLogger.LOG_DIRECTORY,
                            help="File logger directory \n(default: '{}')"
                            .format(FileLogger.LOG_DIRECTORY))
        args = parser.parse_args()
        return args

    def configure(self):
        """Connect to MongoDB and resolve all collections and settings."""
        self.db_client = MongoAccess()
        self.inv = InventoryMgr()
        self.inv.set_collections()
        self.scans_collection = self.db_client.db[self.args.scans_collection]
        self.scheduled_scans_collection = \
            self.db_client.db[self.args.scheduled_scans_collection]
        self.environments_collection = \
            self.db_client.db[self.args.environments_collection]
        self._update_document = \
            partial(MongoAccess.update_document, self.scans_collection)
        # enforce a minimal polling interval
        self.interval = max(self.MIN_INTERVAL, self.args.interval)
        self.log.set_loglevel(self.args.loglevel)

        self.log.info("Started ScanManager with following configuration:\n"
                      "Mongo config file path: {0.args.mongo_config}\n"
                      "Scans collection: {0.scans_collection.name}\n"
                      "Environments collection: "
                      "{0.environments_collection.name}\n"
                      "Polling interval: {0.interval} second(s)"
                      .format(self))

    def _build_scan_args(self, scan_request: dict):
        """Translate a scan request document into ScanController.run() args."""
        args = {
            'mongo_config': self.args.mongo_config
        }

        def set_arg(name_from: str, name_to: str = None):
            # copy only truthy values so ScanController defaults apply
            if name_to is None:
                name_to = name_from
            val = scan_request.get(name_from)
            if val:
                args[name_to] = val

        set_arg("object_id", "id")
        set_arg("log_level", "loglevel")
        set_arg("environment", "env")
        set_arg("scan_only_inventory", "inventory_only")
        set_arg("scan_only_links", "links_only")
        set_arg("scan_only_cliques", "cliques_only")
        set_arg("inventory")
        set_arg("clear")
        set_arg("clear_all")

        return args

    def _finalize_scan(self, scan_request: dict, status: ScanStatus,
                       scanned: bool):
        """Persist the final status of a scan request."""
        scan_request['status'] = status.value
        self._update_document(scan_request)
        # If no object id is present, it's a full env scan.
        # We need to update environments collection
        # to reflect the scan results.
        if not scan_request.get('id'):
            self.environments_collection\
                .update_one(filter={'name': scan_request.get('environment')},
                            update={'$set': {'scanned': scanned}})

    def _fail_scan(self, scan_request: dict):
        self._finalize_scan(scan_request, ScanStatus.FAILED, False)

    def _complete_scan(self, scan_request: dict):
        self._finalize_scan(scan_request, ScanStatus.COMPLETED, True)

    # PyCharm type checker can't reliably check types of document
    # noinspection PyTypeChecker
    def _clean_up(self):
        """Fail any scans left in RUNNING state (e.g. after a crash)."""
        # Find and fail all running scans
        running_scans = list(self
                             .scans_collection
                             .find(filter={'status': ScanStatus.RUNNING.value}))
        self.scans_collection \
            .update_many(filter={'_id': {'$in': [scan['_id']
                                                 for scan
                                                 in running_scans]}},
                         update={'$set': {'status': ScanStatus.FAILED.value}})

        # Find all environments connected to failed full env scans
        env_scans = [scan['environment']
                     for scan in running_scans
                     if not scan.get('object_id')
                     and scan.get('environment')]

        # Set 'scanned' flag in those envs to false
        if env_scans:
            self.environments_collection\
                .update_many(filter={'name': {'$in': env_scans}},
                             update={'$set': {'scanned': False}})

    # scheduling frequencies and their corresponding periods
    INTERVALS = {
        'YEARLY': datetime.timedelta(days=365.25),
        'MONTHLY': datetime.timedelta(days=365.25/12),
        'WEEKLY': datetime.timedelta(weeks=1),
        'DAILY': datetime.timedelta(days=1),
        'HOURLY': datetime.timedelta(hours=1)
    }

    def _submit_scan_request_for_schedule(self, scheduled_scan, interval, ts):
        """Insert a new scan request derived from a scheduled scan."""
        # note: 'interval' is currently unused here - TODO confirm
        scans = self.scans_collection
        new_scan = {
            'status': 'submitted',
            'log_level': scheduled_scan['log_level'],
            'clear': scheduled_scan['clear'],
            'scan_only_inventory': scheduled_scan['scan_only_inventory'],
            'scan_only_links': scheduled_scan['scan_only_links'],
            'scan_only_cliques': scheduled_scan['scan_only_cliques'],
            'submit_timestamp': ts,
            'environment': scheduled_scan['environment'],
            'inventory': 'inventory'
        }
        scans.insert_one(new_scan)

    def _set_scheduled_requests_next_run(self, scheduled_scan, interval, ts):
        """Advance a scheduled scan's next-run timestamp by its interval."""
        scheduled_scan['scheduled_timestamp'] = ts + self.INTERVALS[interval]
        doc_id = scheduled_scan.pop('_id')
        # NOTE(review): collection.update() is deprecated in newer pymongo;
        # replace_one/update_one is the modern equivalent
        self.scheduled_scans_collection.update({'_id': doc_id}, scheduled_scan)

    def _prepare_scheduled_requests_for_interval(self, interval):
        """Submit due scheduled scans of the given frequency and set the
        next-run timestamp for newly added ones.
        """
        now = datetime.datetime.utcnow()

        # first, submit a scan request where the scheduled time has come
        condition = {'$and': [
            {'freq': interval},
            {'scheduled_timestamp': {'$lte': now}}
        ]}
        matches = self.scheduled_scans_collection.find(condition) \
            .sort('scheduled_timestamp', pymongo.ASCENDING)
        for match in matches:
            self._submit_scan_request_for_schedule(match, interval, now)
            self._set_scheduled_requests_next_run(match, interval, now)

        # now set scheduled time where it was not set yet (new scheduled scans)
        condition = {'$and': [
            {'freq': interval},
            {'scheduled_timestamp': {'$exists': False}}
        ]}
        matches = self.scheduled_scans_collection.find(condition)
        for match in matches:
            self._set_scheduled_requests_next_run(match, interval, now)

    def _prepare_scheduled_requests(self):
        # see if any scheduled request is waiting to be submitted
        for interval in self.INTERVALS.keys():
            self._prepare_scheduled_requests_for_interval(interval)

    def do_action(self):
        """Main polling loop: pick up pending scan requests and run them."""
        self._clean_up()
        try:
            while True:
                self._prepare_scheduled_requests()

                # Find a pending request that is waiting the longest time
                results = self.scans_collection \
                    .find({'status': ScanStatus.PENDING.value,
                           'submit_timestamp': {'$ne': None}}) \
                    .sort("submit_timestamp", pymongo.ASCENDING) \
                    .limit(1)

                # If no scans are pending, sleep for some time
                # NOTE(review): cursor.count() is deprecated in newer pymongo
                if results.count() == 0:
                    time.sleep(self.interval)
                else:
                    scan_request = results[0]
                    if not self.inv.is_feature_supported(scan_request.get('environment'),
                                                         EnvironmentFeatures.SCANNING):
                        self.log.error("Scanning is not supported for env '{}'"
                                       .format(scan_request.get('environment')))
                        self._fail_scan(scan_request)
                        continue

                    scan_request['start_timestamp'] = datetime.datetime.utcnow()
                    scan_request['status'] = ScanStatus.RUNNING.value
                    self._update_document(scan_request)

                    # Prepare scan arguments and run the scan with them
                    try:
                        scan_args = self._build_scan_args(scan_request)

                        self.log.info("Starting scan for '{}' environment"
                                      .format(scan_args.get('env')))
                        self.log.debug("Scan arguments: {}".format(scan_args))
                        result, message = ScanController().run(scan_args)
                    except ScanArgumentsError as e:
                        self.log.error("Scan request '{id}' "
                                       "has invalid arguments. "
                                       "Errors:\n{errors}"
                                       .format(id=scan_request['_id'],
                                               errors=e))
                        self._fail_scan(scan_request)
                    except Exception as e:
                        self.log.exception(e)
                        self.log.error("Scan request '{}' has failed."
                                       .format(scan_request['_id']))
                        self._fail_scan(scan_request)
                    else:
                        # Check is scan returned success
                        if not result:
                            self.log.error(message)
                            self.log.error("Scan request '{}' has failed."
                                           .format(scan_request['_id']))
                            self._fail_scan(scan_request)
                            continue

                        # update the status and timestamps.
                        self.log.info("Request '{}' has been scanned."
                                      .format(scan_request['_id']))
                        end_time = datetime.datetime.utcnow()
                        scan_request['end_timestamp'] = end_time
                        self._complete_scan(scan_request)
        finally:
            # ensure no scan is left marked RUNNING on shutdown
            self._clean_up()
+
+
if __name__ == "__main__":
    # CLI entry point for the scan manager daemon
    manager = ScanManager()
    manager.run()
diff --git a/app/discover/scan_metadata_parser.py b/app/discover/scan_metadata_parser.py
new file mode 100644
index 0000000..df27e18
--- /dev/null
+++ b/app/discover/scan_metadata_parser.py
@@ -0,0 +1,202 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from discover.fetchers.folder_fetcher import FolderFetcher
+from utils.metadata_parser import MetadataParser
+from utils.mongo_access import MongoAccess
+from utils.util import ClassResolver
+
+
+class ScanMetadataParser(MetadataParser):
+
+ SCANNERS_PACKAGE = 'scanners_package'
+ SCANNERS_FILE = 'scanners.json'
+ SCANNERS = 'scanners'
+
+ TYPE = 'type'
+ FETCHER = 'fetcher'
+ CHILDREN_SCANNER = 'children_scanner'
+ ENVIRONMENT_CONDITION = 'environment_condition'
+ OBJECT_ID_TO_USE_IN_CHILD = 'object_id_to_use_in_child'
+
+ COMMENT = '_comment'
+
+ REQUIRED_SCANNER_ATTRIBUTES = [TYPE, FETCHER]
+ ALLOWED_SCANNER_ATTRIBUTES = [TYPE, FETCHER, CHILDREN_SCANNER,
+ ENVIRONMENT_CONDITION,
+ OBJECT_ID_TO_USE_IN_CHILD]
+
+ MECHANISM_DRIVER = 'mechanism_driver'
+
+ def __init__(self, inventory_mgr):
+ super().__init__()
+ self.inv = inventory_mgr
+ self.constants = {}
+
+ def get_required_fields(self):
+ return [self.SCANNERS_PACKAGE, self.SCANNERS]
+
+ def validate_fetcher(self, scanner_name: str, scan_type: dict,
+ type_index: int, package: str):
+ fetcher = scan_type.get(self.FETCHER, '')
+ if not fetcher:
+ self.add_error('missing or empty fetcher in scanner {} type #{}'
+ .format(scanner_name, str(type_index)))
+ elif isinstance(fetcher, str):
+ try:
+ module_name = ClassResolver.get_module_file_by_class_name(fetcher)
+ fetcher_package = module_name.split("_")[0]
+ if package:
+ fetcher_package = ".".join((package, fetcher_package))
+ instance = ClassResolver.get_instance_of_class(package_name=fetcher_package,
+ module_name=module_name,
+ class_name=fetcher)
+ except ValueError:
+ instance = None
+ if not instance:
+ self.add_error('failed to find fetcher class {} in scanner {}'
+ ' type #{}'
+ .format(fetcher, scanner_name, type_index))
+ scan_type[self.FETCHER] = instance
+ elif isinstance(fetcher, dict):
+ is_folder = fetcher.get('folder', False)
+ if not is_folder:
+ self.add_error('scanner {} type #{}: '
+ 'only folder dict accepted in fetcher'
+ .format(scanner_name, type_index))
+ else:
+ instance = FolderFetcher(fetcher['types_name'],
+ fetcher['parent_type'],
+ fetcher.get('text', ''))
+ scan_type[self.FETCHER] = instance
+ else:
+ self.add_error('incorrect type of fetcher for scanner {} type #{}'
+ .format(scanner_name, type_index))
+
+ def validate_children_scanner(self, scanner_name: str, type_index: int,
+ scanners: dict, scan_type: dict):
+ scanner = scanners[scanner_name]
+ if 'children_scanner' in scan_type:
+ children_scanner = scan_type.get('children_scanner')
+ if not isinstance(children_scanner, str):
+ self.add_error('scanner {} type #{}: '
+ 'children_scanner must be a string'
+ .format(scanner_name, type_index))
+ elif children_scanner not in scanners:
+ self.add_error('scanner {} type #{}: '
+ 'children_scanner {} not found '
+ .format(scanner_name, type_index,
+ children_scanner))
+
+ def validate_environment_condition(self, scanner_name: str, type_index: int,
+ scanner: dict):
+ if self.ENVIRONMENT_CONDITION not in scanner:
+ return
+ condition = scanner[self.ENVIRONMENT_CONDITION]
+ if not isinstance(condition, dict):
+ self.add_error('scanner {} type #{}: condition must be dict'
+ .format(scanner_name, str(type_index)))
+ return
+ if self.MECHANISM_DRIVER in condition.keys():
+ drivers = condition[self.MECHANISM_DRIVER]
+ if not isinstance(drivers, list):
+ self.add_error('scanner {} type #{}: '
+ '{} must be a list of strings'
+ .format(scanner_name, type_index,
+ self.MECHANISM_DRIVER))
+ if not all((isinstance(driver, str) for driver in drivers)):
+ self.add_error('scanner {} type #{}: '
+ '{} must be a list of strings'
+ .format(scanner_name, type_index,
+ self.MECHANISM_DRIVER))
+ else:
+ for driver in drivers:
+ self.validate_constant(scanner_name,
+ driver,
+ 'mechanism_drivers',
+ 'mechanism drivers')
+
+ def validate_scanner(self, scanners: dict, name: str, package: str):
+ scanner = scanners.get(name)
+ if not scanner:
+ self.add_error('failed to find scanner: {}')
+ return
+
+ # make sure only allowed attributes are supplied
+ for i in range(0, len(scanner)):
+ scan_type = scanner[i]
+ self.validate_scan_type(scanners, name, i+1, scan_type, package)
+
+ def validate_scan_type(self, scanners: dict, scanner_name: str,
+ type_index: int, scan_type: dict, package: str):
+ # keep previous error count to know if errors were detected here
+ error_count = len(self.errors)
+ # ignore comments
+ scan_type.pop(self.COMMENT, '')
+ for attribute in scan_type.keys():
+ if attribute not in self.ALLOWED_SCANNER_ATTRIBUTES:
+ self.add_error('unknown attribute {} '
+ 'in scanner {}, type #{}'
+ .format(attribute, scanner_name,
+ str(type_index)))
+
+ # make sure required attributes are supplied
+ for attribute in ScanMetadataParser.REQUIRED_SCANNER_ATTRIBUTES:
+ if attribute not in scan_type:
+ self.add_error('scanner {}, type #{}: '
+ 'missing attribute "{}"'
+ .format(scanner_name, str(type_index),
+ attribute))
+ # the following checks depend on previous checks,
+ # so return if previous checks found errors
+ if len(self.errors) > error_count:
+ return
+
+ # type must be valid object type
+ self.validate_constant(scanner_name, scan_type[self.TYPE],
+ 'scan_object_types', 'types')
+ self.validate_fetcher(scanner_name, scan_type, type_index, package)
+ self.validate_children_scanner(scanner_name, type_index, scanners,
+ scan_type)
+ self.validate_environment_condition(scanner_name, type_index,
+ scan_type)
+
+ def get_constants(self, scanner_name, items_desc, constant_type):
+ if not self.constants.get(constant_type):
+ constants = MongoAccess.db['constants']
+ values_list = constants.find_one({'name': constant_type})
+ if not values_list:
+ raise ValueError('scanner {}: '
+ 'could not find {} list in DB'
+ .format(scanner_name, items_desc))
+ self.constants[constant_type] = values_list
+ return self.constants[constant_type]
+
+ def validate_constant(self,
+ scanner_name: str,
+ value_to_check: str,
+ constant_type: str,
+ items_desc: str = None):
+ values_list = self.get_constants(scanner_name, items_desc,
+ constant_type)
+ values = [t['value'] for t in values_list['data']]
+ if value_to_check not in values:
+ self.add_error('scanner {}: value not in {}: {}'
+ .format(scanner_name, items_desc, value_to_check))
+
+ def validate_metadata(self, metadata: dict) -> bool:
+ super().validate_metadata(metadata)
+ scanners = metadata.get(self.SCANNERS, {})
+ package = metadata.get(self.SCANNERS_PACKAGE)
+ if not scanners:
+ self.add_error('no scanners found in scanners list')
+ else:
+ for name in scanners.keys():
+ self.validate_scanner(scanners, name, package)
+ return len(self.errors) == 0
diff --git a/app/discover/scanner.py b/app/discover/scanner.py
new file mode 100644
index 0000000..1b7cd51
--- /dev/null
+++ b/app/discover/scanner.py
@@ -0,0 +1,253 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+# base class for scanners
+
+import json
+import queue
+import os
+import traceback
+
+from discover.clique_finder import CliqueFinder
+from discover.configuration import Configuration
+from discover.fetcher import Fetcher
+from discover.find_links_for_instance_vnics import FindLinksForInstanceVnics
+from discover.find_links_for_oteps import FindLinksForOteps
+from discover.find_links_for_pnics import FindLinksForPnics
+from discover.find_links_for_vconnectors import FindLinksForVconnectors
+from discover.find_links_for_vedges import FindLinksForVedges
+from discover.find_links_for_vservice_vnics import FindLinksForVserviceVnics
+from discover.scan_error import ScanError
+from discover.scan_metadata_parser import ScanMetadataParser
+from utils.constants import EnvironmentFeatures
+from utils.inventory_mgr import InventoryMgr
+from utils.util import ClassResolver
+
+
+class Scanner(Fetcher):
+ config = None
+ environment = None
+ env = None
+ root_patern = None
+ scan_queue = queue.Queue()
+ scan_queue_track = {}
+
+ def __init__(self):
+ """
+ Scanner is the base class for scanners.
+ """
+ super().__init__()
+ self.config = Configuration()
+ self.inv = InventoryMgr()
+ self.scanners_package = None
+ self.scanners = {}
+ self.load_metadata()
+
+ def scan(self, scanner_type, obj, id_field="id",
+ limit_to_child_id=None, limit_to_child_type=None):
+ types_to_fetch = self.get_scanner(scanner_type)
+ types_children = []
+ if not limit_to_child_type:
+ limit_to_child_type = []
+ elif isinstance(limit_to_child_type, str):
+ limit_to_child_type = [limit_to_child_type]
+ try:
+ for t in types_to_fetch:
+ if limit_to_child_type and t["type"] not in limit_to_child_type:
+ continue
+ children = self.scan_type(t, obj, id_field)
+ if limit_to_child_id:
+ children = [c for c in children
+ if c[id_field] == limit_to_child_id]
+ if not children:
+ continue
+ types_children.append({"type": t["type"],
+ "children": children})
+ except ValueError:
+ return False
+ if limit_to_child_id and len(types_children) > 0:
+ t = types_children[0]
+ children = t["children"]
+ return children[0]
+ return obj
+
+ def check_type_env(self, type_to_fetch):
+ # check if type is to be run in this environment
+ if "environment_condition" not in type_to_fetch:
+ return True
+ env_cond = type_to_fetch.get("environment_condition", {})
+ if not env_cond:
+ return True
+ if not isinstance(env_cond, dict):
+ self.log.warn('illegal environment_condition given '
+ 'for type {}'.format(type_to_fetch['type']))
+ return True
+ conf = self.config.get_env_config()
+ for attr, required_val in env_cond.items():
+ if attr == "mechanism_drivers":
+ if "mechanism_drivers" not in conf:
+ self.log.warn('illegal environment configuration: '
+ 'missing mechanism_drivers')
+ return False
+ if not isinstance(required_val, list):
+ required_val = [required_val]
+ return bool(set(required_val) & set(conf["mechanism_drivers"]))
+ elif attr not in conf or conf[attr] != required_val:
+ return False
+ # no check failed
+ return True
+
+ def scan_type(self, type_to_fetch, parent, id_field):
+ # check if type is to be run in this environment
+ if not self.check_type_env(type_to_fetch):
+ return []
+
+ if not parent:
+ obj_id = None
+ else:
+ obj_id = str(parent[id_field])
+ if not obj_id or not obj_id.rstrip():
+ raise ValueError("Object missing " + id_field + " attribute")
+
+ # get Fetcher instance
+ fetcher = type_to_fetch["fetcher"]
+ fetcher.set_env(self.get_env())
+
+ # get children_scanner instance
+ children_scanner = type_to_fetch.get("children_scanner")
+
+ escaped_id = fetcher.escape(str(obj_id)) if obj_id else obj_id
+ self.log.info(
+ "scanning : type=%s, parent: (type=%s, name=%s, id=%s)",
+ type_to_fetch["type"],
+ parent.get('type', 'environment'),
+ parent.get('name', ''),
+ escaped_id)
+
+ # fetch OpenStack data from environment by CLI, API or MySQL
+ # or physical devices data from ACI API
+ # It depends on the Fetcher's config.
+ try:
+ db_results = fetcher.get(escaped_id)
+ except Exception as e:
+ self.log.error("Error while scanning : " +
+ "fetcher=%s, " +
+ "type=%s, " +
+ "parent: (type=%s, name=%s, id=%s), " +
+ "error: %s",
+ fetcher.__class__.__name__,
+ type_to_fetch["type"],
+ "environment" if "type" not in parent
+ else parent["type"],
+ "" if "name" not in parent else parent["name"],
+ escaped_id,
+ e)
+ traceback.print_exc()
+ raise ScanError(str(e))
+
+ # format results
+ if isinstance(db_results, dict):
+ results = db_results["rows"] if db_results["rows"] else [db_results]
+ elif isinstance(db_results, str):
+ results = json.loads(db_results)
+ else:
+ results = db_results
+
+ # get child_id_field
+ try:
+ child_id_field = type_to_fetch["object_id_to_use_in_child"]
+ except KeyError:
+ child_id_field = "id"
+
+ environment = self.get_env()
+ children = []
+
+ for o in results:
+ saved = self.inv.save_inventory_object(o,
+ parent=parent,
+ environment=environment,
+ type_to_fetch=type_to_fetch)
+
+ if saved:
+ # add objects into children list.
+ children.append(o)
+
+ # put children scanner into queue
+ if children_scanner:
+ self.queue_for_scan(o, child_id_field, children_scanner)
+ return children
+
+ # scanning queued items, rather than going depth-first (DFS)
+ # this is done to allow collecting all required data for objects
+ # before continuing to next level
+ # for example, get host ID from API os-hypervisors call, so later
+ # we can use this ID in the "os-hypervisors/<ID>/servers" call
+ @staticmethod
+ def queue_for_scan(o, child_id_field, children_scanner):
+ if o["id"] in Scanner.scan_queue_track:
+ return
+ Scanner.scan_queue_track[o["type"] + ";" + o["id"]] = 1
+ Scanner.scan_queue.put({"object": o,
+ "child_id_field": child_id_field,
+ "scanner": children_scanner})
+
+ def run_scan(self, scanner_type, obj, id_field, child_id, child_type):
+ results = self.scan(scanner_type, obj, id_field, child_id, child_type)
+
+ # run children scanner from queue.
+ self.scan_from_queue()
+ return results
+
+ def scan_from_queue(self):
+ while not Scanner.scan_queue.empty():
+ item = Scanner.scan_queue.get()
+ scanner_type = item["scanner"]
+
+ # scan the queued item
+ self.scan(scanner_type, item["object"], item["child_id_field"])
+ self.log.info("Scan complete")
+
+ def scan_links(self):
+ self.log.info("scanning for links")
+ fetchers_implementing_add_links = [
+ FindLinksForPnics(),
+ FindLinksForInstanceVnics(),
+ FindLinksForVserviceVnics(),
+ FindLinksForVconnectors(),
+ FindLinksForVedges(),
+ FindLinksForOteps()
+ ]
+ for fetcher in fetchers_implementing_add_links:
+ fetcher.set_env(self.get_env())
+ fetcher.add_links()
+
+ def scan_cliques(self):
+ clique_scanner = CliqueFinder()
+ clique_scanner.set_env(self.get_env())
+ clique_scanner.find_cliques()
+
+ def deploy_monitoring_setup(self):
+ self.inv.monitoring_setup_manager.handle_pending_setup_changes()
+
+ def load_metadata(self):
+ parser = ScanMetadataParser(self.inv)
+ conf = self.config.get_env_config()
+ scanners_file = os.path.join(conf.get('app_path', '/etc/calipso'),
+ 'config',
+ ScanMetadataParser.SCANNERS_FILE)
+
+ metadata = parser.parse_metadata_file(scanners_file)
+ self.scanners_package = metadata[ScanMetadataParser.SCANNERS_PACKAGE]
+ self.scanners = metadata[ScanMetadataParser.SCANNERS]
+
+ def get_scanner_package(self):
+ return self.scanners_package
+
+ def get_scanner(self, scanner_type: str) -> dict:
+ return self.scanners.get(scanner_type)
diff --git a/app/install/calipso-installer.py b/app/install/calipso-installer.py
new file mode 100644
index 0000000..bccddae
--- /dev/null
+++ b/app/install/calipso-installer.py
@@ -0,0 +1,380 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from pymongo import MongoClient, ReturnDocument
+from pymongo.errors import ConnectionFailure
+from urllib.parse import quote_plus
+import docker
+import argparse
+import dockerpycreds
+# note: dockerpycreds is not currently used; kept for possible Docker API
+# security (credential store) support in the future
+import time
+import json
+
+
+class MongoComm:
+ # deals with communication from host/installer server to mongoDB, includes methods for future use
+ try:
+
+ def __init__(self, host, user, password, port):
+ self.uri = "mongodb://%s:%s@%s:%s/%s" % (
+ quote_plus(user), quote_plus(password), host, port, "calipso")
+ self.client = MongoClient(self.uri)
+
+ def find(self, coll, key, val):
+ collection = self.client.calipso[coll]
+ doc = collection.find({key: val})
+ return doc
+
+ def get(self, coll, doc_name):
+ collection = self.client.calipso[coll]
+ doc = collection.find_one({"name": doc_name})
+ return doc
+
+ def insert(self, coll, doc):
+ collection = self.client.calipso[coll]
+ doc_id = collection.insert(doc)
+ return doc_id
+
+ def remove_doc(self, coll, doc):
+ collection = self.client.calipso[coll]
+ collection.remove(doc)
+
+ def remove_coll(self, coll):
+ collection = self.client.calipso[coll]
+ collection.remove()
+
+ def find_update(self, coll, key, val, data):
+ collection = self.client.calipso[coll]
+ collection.find_one_and_update(
+ {key: val},
+ {"$set": data},
+ upsert=True
+ )
+
+ def update(self, coll, doc, upsert=False):
+ collection = self.client.calipso[coll]
+ doc_id = collection.update_one({'_id': doc['_id']},{'$set': doc}, upsert=upsert)
+ return doc_id
+
+ except ConnectionFailure:
+ print("MongoDB Server not available")
+
+
+# module-level Docker client: connects to the local Docker daemon as a
+# side effect of loading this script
+DockerClient = docker.from_env()  # using local host docker environment parameters
+
+# use the below example for installer against a remote docker host:
+# DockerClient = docker.DockerClient(base_url='tcp://korlev-calipso-testing.cisco.com:2375')
+
+
+def copy_file(filename):
+ c = MongoComm(args.hostname, args.dbuser, args.dbpassword, args.dbport)
+ txt = open('db/'+filename+'.json')
+ data = json.load(txt)
+ c.remove_coll(filename)
+ doc_id = c.insert(filename, data)
+ print("Copied", filename, "mongo doc_ids:\n\n", doc_id, "\n\n")
+ time.sleep(1)
+
+
+# config file paths inside the containers (C_*) and on the host (H_*)
+C_MONGO_CONFIG = "/local_dir/calipso_mongo_access.conf"
+H_MONGO_CONFIG = "/home/calipso/calipso_mongo_access.conf"
+# PYTHONPATH exported into the calipso containers
+PYTHONPATH = "/home/scan/calipso_prod/app"
+C_LDAP_CONFIG = "/local_dir/ldap.conf"
+H_LDAP_CONFIG = "/home/calipso/ldap.conf"
+
+# functions to check and start calipso containers:
+def start_mongo(dbport):
+    """Start the calipso-mongo container (pulling the image if missing),
+    then optionally seed the calipso DB from the local 'db' JSON files."""
+    if not DockerClient.containers.list(all=True, filters={"name": "calipso-mongo"}):
+        print("\nstarting container calipso-mongo, please wait...\n")
+        image = DockerClient.images.list(all=True, name="korenlev/calipso:mongo")
+        if image:
+            print(image, "exists...not downloading...")
+        else:
+            print("image korenlev/calipso:mongo missing, hold on while downloading first...\n")
+            image = DockerClient.images.pull("korenlev/calipso:mongo")
+            print("Downloaded", image, "\n\n")
+        mongocontainer = DockerClient.containers.run('korenlev/calipso:mongo', detach=True, name="calipso-mongo",
+                                                     ports={'27017/tcp': dbport, '28017/tcp': 28017},
+                                                     restart_policy={"Name": "always"})
+        # wait a bit till mongoDB is up before starting to copy the json files from 'db' folder:
+        time.sleep(5)
+        enable_copy = input("create initial calipso DB ? (copy json files from 'db' folder to mongoDB -"
+                            " 'c' to copy, 'q' to skip):")
+        if enable_copy == "c":
+            print("\nstarting to copy json files to mongoDB...\n\n")
+            print("-----------------------------------------\n\n")
+            time.sleep(1)
+            copy_file("attributes_for_hover_on_data")
+            copy_file("clique_constraints")
+            copy_file("clique_types")
+            copy_file("cliques")
+            copy_file("constants")
+            copy_file("environments_config")
+            copy_file("inventory")
+            copy_file("link_types")
+            copy_file("links")
+            copy_file("messages")
+            copy_file("meteor_accounts_loginServiceConfiguration")
+            copy_file("users")
+            copy_file("monitoring_config")
+            copy_file("monitoring_config_templates")
+            copy_file("network_agent_types")
+            copy_file("roles")
+            copy_file("scans")
+            copy_file("scheduled_scans")
+            copy_file("statistics")
+            copy_file("supported_environments")
+
+            # note : 'messages', 'roles', 'users' and some of the 'constants' are filled by calipso-ui at runtime
+            # some other docs are filled later by scanning, logging and monitoring
+        else:
+            return
+    else:
+        print("container named calipso-mongo already exists, please deal with it using docker...\n")
+        return
+
+
+def start_listen():
+    """Start the calipso-listen container (pulling the image if missing)."""
+    if not DockerClient.containers.list(all=True, filters={"name": "calipso-listen"}):
+        print("\nstarting container calipso-listen...\n")
+        image = DockerClient.images.list(all=True, name="korenlev/calipso:listen")
+        if image:
+            print(image, "exists...not downloading...")
+        else:
+            print("image korenlev/calipso:listen missing, hold on while downloading first...\n")
+            image = DockerClient.images.pull("korenlev/calipso:listen")
+            print("Downloaded", image, "\n\n")
+        listencontainer = DockerClient.containers.run('korenlev/calipso:listen', detach=True, name="calipso-listen",
+                                                      ports={'22/tcp': 50022},
+                                                      restart_policy={"Name": "always"},
+                                                      environment=["PYTHONPATH=" + PYTHONPATH,
+                                                                   "MONGO_CONFIG=" + C_MONGO_CONFIG],
+                                                      volumes={'/home/calipso': {'bind': '/local_dir', 'mode': 'rw'}})
+    else:
+        print("container named calipso-listen already exists, please deal with it using docker...\n")
+        return
+
+
+def start_ldap():
+    """Start the calipso-ldap container (pulling the image if missing)."""
+    if not DockerClient.containers.list(all=True, filters={"name": "calipso-ldap"}):
+        print("\nstarting container calipso-ldap...\n")
+        image = DockerClient.images.list(all=True, name="korenlev/calipso:ldap")
+        if image:
+            print(image, "exists...not downloading...")
+        else:
+            print("image korenlev/calipso:ldap missing, hold on while downloading first...\n")
+            image = DockerClient.images.pull("korenlev/calipso:ldap")
+            print("Downloaded", image, "\n\n")
+        ldapcontainer = DockerClient.containers.run('korenlev/calipso:ldap', detach=True, name="calipso-ldap",
+                                                    ports={'389/tcp': 389, '389/udp': 389},
+                                                    restart_policy={"Name": "always"},
+                                                    volumes={'/home/calipso/': {'bind': '/local_dir/', 'mode': 'rw'}})
+    else:
+        print("container named calipso-ldap already exists, please deal with it using docker...\n")
+        return
+
+
+def start_api():
+    """Start the calipso-api container (pulling the image if missing)."""
+    if not DockerClient.containers.list(all=True, filters={"name": "calipso-api"}):
+        print("\nstarting container calipso-api...\n")
+        image = DockerClient.images.list(all=True, name="korenlev/calipso:api")
+        if image:
+            print(image, "exists...not downloading...")
+        else:
+            print("image korenlev/calipso:api missing, hold on while downloading first...\n")
+            image = DockerClient.images.pull("korenlev/calipso:api")
+            print("Downloaded", image, "\n\n")
+        apicontainer = DockerClient.containers.run('korenlev/calipso:api', detach=True, name="calipso-api",
+                                                   ports={'8000/tcp': 8000, '22/tcp': 40022},
+                                                   restart_policy={"Name": "always"},
+                                                   environment=["PYTHONPATH=" + PYTHONPATH,
+                                                                "MONGO_CONFIG=" + C_MONGO_CONFIG,
+                                                                "LDAP_CONFIG=" + C_LDAP_CONFIG,
+                                                                "LOG_LEVEL=DEBUG"],
+                                                   volumes={'/home/calipso/': {'bind': '/local_dir/', 'mode': 'rw'}})
+    else:
+        print("container named calipso-api already exists, please deal with it using docker...\n")
+        return
+
+
+def start_scan():
+    """Start the calipso-scan container (pulling the image if missing)."""
+    if not DockerClient.containers.list(all=True, filters={"name": "calipso-scan"}):
+        print("\nstarting container calipso-scan...\n")
+        image = DockerClient.images.list(all=True, name="korenlev/calipso:scan")
+        if image:
+            print(image, "exists...not downloading...")
+        else:
+            print("image korenlev/calipso:scan missing, hold on while downloading first...\n")
+            image = DockerClient.images.pull("korenlev/calipso:scan")
+            print("Downloaded", image, "\n\n")
+        scancontainer = DockerClient.containers.run('korenlev/calipso:scan', detach=True, name="calipso-scan",
+                                                    ports={'22/tcp': 30022},
+                                                    restart_policy={"Name": "always"},
+                                                    environment=["PYTHONPATH=" + PYTHONPATH,
+                                                                 "MONGO_CONFIG=" + C_MONGO_CONFIG],
+                                                    volumes={'/home/calipso/': {'bind': '/local_dir/', 'mode': 'rw'}})
+    else:
+        print("container named calipso-scan already exists, please deal with it using docker...\n")
+        return
+
+
+def start_sensu():
+    """Start the calipso-sensu container (pulling the image if missing)."""
+    if not DockerClient.containers.list(all=True, filters={"name": "calipso-sensu"}):
+        print("\nstarting container calipso-sensu...\n")
+        image = DockerClient.images.list(all=True, name="korenlev/calipso:sensu")
+        if image:
+            print(image, "exists...not downloading...")
+        else:
+            print("image korenlev/calipso:sensu missing, hold on while downloading first...\n")
+            image = DockerClient.images.pull("korenlev/calipso:sensu")
+            print("Downloaded", image, "\n\n")
+        sensucontainer = DockerClient.containers.run('korenlev/calipso:sensu', detach=True, name="calipso-sensu",
+                                                     ports={'22/tcp': 20022, '3000/tcp': 3000, '4567/tcp': 4567,
+                                                            '5671/tcp': 5671, '15672/tcp': 15672},
+                                                     restart_policy={"Name": "always"},
+                                                     environment=["PYTHONPATH=" + PYTHONPATH],
+                                                     volumes={'/home/calipso/': {'bind': '/local_dir/', 'mode': 'rw'}})
+    else:
+        print("container named calipso-sensu already exists, please deal with it using docker...\n")
+        return
+
+
+def start_ui(host, dbuser, dbpassword, webport, dbport):
+    """Start the calipso-ui container, wiring it to MongoDB and LDAP."""
+    if not DockerClient.containers.list(all=True, filters={"name": "calipso-ui"}):
+        print("\nstarting container calipso-ui...\n")
+        image = DockerClient.images.list(all=True, name="korenlev/calipso:ui")
+        if image:
+            print(image, "exists...not downloading...")
+        else:
+            print("image korenlev/calipso:ui missing, hold on while downloading first...\n")
+            image = DockerClient.images.pull("korenlev/calipso:ui")
+            print("Downloaded", image, "\n\n")
+        uicontainer = DockerClient.containers.run('korenlev/calipso:ui', detach=True, name="calipso-ui",
+                                                  ports={'3000/tcp': webport},
+                                                  restart_policy={"Name": "always"},
+                                                  environment=["ROOT_URL=http://{}:{}".format(host, str(webport)),
+                                                               "MONGO_URL=mongodb://{}:{}@{}:{}/calipso".format
+                                                               (dbuser, dbpassword, host, str(dbport)),
+                                                               "LDAP_CONFIG=" + C_LDAP_CONFIG])
+    else:
+        print("container named calipso-ui already exists, please deal with it using docker...\n")
+        return
+
+
+# function to check and stop calipso containers:
+
+def container_stop(container_name):
+    """Stop (kill if running) and remove the named container, if present."""
+    if DockerClient.containers.list(all=True, filters={"name": container_name}):
+        print("fetching container name", container_name, "...\n")
+        c = DockerClient.containers.get(container_name)
+        if c.status != "running":
+            print(container_name, "is not running...")
+            time.sleep(1)
+            print("removing container name", c.name, "...\n")
+            c.remove()
+        else:
+            print("killing container name", c.name, "...\n")
+            c.kill()
+            time.sleep(1)
+            print("removing container name", c.name, "...\n")
+            c.remove()
+    else:
+        print("no container named", container_name, "found...")
+
+
+# parser for getting optional command arguments:
+# NOTE(review): the defaults below are strings; argparse applies 'type=' only
+# to command-line values, so webport/dbport default to str - confirm that
+# downstream usage (docker port mappings) tolerates this
+parser = argparse.ArgumentParser()
+parser.add_argument("--hostname", help="Hostname or IP address of the server (default=172.17.0.1)",type=str,
+                    default="172.17.0.1", required=False)
+parser.add_argument("--webport", help="Port for the Calipso WebUI (default=80)",type=int,
+                    default="80", required=False)
+parser.add_argument("--dbport", help="Port for the Calipso MongoDB (default=27017)",type=int,
+                    default="27017", required=False)
+parser.add_argument("--dbuser", help="User for the Calipso MongoDB (default=calipso)",type=str,
+                    default="calipso", required=False)
+parser.add_argument("--dbpassword", help="Password for the Calipso MongoDB (default=calipso_default)",type=str,
+                    default="calipso_default", required=False)
+args = parser.parse_args()
+
+# interactive prompts: pick an action and a target container ('q' to quit)
+container = ""
+action = ""
+container_names = ["all", "calipso-mongo", "calipso-scan", "calipso-listen", "calipso-ldap", "calipso-api",
+                   "calipso-sensu", "calipso-ui"]
+container_actions = ["stop", "start"]
+while action not in container_actions:
+    action = input("Action? (stop, start, or 'q' to quit):\n")
+    if action == "q":
+        exit()
+while container not in container_names:
+    container = input("Container? (all, calipso-mongo, calipso-scan, calipso-listen, calipso-ldap, calipso-api, "
+                      "calipso-sensu, calipso-ui or 'q' to quit):\n")
+    if container == "q":
+        exit()
+
+# starting the containers per arguments:
+if action == "start":
+    # building /home/calipso/calipso_mongo_access.conf and /home/calipso/ldap.conf files, per the arguments:
+    calipso_mongo_access_text = "server " + args.hostname + "\nuser " + args.dbuser + "\npassword " + \
+                                args.dbpassword + "\nauth_db calipso"
+    ldap_text = "user admin" + "\npassword password" + "\nurl ldap://" + args.hostname + ":389" + \
+                "\nuser_id_attribute CN" + "\nuser_pass_attribute userpassword" + \
+                "\nuser_objectclass inetOrgPerson" + \
+                "\nuser_tree_dn OU=Users,DC=openstack,DC=org" + "\nquery_scope one" + \
+                "\ntls_req_cert allow" + \
+                "\ngroup_member_attribute member"
+    print("creating default", H_MONGO_CONFIG, "file...\n")
+    calipso_mongo_access_file = open(H_MONGO_CONFIG, "w+")
+    time.sleep(1)
+    calipso_mongo_access_file.write(calipso_mongo_access_text)
+    calipso_mongo_access_file.close()
+    print("creating default", H_LDAP_CONFIG, "file...\n")
+    ldap_file = open(H_LDAP_CONFIG, "w+")
+    time.sleep(1)
+    ldap_file.write(ldap_text)
+    ldap_file.close()
+
+    # start the selected container(s); calipso-mongo first, since the other
+    # services depend on the DB being up
+    if container == "calipso-mongo" or container == "all":
+        start_mongo(args.dbport)
+        time.sleep(1)
+    if container == "calipso-listen" or container == "all":
+        start_listen()
+        time.sleep(1)
+    if container == "calipso-ldap" or container == "all":
+        start_ldap()
+        time.sleep(1)
+    if container == "calipso-api" or container == "all":
+        start_api()
+        time.sleep(1)
+    if container == "calipso-scan" or container == "all":
+        start_scan()
+        time.sleep(1)
+    if container == "calipso-sensu" or container == "all":
+        start_sensu()
+        time.sleep(1)
+    if container == "calipso-ui" or container == "all":
+        start_ui(args.hostname, args.dbuser, args.dbpassword, args.webport, args.dbport)
+        time.sleep(1)
+
+# stopping the containers per arguments:
+if action == "stop":
+    if container == "calipso-mongo" or container == "all":
+        container_stop("calipso-mongo")
+    if container == "calipso-listen" or container == "all":
+        container_stop("calipso-listen")
+    if container == "calipso-ldap" or container == "all":
+        container_stop("calipso-ldap")
+    if container == "calipso-api" or container == "all":
+        container_stop("calipso-api")
+    if container == "calipso-scan" or container == "all":
+        container_stop("calipso-scan")
+    if container == "calipso-sensu" or container == "all":
+        container_stop("calipso-sensu")
+    if container == "calipso-ui" or container == "all":
+        container_stop("calipso-ui")
diff --git a/app/install/calipso_mongo_access.conf.example b/app/install/calipso_mongo_access.conf.example
new file mode 100644
index 0000000..1b3377d
--- /dev/null
+++ b/app/install/calipso_mongo_access.conf.example
@@ -0,0 +1,4 @@
+server korlev-calipso-dev.cisco.com
+user calipso
+password calipso_default
+auth_db calipso
diff --git a/app/install/db/attributes_for_hover_on_data.json b/app/install/db/attributes_for_hover_on_data.json
new file mode 100644
index 0000000..6fdc5a3
--- /dev/null
+++ b/app/install/db/attributes_for_hover_on_data.json
@@ -0,0 +1,89 @@
+[
+{
+ "attributes" : [
+ "object_name",
+ "model",
+ "mac_address",
+ "type",
+ "koren"
+ ],
+ "type" : "vnic"
+},
+{
+ "attributes" : [
+ "object_name",
+ "connector_type",
+ "type",
+ "interfaces"
+ ],
+ "type" : "vconnector"
+},
+{
+ "attributes" : [
+ "object_name",
+ "host",
+ "service_type",
+ "type"
+ ],
+ "type" : "vservice"
+},
+{
+ "attributes" : [
+ "object_name",
+ "host",
+ "agent_type",
+ "binary",
+ "type"
+ ],
+ "type" : "vedge"
+},
+{
+ "attributes" : [
+ "object_name",
+ "host",
+ "mac_address",
+ "Speed",
+ "Link detected",
+ "type"
+ ],
+ "type" : "pnic"
+},
+{
+ "attributes" : [
+ "object_name",
+ "provider:segmentation_id",
+ "provider:network_type",
+ "type"
+ ],
+ "type" : "network"
+},
+{
+ "attributes" : [
+ "object_name",
+ "host_type",
+ "parent_id",
+ "type"
+ ],
+ "type" : "host"
+},
+{
+ "attributes" : [
+ "object_name",
+ "host",
+ "project",
+ "type",
+ "name_path"
+ ],
+ "type" : "instance"
+},
+{
+ "attributes" : [
+ "object_name",
+ "overlay_type",
+ "ip_address",
+ "type",
+ "ports"
+ ],
+ "type" : "otep"
+}
+]
diff --git a/app/install/db/clique_constraints.json b/app/install/db/clique_constraints.json
new file mode 100644
index 0000000..e317c3d
--- /dev/null
+++ b/app/install/db/clique_constraints.json
@@ -0,0 +1,20 @@
+[
+{
+ "focal_point_type" : "instance",
+ "constraints" : [
+ "network"
+ ]
+},
+{
+ "focal_point_type" : "vservice",
+ "constraints" : [
+ "network"
+ ]
+},
+{
+ "constraints" : [
+ "network"
+ ],
+ "focal_point_type" : "network"
+}
+]
diff --git a/app/install/db/clique_types.json b/app/install/db/clique_types.json
new file mode 100644
index 0000000..a2ef3c2
--- /dev/null
+++ b/app/install/db/clique_types.json
@@ -0,0 +1,56 @@
+[
+{
+ "environment" : "ANY",
+ "focal_point_type" : "instance",
+ "link_types" : [
+ "instance-vnic",
+ "vnic-vconnector",
+ "vconnector-vedge",
+ "vedge-otep",
+ "otep-vconnector",
+ "vconnector-pnic",
+ "pnic-network"
+ ],
+ "name" : "instance"
+},
+{
+ "environment" : "ANY",
+ "focal_point_type" : "pnic",
+ "link_types" : [
+ "pnic-host",
+ "host-network",
+ "network-switch_pnic",
+ "switch_pnic-switch"
+ ],
+ "name" : "pnic_clique"
+},
+{
+ "environment" : "ANY",
+ "focal_point_type" : "vservice",
+ "link_types" : [
+ "vservice-vnic",
+ "vnic-vedge",
+ "vedge-otep",
+ "otep-vconnector",
+ "vconnector-pnic",
+ "pnic-network"
+ ],
+ "name" : "vservice"
+},
+{
+ "environment" : "ANY",
+ "focal_point_type" : "network",
+ "link_types" : [
+ "network-pnic",
+ "pnic-vconnector",
+ "vconnector-otep",
+ "otep-vedge",
+ "vedge-vconnector",
+ "vedge-vnic",
+ "vconnector-vnic",
+ "vnic-instance",
+ "vnic-vservice"
+ ],
+ "name" : "network"
+}
+]
diff --git a/app/install/db/cliques.json b/app/install/db/cliques.json
new file mode 100644
index 0000000..be99137
--- /dev/null
+++ b/app/install/db/cliques.json
@@ -0,0 +1,3 @@
+{
+ "_id" : "xyz"
+}
diff --git a/app/install/db/constants.json b/app/install/db/constants.json
new file mode 100644
index 0000000..0521d69
--- /dev/null
+++ b/app/install/db/constants.json
@@ -0,0 +1,668 @@
+[
+{
+ "data" : [
+ {
+ "label" : "network",
+ "value" : "network"
+ }
+ ],
+ "name" : "constraints"
+},
+{
+ "data" : [
+ {
+ "label" : "Development",
+ "value" : "development"
+ },
+ {
+ "label" : "Testing",
+ "value" : "testing"
+ },
+ {
+ "label" : "Staging",
+ "value" : "staging"
+ },
+ {
+ "label" : "Production",
+ "value" : "production"
+ }
+ ],
+ "name" : "env_types"
+},
+{
+ "data" : [
+ {
+ "label" : "CRITICAL",
+ "value" : "critical"
+ },
+ {
+ "label" : "ERROR",
+ "value" : "error"
+ },
+ {
+ "label" : "WARNING",
+ "value" : "warning"
+ },
+ {
+ "label" : "INFO",
+ "value" : "info"
+ },
+ {
+ "label" : "DEBUG",
+ "value" : "debug"
+ },
+ {
+ "label" : "NOTSET",
+ "value" : "notset"
+ }
+ ],
+ "name" : "log_levels"
+},
+{
+ "data" : [
+ {
+ "label" : "OVS",
+ "value" : "OVS"
+ },
+ {
+ "label" : "VPP",
+ "value" : "VPP"
+ },
+ {
+ "label" : "LXB",
+ "value" : "LXB"
+ },
+ {
+ "label" : "Arista",
+ "value" : "Arista"
+ },
+ {
+ "label" : "Nexus",
+ "value" : "Nexus"
+ }
+ ],
+ "name" : "mechanism_drivers"
+},
+{
+ "data" : [
+ {
+ "label" : "local",
+ "value" : "local"
+ },
+ {
+ "label" : "vlan",
+ "value" : "vlan"
+ },
+ {
+ "label" : "vxlan",
+ "value" : "vxlan"
+ },
+ {
+ "label" : "gre",
+ "value" : "gre"
+ },
+ {
+ "label" : "flat",
+ "value" : "flat"
+ }
+ ],
+ "name" : "type_drivers"
+},
+{
+ "data" : [
+ {
+ "label" : "Sensu",
+ "value" : "Sensu"
+ }
+ ],
+ "name" : "environment_monitoring_types"
+},
+{
+ "data" : [
+ {
+ "label" : "up",
+ "value" : "up"
+ },
+ {
+ "label" : "down",
+ "value" : "down"
+ }
+ ],
+ "name" : "link_states"
+},
+{
+ "name" : "environment_provision_types",
+ "data" : [
+ {
+ "label" : "None",
+ "value" : "None"
+ },
+ {
+ "label" : "Deploy",
+ "value" : "Deploy"
+ },
+ {
+ "label" : "Files",
+ "value" : "Files"
+ },
+ {
+ "label" : "DB",
+ "value" : "DB"
+ }
+ ]
+},
+{
+ "name" : "environment_operational_status",
+ "data" : [
+ {
+ "value" : "stopped",
+ "label" : "stopped"
+ },
+ {
+ "value" : "running",
+ "label" : "running"
+ },
+ {
+ "value" : "error",
+ "label" : "error"
+ }
+ ]
+},
+{
+ "name" : "link_types",
+ "data" : [
+ {
+ "label" : "instance-vnic",
+ "value" : "instance-vnic"
+ },
+ {
+ "label" : "otep-vconnector",
+ "value" : "otep-vconnector"
+ },
+ {
+ "label" : "otep-pnic",
+ "value" : "otep-pnic"
+ },
+ {
+ "label" : "pnic-network",
+ "value" : "pnic-network"
+ },
+ {
+ "label" : "vedge-otep",
+ "value" : "vedge-otep"
+ },
+ {
+ "label" : "vnic-vconnector",
+ "value" : "vnic-vconnector"
+ },
+ {
+ "label" : "vconnector-pnic",
+ "value" : "vconnector-pnic"
+ },
+ {
+ "label" : "vnic-vedge",
+ "value" : "vnic-vedge"
+ },
+ {
+ "label" : "vconnector-vedge",
+ "value" : "vconnector-vedge"
+ },
+ {
+ "label" : "vedge-pnic",
+ "value" : "vedge-pnic"
+ },
+ {
+ "label" : "vservice-vnic",
+ "value" : "vservice-vnic"
+ },
+ {
+ "label" : "pnic-host",
+ "value" : "pnic-host"
+ },
+ {
+ "label" : "host-pnic",
+ "value" : "host-pnic"
+ },
+ {
+ "label" : "host-network",
+ "value" : "host-network"
+ },
+ {
+ "label" : "network-host",
+ "value" : "network-host"
+ },
+ {
+ "label" : "switch_pnic-network",
+ "value" : "switch_pnic-network"
+ },
+ {
+ "label" : "network-switch_pnic",
+ "value" : "network-switch_pnic"
+ },
+ {
+ "label" : "switch_pnic-switch",
+ "value" : "switch_pnic-switch"
+ },
+ {
+ "label" : "switch-switch_pnic",
+ "value" : "switch-switch_pnic"
+ }
+ ]
+},
+{
+ "name" : "monitoring_sides",
+ "data" : [
+ {
+ "label" : "client",
+ "value" : "client"
+ },
+ {
+ "label" : "server",
+ "value" : "server"
+ }
+ ]
+},
+{
+ "name" : "messages_severity",
+ "data" : [
+ {
+ "label" : "panic",
+ "value" : "panic"
+ },
+ {
+ "label" : "alert",
+ "value" : "alert"
+ },
+ {
+ "label" : "crit",
+ "value" : "crit"
+ },
+ {
+ "label" : "error",
+ "value" : "error"
+ },
+ {
+ "label" : "warn",
+ "value" : "warn"
+ },
+ {
+ "label" : "notice",
+ "value" : "notice"
+ },
+ {
+ "label" : "info",
+ "value" : "info"
+ },
+ {
+ "label" : "debug",
+ "value" : "debug"
+ }
+ ]
+},
+{
+ "name" : "object_types",
+ "data" : [
+ {
+ "label" : "vnic",
+ "value" : "vnic"
+ },
+ {
+ "label" : "vconnector",
+ "value" : "vconnector"
+ },
+ {
+ "label" : "vedge",
+ "value" : "vedge"
+ },
+ {
+ "label" : "instance",
+ "value" : "instance"
+ },
+ {
+ "label" : "vservice",
+ "value" : "vservice"
+ },
+ {
+ "label" : "pnic",
+ "value" : "pnic"
+ },
+ {
+ "label" : "network",
+ "value" : "network"
+ },
+ {
+ "label" : "port",
+ "value" : "port"
+ },
+ {
+ "label" : "otep",
+ "value" : "otep"
+ },
+ {
+ "label" : "agent",
+ "value" : "agent"
+ },
+ {
+ "label" : "host",
+ "value" : "host"
+ },
+ {
+ "label" : "switch_pnic",
+ "value" : "switch_pnic"
+ },
+ {
+ "label" : "switch",
+ "value" : "switch"
+ }
+ ]
+},
+{
+ "name" : "scans_statuses",
+ "data" : [
+ {
+ "value" : "draft",
+ "label" : "Draft"
+ },
+ {
+ "value" : "pending",
+ "label" : "Pending"
+ },
+ {
+ "value" : "running",
+ "label" : "Running"
+ },
+ {
+ "value" : "completed",
+ "label" : "Completed"
+ },
+ {
+ "value" : "failed",
+ "label" : "Failed"
+ },
+ {
+ "value" : "aborted",
+ "label" : "Aborted"
+ }
+ ]
+},
+{
+ "data" : [
+ {
+ "label" : "Mirantis-6.0",
+ "value" : "Mirantis-6.0"
+ },
+ {
+ "label" : "Mirantis-7.0",
+ "value" : "Mirantis-7.0"
+ },
+ {
+ "label" : "Mirantis-8.0",
+ "value" : "Mirantis-8.0"
+ },
+ {
+ "label" : "Mirantis-9.0",
+ "value" : "Mirantis-9.0"
+ },
+ {
+ "label" : "RDO-Mitaka",
+ "value" : "RDO-Mitaka"
+ },
+ {
+ "label" : "RDO-Liberty",
+ "value" : "RDO-Liberty"
+ },
+ {
+ "label" : "RDO-Juno",
+ "value" : "RDO-Juno"
+ },
+ {
+ "label" : "RDO-kilo",
+ "value" : "RDO-kilo"
+ },
+ {
+ "label" : "devstack-liberty",
+ "value" : "devstack-liberty"
+ },
+ {
+ "label" : "Canonical-icehouse",
+ "value" : "Canonical-icehouse"
+ },
+ {
+ "label" : "Canonical-juno",
+ "value" : "Canonical-juno"
+ },
+ {
+ "label" : "Canonical-liberty",
+ "value" : "Canonical-liberty"
+ },
+ {
+ "label" : "Canonical-mitaka",
+ "value" : "Canonical-mitaka"
+ },
+ {
+ "label" : "Apex-Mitaka",
+ "value" : "Apex-Mitaka"
+ },
+ {
+ "label" : "Devstack-Mitaka",
+ "value" : "Devstack-Mitaka"
+ },
+ {
+ "label" : "packstack-7.0.0-0.10.dev1682",
+ "value" : "packstack-7.0.0-0.10.dev1682"
+ },
+ {
+ "label" : "Stratoscale-v2.1.6",
+ "value" : "Stratoscale-v2.1.6"
+ },
+ {
+ "label" : "Mirantis-9.1",
+ "value" : "Mirantis-9.1"
+ }
+ ],
+ "name" : "distributions"
+},
+{
+ "name" : "message_source_systems",
+ "data" : [
+ {
+ "value" : "OpenStack",
+ "label" : "OpenStack"
+ },
+ {
+ "value" : "Calipso",
+ "label" : "Calipso"
+ },
+ {
+ "value" : "Sensu",
+ "label" : "Sensu"
+ }
+ ]
+},
+{
+ "name" : "object_types_for_links",
+ "data" : [
+ {
+ "label" : "vnic",
+ "value" : "vnic"
+ },
+ {
+ "label" : "vconnector",
+ "value" : "vconnector"
+ },
+ {
+ "label" : "vedge",
+ "value" : "vedge"
+ },
+ {
+ "label" : "instance",
+ "value" : "instance"
+ },
+ {
+ "label" : "vservice",
+ "value" : "vservice"
+ },
+ {
+ "label" : "pnic",
+ "value" : "pnic"
+ },
+ {
+ "label" : "network",
+ "value" : "network"
+ },
+ {
+ "label" : "port",
+ "value" : "port"
+ },
+ {
+ "label" : "otep",
+ "value" : "otep"
+ },
+ {
+ "label" : "agent",
+ "value" : "agent"
+ },
+ {
+ "label" : "host",
+ "value" : "host"
+ },
+ {
+ "label" : "switch_pnic",
+ "value" : "switch_pnic"
+ },
+ {
+ "label" : "switch",
+ "value" : "switch"
+ }
+ ]
+},
+{
+ "name" : "scan_object_types",
+ "data" : [
+ {
+ "label" : "vnic",
+ "value" : "vnic"
+ },
+ {
+ "label" : "vconnector",
+ "value" : "vconnector"
+ },
+ {
+ "label" : "vedge",
+ "value" : "vedge"
+ },
+ {
+ "label" : "instance",
+ "value" : "instance"
+ },
+ {
+ "label" : "vservice",
+ "value" : "vservice"
+ },
+ {
+ "label" : "pnic",
+ "value" : "pnic"
+ },
+ {
+ "label" : "network",
+ "value" : "network"
+ },
+ {
+ "label" : "port",
+ "value" : "port"
+ },
+ {
+ "label" : "otep",
+ "value" : "otep"
+ },
+ {
+ "label" : "agent",
+ "value" : "agent"
+ },
+ {
+ "value" : "availability_zone",
+ "label" : "availability_zone"
+ },
+ {
+ "value" : "regions_folder",
+ "label" : "regions_folder"
+ },
+ {
+ "value" : "instances_folder",
+ "label" : "instances_folder"
+ },
+ {
+ "value" : "pnics_folder",
+ "label" : "pnics_folder"
+ },
+ {
+ "value" : "vconnectors_folder",
+ "label" : "vconnectors_folder"
+ },
+ {
+ "value" : "vedges_folder",
+ "label" : "vedges_folder"
+ },
+ {
+ "value" : "ports_folder",
+ "label" : "ports_folder"
+ },
+ {
+ "value" : "aggregates_folder",
+ "label" : "aggregates_folder"
+ },
+ {
+ "value" : "vservices_folder",
+ "label" : "vservices_folder"
+ },
+ {
+ "value" : "vnics_folder",
+ "label" : "vnics_folder"
+ },
+ {
+ "value" : "network_agent",
+ "label" : "network_agent"
+ },
+ {
+ "value" : "project",
+ "label" : "project"
+ },
+ {
+ "value" : "projects_folder",
+ "label" : "projects_folder"
+ },
+ {
+ "value" : "aggregate",
+ "label" : "aggregate"
+ },
+ {
+ "value" : "network_agents_folder",
+ "label" : "network_agents_folder"
+ },
+ {
+ "value" : "host",
+ "label" : "host"
+ },
+ {
+ "value" : "region",
+ "label" : "region"
+ },
+ {
+ "value" : "host_ref",
+ "label" : "host_ref"
+ },
+ {
+ "value" : "network_services_folder",
+ "label" : "network_services_folder"
+ },
+ {
+ "label" : "switch_pnic",
+ "value" : "switch_pnic"
+ },
+ {
+ "label" : "switch",
+ "value" : "switch"
+ }
+ ]
+}
+]
diff --git a/app/install/db/environments_config.json b/app/install/db/environments_config.json
new file mode 100644
index 0000000..9e05687
--- /dev/null
+++ b/app/install/db/environments_config.json
@@ -0,0 +1,78 @@
+[
+{
+ "operational" : "stopped",
+ "listen" : true,
+ "configuration" : [
+ {
+ "name" : "OpenStack",
+ "admin_token" : "sadgsgsagsa",
+ "user" : "adminuser",
+ "port" : 5000,
+ "pwd" : "saggsgsasg",
+ "host" : "10.0.0.1"
+ },
+ {
+ "name" : "mysql",
+ "password" : "sgasdggddsgsd",
+ "port" : 3307,
+ "user" : "mysqluser",
+ "host" : "10.0.0.1"
+ },
+ {
+ "name" : "CLI",
+ "user" : "sshuser",
+ "pwd" : "sagsagsagsa",
+ "host" : "10.0.0.1"
+ },
+ {
+ "name" : "AMQP",
+ "password" : "sagssdgassgd",
+ "port" : 5673,
+ "user" : "rabbitmquser",
+ "host" : "10.0.0.1"
+ },
+ {
+ "rabbitmq_port" : 5671,
+ "ssh_user" : "root",
+ "server_name" : "sensu_server",
+ "env_type" : "production",
+ "provision" : "None",
+ "name" : "Monitoring",
+ "ssh_port" : 20022,
+ "rabbitmq_pass" : "sagsagss",
+ "ssh_password" : "calipsoasgsagdg",
+ "rabbitmq_user" : "sensu",
+ "config_folder" : "/local_dir/sensu_config",
+ "type" : "Sensu",
+ "server_ip" : "10.0.0.1",
+ "api_port" : 4567
+ },
+ {
+ "name" : "ACI",
+ "user" : "admin",
+ "pwd" : "Cisco123456",
+ "host" : "10.1.1.104"
+ }
+ ],
+ "enable_monitoring" : true,
+ "name" : "DEMO-ENVIRONMENT-SCHEME",
+ "distribution" : "Mirantis-8.0",
+ "last_scanned" : "filled-by-scanning",
+ "app_path" : "/home/scan/calipso_prod/app",
+ "scanned" : false,
+ "type_drivers" : "vxlan",
+ "mechanism_drivers" : [
+ "OVS"
+ ],
+ "user" : "wNLeBJxNDyw8G7Ssg",
+ "auth" : {
+ "edit-env" : [
+ "wNLeBJxNDyw8G7Ssg"
+ ],
+ "view-env" : [
+ "wNLeBJxNDyw8G7Ssg"
+ ]
+ },
+ "type" : "environment"
+}
+]
diff --git a/app/install/db/inventory.json b/app/install/db/inventory.json
new file mode 100644
index 0000000..be99137
--- /dev/null
+++ b/app/install/db/inventory.json
@@ -0,0 +1,3 @@
+{
+ "_id" : "xyz"
+}
diff --git a/app/install/db/link_types.json b/app/install/db/link_types.json
new file mode 100644
index 0000000..30a610c
--- /dev/null
+++ b/app/install/db/link_types.json
@@ -0,0 +1,184 @@
+[
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "instance-vnic",
+ "endPointA" : "instance",
+ "endPointB" : "vnic",
+ "type" : "instance-vnic"
+},
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "vnic-vconnector",
+ "endPointA" : "vnic",
+ "endPointB" : "vconnector",
+ "type" : "vnic-vconnector"
+},
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "vconnector-vedge",
+ "endPointA" : "vconnector",
+ "endPointB" : "vedge",
+ "type" : "vconnector-vedge"
+},
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "vedge-otep",
+ "endPointA" : "vedge",
+ "endPointB" : "otep",
+ "type" : "vedge-otep"
+},
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "vconnector-pnic",
+ "endPointA" : "vconnector",
+ "endPointB" : "pnic",
+ "type" : "vconnector-pnic"
+},
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "pnic-network",
+ "endPointA" : "pnic",
+ "endPointB" : "network",
+ "type" : "pnic-network"
+},
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "otep-vconnector",
+ "endPointA" : "otep",
+ "endPointB" : "vconnector",
+ "type" : "otep-vconnector"
+},
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "vnic-vedge",
+ "endPointA" : "vnic",
+ "endPointB" : "vedge",
+ "type" : "vnic-vedge"
+},
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "network-pnic",
+ "endPointA" : "network",
+ "endPointB" : "pnic",
+ "type" : "network-pnic"
+},
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "vedge-vconnector",
+ "endPointA" : "vedge",
+ "endPointB" : "vconnector",
+ "type" : "vedge-vconnector"
+},
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "vconnector-vnic",
+ "endPointA" : "vconnector",
+ "endPointB" : "vnic",
+ "type" : "vconnector-vnic"
+},
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "vnic-instance",
+ "endPointA" : "vnic",
+ "endPointB" : "instance",
+ "type" : "vnic-instance"
+},
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "vnic-vservice",
+ "endPointA" : "vnic",
+ "endPointB" : "vservice",
+ "type" : "vnic-vservice"
+},
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "vservice-vnic",
+ "endPointA" : "vservice",
+ "endPointB" : "vnic",
+ "type" : "vservice-vnic"
+},
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "pnic-vconnector",
+ "endPointA" : "pnic",
+ "endPointB" : "vconnector",
+ "type" : "pnic-vconnector"
+},
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "vconnector-otep",
+ "endPointA" : "vconnector",
+ "endPointB" : "otep",
+ "type" : "vconnector-otep"
+},
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "otep-vedge",
+ "endPointA" : "otep",
+ "endPointB" : "vedge",
+ "type" : "otep-vedge"
+},
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "vedge-vnic",
+ "endPointA" : "vedge",
+ "endPointB" : "vnic",
+ "type" : "vedge-vnic"
+},
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "pnic-host",
+ "endPointA" : "pnic",
+ "endPointB" : "host",
+ "type" : "pnic-host"
+},
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "host-pnic",
+ "endPointA" : "host",
+ "endPointB" : "pnic",
+ "type" : "host-pnic"
+},
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "host-network",
+ "endPointA" : "host",
+ "endPointB" : "network",
+ "type" : "host-network"
+},
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "network-host",
+ "endPointA" : "network",
+ "endPointB" : "host",
+ "type" : "network-host"
+},
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "network-switch_pnic",
+ "endPointA" : "network",
+ "endPointB" : "switch_pnic",
+ "type" : "network-switch_pnic"
+},
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "switch_pnic-network",
+ "endPointA" : "switch_pnic",
+ "endPointB" : "network",
+ "type" : "switch_pnic-network"
+},
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "switch_pnic-switch",
+ "endPointA" : "switch_pnic",
+ "endPointB" : "switch",
+ "type" : "switch_pnic-switch"
+},
+{
+ "user_id" : "WS7j8oTbWPf3LbNne",
+ "description" : "switch-switch_pnic",
+ "endPointA" : "switch",
+ "endPointB" : "switch_pnic",
+ "type" : "switch-switch_pnic"
+}
+]
diff --git a/app/install/db/links.json b/app/install/db/links.json
new file mode 100644
index 0000000..be99137
--- /dev/null
+++ b/app/install/db/links.json
@@ -0,0 +1,3 @@
+{
+ "_id" : "xyz"
+}
diff --git a/app/install/db/messages.json b/app/install/db/messages.json
new file mode 100644
index 0000000..b6c27cc
--- /dev/null
+++ b/app/install/db/messages.json
@@ -0,0 +1,44 @@
+[
+{
+ "message" : "2017-07-03 09:16:47,563 INFO: scanning : type=vnic, parent: (type=vnics_folder, name=vNICs, id=72bda9ec-2d0d-424c-8558-88ec22f09c95-vnics)",
+ "related_object" : null,
+ "received_timestamp" : null,
+ "finished_timestamp" : null,
+ "level" : "info",
+ "display_context" : null,
+ "viewed" : false,
+ "id" : "17350.33407.563988",
+ "source_system" : "CALIPSO",
+ "related_object_type" : null,
+ "timestamp" : "2017-07-03T09:16:47.563988",
+ "environment" : null
+},
+{
+ "related_object_type" : null,
+ "finished_timestamp" : null,
+ "environment" : null,
+ "received_timestamp" : null,
+ "related_object" : null,
+ "source_system" : "CALIPSO",
+ "message" : "2017-07-04 13:38:41,431 INFO: Started EventManager with following configuration:\nMongo config file path: /local_dir/calipso_mongo_access.conf\nCollection: environments_config\nPolling interval: 5 second(s)",
+ "timestamp" : "2017-07-04T13:38:41.431470",
+ "id" : "17351.49121.431470",
+ "level" : "info",
+ "viewed" : false,
+ "display_context" : null
+},
+{
+ "display_context" : null,
+ "level" : "info",
+ "id" : "17351.49126.596498",
+ "environment" : null,
+ "finished_timestamp" : null,
+ "viewed" : false,
+ "message" : "2017-07-04 13:38:46,596 INFO: Started ScanManager with following configuration:\nMongo config file path: /local_dir/calipso_mongo_access.conf\nScans collection: scans\nEnvironments collection: environments_config\nPolling interval: 1 second(s)",
+ "timestamp" : "2017-07-04T13:38:46.596498",
+ "related_object" : null,
+ "received_timestamp" : null,
+ "related_object_type" : null,
+ "source_system" : "CALIPSO"
+}
+]
diff --git a/app/install/db/meteor_accounts_loginServiceConfiguration.json b/app/install/db/meteor_accounts_loginServiceConfiguration.json
new file mode 100644
index 0000000..be99137
--- /dev/null
+++ b/app/install/db/meteor_accounts_loginServiceConfiguration.json
@@ -0,0 +1,3 @@
+{
+ "_id" : "xyz"
+}
diff --git a/app/install/db/monitoring_config.json b/app/install/db/monitoring_config.json
new file mode 100644
index 0000000..be99137
--- /dev/null
+++ b/app/install/db/monitoring_config.json
@@ -0,0 +1,3 @@
+{
+ "_id" : "xyz"
+}
diff --git a/app/install/db/monitoring_config_templates.json b/app/install/db/monitoring_config_templates.json
new file mode 100644
index 0000000..2e6d9ba
--- /dev/null
+++ b/app/install/db/monitoring_config_templates.json
@@ -0,0 +1,378 @@
+[
+{
+ "side" : "client",
+ "order" : "1",
+ "config" : {
+ "rabbitmq" : {
+ "port" : "{rabbitmq_port}",
+ "vhost" : "/sensu",
+ "password" : "{rabbitmq_pass}",
+ "host" : "{server_ip}",
+ "user" : "{rabbitmq_user}",
+ "ssl" : {
+ "cert_chain_file" : "/etc/sensu/ssl/cert.pem",
+ "private_key_file" : "/etc/sensu/ssl/key.pem"
+ }
+ }
+ },
+ "monitoring_system" : "sensu",
+ "type" : "rabbitmq.json"
+},
+{
+ "side" : "client",
+ "order" : "1",
+ "config" : {
+ "transport" : {
+ "name" : "rabbitmq",
+ "reconnect_on_error" : true
+ }
+ },
+ "monitoring_system" : "sensu",
+ "type" : "transport.json"
+},
+{
+ "side" : "server",
+ "order" : "1",
+ "config" : {
+ "redis" : {
+ "port" : "6379",
+ "host" : "127.0.0.1"
+ }
+ },
+ "monitoring_system" : "sensu",
+ "type" : "redis.json"
+},
+{
+ "side" : "client",
+ "order" : "1",
+ "config" : {
+ "api" : {
+ "port" : 4567,
+ "host" : "{server_ip}"
+ },
+ "client" : {
+ "address" : "{client_name}",
+ "subscriptions" : [
+
+ ],
+ "name" : "{client_name}",
+ "environment" : "{env_name}"
+ }
+ },
+ "monitoring_system" : "sensu",
+ "type" : "client.json"
+},
+{
+ "side" : "server",
+ "order" : "1",
+ "config" : {
+ "transport" : {
+ "name" : "rabbitmq",
+ "reconnect_on_error" : true
+ }
+ },
+ "monitoring_system" : "sensu",
+ "type" : "transport.json"
+},
+{
+ "side" : "client",
+ "order" : "1",
+ "config" : {
+ "checks" : {
+ "{objtype}_{objid}_{portid}" : {
+ "interval" : 15,
+ "command" : "check_ping.py -c 10 -i 0.5 -p 4f532d444e41 -w 10 -s 256 -f {otep_src_ip} -t {otep_dest_ip} -W 1%/301.11/600 -C 10%/1020.12/2000",
+ "standalone" : true,
+ "type": "metric",
+ "subscribers" : [
+ "base"
+ ],
+ "handlers" : [
+ "default",
+ "file",
+ "osdna-monitor"
+ ]
+ }
+ }
+ },
+ "monitoring_system" : "sensu",
+ "type" : "client_check_otep.json"
+},
+{
+ "side" : "server",
+ "order" : "1",
+ "config" : {
+ "rabbitmq" : {
+ "port" : "{rabbitmq_port}",
+ "vhost" : "/sensu",
+ "password" : "{rabbitmq_pass}",
+ "host" : "{server_ip}",
+ "user" : "{rabbitmq_user}",
+ "ssl" : {
+ "cert_chain_file" : "/etc/sensu/ssl/cert.pem",
+ "private_key_file" : "/etc/sensu/ssl/key.pem"
+ }
+ }
+ },
+ "monitoring_system" : "sensu",
+ "type" : "rabbitmq.json"
+},
+{
+ "side" : "server",
+ "order" : "1",
+ "config" : {
+ "api" : {
+ "port" : 4567,
+ "host" : "{server_ip}",
+ "bind" : "0.0.0.0"
+ }
+ },
+ "monitoring_system" : "sensu",
+ "type" : "api.json"
+},
+{
+ "side" : "server",
+ "order" : "1",
+ "config" : {
+ "client" : {
+ "address" : "sensu-server",
+ "socket" : {
+ "port" : 3030,
+ "bind" : "127.0.0.1"
+ },
+ "subscriptions" : [
+ "dev",
+ "base",
+ "test"
+ ],
+ "name" : "{server_name}",
+ "environment" : "{env_type}"
+ },
+ "keepalive" : {
+ "handlers" : [
+ "default",
+ "file"
+ ]
+ }
+ },
+ "monitoring_system" : "sensu",
+ "type" : "client.json"
+},
+{
+ "side" : "server",
+ "order" : "1",
+ "config" : {
+ "filters" : {
+ "state_change_only" : {
+ "negate" : true,
+ "attributes" : {
+ "check" : {
+ "history" : "eval: value.last == value[-2]"
+ }
+ }
+
+ }
+ }
+ },
+ "monitoring_system" : "sensu",
+ "type" : "filters.json"
+},
+{
+ "side" : "server",
+ "order" : "1",
+ "config" : {
+ "handlers" : {
+ "osdna-monitor" : {
+ "timeout" : 20,
+ "command" : "PYTHONPATH={app_path} {app_path}/monitoring/handlers/monitor.py -m /local_dir/calipso_mongo_access.conf",
+ "type" : "pipe",
+ "filter" : "state_change_only"
+ },
+ "file" : {
+ "timeout" : 20,
+ "command" : "/etc/sensu/plugins/event-file.rb",
+ "type" : "pipe",
+ "filter" : "state_change_only"
+ }
+ }
+ },
+ "monitoring_system" : "sensu",
+ "type" : "handlers.json"
+},
+{
+ "type" : "client_check_vedge.json",
+ "side" : "client",
+ "condition" : {
+ "mechanism_drivers" : [
+ "VPP"
+ ]
+ },
+ "config" : {
+ "checks" : {
+ "{objtype}_{objid}" : {
+ "interval" : 15,
+ "command" : "check_vedge_vpp.py",
+ "standalone" : true,
+ "type": "metric",
+ "subscribers" : [
+ "base"
+ ],
+ "handlers" : [
+ "default",
+ "file",
+ "osdna-monitor"
+ ]
+ }
+ }
+ },
+ "monitoring_system" : "sensu",
+ "order" : "1"
+},
+{
+ "side" : "client",
+ "order" : "1",
+ "condition" : {
+ "mechanism_drivers" : [
+ "VPP"
+ ]
+ },
+ "config" : {
+ "checks" : {
+ "{objtype}_{vnictype}_{objid}" : {
+ "interval" : 15,
+ "command" : "check_vnic_vpp.py",
+ "standalone" : true,
+ "type": "metric",
+ "subscribers" : [
+ "base"
+ ],
+ "handlers" : [
+ "default",
+ "file",
+ "osdna-monitor"
+ ]
+ }
+ }
+ },
+ "monitoring_system" : "sensu",
+ "type" : "client_check_vnic.json"
+},
+{
+ "side" : "client",
+ "config" : {
+ "checks" : {
+ "{objtype}_{objid}" : {
+ "interval" : 15,
+ "command" : "check_vedge_ovs.py",
+ "standalone" : true,
+ "type": "metric",
+ "subscribers" : [
+ "base"
+ ],
+ "handlers" : [
+ "default",
+ "file",
+ "osdna-monitor"
+ ]
+ }
+ }
+ },
+ "type" : "client_check_vedge.json",
+ "condition" : {
+ "mechanism_drivers" : [
+ "OVS"
+ ]
+ },
+ "monitoring_system" : "sensu",
+ "order" : "1"
+},
+{
+ "side" : "client",
+ "order" : "1",
+ "condition" : {
+ "mechanism_drivers" : [
+ "OVS",
+ "LXB"
+ ]
+ },
+ "config" : {
+ "checks" : {
+ "link_{linktype}_{fromobjid}_{toobjid}" : {
+ "interval" : 15,
+ "command" : "check_vnic_vconnector.py {bridge} {mac_address}",
+ "standalone" : true,
+ "type": "metric",
+ "subscribers" : [
+ "base"
+ ],
+ "handlers" : [
+ "default",
+ "file",
+ "osdna-monitor"
+ ]
+ }
+ }
+ },
+ "monitoring_system" : "sensu",
+ "type" : "client_check_link_vnic-vconnector.json"
+},
+{
+ "side" : "client",
+ "order" : "1",
+ "condition" : {
+ "mechanism_drivers" : [
+ "VPP"
+ ]
+ },
+ "config" : {
+ "checks" : {
+ "{objtype}_{objid}" : {
+ "interval" : 15,
+ "command" : "check_pnic_vpp.py",
+ "standalone" : true,
+ "type": "metric",
+ "subscribers" : [
+ "base"
+ ],
+ "handlers" : [
+ "default",
+ "file",
+ "osdna-monitor"
+ ]
+ }
+ }
+ },
+ "monitoring_system" : "sensu",
+ "type" : "client_check_pnic.json"
+},
+{
+ "side" : "client",
+ "order" : "1",
+ "condition" : {
+ "mechanism_drivers" : [
+ "OVS",
+ "LXB"
+ ]
+ },
+ "config" : {
+ "checks" : {
+ "{objtype}_{objid}" : {
+ "interval" : 15,
+ "command" : "PYTHONPATH=/etc/sensu/plugins check_vservice.py {service_type} {local_service_id}",
+ "standalone" : true,
+ "type": "metric",
+ "subscribers" : [
+ "base"
+ ],
+ "handlers" : [
+ "default",
+ "file",
+ "osdna-monitor"
+ ]
+ }
+ }
+ },
+ "monitoring_system" : "sensu",
+ "type" : "client_check_vservice.json"
+}
+]
diff --git a/app/install/db/network_agent_types.json b/app/install/db/network_agent_types.json
new file mode 100644
index 0000000..ba01ef2
--- /dev/null
+++ b/app/install/db/network_agent_types.json
@@ -0,0 +1,52 @@
+[
+{
+ "folder_text" : "VPNs",
+ "description" : "VPN agent",
+ "type" : "vpn"
+},
+{
+ "folder_text" : "Firewalls",
+ "description" : "Firewall agent",
+ "type" : "firewall"
+},
+{
+ "folder_text" : "vEdges",
+ "description" : "L2 agent",
+ "type" : "vedge"
+},
+{
+ "folder_text" : "Gateways",
+ "description" : "L3 agent",
+ "type" : "router"
+},
+{
+ "folder_text" : "Metadata",
+ "description" : "Metadata agent",
+ "type" : "metadata"
+},
+{
+ "folder_text" : "Load-Balancers",
+ "description" : "Load Balancing agent",
+ "type" : "load_balancer"
+},
+{
+ "folder_text" : "vEdges",
+ "description" : "Open vSwitch agent",
+ "type" : "vedge"
+},
+{
+ "folder_text" : "vConnectors",
+ "description" : "Linux bridge agent",
+ "type" : "vconnector"
+},
+{
+ "folder_text" : "DHCP servers",
+ "description" : "DHCP agent",
+ "type" : "dhcp"
+},
+{
+ "folder_text" : "Orchestrators",
+ "description" : "Orchestrator",
+ "type" : "orchestrator"
+}
+]
diff --git a/app/install/db/roles.json b/app/install/db/roles.json
new file mode 100644
index 0000000..78a7c75
--- /dev/null
+++ b/app/install/db/roles.json
@@ -0,0 +1,26 @@
+[
+{
+ "_id" : "z5W5wQqTnSHMYxXRB",
+ "name" : "manage-users"
+},
+{
+ "_id" : "q3o6nbR3xY88KyCkt",
+ "name" : "manage-link-types"
+},
+{
+ "_id" : "odxGpkgQ3oexmXYhc",
+ "name" : "manage-clique-types"
+},
+{
+ "_id" : "RcEgj6kJnkPqpyJzR",
+ "name" : "manage-clique-constraints"
+},
+{
+ "_id" : "7gs3Fyow2Cryc4cdf",
+ "name" : "view-env"
+},
+{
+ "_id" : "DyhzqcNymdYgkzyie",
+ "name" : "edit-env"
+}
+]
diff --git a/app/install/db/scans.json b/app/install/db/scans.json
new file mode 100644
index 0000000..cb480d2
--- /dev/null
+++ b/app/install/db/scans.json
@@ -0,0 +1,24 @@
+[
+{
+ "status" : "completed",
+ "inventory" : "inventory",
+ "start_timestamp" : "2017-05-17T11:00:17.939+0000",
+ "scan_only_inventory" : false,
+ "scan_only_links" : false,
+ "submit_timestamp" : "2017-05-17T07:53:09.194+0000",
+ "log_level" : "warning",
+ "scan_only_cliques" : false,
+ "clear" : true,
+ "environment" : "Mirantis-Liberty"
+},
+{
+ "status" : "failed",
+ "scan_only_inventory" : false,
+ "scan_only_links" : false,
+ "submit_timestamp" : "2017-06-14T13:42:32.710+0000",
+ "log_level" : "info",
+ "scan_only_cliques" : false,
+ "clear" : true,
+ "environment" : "staging"
+}
+]
diff --git a/app/install/db/scheduled_scans.json b/app/install/db/scheduled_scans.json
new file mode 100644
index 0000000..8f0c516
--- /dev/null
+++ b/app/install/db/scheduled_scans.json
@@ -0,0 +1,43 @@
+[
+{
+ "clear" : true,
+ "environment" : "staging",
+ "freq" : "WEEKLY",
+ "log_level" : "warning",
+ "scan_only_cliques" : false,
+ "scan_only_inventory" : true,
+ "scan_only_links" : false,
+ "submit_timestamp" : "2017-07-02T13:46:29.206+0000"
+},
+{
+ "clear" : false,
+ "environment" : "staging",
+ "freq" : "WEEKLY",
+ "log_level" : "warning",
+ "scan_only_cliques" : false,
+ "scan_only_inventory" : true,
+ "scan_only_links" : false,
+ "submit_timestamp" : "2017-07-02T13:46:36.079+0000"
+},
+{
+ "clear" : false,
+ "environment" : "staging",
+ "freq" : "WEEKLY",
+ "log_level" : "warning",
+ "scan_only_cliques" : false,
+ "scan_only_inventory" : true,
+ "scan_only_links" : false,
+ "submit_timestamp" : "2017-07-02T13:46:41.004+0000"
+},
+{
+ "scan_only_inventory" : true,
+ "freq" : "WEEKLY",
+ "scan_only_links" : false,
+ "scan_only_cliques" : false,
+ "log_level" : "warning",
+ "environment" : "staging",
+ "submit_timestamp" : "2017-07-02T14:38:09.032+0000",
+ "scheduled_timestamp" : "2017-07-08T14:38:09.032+0000",
+ "clear" : true
+}
+]
diff --git a/app/install/db/statistics.json b/app/install/db/statistics.json
new file mode 100644
index 0000000..195e0ec
--- /dev/null
+++ b/app/install/db/statistics.json
@@ -0,0 +1,23 @@
+[
+{
+ "recordGenerationMillis" : 1487147277000,
+ "object_id" : "devstack-vpp2-VPP",
+ "sample_time" : "2017-02-15T08:15:10Z",
+ "ingressInterface" : 2,
+ "averageArrivalNanoSeconds" : 1487146510773984049,
+ "destinationMacAddress" : "fa:16:3e:5d:7b:ae",
+ "destination" : "iperf2",
+ "packetCount" : 2,
+ "source" : "iperf1",
+ "object_type" : "vedge",
+ "sourceMacAddress" : "fa:16:3e:58:64:c6",
+ "ethernetType" : 2048,
+ "egressInterface" : 3,
+ "environment" : "Devstack-VPP",
+ "flowType" : "L2",
+ "data_arrival_avg" : 1487146510,
+ "type" : "vedge_flows",
+ "averageThroughput" : 17280,
+ "hostIp" : "10.56.20.78"
+}
+]
diff --git a/app/install/db/supported_environments.json b/app/install/db/supported_environments.json
new file mode 100644
index 0000000..1214ef3
--- /dev/null
+++ b/app/install/db/supported_environments.json
@@ -0,0 +1,230 @@
+[
+{
+ "environment" : {
+ "distribution" : "Stratoscale-v2.1.6",
+ "mechanism_drivers" : "OVS",
+ "type_drivers" : "vlan"
+ },
+ "features" : {
+ "monitoring" : true,
+ "listening" : false,
+ "scanning" : true
+ }
+},
+{
+ "environment" : {
+ "distribution" : "Mirantis-6.0",
+ "mechanism_drivers" : "OVS",
+ "type_drivers" : "vxlan"
+ },
+ "features" : {
+ "monitoring" : true,
+ "listening" : true,
+ "scanning" : true
+ }
+},
+{
+ "environment" : {
+ "distribution" : "Mirantis-7.0",
+ "mechanism_drivers" : "OVS",
+ "type_drivers" : "vxlan"
+ },
+ "features" : {
+ "monitoring" : true,
+ "listening" : true,
+ "scanning" : true
+ }
+},
+{
+ "environment" : {
+ "distribution" : "Mirantis-8.0",
+ "mechanism_drivers" : "OVS",
+ "type_drivers" : "vxlan"
+ },
+ "features" : {
+ "monitoring" : true,
+ "listening" : true,
+ "scanning" : true
+ }
+},
+{
+ "environment" : {
+ "distribution" : "Mirantis-9.1",
+ "mechanism_drivers" : "OVS",
+ "type_drivers" : "vxlan"
+ },
+ "features" : {
+ "monitoring" : true,
+ "listening" : false,
+ "scanning" : true
+ }
+},
+{
+ "environment" : {
+ "distribution" : "RDO-Mitaka",
+ "mechanism_drivers" : "OVS",
+ "type_drivers" : "vxlan"
+ },
+ "features" : {
+ "monitoring" : true,
+ "listening" : true,
+ "scanning" : true
+ }
+},
+{
+ "environment" : {
+ "distribution" : "RDO-Liberty",
+ "mechanism_drivers" : "OVS",
+ "type_drivers" : "vxlan"
+ },
+ "features" : {
+ "monitoring" : true,
+ "listening" : true,
+ "scanning" : true
+ }
+},
+{
+ "environment" : {
+ "distribution" : "Mirantis-9.0",
+ "mechanism_drivers" : "OVS",
+ "type_drivers" : "vxlan"
+ },
+ "features" : {
+ "monitoring" : true,
+ "listening" : true,
+ "scanning" : true
+ }
+},
+{
+ "environment" : {
+ "distribution" : "Mirantis-9.0",
+ "mechanism_drivers" : "OVS",
+ "type_drivers" : "vlan"
+ },
+ "features" : {
+ "monitoring" : true,
+ "listening" : true,
+ "scanning" : true
+ }
+},
+{
+ "environment" : {
+ "distribution" : "Mirantis-8.0",
+ "mechanism_drivers" : "OVS",
+ "type_drivers" : "vlan"
+ },
+ "features" : {
+ "monitoring" : true,
+ "listening" : true,
+ "scanning" : true
+ }
+},
+{
+ "environment" : {
+ "distribution" : "Mirantis-6.0",
+ "mechanism_drivers" : "OVS",
+ "type_drivers" : "vlan"
+ },
+ "features" : {
+ "monitoring" : true,
+ "listening" : true,
+ "scanning" : true
+ }
+},
+{
+ "environment" : {
+ "distribution" : "Mirantis-7.0",
+ "mechanism_drivers" : "OVS",
+ "type_drivers" : "vlan"
+ },
+ "features" : {
+ "monitoring" : true,
+ "listening" : true,
+ "scanning" : true
+ }
+},
+{
+ "environment" : {
+ "distribution" : "Mirantis-9.1",
+ "mechanism_drivers" : "OVS",
+ "type_drivers" : "vlan"
+ },
+ "features" : {
+ "monitoring" : true,
+ "listening" : true,
+ "scanning" : true
+ }
+},
+{
+ "environment" : {
+ "distribution" : "RDO-Mitaka",
+ "mechanism_drivers" : "VPP",
+ "type_drivers" : "vxlan"
+ },
+ "features" : {
+ "monitoring" : true,
+ "listening" : true,
+ "scanning" : true
+ }
+},
+{
+ "environment" : {
+ "distribution" : "RDO-Mitaka",
+ "mechanism_drivers" : "VPP",
+ "type_drivers" : "vlan"
+ },
+ "features" : {
+ "monitoring" : true,
+ "listening" : true,
+ "scanning" : true
+ }
+},
+{
+ "environment" : {
+ "distribution" : "Devstack-Mitaka",
+ "mechanism_drivers" : "VPP",
+ "type_drivers" : "vlan"
+ },
+ "features" : {
+ "monitoring" : true,
+ "listening" : true,
+ "scanning" : true
+ }
+},
+{
+ "environment" : {
+ "distribution" : "Devstack-Mitaka",
+ "mechanism_drivers" : "VPP",
+ "type_drivers" : "vxlan"
+ },
+ "features" : {
+ "monitoring" : true,
+ "listening" : true,
+ "scanning" : true
+ }
+},
+{
+ "environment" : {
+ "distribution" : "RDO-Mitaka",
+ "mechanism_drivers" : "OVS",
+ "type_drivers" : "vlan"
+ },
+ "features" : {
+ "monitoring" : true,
+ "listening" : true,
+ "scanning" : true
+ }
+},
+{
+ "environment" : {
+ "distribution" : "RDO-Liberty",
+ "mechanism_drivers" : "OVS",
+ "type_drivers" : "vlan"
+ },
+ "features" : {
+ "monitoring" : true,
+ "listening" : true,
+ "scanning" : true
+ }
+}
+]
diff --git a/app/install/db/users.json b/app/install/db/users.json
new file mode 100644
index 0000000..a0ccdb3
--- /dev/null
+++ b/app/install/db/users.json
@@ -0,0 +1,51 @@
+[
+{
+ "_id" : "wNLeBJxNDyw8G7Ssg",
+ "createdAt" : "2017-06-22T18:20:43.963+0000",
+ "services" : {
+ "password" : {
+ "bcrypt" : "$2a$10$LRnem9Y5OIo0hYpqi8JY5.G7uMp1LKAyHiq.ha2xHWmbPRo7CJuUS"
+ },
+ "resume" : {
+ "loginTokens" : [
+ {
+ "when" : "2017-06-29T16:41:06.183+0000",
+ "hashedToken" : "x2WrmQXpX6slx1/y+ReTuTZAqmPFMCX1ZI+N9COkK7c="
+ },
+ {
+ "when" : "2017-07-02T08:22:34.591+0000",
+ "hashedToken" : "Ls5V0a2I90A5TWWYU8kIZ5ByJR/fK8Kt5R65ch/iPt8="
+ },
+ {
+ "when" : "2017-07-02T15:02:09.357+0000",
+ "hashedToken" : "tEHXx9BGQUATbrHEaQyXm693Dfe5mzf8QPM9zpGnykE="
+ },
+ {
+ "when" : "2017-07-03T06:34:38.405+0000",
+ "hashedToken" : "WiI6vfcQ9zAnMN8SqBmfrF14ndBVLAQzhpsf9DZarRA="
+ }
+ ]
+ }
+ },
+ "username" : "admin",
+ "emails" : [
+ {
+ "address" : "admin@example.com",
+ "verified" : false
+ }
+ ],
+ "profile" : {
+ "name" : "admin"
+ },
+ "roles" : {
+ "__global_roles__" : [
+ "manage-users",
+ "manage-link-types",
+ "manage-clique-types",
+ "manage-clique-constraints",
+ "view-env",
+ "edit-env"
+ ]
+ }
+}
+]
diff --git a/app/install/ldap.conf.example b/app/install/ldap.conf.example
new file mode 100644
index 0000000..b1798f7
--- /dev/null
+++ b/app/install/ldap.conf.example
@@ -0,0 +1,10 @@
+user admin
+password password
+url ldap://korlev-calipso-dev.cisco.com:389
+user_id_attribute CN
+user_pass_attribute userpassword
+user_objectclass inetOrgPerson
+user_tree_dn OU=Users,DC=openstack,DC=org
+query_scope one
+tls_req_cert allow
+group_member_attribute member
diff --git a/app/messages/message.py b/app/messages/message.py
new file mode 100644
index 0000000..03c9069
--- /dev/null
+++ b/app/messages/message.py
@@ -0,0 +1,65 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+from typing import Union
+
+from bson import ObjectId
+
+
class Message:
    """A normalized record of a message flowing through the system.

    Bundles the raw payload with its origin, severity, related object and
    timestamps; get() serializes the whole record into a plain dict.
    """

    # Recognized severity levels, least to most severe.
    LEVELS = ['info', 'warn', 'error']
    DEFAULT_LEVEL = LEVELS[0]

    def __init__(self,
                 msg_id: str,
                 msg: dict,
                 source: str,
                 env: str = None,
                 object_id: Union[str, ObjectId] = None,
                 display_context: Union[str, ObjectId] = None,
                 level: str = DEFAULT_LEVEL,
                 object_type: str = None,
                 ts: str = None,
                 received_ts: str = None,
                 finished_ts: str = None):
        """Build the record; unrecognized levels degrade to DEFAULT_LEVEL."""
        super().__init__()

        # Severity is matched case-insensitively; anything unknown (or a
        # falsy value) silently falls back to the default instead of raising.
        wanted = level.lower() if level else None
        self.level = wanted if wanted in self.LEVELS else self.DEFAULT_LEVEL

        self.id = msg_id
        self.environment = env
        self.source_system = source
        self.related_object = object_id
        self.related_object_type = object_type
        self.display_context = display_context
        self.message = msg
        # When no explicit timestamp was supplied, use the receive time.
        self.timestamp = ts or received_ts
        self.received_timestamp = received_ts
        self.finished_timestamp = finished_ts
        self.viewed = False

    def get(self):
        """Serialize this message into a plain dict."""
        pairs = (
            ("id", self.id),
            ("environment", self.environment),
            ("source_system", self.source_system),
            ("related_object", self.related_object),
            ("related_object_type", self.related_object_type),
            ("display_context", self.display_context),
            ("level", self.level),
            ("message", self.message),
            ("timestamp", self.timestamp),
            ("received_timestamp", self.received_timestamp),
            ("finished_timestamp", self.finished_timestamp),
            ("viewed", self.viewed),
        )
        return dict(pairs)
diff --git a/app/monitoring/__init__.py b/app/monitoring/__init__.py
new file mode 100644
index 0000000..1e85a2a
--- /dev/null
+++ b/app/monitoring/__init__.py
@@ -0,0 +1,10 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
diff --git a/app/monitoring/checks/binary_converter.py b/app/monitoring/checks/binary_converter.py
new file mode 100644
index 0000000..4da1107
--- /dev/null
+++ b/app/monitoring/checks/binary_converter.py
@@ -0,0 +1,17 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
def binary2str(txt):
    """Best-effort conversion of *txt* to str.

    bytes values are decoded as UTF-8; anything that is not bytes, or
    bytes that are not valid UTF-8, is rendered with str().
    """
    if not isinstance(txt, bytes):
        return str(txt)
    try:
        return txt.decode("utf-8")
    except UnicodeDecodeError:
        # BUG FIX: decode() signals bad input with UnicodeDecodeError, not
        # the TypeError originally caught here, so invalid UTF-8 used to
        # propagate instead of falling back to str().
        return str(txt)
diff --git a/app/monitoring/checks/check_interface.py b/app/monitoring/checks/check_interface.py
new file mode 100755
index 0000000..4140dfe
--- /dev/null
+++ b/app/monitoring/checks/check_interface.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python3
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
+import re
+import sys
+import subprocess
+
+from binary_converter import binary2str
+
+
+# Sensu/Nagios-style NIC check: prints the interface status and exits 0
+# when the interface is UP, 2 otherwise (presumably consumed by the
+# monitoring framework's handler -- confirm against the check config).
+if len(sys.argv) < 2:
+    print('name of interface must be specified')
+    exit(2)
+nic_name = str(sys.argv[1])
+
+rc = 0
+
+try:
+    # shell=True so 'ifconfig <nic>' resolves through the shell PATH;
+    # nic_name comes from the check definition, not from end users.
+    out = subprocess.check_output(["ifconfig " + nic_name],
+                                  stderr=subprocess.STDOUT,
+                                  shell=True)
+    out = binary2str(out)
+    lines = out.splitlines()
+    # Scan from the second line for the flags line containing BROADCAST
+    # (assumes legacy net-tools ifconfig layout -- TODO confirm; on some
+    # distributions the flags appear on the first line instead).
+    line_number = 1
+    line = -1
+    while line_number < len(lines):
+        line = lines[line_number]
+        if ' BROADCAST ' in line:
+            break
+        line_number += 1
+    # NOTE(review): if the output has fewer than 2 lines, 'line' is still
+    # the int -1 here and re.match() below would raise TypeError.
+    state_match = re.match('^\W+([A-Z]+)', line)
+    if not state_match:
+        rc = 2
+        print('Error: failed to find status in ifconfig output: ' + out)
+    else:
+        # First all-caps word of the flags line, e.g. 'UP'.
+        rc = 0 if state_match.group(1) == 'UP' else 2
+    print(out)
+except subprocess.CalledProcessError as e:
+    print("Error finding NIC {}: {}\n".format(nic_name, binary2str(e.output)))
+    rc = 2
+
+exit(rc)
diff --git a/app/monitoring/checks/check_ping.py b/app/monitoring/checks/check_ping.py
new file mode 100755
index 0000000..35e7234
--- /dev/null
+++ b/app/monitoring/checks/check_ping.py
@@ -0,0 +1,121 @@
+#!/usr/bin/env python3
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) #
+# and others #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+###############################################################################
+
+import argparse
+import re
+import sys
+import subprocess
+
+from binary_converter import binary2str
+
+
+# Fail fast when invoked with no CLI arguments at all; the detailed
+# argument validation (including the mandatory --target) is done later
+# by get_args() / the explicit args.target check.
+if len(sys.argv) < 2:
+    raise ValueError('destination address must be specified')
+
+
def thresholds_string(string):
    """argparse type-callable validating a ping thresholds argument.

    Expected form: '<loss>%/<avg-rtt>/<max-rtt>', e.g. '1%/300/600' or
    '1%/300.5/600.25'. Returns the string unchanged when valid; raises
    argparse.ArgumentTypeError otherwise (argparse's convention).
    """
    # fullmatch (vs. the original prefix-only match) rejects trailing
    # junk such as '1%/300/600xyz'; raw string avoids invalid-escape
    # warnings for \d.
    if not re.fullmatch(r'\d+%/\d+([.]\d+)?/\d+([.]\d+)?', string):
        msg = "%r is not a valid thresholds string" % string
        raise argparse.ArgumentTypeError(msg)
    return string
+
+
def get_args():
    """Build the argument parser for this ping check and parse sys.argv.

    Returns the parsed argparse.Namespace.

    BUG FIX: argparse applies %-formatting to help strings, so literal
    percent signs must be escaped as '%%' -- the original single '%' made
    '--help' crash with "ValueError: unsupported format character".
    """
    # try to read scan plan from command line parameters
    parser = argparse.ArgumentParser()

    parser.add_argument("-W", "--warning", nargs="?",
                        type=thresholds_string,
                        default='1%/300/600',
                        help="warning thresholds: packet-loss "
                             "(%%)/avg-rtt (ms)/max-rtt (ms)"
                             "(example: 1%%/300ms/600ms)")
    parser.add_argument("-C", "--critical", nargs="?",
                        type=thresholds_string,
                        default='10%/1000/2000',
                        help="critical thresholds: packet-loss "
                             "(%%)/avg-rtt (ms)/max-rtt (ms)"
                             "(example: 1%%/300ms/600ms)")
    parser.add_argument("-f", "--source", nargs="?", type=str, default='',
                        help="source address")
    parser.add_argument("-t", "--target", nargs="?", type=str, default='',
                        help="target address")
    parser.add_argument("-c", "--count", nargs="?", type=int, default=5,
                        help="how many packets will be sent")
    parser.add_argument("-i", "--interval", nargs="?", type=float, default=0.5,
                        help="seconds between sending each packet")
    parser.add_argument("-p", "--pattern", nargs="?", type=str,
                        default='OS-DNA', help="pattern to pad packet with")
    parser.add_argument("-w", "--wait", nargs="?", type=int, default=5,
                        help="seconds to wait for completion of all responses")
    # Help text fixed: original was a garbled copy-paste of the --wait text.
    parser.add_argument("-s", "--packetsize", nargs="?", type=int, default=256,
                        help="size of packet to send")
    return parser.parse_args()
+
# Sensu/Nagios-style exit codes: 0 = OK, 1 = warning, 2 = critical.
args = get_args()

if not args.target:
    raise ValueError('target address must be specified')

rc = 0

try:
    # '-I <source>' is only included when a source address was requested.
    cmd = "ping -c {} -i {} -p {} -w {} -s {} {}{} {}".format(
        args.count, args.interval,
        args.pattern, args.wait,
        args.packetsize,
        '-I ' if args.source else '',
        args.source, args.target)
    out = subprocess.check_output([cmd],
                                  stderr=subprocess.STDOUT,
                                  shell=True)
    out = binary2str(out)
except subprocess.CalledProcessError as e:
    # BUG FIX: the original only printed here and fell through, then
    # crashed with a NameError because 'out' was never assigned. Report
    # the failure and exit critical immediately instead.
    print("Error doing ping: {}\n".format(binary2str(e.output)))
    exit(2)

# find packet loss data
packet_loss_match = re.search(r'(\d+)[%] packet loss', out, re.M)
if not packet_loss_match:
    out += '\npacket loss data not found'
    rc = 2

# find rtt avg/max data
rtt_results = None
if rc < 2:
    regexp = r'rtt min/avg/max/mdev = [0-9.]+/([0-9.]+)/([0-9.]+)/[0-9.]+ ms'
    rtt_results = re.search(regexp, out, re.M)
    if not rtt_results:
        out += '\nrtt results not found'
        rc = 2
if rc < 2:
    packet_loss = int(packet_loss_match.group(1))
    avg_rtt = float(rtt_results.group(1))
    max_rtt = float(rtt_results.group(2))
    # Thresholds arrive as '<loss>%/<avg-rtt>/<max-rtt>' strings already
    # validated by thresholds_string() in get_args().
    thresholds_regexp = r'(\d+)%/(\d+[.0-9]*)/(\d+[.0-9]*)'
    warn_threshold_match = re.match(thresholds_regexp, args.warning)
    critical_threshold_match = re.match(thresholds_regexp, args.critical)
    packet_loss_warn = int(warn_threshold_match.group(1))
    packet_loss_critical = int(critical_threshold_match.group(1))
    avg_rtt_warn = float(warn_threshold_match.group(2))
    avg_rtt_critical = float(critical_threshold_match.group(2))
    max_rtt_warn = float(warn_threshold_match.group(3))
    max_rtt_critical = float(critical_threshold_match.group(3))
    # Critical thresholds take precedence over warning thresholds.
    if packet_loss > packet_loss_critical or avg_rtt >= avg_rtt_critical or \
            max_rtt >= max_rtt_critical:
        rc = 2
    elif packet_loss > packet_loss_warn or avg_rtt >= avg_rtt_warn or \
            max_rtt >= max_rtt_warn:
        rc = 1

print(out)
exit(rc)
diff --git a/app/monitoring/checks/check_pnic_vpp.py b/app/monitoring/checks/check_pnic_vpp.py
new file mode 100755
index 0000000..942fdc2
--- /dev/null
+++ b/